From 877246acc9b79c97c905efb3f57e1b62751eb81c Mon Sep 17 00:00:00 2001 From: "Arno.Edwards" <138990495+Umpire2018@users.noreply.github.com> Date: Thu, 21 Nov 2024 15:02:46 +0800 Subject: [PATCH] feat(settings): remove Config Manager and optimize initialization (#83) - Introduce lazy loading to optimize performance by delaying resource initialization until needed. - Remove `config_manager` and the config file saving functionality. - Update default model names and remove context window handling. - Introduce `llama_index.llms.openai` for LLM communication to improve integration and compatibility with updated LLM protocols. - Introduce `ChatPromptTemplate` and `PromptTemplate` to manage prompts more efficiently, keep them consistent, and make chat prompts customizable. - Streamline the `ChatEngine` implementation for readability and maintainability. - Fix an issue where the logger failed to capture the full error stack trace in certain cases. - Remove the `tiktoken` dependency and retrieve token usage directly from the API response. - Add import sorting for improved readability and consistency. - Remove deprecated test cases. - Remove the deprecated `config.toml.template` file. - Remove `sync_func` to prevent a TypeError and ensure files are saved. - Remove the model name restriction. - Update the README to mention GitHub Actions support. - Migrate to LlamaIndex for enhanced functionality. - Integrate `llama_index.vector_stores.chroma` and `llama_index.embeddings.openai` for improved vector storage and embeddings. - Add semantic chunking for better document segmentation. - Add an `initialize_with_params` method for dynamic initialization. - Change `print_hierarchy` from a subcommand to an option of the `run` command. - Define `chat-with-repo` as an optional dependency in `pyproject.toml`. 
--- .gitignore | 7 +- README.md | 27 +- README_CN.md | 23 +- config.toml.template | 15 - .../book_tools/generate_repoagent_books.py | 1 - examples/init.py | 0 .../book_tools/generate_repoagent_books.md | 40 +- .../book_tools/generate_summary_from_book.md | 139 +- markdown_docs/repo_agent/change_detector.md | 254 +- markdown_docs/repo_agent/chat_engine.md | 382 +-- .../chat_with_repo/gradio_interface.md | 152 -- .../repo_agent/chat_with_repo/json_handler.md | 191 -- .../repo_agent/chat_with_repo/main.md | 14 - .../repo_agent/chat_with_repo/prompt.md | 206 -- .../repo_agent/chat_with_repo/rag.md | 237 -- .../repo_agent/chat_with_repo/vectordb.md | 90 - markdown_docs/repo_agent/doc_meta_info.md | 1228 +++++---- markdown_docs/repo_agent/file_handler.md | 557 ++-- markdown_docs/repo_agent/log.md | 64 +- markdown_docs/repo_agent/main.md | 44 + .../repo_agent/multi_task_dispatch.md | 249 +- markdown_docs/repo_agent/project_manager.md | 199 +- markdown_docs/repo_agent/runner.md | 436 +-- markdown_docs/repo_agent/settings.md | 295 ++ .../repo_agent/utils/gitignore_checker.md | 196 +- .../repo_agent/utils/meta_info_utils.md | 67 + markdown_docs/tests/test_change_detector.md | 129 +- markdown_docs/tests/test_gradio_ui.md | 58 - markdown_docs/tests/test_json_handler.md | 119 +- markdown_docs/tests/test_main.md | 36 - markdown_docs/tests/test_prompt.md | 131 - markdown_docs/tests/test_rag.md | 87 - markdown_docs/tests/test_structure_tree.md | 142 +- markdown_docs/tests/test_vectordb.md | 106 - pdm.lock | 2404 +++++++---------- pyproject.toml | 51 +- repo_agent/__main__.py | 2 +- repo_agent/change_detector.py | 28 +- repo_agent/chat_engine.py | 260 +- repo_agent/chat_with_repo/gradio_interface.py | 137 +- repo_agent/chat_with_repo/json_handler.py | 18 +- repo_agent/chat_with_repo/main.py | 27 +- repo_agent/chat_with_repo/prompt.py | 112 +- repo_agent/chat_with_repo/rag.py | 259 +- .../chat_with_repo/text_analysis_tool.py | 64 + .../chat_with_repo/vector_store_manager.py | 132 + repo_agent/chat_with_repo/vectordb.py | 67 - repo_agent/config_manager.py | 97 - repo_agent/doc_meta_info.py | 413 ++- repo_agent/exceptions.py | 18 - repo_agent/file_handler.py | 64 +- repo_agent/log.py | 42 +- repo_agent/main.py | 252 +- repo_agent/multi_task_dispatch.py | 37 +- repo_agent/project_manager.py | 45 +- repo_agent/prompt.py | 86 +- repo_agent/runner.py | 263 +- repo_agent/settings.py | 111 +- repo_agent/utils/meta_info_utils.py | 76 +- requirements.txt | 1813 +++++-------- tests/test_change_detector.py | 7 +- tests/test_gradio_ui.py | 25 - tests/test_json_handler.py | 8 +- tests/test_main.py | 43 - tests/test_prompt.py | 54 - tests/test_rag.py | 66 - tests/test_structure_tree.py | 1 + tests/test_vectordb.py | 43 - 68 files changed, 5245 insertions(+), 7801 deletions(-) delete mode 100644 config.toml.template delete mode 100644 examples/init.py delete mode 100644 markdown_docs/repo_agent/chat_with_repo/gradio_interface.md delete mode 100644 markdown_docs/repo_agent/chat_with_repo/json_handler.md delete mode 100644 markdown_docs/repo_agent/chat_with_repo/main.md delete mode 100644 markdown_docs/repo_agent/chat_with_repo/prompt.md delete mode 100644 markdown_docs/repo_agent/chat_with_repo/rag.md delete mode 100644 markdown_docs/repo_agent/chat_with_repo/vectordb.md create mode 100644 markdown_docs/repo_agent/main.md create mode 100644 markdown_docs/repo_agent/settings.md create mode 100644 markdown_docs/repo_agent/utils/meta_info_utils.md delete mode 100644 markdown_docs/tests/test_gradio_ui.md delete mode 100644 
markdown_docs/tests/test_main.md delete mode 100644 markdown_docs/tests/test_prompt.md delete mode 100644 markdown_docs/tests/test_rag.md delete mode 100644 markdown_docs/tests/test_vectordb.md create mode 100644 repo_agent/chat_with_repo/text_analysis_tool.py create mode 100644 repo_agent/chat_with_repo/vector_store_manager.py delete mode 100644 repo_agent/chat_with_repo/vectordb.py delete mode 100644 repo_agent/config_manager.py delete mode 100644 repo_agent/exceptions.py delete mode 100644 tests/test_gradio_ui.py delete mode 100644 tests/test_main.py delete mode 100644 tests/test_prompt.py delete mode 100644 tests/test_rag.py delete mode 100644 tests/test_vectordb.py diff --git a/.gitignore b/.gitignore index 60d7064..f556184 100644 --- a/.gitignore +++ b/.gitignore @@ -10,7 +10,6 @@ __pycache__/ *$py.class chroma_db/ .vscode -.project_doc_record # C extensions *.so @@ -177,9 +176,11 @@ prompt_output/ .vscode/ # RepoAgent -.project_doc_record log.txt .chroma_db config.yml config.toml -.pre-commit-config.yaml \ No newline at end of file +.project_doc_record + +# Gradio +.gradio \ No newline at end of file diff --git a/README.md b/README.md index 7526564..2000dd1 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,10 @@ Traditionally, creating and maintaining software documentation demanded signific ### Installation Method +#### Using GitHub Actions + +This repository supports GitHub Actions for automating workflows such as building, testing, and deploying. For detailed instructions on setting up and using GitHub Actions with this repository, please refer to the [actions/run-repoagent](https://github.com/Umpire2018/setup-repoagent/actions). + #### Using pip (Recommended for Users) Install the `repoagent` package directly using pip: @@ -108,28 +112,12 @@ set OPENAI_API_KEY=YOUR_API_KEY # on Windows $Env:OPENAI_API_KEY = "YOUR_API_KEY" # on Windows (PowerShell) ``` -Use `repoagent configure` if you need to modify the running parameters. - -```sh -Enter the path to target repository: -Enter the project hierarchy file name [.project_doc_record]: -Enter the Markdown documents folder name [markdown_docs]: -Enter files or directories to ignore, separated by commas []: -Enter the language (ISO 639 code or language name, e.g., 'en', 'eng', 'English') [Chinese]: -Enter the maximum number of threads [4]: -Enter the maximum number of document tokens [1024]: -Enter the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) [INFO]: -Enter the model [gpt-3.5-turbo]: -Enter the temperature [0.2]: -Enter the request timeout (seconds) [60.0]: -Enter the base URL [https://api.openai.com/v1]: -``` - ## Run RepoAgent Enter the root directory of RepoAgent and try the following command in the terminal: ```sh repoagent run # this command will generate or update docs (the pre-commit hook will automatically call this) +repoagent run --print-hierarchy # Print how repo-agent parses the target repo ``` The run command supports the following optional flags (if set, will override config defaults): @@ -145,12 +133,10 @@ The run command supports the following optional flags (if set, will override con - `-l`, `--language` TEXT: The ISO 639 code or language name for the documentation. Default: `Chinese` - `-ll`, `--log-level` [DEBUG|INFO|WARNING|ERROR|CRITICAL]: Sets the logging level for the application. 
Default: `INFO` - You can also try the following feature ```sh repoagent clean # Remove repoagent-related cache -repoagent print-hierarchy # Print how repo-agent parse the target repo repoagent diff # Check what docs will be updated/generated based on current code change ``` @@ -217,6 +203,7 @@ We conceptualize **Chat With Repo** as a unified gateway for these downstream ap Here we demonstrate a preliminary prototype of one of our downstream tasks: Automatic Q&A for Issues and Code Explanation. You can start the server by running the following code. ```sh +pip install repoagent[chat-with-repo] repoagent chat-with-repo ``` @@ -224,7 +211,7 @@ repoagent chat-with-repo - [ ] Generate README.md automatically combining with the global documentation - [ ] **Multi-programming-language support** Support more programming languages like Java, C or C++, etc. -- [ ] Local model support like Llama, chatGLM, Qwen, GLM4, etc. +- [x] Local model support like Llama, chatGLM, Qwen, GLM4, etc. ## 🥰 Featured Cases diff --git a/README_CN.md b/README_CN.md index d39c87f..ca6901f 100644 --- a/README_CN.md +++ b/README_CN.md @@ -102,28 +102,12 @@ set OPENAI_API_KEY=YOUR_API_KEY # on Windows $Env:OPENAI_API_KEY = "YOUR_API_KEY" # on Windows (PowerShell) ``` -如果需要修改运行参数,使用 `repoagent configure` - -```sh -Enter the path to target repository: -Enter the project hierarchy file name [.project_doc_record]: -Enter the Markdown documents folder name [markdown_docs]: -Enter files or directories to ignore, separated by commas []: -Enter the language (ISO 639 code or language name, e.g., 'en', 'eng', 'English') [Chinese]: -Enter the maximum number of threads [4]: -Enter the maximum number of document tokens [1024]: -Enter the log level (DEBUG, INFO, WARNING, ERROR, CRITICAL) [INFO]: -Enter the model [gpt-3.5-turbo]: -Enter the temperature [0.2]: -Enter the request timeout (seconds) [60.0]: -Enter the base URL [https://api.openai.com/v1]: -``` - ## 运行RepoAgent 进入RepoAgent根目录并在终端尝试以下命令: ```sh repoagent run # 这条命令会生成文档或自动更新文档 (pre-commit-hook 会自动调用它) +repoagent run --print-hierarchy # 此命令将打印repoagent解析出的目标仓库 ``` run 命令支持以下可选标志(如果设置,将覆盖配置默认值): @@ -143,7 +127,6 @@ run 命令支持以下可选标志(如果设置,将覆盖配置默认值) ```sh repoagent clean # 此命令将删除与repoagent相关的缓存 -repoagent print-hierarchy # 此命令将打印repoagent解析出的目标仓库 repoagent diff # 此命令将检查基于当前代码更改将更新/生成哪些文档 ``` @@ -206,7 +189,8 @@ RepoAgent hook会在git commit时自动触发,检测前一步您git add的文 在这里,我们展示了我们的下游任务之一的初步原型:自动issue问题解答和代码解释。您可以通过在终端运行以下代码启动服务。 ```sh -repoagent chat_with_repo +pip install repoagent[chat-with-repo] +repoagent chat-with-repo ``` # ✅ 未来工作 @@ -237,4 +221,3 @@ repoagent chat_with_repo primaryClass={cs.CL} } ``` - diff --git a/config.toml.template b/config.toml.template deleted file mode 100644 index 628caaa..0000000 --- a/config.toml.template +++ /dev/null @@ -1,15 +0,0 @@ -[project] -target_repo = "" -hierarchy_name = ".project_doc_record" -markdown_docs_name = "markdown_docs" -ignore_list = [] -language = "Chinese" -max_thread_count = 4 -max_document_tokens = 1024 -log_level = "info" - -[chat_completion] -model = "gpt-3.5-turbo" -temperature = 0.2 -request_timeout = 60 -base_url = "https://api.openai.com/v1" diff --git a/display/book_tools/generate_repoagent_books.py b/display/book_tools/generate_repoagent_books.py index fa91100..dfc7143 100644 --- a/display/book_tools/generate_repoagent_books.py +++ b/display/book_tools/generate_repoagent_books.py @@ -1,6 +1,5 @@ import os import shutil - import sys diff --git a/examples/init.py b/examples/init.py deleted file mode 100644 index e69de29..0000000 diff 
--git a/markdown_docs/display/book_tools/generate_repoagent_books.md b/markdown_docs/display/book_tools/generate_repoagent_books.md index f7d98fa..02d84e3 100644 --- a/markdown_docs/display/book_tools/generate_repoagent_books.md +++ b/markdown_docs/display/book_tools/generate_repoagent_books.md @@ -1,39 +1,33 @@ ## FunctionDef main -**main**: The function of main is to copy a folder of markdown documents into a specific book structure, creating the necessary directories and a README.md file if it does not exist. +**main**: The function of main is to create a directory structure for a book and copy Markdown documentation files into it. -**Parameters**: This function does not accept parameters directly through its definition. Instead, it retrieves parameters from the command line arguments: -- The first argument (`sys.argv[1]`) specifies the folder containing markdown documents. -- The second argument (`sys.argv[2]`) specifies the name of the book. -- The third argument (`sys.argv[3]`) specifies the repository path where the markdown documents are located. +**parameters**: The parameters of this Function. +· parameter1: markdown_docs_folder - The name of the folder containing Markdown documentation files to be copied. +· parameter2: book_name - The name of the book for which the directory structure is created. +· parameter3: repo_path - The path to the repository where the Markdown documentation folder is located. -**Code Description**: The `main` function begins by retrieving the necessary paths from the command line arguments. It constructs the destination directory path by combining a base path (`./books`), the book name, and a subdirectory (`src`). It also constructs the path to the source directory containing the markdown documents. +**Code Description**: The main function begins by retrieving command-line arguments that specify the folder containing Markdown documentation, the desired book name, and the repository path. It constructs the destination directory path where the book's source files will be stored, specifically under './books/{book_name}/src'. It also constructs the source directory path for the Markdown documentation files based on the provided repository path and the specified folder name. -The function checks if the destination directory exists. If it does not, the directory is created, and a message indicating the creation of the directory is printed to the console. +The function then checks if the destination directory exists. If it does not exist, it creates the directory and prints a confirmation message indicating that the directory has been created. -Next, the function iterates over all items in the source directory. For each item, it checks if it is a directory or a file. If the item is a directory, it uses `shutil.copytree` to recursively copy the entire directory structure and its contents to the destination. If the item is a file, it uses `shutil.copy2` to copy the file to the destination, preserving metadata. After each copy operation, a message is printed to the console indicating the action taken. +Next, the function iterates over each item in the source directory. For each item, it constructs the full source and destination paths. If the item is a directory, it uses `shutil.copytree` to recursively copy the entire directory to the destination. If the item is a file, it uses `shutil.copy2` to copy the file to the destination. For each copy operation, a message is printed to confirm the action taken. 
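As an illustration of the copy loop described above, a minimal sketch under the stated argument order (docs folder, book name, repository path) could look like this; treat it as a reading aid rather than the exact source:

```python
import os
import shutil
import sys

def main():
    # Argument order as described: docs folder, book name, repository path
    markdown_docs_folder, book_name, repo_path = sys.argv[1], sys.argv[2], sys.argv[3]
    dst_dir = os.path.join("./books", book_name, "src")
    docs_dir = os.path.join(repo_path, markdown_docs_folder)

    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
        print(f"Created directory: {dst_dir}")

    for item in os.listdir(docs_dir):
        src = os.path.join(docs_dir, item)
        dst = os.path.join(dst_dir, item)
        if os.path.isdir(src):
            shutil.copytree(src, dst)  # recursively copy a whole subdirectory
        else:
            shutil.copy2(src, dst)  # copy a single file, preserving metadata
        print(f"Copied {src} to {dst}")
```
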
-Additionally, the function includes a nested function named `create_book_readme_if_not_exist`, which is responsible for checking if a README.md file exists in the destination directory. If the file does not exist, the function creates it and writes a header with the book name. +Additionally, the function defines a nested function called `create_book_readme_if_not_exist`, which checks for the existence of a README.md file in the destination directory. If the README.md file does not exist, it creates one and writes the book name as the title in Markdown format. -Finally, the `create_book_readme_if_not_exist` function is called with the destination directory as its argument to ensure that a README.md file is present in the book structure. +Finally, the main function calls `create_book_readme_if_not_exist` to ensure that a README.md file is created for the book if it is not already present. -**Note**: This function relies on external libraries (`os`, `sys`, `shutil`) for file system operations and command line argument processing. It is designed to be executed from the command line, and the user must provide the correct paths and book name as arguments. The function does not perform error handling for incorrect paths or permissions issues, which should be considered when integrating into larger applications or scripts. +**Note**: It is important to ensure that the specified paths and folder names are valid and accessible. The function relies on the presence of the `shutil` and `os` modules, which must be imported for the code to execute successfully. Additionally, the function assumes that the command-line arguments are provided in the correct order and format. ### FunctionDef create_book_readme_if_not_exist(dire) **create_book_readme_if_not_exist**: The function of create_book_readme_if_not_exist is to create a README.md file in a specified directory if it does not already exist. -**Parameters**: -- **dire**: This parameter specifies the directory where the README.md file should be created. +**parameters**: The parameters of this Function. +· dire: A string representing the directory path where the README.md file should be created. -**Code Description**: -The `create_book_readme_if_not_exist` function is designed to ensure that a README.md file exists within a specified directory. It takes a single parameter, `dire`, which represents the directory path where the README.md file is to be checked for existence and potentially created. +**Code Description**: The create_book_readme_if_not_exist function is designed to check for the existence of a README.md file in a specified directory. It takes one parameter, 'dire', which is the path to the directory where the README.md file is intended to be created. -The function begins by constructing the full path to the README.md file within the specified directory using the `os.path.join` method. This method combines the directory path provided in the `dire` parameter with the filename 'README.md' to create a complete path to where the README file should exist. +The function first constructs the full path to the README.md file by joining the provided directory path with the filename 'README.md' using the os.path.join method. It then checks if the file already exists at that path using os.path.exists. If the file does not exist, the function proceeds to create it. -Next, the function checks if a file at the constructed path already exists using the `os.path.exists` method. 
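A minimal sketch of this helper, based on this description and the paragraph that follows (note the documented caveat that `book_name` is not a parameter and must exist in the enclosing scope):

```python
import os

def create_book_readme_if_not_exist(dire):
    readme_path = os.path.join(dire, "README.md")
    if not os.path.exists(readme_path):
        with open(readme_path, "w") as readme_file:
            # book_name is not a parameter; it must come from the enclosing scope
            readme_file.write("# {}\n".format(book_name))
```
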
If the file does not exist (`os.path.exists` returns False), the function proceeds to create the README.md file at the specified path. +Within a context manager (using the 'with' statement), the function opens the README.md file in write mode ('w'). This ensures that if the file is created, it will be properly closed after writing. The function writes a header line to the file, formatted as '# {book_name}', where 'book_name' is expected to be a variable that holds the name of the book. However, it is important to note that 'book_name' must be defined in the scope where this function is called, as it is not passed as a parameter to the function itself. -To create the README.md file, the function opens a new file at the constructed path in write mode (`'w'`). It then writes a markdown header to the file, which includes a placeholder for a book name. The placeholder is formatted as `# {}`, where `{}` is intended to be replaced with the actual name of the book. However, it's important to note that in the provided code snippet, the variable `book_name` is referenced but not defined within the function or passed as a parameter, which suggests that the code snippet may be incomplete or relies on external context for the `book_name` variable. - -**Note**: -- The function assumes that the `dire` parameter provided is a valid directory path and does not perform any checks to verify the existence of the directory itself. -- The function relies on an external variable `book_name` for the content of the README.md file, which is not defined within the function or passed as a parameter. Users of this function will need to ensure that `book_name` is defined in the appropriate context for the function to execute successfully. -- This function only creates a README.md file if it does not already exist. If a README.md file is already present in the specified directory, the function will not modify or overwrite the existing file. +**Note**: It is essential to ensure that the variable 'book_name' is defined before calling this function, as it is used in the content written to the README.md file. Additionally, the function does not handle exceptions that may arise from file operations, so it is advisable to implement error handling if necessary. *** diff --git a/markdown_docs/display/book_tools/generate_summary_from_book.md b/markdown_docs/display/book_tools/generate_summary_from_book.md index 0dc5cc1..720b7bd 100644 --- a/markdown_docs/display/book_tools/generate_summary_from_book.md +++ b/markdown_docs/display/book_tools/generate_summary_from_book.md @@ -1,103 +1,104 @@ ## FunctionDef create_readme_if_not_exist(dire) **create_readme_if_not_exist**: The function of create_readme_if_not_exist is to create a README.md file in a specified directory if it does not already exist. -**Parameters**: -- **dire**: This parameter specifies the directory path where the README.md file should be created if it's not present. +**parameters**: The parameters of this Function. +· dire: The directory path where the README.md file should be created. -**Code Description**: -The create_readme_if_not_exist function is designed to ensure that every directory within a project has a README.md file. It takes a single argument, `dire`, which represents the directory path where the README.md file is to be checked for existence and potentially created. +**Code Description**: The create_readme_if_not_exist function checks if a README.md file exists in the specified directory (denoted by the parameter 'dire'). 
If the file does not exist, the function creates it and writes a header containing the name of the directory as the title. The path for the README.md file is constructed using the os.path.join method, which combines the directory path with the file name 'README.md'. The function uses os.path.exists to verify the existence of the README.md file. If the file is absent, it opens the file in write mode and writes a formatted string that includes the base name of the directory, which is obtained using os.path.basename. -The function first constructs the full path to the README.md file by combining the `dire` path with the filename 'README.md' using `os.path.join`. It then checks if a file at this path already exists using `os.path.exists`. If the file does not exist, the function proceeds to create it. +This function is called by the output_markdown function, which iterates through the contents of a specified directory. During its execution, output_markdown checks each item in the directory; if an item is a subdirectory, it invokes create_readme_if_not_exist to ensure that a README.md file is present in that subdirectory. This relationship indicates that create_readme_if_not_exist is a utility function designed to support the documentation generation process by ensuring that each directory has a README.md file, which can be useful for providing context or information about the contents of the directory. -To create the README.md file, the function opens a new file at the determined path in write mode. It then retrieves the name of the directory (where the README.md is to be created) using `os.path.basename(dire)` and writes a markdown header to the README.md file with this directory name. This creates a simple README.md file with the directory name as its title. - -This function is called from within the `output_markdown` function, which is part of the process for generating markdown documentation for a project. Specifically, `output_markdown` traverses a directory structure, and for each directory encountered, it calls `create_readme_if_not_exist` to ensure there is a README.md file. This is part of a larger process to generate a comprehensive markdown documentation, where the presence of README.md files in each directory is a prerequisite for creating structured and linked documentation. - -**Note**: -- The function assumes that the directory path provided in the `dire` parameter exists and is accessible. It does not handle cases where the directory path does not exist or cannot be accessed due to permission issues. -- The README.md file created by this function contains only a simple markdown header with the directory name. Users may need to manually edit these files later to add more detailed documentation content. -- This function is dependent on the `os` module for filesystem operations, such as path manipulation and checking for file existence. +**Note**: It is important to ensure that the directory path provided to the create_readme_if_not_exist function is valid and accessible. Additionally, the function will only create the README.md file if it does not already exist, preventing overwriting any existing documentation. ## FunctionDef output_markdown(dire, base_dir, output_file, iter_depth) -**output_markdown**: The function of output_markdown is to generate a markdown file that lists all markdown files and directories within a specified directory, including nested directories, in a structured manner. 
+**output_markdown**: The function of output_markdown is to generate a Markdown-formatted summary of files and directories, including links to README.md files and relevant Markdown files. + +**parameters**: The parameters of this Function. +· dire: A string representing the directory path to be processed for Markdown files and subdirectories. +· base_dir: A string representing the base directory path used to create relative paths for the output. +· output_file: A file object where the generated Markdown summary will be written. +· iter_depth: An integer indicating the current depth of recursion, used for formatting the output. -**Parameters**: -- **dire**: The directory path where the function starts to search for markdown files and directories. -- **base_dir**: The base directory path used to calculate relative paths for markdown links. -- **output_file**: An open file object where the markdown list will be written. -- **iter_depth**: An integer representing the current depth of recursion, used for indentation in the markdown file. It defaults to 0 for the initial call. +**Code Description**: The output_markdown function is designed to traverse a specified directory (denoted by the parameter 'dire') and its subdirectories, generating a structured Markdown summary of the contents. The function begins by iterating through the files and directories within 'dire'. For each item, it checks if it is a directory. If it is, the function calls create_readme_if_not_exist to ensure that a README.md file exists in that directory. This utility function is crucial for maintaining documentation consistency across directories. -**Code Description**: -The output_markdown function operates in two main stages. In the first stage, it iterates through all items in the specified directory (`dire`). If an item is a directory, it ensures a README.md file exists within that directory by calling the `create_readme_if_not_exist` function. This step is crucial for maintaining a consistent documentation structure across the project. +After ensuring that README.md files are present, the function continues to process each item in the directory. If an item is a directory and contains a README.md file, the function creates a relative Markdown link to that file in the output. The relative path is constructed using os.path.relpath to ensure that the link is correctly formatted based on the base directory. -In the second stage, the function iterates through the items in the directory again. For each item, it checks if it is a directory. If so, and a README.md file exists within it, the function generates a markdown link to the README.md file. This link is written to the `output_file`, with indentation based on the `iter_depth` parameter to reflect the directory's depth in the hierarchy. The function then recursively calls itself with the nested directory as the new `dire` parameter, incrementing `iter_depth` by 1 to adjust the indentation for deeper levels. +For files that are not directories, the function utilizes is_markdown_file to determine if the file is a Markdown file. If the file is identified as a Markdown file and is not excluded by specific conditions (such as being named 'SUMMARY.md' or 'README.md' at the top level), the function writes a relative link to that file in the output. -If the item is not a directory but a file, the function checks if it is a markdown file by calling `is_markdown_file`. 
If the file is a markdown file (excluding SUMMARY.md and README.md at the root level, and README.md at deeper levels), it generates a markdown link to the file and writes it to the `output_file`, with appropriate indentation. +The output_markdown function is called by the main function, which serves as the entry point of the program. In main, the function is invoked after creating the necessary directory structure and opening the output file for writing. This relationship indicates that output_markdown is a critical component of the documentation generation process, responsible for compiling the contents of the specified directory into a cohesive Markdown summary. -**Note**: -- This function assumes that the `output_file` is already open and ready for writing. It does not handle opening or closing the file. -- The function relies on the presence of the `os` module for directory and file operations and the `is_markdown_file` function to filter markdown files. -- The generated markdown file (usually SUMMARY.md) is intended for use with documentation systems like GitBook, which can use the markdown file to generate a navigable book structure. -- The function's recursive nature allows it to handle directories of any depth, making it versatile for projects with complex directory structures. -- It is important to ensure that the base directory (`base_dir`) is correctly specified relative to the directory being processed (`dire`) to generate accurate relative paths for the markdown links. +**Note**: It is important to ensure that the directory path provided to output_markdown is valid and accessible. The function assumes that the output_file is opened in write mode before being passed to it. Additionally, care should be taken to manage the depth of recursion, as excessive nesting may lead to performance issues or stack overflow errors. ## FunctionDef markdown_file_in_dir(dire) -**markdown_file_in_dir**: The function of `markdown_file_in_dir` is to check if there is any Markdown file in a specified directory. +**markdown_file_in_dir**: The function of markdown_file_in_dir is to check whether any Markdown file (with .md or .markdown extension) exists in a specified directory or its subdirectories. -**Parameters**: -- `dire`: The directory path as a string where the function will search for Markdown files. +**parameters**: +- parameter1: dire (str) - The directory path to be searched for Markdown files. -**Code Description**: -The `markdown_file_in_dir` function is designed to traverse through a given directory and its subdirectories to find files with extensions that indicate they are Markdown files. It uses the `os.walk` method to iterate over the directory tree, where `os.walk` yields a tuple containing the current directory path (`root`), a list of directories within `root` (`dirs`), and a list of files within `root` (`files`). +**Code Description**: +The function `markdown_file_in_dir` is designed to traverse a specified directory (`dire`) and its subdirectories to check for the existence of files with `.md` or `.markdown` extensions. It utilizes Python's `os.walk` function to walk through the directory tree, where `root` is the current directory path, `dirs` is a list of subdirectories, and `files` is a list of filenames in the current directory. -For each file in the current directory, the function uses a regular expression search to check if the file's name ends with either `.md` or `.markdown`, which are common extensions for Markdown files. 
This is achieved using the `re.search` method with the pattern `'.md$|.markdown$'`, where `$` denotes the end of the string, ensuring that the file extension is at the end of the filename. +For each file in the list `files`, the function checks whether the filename matches the regular expression pattern `'.md$|.markdown$'`, which identifies files with the `.md` or `.markdown` extensions. If such a file is found, the function immediately returns `True`, indicating that at least one Markdown file exists within the directory or its subdirectories. -If a Markdown file is found, the function immediately returns `True`, indicating the presence of at least one Markdown file in the directory. If the function completes the search through the entire directory tree without finding any Markdown files, it returns `False`. +If no Markdown files are found during the entire directory traversal, the function returns `False`. -**Note**: -- The function returns as soon as it finds the first Markdown file, without searching for additional Markdown files. -- The search is case-sensitive, which means it distinguishes between uppercase and lowercase letters in file extensions. Files with extensions like `.MD` or `.MARKDOWN` will not be recognized as Markdown files by this function. -- Ensure that the `os` and `re` modules are imported before using this function, as they are required for directory traversal and regular expression search, respectively. +**Note**: +- The function stops as soon as a Markdown file is found and returns `True`, which means it does not continue searching further once the condition is met. +- The function uses regular expressions to identify files with `.md` or `.markdown` extensions. Be aware that this check is case-sensitive by default, meaning it will only match lowercase `.md` or `.markdown`. If case-insensitive matching is needed, the regular expression pattern can be modified accordingly. +- This function only returns a Boolean value (True or False). It does not provide any information about the specific files found, just the presence or absence of such files. **Output Example**: -- If there is at least one Markdown file in the specified directory or its subdirectories, the function will return `True`. -- If there are no Markdown files in the specified directory or its subdirectories, the function will return `False`. +- If there is at least one `.md` or `.markdown` file in the directory, the return value would be: + `True` +- If there are no `.md` or `.markdown` files in the directory, the return value would be: + `False` ## FunctionDef is_markdown_file(filename) -**is_markdown_file**: The function of `is_markdown_file` is to determine if a given filename corresponds to a Markdown file and, if so, return a modified version of the filename. - -**Parameters**: -- `filename`: A string representing the name of the file to be checked. +**is_markdown_file**: The function of is_markdown_file is to determine if a given filename corresponds to a Markdown file and return the filename without its extension if it does. -**Code Description**: -The `is_markdown_file` function is designed to identify whether a given file is a Markdown file based on its extension and then return a modified version of the filename under specific conditions. It employs regular expression (regex) searching to find filenames that end with either `.md` or `.markdown`, which are common extensions for Markdown files. +**parameters**: The parameters of this Function. 
+· filename: A string representing the name of the file to be checked. -Upon finding a match, the function checks the length of the matched group (the extension) to determine whether it corresponds to `.md` or `.markdown`. If the extension is `.md`, the function returns the filename with the last three characters removed. If the extension is `.markdown`, it returns the filename with the last nine characters removed. If no match is found, indicating that the file is not a Markdown file, the function returns `False`. +**Code Description**: The is_markdown_file function uses a regular expression to check if the provided filename ends with either '.md' or '.markdown'. If the filename does not match either of these patterns, the function returns False, indicating that the file is not a Markdown file. If the filename matches '.md', the function returns the filename without the last three characters (the '.md' extension). If the filename matches '.markdown', it returns the filename without the last nine characters (the '.markdown' extension). -This function plays a crucial role in the context of its calling object, `output_markdown`, which is part of a script designed to generate summaries or listings of Markdown files within a directory structure. Specifically, `output_markdown` uses `is_markdown_file` to filter out non-Markdown files and to process Markdown filenames before creating links to them in an output file. This ensures that only relevant Markdown files are included in the summary and that their filenames are appropriately formatted. +This function is called within the output_markdown function, which is responsible for generating a Markdown-formatted summary of files and directories. In output_markdown, the is_markdown_file function is used to filter out files that are Markdown files. Specifically, it checks each file in the specified directory and its subdirectories. If a file is identified as a Markdown file (and is not 'SUMMARY.md' or 'README.md' under certain conditions), its relative path is formatted and written to the output file. This relationship highlights the utility of is_markdown_file in ensuring that only relevant Markdown files are included in the generated summary. -**Note**: -- The function assumes that the input `filename` is a string. Passing a non-string argument will result in an error. -- The function's return value is contextually dependent on the filename's extension. It is important to handle the `False` return value when the function is used in broader applications, as it indicates a non-Markdown file. +**Note**: It is important to ensure that the filename passed to the function is a valid string. The function does not handle exceptions for invalid inputs, so care should be taken to validate the input before calling this function. -**Output Example**: -- For a filename `"example.md"`, the function would return `"example"`. -- For a filename `"documentation.markdown"`, the function would return `"documentation"`. -- For a filename `"image.png"`, the function would return `False`. +**Output Example**: +- If the input is 'example.md', the output will be 'example'. +- If the input is 'document.markdown', the output will be 'document'. +- If the input is 'image.png', the output will be False. ## FunctionDef main -**main**: The function of main is to generate a markdown summary file for a specified book directory. 
+**main**: The function of main is to generate a Markdown summary file for a specified book by creating the necessary directory structure and invoking the output_markdown function. -**Parameters**: This function does not accept parameters directly through its definition. Instead, it retrieves the book name from the command line arguments passed to the script. +**parameters**: The parameters of this Function. +· book_name: A string representing the name of the book, which is passed as a command-line argument. -**Code Description**: The main function begins by extracting the book name from the system arguments. It then constructs a directory path under `./books` with the given book name and a subdirectory `src`. If this directory does not exist, the function prints the directory path and creates it. This step ensures the existence of the directory where the book's content is supposed to be located. +**Code Description**: The main function serves as the entry point for the script, responsible for orchestrating the creation of a Markdown summary file for a book. It begins by retrieving the book name from the command-line arguments using `sys.argv[1]`. This book name is then used to construct the path for the source directory where the summary will be generated, specifically `./books/{book_name}/src`. -After ensuring the directory's existence, the function proceeds to create a markdown file named `SUMMARY.md` within this directory. It opens this file in write mode. The initial content written into `SUMMARY.md` is a markdown header indicating the start of the summary. +The function checks if the specified directory exists using `os.path.exists(dir_input)`. If the directory does not exist, it creates the directory structure using `os.makedirs(dir_input)`. This ensures that the environment is prepared for the subsequent operations. -Following the creation of the summary file, the function calls `output_markdown`, passing the directory path twice (once as the directory to be processed and once as the base directory for calculating relative paths) along with the open file object for `SUMMARY.md`. The `output_markdown` function is responsible for populating the summary file with a structured list of markdown files and directories found within the specified directory, including nested directories. This is achieved by iterating through the directory, ensuring a `README.md` file exists in each subdirectory, and then generating markdown links for each markdown file and directory found. The links are written to the summary file with appropriate indentation to reflect the structure of the book's content. +Once the directory is confirmed to exist, the function proceeds to create the summary file named 'SUMMARY.md' within the specified directory. It opens this file in write mode using `open(output_path, 'w')` and writes a header '# Summary\n\n' to initialize the content. -The main function concludes by printing a message indicating the successful generation of the GitBook auto summary and returns 0, signaling successful execution. +The core functionality of generating the summary is delegated to the `output_markdown` function. This function is called with the parameters `dir_input`, `dir_input` (as the base directory), and the opened output file. The `output_markdown` function is responsible for traversing the directory structure, identifying Markdown files, and generating the appropriate links in the summary file. 
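Putting these steps together, a minimal sketch of the entry point as described might look like the following; it assumes the `output_markdown` function documented earlier in this file and is an illustration, not necessarily the exact source:

```python
import os
import sys

def main():
    book_name = sys.argv[1]
    dir_input = os.path.join("./books", book_name, "src")

    if not os.path.exists(dir_input):
        os.makedirs(dir_input)  # prepare the book's source directory

    output_path = os.path.join(dir_input, "SUMMARY.md")
    with open(output_path, "w") as output_file:
        output_file.write("# Summary\n\n")
        # dir_input doubles as the base directory used for relative links
        output_markdown(dir_input, dir_input, output_file)

    print("GitBook auto summary finished:)")
    return 0
```
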
-**Note**: -- The function relies on the `sys` and `os` modules for handling system arguments and file/directory operations, respectively. Therefore, it's crucial to import these modules before using the function. -- The function assumes the presence of the `output_markdown` function within the same project structure, specifically designed to work with markdown files and directories for documentation purposes. -- The directory structure and naming convention (`./books//src`) are hardcoded, which means the function is tailored for a specific project layout. -- The function does not handle exceptions related to file operations or invalid command line arguments, which might be necessary for more robust applications. +After the summary generation process is completed, the function prints a confirmation message indicating that the GitBook auto summary has finished. The function concludes by returning 0, signaling successful execution. + +The relationship with the `output_markdown` function is crucial, as it handles the detailed processing of the directory contents and the creation of the Markdown links, making it an integral part of the summary generation workflow. + +**Note**: It is important to ensure that the book name provided as a command-line argument is valid and corresponds to an existing book directory structure. The function assumes that the necessary permissions are in place for creating directories and files in the specified path. + +**Output Example**: +When executed with a valid book name, the function will create a directory structure like: +``` +./books/ + └── example_book/ + └── src/ + └── SUMMARY.md +``` +The content of 'SUMMARY.md' might look like: +``` +# Summary -**Output Example**: There is no direct output example since the function's primary purpose is to create and populate a `SUMMARY.md` file within a specified directory. However, after successful execution, the console will display the message "GitBook auto summary finished:) ", and the `SUMMARY.md` file will contain a structured list of markdown links representing the book's content structure. +- [Chapter 1](./chapter1.md) +- [Chapter 2](./chapter2.md) +- [Subdirectory](./subdirectory/README.md) +``` diff --git a/markdown_docs/repo_agent/change_detector.md b/markdown_docs/repo_agent/change_detector.md index 308bd25..fd669c5 100644 --- a/markdown_docs/repo_agent/change_detector.md +++ b/markdown_docs/repo_agent/change_detector.md @@ -1,210 +1,204 @@ ## ClassDef ChangeDetector -**ChangeDetector**: The function of ChangeDetector is to handle file differences and change detection, particularly focusing on identifying changes in files since the last commit, with a potential use of the FileHandler class for accessing the file system. +**ChangeDetector**: The function of ChangeDetector is to handle file differences and change detection in a Git repository. -**Attributes**: -- `repo_path`: The path to the repository where changes are to be detected. -- `repo`: An instance of a git repository, initialized with the path provided during the object's creation. +**attributes**: The attributes of this Class. +· repo_path: The path to the repository. +· repo: An instance of the Git repository initialized with the provided repo_path. -**Code Description**: -The ChangeDetector class is designed to facilitate the detection and handling of file changes within a git repository. It is initialized with a path to a repository, which it uses to create an instance of a git repository. 
This instance is then utilized across various methods to perform operations such as identifying staged Python files, retrieving file differences, parsing these differences, and identifying structural changes within the code. +**Code Description**: The ChangeDetector class is designed to facilitate the detection of changes in files within a Git repository. It utilizes the GitPython library to interact with the Git repository, allowing it to track staged and unstaged changes effectively. -One of the core functionalities provided by this class is the ability to get a dictionary of staged Python files (`get_staged_pys`) with information on whether they are newly created or modified. This is particularly useful for scenarios where only changes to Python files need to be tracked, such as in continuous integration pipelines or automated code review processes. +Upon initialization, the class requires a repository path, which it uses to create a Git repository object. This object serves as the primary interface for executing Git commands and retrieving information about the repository's state. -The class also offers a method to retrieve the diff of a specific file (`get_file_diff`), which can be used to analyze changes made to a file, whether it's newly added or modified. This functionality is essential for tools that need to perform code analysis or documentation generation based on the latest changes. +The class includes several methods: -Furthermore, the `parse_diffs` method allows for the extraction of added and deleted lines from a diff, which can be used to understand the specific changes made to the codebase. This is particularly useful for generating detailed change logs or for tools that need to understand the context of changes. +1. **get_staged_pys**: This method retrieves Python files that have been staged for commit. It checks the differences between the staging area and the last commit (HEAD) to identify files that are either newly added or modified. The method returns a dictionary where the keys are the file paths and the values are booleans indicating whether the file is new. -The `identify_changes_in_structure` method takes this a step further by identifying the structure (functions or classes) where changes have occurred. This is crucial for tools that need to map changes to specific components of the codebase, such as automated testing frameworks that need to run tests based on the parts of the code that have changed. +2. **get_file_diff**: This method fetches the differences for a specific file. If the file is new, it stages the file first and then retrieves the differences from the staging area. For existing files, it retrieves the differences from the last commit. The result is a list of changes made to the file. -Additionally, the class provides methods to identify files that need to be staged (`get_to_be_staged_files`) and to stage those files (`add_unstaged_files`). These methods are particularly useful in scenarios where changes to documentation or other related files need to be automatically staged based on changes to the code. +3. **parse_diffs**: This method processes the list of differences obtained from get_file_diff. It extracts added and removed lines, returning a structured dictionary that categorizes the changes. -**Note**: -- The class relies on the GitPython library for interacting with the git repository, which requires the repository to be initialized and accessible from the path provided. 
-- The detection of changes and the handling of file staging are based on the current state of the repository's index and working directory, which means that the repository's state can affect the outcomes of these methods. 4. **identify_changes_in_structure**: This method analyzes the changed lines to determine which functions or classes have been modified. It checks if the changed lines fall within the start and end lines of known structures and records the changes accordingly. -**Output Example**: -For the `get_staged_pys` method, an example output could be: -```python -{ - 'path/to/changed_file.py': False, - 'path/to/new_file.py': True -} -``` -This dictionary indicates that `changed_file.py` has been modified and `new_file.py` is a new file that has been staged. 5. **get_to_be_staged_files**: This method identifies files that are modified but not yet staged, based on specific conditions, such as whether a corresponding Markdown file exists for a staged Python file. It returns a list of paths to these files. -For the `identify_changes_in_structure` method, an example output could be: +6. **add_unstaged_files**: This method stages the identified unstaged files that meet certain conditions, preparing them for the next commit. + +The ChangeDetector class is instantiated in the Runner class of the project, where it is used to monitor changes in the repository. The Runner class initializes the ChangeDetector with the target repository path, allowing it to leverage its methods for detecting and managing file changes. This integration ensures that the project can effectively track modifications and prepare files for version control. + +**Note**: When using the ChangeDetector class, ensure that the repository path is correctly specified and that the GitPython library is properly installed and configured. The methods are designed to interact with the Git command line, so the underlying Git environment must be accessible. + +**Output Example**: A possible output from the get_staged_pys method could be: ```python { - 'added': {('NewFunction', 'SomeClass'), ('AnotherNewFunction', None)}, - 'removed': set() + 'new_test_file.py': True, + 'existing_file.py': False } ``` -This output indicates that `NewFunction` within `SomeClass` and `AnotherNewFunction` at the module level have been added, with no removed structures detected. +This output indicates that 'new_test_file.py' is a newly added file, while 'existing_file.py' has been modified but was already present in the repository. ### FunctionDef __init__(self, repo_path) -**__init__**: The function of `__init__` is to initialize a ChangeDetector object with a specified repository path. - -**Parameters**: -- `repo_path` (str): The path to the repository that the ChangeDetector will monitor. +**__init__**: The function of __init__ is to initialize a ChangeDetector object. -**Code Description**: -The `__init__` method is a special method in Python that is called when a new instance of a class is created. In the context of the `ChangeDetector` class, this method serves to initialize the newly created object with essential attributes for its operation. +**parameters**: The parameters of this Function. +· repo_path: A string representing the path to the repository. -Upon instantiation of a `ChangeDetector` object, the `__init__` method takes a single parameter, `repo_path`, which is expected to be a string representing the file system path to a git repository. This path is then assigned to the instance variable `self.repo_path`, ensuring that the path is accessible to other methods within the object. 
+**Code Description**: This function is the constructor of the ChangeDetector class and is used to initialize a ChangeDetector object. When calling it, the repo_path argument must be provided; it is a string giving the path of the Git repository to be monitored. Internally, the function assigns the given repo_path to the instance variable self.repo_path so that it can be used by the object's other methods. It also uses the Repo class from the git library to create a new Repo object and assigns it to self.repo, which allows the object to interact with the specified Git repository. -Additionally, the `__init__` method initializes another instance variable, `self.repo`, by invoking the `Repo` constructor from the `git` module with `repo_path` as its argument. This effectively creates a `Repo` object that represents the git repository located at the specified path. The `self.repo` variable thus holds a reference to this `Repo` object, enabling the `ChangeDetector` to interact with the git repository (e.g., to check for changes, commit history, etc.) throughout its lifecycle. - -**Note**: - It is crucial that the `repo_path` provided to the `__init__` method points to a valid git repository. If the path is incorrect or does not correspond to a git repository, the initialization of the `Repo` object may fail, leading to errors in subsequent operations. - The `git` module, from which the `Repo` class is used, is part of GitPython, a library for interacting with Git repositories in Python. Ensure that GitPython is installed and properly configured in your environment to avoid import errors. +**Note**: When using this code, make sure the provided repo_path is a valid Git repository path; otherwise an error will be raised. Also ensure that the git library is installed and properly configured before calling this constructor. *** ### FunctionDef get_staged_pys(self) -**get_staged_pys**: The function of `get_staged_pys` is to retrieve added Python files in the repository that have been staged for commit. +**get_staged_pys**: The function of get_staged_pys is to retrieve a dictionary of Python files that have been staged in the Git repository. -**Parameters**: This function does not accept any parameters as it operates on the repository associated with the instance of the class it belongs to. +**parameters**: The parameters of this Function. +· None -**Code Description**: The `get_staged_pys` function is a method of the `ChangeDetector` class, designed to identify Python files (.py) that have been staged in a Git repository but not yet committed. It leverages the GitPython library to interact with the Git repository, specifically focusing on the staging area (index) compared to the last commit (HEAD). +**Code Description**: The get_staged_pys function is designed to identify and return a collection of Python files that have been staged in the Git repository. It utilizes the GitPython library to access the repository's index and compare the current state of staged files against the last commit (HEAD). The function specifically looks for files that have been added or modified, indicated by the change types "A" (added) and "M" (modified). -The function initializes an empty dictionary, `staged_files`, to store the results. It then retrieves a list of differences between the staging area and the HEAD commit using `repo.index.diff("HEAD", R=True)`. The `R=True` parameter reverses the comparison logic, treating the staging area as the new state and the last commit as the old state. This approach ensures that new files in the staging area are correctly identified as added rather than deleted. +The function begins by initializing an empty dictionary called staged_files, which will store the paths of the staged Python files as keys and a boolean value indicating whether each file is newly created as the corresponding value. 
The core logic of the function involves calling the repo.index.diff("HEAD", R=True) method, which retrieves the differences between the current staging area and the last commit. The R=True parameter is crucial as it reverses the comparison logic, allowing the function to correctly identify newly added files that do not exist in the HEAD commit. -For each difference detected, the function checks if the change type is either "A" (added) or "M" (modified) and if the file path ends with ".py", indicating a Python file. If both conditions are met, the file is considered relevant, and its path is added to the `staged_files` dictionary. The value associated with each path is a boolean indicating whether the file is newly created (`True` for added files, `False` for modified files). +The function then iterates over the differences obtained from the diff call. For each difference, it checks if the change type is either "A" or "M" and if the file path ends with the ".py" extension, ensuring that only Python files are considered. If a file is determined to be newly created (change type "A"), the function marks it as such in the staged_files dictionary. -This method is particularly useful in continuous integration/continuous deployment (CI/CD) pipelines or automated scripts where changes to Python files need to be detected and possibly acted upon before committing them to the repository. +This function is called within the test_get_staged_pys method of the TestChangeDetector class, which is part of the testing suite for the ChangeDetector functionality. In the test, a new Python file is created and staged using the Git command. The get_staged_pys function is then invoked to verify that the newly created file is correctly identified as staged. The test asserts that the new file appears in the list of staged files, demonstrating the function's effectiveness in tracking changes to Python files in the repository. -**Note**: It is important to have the GitPython library installed and properly configured to use this function. Additionally, the function assumes that the `self.repo` attribute of the `ChangeDetector` class instance has been initialized with a valid GitPython `Repo` object representing the Git repository to be analyzed. +**Note**: It is important to ensure that the GitPython library is properly configured and that the repository is in a valid state for the function to operate correctly. -**Output Example**: -```python +**Output Example**: An example of the return value from get_staged_pys might look like this: { - 'path/to/added_file.py': True, - 'path/to/modified_file.py': False + 'new_test_file.py': True, + 'existing_file.py': False } -``` -This dictionary indicates that `path/to/added_file.py` is a newly added Python file, while `path/to/modified_file.py` is an existing file that has been modified and staged. - -In the context of its usage within the project, specifically in the `test_get_staged_pys` method of the `TestChangeDetector` class, the `get_staged_pys` function is used to verify that newly created and staged Python files are correctly detected. This is part of the unit testing process to ensure the function behaves as expected under controlled conditions. The test involves creating a new Python file, staging it using Git commands, and then using `get_staged_pys` to confirm that the file is correctly identified as staged and new. This demonstrates the function's practical application in tracking changes to Python files in a Git repository. 
+In this example, 'new_test_file.py' is a newly created file, while 'existing_file.py' has been modified but was already present in the repository. *** ### FunctionDef get_file_diff(self, file_path, is_new_file) -**get_file_diff**: The function of `get_file_diff` is to retrieve the changes made to a specific file, distinguishing between new and existing files. +**get_file_diff**: The function of get_file_diff is to retrieve the changes made to a specific file. -**Parameters**: -- `file_path` (str): The relative path of the file. -- `is_new_file` (bool): Indicates whether the file is a new file. +**parameters**: The parameters of this Function. +· file_path: The relative path of the file. +· is_new_file: Indicates whether the file is a new file. -**Code Description**: -The `get_file_diff` function is a crucial component of the change detection mechanism within a version-controlled project. It operates by leveraging the Git version control system to identify changes made to files. The function is designed to handle both new files that have been added to the project and existing files that have been modified. +**Code Description**: The get_file_diff function is designed to obtain the differences in a specified file within a Git repository. It takes two parameters: file_path, which is a string representing the relative path of the file in the repository, and is_new_file, a boolean that indicates whether the file is newly created or an existing one. -For new files, the function first adds them to the Git staging area using a shell command. This is necessary because new files are not tracked by Git until they are staged. Once the file is staged, the function uses the `git diff --staged` command to retrieve the differences between the staged file and its last committed state, which, for new files, would essentially list the entire content of the file as additions. +When is_new_file is set to True, the function first stages the new file by executing a Git command to add it to the staging area. This is done using the subprocess module to run the command `git -C {repo.working_dir} add {file_path}`. After staging the file, it retrieves the differences using `repo.git.diff("--staged", file_path)`, which provides the changes that have been staged for the new file. -For existing files, the function directly retrieves the differences between the file's current state and its last committed state using the `git diff HEAD` command. This command compares the file's current state against the HEAD of the current branch, effectively capturing any modifications made since the last commit. +If is_new_file is False, the function retrieves the differences from the last committed state (HEAD) using `repo.git.diff("HEAD", file_path)`. The differences are then split into lines and returned as a list. -The differences retrieved by either method are then split into individual lines and returned as a list. This list represents the changes made to the file, line by line, and is used by other components of the system to further analyze the impact of these changes on the project. +This function is called by the process_file_changes method in the Runner class. The process_file_changes method is responsible for processing changes in files detected in a repository. It utilizes get_file_diff to obtain the changes in the specified file, which are then parsed and analyzed to identify structural changes in the code. 
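A rough sketch of the two code paths described above, assuming a GitPython `Repo` instance (the standalone-function form is illustrative only):

```python
import subprocess
from git import Repo

def get_file_diff(repo: Repo, file_path: str, is_new_file: bool) -> list[str]:
    """Return the diff of one file as a list of lines."""
    if is_new_file:
        # New files are untracked until staged, so stage the file first,
        # then diff the staging area against the last commit.
        subprocess.run(
            f"git -C {repo.working_dir} add {file_path}",
            shell=True, check=True,
        )
        diffs = repo.git.diff("--staged", file_path)
    else:
        # Existing files can be compared directly against HEAD.
        diffs = repo.git.diff("HEAD", file_path)
    return diffs.splitlines()
```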
The results are logged and may lead to updates in a JSON file that tracks project hierarchy or the generation of Markdown documentation for the changed file. -In the context of its calling situation, as seen in the `process_file_changes` function within `runner.py`, `get_file_diff` is invoked to obtain the detailed list of changes for each file detected as changed. This information is then parsed and analyzed to identify structural changes within the file, such as additions or removals of functions or classes. This analysis is crucial for maintaining up-to-date documentation and project structure information, ensuring that changes are accurately reflected in project metadata and documentation. +**Note**: It is important to ensure that the file path provided is correct and that the Git repository is properly initialized and accessible. Additionally, the subprocess module requires appropriate permissions to execute Git commands. -**Note**: -- It is important to ensure that the Git repository is correctly initialized and that the file paths provided are relative to the repository's root directory. -- The function assumes that the Git command-line tools are available and configured correctly on the system where it is executed. -- The function executes shell commands, which could introduce security risks if not properly managed, especially when dealing with untrusted file paths. - -**Output Example**: -Assuming a file has been modified to add a new function, the output might look like this: -```python +**Output Example**: An example of the output from get_file_diff might look like the following: +``` [ - "+def new_function():", - "+ pass" + "- def old_function():", + "+ def new_function():", + " print('This is a new function')" ] ``` -This output indicates that a new function named `new_function` has been added to the file, with the `+` sign indicating an addition. *** ### FunctionDef parse_diffs(self, diffs) -**parse_diffs**: The function of `parse_diffs` is to parse the differences in content, extracting information about added and deleted lines, which can represent objects such as classes or functions. +**parse_diffs**: The function of parse_diffs is to parse the difference content and extract the added and deleted object information from a list of diffs. -**Parameters**: -- `diffs` (list): A list containing difference content. This is obtained by the `get_file_diff()` function inside the class. +**parameters**: The parameters of this Function. +· diffs: A list containing difference content. Obtained by the get_file_diff() function inside the class. -**Code Description**: -The `parse_diffs` function is designed to analyze the differences between two versions of a file, identifying what has been added or removed. This analysis is crucial for understanding changes in the codebase, especially in the context of version control systems like git. The function takes a list of diff strings as input, which represents the changes made to a file. +**Code Description**: The parse_diffs function processes a list of differences (diffs) typically generated by a version control system like Git. It identifies lines that have been added or removed in the context of a file's changes. The function initializes a dictionary called changed_lines to store the results, which includes two keys: "added" and "removed". Each key holds a list of tuples, where each tuple contains the line number and the corresponding line content. 
-The function initializes a dictionary named `changed_lines` with two keys: "added" and "removed", each associated with an empty list. These lists will be populated with tuples containing the line number and the line content for each added or removed line. +The function iterates through each line in the diffs list. It first checks for line number information using a regular expression that matches the format of diff headers (e.g., "@@ -43,33 +43,40 @@"). If a match is found, it updates the current line numbers for both the original and changed content. -The function iterates through each line in the `diffs` list. It uses regular expressions to detect and parse line number information from diff metadata lines (those starting with "@@"). This information is crucial for accurately tracking the line numbers of added and removed lines. +For lines that start with a "+", indicating an addition, the function appends the line number and content (excluding the "+") to the "added" list. Conversely, lines that start with a "-", indicating a removal, are appended to the "removed" list. If a line does not indicate a change, the function increments both line numbers to account for unchanged lines. -For lines that represent added content (starting with "+", but not "+++"), the function appends a tuple of the line number and the line content (minus the "+" prefix) to the "added" list. Similarly, for lines representing removed content (starting with "-", but not "---"), it appends a tuple to the "removed" list. Lines that do not indicate a change (neither added nor removed) increment both the current and changed line numbers, ensuring accurate tracking through unchanged content. +The output of this function is a dictionary that provides a structured representation of the changes, allowing other parts of the code to easily access information about what has been added or removed. -This function is integral to the change detection mechanism of the project. It is called by the `process_file_changes` method in the `Runner` class, which processes changes in files detected as either new or modified. The `parse_diffs` function's output is further used to identify structural changes in Python files, such as additions or deletions of functions and classes. This structural change information is then utilized to update documentation and project hierarchy information, reflecting the current state of the codebase accurately. +The parse_diffs function is called within the process_file_changes method of the Runner class. This method is responsible for processing changes in files detected in a repository. It retrieves the diffs for a specific file using the get_file_diff function and then passes this list to parse_diffs to obtain structured information about the changes. The results are subsequently used to identify changes in the file's structure and update relevant documentation accordingly. -**Note**: -- The addition of an object does not necessarily mean it is newly created; modifications are also represented as additions in the diff output. To determine if an object is genuinely new, additional analysis or functions (like `get_added_objs()`) are required. -- The function assumes that the input diffs are correctly formatted and obtained from a reliable source, such as the output of a version control system's diff command. 
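A condensed sketch of this parsing loop, assuming input lines in the unified diff format shown above:

```python
import re

def parse_diffs(diffs: list[str]) -> dict[str, list[tuple[int, str]]]:
    """Collect (line number, content) pairs for added and removed lines."""
    changed_lines = {"added": [], "removed": []}
    line_number_current = 0  # position in the original version
    line_number_change = 0   # position in the changed version
    for line in diffs:
        # Hunk headers such as "@@ -43,33 +43,40 @@" reset both counters.
        header = re.match(r"@@ -(\d+),\d+ \+(\d+),\d+ @@", line)
        if header:
            line_number_current = int(header.group(1))
            line_number_change = int(header.group(2))
            continue
        if line.startswith("+") and not line.startswith("+++"):
            changed_lines["added"].append((line_number_change, line[1:]))
            line_number_change += 1
        elif line.startswith("-") and not line.startswith("---"):
            changed_lines["removed"].append((line_number_current, line[1:]))
            line_number_current += 1
        else:
            # Unchanged context lines advance both counters.
            line_number_current += 1
            line_number_change += 1
    return changed_lines
```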
+**Note**: It is important to understand that the additions identified by this function do not necessarily indicate newly created objects; modifications in the code are represented as both deletions and additions in the diff output. To determine if an object is newly added, the get_added_objs() function should be used. -**Output Example**: -```python +**Output Example**: A possible appearance of the code's return value could be: { - 'added': [(87, ' def to_json_new(self, comments = True):'), (88, ' data = {')], - 'removed': [(34, ' def to_json(self):'), (35, ' pass')] + 'added': [ + (86, ' '), + (87, ' def to_json_new(self, comments = True):'), + (88, ' data = {'), + (89, ' "name": self.node_name,'), + (95, '') + ], + 'removed': [] } -``` -This example output shows that lines 87 and 88 were added, representing the addition of a new method `to_json_new`, and lines 34 and 35 were removed, indicating the deletion of an existing method `to_json`. *** ### FunctionDef identify_changes_in_structure(self, changed_lines, structures) -**identify_changes_in_structure**: The function identifies the structures (functions or classes) in which changes have occurred based on the lines changed in a file. +**identify_changes_in_structure**: The function of identify_changes_in_structure is to identify the structures (functions or classes) that have changed in a given set of modified lines of code. -**Parameters**: -- **changed_lines (dict)**: A dictionary containing the line numbers where changes have occurred, categorized into 'added' and 'removed'. Each category contains a list of tuples, with each tuple representing a line number and the content of the change. -- **structures (list)**: A list of tuples representing the structure of functions or classes within the file. Each tuple contains the structure type, name, start line number, end line number, and parent structure name. +**parameters**: The parameters of this Function. +· changed_lines: A dictionary containing the line numbers where changes have occurred, structured as {'added': [(line number, change content)], 'removed': [(line number, change content)]}. +· structures: A list of structures (functions or classes) obtained from get_functions_and_classes, where each structure is represented by its type, name, start line number, end line number, and parent structure name. -**Code Description**: -The `identify_changes_in_structure` function plays a crucial role in tracking modifications within a file's structure, specifically targeting functions and classes. It operates by iterating over each line that has been marked as changed (either added or removed) and determining if this line falls within the boundaries of any known structure's start and end lines. If a match is found, it implies that the structure encompassing this line has undergone changes. The function then records the name of this structure, along with its parent structure's name, into a result dictionary named `changes_in_structures`. This dictionary is organized into two keys: 'added' and 'removed', each holding a set of tuples. Each tuple contains the name of the changed structure and its parent structure's name, if applicable. +**Code Description**: The identify_changes_in_structure function processes a dictionary of changed lines and a list of structures to determine which functions or classes have been modified. It initializes a result dictionary, changes_in_structures, with keys 'added' and 'removed', both containing empty sets. 
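A minimal sketch of this matching logic, assuming the structure tuples of (type, name, start line, end line, parent name) described above:

```python
def identify_changes_in_structure(changed_lines, structures):
    """Map changed line numbers onto the enclosing function or class."""
    changes_in_structures = {"added": set(), "removed": set()}
    for change_type, lines in changed_lines.items():
        for line_number, _content in lines:
            for _struct_type, name, start_line, end_line, parent in structures:
                if start_line <= line_number <= end_line:
                    changes_in_structures[change_type].add((name, parent))
    return changes_in_structures
```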
The function then iterates through each change type (either 'added' or 'removed') and the corresponding lines. For each line number that has changed, it checks against the list of structures to see if the line number falls within the start and end line numbers of any structure. If a match is found, the structure's name and its parent structure's name are added to the appropriate set in the changes_in_structures dictionary. -This function is integral to the project's ability to maintain an up-to-date understanding of its codebase structure, especially after modifications. It is called within the `process_file_changes` method of the `Runner` class, which processes file changes detected in a repository. The `process_file_changes` method uses the output from `identify_changes_in_structure` to update various project documentation and metadata, including markdown documents and JSON structure files. This ensures that the project's documentation remains synchronized with the actual codebase, reflecting any structural changes made to functions or classes. +This function is called by the process_file_changes method in the Runner class. In that context, it is used to analyze changes detected in a Python file, where it receives the changed lines and the structures of the file. The output of identify_changes_in_structure is then logged and can be used to update project documentation or JSON structure information. This integration ensures that any modifications in the codebase are accurately reflected in the project's metadata and documentation. -**Note**: -- It is essential to ensure that the `structures` parameter accurately represents the current state of the file's structure before calling this function. This accuracy is crucial for the correct identification of changes. -- The function assumes that the input for `changed_lines` is correctly formatted and contains valid line numbers and change content. +**Note**: It is important to ensure that the structures provided to this function are accurate and up-to-date, as any discrepancies may lead to incorrect identification of changes. -**Output Example**: -```python -{ - 'added': {('MyNewFunction', 'MyClass'), ('AnotherFunction', None)}, - 'removed': set() -} -``` -This example output indicates that `MyNewFunction`, which is a part of `MyClass`, and `AnotherFunction`, which does not belong to any parent structure, have been added. No structures have been removed in this example. +**Output Example**: An example of the function's return value could be: {'added': {('NewFunction', 'ParentClass'), ('AnotherFunction', None)}, 'removed': set()}. This indicates that 'NewFunction' was added under 'ParentClass', while no functions were removed. *** ### FunctionDef get_to_be_staged_files(self) -**get_to_be_staged_files**: The function of `get_to_be_staged_files` is to retrieve all unstaged files in the repository that meet specific conditions and return their paths. +**get_to_be_staged_files**: The function of get_to_be_staged_files is to retrieve all unstaged files in the repository that meet specific conditions for staging. -**Parameters**: This function does not take any parameters as it is designed to be called on an instance of its containing class. +**parameters**: The parameters of this Function. +· No parameters are required for this function. -**Code Description**: The `get_to_be_staged_files` method is a crucial component of the change detection mechanism within a version-controlled project. 
It operates by identifying files within a repository that are either modified but not yet staged for commit or are new and untracked. The method specifically looks for files that satisfy one of two conditions: -1. The file is a Markdown (.md) document corresponding to an already staged file, typically a Python (.py) file, indicating documentation that needs to be updated alongside code changes. -2. The file's path matches the 'project_hierarchy' field specified in the project's configuration (CONFIG), indicating a critical file for the project's structure or documentation. +**Code Description**: The get_to_be_staged_files method is designed to identify and return a list of file paths that are either modified but not staged or untracked, based on certain criteria. The method performs the following operations: -The method begins by identifying staged files and then proceeds to examine both untracked files and unstaged changes to existing files. For untracked files, it checks if they are Markdown documents potentially corresponding to staged Python files or if they match the project hierarchy. For unstaged changes, it similarly identifies Markdown documents needing updates or files matching the project hierarchy. +1. It initializes an empty list called to_be_staged_files to store the paths of files that need to be staged. +2. It retrieves a list of already staged files by comparing the current index with the HEAD commit using the Git repository's diff method. +3. The method then fetches the current project settings using the SettingsManager's get_setting method, which provides access to configuration details such as project hierarchy and markdown documentation folder. +4. It gathers a list of all unstaged changes (diffs) in the repository and identifies untracked files that exist in the working directory but have not been added to the staging area. +5. The method iterates through the untracked files and checks if they meet the following conditions: + - If the untracked file's path starts with the markdown documentation folder name, it is added to the to_be_staged_files list. + - If the untracked file is a markdown file (.md) and has a corresponding Python file (.py) that is already staged, the markdown file is also added to the list. + - If the untracked file's path matches the project hierarchy, it is added to the list as well. +6. The method then processes the unstaged files, similarly checking if they are markdown files or match the project hierarchy, and adds them to the to_be_staged_files list if they meet the criteria. +7. Finally, the method returns the list of paths that need to be staged. -This method is integral to maintaining synchronization between code changes and their documentation or project structure updates. It is called by the `add_unstaged_files` method to add these identified files to the staging area, ensuring that no critical changes or documentation updates are missed before a commit. The method's functionality is validated through tests, such as ensuring that modified Markdown files are correctly identified as unstaged and that they can be successfully staged by the `add_unstaged_files` method. +This method is called by the add_unstaged_files method within the ChangeDetector class, which utilizes the output of get_to_be_staged_files to determine which files should be added to the staging area. 
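A condensed sketch of the selection rules above, assuming GitPython's `index.diff` and `untracked_files` APIs; the settings attribute names and the `.md`-to-`.py` mapping are illustrative stand-ins, not the verbatim implementation:

```python
import os

def get_to_be_staged_files(repo, setting) -> list[str]:
    """Collect unstaged or untracked paths that satisfy the staging rules."""
    to_be_staged: list[str] = []
    staged = [d.a_path for d in repo.index.diff("HEAD")]   # already staged
    unstaged = [d.a_path for d in repo.index.diff(None)]   # modified, not staged
    docs_folder = setting.project.markdown_docs_name       # assumed field name
    hierarchy = setting.project.hierarchy_name             # assumed field name

    for path in repo.untracked_files + unstaged:
        if path.startswith(docs_folder):
            to_be_staged.append(path)
        elif path.endswith(".md"):
            # Stage a Markdown file whose corresponding .py file is staged.
            corresponding_py = os.path.basename(path).replace(".md", ".py")
            if any(s.endswith(corresponding_py) for s in staged):
                to_be_staged.append(path)
        elif path == hierarchy:
            to_be_staged.append(path)
    return to_be_staged
```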
Additionally, it is tested in the TestChangeDetector class through unit tests that verify its functionality by checking if modified markdown files are correctly identified as unstaged. -**Note**: This method assumes that the repository is already initialized and that the CONFIG dictionary is correctly set up with necessary project configurations, including the 'project_hierarchy' field and the 'Markdown_Docs_folder'. It also relies on the GitPython library for interacting with the repository's index and staging area. +**Note**: It is important to ensure that the repository is in a clean state and that the project settings are correctly configured before invoking this method, as any discrepancies may lead to inaccurate results. -**Output Example**: -Assuming the repository has an unstaged Markdown file corresponding to a staged Python file and a file matching the project hierarchy, the method might return: +**Output Example**: A possible appearance of the code's return value when calling get_to_be_staged_files could be: ``` -['/path/to/project/docs/updated_documentation.md', '/path/to/project/project_hierarchy_file'] +[ + 'path/to/repo/markdown_docs/test_file.md', + 'path/to/repo/markdown_docs/another_file.md', + 'path/to/repo/documentation' +] ``` -This output is a list of relative file paths within the repository that need to be staged, indicating that these files have been identified as requiring updates or tracking before the next commit. *** ### FunctionDef add_unstaged_files(self) -**add_unstaged_files**: The function of `add_unstaged_files` is to add unstaged files, which meet specific conditions, to the staging area in a Git repository. +**add_unstaged_files**: The function of add_unstaged_files is to add unstaged files that meet specific conditions to the staging area of a Git repository. + +**parameters**: The parameters of this Function. +· No parameters are required for this function. -**Parameters**: This function does not take any external parameters as it operates on the instance variables of its containing class. +**Code Description**: The add_unstaged_files method is designed to identify and stage files in a Git repository that are currently unstaged but meet certain criteria for staging. This function operates as follows: -**Code Description**: The `add_unstaged_files` method is a critical component of the change management process within a version-controlled project. It begins by invoking the `get_to_be_staged_files` method to retrieve a list of unstaged files that meet predefined conditions. These conditions are determined based on the project's requirements, such as files being of a certain type or matching specific patterns. Once the list of files is obtained, the method iterates over each file path, constructing and executing a Git command to add each file to the staging area. The Git command is executed in the context of the repository's working directory, ensuring that the operation is correctly scoped to the current project. The subprocess module is used to run the Git command, with the `shell=True` parameter allowing the command string to be executed as if it were typed directly into the shell. The `check=True` parameter ensures that an exception is raised if the command exits with a non-zero status, indicating an error. After all applicable files have been added to the staging area, the method returns the list of file paths that were staged. This return value can be useful for logging or further processing within the application. +1. 
It first calls the get_to_be_staged_files method, which retrieves a list of file paths for all unstaged files that meet specific conditions. These conditions typically include files that are modified but not staged or untracked files that should be staged based on project settings. -**Note**: This method assumes that the Git repository is already initialized and that the instance of the containing class has access to the repository's working directory. It also relies on the correct configuration and implementation of the `get_to_be_staged_files` method to identify the files that should be staged. Errors in the execution of the Git command, such as due to incorrect file paths or permissions issues, will result in a subprocess.CalledProcessError exception. +2. The method then iterates over the list of unstaged files obtained from get_to_be_staged_files. For each file path, it constructs a Git command to add the file to the staging area. The command is formatted as `git -C {self.repo.working_dir} add {file_path}`, where `self.repo.working_dir` is the path to the working directory of the repository. -**Output Example**: Assuming the repository has two unstaged files that meet the conditions for staging, `/path/to/project/docs/updated_documentation.md` and `/path/to/project/project_hierarchy_file`, the method might return: +3. The subprocess.run function is used to execute the constructed Git command. The `shell=True` argument allows the command to be run in the shell, and `check=True` ensures that an exception is raised if the command fails. + +4. After processing all unstaged files, the method returns the list of file paths that were identified as needing to be staged. + +This method is called by the run method in the Runner class, which is responsible for managing the document update process. The run method detects changes in the repository, processes them, and ultimately invokes add_unstaged_files to ensure that any newly generated or modified Markdown files are added to the staging area. Additionally, it is also called in the process_file_changes method, which handles changes to individual files and ensures that any corresponding documentation is updated and staged. + +The add_unstaged_files method is crucial for maintaining an accurate staging area in the Git repository, particularly in workflows that involve automatic documentation generation based on changes in Python files. + +**Note**: It is important to ensure that the repository is in a clean state and that the project settings are correctly configured before invoking this method, as any discrepancies may lead to inaccurate results. + +**Output Example**: A possible appearance of the code's return value when calling add_unstaged_files could be: ``` -['/path/to/project/docs/updated_documentation.md', '/path/to/project/project_hierarchy_file'] +[ + 'path/to/repo/markdown_docs/test_file.md', + 'path/to/repo/markdown_docs/another_file.md', + 'path/to/repo/documentation' +] ``` -This output indicates that these files have been successfully added to the staging area, ready for the next commit. *** diff --git a/markdown_docs/repo_agent/chat_engine.md b/markdown_docs/repo_agent/chat_engine.md index b366408..02b97bd 100644 --- a/markdown_docs/repo_agent/chat_engine.md +++ b/markdown_docs/repo_agent/chat_engine.md @@ -1,367 +1,119 @@ -## ClassDef ContextLengthExceededError -**ContextLengthExceededError**: The function of ContextLengthExceededError is to signal that the input size has surpassed the model's context length limit. 
- -**Attributes**: This class does not explicitly define any attributes beyond those inherited from its superclass, `Exception`. - -**Code Description**: The `ContextLengthExceededError` class is a custom exception class that extends the built-in `Exception` class in Python. It is designed specifically for use within a chat engine or similar text processing systems where there is a predefined maximum context length that the system can handle. When the input text exceeds this maximum allowable length, an instance of `ContextLengthExceededError` is raised to indicate this specific type of error. - -This class is intentionally simple, containing no additional methods or attributes beyond what it inherits from `Exception`. The primary purpose of defining this class is to provide a clear and specific type of error that can be caught and handled differently from other exceptions. This allows for more granular error handling in the application, enabling developers to provide more informative error messages or take specific actions when this error occurs. - -The docstring of the class provides a concise explanation of the exception's purpose, which aids in code readability and maintenance. By adhering to the convention of extending the base `Exception` class, `ContextLengthExceededError` integrates seamlessly with Python's exception handling mechanisms. - -**Note**: When using `ContextLengthExceededError` in a project, it is important to catch this exception at points where input length might exceed the model's limitations. This allows for graceful handling of the error, such as prompting the user to shorten their input or logging the event for further analysis. It is a best practice to document the specific context length limit of the model in the sections of the code where this exception might be raised, to aid in clarity and maintainability. -## FunctionDef get_import_statements -**get_import_statements**: The function of get_import_statements is to retrieve all import statements from the source code of the current module. - -**Parameters**: This function does not take any parameters. - -**Code Description**: The `get_import_statements` function operates by first utilizing the `inspect.getsourcelines` method to obtain the source lines of the current module. This is achieved by passing `sys.modules[__name__]` to `getsourcelines`, where `__name__` represents the name of the current module, and `sys.modules` is a dictionary that maps module names to module objects. The `[0]` at the end of the `getsourcelines` call is used to access the first item in the returned tuple, which contains the list of source lines. - -Once the source lines are obtained, the function iterates over each line, filtering for lines that either start with "import" or "from" after being stripped of leading and trailing whitespace. This is accomplished through a list comprehension, which checks if `line.strip().startswith("import")` or `line.strip().startswith("from")` for each line in the source lines. - -The filtered lines, which are the import statements, are then returned as a list. - -**Note**: This function is particularly useful for dynamic analysis of a module's dependencies, allowing developers to programmatically access the import statements used within a module. It is important to note that this function will only retrieve import statements that are statically defined in the source code of the module at the time of its execution. Dynamically generated import statements executed at runtime may not be captured. 
- -**Output Example**: -```python -[ - "import sys", - "from inspect import getsourcelines" -] -``` -This example output shows a possible return value from the `get_import_statements` function, where the current module has two import statements: one importing the `sys` module and another importing the `getsourcelines` function from the `inspect` module. -## FunctionDef build_path_tree(who_reference_me, reference_who, doc_item_path) -**build_path_tree**: The function of `build_path_tree` is to construct a hierarchical tree structure representing the relationships between different parts of a project based on references among them. - -**Parameters**: -- `who_reference_me`: A list of paths indicating which objects are referenced by the current object. -- `reference_who`: A list of paths indicating which objects the current object references. -- `doc_item_path`: The path of the current documentation item, used to mark its position in the generated tree. - -**Code Description**: -The `build_path_tree` function plays a crucial role in visualizing the structure and dependencies within a project. It takes two lists of paths (`who_reference_me` and `reference_who`) that represent the relationships between different components of the project, as well as the path of the current item being documented (`doc_item_path`). The function constructs a tree where each node represents a directory or file, and the structure reflects the hierarchy and references within the project. - -The function begins by creating a nested dictionary structure using a recursive `tree` function, which utilizes `defaultdict` to facilitate the dynamic addition of nodes. It then iterates over both lists of paths, splitting each path into its components and incrementally building out the tree structure by traversing and adding nodes according to the parts of each path. - -After processing both lists, the function modifies the `doc_item_path` by appending a star symbol (`✳️`) to the last component, which marks the current documentation item within the tree. This is followed by integrating this modified path into the tree structure in a similar manner. - -To convert the nested dictionary structure into a human-readable string representation, the `tree_to_string` helper function is employed. It recursively traverses the tree, indenting each level to reflect the hierarchy, and returns a string that visually represents the project's structure. - -This function is integral to the documentation generation process in the `ChatEngine` class, specifically within the `generate_doc` method. It provides a visual representation of the project's structure, highlighting the current object's position and its relationships with other components. This aids in understanding the context and dependencies of the documented item within the overall project. - -**Note**: -- The paths in `who_reference_me` and `reference_who` should use the operating system's file separator (`os.sep`) for compatibility. -- The function assumes that the input paths are correctly formatted and relevant to the project's structure. -- The generated tree structure is a simplified representation and may not include all details of the actual file system or project architecture. 
- -**Output Example**: -Assuming a project structure and the current documentation item's path, the output might look like this: - -``` -repo_agent - chat_engine.py - build_path_tree - ✳️build_path_tree -``` - -This example demonstrates a part of the tree structure, with the current item (`build_path_tree`) marked with a star symbol, indicating its position within the project hierarchy. -### FunctionDef tree -**Function**: tree - -**Function of tree**: The function creates a recursive defaultdict structure. - -**Parameters**: This function does not take any parameters. - -**Code Description**: The `tree` function is designed to generate a recursive data structure using Python's `defaultdict` from the `collections` module. The key feature of this function is its ability to infinitely nest dictionaries, allowing for the creation of a tree-like structure. This is achieved by passing the `tree` function itself as the default factory to `defaultdict`. When a new key is accessed that does not exist, instead of raising a KeyError, it automatically creates another `defaultdict` with the same properties, thus allowing for the creation of deeply nested structures on the fly. - -**Note**: This function is particularly useful in scenarios where one needs to build a dynamically growing tree structure, such as parsing hierarchical data or building a nested menu system. It's important to be cautious with its use, as it's easy to inadvertently create very deep or infinite structures, which can lead to issues like maximum recursion depth errors or significant memory usage. - -**Output Example**: -```python -# Creating a nested structure with the tree function -nested_dict = tree() -nested_dict['Europe']['Germany']['Bavaria'] = 'Munich' - -# Accessing the nested structure -print(nested_dict['Europe']['Germany']['Bavaria']) -# Output: Munich - -# Attempting to access a non-existent key creates new branches automatically -print(nested_dict['Europe']['Italy']['Tuscany']) -# Output: defaultdict(<function tree at 0x...>, {}) -``` - -In the output example, accessing `nested_dict['Europe']['Germany']['Bavaria']` returns 'Munich', demonstrating how values can be assigned to deeply nested keys. Additionally, attempting to access `nested_dict['Europe']['Italy']['Tuscany']`, a path that has not been explicitly created, does not raise an error. Instead, it shows that a new branch has been automatically created, illustrating the recursive and auto-vivifying nature of the `tree` function. -*** -### FunctionDef tree_to_string(tree, indent) -**tree_to_string**: The function of `tree_to_string` is to convert a hierarchical tree structure into a formatted string representation. - -**Parameters**: -- `tree`: A dictionary representing the tree structure where each key-value pair corresponds to a node and its children. The children of a node are also represented as a dictionary. -- `indent`: An integer representing the current indentation level for formatting the string representation. It defaults to 0, meaning no indentation for the root level. - -**Code Description**: -The `tree_to_string` function iterates through each key-value pair in the `tree` dictionary. The keys and values represent nodes and their children, respectively. The function sorts the keys of the dictionary to ensure a consistent order in the output string.
Each key is followed by a newline character to format the tree structure vertically. - -If the value associated with a key is itself a dictionary (indicating the presence of child nodes), the function recursively calls itself with the child dictionary and an incremented `indent` value. This recursive call allows the function to traverse the tree depth-first and to increase the indentation level for each level of depth, thereby formatting the tree structure appropriately in the string representation. - -The function returns the formatted string `s` after iterating through all key-value pairs in the tree. - -**Note**: -- The function assumes that the input `tree` is a properly structured dictionary that represents a tree. Each node's children must also be represented as dictionaries for the function to work correctly. -- The initial call to `tree_to_string` should typically be made with the default `indent` value unless a specific indentation is required from the outset. - -**Output Example**: -Given a tree represented by the following dictionary: -```python -{ - "Fruits": { - "Tropical": { - "Mango": {}, - "Papaya": {} - }, - "Temperate": { - "Apple": {}, - "Cherry": {} - } - }, - "Vegetables": { - "Leafy": { - "Spinach": {}, - "Kale": {} - }, - "Root": { - "Carrot": {}, - "Potato": {} - } - } -} -``` -The output of `tree_to_string(tree)` might look like this: -``` -Fruits - Temperate - Apple - Cherry - Tropical - Mango - Papaya -Vegetables - Leafy - Kale - Spinach - Root - Carrot - Potato -``` -This output represents the tree structure in a human-readable format, with indentation used to denote the hierarchy of nodes. -*** ## ClassDef ChatEngine -**ChatEngine**: The function of ChatEngine is to facilitate the generation of documentation for functions or classes within a software project. - -**Attributes**: -- `config`: A configuration dictionary that stores settings and preferences for the ChatEngine instance. - -**Code Description**: -The ChatEngine class is designed to automate the documentation process for software projects. It primarily focuses on generating documentation based on the code structure and relationships within the project. The class is initialized with a configuration dictionary, which can include various settings such as language preferences, API keys, and model configurations for documentation generation. - -One of the key methods in ChatEngine is `num_tokens_from_string`, which calculates the number of tokens in a given string based on a specified encoding. This method is crucial for understanding the complexity and size of the documentation content in terms of language model processing limits. - -The `generate_doc` method is the core functionality of ChatEngine, where it takes a `DocItem` object representing a piece of code (function or class) and a file handler. It constructs a detailed documentation prompt by analyzing the code's structure, its relationships with other code entities (who references it and whom it references), and its position within the project's hierarchy. This method intelligently handles various aspects such as determining the code type (class or function), managing documentation tokens, and generating relationship descriptions between different code entities. - -The class also includes private methods like `get_referenced_prompt` and `get_referencer_prompt`, which generate textual descriptions of the code's relationships with other entities in the project. 
These methods are essential for creating a comprehensive documentation that not only explains the code itself but also its context and interactions within the larger project structure. - -Furthermore, ChatEngine handles the complexity of generating documentation by considering the limitations of the underlying language model used for documentation generation. It attempts to find an appropriate balance between the detail of the documentation and the constraints of the model, such as token limits. In cases where the documentation content exceeds these limits, ChatEngine employs strategies to reduce the content size or switch to a model that supports a larger context window. - -**Note**: -- The effectiveness of the ChatEngine in generating accurate and comprehensive documentation heavily relies on the configuration settings provided during initialization. It is crucial to ensure that these settings, especially those related to the language model and API keys, are correctly configured. -- The class assumes the presence of a structured project hierarchy and inter-code relationships. Proper tagging and referencing within the project's codebase are essential for maximizing the utility of ChatEngine. - -**Output Example**: -An example output of the ChatEngine's `generate_doc` method might include a detailed description of a function, its parameters, return values, and examples of usage. Additionally, it would provide information about the function's relationships with other parts of the project, such as which classes or functions call it and which ones it calls, enhancing the understanding of the function's role within the overall project architecture. - -In the context of its usage within the project, ChatEngine is utilized by the `Runner` class in `repo_agent/runner.py`, indicating its role in automating documentation tasks as part of the project's build or maintenance processes. This relationship underscores the importance of ChatEngine in supporting developers by providing up-to-date and comprehensive documentation, facilitating better understanding and navigation of the project's codebase. -### FunctionDef __init__(self, CONFIG) -**__init__**: The function of `__init__` is to initialize a new instance of the ChatEngine class. - -**Parameters**: -- `CONFIG`: A configuration object or dictionary containing settings and parameters required by the ChatEngine. +Doc is waiting to be generated... +### FunctionDef __init__(self, project_manager) +**__init__**: The function of __init__ is to initialize an instance of the ChatEngine class with the necessary configuration settings for the OpenAI API. -**Code Description**: -The `__init__` method is a special method in Python, commonly known as a constructor. This method is automatically called when a new instance of a class is created. In the context of the ChatEngine class, the `__init__` method is designed to perform initial setup for the instance. +**parameters**: The parameters of this Function. +· project_manager: An instance of the ProjectManager class that is responsible for managing the overall project workflow and interactions. -The method takes a single parameter, `CONFIG`, which is expected to be a configuration object or a dictionary. This parameter allows for flexible and dynamic initialization of the ChatEngine instance, as it can carry various configuration settings required for the operation of the chat engine. 
These settings might include, but are not limited to, API keys, database connection information, or custom settings specific to the chat engine's functionality. +**Code Description**: The __init__ method of the ChatEngine class is designed to set up the initial state of the ChatEngine instance by configuring it with the appropriate settings for the OpenAI API. Upon instantiation, the method first retrieves the current configuration settings by calling the `get_setting` method from the SettingsManager class. This method ensures that the settings are accessed in a consistent manner throughout the application, adhering to the Singleton design pattern. -Within the body of the `__init__` method, the provided `CONFIG` parameter is assigned to an instance variable named `config`. This assignment makes the configuration settings accessible throughout the instance, allowing other methods within the ChatEngine class to utilize these settings as needed. +The retrieved settings include critical parameters such as the OpenAI API key, the base URL for API requests, the timeout duration for requests, the model to be used for chat completions, and the temperature setting that influences the randomness of the generated responses. These parameters are essential for the ChatEngine to function correctly and interact with the OpenAI API effectively. -**Note**: -It is important to ensure that the `CONFIG` parameter provided during the instantiation of the ChatEngine class contains all the necessary configuration settings. Missing or incorrect configuration could lead to unexpected behavior or errors in the chat engine's operation. Additionally, the structure and content of the `CONFIG` object should be well-documented, making it easier for developers to understand and use the ChatEngine class effectively. -*** -### FunctionDef num_tokens_from_string(self, string, encoding_name) -**num_tokens_from_string**: The function of num_tokens_from_string is to return the number of tokens in a text string. - -**Parameters**: -- **string (str)**: The text string to be tokenized. -- **encoding_name (str)**: The name of the encoding to use for tokenization, with a default value of "cl100k_base". - -**Code Description**: -The `num_tokens_from_string` function is a method within the `ChatEngine` class, designed to calculate the number of tokens that a given string can be divided into, based on a specified encoding. This process begins by retrieving the encoding details using the `tiktoken.get_encoding` function, which takes the `encoding_name` as its argument. Once the encoding is obtained, the string is encoded using the `encode` method of the retrieved encoding object. The length of the resulting encoded list represents the number of tokens the input string contains. This value is then returned as the function's output. - -In the context of the `ChatEngine` class, this method plays a crucial role in understanding and processing natural language inputs. By determining the number of tokens in a string, the `ChatEngine` can make informed decisions about handling user inputs, such as generating responses or performing further natural language processing tasks. - -The `num_tokens_from_string` method is utilized by the `generate_doc` method within the same `ChatEngine` class. The `generate_doc` method is responsible for generating documentation for different components of the project, including functions and classes. 
It uses `num_tokens_from_string` to calculate the total number of tokens in the system and user prompts, which is essential for managing the input size for machine learning models that generate the documentation. This relationship highlights the method's utility in managing and optimizing the generation of dynamic content based on natural language inputs. - -**Note**: -It is important to ensure that the encoding name provided as a parameter to this function matches one of the encodings supported by the `tiktoken` library. Using an unsupported encoding name will result in an error. The default encoding, "cl100k_base", is typically suitable for a wide range of applications, but different encodings can be specified to better suit specific needs or languages. +The OpenAI instance is then created using these settings, allowing the ChatEngine to perform chat-related functionalities, such as generating responses based on user input. The integration of the SettingsManager ensures that the ChatEngine is always configured with the latest settings, promoting maintainability and reducing the risk of errors due to misconfiguration. -**Output Example**: -Suppose the input string is "Hello, world!" and the default encoding "cl100k_base" is used. If this encoding translates the input string into 3 tokens, the function would return: +From a functional perspective, the ChatEngine class relies on the SettingsManager to provide the necessary configuration settings, which are crucial for its operation. This relationship exemplifies the design principle of separation of concerns, where the SettingsManager handles the management of configuration settings, while the ChatEngine focuses on its primary functionality of facilitating chat interactions. -``` -3 -``` - -This output indicates that the string "Hello, world!" consists of 3 tokens according to the specified encoding. +**Note**: It is important to ensure that the SettingsManager is properly configured and that the Setting class contains valid attributes before instantiating the ChatEngine. Any misconfiguration may lead to runtime errors or unexpected behavior when the ChatEngine attempts to utilize the OpenAI API settings. *** -### FunctionDef generate_doc(self, doc_item, file_handler) -**generate_doc**: The function of `generate_doc` is to generate documentation for a given documentation item within a software project's repository. - -**Parameters**: -- `doc_item`: An instance of `DocItem` representing the documentation item for which documentation is to be generated. -- `file_handler`: A file handler object that provides methods for file operations and access to repository paths. - -**Code Description**: -The `generate_doc` function is a method within the `ChatEngine` class, designed to automate the documentation process for software projects. It takes a documentation item (`doc_item`) and a file handler as inputs and generates documentation based on the content and references associated with the `doc_item`. - -The function begins by extracting information from the `doc_item`, such as the type of code (class or function), the name, the actual code content, and whether it has a return value. It also retrieves lists of objects that the current item references (`reference_who`) and objects that reference the current item (`who_reference_me`), along with their respective names. - -A significant part of the function involves building a project structure tree that visually represents the hierarchical relationship of the documentation item within the project. 
This is achieved by calling the `build_path_tree` function with the lists of references and the documentation item's path. - -The function then generates prompts for referenced objects and referencers, providing insights into how the current item interacts with other parts of the project. These prompts include the names, documentation, and raw code of the referenced and referencer objects. - -Additionally, the function configures language settings based on the project's configuration and prepares a system prompt that includes detailed instructions for generating the documentation. This prompt incorporates the project structure, code information, and reference relationships. - -The documentation generation process involves sending the prepared prompts to an AI model, which returns the generated documentation. The function handles potential issues such as exceeding the maximum token limit by adjusting the input or switching to a model that supports a larger input. - -Throughout the process, the function makes several attempts to generate the documentation, handling connection errors and retrying as necessary. If successful, the generated documentation is returned; otherwise, the function may return `None` or raise an error after exhausting all attempts. - -**Note**: -- The function relies on external configurations and models for language processing and documentation generation. Ensure that the project's configuration is correctly set up with valid API keys and model settings. -- The function handles complex relationships between documentation items, including references and referencers. Accurate and comprehensive documentation requires that these relationships are correctly established in the `DocItem` instances. - -**Output Example**: -Due to the dynamic nature of the documentation generation process, the output is contingent on the input `doc_item` and the project's structure. The generated documentation will typically include a description of the item, its parameters or attributes, usage examples, and information about its relationships within the project. +### FunctionDef build_prompt(self, doc_item) +Doc is waiting to be generated... #### FunctionDef get_referenced_prompt(doc_item) -**get_referenced_prompt**: The function of `get_referenced_prompt` is to generate a detailed prompt string that lists all the objects referenced by a given documentation item, including their names, documentation content, and raw code. +**get_referenced_prompt**: The function of get_referenced_prompt is to generate a formatted string that summarizes the references made by a given DocItem, including details about the referenced objects and their documentation. + +**parameters**: The parameters of this Function. +· doc_item: An instance of the DocItem class, which contains information about the documentation item and its references. -**Parameters**: -- `doc_item`: This parameter is of type `DocItem` and represents the documentation item for which the referenced prompt is being generated. +**Code Description**: The get_referenced_prompt function is designed to create a prompt that outlines the references associated with a specific DocItem. It first checks if the provided doc_item has any references by evaluating the length of the reference_who attribute, which is a list of DocItem instances that reference the current item. If there are no references, the function returns an empty string. 
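A minimal sketch of the construction described here, assuming the DocItem attributes mentioned in this section (reference_who, md_content, content, and get_full_name):

```python
def get_referenced_prompt(doc_item) -> str:
    """Summarize the objects that this documentation item calls."""
    if len(doc_item.reference_who) == 0:
        return ""
    prompt = [
        "As you can see, the code calls the following objects, "
        "their code and docs are as following:"
    ]
    for reference_item in doc_item.reference_who:
        # Use the most recent generated doc for this object, if any exists.
        doc = reference_item.md_content[-1] if reference_item.md_content else "None"
        code = reference_item.content.get("code_content", "None")
        prompt.append(
            f"obj: {reference_item.get_full_name()}\n"
            f"Document: \n{doc}\n"
            f"Raw code:```\n{code}\n```"
            + "=" * 10  # visual divider between referenced objects
        )
    return "\n".join(prompt)
```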
-**Code Description**: -The `get_referenced_prompt` function begins by checking if the `doc_item` parameter has any referenced objects in its `reference_who` attribute. If there are no referenced objects, the function returns an empty string, indicating that there are no references to display. +If references are present, the function initializes a list called prompt with a predefined introductory string. It then iterates over each reference_item in the doc_item.reference_who list. For each reference_item, the function constructs a detailed string (instance_prompt) that includes the full name of the referenced object, its corresponding documentation content, and the raw code associated with it. The get_full_name method of the reference_item is called to retrieve its full hierarchical name, ensuring clarity in the context of the documentation. -If there are referenced objects, the function constructs a prompt string that starts with a predefined message indicating that the code calls certain objects, and their code and documentation will follow. For each referenced object, the function appends to the prompt string the full name of the object (obtained through the `get_full_name` method of the referenced object), the last entry of its markdown documentation content (if available), and its raw code content. The raw code content is included only if the `code_content` key exists in the referenced object's `content` dictionary. Each referenced object's information is separated by a series of equal signs (`=`) for clear visual separation. +The instance_prompt is formatted to include the object's name, its documentation (if available), and the raw code, all separated by a visual divider. Each instance_prompt is appended to the prompt list. Finally, the function joins all elements of the prompt list into a single string, separated by newline characters, and returns this string. -The function iterates over all objects referenced by the `doc_item`, appending each object's detailed information to the prompt string. Finally, the function returns the complete prompt string, which includes the documentation and code for all referenced objects. +This function is particularly useful in the context of generating documentation, as it provides a clear overview of how different documentation items are interconnected through references. It aids in understanding the relationships between various code elements, which is essential for maintaining comprehensive and accurate documentation. -**Note**: -- This function is crucial for generating documentation that includes references to other parts of the project. It helps in understanding the dependencies and relationships between different components of the project. -- The function assumes that the `reference_who` attribute of the `doc_item` parameter correctly lists all objects that the documentation item references. It is important to maintain accurate reference tracking within the project to ensure the generated prompt is complete and accurate. -- The inclusion of raw code in the prompt is designed to provide a comprehensive view of the referenced objects, but it may also include sensitive information. Consider this when using the function in environments where code confidentiality is a concern. +**Note**: When using the get_referenced_prompt function, ensure that the doc_item passed to it has been properly initialized and contains valid references. 
This will guarantee that the generated prompt accurately reflects the relationships and documentation of the referenced items. -**Output Example**: +**Output Example**: An example output of the get_referenced_prompt function for a DocItem with references might look like this: ``` As you can see, the code calls the following objects, their code and docs are as following: obj: repo_agent/doc_meta_info.py/DocItem Document: -**DocItem**: The function of DocItem is to represent a documentation item within a software project's repository... +**DocItem**: The function of DocItem is to represent individual documentation items within a project, encapsulating their metadata and relationships. Raw code:``` class DocItem: - item_type: DocItemType = DocItemType._class_function ... -```========== -obj: repo_agent/doc_meta_info.py/DocItem/get_full_name +``` +obj: repo_agent/another_file.py/AnotherClass Document: -**get_full_name**: The function of `get_full_name` is to generate a string representation of the hierarchical path... +**AnotherClass**: This class serves a different purpose within the project. Raw code:``` - def get_full_name(self, strict = False): - ... -```========== +class AnotherClass: + ... +``` ``` -This output example demonstrates how the function generates a prompt that includes detailed information about each object referenced by the documentation item, facilitating a deeper understanding of the project's structure and dependencies. *** #### FunctionDef get_referencer_prompt(doc_item) -**get_referencer_prompt**: The function of `get_referencer_prompt` is to generate a string prompt detailing objects that have called the given documentation item, including their documentation and raw code. - -**Parameters**: -- `doc_item`: A `DocItem` instance representing the documentation item for which the referencer prompt is being generated. +**get_referencer_prompt**: The function of get_referencer_prompt is to generate a prompt string that lists all the objects that reference a given documentation item, along with their associated documentation and code. -**Code Description**: -The `get_referencer_prompt` function starts by checking if the `doc_item` has any objects that reference it, indicated by the `who_reference_me` attribute of the `DocItem` class. If there are no such objects, the function returns an empty string, indicating that there are no referencers to document. +**parameters**: The parameters of this Function. +· doc_item: An instance of the DocItem class, which represents the documentation item for which the referencing objects are being retrieved. -If there are objects that reference the `doc_item`, the function constructs a prompt starting with a predefined message indicating that the code has been called by other objects. It then iterates over each object in the `who_reference_me` list of the `doc_item`. For each referencer object, it generates a detailed prompt that includes: -- The full name of the referencer object obtained via the `get_full_name` method. -- The most recent markdown documentation of the referencer object, if available, or 'None' if not. -- The raw code content of the referencer object, if available in its `content` dictionary under the key 'code_content', or 'None' if not. +**Code Description**: The get_referencer_prompt function is designed to create a formatted string that provides information about the objects that reference a specific documentation item. 
It begins by checking if the provided doc_item has any references in its who_reference_me attribute, which is a list of DocItem instances that reference the current item. If this list is empty, the function returns an empty string, indicating that there are no references to display. -Each referencer object's details are appended to a list, which is then joined into a single string with newline characters for separation, and returned. +If there are references, the function initializes a prompt list with a header string that introduces the subsequent information. It then iterates over each DocItem in the who_reference_me list. For each referencing item, it constructs a detailed string that includes the full name of the referencing object (obtained by calling the get_full_name method on the referencer_item), the last version of its markdown content (if available), and its raw code content (if present). Each of these details is formatted in a readable manner, separated by line breaks and a visual divider. -This function is crucial for generating documentation that provides insights into how different parts of the project are interconnected. By detailing which objects call a given documentation item, it helps developers understand dependencies and the impact of changes in the codebase. +Finally, the function joins all the strings in the prompt list into a single string, separated by newline characters, and returns this formatted string. This output serves as a comprehensive reference for developers, allowing them to quickly understand which objects are related to the given documentation item and to access their associated documentation and code. -**Note**: -- The function assumes that each `DocItem` object has a correctly populated `who_reference_me` list. This list should be maintained as part of the documentation generation and updating process. -- The output of this function is intended for inclusion in documentation files, where it can provide valuable context about the usage and importance of different code elements. +The get_referencer_prompt function is particularly useful in the context of documentation generation and management, as it helps to clarify the relationships between different code elements. By providing a clear overview of the references, it aids developers in navigating the documentation and understanding the dependencies within the codebase. -**Output Example**: -Assuming `doc_item` is referenced by two objects, the output might look like this: +**Note**: When using this function, ensure that the doc_item parameter is a properly initialized instance of the DocItem class with an established hierarchy and references. This will ensure accurate and meaningful output. +**Output Example**: An example output of the get_referencer_prompt function might look like this: ``` Also, the code has been called by the following objects, their code and docs are as following: -obj: repo_agent/chat_engine.py/ChatEngine +obj: repo_agent/doc_meta_info.py/DocItem Document: -None +This is a documentation item that describes a specific code element. Raw code:``` -class ChatEngine: - def __init__(self): - pass +class DocItem: + ... ``` ========== -obj: repo_agent/utils.py/UtilityFunction +obj: repo_agent/another_file.py/AnotherClass Document: -**UtilityFunction**: This function helps with... +This class interacts with the DocItem and provides additional functionality. Raw code:``` -def UtilityFunction(): - pass +class AnotherClass: + ... 
```
-==========
```
-
-This output provides a clear and detailed view of the objects referencing the `doc_item`, including their documentation and code, which is invaluable for understanding the context and relationships within the project.
***
#### FunctionDef get_relationship_description(referencer_content, reference_letter)
-**get_relationship_description**: The function of get_relationship_description is to generate a description of the relationship between different components in a project from a functional perspective, based on the provided parameters.
+**get_relationship_description**: The function of get_relationship_description is to generate a descriptive string regarding the relationship of a referencer with its callers and callees based on the provided inputs.
+
+**parameters**: The parameters of this Function.
+· referencer_content: A boolean indicating whether there is content that references the current component (i.e., the component has callers).
+· reference_letter: A boolean indicating whether the current component is referenced from elsewhere in the project (i.e., the component has callees).
-**Parameters**:
-- **referencer_content**: This parameter is expected to be a truthy value if there is content that references other components within the project. It indicates the presence of callers in the relationship.
-- **reference_letter**: This parameter is expected to be a truthy value if there is a reference to the component from other parts of the project. It indicates the presence of callees in the relationship.
+**Code Description**: The get_relationship_description function evaluates the presence of two boolean parameters: referencer_content and reference_letter. It constructs and returns a specific string based on the combination of these parameters.
-**Code Description**:
-The `get_relationship_description` function plays a crucial role in providing insights into the functional relationships within a project. It evaluates the presence of referencer content and reference letters to determine the nature of the relationship between components. The function operates under the following logic:
-- If both `referencer_content` and `reference_letter` are provided and truthy, it implies that the component in question has both callers and callees within the project. In this case, the function prepares a description that requests the inclusion of the reference relationship from both perspectives.
-- If only `referencer_content` is truthy, this indicates that the component has callers but not necessarily callees. The function then generates a description focusing on the relationship with its callers.
-- Conversely, if only `reference_letter` is truthy, it suggests that the component is referenced by other components (callees) but does not reference others. The function returns a description centered on the relationship with its callees.
-- If neither parameter is truthy, it indicates that there is no functional relationship to describe, and the function returns an empty string.
+- If both referencer_content and reference_letter are true, the function returns a string that requests the inclusion of the reference relationship with both callers and callees from a functional perspective.
+- If only referencer_content is true, it returns a string that requests the inclusion of the relationship with callers from a functional perspective.
+- If only reference_letter is true, it returns a string that requests the inclusion of the relationship with callees from a functional perspective.
+- If neither parameter is true, the function returns an empty string.
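
The four branches above are simple enough to capture in a short sketch. The following is illustrative rather than the project's verbatim code; the return strings are copied from the output examples quoted below:

```python
def get_relationship_description(referencer_content: bool, reference_letter: bool) -> str:
    # Four cases, as documented; exact phrasing follows the output examples.
    if referencer_content and reference_letter:
        return (
            "And please include the reference relationship with its callers "
            "and callees in the project from a functional perspective."
        )
    if referencer_content:
        return (
            "And please include the relationship with its callers in the "
            "project from a functional perspective."
        )
    if reference_letter:
        return (
            "And please include the relationship with its callees in the "
            "project from a functional perspective."
        )
    return ""
```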
-**Note**: -It is important for developers to accurately provide the `referencer_content` and `reference_letter` parameters based on the actual relationships within the project. Misinterpretation or incorrect values can lead to inaccurate descriptions of the project's architecture and functionality. +This design allows for flexible output based on the available information regarding the referencer, ensuring that the user receives relevant instructions based on the context provided. -**Output Example**: -- If both parameters are truthy: "And please include the reference relationship with its callers and callees in the project from a functional perspective" -- If only `referencer_content` is truthy: "And please include the relationship with its callers in the project from a functional perspective." -- If only `reference_letter` is truthy: "And please include the relationship with its callees in the project from a functional perspective." -- If neither parameter is truthy: "" (an empty string) +**Note**: It is important to ensure that the parameters are boolean values, as the function logic relies on their truthiness to determine the appropriate output. Providing non-boolean values may lead to unexpected results. + +**Output Example**: +- If both parameters are true: "And please include the reference relationship with its callers and callees in the project from a functional perspective." +- If only referencer_content is true: "And please include the relationship with its callers in the project from a functional perspective." +- If only reference_letter is true: "And please include the relationship with its callees in the project from a functional perspective." +- If neither parameter is true: "" (an empty string). +*** *** +### FunctionDef generate_doc(self, doc_item) +Doc is waiting to be generated... *** diff --git a/markdown_docs/repo_agent/chat_with_repo/gradio_interface.md b/markdown_docs/repo_agent/chat_with_repo/gradio_interface.md deleted file mode 100644 index fa14d9a..0000000 --- a/markdown_docs/repo_agent/chat_with_repo/gradio_interface.md +++ /dev/null @@ -1,152 +0,0 @@ -## ClassDef GradioInterface -**GradioInterface**: The function of GradioInterface is to create a user interface for interacting with a response function through Gradio, a Python library for building machine learning and data science demos and applications. - -**Attributes**: -- `respond`: A function that the Gradio interface will use to generate responses based on user input. -- `cssa` and `cssb`: Strings containing CSS styles used to format the output displayed in the Gradio interface. -- `setup_gradio_interface`: A method that initializes and configures the Gradio interface, including defining input fields, buttons, and output areas. - -**Code Description**: -The GradioInterface class is designed to wrap around a given response function, allowing users to interact with this function through a web-based graphical interface. The class constructor (`__init__`) takes a single argument, `respond_function`, which is expected to be a callable that takes user input and returns a response. This function is stored in the `respond` attribute for later use. - -The class defines custom CSS styles (`cssa` and `cssb`) to ensure that the output presented to the user is well-formatted and visually appealing. These styles are applied to the output content generated by the `respond` function. - -The `wrapper_respond` method is a wrapper around the original `respond` function. 
It formats the output of the `respond` function using Markdown and the predefined CSS styles, and then returns the formatted output. This method is designed to be used as a callback for Gradio interface components. - -The `clean` method resets the output areas in the Gradio interface to their initial, empty state. This is useful for clearing the interface between interactions. - -The `setup_gradio_interface` method creates the actual Gradio interface. It defines the layout and components of the interface, including input fields for user questions and optional instructions, buttons for submitting queries and clearing the interface, and areas for displaying responses and other outputs. The method configures the interface to use `wrapper_respond` as the callback function for processing user input and displaying responses. - -This class is utilized in the project by being instantiated with a specific `respond` function, which is provided by another part of the application (as seen in the `main.py` file). This setup allows the GradioInterface to serve as the user-facing component of the application, enabling users to interact with the underlying response function in a user-friendly manner. - -**Note**: -- It is essential to ensure that the `respond` function passed to the GradioInterface constructor is compatible with the expected input and output format of the `wrapper_respond` method. -- The Gradio interface is launched with the `launch` method, which is called at the end of `setup_gradio_interface`. This method should be called in an environment where a web server can be started (e.g., a local development environment or a cloud-based notebook). - -**Output Example**: -An example appearance of the interface after a user submits a query could be: -- A formatted response displayed under the "Response" title. -- Additional information or related content under the "Embedding Recall" and "Code" titles. -- The interface also includes input fields for new queries and buttons for submitting new queries or clearing the interface. -### FunctionDef __init__(self, respond_function) -**__init__**: The function of __init__ is to initialize the GradioInterface class with a response function and predefined CSS styles for the user interface. - -**parameters**: The parameters of this Function. -- respond_function: A function that the Gradio interface will use to generate responses based on user input. - -**Code Description**: The `__init__` method is a constructor for the GradioInterface class, which is designed to facilitate interaction with the RepoAgent chat application through a Gradio web interface. Upon instantiation of a GradioInterface object, this method initializes the object with a given response function and sets up CSS styling for the web interface. - -The `respond_function` parameter is expected to be a function that takes user input and returns a response. This function is stored in the `self.respond` attribute of the GradioInterface object, making it accessible to other methods within the class. - -The method also defines two CSS strings, `self.cssa` and `self.cssb`, which contain CSS styles for the web interface. These styles are used to enhance the visual presentation of the chat interface, including styling for outer and inner boxes, titles, and content areas. The CSS ensures that the interface is visually appealing and user-friendly, with clear distinctions between different sections of the chat output. - -Finally, the `__init__` method calls the `setup_gradio_interface` method. 
This method is responsible for constructing the actual Gradio interface, utilizing the Gradio library's Blocks API to create a complex, interactive web interface. The interface includes input fields for user questions, buttons for submitting questions and clearing inputs, and areas for displaying responses and additional information. The CSS styles defined in `__init__` are applied to this interface to ensure consistent styling. - -**Note**: It is crucial that the `respond_function` provided as a parameter is properly implemented and capable of processing user input to generate responses. This function is a key component of the chat application, as it determines how the system interacts with users. Additionally, the CSS styling defined in this method is specifically tailored for the Gradio interface constructed by the `setup_gradio_interface` method, ensuring a cohesive and user-friendly design. -*** -### FunctionDef wrapper_respond(self, msg_input, system_input) -**wrapper_respond**: The function of `wrapper_respond` is to process and format the responses for a Gradio interface based on user input and system instructions. - -**Parameters**: -- `msg_input`: The message input from the user. -- `system_input`: System instructions or additional input provided by the user. - -**Code Description**: -The `wrapper_respond` function is a crucial component of the GradioInterface class within the chat_with_repo module. It serves as a wrapper for the `respond` function, enhancing its output for display in a Gradio web interface. The function takes two parameters: `msg_input` and `system_input`, which represent the user's message and any system-level instructions, respectively. - -Upon invocation, `wrapper_respond` calls the `respond` method of the same class with the provided inputs. The `respond` method is expected to return multiple outputs, including the original message, three different outputs (`output1`, `output2`, `output3`), a code snippet (`code`), and a Codex response (`codex`). - -The function then processes `output1`, `output2`, and `code` by converting them to Markdown format using the `markdown` library. This conversion is necessary for proper rendering in the Gradio interface, which supports HTML and Markdown content. - -After converting to Markdown, the function wraps `output1`, `output2`, and `code` in HTML blocks, each with a distinct title ("Response", "Embedding Recall", and "Code", respectively). These blocks are styled using CSS defined in the `cssa` and `cssb` attributes of the `GradioInterface` class, ensuring a consistent and visually appealing presentation in the web interface. - -Finally, the function returns the original message, the three formatted HTML blocks (`output1`, `output2`, and `code`), the unmodified `output3`, and the Codex response. These return values are then used as outputs in the Gradio interface, specifically in the setup defined by the `setup_gradio_interface` method of the `GradioInterface` class. This setup includes interactive elements such as text boxes and buttons, allowing users to input questions, receive responses, and interact with the system in a structured manner. - -**Note**: -- It is important to ensure that the `respond` method called within `wrapper_respond` is correctly implemented and returns the expected outputs, as the formatting and presentation logic in `wrapper_respond` relies on these outputs. 
-- The CSS attributes `cssa` and `cssb` must be properly defined to achieve the desired styling of the response blocks in the Gradio interface.
-
-**Output Example**:
-Assuming the `respond` method returns a simple greeting as `output1`, a reference to an embedding technique as `output2`, and a sample code snippet as `code`, the return value of `wrapper_respond` might look like this (simplified for clarity):
-
-```
-("Hello, world!",
- "<HTML block titled 'Response' containing 'Hello, world!'>",
- "<HTML block titled 'Embedding Recall' containing 'Embedding technique reference'>",
- "Unmodified output3",
- "<HTML block titled 'Code' containing 'Sample code snippet'>",
- "Codex response")
-```
-
-This output includes the original message, formatted HTML blocks for the response, embedding recall, and code, as well as the unmodified third output and the Codex response.
-***
-### FunctionDef clean(self)
-**clean**: The function of clean is to reset the interface elements to their initial state.
-
-**parameters**: This function does not take any parameters.
-
-**Code Description**: The `clean` function is designed to reset the content of various interface elements within the GradioInterface, specifically for a web-based application interface. It achieves this by creating new instances of `gr.HTML` for certain elements, namely `output1`, `output2`, and `code`, with predefined HTML content that includes titles ("Response", "Embedding Recall", and "Code") and a structure for displaying content. These elements are styled using CSS defined in `self.cssa` and `self.cssb`. The function also initializes two variables, `msg` and `codex`, with empty strings. These variables, along with the newly created `gr.HTML` instances, are returned by the function, effectively resetting the display content of the interface.
-
-In the context of its calling situation within the project, the `clean` function is bound to a `ClearButton` in the `setup_gradio_interface` method of the same class. This setup indicates that when the clear button is clicked by the user, the `clean` function is invoked to reset the interface elements, including text inputs and HTML content displays. This functionality is crucial for providing a clean slate for the user to input new queries and receive fresh responses without the clutter of previous interactions. The function plays a significant role in enhancing user experience by maintaining a clean and organized interface.
-
-**Note**: It is important to ensure that the CSS variables (`self.cssa` and `self.cssb`) used in the function are properly defined within the class to avoid any styling issues in the interface. Additionally, the function assumes that the interface elements to be reset are specifically designed to be reset through the replacement of their content with new `gr.HTML` instances.
-
-**Output Example**: The function returns a tuple with the following structure:
-- `msg`: An empty string.
-- `output1`, `output2`, `code`: New instances of `gr.HTML` with predefined HTML content for displaying titles and structured content areas, styled with CSS.
-- `output3`: An empty string.
-- `codex`: An empty string.
-
-This output is used to reset the content and appearance of specific interface elements within the web-based application.
-***
-### FunctionDef setup_gradio_interface(self)
-**setup_gradio_interface**: The function of setup_gradio_interface is to initialize and configure the Gradio interface for the RepoAgent chat application.
-
-**parameters**: This function does not take any parameters.
-
-**Code Description**: The `setup_gradio_interface` function is a critical component of the GradioInterface class, responsible for setting up the user interface for the RepoAgent chat application. It utilizes the Gradio library's Blocks API to construct a complex, interactive web interface. The function begins by creating a Gradio Blocks container named `demo`, which serves as the root for the interface layout.
-
-Inside this container, a Markdown component is used to display the main title of the application. Following this, a tab named "main chat" is created to house the chat interface.
Within this tab, the layout is divided into rows and columns to organize the interface elements effectively. - -The first row contains a column with several input and control elements: -- A `Textbox` for the user to input their question, labeled "Question Input". -- An optional `Textbox` for instruction editing, labeled "(Optional) instruction editing". -- A `Button` labeled "Submit" for sending the question. -- A `ClearButton` for clearing the input fields and outputs. -- Another `Button` labeled "record". - -The output of the chat is displayed in a custom HTML component styled with CSS, showcasing the response from the system. Another row is dedicated to displaying additional information such as "Embedding Recall" and "Code", each within their own HTML component, also styled with CSS. Additional `Textbox` components are provided for inputting and displaying keywords and their corresponding code. - -The `Button` components are configured with click events. The "Submit" button is linked to the `wrapper_respond` function, which processes the user's input and displays the response. The `ClearButton` is linked to the `clean` function, which resets the interface elements to their initial state. Additionally, the message `Textbox` is configured to submit its content upon pressing the enter key, triggering the same `wrapper_respond` function. - -Finally, the function concludes by closing any previously opened Gradio interfaces, launching the newly set up interface with the `queue().launch(share=True, height=800)` method, which makes the interface accessible via a web link and sets its height. - -**Note**: -- It is essential to ensure that the `wrapper_respond` and `clean` functions are correctly implemented and integrated, as they play a significant role in the functionality of the chat interface. -- The CSS styling (`self.cssa` and `self.cssb`) used in this function is defined in the `__init__` method of the GradioInterface class, ensuring a consistent and visually appealing design across the interface. -- The use of Gradio's Blocks API allows for a flexible and modular design, making it easier to modify or extend the interface in the future. -*** -## FunctionDef respond_function(msg, system) -**respond_function**: The function of respond_function is to process a message and return a predefined response along with additional fixed output strings. - -**Parameters**: -- **msg**: This parameter is expected to be a string. It represents the message that needs to be processed by the function. -- **system**: This parameter is included for compatibility with system-level configurations or requirements. However, its value or usage is not defined within the scope of the provided function. - -**Code Description**: -The `respond_function` is designed to accept two parameters: `msg` and `system`. The primary role of this function is to process an incoming message and return a tuple consisting of several elements. The first element is the original message (`msg`) passed into the function. This indicates that the function echoes back the input message without any modification. - -Following the original message, the function returns a hardcoded empty string (`RAG`). This placeholder suggests that in a more complete implementation, `RAG` might be intended to hold a dynamically generated response or data, but as it stands, it does not serve any functional purpose. - -The subsequent elements in the return tuple are fixed strings: `"Embedding_recall_output"`, `"Key_words_output"`, and `"Code_output"`. 
These strings appear to be placeholders for outputs that one might expect from a more complex processing system, possibly involving text analysis, keyword extraction, and code generation or retrieval based on the input message. However, in the current implementation, they are static and do not change in response to the input. - -**Note**: -- The `system` parameter is not utilized within the function, suggesting that its intended use might be for future enhancements or for compatibility with a broader system where such a parameter is required. -- The placeholders and the static nature of the return values indicate that this function is a stub or a template meant to be expanded upon with actual logic and data processing capabilities. - -**Output Example**: -For an input message `"Hello, world!"`, the function would return the following tuple: -```python -("Hello, world!", "", "Embedding_recall_output", "Key_words_output", "Code_output") -``` -This output demonstrates the function's current behavior of echoing the input message and appending predefined static strings. diff --git a/markdown_docs/repo_agent/chat_with_repo/json_handler.md b/markdown_docs/repo_agent/chat_with_repo/json_handler.md deleted file mode 100644 index 8ab21c3..0000000 --- a/markdown_docs/repo_agent/chat_with_repo/json_handler.md +++ /dev/null @@ -1,191 +0,0 @@ -## ClassDef JsonFileProcessor -**JsonFileProcessor**: The function of JsonFileProcessor is to handle operations related to JSON files, including reading, extracting data, and searching within the JSON structure. - -**Attributes**: -- `file_path`: The path to the JSON file that the processor will operate on. - -**Code Description**: -The `JsonFileProcessor` class is designed to facilitate various operations on JSON files, which are crucial for handling structured data within the project. It provides methods for reading JSON files, extracting specific data based on the structure of the JSON, and performing recursive searches within the JSON data. - -1. **Initialization**: The class is initialized with a `file_path` argument, which specifies the location of the JSON file to be processed. - -2. **Reading JSON Files**: The `read_json_file` method attempts to open and read the JSON file specified by `file_path`. If successful, it returns the parsed JSON data. In case the file is not found, it logs an exception and exits the program. - -3. **Extracting Data**: The `extract_data` method reads the JSON file and extracts specific pieces of information based on the structure of the JSON data. It is designed to handle both old and new data formats, making it adaptable to changes in the JSON structure. This method returns two lists: one containing markdown contents and another with extracted items as dictionaries. - -4. **Recursive Search**: The `recursive_search` method is a utility function designed to perform a deep search within the JSON data for items matching a specific criterion, in this case, the 'name' of an item. It updates the provided lists with code contents and markdown contents of matching items. - -5. **Search by Name**: The `search_code_contents_by_name` method utilizes `recursive_search` to find items by their 'name' within the JSON data. It returns lists of code contents and markdown contents for the matching items. This method handles file opening errors and JSON parsing errors gracefully, returning appropriate error messages. 
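
Taken together, the numbered list above corresponds to a usage pattern along these lines. This is an illustrative sketch only; the file path and the item name are placeholders, and handling of the single-string error returns is omitted:

```python
from repo_agent.chat_with_repo.json_handler import JsonFileProcessor

# Both the path and the item name below are placeholders.
processor = JsonFileProcessor("path/to/project_doc_record.json")

data = processor.read_json_file()                        # full parsed JSON (exits if the file is missing)
md_contents, extracted_items = processor.extract_data()  # markdown texts plus per-item metadata dicts

# Deep search for an item by its 'name' key; returns two lists on success,
# or a single error string (e.g. "File not found.") on failure.
code_results, md_results = processor.search_code_contents_by_name(
    "path/to/project_doc_record.json", "ExampleName"
)
```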
- -**Relationship with Callers**: -- The `JsonFileProcessor` class is instantiated in the `TextAnalysisTool` and `RepoAssistant` classes with the database path as the JSON file path. This indicates its use in analyzing text and assisting in repository management tasks by processing JSON data. -- In the test suite (`TestJsonFileProcessor`), the class is used to verify its functionality, including reading JSON files and extracting contents. This ensures the reliability of the `JsonFileProcessor` in handling JSON data as expected. - -**Note**: -- It is crucial to ensure that the JSON file exists at the specified `file_path` before attempting operations to avoid runtime errors. -- The class is designed to exit the program upon encountering a `FileNotFoundError` during the reading process, which might not be desirable in all contexts. Consider handling this exception externally if program continuation is necessary. - -**Output Example**: -- Reading a JSON file might return: - ```json - {"name": "Example", "data": [{"id": 1, "value": "Content"}]} - ``` -- Extracting data might result in: - ```python - (["Markdown content"], [{"type": "ExampleType", "name": "ExampleName", "code_start_line": 1, "code_end_line": 10, "have_return": False, "code_content": "Example code", "name_column": 0, "item_status": "Active"}]) - ``` -- Searching by name might return: - ```python - (["Example code"], ["Markdown content"]) - ``` -### FunctionDef __init__(self, file_path) -**__init__**: The function of __init__ is to initialize a new instance of the JsonFileProcessor class with a specified file path. - -**Parameters**: -- **file_path**: The path to the JSON file that this instance will process. - -**Code Description**: -The `__init__` method is a special method in Python, commonly known as a constructor. It is automatically called when a new instance of a class is created. In the context of the JsonFileProcessor class, the `__init__` method is designed to initialize the newly created object with essential data it needs to perform its tasks. - -The method takes a single parameter, `file_path`, which is expected to be a string representing the path to a JSON file. This file path is then assigned to the instance variable `self.file_path`. The instance variable `self.file_path` holds the location of the JSON file that the JsonFileProcessor instance will work with. This setup is crucial for the instance to know which file to read from or write to during its lifecycle. - -By storing the file path in an instance variable, the JsonFileProcessor class encapsulates the file path within each instance, allowing different instances to work with different JSON files without interference. This design promotes flexibility and reusability of the JsonFileProcessor instances across different parts of an application. - -**Note**: -- It is important to ensure that the `file_path` provided to the `__init__` method is a valid path to a JSON file. The method does not perform any validation on the `file_path` argument, so it is the responsibility of the caller to provide a correct and accessible file path. -- The JsonFileProcessor class, as suggested by its name and the initialization process, is tailored for processing JSON files. Therefore, the functionality and methods provided by instances of this class will be specific to handling JSON data. -*** -### FunctionDef read_json_file(self) -**read_json_file**: The function of read_json_file is to read and return the data from a JSON file specified by the file path stored in the instance. 
- -**Parameters**: This function does not take any parameters apart from the implicit `self` parameter, which represents the instance of the class through which the function is called. - -**Code Description**: The `read_json_file` function is designed to handle the reading of JSON formatted data from a file. It attempts to open a file using the path stored in `self.file_path` with read-only access and UTF-8 encoding. Upon successfully opening the file, it utilizes the `json.load` method to parse the JSON content from the file into a Python dictionary or list, depending on the structure of the JSON data. This data is then returned to the caller. - -In case the specified file does not exist or the path is incorrect, resulting in a `FileNotFoundError`, the function logs an exception message indicating the file was not found, along with the path it attempted to access. Subsequently, it terminates the program with an exit code of 1 to indicate an error condition. - -The function is utilized within the project in two distinct contexts: -1. In the `extract_data` method of the `JsonFileProcessor` class, where it is called to load JSON data from a file. The loaded data is then processed to extract specific information based on the structure of the JSON, demonstrating how `read_json_file` serves as a foundational step in data processing workflows within the application. -2. In the unit tests for the `JsonFileProcessor` class, specifically in the `test_read_json_file` method, where it is used to verify that the function correctly reads and returns the expected data structure from a mock JSON file. This usage underscores the importance of `read_json_file` in ensuring the reliability and correctness of the application's data handling capabilities. - -**Note**: It is crucial to ensure that the file path stored in `self.file_path` is correct and points to a valid JSON file before calling this function. Additionally, users should be prepared to handle the termination of the program in scenarios where the specified file cannot be found, as indicated by the function's error handling behavior. - -**Output Example**: Assuming the JSON file contains the following content: -```json -{ - "name": "John Doe", - "age": 30, - "is_student": false -} -``` -The function would return a Python dictionary: -```python -{"name": "John Doe", "age": 30, "is_student": False} -``` -*** -### FunctionDef extract_data(self) -**extract_data**: The function of extract_data is to load JSON data from a file and extract specific content and metadata from it. - -**Parameters**: This function does not require any parameters apart from the implicit `self` parameter, which represents the instance of the `JsonFileProcessor` class through which the function is called. - -**Code Description**: The `extract_data` function begins by calling the `read_json_file` method of the same class to load JSON data from a file specified by the file path stored in the instance. The loaded data is expected to be a dictionary where each key represents a file and its value contains items, potentially in a new list format. - -The function then initializes two empty lists, `md_contents` and `extracted_contents`, to store markdown content and extracted item metadata, respectively. It iterates through each item in the JSON data. If the item's value is a list (indicating the new format), it further iterates through each element in the list. For each element, it checks if the `md_content` key exists and has a non-empty value. 
If so, the first element of `md_content` is appended to the `md_contents` list.
-
-Additionally, for each item, it constructs a dictionary containing various pieces of information such as item type, name, code start and end lines, whether it has a return statement, the content of the code, the column of the name, and the item's status. This dictionary is then appended to the `extracted_contents` list. The function finally returns both lists, `md_contents` and `extracted_contents`.
-
-**Note**: It is crucial for the JSON data loaded by the `read_json_file` method to follow the expected structure for the `extract_data` function to work correctly. The function assumes that each item in the JSON data can potentially be a list of items (the new format) and that each item in this list contains specific keys (`md_content`, `type`, `name`, etc.). The absence of these keys or a deviation from the expected data structure could lead to incorrect or incomplete data extraction.
-
-**Output Example**: Assuming the JSON data loaded by `read_json_file` contains information about various code snippets, the function might return something like the following:
-
-First return value (`md_contents` list):
-```python
-["# Sample Markdown Content", "# Another Markdown Example"]
-```
-
-Second return value (`extracted_contents` list):
-```python
-[
-    {
-        "type": "Function",
-        "name": "extract_data",
-        "code_start_line": 10,
-        "code_end_line": 50,
-        "have_return": True,
-        "code_content": "def extract_data(self):...",
-        "name_column": 5,
-        "item_status": "Reviewed"
-    },
-    {
-        "type": "Class",
-        "name": "JsonFileProcessor",
-        "code_start_line": 1,
-        "code_end_line": 100,
-        "have_return": False,
-        "code_content": "class JsonFileProcessor:...",
-        "name_column": 0,
-        "item_status": "PendingReview"
-    }
-]
-```
-
-This output example demonstrates how the function can be used to extract and organize specific pieces of information from structured JSON data, facilitating further processing or analysis of the content and metadata of items described in the JSON file.
-***
-### FunctionDef recursive_search(self, data_item, search_text, code_results, md_results)
-**recursive_search**: The function of `recursive_search` is to search recursively through a data structure for items matching a specific search text and collect their associated code and markdown content.
-
-**Parameters**:
-- `data_item`: The current data item (a dictionary or list) being searched through.
-- `search_text`: The text string being searched for within the data items.
-- `code_results`: A list that accumulates the `code_content` of items matching the search text.
-- `md_results`: A list that accumulates the `md_content` of items matching the search text.
-
-**Code Description**:
-The `recursive_search` function is a crucial component of the JsonFileProcessor class, designed to navigate through nested dictionaries and lists within a JSON structure to find items that match a given search text. This function is specifically tailored to handle the data format where relevant items may be deeply nested within the structure.
-
-Upon encountering a dictionary, the function iterates through its key-value pairs. If a value is another dictionary or a list, the function calls itself with this value, allowing it to dive deeper into the data structure. This recursive approach ensures that all levels of nesting are explored.
-
-When processing a list, the function examines each item. If an item is a dictionary and has a 'name' key that matches the `search_text`, the function checks for the presence of 'code_content' within this item. If found, the 'code_content' and its corresponding 'md_content' are appended to the `code_results` and `md_results` lists, respectively. This process captures the code and markdown content associated with the search text.
-
-The function also makes recursive calls when encountering nested lists or dictionaries within a list, ensuring no potential match is overlooked due to the structure's depth.
-
-This method is invoked by the `search_code_contents_by_name` method within the same class, which aims to find and return the code and markdown content associated with a given name within a JSON file. The `search_code_contents_by_name` method initializes the search by loading the JSON data from a file and then calls `recursive_search` with this data, the search text, and empty lists for code and markdown results. Based on the search outcome, it either returns the found contents or indicates that no matching item was found.
-
-**Note**:
-- The `recursive_search` function is designed to work specifically with the data structure expected in the JSON files processed by the JsonFileProcessor class. It assumes that relevant items will have 'name', 'code_content', and 'md_content' keys.
-- This function modifies the `code_results` and `md_results` lists in place, meaning it does not return any value. Instead, the results are accumulated directly in the lists passed as arguments.
-- Proper error handling in the calling method (`search_code_contents_by_name`) is crucial to manage exceptions that may arise from file operations or JSON parsing, ensuring the robustness of the overall search process.
-***
-### FunctionDef search_code_contents_by_name(self, file_path, search_text)
-**search_code_contents_by_name**: The function of `search_code_contents_by_name` is to search for and retrieve code and markdown contents from a JSON file based on a specified search text.
-
-**Parameters**:
-- `file_path`: The path to the JSON file from which data is to be read.
-- `search_text`: The text string to search for within the JSON file's contents.
-
-**Code Description**:
-The `search_code_contents_by_name` function is designed to open and read from a JSON file specified by `file_path`, searching for items that match the `search_text`. It utilizes a helper function, `recursive_search`, to navigate through the JSON file's hierarchical structure, looking for the search text in the 'name' keys of dictionaries within the file. The search is exhaustive and recursive, ensuring that all potential matches are found regardless of their depth in the file's structure.
-
-Upon finding a match, the function collects the 'code_content' and 'md_content' associated with the matched item into two separate lists: `code_results` and `md_results`. These lists are then returned to the caller, providing them with the relevant code and markdown contents found in the file.
-
-The function is robust, with error handling for common issues such as file not found, invalid JSON format, and other unexpected errors. In the case of a `FileNotFoundError`, it returns a simple "File not found." message. For a `json.JSONDecodeError`, indicating an invalid JSON file, it returns "Invalid JSON file." For any other exceptions, it returns a message indicating that an error occurred, along with the exception message.
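
For readers who want the shape of the algorithm rather than prose, a self-contained approximation of the documented behavior follows. It is a sketch, not the project's verbatim code; the error strings mirror the output examples below:

```python
import json

def search_code_contents_by_name(file_path, search_text):
    """Illustrative stand-in for JsonFileProcessor.search_code_contents_by_name."""

    def recursive_search(data_item, code_results, md_results):
        # Dive into nested dictionaries and lists, collecting matches by 'name'.
        if isinstance(data_item, dict):
            for value in data_item.values():
                if isinstance(value, (dict, list)):
                    recursive_search(value, code_results, md_results)
        elif isinstance(data_item, list):
            for item in data_item:
                if isinstance(item, dict) and item.get("name") == search_text:
                    if "code_content" in item:
                        code_results.append(item["code_content"])
                        md_results.append(item.get("md_content"))
                if isinstance(item, (dict, list)):
                    recursive_search(item, code_results, md_results)

    try:
        with open(file_path, "r", encoding="utf-8") as file:
            data = json.load(file)
        code_results, md_results = [], []
        recursive_search(data, code_results, md_results)
        if code_results:
            return code_results, md_results
        return ["No matching item found."], ["No matching item found."]
    except FileNotFoundError:
        return "File not found."
    except json.JSONDecodeError:
        return "Invalid JSON file."
    except Exception as e:
        return f"An error occurred: {e}"
```

Note that the real method accumulates results in caller-supplied lists via the separate `recursive_search` method; the nested helper here is only to keep the sketch self-contained.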
-
-**Note**:
-- The function assumes the JSON file is properly formatted and that relevant items will contain 'name', 'code_content', and 'md_content' keys.
-- It is designed to return meaningful error messages to assist in troubleshooting potential issues with file access or format.
-- The function returns a tuple of two lists (`code_results`, `md_results`) when matches are found, or a tuple of two lists each containing "No matching item found." if no matches are found. In case of errors, a single string message is returned.
-
-**Output Example**:
-Assuming a successful search with matches found, the function might return:
-```
-(["code snippet 1", "code snippet 2"], ["markdown content 1", "markdown content 2"])
-```
-In case no matches are found, the output would be:
-```
-(["No matching item found."], ["No matching item found."])
-```
-For a file not found error, the output would simply be:
-```
-"File not found."
-```
-***
diff --git a/markdown_docs/repo_agent/chat_with_repo/main.md b/markdown_docs/repo_agent/chat_with_repo/main.md
deleted file mode 100644
index 25f681f..0000000
--- a/markdown_docs/repo_agent/chat_with_repo/main.md
+++ /dev/null
@@ -1,14 +0,0 @@
-## FunctionDef main
-**main**: The function of main is to initialize the application, set up the repository assistant with configuration settings, and launch a Gradio interface for user interaction.
-
-**Parameters**: This function does not take any parameters.
-
-**Code Description**: The `main` function serves as the entry point for the application. It begins by extracting necessary configuration details such as the model name, API key, base URL, and database path from a predefined configuration object (`CONFIG`). These details are crucial for setting up the application's components, including the repository assistant and the Gradio interface.
-
-The function proceeds to instantiate a `RepoAssistant` object with the API key, base URL, and database path. This assistant is responsible for handling interactions with the repository, including data extraction, processing, and response generation based on user queries.
-
-Following the initialization of the `RepoAssistant`, the function calls the `extract_data` method of the assistant's `json_data` attribute to load and extract markdown contents and metadata from a JSON file. This data is then passed to the `create_vector_store` method of the assistant's `chroma_data` attribute, which processes the markdown content and stores it in a vector database for efficient retrieval and analysis.
-
-Finally, the `main` function initializes a `GradioInterface` with the `respond` method of the `RepoAssistant`. This interface provides a user-friendly way for users to interact with the application through a web-based graphical interface. The interface allows users to input queries and receive responses generated by the application.
-
-**Note**: The successful execution of the `main` function and the application's functionality heavily depend on the correct configuration of the `CONFIG` object, including valid API keys, URLs, and paths. Additionally, the Gradio interface requires a suitable environment to launch a web server for user interaction. The application integrates several components, including data processing, vector storage, and user interface modules, to provide a comprehensive solution for interacting with repository data.
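
The wiring just described can be condensed into a hedged sketch. The `CONFIG` keys, the `RepoAssistant` argument order, and the `create_vector_store` signature are assumptions inferred from the prose above, not confirmed by the source:

```python
from repo_agent.chat_with_repo.gradio_interface import GradioInterface
from repo_agent.chat_with_repo.rag import RepoAssistant

# Placeholder configuration: the real CONFIG object and its exact keys are
# defined elsewhere in the project and are not shown in this document.
CONFIG = {
    "api_key": "sk-...",
    "api_base": "https://api.openai.com/v1",
    "db_path": "path/to/project_doc_record.json",
}

def main():
    assistant = RepoAssistant(CONFIG["api_key"], CONFIG["api_base"], CONFIG["db_path"])
    # Load markdown contents and metadata, populate the vector store, and
    # hand the assistant's respond method to the Gradio front end.
    md_contents, meta_data = assistant.json_data.extract_data()
    assistant.chroma_data.create_vector_store(md_contents, meta_data)
    GradioInterface(assistant.respond)

if __name__ == "__main__":
    main()
```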
diff --git a/markdown_docs/repo_agent/chat_with_repo/prompt.md b/markdown_docs/repo_agent/chat_with_repo/prompt.md deleted file mode 100644 index 62cf636..0000000 --- a/markdown_docs/repo_agent/chat_with_repo/prompt.md +++ /dev/null @@ -1,206 +0,0 @@ -## ClassDef TextAnalysisTool -**TextAnalysisTool**: The function of TextAnalysisTool is to provide various text analysis capabilities, including keyword extraction, tree structure generation from text, formatting chat prompts, querying code blocks, converting search results to markdown, and named entity recognition queries, utilizing a language model and a JSON file processor. - -**Attributes**: -- `llm`: An instance of a language model used for generating text-based responses. -- `db_path`: The file path to a JSON database used for searching code contents. -- `jsonsearch`: An instance of JsonFileProcessor initialized with `db_path` for processing JSON files. - -**Code Description**: -The TextAnalysisTool class is designed to facilitate text analysis and processing within a software development context, leveraging a language model (LLM) for generating responses and a JSON file processor for querying a database. It is initialized with a language model and a database path, setting up the necessary components for its operations. - -- The `keyword` method generates up to three keywords based on a given query by prompting the language model. This is useful for extracting key concepts or terms from a query. -- The `tree` method creates a hierarchical tree structure from a given text, which can be particularly useful for understanding the organization or structure of the text. -- The `format_chat_prompt` method formats a chat prompt with a given message and instruction, standardizing the way prompts are presented to the language model. -- The `queryblock` method searches the JSON database for code contents related to a given message, utilizing the `jsonsearch` attribute. -- The `list_to_markdown` method converts a list of search results into a markdown-formatted string, facilitating the presentation of these results. -- The `nerquery` method extracts the most relevant class or function name from a given message, adhering to strict output criteria. This is particularly useful for identifying specific code elements from text. - -The class is utilized within the project to enhance interactions with a repository, such as querying for specific code blocks or analyzing text from chat inputs. It serves as a bridge between natural language processing and code repository management, enabling more intuitive and efficient ways to access and analyze code. - -**Note**: -- Ensure that the language model and JSON file processor are correctly initialized and configured before using instances of this class. -- The effectiveness of the methods depends on the capabilities and training of the provided language model. - -**Output Example**: -For a `keyword` method call with the query "How to sort an array in Python?": -- Possible return value: `"sort, array, Python"` - -For a `tree` method call with a simple structured text: -- Possible return value: `"Root -> Child1 -> Grandchild1; Child2"` - -For a `queryblock` method call with the message "binary search": -- Possible return value: `("function binarySearch(array, target) {...}", "```python\nfunction binarySearch(array, target) {...}\n```")` -### FunctionDef __init__(self, llm, db_path) -**__init__**: The function of __init__ is to initialize an instance of the TextAnalysisTool class with specific configurations. 
- -**Parameters**: -- `llm`: This parameter is expected to be an instance or a configuration related to a language model that the TextAnalysisTool will use for text analysis purposes. -- `db_path`: A string representing the file path to the database (in JSON format) that the TextAnalysisTool will interact with for reading and processing data. - -**Code Description**: -The `__init__` method is the constructor for the TextAnalysisTool class. It performs the initial setup required for the TextAnalysisTool to function correctly. This setup includes initializing a JsonFileProcessor instance and storing the language model and database path provided as parameters. - -1. The `db_path` parameter is used to create an instance of the JsonFileProcessor class. This is crucial because the TextAnalysisTool relies on the JsonFileProcessor for all operations related to reading from and searching within the JSON database. The JsonFileProcessor handles tasks such as reading JSON files, extracting data, and performing searches based on specific criteria within the JSON structure. - -2. The `llm` parameter is stored directly within the instance. This parameter is intended to represent a language model or a similar tool that the TextAnalysisTool might use for analyzing text. The exact nature and use of this parameter would depend on the broader context of the TextAnalysisTool's implementation and its intended functionalities. - -3. The `db_path` is also stored within the instance for potential future reference. This could be useful for operations that require direct access to the database file path, such as logging, error handling, or additional database operations not covered by the JsonFileProcessor. - -**Relationship with Callees**: -- The instantiation of the JsonFileProcessor within the `__init__` method signifies a dependency relationship between the TextAnalysisTool and the JsonFileProcessor. The TextAnalysisTool delegates the responsibility of handling JSON file operations to the JsonFileProcessor, thereby adhering to the principle of separation of concerns. This allows the TextAnalysisTool to focus on its primary functionalities, relying on the JsonFileProcessor for data handling. - -**Note**: -- It is important to ensure that the `db_path` provided points to a valid JSON file that the JsonFileProcessor can read and process. Failure to provide a valid path could lead to runtime errors or the inability of the TextAnalysisTool to perform its intended operations. -- The nature and capabilities of the `llm` parameter are not described within this method. Users of the TextAnalysisTool should ensure that they understand the expected type and functionalities of the `llm` parameter based on the broader context of the tool's usage and implementation. -*** -### FunctionDef keyword(self, query) -**Function**: keyword - -**Parameters**: -- `self`: Represents the instance of the class where the `keyword` function is defined. -- `query`: A string that contains the query for which keywords are to be generated. - -**Code Description**: -The `keyword` function is designed to generate a list of up to three keywords based on a given query. It constructs a prompt string that includes the query and a specific instruction for the language model to output no more than three keywords. This prompt is then passed to the language model (referred to as `llm` in the code) through its `complete` method, which is expected to return a response based on the input prompt. 
The response, presumably containing the requested keywords, is then returned by the `keyword` function. - -In the context of its usage within the project, the `keyword` function plays a crucial role in extracting keywords from text inputs, which are then utilized in various parts of the system for further processing. For instance, in the `respond` method of the `RepoAssistant` class, the function is used to extract keywords from a formatted chat prompt. These keywords are instrumental in generating queries for retrieving relevant documents and metadata from a data collection, thereby aiding in the construction of a response to the user's query. Additionally, in the test suite (`test_keyword` method in `test_prompt.py`), the function is tested to ensure it correctly extracts keywords from a given query, which is vital for maintaining the reliability and accuracy of the system's text analysis capabilities. - -**Note**: -The effectiveness and accuracy of the `keyword` function are heavily dependent on the performance of the underlying language model (`llm`). Therefore, the quality of the generated keywords can vary based on the model's understanding of the input query and its ability to adhere to the instruction of outputting no more than three keywords. - -**Output Example**: -Assuming the language model functions correctly and adheres to the prompt instructions, an example output for a query "test query" could be: -``` -"keyword1, keyword2, keyword3" -``` -This output represents a comma-separated list of keywords generated by the language model in response to the input query. -*** -### FunctionDef tree(self, query) -**tree**: The function of `tree` is to analyze a given text and generate a tree structure based on its hierarchy. - -**Parameters**: -- `query`: A string containing the text to be analyzed. - -**Code Description**: -The `tree` function is a method within the `TextAnalysisTool` class, designed to process a text query by generating a prompt that requests the analysis of the text and the creation of a tree structure representing its hierarchical organization. This prompt is then passed to a language model (referred to as `llm` within the code), which is expected to return a response that fulfills the request. The function encapsulates the interaction with the language model, abstracting away the details of how the prompt is constructed and how the response is obtained. This design allows for a clear separation of concerns, where the `TextAnalysisTool` focuses on the generation of analysis requests, and the language model handles the actual analysis and generation tasks. - -From a functional perspective, this method is directly tested by a unit test in the project, specifically within the `test_tree` method of the `TestTextAnalysisTool` class. The test mocks the response of the language model to return a predefined string ("tree structure") when given a specific query ("test query"). It then asserts that the `tree` function correctly returns this mocked response, thereby validating the function's ability to interact with the language model and return its response as expected. - -**Note**: -- The actual output of the `tree` function is highly dependent on the implementation and capabilities of the underlying language model (`llm`). As such, the quality and accuracy of the generated tree structure are contingent upon the model's performance. 
-- The function assumes that the language model is capable of understanding the prompt and generating a meaningful response that accurately represents the hierarchical structure of the input text. - -**Output Example**: -Assuming the language model is well-trained and capable of understanding the prompt, an example output for a query about a simple organizational structure might look something like this: -``` -- Organization - - Department A - - Team 1 - - Team 2 - - Department B - - Team 3 -``` -This output represents a tree structure where "Organization" is at the top level, followed by "Department A" and "Department B" as sub-levels, each containing their respective teams. -*** -### FunctionDef format_chat_prompt(self, message, instruction) -**format_chat_prompt**: The function of `format_chat_prompt` is to format a chat prompt for further processing. - -**Parameters**: -- `message`: The user's message that needs to be included in the prompt. -- `instruction`: The system's instruction or context that precedes the user's message. - -**Code Description**: The `format_chat_prompt` function constructs a formatted string that structures a conversation between a system and a user. It takes two parameters: `message`, which represents the user's input, and `instruction`, which is a directive or context provided by the system. The function then concatenates these parameters into a formatted string that mimics a chat dialogue, with the system's instruction and the user's message clearly delineated. This formatted string is intended for use in natural language processing or chatbot applications where understanding the flow of conversation is crucial. The inclusion of "Assistant:" at the end of the prompt suggests that the function is designed to prepare the text for a response generation step, possibly by an AI assistant. - -In the context of its usage within the project, `format_chat_prompt` is called by the `RepoAssistant` class in the `respond` method, where it is used to format the initial prompt for querying a repository and generating responses based on the user's message and a given instruction. This indicates that the function plays a critical role in preparing the input for complex operations such as keyword extraction, query generation, and document retrieval based on the conversation context. - -Additionally, the function is tested in `test_format_chat_prompt` within `tests/test_prompt.py`, which verifies that the function correctly incorporates the user's message into the formatted prompt. This test ensures the reliability and expected behavior of the function in the context of automated testing frameworks. - -**Note**: It is important for the `message` and `instruction` parameters to be accurately and clearly defined, as they directly influence the structure of the generated prompt and, consequently, the effectiveness of any downstream processing or response generation. - -**Output Example**: -``` -System:Please provide your query. -User: How do I format a date in Python? -Assistant: -``` -This output demonstrates how the function formats a conversation where the system provides an instruction, followed by the user's question, and leaves a placeholder for the assistant's response. -*** -### FunctionDef queryblock(self, message) -**queryblock**: The function of `queryblock` is to search for and retrieve code and markdown contents from a JSON file based on a specified message. - -**Parameters**: -- `message`: A string representing the search text to be used for querying the JSON file. 
- -**Code Description**: -The `queryblock` function is a method within a class that primarily interacts with a JSON file to search for specific content. It leverages the `search_code_contents_by_name` method from the `JsonFileProcessor` class to perform this task. Upon invocation, `queryblock` passes the database path (`self.db_path`) and the search message to `search_code_contents_by_name`. This method is designed to search through the JSON file located at the given database path for items whose 'name' keys match the provided message. - -The search process is thorough, utilizing a recursive search strategy to ensure that all potential matches are found, regardless of their depth within the JSON file's structure. If matches are found, the method returns two lists: one containing the code contents (`code_results`) and the other containing the markdown contents (`md_results`) associated with the matched items. In the event of an error, such as a file not found or invalid JSON format, the method is designed to return meaningful error messages to assist in troubleshooting. - -**Note**: -- The function assumes that the JSON file is properly formatted and that the items of interest will contain 'name', 'code_content', and 'md_content' keys. -- It is crucial for the caller to handle the returned values appropriately, especially considering that the method can return lists of results or error messages. -- The function is part of a larger system that involves text analysis and retrieval, indicating its use in contexts where extracting specific code and markdown content based on textual queries is necessary. - -**Output Example**: -Assuming a successful search with matches found, the function might return: -``` -(["code snippet related to message"], ["markdown content related to message"]) -``` -In case no matches are found, the output would be: -``` -(["No matching item found."], ["No matching item found."]) -``` -For a file not found error, the output would be a simple error message string, such as: -``` -"File not found." -``` -*** -### FunctionDef list_to_markdown(self, search_result) -**list_to_markdown**: The function of `list_to_markdown` is to convert a list of strings into a Markdown formatted string, where each item is numbered and followed by a newline. - -**Parameters**: -- `self`: Represents the instance of the class that contains this method. -- `search_result`: A list of strings that are to be converted into Markdown format. - -**Code Description**: -The `list_to_markdown` function iterates over each element in the `search_result` list, which is passed as a parameter. For each element, it enumerates the list starting with the index 1. It then formats each element into a Markdown numbered list item by appending the index and the content of the element to a string, followed by two newline characters for spacing. This process is repeated for every element in the list, resulting in a single string that represents the entire list in Markdown format. This string is then returned as the output of the function. - -In the context of its calling situation within the project, specifically in the `respond` method of the `RepoAssistant` class, the `list_to_markdown` function is used to format lists of unique documents and code snippets into a Markdown string. This is particularly useful for presenting search results or processed data in a structured and readable format, which can be especially beneficial for logging, debugging, or displaying information to the end-user in a more organized manner. 
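The described behavior is small enough to sketch directly. The following is a minimal reconstruction based solely on the description above; the exact formatting string in the original source is an assumption:

```python
def list_to_markdown(self, search_result):
    """Render a list of strings as a numbered Markdown list (sketch)."""
    markdown_str = ""
    # Number items from 1 and leave a blank line after each entry,
    # matching the spacing described above.
    for index, content in enumerate(search_result, start=1):
        markdown_str += f"{index}. {content}\n\n"
    return markdown_str
```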
- -**Note**: -- The function assumes that the input `search_result` is a list of strings. If the list contains non-string elements, it may lead to unexpected behavior or errors. -- The function adds two newline characters after each item for spacing. If a different format is desired (e.g., single newline or additional Markdown formatting), modifications to the function would be required. - -**Output Example**: -Given a list `["Apple", "Banana", "Cherry"]`, the `list_to_markdown` function would return the following string: -``` -1. Apple - -2. Banana - -3. Cherry -``` -This output demonstrates how the function formats each item in the list as a numbered entry in Markdown, making it clear and easy to read. -*** -### FunctionDef nerquery(self, message) -**nerquery**: The function of nerquery is to extract the most relevant class or function name from a given message. - -**Parameters**: -- `self`: Represents the instance of the class where the nerquery function resides. -- `message`: A string containing the text from which a class or function name needs to be extracted. - -**Code Description**: -The `nerquery` function is designed to process a given text message and identify the most relevant class or function name within that message. It achieves this by constructing a query that includes a set of instructions and the input message. These instructions guide the extraction process, emphasizing that the output must be a single, pure function or class name without any additional characters. Examples of valid outputs are provided within the instructions to clarify the expected format. - -Upon constructing the query, the function utilizes a language model (`self.llm.complete(query)`) to generate a response based on the provided instructions and input message. This response is expected to be the name of a class or function extracted from the input message. The function then returns this response. - -In the context of its usage within the project, `nerquery` is called in scenarios where identifying specific code entities (classes or functions) from text is necessary. For instance, in the `respond` method of the `RepoAssistant` class, `nerquery` is used to extract keywords from bot messages and prompts. These keywords are then used to further query and retrieve relevant code blocks or documentation. This demonstrates the function's role in enabling more intelligent and context-aware responses in the system by identifying and acting upon specific code entities mentioned in user inputs or system-generated text. - -**Note**: -- The accuracy of the `nerquery` function's output heavily depends on the language model's ability to understand and process the instructions and the input message. Therefore, the quality of the input message and the clarity of the instructions are crucial for achieving desirable results. -- The function assumes that the language model is already instantiated and accessible through `self.llm`. - -**Output Example**: -If the input message is "How do I use the calculateSum function in my code?", an example output could be `"calculateSum"`, assuming the language model correctly identifies "calculateSum" as the most relevant function name within the message. 
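Putting the pieces of this section together, a hedged sketch of the method follows. Only the `self.llm.complete(query)` call is taken verbatim from the description; the instruction wording is paraphrased:

```python
def nerquery(self, message):
    # Instruction wording is an assumption; the description only fixes its
    # intent: the output must be a single, pure class or function name.
    instruction = (
        "Extract the most relevant class or function name from the text "
        "below. Output only the bare name, with no extra characters. "
        'Examples of valid outputs: "calculateSum", "UserManager".'
    )
    query = f"{instruction}\n\nText: {message}"
    response = self.llm.complete(query)
    return response
```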
-*** diff --git a/markdown_docs/repo_agent/chat_with_repo/rag.md b/markdown_docs/repo_agent/chat_with_repo/rag.md deleted file mode 100644 index ee62017..0000000 --- a/markdown_docs/repo_agent/chat_with_repo/rag.md +++ /dev/null @@ -1,237 +0,0 @@ -## ClassDef RepoAssistant -**RepoAssistant**: The function of RepoAssistant is to facilitate interaction with repositories by generating queries, reranking documents, and providing responses based on repository data and user queries. - -**Attributes**: -- `api_key`: The API key used for authentication with external services. -- `api_base`: The base URL for the API endpoints. -- `db_path`: The path to the database file. -- `md_contents`: A list to store markdown contents. -- `llm`: An instance of the OpenAI class initialized with GPT-3.5 model. -- `client`: Another instance of the OpenAI class but initialized with GPT-4 model. -- `lm`: An instance of the AI class for making API requests. -- `textanslys`: An instance of TextAnalysisTool for text analysis operations. -- `json_data`: An instance of JsonFileProcessor for processing JSON files. -- `chroma_data`: An instance of ChromaManager for managing chroma data. - -**Code Description**: -The `RepoAssistant` class is designed to assist in handling various tasks related to repository management and interaction. It initializes with necessary configurations for API interactions and database path for local data storage. The class is equipped with methods to generate search queries (`generate_queries`), rerank documents based on relevance (`rerank`), generate responses using a Retrieve and Generate (RAG) approach (`rag` and `rag_ar`), and format lists into markdown (`list_to_markdown`). Additionally, it provides a `respond` method to handle incoming messages and generate appropriate responses based on the context and available data. - -The `RepoAssistant` is instantiated in the main module of the project, where it is configured with API keys, base URL, and database path derived from the project's configuration. It plays a crucial role in initializing the assistant with the required data and setting up the Gradio interface for interaction. - -In the testing environment, the class is initialized with mocked dependencies to facilitate unit testing without the need for actual external service calls. This approach ensures that the functionality of `RepoAssistant` can be tested in isolation, verifying its behavior under controlled conditions. - -**Note**: -- Ensure that the API key and base URL are correctly configured for the intended services. -- The database path should be valid and accessible for reading and writing JSON data. -- The methods within `RepoAssistant` rely on external services and local data; hence, network connectivity and data integrity are crucial for its operation. - -**Output Example**: -A possible appearance of the code's return value from the `respond` method could be a tuple containing the original message, the bot's response, a markdown list of retrieved documents, identified questions, unique code snippets, and formatted code snippets. For instance: - -``` -("How do I implement a linked list in Python?", -"Here's how you can implement a linked list in Python: [...]", -"1. Document A\n2. Document B\n", -"What is a linked list?", -"def linked_list_example():\n # Linked list implementation", -"1. 
def linked_list_example():\n # Linked list implementation\n") -``` -### FunctionDef __init__(self, api_key, api_base, db_path) -**__init__**: The function of __init__ is to initialize the RepoAssistant object with necessary configurations and components for its operation. - -**Parameters**: -- `api_key`: The API key required for authentication with external services. -- `api_base`: The base URL for the API endpoints of the external services. -- `db_path`: The file path to the database used for storing and retrieving data. - -**Code Description**: -The `__init__` method of the `RepoAssistant` class is designed to set up the initial state of the object by initializing its attributes and configuring its dependencies. Upon instantiation, it performs the following operations: - -1. **Attribute Initialization**: It initializes several attributes with the values passed through its parameters (`api_key`, `api_base`, `db_path`). Additionally, it initializes `md_contents` as an empty list, which is presumably used to store markdown content related to the repository. - -2. **Language Model Configuration**: Two instances of language models are created using the `OpenAI` class, with one configured to use the "gpt-3.5-turbo-1106" model and the other to use the "gpt-4-1106-preview" model. This indicates the use of different generative models for various tasks within the assistant's operations. - -3. **AI Model Initialization**: An instance of an AI model is created using the `AI` class, indicating another component for processing or generating text-based responses. - -4. **Text Analysis Tool Initialization**: An instance of `TextAnalysisTool` is created, passing the language model (`llm`) and the database path (`db_path`). This component is responsible for providing text analysis capabilities, such as keyword extraction and named entity recognition, utilizing the language model. - -5. **JSON Data Processing**: An instance of `JsonFileProcessor` is created with the database path (`db_path`), indicating the assistant's capability to process JSON files, which could include reading, extracting, and searching data within JSON structures. - -6. **Chroma Data Management**: An instance of `ChromaManager` is initialized with the `api_key` and `api_base`, which is used for managing interactions with a vector database. This component is essential for storing and retrieving document embeddings, facilitating the retrieval of semantically similar documents. - -**Relationship with Callees**: -- The `OpenAI` and `AI` instances indicate the use of external AI services for text generation and processing, essential for the assistant's functionality in generating responses or analyzing text. -- The `TextAnalysisTool` and `JsonFileProcessor` instances highlight the assistant's capabilities in processing and analyzing text and structured data, which are crucial for tasks such as querying code blocks or extracting information from JSON files. -- The `ChromaManager` instance underscores the assistant's role in managing document embeddings, which is vital for operations involving the retrieval of documents based on semantic similarity. - -**Note**: -- It is crucial to ensure that the `api_key` and `api_base` provided are valid and have the necessary permissions for accessing the respective services. -- The `db_path` should be a valid path to a database file that the assistant will use for storing and retrieving data. 
-- The initialization of multiple language models and the AI model suggests a flexible design, allowing the assistant to leverage different models for specific tasks. However, this also implies a dependency on external services, which should be managed carefully, especially concerning API rate limits and costs. -*** -### FunctionDef generate_queries(self, query_str, num_queries) -**generate_queries**: The function of generate_queries is to generate a specified number of search queries based on a single input query. - -**Parameters**: -- **query_str**: A string representing the input query from which the search queries will be generated. -- **num_queries**: An integer specifying the number of search queries to generate. It defaults to 4 if not provided. - -**Code Description**: -The `generate_queries` function is designed to assist in generating multiple search queries from a single input query. It utilizes a template string to instruct an underlying language model (referred to as `llm` within the code) to produce the desired number of queries. The template instructs the model to act as a helpful assistant that generates search queries based on the provided input query. The number of queries to generate is adjustable via the `num_queries` parameter, with a default value of 4. The function formats this template with the actual number of queries (adjusted by subtracting 1 to account for the input query itself) and the input query string. It then sends this formatted prompt to the language model and receives a response. The response is expected to be a text containing the generated queries, separated by new lines. These queries are then split by new lines to create a list of individual queries, which is returned by the function. - -From a functional perspective within the project, `generate_queries` is called by the `respond` method of the `RepoAssistant` class. In the `respond` method, it is used to generate search queries based on a prompt derived from a message and instruction. These generated queries are then used to retrieve relevant documents and metadata from a data collection, aiding in constructing a comprehensive response to the initial message. This demonstrates the function's role in facilitating dynamic and contextually relevant search operations within the assistant's response generation process. - -Additionally, the function is tested in the project's test suite, specifically in the `test_generate_queries` method of the `TestRepoAssistant` class. This test verifies that the function correctly generates the expected number of queries based on a mock input and a specified number of queries. - -**Note**: -- The function relies on the successful execution of the language model's `complete` method to generate queries. It is important to ensure that the language model is properly initialized and configured in the `RepoAssistant` class. -- The function assumes that the language model's response will be appropriately formatted, with each generated query separated by a new line. Any changes in the model's response format may require adjustments to the function's implementation. 
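A sketch of the method as described, for orientation; the template wording and the response handling are assumptions, since only the `num_queries - 1` adjustment, the `complete` call, and the newline split are stated explicitly:

```python
def generate_queries(self, query_str: str, num_queries: int = 4):
    # Template paraphrased from the description; the original source may
    # phrase it differently.
    prompt = (
        "You are a helpful assistant that generates multiple search queries "
        f"based on a single input query. Generate {num_queries - 1} search "
        "queries, one on each line, related to the following input query:\n"
        f"Query: {query_str}\nQueries:\n"
    )
    response = self.llm.complete(prompt)
    # One generated query per line, per the description.
    return str(response).strip().split("\n")
```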
- -**Output Example**: -Assuming the language model's response is "Query1\nQuery2\nQuery3\nQuery4", the function's return value would be: -``` -["Query1", "Query2", "Query3", "Query4"] -``` -*** -### FunctionDef rerank(self, query, docs) -**rerank**: The function of rerank is to reorder a list of documents based on their relevance to a given query, using a language model to score their relevance, and return the top 5 most relevant documents. - -**Parameters**: -- **query**: A string representing the user's search query. -- **docs**: A list of documents to be ranked according to their relevance to the query. - -**Code Description**: -The `rerank` function is a critical component of the RepoAssistant's document retrieval process, specifically designed to enhance the relevance of documents retrieved in response to a user query. It leverages a language model, specifically 'gpt-4-1106-preview', to evaluate the relevance of each document to the given query. The function initiates by sending a structured prompt to the language model, instructing it to act as an expert relevance ranker. This prompt includes the query and the documents to be ranked, formatted in a way that the model can understand. - -Upon receiving the response from the language model, the function parses the JSON response to extract the relevance scores of the documents. These scores are then used to sort the documents in descending order of relevance. The sorting is based on a key, 'relevance_score', which is assumed to be a part of each document's representation in the response. - -Finally, the function extracts the content of the top 5 documents based on their relevance scores and returns these as a list. This subset of documents is considered the most relevant to the user's query and is intended for further processing or presentation to the user. - -In the context of its calling situation within the project, the `rerank` function is invoked by the `respond` method of the RepoAssistant. The `respond` method is responsible for handling user queries, retrieving documents based on those queries, and then using `rerank` to ensure that the documents presented to the user are of the highest relevance. This process involves generating queries, retrieving documents from a collection, and then refining these documents' list by reranking them based on relevance before presenting the final set to the user. - -**Note**: -- The function assumes that the language model's response is well-formed and can be parsed into a JSON object without errors. It is crucial to ensure that the model is correctly configured and the response format is as expected. -- The relevance scoring and ranking are entirely dependent on the language model's capabilities and the quality of the input provided to it. The effectiveness of the reranking process is thus influenced by the model's performance and the clarity and relevance of the initial documents and query. - -**Output Example**: -Assuming the function processes a set of documents related to "machine learning", an example output might look like this: -```python -[ - "Document about neural networks and deep learning", - "An introduction to machine learning algorithms", - "Advanced techniques in machine learning", - "Machine learning in healthcare", - "The future of machine learning and AI" -] -``` -This output represents the content of the top 5 documents deemed most relevant to the query, sorted in order of their relevance. 
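The flow described above (score with the model, parse JSON, sort, keep five) can be sketched as follows; the prompt wording and the response schema (the `documents` and `relevance_score` keys) are assumptions beyond what the description states:

```python
import json

def rerank(self, query, docs):
    # Ask the GPT-4 client to score each document's relevance to the query.
    response = self.client.complete(
        "You are an expert relevance ranker. Given a query and a list of "
        "documents, score how relevant each document is to the query.\n"
        f"Query: {query}\nDocuments: {docs}\n"
        'Respond with JSON of the form '
        '{"documents": [{"content": "...", "relevance_score": 0.0}]}'
    )
    scored = json.loads(str(response))["documents"]
    # Sort by relevance and keep only the top 5 documents' contents.
    scored.sort(key=lambda d: d["relevance_score"], reverse=True)
    return [d["content"] for d in scored[:5]]
```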
-*** -### FunctionDef rag(self, query, retrieved_documents) -**rag**: The function of rag is to generate a response to a user's query based on retrieved documents relevant to the query. - -**Parameters**: -- `query`: A string representing the user's question or query. -- `retrieved_documents`: A list of strings, where each string is a document retrieved based on the query's relevance. - -**Code Description**: -The `rag` function is a crucial component of the RepoAssistant's ability to interact with repository queries. It takes a user's query and a list of documents retrieved based on the relevance to this query. The function then formats these inputs into a structured message that includes a brief introduction, the user's query, and the concatenated retrieved documents. This structured message is then passed to a language model (referred to as `self.llm.complete` in the code), which generates a response based on the provided context. - -The introduction part of the message sets the stage for the language model, indicating that it acts as a helpful assistant in a repository Q&A context. It specifies that the assistant's responses should be based solely on the information given in the retrieved documents. - -The function's integration into the project is demonstrated through its calls in different contexts. For instance, in the `respond` method of the RepoAssistant, `rag` is used to generate a response after processing and reranking documents based on the user's message. This indicates that `rag` serves as a final step in formulating the assistant's reply to the user, leveraging the pre-processed and filtered information. - -In the testing environment, specifically in `test_rag` of `TestRepoAssistant`, the function is tested to ensure it correctly generates a response given a mock query and documents. The test validates the function's ability to integrate with the language model and produce the expected output. - -**Note**: -- The effectiveness of the `rag` function heavily depends on the quality and relevance of the `retrieved_documents` provided to it. Therefore, ensuring that the document retrieval and ranking mechanisms are accurately implemented is crucial for the function's success. -- The language model's performance and the structure of the message passed to it can significantly impact the quality of the generated response. Adjustments to the message structure or the model used may be necessary based on the specific requirements of the repository Q&A context. - -**Output Example**: -Assuming the language model is well-tuned and the retrieved documents are highly relevant, an example output for a query "How do I contribute to the repository?" might look like: - -"Contributing to the repository can be done by following the contribution guidelines outlined in the README.md document. Please ensure you have read the guidelines thoroughly before submitting your contribution." -*** -### FunctionDef list_to_markdown(self, list_items) -**list_to_markdown**: The function of list_to_markdown is to convert a list of items into a markdown-formatted string, where each item is presented as a numbered list entry. - -**Parameters**: -- `self`: Represents the instance of the class where the function is defined. -- `list_items`: A list of items that are to be converted into markdown format. - -**Code Description**: The `list_to_markdown` function starts by initializing an empty string named `markdown_content`. 
It then iterates over the `list_items` provided as an argument, with each iteration adding a new line to `markdown_content` formatted as a numbered list entry. This is achieved by using the `enumerate` function, which not only returns each item in the list but also its index (starting from 1, as specified by the `start=1` argument). The index and item are formatted into a string that follows the markdown syntax for numbered lists (`{index}. {item}\n`) and appended to `markdown_content`. Once all items have been processed, the function returns `markdown_content`, which is now a string that represents the original list in markdown format. - -In the context of its calling situation within the project, specifically within the `respond` method of the `RepoAssistant` class, `list_to_markdown` is used to format lists of documents and code snippets into a markdown format before they are included in the response generated by the bot. This allows for a more organized and readable presentation of lists when the bot communicates its findings or responses to the user. The function plays a crucial role in ensuring that the output is not only informative but also aesthetically pleasing and easy to navigate, enhancing the overall user experience. - -**Note**: It is important to ensure that the `list_items` parameter is indeed a list and that it contains items that can be meaningfully converted to string format. This function assumes that each item in the list can be represented as a string, as it directly formats the items into the markdown string without any type checking or conversion. - -**Output Example**: -If `list_to_markdown` is called with `list_items` parameter as `['Apple', 'Banana', 'Cherry']`, the output would be: -``` -1. Apple -2. Banana -3. Cherry -``` -This output is a string that follows the markdown syntax for numbered lists, making it ready to be used in markdown documents or displayed in environments that support markdown formatting. -*** -### FunctionDef rag_ar(self, query, related_code, embedding_recall, project_name) -**rag_ar**: The function of rag_ar is to generate a response to a user's query by integrating related code snippets, documents, and project-specific information into a comprehensive answer. - -**Parameters**: -- **self**: Represents the instance of the class from which rag_ar is called. -- **query**: The user's question or query that needs to be addressed. -- **related_code**: Code snippets that are most likely related to the user's query, as recalled by a retriever system. -- **embedding_recall**: Documents that are most relevant to the user's query, as recalled by a retriever system. -- **project_name**: The name of the project within which the user's query is being addressed. - -**Code Description**: -The `rag_ar` function is designed to act as a sophisticated Q&A assistant for software repository-related inquiries. It takes a user's query, along with related code snippets and documents that have been identified as potentially relevant to the query, and the name of the current project. The function then constructs a detailed message that outlines its role as a repository-level software Q&A assistant, the user's query, and the provided related code and documents. This message is designed to simulate a context in which the assistant is aware of its task, the project it is currently addressing, and the resources it has at its disposal to provide a meaningful response. 
- -The function emphasizes the importance of filtering the provided information to accurately address the user's query, ensuring responses are detailed, accurate, and presented in a professional manner. It also includes a directive to decline to answer if the query cannot be accurately addressed with the provided information, highlighting the importance of not fabricating information. - -After constructing this detailed message, the function uses a client (presumably an AI or machine learning model) to generate a response based on the constructed message. The response is then returned to the caller. - -In the context of its calling situation within the project, `rag_ar` is invoked by the `respond` method of the `RepoAssistant` class. The `respond` method is responsible for processing a message (user's query), extracting relevant questions, retrieving related documents and code snippets, and then calling `rag_ar` to generate a comprehensive response based on the retrieved information and the original query. This highlights `rag_ar`'s role in synthesizing information and providing detailed answers within the broader system of responding to user queries about a software repository. - -**Note**: -- It is crucial that the information provided to `rag_ar` as parameters is accurate and relevant to the user's query to ensure the quality of the response. -- The function assumes the availability of a client capable of generating responses based on the constructed message. The specifics of this client (e.g., its implementation or how it generates responses) are not detailed within the function, implying a dependency on external AI or machine learning capabilities. - -**Output Example**: -A possible appearance of the code's return value could be a detailed, professional response to the user's query, incorporating insights from the related code and documents, and tailored to the specifics of the project in question. For instance, if the query was about how to implement a specific feature in the project, the response might include a step-by-step guide referencing the provided code snippets and documents, ensuring the answer is relevant and useful within the context of the project. -*** -### FunctionDef respond(self, message, instruction) -**respond**: The function of respond is to generate a comprehensive response to a user's message based on a given instruction and related repository data. - -**Parameters**: -- `message`: A string representing the user's message or query. -- `instruction`: A string providing context or instruction related to the user's message. - -**Code Description**: -The `respond` function is a method within the `RepoAssistant` class designed to process a user's query and generate a detailed response. This process involves several steps, each leveraging different components of the system to gather information, analyze it, and construct a response that is both informative and relevant to the user's original query. - -Initially, the function formats the user's message and the given instruction into a chat prompt using the `format_chat_prompt` method. This formatted prompt is then used to extract keywords with the `keyword` method, which aids in querying the repository's data. - -Subsequently, the function generates a set of queries from the prompt using the `generate_queries` method. For each generated query, it retrieves documents and metadata from a chroma data collection, accumulating results and unique identifiers for further processing. 
- -The unique identifiers are used to filter out duplicate entries, ensuring that each document and code snippet considered in the response is distinct. The function then reranks the retrieved documents based on their relevance to the user's message using the `rerank` method, ensuring that the most pertinent information is prioritized. - -The `rag` method is called with the prompt and the reranked documents to generate an initial response. Additionally, the function extracts and analyzes keywords from the bot's message and the original prompt using the `nerquery` method, which helps in identifying relevant code blocks and markdown content. - -The function combines code and markdown content related to both sets of keywords, ensuring uniqueness among them. It then reranks the combined list of documents and code snippets to refine the response further. - -Finally, the `rag_ar` method is invoked to generate the final bot message, incorporating the reranked documents and code snippets into a comprehensive response. The function returns multiple elements, including the original message, the final bot message, a markdown-formatted list of retrieved documents, extracted questions, unique code content, and a markdown-formatted list of code snippets. - -**Note**: -- The function relies on the successful integration and operation of multiple components within the `RepoAssistant` class, including text analysis, query generation, document retrieval, and response generation. -- The quality and relevance of the generated response heavily depend on the underlying data collection, the effectiveness of the keyword extraction, and the performance of the reranking and response generation models. -- The function is designed to handle complex queries and generate responses that are as informative and relevant as possible, but the accuracy and completeness of the response may vary based on the query's specificity and the available repository data. - -**Output Example**: -A possible output of the `respond` function could include: -- The original user message: "How do I contribute to the repository?" -- A detailed bot message providing instructions on contributing to the repository, possibly including relevant code snippets and documentation links. -- A markdown-formatted list of retrieved documents that were considered in generating the response. -- Extracted questions or keywords from the user's message. -- Unique code content related to the query, formatted in markdown. -- A markdown-formatted list of code snippets that were included in the final response. -*** diff --git a/markdown_docs/repo_agent/chat_with_repo/vectordb.md b/markdown_docs/repo_agent/chat_with_repo/vectordb.md deleted file mode 100644 index b53e0aa..0000000 --- a/markdown_docs/repo_agent/chat_with_repo/vectordb.md +++ /dev/null @@ -1,90 +0,0 @@ -## ClassDef ChromaManager -**ChromaManager**: The function of ChromaManager is to manage the interaction with a vector database for storing and retrieving document embeddings. - -**Attributes**: -- `api_key`: The API key used for authentication with the embedding service. -- `api_base`: The base URL of the embedding service API. -- `chroma_collection`: A reference to the collection within the vector database where documents are stored. -- `is_new_collection`: A boolean flag indicating whether the `chroma_collection` was newly created during the initialization of the ChromaManager instance. 
- -**Code Description**: -The `ChromaManager` class is designed to facilitate the storage and retrieval of document embeddings in a vector database, specifically using a collection named "test". It initializes a connection to the vector database through a `PersistentClient` and checks if a collection named "test" exists. If the collection exists, it is loaded; otherwise, a new collection is created with the specified embedding function. The embedding function utilizes an API for generating embeddings, configured with the provided `api_key`, `api_base`, and a predefined model name. - -The `init_chroma_collection` method is responsible for initializing the `chroma_collection`. It uses the `PersistentClient` from the `chromadb` package to interact with the vector database, located at "./chroma_db". The method checks for the existence of the "test" collection and either loads it or creates it, setting the `is_new_collection` flag accordingly. - -The `create_vector_store` method allows for the storage of markdown content and associated metadata in the `chroma_collection`. This method is particularly designed to handle new collections by ensuring that the number of documents and metadata entries match and storing them in the vector database. For existing collections, it currently logs the `is_new_collection` status for debugging purposes. - -From a functional perspective within the project, the `ChromaManager` is instantiated in the `RepoAssistant` class, indicating its role in managing document embeddings for a repository assistant tool. The `RepoAssistant` initializes the `ChromaManager` with an API key and base URL, which are used for embedding function calls. This setup suggests that the `ChromaManager` plays a critical role in processing and storing document embeddings, facilitating the retrieval of semantically similar documents based on their embeddings. - -**Note**: -- It is crucial to ensure that the API key and base URL provided to the `ChromaManager` are valid and have the necessary permissions for accessing the embedding service. -- The `chroma_collection` is hardcoded to "test", which may need to be configured differently based on the application's requirements. -- Error handling is implemented for the case where a collection creation attempt fails due to the collection already existing, indicating robustness in collection management. -- The `create_vector_store` method's functionality is currently limited to handling new collections, and its behavior or implementation may need to be extended for more comprehensive management of document embeddings in existing collections. -### FunctionDef __init__(self, api_key, api_base) -**__init__**: The function of `__init__` is to initialize a ChromaManager instance with API credentials and prepare a chroma collection for use. - -**Parameters**: -- `api_key`: The API key required for authentication with the vector database or embedding service. -- `api_base`: The base URL of the API service. - -**Code Description**: The `__init__` method is a constructor for the `ChromaManager` class, responsible for setting up the necessary attributes for managing chroma collections within a vector database. Upon instantiation of a `ChromaManager` object, the method performs the following operations: - -1. It assigns the provided `api_key` and `api_base` to the instance variables `self.api_key` and `self.api_base`, respectively. These credentials are essential for authenticating requests to the vector database or embedding service. - -2. 
It initializes `self.chroma_collection` to `None`. This attribute is intended to hold a reference to a chroma collection, which is a specific type of collection within the vector database designed for storing and managing chroma vectors. - -3. It sets `self.is_new_collection` to `False`. This boolean attribute indicates whether the chroma collection referenced by `self.chroma_collection` was newly created during the initialization process or if it was an existing collection that was retrieved. - -4. Finally, it calls the `init_chroma_collection` method. This method is crucial for ensuring that a chroma collection named "test" is either initialized or retrieved from the vector database, making it ready for use with specific embedding functions. The `init_chroma_collection` method checks if the collection exists, creates it if it does not, or retrieves it if it does. It also sets the `self.chroma_collection` attribute to reference the initialized or retrieved collection and updates the `self.is_new_collection` attribute based on whether the collection was newly created or already existed. - -**Relationship with Callees**: The `__init__` method directly calls the `init_chroma_collection` method as part of the ChromaManager's initialization process. This call is essential for preparing the chroma collection for use, as detailed in the `init_chroma_collection` method's documentation. The successful execution of `init_chroma_collection` ensures that the `ChromaManager` instance is fully prepared to manage chroma collections within the vector database, with the `self.chroma_collection` attribute correctly set to reference the relevant collection. - -**Note**: It is important to provide valid API credentials (`api_key` and `api_base`) when instantiating a `ChromaManager` object. These credentials are required for authenticating requests to the vector database or embedding service and for configuring the embedding function used by the chroma collection. Failure to provide valid credentials may result in authentication errors or other issues when attempting to manage chroma collections. -*** -### FunctionDef init_chroma_collection(self) -**init_chroma_collection**: The function of `init_chroma_collection` is to initialize or retrieve a chroma collection named "test" from a persistent database, setting it up for use with specific embedding functions. - -**Parameters**: This function does not accept any parameters. - -**Code Description**: The `init_chroma_collection` function is a crucial component of the `ChromaManager` class, designed to manage collections within a vector database, specifically for handling chroma collections. It performs several key operations as follows: - -1. **Initialization of Persistent Client**: It starts by creating a `PersistentClient` from the `chromadb` library, targeting a local database located at "./chroma_db". This client is used to interact with the database for operations like listing, creating, and retrieving collections. - -2. **Listing Existing Collections**: The function retrieves a list of all existing collections in the database using the `list_collections` method of the `chroma_client`. It logs the names of these collections for debugging purposes. - -3. **Collection Existence Check**: It checks if a collection named "test" already exists within the database. This is done by searching for "test" in the list of existing collections. - -4. 
**Collection Handling**: - - If the "test" collection exists, it is loaded using the `get_collection` method, specifying an embedding function through the `OpenAIEmbeddingFunction`. This embedding function is configured with API keys and base URL, along with a model name "text-embedding-3-large". - - If the "test" collection does not exist, the function attempts to create it using the `create_collection` method with the same embedding function configuration. If creation fails due to a `UniqueConstraintError`, indicating the collection already exists due to a race condition or similar issue, it falls back to retrieving the collection as if it existed. - -5. **Collection State Tracking**: The function tracks whether the "test" collection was newly created or already existed using the `is_new_collection` boolean attribute of the `ChromaManager` class. This attribute is set to `True` if a new collection is created, and `False` otherwise. - -**Relationship with Callers**: The `init_chroma_collection` function is directly called during the initialization of a `ChromaManager` instance, as seen in the `__init__` method of the `ChromaManager` class. This ensures that a chroma collection is ready for use immediately upon the creation of a `ChromaManager` object. Additionally, it is tested in the project's testing suite, specifically in `test_init_chroma_collection` within `test_vectordb.py`, to verify its functionality in both creating a new collection and retrieving an existing one, as well as ensuring the `chroma_collection` attribute is correctly set. - -**Note**: It is important to ensure that the API keys and base URL provided to the `OpenAIEmbeddingFunction` are valid and have the necessary permissions for creating and managing collections in the database. Additionally, handling the `UniqueConstraintError` ensures robustness in scenarios where the collection might be concurrently accessed or modified. -*** -### FunctionDef create_vector_store(self, md_contents, meta_data) -**create_vector_store**: The function of `create_vector_store` is to process Markdown content and store it in a vector database, specifically within a Chroma collection. - -**Parameters**: -- `md_contents`: A list containing Markdown content that needs to be processed and stored. -- `meta_data`: A list containing metadata associated with each Markdown content in `md_contents`. - -**Code Description**: -The `create_vector_store` function is a method within the `ChromaManager` class, designed to handle the storage of Markdown content and its associated metadata into a vector database, leveraging the capabilities of a Chroma collection. The function operates differently based on whether it is dealing with a new collection or an existing one, as indicated by the `is_new_collection` attribute of the `ChromaManager` class. - -When dealing with a new collection (`is_new_collection` is True), the function first ensures that the number of identifiers (`ids`) matches the length of the shorter list between `md_contents` and `meta_data`. This is to maintain consistency and avoid indexing errors. It generates a list of string identifiers ranging from 0 to the minimum length of `md_contents` and `meta_data` minus one. Subsequently, it adds the content and metadata to the Chroma collection using these identifiers, but only up to the length of the shortest list, ensuring that each piece of content has corresponding metadata. 
-
-If the collection is not new (`is_new_collection` is False), the function logs a debug message indicating the status of `is_new_collection`. This branch of the function's logic does not perform any operation on the Chroma collection, suggesting that additional steps or conditions might be required to update or add new content to an existing collection.
-
-The function is utilized within the project in two main contexts:
-1. In the `main` function of the `main.py` script, where Markdown content and metadata extracted by an instance of `RepoAssistant` are passed to `create_vector_store` for storage. This usage suggests that the function plays a critical role in initializing or updating the project's vector database with new or modified content.
-2. In the `test_create_vector_store` method of the `TestChromaManager` class within `test_vectordb.py`, which is part of the project's test suite. This test verifies that `create_vector_store` behaves as expected when adding content to a Chroma collection, including the generation of identifiers and the correct handling of content and metadata. The test uses mock objects to simulate the behavior of the Chroma collection and embedding functions, ensuring that the function's logic is correctly implemented without the need for a live database connection.
-
-**Note**:
-- It is crucial to ensure that the `md_contents` and `meta_data` lists are correctly aligned and of equal length when dealing with new collections to avoid data inconsistency.
-- The function's behavior and effectiveness are contingent upon the correct setting of the `is_new_collection` attribute before its invocation.
-- The debug logging when `is_new_collection` is False suggests that additional implementation or external steps may be required to handle updates to existing collections.
-***
diff --git a/markdown_docs/repo_agent/doc_meta_info.md b/markdown_docs/repo_agent/doc_meta_info.md
index 1f7a9c9..cc0e319 100644
--- a/markdown_docs/repo_agent/doc_meta_info.md
+++ b/markdown_docs/repo_agent/doc_meta_info.md
@@ -1,944 +1,926 @@
 ## ClassDef EdgeType
-**EdgeType**: The function of EdgeType is to enumerate the types of relationships (edges) that can exist between different items within a repository.
+**EdgeType**: The function of EdgeType is to define the different types of edge relationships.
-**Attributes**:
-- `reference_edge`: Represents a relationship where one object references another object.
-- `subfile_edge`: Indicates a relationship where a file or folder is part of another folder.
-- `file_item_edge`: Denotes a relationship where an object is part of a file.
+**attributes**: The attributes of this class include:
+· reference_edge: An edge indicating that one object references another object.
+· subfile_edge: An edge indicating that a file or folder belongs to a folder.
+· file_item_edge: An edge indicating that an object belongs to a file.
-**Code Description**:
-The `EdgeType` class is an enumeration that defines the types of relationships (edges) that can exist between items in a repository. This class is crucial for understanding and managing the hierarchical and referential relationships within the repository's structure. It categorizes the relationships into three distinct types:
+**Code Description**: The EdgeType class is an enumeration that uses the Enum module to define three distinct edge types. Each edge type is automatically assigned a unique value through the auto() function. Specifically:
+- reference_edge represents a relationship in which one object references another object, typically used to express dependencies or connections between objects.
+- subfile_edge represents the hierarchical relationship of files and folders, indicating that a file or folder is a child of another folder. This is very common in file system structures.
+- file_item_edge indicates that an object is a component of a file, typically used to describe the internal structure or contents of a file.
-1. `reference_edge`: This type is used when there is a direct reference from one object to another. It is essential for understanding dependencies and linkages between different objects in the repository.
+By using the EdgeType class, developers can clearly define and distinguish the different edge relationships, improving the readability and maintainability of code that handles relationships between objects.
-2. `subfile_edge`: This type is applied to represent the hierarchical relationship between files and folders, specifically indicating that a file or folder is contained within another folder. This is vital for managing the file system structure within the repository.
-
-3. `file_item_edge`: This type signifies that an object is part of a file, which is crucial for tracking the association between repository objects and their corresponding files.
-
-The `EdgeType` enumeration is utilized by other components within the project, such as the `get_edge_type` method in the `DocItemType` class. The `get_edge_type` method determines the type of edge (relationship) between two items, based on their types. By doing so, it leverages the `EdgeType` enumeration to categorize and return the appropriate relationship type. This integration highlights the importance of `EdgeType` in defining and managing the relationships between different items in the repository, facilitating operations like documentation generation, repository navigation, and structure analysis.
-
-**Note**:
-When using the `EdgeType` enumeration, it is important to understand the context and nature of the relationship between items to accurately apply the correct edge type. Misclassification of relationships can lead to incorrect assumptions about the repository's structure and dependencies. Therefore, developers should carefully consider the specific characteristics of the relationship they are modeling when selecting an edge type from this enumeration.
+**Note**: When using EdgeType, make sure to reference the correct edge type in the appropriate context to avoid logic errors.
## ClassDef DocItemType
-**DocItemType**: The function of DocItemType is to define possible types of document items within a repository, ranging from the repository root to more granular elements like functions and variables.
+**DocItemType**: The function of DocItemType is to define various types of documentation items in a structured manner.
-**Attributes**:
-- `_repo`: Represents the root of the repository, for which a README needs to be generated.
-- `_dir`: Represents a directory within the repository.
-- `_file`: Represents a file within the repository.
-- `_class`: Represents a class definition within a file.
-- `_class_function`: Represents a function defined within a class.
-- `_function`: Represents a standalone function within a file.
-- `_sub_function`: Represents a function defined within another function.
-- `_global_var`: Represents a global variable within a file.
+**attributes**: The attributes of this Class.
+· _repo: Represents the root node, which requires a README to be generated.
+· _dir: Represents a directory.
+· _file: Represents a file.
+· _class: Represents a class.
+· _class_function: Represents a function defined within a class.
+· _function: Represents a general function defined within a file.
+· _sub_function: Represents a sub-function defined within another function.
+· _global_var: Represents a global variable.
-**Code Description**:
-The `DocItemType` class is an enumeration that categorizes different types of documentable items in a software project's repository. It provides a structured way to identify and differentiate between various levels of documentation granularity, from the entire repository down to individual sub-functions and global variables within the code. This classification is crucial for generating documentation that accurately reflects the structure and components of the project.
+**Code Description**: The DocItemType class is an enumeration that categorizes different types of documentation items within a project. It provides a clear structure for identifying the nature of each item, whether it is a repository, directory, file, class, function, or variable. Each enumeration member is automatically assigned a unique value using the `auto()` function, which simplifies the management of these types. -The class includes methods such as `to_str` and `print_self`, which respectively return string representations of the item types and print the item types with specific colors for visual distinction in the terminal. The `get_edge_type` method is a placeholder for determining the relationship type between two document items, though its implementation is not provided in the class definition. +The class includes several methods that enhance its functionality: +- **to_str**: This method converts the enumeration member to a string representation. It provides specific string outputs for class and function types, while returning the name of the enumeration member for others. This is useful for generating readable documentation or logs. + +- **print_self**: This method returns a colored string representation of the enumeration member based on its type. It uses color coding to visually distinguish between different types of documentation items when printed, enhancing the readability of output in the console. -In the context of the project, `DocItemType` is used extensively to categorize and manage documentation tasks. For example, the `DocItem` class uses `DocItemType` to specify the type of each documentation item, which influences how documentation is generated and organized. The `need_to_generate` function in `runner.py` uses `DocItemType` to filter out items that do not require documentation generation, such as files, directories, and the repository root, focusing instead on more granular elements like classes and functions. +- **get_edge_type**: This method is defined but not implemented. It is intended to determine the relationship between two documentation item types, which could be useful for understanding how different items interact within the documentation structure. -**Note**: -When using `DocItemType` in the project, it is important to correctly categorize each item to ensure that the documentation structure accurately reflects the codebase. Misclassification can lead to incorrect documentation generation or organization. +The DocItemType class is utilized within the DocItem class, which represents individual documentation items in the project. The DocItem class uses the DocItemType enumeration to set the type of each item, allowing for organized management of documentation elements. For example, the `need_to_generate` function checks the type of a DocItem against the DocItemType enumeration to determine whether documentation should be generated for that item. It specifically skips generating documentation for items classified as files, directories, or repositories, focusing instead on finer-grained items like functions and classes. -**Output Example**: -An example usage of `DocItemType` could be categorizing a Python class in a file for documentation purposes: -```python -item_type = DocItemType._class -print(item_type.to_str()) # Output: ClassDef -``` +**Note**: When using the DocItemType enumeration, it is important to understand the hierarchy of documentation items and their types to ensure proper documentation generation and management. 
The color coding in the print_self method is particularly useful for debugging and visual representation in command-line interfaces.

-This example demonstrates how an item type is assigned to a class and how its string representation can be obtained, which in this case would be "ClassDef", indicating that the item is a class definition.
+**Output Example**: An example output of the `print_self` method for a class type might look like this in the console:
+`"\033[31m_class\033[0m"`
+This indicates that the item is a class, with the text displayed in red.
### FunctionDef to_str(self)
-**to_str**: The function of `to_str` is to convert an enumeration member of `DocItemType` into a string representation that describes the type of documentation item it represents.
-
-**Parameters**: This function does not accept any parameters as it is designed to be called on an instance of the `DocItemType` enumeration.
+**to_str**: The function of to_str is to convert the DocItemType into its string representation.

-**Code Description**: The `to_str` function is a method of the `DocItemType` enumeration, which is used to categorize different types of documentation items within the project, such as classes, functions, and sub-functions. When called, it checks the instance of `DocItemType` it is called on and returns a string that represents the type of documentation item. Specifically, it returns "ClassDef" if the instance represents a class, and "FunctionDef" for functions, class functions, and sub-functions. If the instance does not match any of the predefined types, it defaults to returning the name of the enumeration member. This method is crucial for generating human-readable representations of documentation item types, which enhances the readability and maintainability of the documentation.
+**parameters**: This function takes no parameters.

-The function plays a significant role in the context of its callers within the project. For instance, in the `walk_file` method of the `MetaInfo` class, it is used to convert the documentation item type of a `DocItem` object into a string, which is then included in the JSON object representing the file hierarchy. This allows for a more understandable and accessible representation of the documentation structure when exporting or analyzing the project's documentation.
+**Code Description**: The to_str function is a method of the DocItemType class that converts the type of the current object into the corresponding string representation. It determines the return value by comparing the current object (self) against the different types defined in DocItemType (such as _class, _function, _class_function, and _sub_function). If the current object is _class, it returns "ClassDef"; if it is _function, _class_function, or _sub_function, it returns "FunctionDef". If the current object matches none of these types, it returns the object's own name (self.name).

-Similarly, in the `to_markdown` method within the `Runner` class, `to_str` is used to prepend the type of the documentation item (converted into a string) to its name, forming a markdown header that clearly indicates the type of the item being documented. This aids in creating structured and easily navigable markdown documentation.
+Within the project, to_str is called in several places, mainly to obtain the type string of a DocItem for use when generating JSON structures or Markdown documents. In the walk_file function in repo_agent/doc_meta_info.py, to_str is used to set the "type" field of each DocItem object so that each object's type is clearly represented in the generated JSON. In addition, in the to_markdown function in repo_agent/runner.py, to_str is used to display the object's type in the Markdown document, keeping the document's structure and content clear and easy to understand.

-**Note**: It is important to ensure that the `DocItemType` enumeration is correctly maintained and updated to reflect all types of documentation items that can be encountered in the project. Failure to do so may result in incorrect or misleading representations of documentation items when using the `to_str` method.
+**Note**: When using this function, make sure the types in the DocItemType class are correctly defined to avoid returning inaccurate string representations.

-**Output Example**: If the `to_str` method is called on an instance of `DocItemType` that represents a class, the output will be "ClassDef". If called on an instance representing a function, class function, or sub-function, the output will be "FunctionDef".
+**Output Example**: Assuming the current object's type is _function, the return value of to_str will be "FunctionDef".
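+
+A sketch of the method consistent with this description:
+
+```python
+def to_str(self) -> str:
+    # Classes map to "ClassDef"; all function-like members map to "FunctionDef".
+    if self == DocItemType._class:
+        return "ClassDef"
+    if self in (DocItemType._function, DocItemType._class_function, DocItemType._sub_function):
+        return "FunctionDef"
+    # Fall back to the member name for every other type.
+    return self.name
+```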
***
### FunctionDef print_self(self)
-**print_self**: The function of `print_self` is to return a string representation of the `DocItemType` instance it is called on, with the name of the item type colored according to its category.
+**print_self**: The function of print_self is to return the name of the current DocItemType object, colored according to the object's type.

-**Parameters**: This function does not take any parameters apart from the implicit `self` parameter, which refers to the instance of the `DocItemType` class on which the function is called.
+**parameters**: This function takes no parameters.

-**Code Description**: The `print_self` function begins by setting a default text color to white. It then checks the type of the `DocItemType` instance it is called on and changes the color based on the category of the item. There are four categories checked: directory (`_dir`), file (`_file`), class (`_class`), and function (`_function`). Each category is associated with a different color: green for directories, yellow for files, blue for classes, and red for functions. After determining the appropriate color, the function concatenates this color with the name of the item type and resets the style at the end to ensure that the color change only applies to this specific string. The resulting string, which includes the colored name of the item type, is then returned.
+**Code Description**: The print_self function returns the name of the current DocItemType object, with a different color applied depending on the object's type (directory, file, class, function, and so on). Specifically, when the object is DocItemType._dir, the color is green; when it is DocItemType._file, yellow; when it is DocItemType._class, red; and when it is DocItemType._function, DocItemType._sub_function, or DocItemType._class_function, blue. The returned string contains the color code followed by the object's name, and the color style is reset at the end of the string.

-The function is utilized in the project by the `print_recursive` method of the `DocItem` class, specifically for printing a representation of repository objects in a structured and visually differentiated manner. When `print_recursive` prints the details of a `DocItem`, it calls `print_self` to obtain a colored string representation of the item's type, enhancing the readability of the output by visually distinguishing between different types of items (directories, files, classes, functions) in the repository structure.
+This function is called from the print_recursive method, which is responsible for recursively printing the structure of the repo object. When printing each object, print_recursive calls the item_type's print_self method to obtain the object's type name and color, providing more intuitive information in the output. This design makes it easy to identify the type of each object when printing a complex repo structure.

-**Note**: It is important to ensure that the color constants used (`Fore.WHITE`, `Fore.GREEN`, `Fore.YELLOW`, `Fore.BLUE`, `Fore.RED`) are defined and imported from a library that supports colored terminal text, such as `colorama`. Additionally, `Style.RESET_ALL` is used to reset the text style to default after changing its color, preventing unintended color changes to subsequent terminal outputs.
+**Note**: When using this function, make sure to call it in an appropriate context so that the object's type and color are displayed correctly.

-**Output Example**: Assuming the `DocItemType` instance represents a class, the output of calling `print_self` on this instance could look something like this (assuming the terminal supports colored text and `Fore.BLUE` corresponds to blue text):
-```
-[Blue Text]ClassName[Reset Style]
-```
+**Output Example**: Assuming the current object is DocItemType._file, the return value of print_self might be "\033[33mfile_name\033[0m", where "\033[33m" is the yellow color code and file_name is the object's name.
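+
+A sketch consistent with the color mapping described above, assuming colorama's Fore and Style constants:
+
+```python
+from colorama import Fore, Style
+
+def print_self(self) -> str:
+    color = Fore.WHITE  # default color
+    if self == DocItemType._dir:
+        color = Fore.GREEN
+    elif self == DocItemType._file:
+        color = Fore.YELLOW
+    elif self == DocItemType._class:
+        color = Fore.RED
+    elif self in (DocItemType._function, DocItemType._sub_function, DocItemType._class_function):
+        color = Fore.BLUE
+    # Reset the style so the color does not leak into later terminal output.
+    return color + self.name + Style.RESET_ALL
+```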
***
-### FunctionDef get_edge_type(from_item_type, to_item_type)
-**get_edge_type**: The function of get_edge_type is to determine the type of relationship (edge) between two items within a repository based on their item types.
+### FunctionDef get_edge_type(self, from_item_type, to_item_type)
+**get_edge_type**: The function of get_edge_type is to determine the edge type between one documentation item type and another.

-**Parameters**:
-- `from_item_type`: The type of the originating item in the relationship.
-- `to_item_type`: The type of the target item in the relationship.
+**parameters**: The parameters of this function.
+· parameter1: from_item_type - The documentation item type at which the edge starts, of type DocItemType.
+· parameter2: to_item_type - The documentation item type at which the edge ends, of type DocItemType.

-**Code Description**: The `get_edge_type` function plays a critical role in understanding and managing the relationships between different items in a repository. It accepts two parameters, both of which are instances of `DocItemType`. These parameters represent the types of the two items between which the relationship is being determined. The function then returns an `EdgeType`, which is an enumeration that categorizes the relationship into one of several predefined types.
+**Code Description**: The purpose of the get_edge_type function is to establish a relationship, or connection, between two documentation item types. It accepts two parameters, from_item_type and to_item_type, both of type DocItemType. Although the function body currently contains no concrete logic (it consists of a pass statement), its design intent is presumably that a future implementation will return an edge type representing the relationship between the two documentation item types, based on their characteristics. Such an edge type could be used to represent the documentation structure graphically or to support logical reasoning during documentation processing.

-The relationship types, as defined by the `EdgeType` enumeration, include `reference_edge`, `subfile_edge`, and `file_item_edge`. Each of these types represents a specific kind of relationship that can exist between items in the repository:
-- A `reference_edge` indicates a direct reference from one object to another, highlighting dependencies or linkages.
-- A `subfile_edge` denotes a hierarchical relationship, where one file or folder is part of another folder, crucial for understanding the repository's structure.
-- A `file_item_edge` signifies that an object is part of a file, important for tracking associations between repository objects and files.
-
-By determining the type of edge between items, `get_edge_type` facilitates various operations within the repository, such as documentation generation, navigation, and structure analysis. It leverages the `EdgeType` enumeration to accurately categorize and return the appropriate relationship type based on the item types provided.
-
-**Note**: When utilizing the `get_edge_type` function, it is essential to accurately understand the nature of the items' relationship to select the correct item types as inputs. This ensures the correct edge type is determined, maintaining the integrity of the repository's structure and the accuracy of its documentation and analysis. Incorrect inputs may lead to misclassification of relationships, affecting the repository's overall management and understanding.
+**Note**: When using this function, make sure the from_item_type and to_item_type arguments are valid DocItemType instances to avoid potential type errors. Also, since the function does not yet implement any concrete logic, be careful about how its return value is handled when calling it.
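+
+Because the body is still a bare pass statement, the following is purely a hypothetical sketch of what such a mapping might look like, reusing the EdgeType semantics described earlier; none of this logic exists in the source yet:
+
+```python
+def get_edge_type(self, from_item_type, to_item_type) -> EdgeType:
+    # Hypothetical mapping: an item contained in a folder is a subfile_edge,
+    # an object contained in a file is a file_item_edge, and anything else
+    # is treated as a plain reference between objects.
+    if to_item_type == DocItemType._dir:
+        return EdgeType.subfile_edge
+    if to_item_type == DocItemType._file:
+        return EdgeType.file_item_edge
+    return EdgeType.reference_edge
+```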
***
## ClassDef DocItemStatus
-**DocItemStatus**: The function of DocItemStatus is to represent the various states a documentation item can be in within the documentation generation process.
+**DocItemStatus**: The function of DocItemStatus is to represent the status of a documentation item.

-**Attributes**:
-- `doc_up_to_date`: Indicates that the documentation for the item is current and does not need to be updated.
-- `doc_has_not_been_generated`: Indicates that the documentation for the item has not yet been generated and needs to be created.
-- `code_changed`: Indicates that the source code for the item has been modified, necessitating an update to the documentation.
-- `add_new_referencer`: Indicates that a new referencer has been added to the item, potentially affecting its documentation.
-- `referencer_not_exist`: Indicates that a previous referencer of the item has been deleted or no longer references it, which may impact the documentation.
+**attributes**: The attributes of this class include:
+· doc_up_to_date: The documentation is up to date and does not need to be generated.
+· doc_has_not_been_generated: The documentation has not been generated yet and needs to be generated.
+· code_changed: The source code has been modified and the documentation needs to be updated.
+· add_new_referencer: A new referencer has been added.
+· referencer_not_exist: An object that previously referenced this object has been deleted or no longer references it.

-**Code Description**:
-The `DocItemStatus` class is an enumeration that defines the possible states of documentation items within the project's documentation generation process. This class plays a crucial role in managing the documentation workflow by indicating whether a documentation item is up to date, needs to be generated or updated due to code changes, or has changes in its references. It is used throughout the project to make decisions about when and how documentation should be generated or updated.
+**Code Description**: DocItemStatus is an enumeration class that defines the different states a documentation item can be in. It provides five states that indicate how documentation should be generated or updated. Specifically:
+- The doc_up_to_date state means the documentation is already current and no generation is required.
+- The doc_has_not_been_generated state means the documentation has not been generated yet and the developer needs to generate it.
+- The code_changed state means the source code has changed, so the documentation needs to be updated to reflect those changes.
+- The add_new_referencer state means a new referencer has been added to the documentation item, which may affect the documentation's content or structure.
+- The referencer_not_exist state means an object that used to reference this documentation item has been deleted or no longer references it, which may change the item's reference relationships.

-For instance, in the `DocItem` class, the `item_status` attribute is initialized with `DocItemStatus.doc_has_not_been_generated`, indicating that when a `DocItem` instance is created, its documentation needs to be generated. This status can change as the documentation generation process progresses, based on whether the source code changes, new references are added, or existing references are removed.
+Within the project, DocItemStatus is used by several objects, mainly to determine the status of a documentation item and decide whether its documentation needs to be generated or updated. For example, in the need_to_generate function, DocItemStatus is used to check an item's status in order to decide whether to skip the documentation generation process. In addition, the item_status attribute of the DocItem class uses DocItemStatus to track the state of each documentation item, which is essential for managing the documentation generation and update logic.

-The status is also used in the documentation generation logic, as seen in the `need_to_generate` function, where items with a status other than `doc_up_to_date` may be considered for documentation generation. Similarly, in the `generate_doc_for_a_single_item` method, the item's status is updated to `doc_up_to_date` once its documentation has been successfully generated, indicating that the documentation is current.
+**Note**: When using DocItemStatus, make sure to check and update the status of documentation items correctly when handling the generation and update logic, to avoid inconsistent or missing documentation.
+## FunctionDef need_to_generate(doc_item, ignore_list)
+**need_to_generate**: The function of need_to_generate is to determine whether documentation should be generated for a given DocItem based on its status and type, while also considering an ignore list.
-Furthermore, the `DocItemStatus` is utilized in the process of loading documentation from older project metadata and updating it according to the latest project structure and source code. This involves checking if the source code has changed or if there are changes in the references to the item, which would require updating the documentation status accordingly. +**parameters**: The parameters of this Function. +· doc_item: An instance of DocItem representing the documentation item to evaluate. +· ignore_list: A list of strings that specifies paths of items to ignore during the documentation generation process (default is an empty list). -**Note**: -When working with the `DocItemStatus` enumeration, it is important to understand the implications of each status on the documentation generation process. The status of an item directly influences whether its documentation needs to be generated or updated, which is critical for maintaining accurate and up-to-date documentation in the project. Developers should ensure that the status is correctly updated in response to changes in the source code or the item's references to manage the documentation workflow effectively. -## ClassDef DocItem -**DocItem**: The function of DocItem is to represent a documentation item within a software project's repository. This includes classes, functions, variables, and other elements that can be documented. - -**Attributes**: -- `item_type`: Specifies the type of the documentation item, such as a class, function, or variable. -- `item_status`: Indicates the current status of the documentation for the item, such as whether it has been generated or needs to be updated. -- `obj_name`: The name of the object or element being documented. -- `code_start_line` and `code_end_line`: Define the range of lines in the source code file that the documentation item covers. -- `md_content`: Stores the markdown content for different versions of the documentation. -- `content`: Stores the original information related to the documentation item. -- `children`: A dictionary of child documentation items, allowing for the representation of hierarchical structures such as classes containing methods. -- `father`: A reference to the parent documentation item, if any. -- `depth`: Indicates the depth of the item in the documentation hierarchy. -- `tree_path`: A list representing the entire path from the root to this item in the documentation hierarchy. -- `max_reference_ansce`: The maximum reference ancestor, used in determining documentation structure. -- `reference_who` and `who_reference_me`: Lists that track which items this item references and which items reference this item, respectively. -- `special_reference_type`: A list of booleans indicating special types of references. -- `reference_who_name_list` and `who_reference_me_name_list`: Lists storing the names of items that this item references and that reference this item, potentially from older versions. -- `visited`: A boolean flag used to mark whether the item has been visited, useful in traversing documentation structures. -- `multithread_task_id`: An identifier for tasks in a multithreading context. - -**Code Description**: -The `DocItem` class is a comprehensive representation of an item that can be documented within a software project. It is designed to capture not only the basic metadata such as the item's name and type but also its relationships within the project, such as parent-child relationships and reference relationships. 
This allows for a detailed and structured approach to generating and managing documentation. - -The class includes methods for determining ancestor relationships, traversing the documentation hierarchy, calculating depth, and parsing the tree path. It also provides functionality for finding specific items based on file paths and for printing the documentation structure recursively. The `get_file_name` and `get_full_name` methods are particularly useful for generating file paths and full names for documentation items, taking into account potential issues such as name duplication. - -**Note**: -When working with `DocItem`, it is important to accurately set the `item_type` and `item_status` attributes, as these directly affect how the documentation is generated and updated. The hierarchical structure represented by the `children` and `father` attributes enables the construction of a detailed documentation tree, which is crucial for understanding the organization of the project's codebase. Additionally, managing the reference relationships (`reference_who`, `who_reference_me`) is key to creating comprehensive documentation that accurately reflects the dependencies and interactions between different parts of the code. - -**Output Example**: -An example usage of `DocItem` could involve creating a documentation item for a Python function within a class, setting its attributes, and adding it to the documentation structure: -```python -doc_item = DocItem() -doc_item.obj_name = "example_function" -doc_item.item_type = DocItemType._class_function -doc_item.code_start_line = 10 -doc_item.code_end_line = 20 -# Assuming parent_item represents the class containing this function -parent_item.children[doc_item.obj_name] = doc_item -doc_item.father = parent_item -``` -This example demonstrates how to create a `DocItem` for a function, set its basic attributes, and integrate it into the documentation hierarchy under its parent class. -### FunctionDef has_ans_relation(now_a, now_b) -**has_ans_relation**: The function of `has_ans_relation` is to check if there is an ancestor relationship between two nodes and return the earlier node if such a relationship exists. +**Code Description**: The need_to_generate function evaluates whether documentation should be generated for a specific DocItem. It first checks the status of the doc_item. If the status is DocItemStatus.doc_up_to_date, it returns False, indicating that no documentation generation is necessary. -**Parameters**: -- `now_a (DocItem)`: The first node in the comparison. -- `now_b (DocItem)`: The second node in the comparison. +Next, the function retrieves the full name of the doc_item using the get_full_name method, which constructs a hierarchical path of the item. The function then checks the type of the doc_item against the DocItemType enumeration. If the item type is one of _file, _dir, or _repo, the function returns False, as documentation generation is not intended for these higher-level items. -**Code Description**: -The `has_ans_relation` function is designed to determine if one `DocItem` node is an ancestor of another within a documentation or code hierarchy. This is achieved by checking if one node exists in the `tree_path` of the other. The `tree_path` is presumably a property of `DocItem` instances that lists the ancestors or the path through the hierarchy to reach that node. +If the item type is appropriate for documentation generation, the function traverses up the hierarchy of the doc_item by accessing its father attribute. 
During this traversal, it checks if the current item is a file. If it is, the function evaluates whether the full path of the current item starts with any of the paths in the ignore_list. If it does, the function returns False, indicating that the item should be skipped. If the item is not in the ignore list, the function returns True, indicating that documentation generation is warranted. -- If `now_b` is found in the `tree_path` of `now_a`, it implies that `now_b` is an ancestor of `now_a`, and thus `now_b` is returned. -- Conversely, if `now_a` is found in the `tree_path` of `now_b`, this indicates that `now_a` is an ancestor of `now_b`, leading to the return of `now_a`. -- If neither node is an ancestor of the other, the function returns `None`. +The need_to_generate function is called by other functions within the project, such as check_has_task and print_recursive methods of the DocItem class, as well as the generate_doc_for_a_single_item method in the Runner class. These functions rely on need_to_generate to determine if a task should be marked for documentation generation or if it should be skipped based on the current state and hierarchy of the documentation items. -This function plays a crucial role in the context of parsing references within a project's documentation or codebase, as seen in its usage within the `walk_file` method of the `MetaInfo` class. Specifically, it is used to filter out references that occur between ancestor nodes, which are not considered in certain analyses or operations. This is important for avoiding circular references or simplifying the reference structure by ignoring internal references within a hierarchical path. +**Note**: When using the need_to_generate function, it is crucial to ensure that the doc_item has been properly initialized and that the ignore_list accurately reflects the paths of items that should be excluded from documentation generation. -**Note**: -- It is essential to ensure that the `tree_path` property of `DocItem` instances accurately reflects the hierarchy of nodes to guarantee the correct functioning of this function. -- This function assumes that both input parameters, `now_a` and `now_b`, are instances of `DocItem` and that they have the `tree_path` attribute available and correctly populated. +**Output Example**: A possible return value of the function could be True or False, depending on the evaluation of the doc_item's status, type, and its presence in the ignore_list. For instance, if the doc_item is a function that has not been generated yet and is not in the ignore list, the function would return True, indicating that documentation should be generated. +## ClassDef DocItem +**DocItem**: The function of DocItem is to represent individual documentation items within a project, encapsulating their metadata and relationships. + +**attributes**: The attributes of this Class. +· item_type: Specifies the type of the documentation item, defined by the DocItemType enumeration. +· item_status: Indicates the current status of the documentation item, defined by the DocItemStatus enumeration. +· obj_name: A string representing the name of the object. +· code_start_line: An integer indicating the starting line number of the code associated with the item. +· code_end_line: An integer indicating the ending line number of the code associated with the item. +· md_content: A list that stores different versions of the documentation content. +· content: A dictionary that holds the original information related to the documentation item. 
+· children: A dictionary mapping child object names to their corresponding DocItem instances, representing the hierarchical structure.
+· father: A reference to the parent DocItem, establishing a parent-child relationship in the hierarchy.
+· depth: An integer representing the depth of the item in the documentation tree.
+· tree_path: A list that maintains the path from the root to the current item in the documentation tree.
+· max_reference_ansce: A reference to the maximum ancestor DocItem, if applicable.
+· reference_who: A list of DocItem instances that the current item references.
+· who_reference_me: A list of DocItem instances that reference the current item.
+· special_reference_type: A list of boolean values indicating special reference types for the current item.
+· reference_who_name_list: A list of strings representing the names of items that the current item references, potentially from an older version.
+· who_reference_me_name_list: A list of strings representing the names of items that reference the current item, potentially from an older version.
+· has_task: A boolean indicating whether the item has an associated task for documentation generation.
+· multithread_task_id: An integer representing the task ID in a multithreaded context.
+
+**Code Description**: The DocItem class serves as a fundamental building block for managing documentation items within a project. Each instance of DocItem encapsulates essential metadata about a specific code element, including its type, status, name, and the range of code it covers. The hierarchical structure of documentation items is maintained through parent-child relationships, allowing for a tree-like organization of documentation.
+
+The class provides several methods to facilitate various operations:
+- `has_ans_relation(now_a: DocItem, now_b: DocItem)`: A static method that checks if there is an ancestor relationship between two DocItem instances, returning the earlier node if such a relationship exists.
+- `get_travel_list()`: This method performs a pre-order traversal of the documentation tree, returning a list of DocItem instances in the order they are visited.
+- `check_depth()`: This method recursively calculates and updates the depth of the current item based on its children, ensuring that the depth attribute accurately reflects the item's position in the tree.
+- `parse_tree_path(now_path)`: This method recursively constructs the path from the root to the current item, updating the tree_path attribute.
+- `get_file_name()`: Returns the file name associated with the current DocItem, derived from its full name.
+- `get_full_name(strict=False)`: Constructs and returns the full hierarchical name of the current item, optionally including information about duplicate names.
+- `find(recursive_file_path: list)`: Searches for a corresponding DocItem based on a list of file paths, returning the item if found or None otherwise.
+- `check_has_task(now_item: DocItem, ignore_list: List[str] = [])`: A static method that checks if a DocItem requires documentation generation, updating the has_task attribute accordingly.
+- `print_recursive(...)`: A method that recursively prints the details of the DocItem and its children, providing a visual representation of the documentation structure.
+
+The DocItem class is utilized throughout the project, particularly in the context of the MetaInfo class, which manages the overall structure of documentation items.
The relationships established by DocItem instances are crucial for understanding how different code elements reference each other, which is essential for generating accurate and comprehensive documentation.
+
+**Note**: When using the DocItem class, it is important to maintain the integrity of the hierarchical relationships and ensure that the item statuses are updated appropriately to reflect changes in the codebase. This will facilitate accurate documentation generation and management.
+
+**Output Example**: An example output of the `get_full_name()` method for a class type might look like this:
+`"repo_agent/doc_meta_info.py/DocItem"`
+This indicates the full path of the DocItem within the project structure.
### FunctionDef has_ans_relation(now_a, now_b)
-**has_ans_relation**: The function of `has_ans_relation` is to check if there is an ancestor relationship between two nodes and return the earlier node if such a relationship exists.
+**has_ans_relation**: The function of has_ans_relation is to check whether an ancestor relationship exists between two nodes, and to return the earlier node if it does.

-**Parameters**:
-- `now_a (DocItem)`: The first node in the comparison.
-- `now_b (DocItem)`: The second node in the comparison.
+**parameters**: The parameters of this function are as follows:
+· parameter1: now_a (DocItem): The first node.
+· parameter2: now_b (DocItem): The second node.

-**Code Description**:
-The `has_ans_relation` function is designed to determine if one `DocItem` node is an ancestor of another within a documentation or code hierarchy. This is achieved by checking if one node exists in the `tree_path` of the other. The `tree_path` is presumably a property of `DocItem` instances that lists the ancestors or the path through the hierarchy to reach that node.
+**Code Description**: The has_ans_relation function determines whether an ancestor relationship exists between two DocItem nodes. Specifically, it checks whether now_b is in now_a's tree_path; if so, it returns now_b, indicating that now_b is an ancestor of now_a. Conversely, if now_a is in now_b's tree_path, it returns now_a, indicating that now_a is an ancestor of now_b. If there is no ancestor relationship between the two, it returns None.
+
+Within the project, this function is mainly called from the walk_file function. There, while iterating over the references of the current object, has_ans_relation is called to determine the relationship between the current object now_obj and the referencer referencer_node. If an ancestor relationship exists between the two nodes, the reference between them is no longer considered, which avoids circular-reference problems between nodes along the same path. This design keeps the reference relationships clear and accurate and avoids unnecessary complexity.
+
+**Note**: When using this function, make sure both arguments are valid DocItem objects to avoid runtime errors.

-**Output Example**:
-- If `now_a` is an ancestor of `now_b`, `now_a` is returned.
-- If `now_b` is an ancestor of `now_a`, `now_b` is returned.
-- If neither is an ancestor of the other, `None` is returned.
+**Output Example**: Assuming now_a and now_b are two nodes and now_b is an ancestor of now_a, the function returns now_b. If the two have no ancestor relationship, it returns None.

-For instance, if `now_a.tree_path` contains `['root', 'child1', 'child2']` and `now_b.tree_path` is `['root', 'child1']`, calling `has_ans_relation(now_a, now_b)` would return `now_b`, indicating that `now_b` is an ancestor of `now_a`.
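+
+A sketch consistent with the behavior described above:
+
+```python
+@staticmethod
+def has_ans_relation(now_a, now_b):
+    # A node that appears on the other's root-to-node path is an ancestor.
+    if now_b in now_a.tree_path:
+        return now_b
+    if now_a in now_b.tree_path:
+        return now_a
+    return None
+```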
***
### FunctionDef get_travel_list(self)
-**get_travel_list**: The function of `get_travel_list` is to return a list of nodes in a pre-order traversal sequence, with the root node at the beginning.
+**get_travel_list**: The function of get_travel_list is to return a pre-order traversal list of the current node and all of its child nodes.

-**Parameters**: This function does not take any external parameters except for the implicit `self` parameter, which refers to the instance of the class from which it is called.
+**parameters**: This function has no parameters.

-**Code Description**: The `get_travel_list` function is designed to traverse a tree-like structure in a pre-order manner. It starts with the node on which it is called (referred to as `self`) and then recursively traverses through all its children, aggregating the results into a single list. The traversal is done by iterating over the `children` attribute of the node, which is expected to be a dictionary where keys are identifiers of the children and values are the child nodes themselves. For each child, the function calls itself (`get_travel_list`) to get the list of nodes in the subtree rooted at that child. These lists are then concatenated with the current node to build the complete traversal list. The function finally returns this list, which contains the nodes in the order they were visited.
+**Code Description**: The get_travel_list function implements a pre-order traversal of the tree structure, returning a list containing the current node and all of its child nodes. The function first puts the current node (self) into a list now_list and then iterates over all of the current node's children. For each child node, it calls get_travel_list recursively and appends the returned list of child nodes to now_list. Finally, the function returns now_list, which contains the nodes in order from the root node through all of its children.

-In the context of its usage within the project, specifically in the `get_task_manager` method of the `MetaInfo` class, the `get_travel_list` function plays a crucial role in gathering all the nodes (or `DocItem` instances) that are part of a documentation tree. This list of nodes is then filtered and sorted based on certain criteria (like being part of a whitelist or satisfying a task availability condition) to prepare a task list for processing. The pre-order traversal ensures that parent nodes are processed before their children, which is essential for resolving dependencies in tasks that are associated with these nodes.
+In the project, get_travel_list is called by the get_task_manager function, whose purpose is to obtain a task manager based on topological reference relationships. It first calls now_node.get_travel_list() to obtain the list of the current node and all of its children (the doc_items). get_task_manager then filters and sorts these nodes to build a valid task manager. In this way, get_travel_list provides the node information needed to build the task manager.

-**Note**: It is important for users of this function to ensure that the tree structure does not contain cycles, as the recursive nature of `get_travel_list` does not handle cycles and could lead to infinite recursion. Additionally, the function assumes that each node's children are stored in a dictionary attribute named `children`, which should be considered when designing classes that will utilize this function.
+**Note**: When using this function, make sure the current node has child nodes; otherwise the returned list will contain only the current node.

-**Output Example**:
-Assuming a simple tree structure where a root node has two children and one of those children has a child of its own, the output of calling `get_travel_list` on the root node might look like this:
-```
-[root_node, child1, child1_child, child2]
-```
-This list represents the nodes in the order they were visited during the pre-order traversal, starting with the root node, followed by its first child and that child's child, and finally the root node's second child.
+**Output Example**: Assuming the current node is A, A has two children B and C, and B has one child D, the return value of get_travel_list might be: [A, B, D, C].
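+
+A sketch consistent with the traversal described above:
+
+```python
+def get_travel_list(self):
+    # Pre-order: the current node first, then each subtree in turn.
+    now_list = [self]
+    for _, child in self.children.items():
+        now_list = now_list + child.get_travel_list()
+    return now_list
+```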
***
### FunctionDef check_depth(self)
-**check_depth**: The function of `check_depth` is to recursively calculate the depth of a node within a tree structure.
+**check_depth**: The check_depth function recursively calculates the depth of a node in the tree.

-**Parameters**: This function does not take any external parameters as it operates on the instance of the object it belongs to.
+**parameters**: This function has no parameters.

-**Code Description**: The `check_depth` function is a method of the `DocItem` class, designed to determine the depth of a node in a hierarchical tree structure, such as a file system or a nested documentation structure. The depth is calculated based on the node's position in the tree, with the root node having a depth of 0. The function first checks if the current node (`self`) has any children. If not, it sets the node's depth to 0, indicating it is a leaf node. If the node does have children, the function iterates through each child, recursively calling `check_depth` on them to determine their depth. The maximum depth among all children is found, and the current node's depth is set to this maximum value plus one, to account for the current node's position above its children. This method ensures that each node in the tree accurately reflects its depth relative to the root node.
+**Code Description**: The check_depth function recursively computes the depth of the current node in the tree. First, it checks whether the current node has children. If there are no children, the depth is set to 0 and returned. If children exist, the function iterates over all of them, calls check_depth recursively to obtain each child's depth, and records the maximum child depth. Finally, the current node's depth is the maximum child depth plus 1, and that value is returned.

-In the context of the project, `check_depth` is invoked on the root node of the hierarchical tree representing the project's structure after the tree has been fully constructed from the project hierarchy JSON. This is crucial for understanding the structure and depth of the project's documentation or file system, as it allows for operations that depend on the depth of specific nodes, such as rendering visual representations or performing depth-specific analyses.
+In the project, this function is called by the from_project_hierarchy_json method of the MetaInfo class. That method is responsible for parsing the project's hierarchy and building the tree structure of DocItem objects. Once the tree has been built, check_depth is called to compute the depth of the whole tree. This process ensures that the depth information of every DocItem node is correctly computed and stored, which facilitates subsequent operations and analysis on the tree structure.

-**Note**: It is important to ensure that the tree structure is fully constructed and that each node correctly references its children before calling `check_depth`. Failing to do so may result in incorrect depth calculations. Additionally, since this function modifies the `depth` attribute of the nodes, it should be used with caution if the tree structure is subject to concurrent modifications.
+**Note**: When using this function, make sure the complete tree structure has been built before the call, to guarantee accurate depth calculation.

-**Output Example**: For a simple tree with a root node and two levels of child nodes, calling `check_depth` on the root node would set its depth to 2, the depth of the first level of children to 1, and the depth of the leaf nodes to 0.
+**Output Example**: Assuming a node's maximum child depth is 2, the return value of check_depth for that node will be 3.
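+
+A sketch consistent with the recursion described above:
+
+```python
+def check_depth(self):
+    # A leaf node has depth 0.
+    if len(self.children) == 0:
+        self.depth = 0
+        return self.depth
+    # Otherwise the depth is one more than the deepest child.
+    max_child_depth = 0
+    for _, child in self.children.items():
+        max_child_depth = max(child.check_depth(), max_child_depth)
+    self.depth = max_child_depth + 1
+    return self.depth
+```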
***
### FunctionDef parse_tree_path(self, now_path)
-**parse_tree_path**: The function of `parse_tree_path` is to recursively parse the tree path by appending the current node to the given path.
-
-**Parameters**:
-- `now_path` (list): The current path in the tree, represented as a list.
+**parse_tree_path**: The function of parse_tree_path is to recursively parse the tree path by appending the current node to the given path.
+
+**parameters**: The parameters of this function are as follows:
+· now_path: The current path in the tree, as a list.

-**Code Description**:
-The `parse_tree_path` function is a method of the `DocItem` class, designed to construct the path for each node within a hierarchical tree structure. This method plays a crucial role in organizing and maintaining the relationship between nodes in the tree, specifically within the context of documenting and managing project hierarchies.
+**Code Description**: The parse_tree_path function recursively parses paths in the tree structure. It takes a list now_path as a parameter, representing the current path. Inside the function, the current node (the object on which the function is called) is first appended to now_path, forming the new path tree_path. The function then iterates over all children of the current node and recursively calls parse_tree_path on each child, passing the updated tree_path to it.

-Upon invocation, `parse_tree_path` updates the `tree_path` attribute of the current `DocItem` instance by appending itself (`self`) to the `now_path` list. This operation effectively records the path taken to reach the current node from the root of the tree. Following this, the function iterates over the `children` dictionary of the current node. For each child node, represented as a key-value pair where the key is an identifier and the value is a `DocItem` instance, the function recursively calls `parse_tree_path` on the child node, passing the updated `tree_path` as the argument. This recursive approach ensures that the path for each node in the tree is accurately constructed, reflecting the hierarchical structure of the project.

-In the broader context of the project, `parse_tree_path` is called on the root node of the project's hierarchical tree structure after the tree has been fully constructed. This is evident from its usage in the `from_project_hierarchy_json` method of the `MetaInfo` class, where it is used to finalize the construction of the project hierarchy by establishing the paths for all nodes. This step is crucial for enabling efficient navigation and management of the project's structure, as it allows for the direct association of each node with its location within the hierarchy.
+In the project, this function is called in the from_project_hierarchy_json method of the MetaInfo class. In that method, a DocItem object is first created as the root node of the tree. Then, by parsing the project's hierarchy JSON, the parent-child relationships of the tree are built. Finally, parse_tree_path is called to parse the paths of the whole tree, ensuring that every node correctly records its position in the tree.

-**Note**:
-- It is essential to ensure that the tree structure is fully constructed and all parent-child relationships are correctly established before calling `parse_tree_path`. Calling this function prematurely may result in incomplete or incorrect path construction.
-- The method modifies the `tree_path` attribute of `DocItem` instances in place, which means that the original `now_path` list provided to the function will not be altered. This design choice helps prevent unintended side effects on the input data.
+**Note**: When using this function, make sure the now_path argument is a valid list and that the tree structure has been correctly built before the call. The function does not return a value; instead, it directly modifies the tree_path attribute of the object it is called on.
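+
+A sketch consistent with the description above:
+
+```python
+def parse_tree_path(self, now_path):
+    # Record the root-to-this-node path, then recurse into the children.
+    self.tree_path = now_path + [self]
+    for _, child in self.children.items():
+        child.parse_tree_path(self.tree_path)
+```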
***
### FunctionDef get_file_name(self)
-**get_file_name**: The function of `get_file_name` is to retrieve the file name of the current object with a ".py" extension.
+**get_file_name**: The function of get_file_name is to return the file name of the current object, dropping anything that follows its .py suffix.

-**Parameters**: This function does not accept any parameters.
+**parameters**: This function has no parameters.

-**Code Description**: The `get_file_name` function is designed to extract the file name associated with the current object. It begins by calling the `get_full_name` method to obtain the full hierarchical path of the object within the project structure. This path includes the object's name and its ancestors, separated by slashes. The `get_full_name` method can operate in a strict mode, which checks for name duplication issues, but `get_file_name` uses it in its default mode.
+**Code Description**:
+The get_file_name function obtains the file name of the current object. It first calls the get_full_name method to get the object's full name, then uses string operations to cut the name at the .py suffix and re-append .py, finally returning the processed file name. In the implementation, the full_name variable stores the complete object name; the split method splits it on ".py", and the first part is taken with the ".py" suffix appended.

-Once the full name is obtained, `get_file_name` processes this string to ensure the file name ends with a ".py" extension. It does this by splitting the full name string at the ".py" substring, effectively removing any additional path or file information that might follow the ".py" part of the string. After splitting, it appends ".py" back to the first part of the split result, ensuring the returned file name correctly ends with the ".py" extension. This approach guarantees that the function returns a valid Python file name, even if the original full name contained additional extensions or path-like structures after the ".py" part.
+This function is called by several other functions in the project. For example, in the parse_reference method of the MetaInfo class, get_file_name is used to obtain the file name of a file node in order to resolve reference relationships. In the in_white_list inner function, get_file_name is used to check whether the current object is in the whitelist. These call sites show that get_file_name plays an important role in file handling and reference resolution.

-**Note**: It is important to note that the function assumes the presence of a ".py" extension in the full name obtained from `get_full_name`. If the full name does not contain ".py", the function's behavior might not align with expectations. Additionally, the function's accuracy and effectiveness are directly tied to the correct implementation and behavior of the `get_full_name` method. Any changes or issues in `get_full_name` could impact `get_file_name`.
+**Note**: When using this function, make sure the current object has been correctly initialized and that get_full_name can return a valid full name.

-**Output Example**: If the full hierarchical path of the object is "repo_agent/doc_meta_info.py/DocItem", the output of `get_file_name` would be "doc_meta_info.py". This output demonstrates how the function extracts and ensures the file name ends with a ".py" extension, making it suitable for documentation and reference purposes within the project.
+**Output Example**: Assuming the full name of the current object is "repo_agent/example.py", the return value of get_file_name will be "repo_agent/example.py".
***
### FunctionDef get_full_name(self, strict)
-**get_full_name**: The function of `get_full_name` is to generate a string representation of the hierarchical path from the current object to its highest ancestor in the project structure, with each level separated by a slash.
+**get_full_name**: The function of get_full_name is to retrieve the names of the object and its ancestors in a hierarchical structure, separated by slashes.

-**Parameters**:
-- `strict` (bool, optional): A flag to determine if the function should operate in strict mode. In strict mode, if an object's name differs from the key used by its parent to reference it, the function appends "(name_duplicate_version)" to the name. Defaults to False.
-
-**Code Description**:
-The `get_full_name` function constructs a hierarchical path for an object within a project's structure, starting from the object itself and traversing up to its root (highest ancestor). It does this by iteratively accessing each object's parent (`father`) and compiling the names (`obj_name`) of each object encountered into a list. This list is then reversed to ensure the path is constructed from the top down (root to the current object) and joined into a single string with slashes (`/`) as separators.
-
-If the `strict` parameter is set to True, the function performs an additional check for each object to see if its name matches the key its parent uses to reference it. If there is a discrepancy, indicating a name duplication issue, the function appends "(name_duplicate_version)" to the object's name to highlight this fact.
+**parameters**: The parameters of this Function.
+· strict: A boolean that determines whether to enforce strict naming conventions when retrieving names.

-This function is essential for generating documentation, as it provides a clear and navigable path to each object within the project's hierarchy, facilitating better understanding and navigation of the project structure.
+**Code Description**: The get_full_name function is designed to traverse the hierarchy of an object, starting from the current object and moving upwards to its ancestors, collecting their names along the way. If the strict parameter is set to True, the function checks for name duplicates among the siblings of the current object and appends "(name_duplicate_version)" to the name if a duplicate is found. The function initializes an empty list, name_list, to store the names. It then enters a loop that continues until there are no more ancestors (i.e., when the father attribute is None). In each iteration, it retrieves the name of the current object and checks for duplicates if strict mode is enabled. The name is then added to the front of the name_list. After traversing all ancestors, the function removes the first element of name_list (the topmost ancestor, i.e. the repository root) and joins the remaining names with slashes to form a single string, which is returned as the output.
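+
+A sketch consistent with the traversal described above (the duplicate check is simplified for illustration):
+
+```python
+def get_full_name(self, strict=False):
+    if self.father is None:
+        return self.obj_name
+    name_list = []
+    now = self
+    while now is not None:
+        self_name = now.obj_name
+        if strict and now.father is not None:
+            # A sibling key that differs from obj_name signals a duplicate name.
+            for key, child in now.father.children.items():
+                if child is now and key != now.obj_name:
+                    self_name = key + "(name_duplicate_version)"
+                    break
+        name_list = [self_name] + name_list  # prepend, so the root ends up first
+        now = now.father
+    # Drop the topmost ancestor (the repository root) and join the rest.
+    return "/".join(name_list[1:])
+```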
-**Note**: -- It is important to ensure that the object hierarchy is correctly maintained within the project to guarantee accurate path generation. -- The function assumes that each object has a `father` attribute pointing to its parent in the hierarchy and an `obj_name` attribute containing its name. If these assumptions do not hold, the function may not work as expected. +This function is called within the build_prompt method of the ChatEngine class. In this context, it is used to obtain the full path of the DocItem object, which is essential for generating documentation and understanding the context of the code being processed. The full name is then utilized to provide a clear reference to the location of the object within the project structure. -**Output Example**: -For an object named "ChildFunction" within a class "ParentClass" in a module "module_name", and assuming strict mode is not enabled, the output of `get_full_name` would be: -``` -module_name/ParentClass/ChildFunction -``` -If strict mode is enabled and the object's name as referenced by its parent differs, the output might look like this: -``` -module_name/ParentClass/ChildFunction(name_duplicate_version) -``` +**Note**: When using this function, ensure that the object has been properly initialized and that the hierarchy is correctly established so that the function can accurately retrieve the names of the ancestors. -**Relationship with Callers**: -The `get_full_name` function is utilized in various parts of the project to generate a full hierarchical path for objects, which is crucial for documentation generation, reference tracking, and understanding the project's structure. For instance, it is used in the `generate_doc` method of the `ChatEngine` class to construct the file path needed for documentation output. It also plays a role in the `get_file_name` method to derive the file name from the full path, and in the `parse_reference` method within `MetaInfo` to establish and navigate the relationships between objects based on their hierarchical paths. This widespread use underscores the function's importance in facilitating project navigation and documentation coherence. +**Output Example**: If the current object has the name "example" and its ancestors are "folder1" and "folder2", the output of get_full_name would be "folder2/folder1/example". *** ### FunctionDef find(self, recursive_file_path) -**find**: The function of find is to locate a specific file within the repository based on a given list of file paths. +**find**: The function of find is to locate a specific file within the repository based on a list of file paths, returning the corresponding DocItem if found, or None if not. **parameters**: The parameters of this Function. -- recursive_file_path (list): A list representing the hierarchical file path to search for within the repository. +· recursive_file_path: A list of file paths to search for within the repository. -**Code Description**: This function begins by asserting that the object it is called upon is of the type representing the root of the repository (`_repo`). It initializes a position counter (`pos`) and sets the current working object (`now`) to itself. The function then iterates over the elements in the `recursive_file_path` list. For each element, it checks if the current element exists as a key in the `now` object's children dictionary. If the element is not found, the function returns `None`, indicating that the specified path does not exist within the repository structure. 
If the element is found, the function updates the `now` object to be the child corresponding to the current path element and increments the position counter. This process continues until all elements in the path list have been processed. If the entire path is successfully traversed, the function returns the `DocItem` object corresponding to the final path element, indicating the target file has been found. +**Code Description**: The find function is designed to traverse the hierarchical structure of documentation items within a repository, starting from the root node. It takes a list of file paths (recursive_file_path) as input and attempts to locate the corresponding file in the repository's structure. -**Note**: It is crucial to ensure that the `recursive_file_path` accurately represents the hierarchical structure of the repository from the root to the target file. Incorrect or incomplete paths will result in the function returning `None`. Additionally, this function assumes that it is called on an object representing the repository's root (`_repo`), and misuse in a different context may lead to unexpected behavior. +The function begins by asserting that the current item's type is a repository (DocItemType._repo). It initializes a position counter (pos) and a reference to the current item (now), which starts at the root. The function then enters a while loop that continues as long as the position counter is less than the length of the recursive_file_path list. -**Output Example**: Assuming a repository structure where a file named `example.py` exists within a directory `dir1` which is in the root of the repository, calling `find(['dir1', 'example.py'])` on the repository root object would return the `DocItem` object representing `example.py`. If the file or path does not exist, the function would return `None`. -*** -### FunctionDef print_recursive(self, indent, print_content) -**print_recursive**: The function of `print_recursive` is to recursively print the structure of a repository object, including its type, name, and children, with optional content printing. +Within the loop, it checks if the current path segment (recursive_file_path[pos]) exists as a key in the children of the current item (now). If the path segment is not found, the function returns None, indicating that the file does not exist in the specified path. If the path segment is found, it updates the current item reference to the corresponding child and increments the position counter. -**Parameters**: -- `indent`: An integer representing the level of indentation for the current object in the printout. It defaults to 0, indicating the top level with no indentation. -- `print_content`: A boolean flag that, when set to True, enables the printing of additional content for each object. It defaults to False, indicating that only the structure is printed. +Once all segments of the path have been successfully traversed, the function returns the current item (now), which represents the found file as a DocItem. This function is crucial for navigating the repository's structure and is utilized in other parts of the code, such as the walk_file function within the MetaInfo class. The walk_file function calls find to locate files based on their paths while processing references within those files. -**Code Description**: -The `print_recursive` function is designed to visually represent the hierarchical structure of repository objects in a clear and structured manner. 
It starts by defining a nested function, `print_indent`, which generates a string of spaces for indentation based on the current level (`indent`) of the object. This helps in visually distinguishing between different levels of the hierarchy. +**Note**: It is important to ensure that the recursive_file_path provided is accurate and corresponds to the structure of the repository; otherwise, the function will return None. -The function then proceeds to print the current object's type and name. The type is printed using a colored string representation obtained from the `print_self` method of the `DocItemType` class. This method returns the object's type (e.g., directory, file, class, function) in a specific color that enhances readability and visual differentiation in the output. The name of the object is printed alongside its type. +**Output Example**: If the function successfully finds a file located at "src/utils/helper.py", it might return a DocItem object representing that file, while if the file does not exist, it will return None. +*** +### FunctionDef check_has_task(now_item, ignore_list) +**check_has_task**: The function of check_has_task is to determine whether a given DocItem or any of its children has a task that requires documentation generation. -If the current object has children (indicated by the presence of items in the `self.children` dictionary), the function prints the number of children. This provides a quick overview of the complexity or size of the current object in terms of its sub-objects. +**parameters**: The parameters of this Function. +· now_item: An instance of DocItem representing the current documentation item being evaluated for tasks. +· ignore_list: A list of strings that specifies paths of items to ignore during the documentation generation process (default is an empty list). -The function then recursively calls itself for each child object, increasing the `indent` parameter by 1 for each level of depth. This recursive approach ensures that the entire structure of the repository object, down to the lowest level, is printed in a structured and indented format. +**Code Description**: The check_has_task function operates on a DocItem instance, referred to as now_item, and assesses whether it or any of its child items necessitate documentation generation. The function begins by invoking the need_to_generate function, passing the now_item and the ignore_list as arguments. This call determines if the current item should be marked as having a task based on its status and type. -If the `print_content` parameter is set to True, additional content for each object can be printed, although the provided code snippet does not include the implementation for printing content. This parameter allows for flexible control over the level of detail included in the output. +If need_to_generate returns True, the has_task attribute of now_item is set to True, indicating that documentation generation is warranted for this item. The function then iterates over the children of now_item, recursively calling check_has_task on each child. This recursive evaluation ensures that if any child item is marked with a task, the parent item (now_item) will also be marked as having a task. The has_task attribute of now_item is updated to reflect the status of its children, using a logical OR operation to combine the results. -**Note**: -- The visual differentiation of object types in the output relies on the terminal's support for colored text. 
Ensure that the necessary libraries for colored text output (e.g., `colorama`) are correctly installed and imported. -- The `print_recursive` function is particularly useful for debugging or documentation purposes, where understanding the hierarchical structure of repository objects is necessary. +The check_has_task function is called within the diff function, which is responsible for checking changes in documentation and determining which documents need to be updated or generated. In this context, check_has_task is used to evaluate the hierarchical tree of documentation items represented by new_meta_info.target_repo_hierarchical_tree. The ignore_list is passed from the project settings to ensure that specific paths are excluded from the evaluation. -**Output Example**: -Assuming a repository structure with a directory named "Project" containing two files, "README.md" and "main.py", the output might look like this: -``` -[Green Text]Directory: Project, 2 children - |- [Yellow Text]File: README.md - |- [Yellow Text]File: main.py -``` -This example assumes the terminal supports colored text, with green representing directories and yellow representing files. The indentation and the "|-" symbol visually indicate the hierarchical relationship between the directory and its files. -#### FunctionDef print_indent(indent) -**print_indent**: The function of `print_indent` is to generate a string that represents indentation and a leading marker for hierarchical display. +**Note**: When using the check_has_task function, it is important to ensure that the now_item has been properly initialized and that the ignore_list accurately reflects the paths of items that should be excluded from documentation generation. This function is crucial for maintaining an accurate representation of which documentation items require updates based on their current state and hierarchy. +*** +### FunctionDef print_recursive(self, indent, print_content, diff_status, ignore_list) +### `print_recursive` Function Documentation -**Parameters**: -- `indent` (optional): An integer representing the level of indentation. Defaults to 0. +#### Function Overview: +The `print_recursive` function is responsible for recursively printing the structure of a repository object, including its type, name, and status. It prints the current item and iterates over its child items, formatting their output with appropriate indentation. The function also provides an option to print additional content and handle status differences between items. -**Code Description**: -The `print_indent` function is designed to assist in visually formatting hierarchical structures by providing an indentation mechanism. It takes a single optional parameter, `indent`, which specifies the depth of indentation. The function works as follows: -- If the `indent` parameter is 0, which means no indentation is required, the function returns an empty string. This case is typically used for the root level in a hierarchical display where no indentation is needed. -- For any value of `indent` greater than 0, the function returns a string composed of two spaces (`" "`) repeated `indent` times, followed by a vertical bar and a hyphen (`"|-"`). This pattern visually represents the level of indentation and marks the beginning of a new hierarchical level or a child item. +#### Parameters: +- **`indent`** (`int`, default=0): + The number of spaces to indent when printing the item. This is used to visually represent the hierarchical structure of the repository. 
Higher values indicate deeper levels in the hierarchy. -**Note**: -- The function assumes that an indentation level is visually represented by two spaces. This is a design choice and could be adjusted if a different spacing is desired for the indentation. -- The addition of the `"|-"` at the end of the indentation spaces serves as a visual cue to indicate a new level or item in the hierarchy. It is important to maintain consistency in its use across the application to ensure a uniform hierarchical representation. +- **`print_content`** (`bool`, default=False): + A flag that determines whether additional content of the item should be printed. This parameter is not currently used within the function, but it is included for potential future use or extensions. -**Output Example**: -For an `indent` value of 3, the output of `print_indent` would be: -``` - |- -``` -This output demonstrates how the function visually represents three levels of indentation followed by the marker for a new hierarchical item. -*** -*** -## FunctionDef find_all_referencer(repo_path, variable_name, file_path, line_number, column_number, in_file_only) -**find_all_referencer**: The function of `find_all_referencer` is to find all references of a given variable within a specified file or project scope and return their locations. +- **`diff_status`** (`bool`, default=False): + A flag indicating whether the function should consider differences in status between items. If `True`, only items with status differences will be printed, and this will be based on whether the item requires generation (using `need_to_generate`). -**Parameters**: -- `repo_path`: The path to the repository where the search is conducted. -- `variable_name`: The name of the variable for which references are being searched. -- `file_path`: The path to the file within the repository in which the variable is located. -- `line_number`: The line number where the variable is defined. -- `column_number`: The column number where the variable is defined. -- `in_file_only`: A boolean flag indicating whether to search for references only within the same file or throughout the entire project. +- **`ignore_list`** (`List[str]`, default=[]): + A list of item names to be ignored during the recursive print operation. This allows selective exclusion of items from being printed based on their names. -**Code Description**: -The `find_all_referencer` function utilizes the `jedi` library to analyze Python code and find references to a specified variable. It constructs a `jedi.Script` object by combining the `repo_path` and `file_path` to locate the file in question. Depending on the `in_file_only` flag, it either searches for references within the same file (`scope="file"`) or across the entire project. It then filters the found references to match the `variable_name` and excludes the reference if it points to the variable's definition itself. The function returns a list of tuples, each containing the relative path to the module (from `repo_path`), and the line and column numbers where a reference was found. +#### Function Description: +The `print_recursive` function starts by printing the name and type of the current item (typically a `DocItem`). It formats the output with indentation based on the `indent` parameter and applies a specific color to the item type to enhance visual clarity. If `diff_status` is enabled and the item requires documentation generation, the item’s status is also included in the printed output. 
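+As a rough illustration of the printing flow described in this section, the sketch below models items as plain dicts and stubs out both need_to_generate and the color-coded type formatting; the stubs are assumptions for demonstration, not the project's real implementations:
+```python
+def need_to_generate(item, ignore_list=()):
+    # Stub: the real check inspects the item's status, type, and ignore paths.
+    return item.get("status_changed", False)
+
+def print_recursive(item, indent=0, print_content=False, diff_status=False, ignore_list=()):
+    def print_indent(n):
+        return "" if n == 0 else "  " * n + "|-"
+    status = ""
+    if diff_status and need_to_generate(item, ignore_list):
+        status = " : status_changed"  # placeholder for the real status field
+    print(f"{print_indent(indent)}{item['type']}: {item['name']}{status}")
+    for child in item.get("children", {}).values():
+        if diff_status and not child.get("has_task", False):
+            continue  # skip subtrees with no pending documentation task
+        print_recursive(child, indent + 1, print_content, diff_status, ignore_list)
+
+repo = {"type": "_repo", "name": "repo", "status_changed": True, "children": {
+    "main.py": {"type": "_file", "name": "main.py", "status_changed": True, "has_task": True}}}
+print_recursive(repo, diff_status=True)
+```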
-This function is called within the context of parsing and analyzing documentation and code references in a project. Specifically, it is used in the `walk_file` method of a class responsible for parsing references within files. This method iterates through objects in a file, checking if they are in a whitelist (to optimize search speed by limiting the scope to the same file) and then uses `find_all_referencer` to find references to these objects. The references found are then processed to handle cases like unstaged or untracked files and to update the documentation and reference structure of the project accordingly. +- The function first checks whether the item type is `_repo`, in which case it uses a target repository name defined in the settings. +- It then prints the item’s type and name, applying a color-coding scheme via the `print_self` method of the `DocItemType` class. +- For each child item in the current item’s `children` dictionary, the function calls itself recursively, increasing the `indent` level and passing the current parameters to handle deeper levels of the repository structure. -**Note**: -- It is important to ensure that the `repo_path` and `file_path` are correctly specified to avoid path-related errors. -- The function gracefully handles exceptions by logging them along with the parameters that led to the error, but it returns an empty list in such cases, which callers should be prepared to handle. -- The `jedi` library's ability to find references is dependent on the correctness and completeness of the code being analyzed. +#### Behavior: +- **Indentation**: Indentation increases with each recursive call, visually nesting child items under their parents. +- **Item Types**: The function uses `DocItemType.print_self()` to print a color-coded representation of the item type. The possible item types are `_repo`, `_dir`, `_file`, `_class`, `_function`, `_sub_function`, and `_global_var`. +- **Status Handling**: If `diff_status` is enabled, only items that have a status difference (i.e., those for which documentation needs to be generated) are printed. The `need_to_generate` function is used to check if the item meets this condition. +- **Child Filtering**: If a child item has `has_task` set to `False`, it will be skipped when `diff_status` is enabled. -**Output Example**: -Assuming a variable named `my_var` is defined in `my_project/my_module.py` at line 10, column 5, and is referenced in two other places within the same file and once in a different file within the project, calling `find_all_referencer` with `in_file_only=False` might return: +#### Example Usage: ```python -[ - ("my_module.py", 20, 5), - ("my_module.py", 25, 10), - ("other_module.py", 15, 3) -] +repo_item.print_recursive(indent=2, diff_status=True, ignore_list=["file1", "file2"]) ``` -This output indicates that `my_var` is referenced at line 20, column 5, and line 25, column 10 within `my_module.py`, and at line 15, column 3 in `other_module.py`, relative to the `repo_path`. -## ClassDef MetaInfo -**MetaInfo**: The function of MetaInfo is to manage and store metadata related to the documentation process of a software project repository. +This call will recursively print the structure of the repository starting with an indentation level of 2. It will only print items with a status difference and will ignore items named "file1" and "file2". -**Attributes**: -- `repo_path`: The path to the repository for which documentation is being generated. 
-- `document_version`: A string representing the version of the documentation, typically a commit hash. An empty string indicates that the documentation is not yet completed. -- `target_repo_hierarchical_tree`: Represents the hierarchical structure of the repository's documentation items. -- `white_list`: A list specifying which files or objects should be included or excluded from the documentation process. -- `fake_file_reflection`: A dictionary mapping between original file paths and their corresponding fake or temporary file paths used during documentation generation. -- `jump_files`: A list of files to be skipped or ignored during the documentation process. -- `deleted_items_from_older_meta`: A list of items that were present in an older version of the metadata but have been deleted in the current version. -- `in_generation_process`: A boolean flag indicating whether the documentation generation process is currently ongoing. -- `checkpoint_lock`: A threading lock to ensure thread safety when saving the MetaInfo object. +#### Related Methods: +- **`DocItemType.print_self`**: Used to print the color-coded item type. The function formats the item type as a string and applies a color based on the type. +- **`need_to_generate`**: A function that checks whether an item requires documentation generation. -**Code Description**: -The `MetaInfo` class is designed to encapsulate all necessary metadata required for generating and managing documentation for a software project repository. It includes static methods for initializing the MetaInfo object from a repository path or from an existing checkpoint directory, which allows for resuming the documentation process from a saved state. The class also provides methods for saving the current state of the MetaInfo object to a specified directory, printing a list of documentation tasks, retrieving all file nodes within the repository, and finding documentation items based on line numbers within files. +#### Notes: +- The `print_recursive` function is typically used in the context of displaying or debugging the hierarchical structure of documentation items within a repository. +- It leverages the hierarchical nature of `DocItem` objects, where each item can have child items, and the function ensures that the entire structure is traversed and printed. -The `checkpoint` method is particularly important as it serializes and saves the current state of the MetaInfo object, including the hierarchical structure of the repository and any modifications made during the documentation process. This method ensures that progress is not lost and can be resumed or reviewed at a later time. +#### FunctionDef print_indent(indent) +**print_indent**: The function of print_indent is to return the indentation string corresponding to a given indentation level. -The `parse_reference` method is used to extract all bidirectional reference relations between documentation items, which is crucial for understanding dependencies and relationships within the project's codebase. This method takes into account various scenarios such as white-listed files, fake files, and jump files to ensure accurate documentation. +**parameters**: The parameters of this Function. +· indent: An integer indicating the indentation level; defaults to 0. -The `get_task_manager` and `get_topology` methods are used to calculate the order in which documentation tasks should be executed based on the dependencies between documentation items. This is essential for efficient and accurate documentation generation, especially in large projects with complex interdependencies.
+**Code Description**: The print_indent function generates an indentation string in a specific format. It accepts a single parameter, indent, which indicates the indentation level. If indent is 0, the function returns an empty string, meaning no indentation; if indent is greater than 0, it returns a string composed of spaces and marker characters representing the corresponding indentation. Specifically, the function returns "  " (two spaces) repeated indent times, followed by the characters "|-". This format is commonly used to visualize tree structures, helping users understand hierarchical relationships more clearly. -The `load_doc_from_older_meta` method allows for merging documentation from an older version of the metadata with the current version, facilitating incremental updates to the documentation as the project evolves. +**Note**: When using this function, make sure the indent argument passed in is a non-negative integer. Negative values will produce unexpected results. -**Note**: -When using the `MetaInfo` class, it is important to correctly configure the `repo_path`, `white_list`, `fake_file_reflection`, and `jump_files` attributes to match the specific needs and structure of your project. Additionally, care should be taken to ensure thread safety when accessing or modifying the MetaInfo object from multiple threads. +**Output Example**: +- Calling print_indent(0) returns "" (an empty string). +- Calling print_indent(1) returns "  |-" +- Calling print_indent(2) returns "    |-" +- Calling print_indent(3) returns "      |-" +*** +*** +## FunctionDef find_all_referencer(repo_path, variable_name, file_path, line_number, column_number, in_file_only) +**find_all_referencer**: The function of find_all_referencer is to locate all references to a specified variable within a given file in a repository. -**Output Example**: -Due to the nature of the `MetaInfo` class, there is no direct "output" in the traditional sense. However, an example usage scenario could involve initializing a `MetaInfo` object with the path to a project repository, generating documentation, and then saving the state of the MetaInfo object to a checkpoint directory for future reference or updates. -### FunctionDef init_meta_info(file_path_reflections, jump_files) -**init_meta_info**: The function of `init_meta_info` is to initialize a `MetaInfo` object with the repository's structure and metadata based on given file path reflections and jump files. +**parameters**: The parameters of this Function. +· repo_path: The path to the repository where the file is located. +· variable_name: The name of the variable for which references are being searched. +· file_path: The path to the file in which to search for references. +· line_number: The line number in the file where the variable is defined. +· column_number: The column number in the file where the variable is defined. +· in_file_only: A boolean flag indicating whether to restrict the search to the current file only (default is False). + +**Code Description**: The find_all_referencer function utilizes the Jedi library to analyze Python code and find references to a specified variable. It constructs a Jedi Script object using the provided repository path and file path. Depending on the in_file_only parameter, it either searches for references across the entire project or restricts the search to the current file. -**Parameters**: -- `file_path_reflections`: A dictionary mapping original file paths to their "reflected" paths within the repository. This parameter is used to handle files that may have been moved or renamed. -- `jump_files`: A list of file paths that should be excluded from the repository's structure and metadata initialization process. +The function retrieves all references to the variable at the specified line and column, filtering the results to include only those that match the variable_name. It then constructs a list of tuples containing the relative path to the module, line number, and column number of each reference, excluding the original definition of the variable.
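+The Jedi calls involved can be sketched roughly as follows; the filtering and error handling mirror the description above, but the exact Script construction and the scope argument reflect typical Jedi usage rather than a verified excerpt from the project:
+```python
+import os
+import jedi
+
+def find_all_referencer(repo_path, variable_name, file_path,
+                        line_number, column_number, in_file_only=False):
+    """Return (relative_path, line, column) for each reference to the variable."""
+    try:
+        script = jedi.Script(path=os.path.join(repo_path, file_path))
+        if in_file_only:
+            references = script.get_references(line_number, column_number, scope="file")
+        else:
+            references = script.get_references(line_number, column_number)
+        # Keep only names that match, and drop the definition site itself.
+        return [
+            (os.path.relpath(str(ref.module_path), repo_path), ref.line, ref.column)
+            for ref in references
+            if ref.name == variable_name
+            and not (ref.line == line_number and ref.column == column_number)
+        ]
+    except Exception as e:
+        # Log and return an empty list, as described above.
+        print(f"find_all_referencer failed: {e} "
+              f"({variable_name} @ {file_path}:{line_number}:{column_number})")
+        return []
+```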
-**Code Description**: -The `init_meta_info` function begins by retrieving the absolute path of the project repository from a configuration object (`CONFIG["repo_path"]`). It then prints a message indicating the initialization of the `MetaInfo` object with the specified repository path. +In the event of an exception, the function logs the error message along with the parameters that were used in the call, returning an empty list to indicate that no references were found or an error occurred. -A `FileHandler` object is instantiated with the project's absolute path and a `None` value for its file path attribute. This object is responsible for managing file operations within the repository, such as reading, writing, and generating the structure of files and directories. +The find_all_referencer function is called within the walk_file function of the MetaInfo class in the same module. The walk_file function iterates through all variables in a file and uses find_all_referencer to gather references for each variable. This integration allows for efficient tracking of variable usage across the codebase, enabling developers to understand dependencies and relationships between different parts of the code. -The function calls the `generate_overall_structure` method of the `FileHandler` object, passing in the `file_path_reflections` and `jump_files` parameters. This method returns a dictionary representing the overall structure of the repository, excluding files specified in `jump_files`. The structure includes information about all files and directories within the repository, as well as the relationships between them. +**Note**: It is important to ensure that the Jedi library is properly installed and configured in the environment where this function is executed. Additionally, the in_file_only parameter can significantly affect performance; setting it to True can speed up the search when working with large codebases. -A new `MetaInfo` object is created by calling the `from_project_hierarchy_json` static method of the `MetaInfo` class, which constructs a `MetaInfo` object from the JSON representation of the project's hierarchical structure returned by `generate_overall_structure`. +**Output Example**: A possible return value from the function could be: +``` +[ + ('src/module_a.py', 10, 5), + ('src/module_b.py', 15, 12), + ('src/module_c.py', 20, 8) +] +``` +This output indicates that the variable was referenced in three different files, along with the respective line and column numbers where the references occur. +## ClassDef MetaInfo +Doc is waiting to be generated... +### FunctionDef init_meta_info(file_path_reflections, jump_files) +**init_meta_info**: The function of init_meta_info is to initialize a MetaInfo object from a specified repository path. -The `repo_path`, `fake_file_reflection`, and `jump_files` attributes of the newly created `MetaInfo` object are then set to the project's absolute path, the `file_path_reflections` dictionary, and the `jump_files` list, respectively. +**parameters**: The parameters of this Function. +· file_path_reflections: A list of file paths that reflect the current state of the repository. +· jump_files: A list of files that should be skipped or treated differently during the initialization process. -Finally, the function returns the initialized `MetaInfo` object. +**Code Description**: The init_meta_info function is responsible for creating and returning a MetaInfo object that encapsulates the hierarchical structure of a project repository. 
It begins by retrieving the current project settings through the SettingsManager class, which ensures that the settings are consistently accessed throughout the application. The project’s absolute path is then obtained from the settings. -**Note**: -- The function assumes that the `CONFIG` object contains a valid `repo_path` key pointing to the root directory of the repository. -- The `file_path_reflections` and `jump_files` parameters allow for flexibility in handling files that may not be present in their original locations or should be excluded from the documentation process. -- The `MetaInfo` object returned by this function encapsulates the repository's structure and metadata, which can be used for further documentation and analysis tasks. +The function proceeds to print a message indicating the initialization process, specifying the repository path being used. It then creates an instance of FileHandler, which is tasked with generating an overall structure of the repository based on the provided file_path_reflections and jump_files. This structure is generated by invoking the generate_overall_structure method of the FileHandler class. -**Output Example**: -The function returns an instance of the `MetaInfo` class, which contains detailed information about the repository's structure, including files, directories, and their relationships. The exact structure of the `MetaInfo` object depends on the repository's contents and the parameters passed to the function. +Once the repository structure is obtained, the function calls the from_project_hierarchy_json method of the MetaInfo class. This method takes the generated repository structure as input and constructs a corresponding MetaInfo object. The resulting MetaInfo object is then populated with additional attributes: repo_path, fake_file_reflection, and jump_files, which are set to the project’s absolute path, the provided file_path_reflections, and jump_files, respectively. + +Finally, the fully constructed MetaInfo object is returned. This function is called by various components within the project, including the diff function in the main module and the __init__ method of the Runner class. In the diff function, init_meta_info is used to create a new MetaInfo object that reflects the current state of the repository before checking for changes and updating documentation. In the Runner class, it is invoked when initializing the meta_info attribute if the project hierarchy path does not exist, ensuring that the project structure is accurately represented. + +**Note**: It is essential to ensure that the file_path_reflections and jump_files parameters accurately represent the current state of the repository to avoid inconsistencies in the generated MetaInfo object. + +**Output Example**: A possible appearance of the code's return value could be a MetaInfo object containing a structured representation of the project's documentation items, with a hierarchical tree of DocItem instances reflecting the project's organization. For instance: +``` +MetaInfo( + repo_path='path/to/repo', + fake_file_reflection=['file1.py', 'file2.py'], + jump_files=['file3.py'], + target_repo_hierarchical_tree=DocItem( + item_type=DocItemType._repo, + obj_name="full_repo", + children={ + "src": DocItem( + item_type=DocItemType._dir, + obj_name="src", + children={ + "main.py": DocItem( + item_type=DocItemType._file, + obj_name="main.py", + ... 
+ ) + } + ) + } + ) +) +``` *** ### FunctionDef from_checkpoint_path(checkpoint_dir_path) -**from_checkpoint_path**: The function of `from_checkpoint_path` is to load a `MetaInfo` object from a specified checkpoint directory path. - -**Parameters**: -- `checkpoint_dir_path` (str): The path to the checkpoint directory from which the `MetaInfo` object should be loaded. +**from_checkpoint_path**: The function of from_checkpoint_path is to load a MetaInfo object from an existing checkpoint directory containing project metadata. -**Code Description**: -The `from_checkpoint_path` function is designed to reconstruct a `MetaInfo` object by reading and parsing metadata files stored in a given checkpoint directory. This process involves several key steps: +**parameters**: The parameters of this Function. +· checkpoint_dir_path: Path - The directory path where the checkpoint files, including project hierarchy and metadata, are stored. -1. The function constructs the path to the `project_hierarchy.json` file within the specified checkpoint directory and opens this file. The JSON content, which represents the hierarchical structure of the project, is loaded into a variable. +**Code Description**: The from_checkpoint_path function is responsible for reading and reconstructing a MetaInfo object from a specified checkpoint directory. It begins by retrieving the current project settings using the SettingsManager class, which ensures that the configuration settings are consistently accessed throughout the application. -2. It then calls the `from_project_hierarchy_json` method of the `MetaInfo` class, passing the loaded project hierarchy JSON. This method constructs a `MetaInfo` object that reflects the project's structure as defined in the `project_hierarchy.json` file. +The function constructs the path to the project_hierarchy.json file located within the provided checkpoint directory. It then opens this JSON file and loads its content into a Python dictionary. This dictionary represents the hierarchical structure of the project, which is subsequently passed to the MetaInfo.from_project_hierarchy_json method. This method parses the JSON representation and constructs a corresponding MetaInfo object that reflects the project's organization. -3. The function proceeds to open the `meta-info.json` file located within the checkpoint directory. This file contains additional metadata about the project, such as the document version, fake file reflections, jump files, items in the generation process, and deleted items from older metadata versions. +Next, the function proceeds to load the meta-info.json file from the checkpoint directory. This file contains additional metadata about the project, such as the document version, fake file reflections, jump files, and information about items deleted from older metadata. The function reads this JSON file, extracts the relevant data, and populates the corresponding attributes of the MetaInfo object. -4. The metadata from `meta-info.json` is loaded, and its contents are used to update the `MetaInfo` object's attributes accordingly. This includes setting the repository path from a global configuration (`CONFIG["repo_path"]`), the document version, fake file reflections, jump files, items currently in the generation process, and any deleted items from older metadata versions. +Throughout the process, the function provides feedback to the user by printing a message indicating that the MetaInfo is being loaded from the specified checkpoint directory. -5. 
Finally, the function prints a message indicating that the `MetaInfo` object has been successfully loaded from the specified checkpoint directory path. +The from_checkpoint_path function is called within the Runner class's __init__ method. If the absolute project hierarchy path does not exist, it initializes the MetaInfo object using the init_meta_info method. However, if the hierarchy path is found, it invokes from_checkpoint_path to load the existing MetaInfo, ensuring that the application can resume its state based on previously saved metadata. -**Note**: -- It is crucial that the `checkpoint_dir_path` parameter points to a valid directory containing the required `project_hierarchy.json` and `meta-info.json` files. The integrity and correctness of these files directly affect the successful reconstruction of the `MetaInfo` object. -- The global configuration (`CONFIG`) must be correctly set, especially the `repo_path`, as it is used to set the repository path in the `MetaInfo` object. +**Note**: It is essential to ensure that the checkpoint directory contains the required JSON files (project_hierarchy.json and meta-info.json) in the correct format to avoid runtime errors during the loading process. -**Output Example**: -While the function does not explicitly return a visual output, it returns a `MetaInfo` object populated with the project's hierarchical structure and additional metadata as defined in the checkpoint directory's files. This `MetaInfo` object can then be used within the application to access and manipulate project documentation and metadata information effectively. +**Output Example**: A possible appearance of the code's return value could be a MetaInfo object populated with the project's hierarchical structure and metadata, such as: +``` +MetaInfo( + repo_path='path/to/repo', + document_version='1.0', + fake_file_reflection={'file1': 'reflection1', 'file2': 'reflection2'}, + jump_files=['file1', 'file2'], + in_generation_process=False, + deleted_items_from_older_meta=['item1', 'item2'] +) +``` *** ### FunctionDef checkpoint(self, target_dir_path, flash_reference_relation) -**checkpoint**: The function of checkpoint is to save the MetaInfo object to the specified directory. +**checkpoint**: The function of checkpoint is to save the MetaInfo object to a specified directory. + +**parameters**: The parameters of this Function. +· target_dir_path: The path to the target directory where the MetaInfo will be saved. +· flash_reference_relation: Whether to include flash reference relation in the saved MetaInfo. Defaults to False. -**Parameters**: -- `target_dir_path` (str): The path to the target directory where the MetaInfo will be saved. -- `flash_reference_relation` (bool, optional): Whether to include flash reference relation in the saved MetaInfo. Defaults to False. +**Code Description**: The checkpoint function is responsible for persisting the current state of the MetaInfo object to a specified directory. It begins by acquiring a lock to ensure thread safety during the save operation. The function prints a message indicating that the MetaInfo is being refreshed and saved. -**Code Description**: -The `checkpoint` function is designed to serialize and save the current state of the MetaInfo object into a specified directory. This process involves two main steps: saving the project hierarchy as a JSON file and saving the meta-information of the documentation process as another JSON file. +The first step within the function checks if the target directory exists. 
If it does not, the function creates the directory structure. Following this, the function calls the to_hierarchy_json method to convert the current state of the MetaInfo into a hierarchical JSON representation. The flash_reference_relation parameter determines whether detailed reference information should be included in this JSON output. -Upon invocation, the function first checks if the target directory exists; if not, it creates the directory. It then proceeds to generate a hierarchical JSON representation of the project's documentation metadata by calling the `to_hierarchy_json` method. This representation includes details such as document versions, the generation process status, and reference relations between documents, depending on the `flash_reference_relation` flag. The resulting JSON is saved to a file named "project_hierarchy.json" in the target directory. +Once the JSON representation is generated, the function writes two files to the target directory: "project_hierarchy.json" and "meta-info.json". The first file contains the hierarchical structure of the documentation items, while the second file includes metadata about the document version, the generation process status, reflections of fake files, jump files, and any deleted items from older metadata. -Subsequently, the function compiles a dictionary containing essential meta-information about the documentation process, including the document version, generation process status, and details about any fake file reflections or jump files used during the documentation generation. It also includes information about items deleted from older versions of the meta-information. This dictionary is then serialized to JSON and saved to a file named "meta-info.json" in the same directory. +The checkpoint function is called in various contexts within the project. For instance, it is invoked during the initialization of the Runner class when the project hierarchy does not exist, ensuring that the initial state of the MetaInfo is saved. It is also called after generating documentation for individual items, allowing for real-time updates to the saved MetaInfo. Additionally, the function is utilized at the end of the first_generate method to save the updated document version after all documents have been generated. -**Note**: -- The function is thread-safe, guarded by a `checkpoint_lock` to prevent concurrent modifications that could lead to data corruption or inconsistencies. -- The `flash_reference_relation` parameter allows for the inclusion of detailed reference relations in the saved meta-information. This can be useful for tracking document dependencies and references but may result in larger file sizes due to the additional data. -- This function plays a critical role in persisting the state of the documentation process, enabling recovery and resumption of the process in case of interruptions or errors. It is also used to update the saved state after significant events, such as the completion of document generation tasks or changes in the project structure. +**Note**: When using the checkpoint function, ensure that the target directory is accessible and that the flash_reference_relation parameter is set according to the desired level of detail in the saved MetaInfo. This function is critical for maintaining an accurate and up-to-date representation of the project's documentation structure. 
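+A condensed sketch of the save path just described: acquire the lock, ensure the directory exists, then write the two JSON files. The to_hierarchy_json call and the exact JSON key names are assumptions drawn from the surrounding description, not verified source:
+```python
+import json
+import threading
+from pathlib import Path
+
+checkpoint_lock = threading.Lock()
+
+def checkpoint(meta, target_dir_path, flash_reference_relation=False):
+    with checkpoint_lock:  # keep concurrent saves from interleaving
+        target_dir = Path(target_dir_path)
+        target_dir.mkdir(parents=True, exist_ok=True)
+        hierarchy = meta.to_hierarchy_json(flash_reference_relation=flash_reference_relation)
+        (target_dir / "project_hierarchy.json").write_text(
+            json.dumps(hierarchy, indent=2, ensure_ascii=False))
+        meta_json = {  # key names are illustrative
+            "doc_version": meta.document_version,
+            "in_generation_process": meta.in_generation_process,
+            "fake_file_reflection": meta.fake_file_reflection,
+            "jump_files": meta.jump_files,
+            "deleted_items_from_older_meta": meta.deleted_items_from_older_meta,
+        }
+        (target_dir / "meta-info.json").write_text(
+            json.dumps(meta_json, indent=2, ensure_ascii=False))
+```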
*** ### FunctionDef print_task_list(self, task_dict) -**print_task_list**: The function of print_task_list is to display a formatted table of tasks, including their IDs, documentation generation reasons, paths, and dependencies. +**print_task_list**: The function of print_task_list is to display a formatted table of tasks along with their statuses and dependencies. -**Parameters**: -- `task_dict`: A dictionary where the key is the task ID and the value is a Task object. This parameter contains all the tasks that need to be printed. +**parameters**: The parameters of this Function. +· task_dict: A dictionary where the keys are task IDs and the values are Task objects containing information about each task. -**Code Description**: -The `print_task_list` method is designed to provide a clear and structured overview of tasks within a documentation generation system. It utilizes the `PrettyTable` library to create a visually appealing table that lists essential details about each task. The table columns include the task ID, the reason for documentation generation (extracted from the task's extra information), the full path of the task (also from the task's extra information), and a list of dependencies. +**Code Description**: The print_task_list method is designed to present a clear and organized view of tasks managed within a multi-tasking framework. It takes a dictionary of tasks (task_dict) as input, where each task is represented by a unique identifier (task_id) and associated with various attributes such as status and dependencies. -For each task in the `task_dict`, the method checks if there are any dependencies. If a task has dependencies, it concatenates their IDs into a string. To ensure the table remains readable, if the concatenated string of dependencies exceeds 20 characters, it is truncated and represented in a shortened form, showing the first and last 8 characters separated by ellipses. +The method utilizes the PrettyTable library to create a visually appealing table format. It initializes the table with headers: "task_id", "Doc Generation Reason", "Path", and "dependency". For each task in the task_dict, it retrieves the task_id and task_info. The task_info is an instance of the Task class, which contains details about the task's status and its dependencies. -The method then adds a row to the table for each task, including the task ID, the reason for documentation generation, the full path, and the dependencies string. Finally, the table is printed to the console, providing a comprehensive overview of the tasks to be completed. +The method checks if the task has any dependencies. If dependencies exist, it constructs a string representation of the dependency task IDs. To maintain readability, if the string exceeds 20 characters, it truncates the string and adds ellipses. Each task's information is then added as a new row in the task_table. -**Note**: -This method is crucial for understanding the state and dependencies of tasks within the documentation generation process. It aids in debugging and managing the workflow by clearly showing which tasks are pending and how they are interconnected. The use of `PrettyTable` enhances readability, making it easier for users to quickly assess the status of tasks. However, users should be aware that the dependencies are represented by task IDs, and they may need to refer back to the task list or documentation to understand the specific tasks these IDs refer to. 
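+As an approximation of the table construction described in this section (the final print step is picked up again just below), here is a sketch; the Task fields and the extra_info keys are assumptions for illustration, not the project's actual definitions:
+```python
+from dataclasses import dataclass, field
+from prettytable import PrettyTable
+
+@dataclass
+class Task:
+    # Minimal stand-in: the real Task also carries scheduling state.
+    extra_info: dict
+    dependencies: list = field(default_factory=list)
+
+def print_task_list(task_dict):
+    task_table = PrettyTable(["task_id", "Doc Generation Reason", "Path", "dependency"])
+    for task_id, task_info in task_dict.items():
+        deps = ",".join(str(d) for d in task_info.dependencies)
+        if len(deps) > 20:
+            deps = deps[:8] + "..." + deps[-8:]  # truncate long dependency strings
+        task_table.add_row([task_id,
+                            task_info.extra_info.get("reason", ""),
+                            task_info.extra_info.get("path", ""),
+                            deps])
+    print(task_table)
+
+print_task_list({1: Task({"reason": "new file", "path": "src/main.py"}, dependencies=[0])})
+```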
+Finally, the method prints the completed task_table to the console, providing a comprehensive overview of the tasks, their statuses, and their dependencies. + +This method is called within the first_generate method of the Runner class and the run method of the same class. In first_generate, it is invoked after initializing or loading a task list, allowing users to see the current state of tasks before document generation begins. In the run method, it is called to display the task list after detecting changes in the project files, ensuring that users are informed of the tasks that need to be processed. + +**Note**: When using the print_task_list method, it is important to ensure that the task_dict is populated with valid Task objects to avoid errors during execution. Additionally, the output format relies on the PrettyTable library, which must be properly installed and imported in the project. *** ### FunctionDef get_all_files(self) -**get_all_files**: The function of get_all_files is to retrieve all file nodes from a hierarchical tree structure representing a software project's repository. +**get_all_files**: The function of get_all_files is to retrieve all file nodes from the hierarchical tree of documentation items. -**Parameters**: This function does not take any external parameters as it operates on the instance's state. +**parameters**: The parameters of this Function. +· None -**Code Description**: The `get_all_files` function is designed to traverse a hierarchical tree structure, which represents the organization of files and other entities within a software project's repository. It starts the traversal from the root of the tree, which is stored in the instance variable `self.target_repo_hierarchical_tree`. The function defines a nested helper function, `walk_tree`, which is a recursive function used to walk through each node of the tree. +**Code Description**: The get_all_files function is designed to traverse the hierarchical structure of documentation items represented by the target_repo_hierarchical_tree attribute of the containing class. It initializes an empty list named files to store the file nodes encountered during the traversal. The function defines a nested helper function called walk_tree, which takes a current node (now_node) as an argument. -The `walk_tree` function checks if the current node (`now_node`) is of the type `DocItemType._file`, indicating it is a file node. If so, the node is appended to the `files` list, which is initialized at the beginning of the `get_all_files` function. After checking the current node, `walk_tree` iterates over the children of the current node, calling itself recursively for each child. This process continues until all nodes in the tree have been visited. +The walk_tree function checks if the current node's item_type is of type DocItemType._file. If it is, the current node is appended to the files list. The function then iterates over the children of the current node, recursively calling walk_tree for each child. This recursive approach ensures that all levels of the hierarchical tree are explored, allowing the function to collect all file nodes present in the structure. -Once the traversal is complete, the `get_all_files` function returns the `files` list, which now contains all file nodes found in the hierarchical tree. +Once the traversal is complete, the get_all_files function returns the files list, which contains all the file nodes found in the documentation hierarchy. 
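+The traversal just described amounts to a depth-first walk that collects file nodes; a minimal sketch, assuming nodes expose an item_type marker and a children dict as in the simplified DocItem used in earlier sketches:
+```python
+def get_all_files(target_repo_hierarchical_tree):
+    files = []
+
+    def walk_tree(now_node):
+        # Collect the node if it is a file, then recurse into its children.
+        if now_node.item_type == "_file":  # stands in for DocItemType._file
+            files.append(now_node)
+        for child in now_node.children.values():
+            walk_tree(child)
+
+    walk_tree(target_repo_hierarchical_tree)
+    return files
+```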
-**Note**: It is important to note that this function relies on the structure and integrity of the hierarchical tree stored in `self.target_repo_hierarchical_tree`. The tree must accurately represent the repository's structure for the function to return correct results. Additionally, the function assumes that the tree's nodes are instances of `DocItem` or a similar structure that includes an `item_type` attribute and a `children` dictionary. +This function is called by other methods within the MetaInfo class, such as parse_reference and to_hierarchy_json. In parse_reference, get_all_files is used to gather all file nodes for further analysis of bidirectional reference relationships among documentation items. In to_hierarchy_json, it retrieves file items to convert the documentation metadata into a hierarchical JSON representation. The get_all_files function plays a crucial role in enabling these higher-level functionalities by providing access to the underlying file nodes. -**Output Example**: Assuming the hierarchical tree represents a repository with three files, the output of `get_all_files` might look like the following: +**Note**: When using the get_all_files function, it is important to ensure that the target_repo_hierarchical_tree is properly initialized and structured, as the function relies on this hierarchical representation to retrieve the file nodes accurately. + +**Output Example**: A possible output of the get_all_files function could be a list of DocItem instances representing the file nodes, such as: ```python [ - DocItem(item_type=DocItemType._file, obj_name='file1.py', ...), - DocItem(item_type=DocItemType._file, obj_name='file2.py', ...), - DocItem(item_type=DocItemType._file, obj_name='subdir/file3.py', ...) + DocItem(obj_name="file1.py", item_type=DocItemType._file), + DocItem(obj_name="file2.py", item_type=DocItemType._file), + DocItem(obj_name="file3.py", item_type=DocItemType._file) ] ``` -This list contains `DocItem` instances representing each file found in the repository's hierarchical structure. #### FunctionDef walk_tree(now_node) -**walk_tree**: The function of walk_tree is to recursively traverse a hierarchical structure, identifying and collecting file-type nodes. +**walk_tree**: The function of walk_tree is to recursively traverse a tree structure of documentation items and collect all file nodes. + +**parameters**: The parameters of this Function. +· now_node: This parameter represents the current node in the tree structure being traversed. It is expected to be an instance of a class that contains attributes indicating its type and its children. -**parameters**: -- now_node: The current node in the hierarchy being examined. +**Code Description**: The walk_tree function is designed to navigate through a hierarchical structure of documentation items, represented as nodes in a tree. Each node can have a type defined by the DocItemType enumeration, which categorizes it as a file, directory, class, function, or other types of documentation items. -**Code Description**: The `walk_tree` function is designed to operate within a hierarchical structure, such as a file system or a nested set of objects that mimic a directory tree. It takes a single parameter, `now_node`, which represents the current node being processed. The function first checks if the `now_node` is of type `_file` by comparing its `item_type` attribute against the `_file` attribute of the `DocItemType` enumeration. 
If the condition is met, indicating that the current node is a file, it is appended to a globally accessible list named `files`. This list is intended to collect all file-type nodes encountered during the traversal. +The function begins by checking if the current node (now_node) is of the type DocItemType._file. If it is, the node is appended to a list named 'files', which is intended to store all file nodes encountered during the traversal. This indicates that the function's primary purpose is to gather all file items from the documentation structure. -After handling the current node, the function iterates over all children of `now_node`, if any, by accessing the `children` attribute, which is expected to be a dictionary where keys are identifiers and values are child nodes. For each child node, the function recursively calls itself with the child as the new `now_node`, allowing it to traverse the entire hierarchy depth-first. This recursive approach ensures that all nodes in the structure are visited, and all file-type nodes are collected. +Following this check, the function iterates over the children of the current node. The children are expected to be stored in a dictionary-like structure, where each child can also be a node with its own type and children. The function calls itself recursively for each child node, allowing it to traverse the entire tree structure. This recursive approach ensures that all levels of the tree are explored, and all file nodes are collected regardless of their depth in the hierarchy. -The function relies on the `DocItemType` enumeration to distinguish between different types of nodes, specifically identifying file nodes. This relationship with `DocItemType` is crucial for the function's operation, as it determines the action taken for each node based on its type. +The relationship with its callees, particularly the DocItemType enumeration, is crucial for the functionality of walk_tree. The function relies on the type checking provided by DocItemType to determine whether a node is a file. This structured categorization allows for efficient filtering of nodes during the traversal process. -**Note**: It is important to ensure that the `files` list is accessible within the scope of the `walk_tree` function and is properly initialized before the function is called. Additionally, the hierarchical structure passed to this function must correctly implement the `item_type` attribute and the `children` dictionary for each node to enable accurate traversal and identification of file-type nodes. The function does not return any value; instead, it populates the `files` list with the nodes it identifies as files, making the list the primary output of the traversal process. +**Note**: When using the walk_tree function, it is important to ensure that the input node (now_node) is properly structured and contains the necessary attributes for type checking and child node retrieval. The function assumes that the 'files' list is defined in the appropriate scope where walk_tree is called, as it appends file nodes to this list. *** *** ### FunctionDef find_obj_with_lineno(self, file_node, start_line_num) -**find_obj_with_lineno**: The function of `find_obj_with_lineno` is to identify and return the documentation item (`DocItem`) associated with a specific line number within a file. - -**Parameters**: -- `file_node`: A `DocItem` instance representing the root node of the file in which the search is conducted. 
-- `start_line_num`: An integer representing the line number for which the corresponding documentation item is sought. +**find_obj_with_lineno**: The function of find_obj_with_lineno is to locate the documentation object corresponding to a specific line number within a given file node, ensuring that the identified object does not belong to any of its child objects' ranges. -**Code Description**: -The `find_obj_with_lineno` function begins by taking a `DocItem` instance as the root node of a file and an integer representing a line number. It aims to traverse the hierarchical structure of documentation items starting from this root node to find the most specific (deepest in the hierarchy) `DocItem` that encompasses the given line number within its start and end line boundaries. +**parameters**: The parameters of this Function. +· file_node: An instance of DocItem representing the current file node being analyzed. +· start_line_num: An integer indicating the line number for which the corresponding documentation object is to be found. -The function asserts that the initial `file_node` is not `None` to ensure that a valid root node is provided. It then enters a loop to traverse down the hierarchy of documentation items. Within each iteration, it checks the children of the current node to find a child whose code range (defined by `code_start_line` and `code_end_line` in the `content` dictionary) includes the specified line number. +**Code Description**: The find_obj_with_lineno function operates by traversing the hierarchical structure of DocItem instances, starting from the provided file_node. It checks each child of the current node to determine if the specified start_line_num falls within the range defined by the child's code_start_line and code_end_line attributes. If a child node is found that encompasses the specified line number, the function updates the current node to this child and continues the search. This process repeats until a node is reached that has no children or where the specified line number does not fall within the range of any child nodes. The function then returns the current node, which represents the documentation object corresponding to the specified line number. -If such a child is found, the search moves down to this child, making it the new current node (`now_node`). This process repeats, moving deeper into the hierarchy until a node is reached that either has no children or none of its children's code ranges include the specified line number. At this point, the function concludes that the current node is the most specific documentation item that encompasses the given line number and returns it. +This function is called within the context of the walk_file function, which iterates over all variables in a file. The walk_file function utilizes find_obj_with_lineno to identify the specific documentation object associated with a line number where a reference to a variable is found. This relationship is crucial for establishing connections between different documentation items, as it allows for the identification of which documentation object is being referenced at a particular line in the code. -**Note**: -- It is crucial to ensure that the `file_node` provided as input accurately represents the root of the file's documentation item hierarchy for the function to work correctly. -- The function assumes that the `content` dictionary of each `DocItem` contains valid `code_start_line` and `code_end_line` entries. 
These entries are essential for determining the code range that each documentation item covers. -- This function does not handle cases where the specified line number does not fall within the range of any documentation item. In such cases, it would return the most recent `DocItem` that was considered before determining that no children encompass the line number. +**Note**: When using find_obj_with_lineno, it is essential to ensure that the file_node provided is valid and that the start_line_num is within the range of lines covered by the documentation items in the hierarchy. This will prevent assertion errors and ensure accurate identification of the corresponding documentation object. -**Output Example**: -Suppose we have a file represented by a `DocItem` hierarchy where the root node covers lines 1-100, and it has a child node covering lines 10-20. If `find_obj_with_lineno` is called with this root node and the line number 15, the function will return the child node covering lines 10-20, as it is the most specific node encompassing line 15. +**Output Example**: An example output of the function might return a DocItem instance representing a specific function or class that starts and ends at the line numbers encompassing the provided start_line_num, such as: +`DocItem(obj_name="MyClass", code_start_line=10, code_end_line=50)` *** ### FunctionDef parse_reference(self) -**parse_reference**: The function of `parse_reference` is to bidirectionally extract all reference relationships among objects within a project's files. - -**Parameters**: This function does not accept any parameters. +**parse_reference**: The function of parse_reference is to extract all bidirectional reference relationships among documentation items within the specified files. -**Code Description**: The `parse_reference` function is a comprehensive method designed to analyze and record the reference relationships between different objects across all files in a project. It begins by retrieving a list of all file nodes within the project using the `get_all_files` method. The function then initializes two lists, `white_list_file_names` and `white_list_obj_names`, which are intended to store file paths and object identifiers from a predefined whitelist. If a whitelist is provided (`self.white_list` is not None), these lists are populated accordingly. +**parameters**: The parameters of this Function. +· None -The function iterates over each file node, using a progress bar (via `tqdm`) to visually indicate the progress of parsing bidirectional references. For each file, it performs several checks to ensure that the file is not a "jump-file" (a file that should be excluded from the analysis) and that it is included in the whitelist if one is specified. These checks are crucial for focusing the analysis on relevant files and objects. +**Code Description**: The parse_reference function is designed to analyze and extract bidirectional reference relationships from all relevant files in the documentation hierarchy. It begins by retrieving all file nodes using the get_all_files method. The function also initializes two lists, white_list_file_names and white_list_obj_names, which are used to filter the files and objects to be processed based on a specified whitelist, if provided. -Within each file, the function recursively walks through all objects (`DocItem` instances) using a nested function `walk_file`. 
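+Before continuing, the descent performed by find_obj_with_lineno (documented above) can be sketched as follows, assuming each child's content dict carries code_start_line and code_end_line and that nodes follow the simplified DocItem shape used in earlier sketches:
+```python
+def find_obj_with_lineno(file_node, start_line_num):
+    assert file_node is not None
+    now_node = file_node
+    while now_node.children:
+        matched = False
+        for child in now_node.children.values():
+            start = child.content["code_start_line"]
+            end = child.content["code_end_line"]
+            if start <= start_line_num <= end:
+                now_node = child  # descend into the enclosing child
+                matched = True
+                break
+        if not matched:
+            break  # no child covers the line; the current node is the answer
+    return now_node
+```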
This nested function is responsible for finding all references to the current object (`now_obj`) within the same file or across different files, depending on whether a whitelist is specified and whether the reference is within the same file. The search for references is conducted using the `find_all_referencer` function, which looks for occurrences of the object's name in the project, taking into account the object's location and name. +The function iterates through each file node, ensuring that it does not process any jump files or files that are not part of the whitelist. For each file node, it defines a nested function called walk_file, which recursively traverses the variables within the file. This nested function identifies all references to the current object and checks their validity based on certain conditions, such as whether the reference comes from a fake file or an unstaged version. -For each found reference, the function checks if the reference comes from an "unstaged" or "untracked" file, in which case it is ignored. Otherwise, it attempts to locate the referencing object within the project's hierarchical structure. If successful, it checks for a direct name match to avoid duplicates and then updates the reference relationships between the current object and the referencing object. This includes marking whether the reference is of a special type (e.g., a function calling another function) and updating lists that track which objects reference each other. +During the traversal, if a valid reference is found, it updates the reference relationships between the objects, ensuring that the relationships are bidirectional. Specifically, it appends the current object to the list of references for the referencer node and vice versa. The function keeps track of the reference count to monitor the number of references processed. -Throughout this process, the function maintains a count of references (`ref_count`) for each object, providing insight into how interconnected the objects are within the project. +The parse_reference function is called by other methods within the MetaInfo class, such as get_topology and load_doc_from_older_meta. In get_topology, it is used to establish the reference relationships before calculating the topological order of the objects in the repository. In load_doc_from_older_meta, it is invoked to update the reference relationships after merging documentation from an older version, ensuring that any changes in references are accurately reflected in the new version. -**Note**: It is important to note that this function assumes the project's files and objects are organized into a hierarchical tree structure (`self.target_repo_hierarchical_tree`). The accuracy of the reference parsing depends on the integrity of this structure. Additionally, the function relies on several assumptions about the project's organization, such as the use of "jump-files" and "fake-file" conventions, which should be clearly defined and documented within the project. The function's performance and accuracy can be significantly affected by the completeness and correctness of the whitelist, if used. +**Note**: When using the parse_reference function, ensure that the target repository's hierarchical tree is properly initialized and that any specified whitelists are correctly defined to avoid missing relevant references. #### FunctionDef walk_file(now_obj) -**walk_file**: The function of `walk_file` is to traverse all variables within a file and process their references. 
+**walk_file**: The function of walk_file is to traverse all variables within a file and gather their references. -**Parameters**: -- `now_obj`: A `DocItem` instance representing the current documentation item being processed. +**parameters**: The parameters of this Function. +· now_obj (DocItem): The current documentation item representing a variable or object being processed. + +**Code Description**: The walk_file function is designed to recursively analyze a given DocItem (now_obj) that represents a variable or object within a file. It identifies and collects all references to this object throughout the file, while also managing relationships with other documentation items. -**Code Description**: -The `walk_file` function is designed to recursively traverse and process documentation items (variables, functions, classes, etc.) within a file. It operates within the context of parsing references in a documentation generation system, specifically focusing on identifying and handling references to and from the current documentation item (`now_obj`). +The function begins by checking if there is a whitelist of object names (white_list_obj_names). If the current object's name is not in this whitelist and the whitelist is not empty, it sets the in_file_only flag to True. This flag is used to optimize the search for references, ensuring that only references within the same file are considered when the whitelist is applied. -The function begins by determining if the current item should only be considered for references within the same file. This is controlled by checking against a whitelist of object names (`white_list_obj_names`). If the current object's name is not in the whitelist, the search for references is limited to the same file (`in_file_only` flag is set to `True`). +Next, the function calls find_all_referencer, which is responsible for locating all references to the variable represented by now_obj. This function requires several parameters, including the repository path, variable name, file path, line number, and column number. The in_file_only flag is passed to restrict the search to the current file if necessary. The result is a list of positions where the variable is referenced. -It then proceeds to find all references to the current object using the `find_all_referencer` function, which returns a list of positions where the current object is referenced. Each reference is processed to determine its nature and relevance. References from unstaged or untracked files are skipped, as indicated by checks against `fake_file_reflection` and `jump_files`. +For each reference found, the function checks if the reference comes from unstaged or untracked files, skipping those references if they do. It uses the self.fake_file_reflection and self.jump_files attributes to determine the status of the referencing files. If a reference is valid, the function attempts to locate the corresponding DocItem for the referencing file using self.target_repo_hierarchical_tree.find. -For each valid reference found, the function attempts to locate the referencing documentation item within the project's hierarchical structure. This is achieved by splitting the reference file path and using the `find` method of `DocItem` to navigate the project's documentation hierarchy. If the referencing item is found, and it is not a self-reference, the function checks if there is an ancestor relationship between the current item and the referencing item using the `has_ans_relation` method. 
This step ensures that references between ancestor nodes are not considered, avoiding circular references or simplifying the reference structure. +Once the referencer file item is identified, the function checks if the reference is valid by ensuring that it does not create a circular reference with now_obj. If the reference is valid and does not belong to an ancestor node, it updates the reference relationships between now_obj and the referencer_node. Specifically, it appends the referencer_node to now_obj.who_reference_me and vice versa, while also maintaining a count of references (ref_count). -If the reference passes all checks, it is added to the list of references for both the current item and the referencing item, effectively linking them within the documentation structure. The `ref_count` is incremented for each valid reference found, providing a count of total references processed. +Finally, the function recursively processes any child items of now_obj by calling itself for each child, ensuring that all variables within the hierarchy are analyzed for references. -After processing all references for the current item, the function recursively calls itself for each child of the current item, ensuring that the entire file's documentation items are traversed and processed. +The walk_file function is integral to the documentation generation process, as it establishes the relationships between different documentation items and helps in understanding how variables are referenced throughout the codebase. It relies on several other functions and classes, including find_all_referencer, DocItem, and DocItemType, to perform its tasks effectively. -**Note**: -- The function relies on several external variables and methods, such as `ref_count`, `white_list_file_names`, `find_all_referencer`, and `find_obj_with_lineno`, which are assumed to be defined in the broader scope of the class or module in which `walk_file` is defined. -- The function's ability to accurately process references depends on the correct initialization and maintenance of the project's documentation hierarchy (`DocItem` instances) and the accurate implementation of methods like `find_all_referencer` and `find_obj_with_lineno`. -- It is crucial to ensure that the whitelist of object names (`white_list_obj_names`) and the lists of unstaged or untracked files (`fake_file_reflection`, `jump_files`) are correctly managed to avoid skipping relevant references or processing irrelevant ones. -- The function's recursive nature allows it to thoroughly process all documentation items within a file, but care should be taken to manage the depth of recursion to avoid potential stack overflow issues in cases of deeply nested documentation structures. +**Note**: When using the walk_file function, ensure that the now_obj parameter is a valid DocItem instance representing a variable or object. Additionally, be aware of the implications of the in_file_only flag, as it can significantly affect the performance and results of the reference search. *** *** ### FunctionDef get_task_manager(self, now_node, task_available_func) -**get_task_manager**: The function of `get_task_manager` is to generate a `TaskManager` instance that manages tasks based on the documentation items' topology and specific conditions. - -**Parameters**: -- `now_node`: A `DocItem` instance representing the current node in the documentation hierarchy from which the traversal begins. 
-- `task_available_func`: A function that determines whether a given documentation item should be considered for task creation. This function takes a `DocItem` as its input and returns a boolean value. - -**Code Description**: -The `get_task_manager` function initiates by traversing the documentation hierarchy starting from the provided `now_node`. It generates a list of documentation items (`doc_items`) by performing a pre-order traversal, ensuring that parent nodes are processed before their children. +**get_task_manager**: The function of get_task_manager is to construct a TaskManager instance that organizes tasks based on the hierarchical relationships of document items. -If a whitelist is defined (`self.white_list` is not `None`), the function filters the `doc_items` list to include only those items that match the criteria specified in the whitelist. The criteria involve matching the file path and object name of the documentation items against the whitelist entries. +**parameters**: The parameters of this Function. +· now_node: DocItem - The current document item from which to derive the task list. +· task_available_func: Callable - A function that determines the availability of tasks based on specific criteria. -Subsequently, the function further filters the `doc_items` list using the `task_available_func` to include only those items for which this function returns `True`. This step ensures that only relevant documentation items are considered for task creation. +**Code Description**: The get_task_manager function is responsible for generating a TaskManager that manages tasks derived from the hierarchical structure of document items. It begins by retrieving a list of document items through a pre-order traversal of the current node (now_node) using the get_travel_list method. If a white list is provided, it filters the document items to include only those that match the criteria defined in the white list. Subsequently, it applies the task_available_func to further filter the document items based on their availability. -The filtered list of documentation items is then sorted based on their depth in the documentation hierarchy, prioritizing leaf nodes (those with no children) to be processed first. +The filtered document items are then sorted by their depth in the hierarchy, ensuring that leaf nodes are processed first. The function initializes an empty list to keep track of processed items and creates a new TaskManager instance to manage the tasks. -The function iterates through the sorted list of documentation items, selecting items for task creation based on their dependencies and reference relationships. It handles complex scenarios, including potential circular references and special cases where documentation items may reference each other. +The core logic of the function involves iterating through the document items to determine dependencies and establish a task order. For each document item, it assesses the number of dependencies it has, both from its children and from other referenced items. If a document item has no dependencies, it is selected as the target item for task creation. If dependencies exist, the function identifies the item with the least number of unresolved dependencies. -For each selected documentation item, the function calculates its dependencies based on its children and referenced items. It then creates a task in the `TaskManager` instance for the item, specifying its dependencies and associating the item itself as extra information with the task. 
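+In code, the selection step described here might look roughly like the following sketch (illustrative only: the helper name pick_next_item and the exact bookkeeping are assumptions drawn from this description, not the verbatim implementation):
+```
+from typing import List
+
+def pick_next_item(doc_items: List["DocItem"]) -> "DocItem":
+    """Sketch: prefer an item with no unresolved dependencies; otherwise
+    fall back to the item with the fewest, which tolerates cycles."""
+    best_index, min_deps = 0, float("inf")
+    for i, item in enumerate(doc_items):
+        # Dependencies come from children and from referenced items still pending.
+        deps = sum(1 for child in item.children.values() if child in doc_items)
+        deps += sum(1 for ref in item.reference_who if ref in doc_items)
+        if deps == 0:
+            return item
+        if deps < min_deps:
+            best_index, min_deps = i, deps
+    return doc_items[best_index]
+```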
+Once the target item is determined, the function collects its dependency task IDs and adds a new task to the TaskManager using the add_task method. This process continues until all document items have been processed. -The function updates the progress of task creation using a progress bar (`tqdm`) and continues until all eligible documentation items have been processed and associated tasks have been created in the `TaskManager`. +The get_task_manager function is called within the get_topology method of the MetaInfo class, which orchestrates the overall process of calculating the topological order of all objects in a repository. The get_topology method first parses the references and then invokes get_task_manager to construct the TaskManager based on the hierarchical tree of document items. -**Note**: -- The function assumes that the documentation hierarchy does not contain cycles that cannot be resolved, as it attempts to handle circular references to the best extent possible. However, in cases where circular references cannot be resolved, it may result in tasks that cannot be executed due to unresolved dependencies. -- The `task_available_func` plays a crucial role in determining which documentation items are considered for task creation. It should be carefully implemented to ensure that only relevant items are processed. +Additionally, the get_task_manager function is utilized in the run method of the Runner class. In this context, it is called to generate a task manager that processes document updates based on changes detected in the project files. -**Output Example**: -The output of the `get_task_manager` function is an instance of `TaskManager` populated with tasks corresponding to the documentation items that need to be processed. Each task in the `TaskManager` has a unique ID, a list of dependency task IDs, and is associated with a specific documentation item (`extra` information). +**Note**: When using this function, ensure that the task_available_func is correctly defined to accurately reflect the availability of tasks. Be aware of potential circular references in the document item relationships, as this may complicate task management. -This `TaskManager` instance can then be used to manage and execute the tasks in a multi-threaded environment, ensuring that documentation items are processed in an order that respects their dependencies and the overall documentation hierarchy. +**Output Example**: A possible return value from the get_task_manager function could be a TaskManager instance containing a series of tasks organized by their dependencies, ready for execution. #### FunctionDef in_white_list(item) -**in_white_list**: The function of `in_white_list` is to determine if a given documentation item is included in a predefined white list. - -**Parameters**: -- `item`: A `DocItem` instance representing the documentation item to be checked against the white list. +**in_white_list**: The function of in_white_list is to determine whether a given DocItem is present in a predefined white list based on its file name and object name. -**Code Description**: The `in_white_list` function iterates through a list of conditions (the white list) stored within the `MetaInfo` class. Each condition in the white list is represented as a dictionary containing at least two keys: `"file_path"` and `"id_text"`. The function checks if the given `DocItem` (`item`) matches any of the conditions in the white list based on two criteria: -1. 
The file name of the `DocItem`, obtained through its `get_file_name` method, must match the `"file_path"` value in the white list condition. -2. The `obj_name` attribute of the `DocItem`, which represents the name of the object or element being documented, must match the `"id_text"` value in the white list condition. +**parameters**: The parameters of this Function. +· item: An instance of DocItem that is being checked against the white list. -If both conditions are met for any entry in the white list, the function returns `True`, indicating that the `DocItem` is in the white list. If no match is found after iterating through the entire white list, the function returns `False`, indicating that the `DocItem` is not in the white list. +**Code Description**: The in_white_list function iterates through a collection called self.white_list, which contains entries that define valid file paths and corresponding object names. For each entry in the white list, the function checks if the file name of the provided DocItem (obtained by calling the get_file_name method) matches the "file_path" in the white list entry and if the object name of the DocItem matches the "id_text" in the same entry. If both conditions are satisfied for any entry, the function returns True, indicating that the item is in the white list. If no matches are found after checking all entries, the function returns False. -The relationship with its callees, particularly the `DocItem` and its method `get_file_name`, is crucial for the functionality of `in_white_list`. The `DocItem` class represents a documentation item and provides the `get_file_name` method to retrieve the file name of the documentation item. This method is essential for the `in_white_list` function to perform the first part of its matching criteria. +This function is particularly useful in contexts where certain documentation items need to be validated against a set of approved or recognized items, ensuring that only those items that meet specific criteria are processed further. The reliance on the get_file_name method of the DocItem class highlights the importance of accurately retrieving the file name associated with the documentation item, which is crucial for the comparison against the white list. -**Note**: It is important to ensure that the white list is correctly populated with the necessary conditions for this function to operate as intended. Each condition must accurately specify the `"file_path"` and `"id_text"` to match the documentation items of interest. Additionally, the `DocItem` instances passed to this function should have their attributes, especially `obj_name`, properly set to reflect the actual documentation elements they represent. +**Note**: When using this function, ensure that the white list is properly populated with valid entries before invoking in_white_list. This will guarantee accurate results when checking if a DocItem is included in the white list. -**Output Example**: Assuming a white list contains an entry with `"file_path": "doc_meta_info.py"` and `"id_text": "MetaInfo"`, and a `DocItem` instance with a file name of "doc_meta_info.py" and `obj_name` of "MetaInfo" is passed to `in_white_list`, the function would return `True`, indicating that this `DocItem` is in the white list. +**Output Example**: If the white list contains an entry with "file_path" as "repo_agent/example.py" and "id_text" as "ExampleClass", and the provided DocItem has the same file name and object name, the function will return True. 
Otherwise, it will return False. *** *** ### FunctionDef get_topology(self, task_available_func) -**get_topology**: The function of get_topology is to calculate the topological order of all objects in a repository. +**get_topology**: The function of get_topology is to calculate the topological order of all objects in the repository. -**Parameters**: -- `task_available_func`: A function that determines whether a task is available for processing. It accepts a single argument and returns a boolean value indicating the availability of the task. +**parameters**: The parameters of this Function. +· task_available_func: Callable - A function that determines the availability of tasks based on specific criteria. + +**Code Description**: The get_topology method is designed to orchestrate the process of calculating the topological order of all objects within a repository. It begins by invoking the parse_reference method, which extracts all bidirectional reference relationships among documentation items. This step is crucial as it establishes the dependencies between various objects, allowing for a correct topological sorting. -**Code Description**: -The `get_topology` function is a crucial component within the documentation generation process, specifically designed to organize and manage the sequence in which documentation tasks are executed based on the dependencies among objects in a repository. Initially, the function invokes `parse_reference` to analyze and establish bidirectional reference relationships among objects within the project. This step is fundamental for understanding how objects are interconnected, which directly influences the calculation of the topological order. +Following the parsing of references, the method calls get_task_manager, passing the hierarchical tree of the target repository and the task_available_func as arguments. The get_task_manager function constructs a TaskManager instance that organizes tasks based on the hierarchical relationships of document items. It filters the document items according to the availability criteria defined by task_available_func and sorts them by their depth in the hierarchy, ensuring that leaf nodes are processed first. -Following the establishment of reference relationships, the function proceeds to create a `TaskManager` instance. This is achieved by calling `get_task_manager` with the repository's hierarchical tree and the `task_available_func` parameter. The `task_available_func` plays a significant role here, as it filters the tasks that should be included in the task management process based on specific criteria, such as whether an object's documentation needs to be generated or updated. +The TaskManager created by get_task_manager is responsible for managing and dispatching tasks based on their dependencies. It contains a dictionary of tasks, each associated with its dependencies, and provides methods to add tasks, retrieve the next available task, and mark tasks as completed. -The `TaskManager` instance returned by `get_task_manager` is then returned by `get_topology`. This instance is equipped to manage and dispatch tasks in a multi-threaded environment, ensuring that tasks are executed in an order that respects their dependencies. This is particularly important in complex documentation projects where the generation of certain parts of the documentation depends on the completion of others. +The get_topology method ultimately returns the TaskManager instance, which contains the organized tasks ready for execution. 
This method is called by the first_generate method in the Runner class, where it is used to generate documentation in a specific order based on the calculated topology. The first_generate method ensures that the documentation generation process adheres to the established order of tasks, thereby maintaining the integrity of the documentation. -**Note**: -- The `task_available_func` parameter is critical for the function's operation as it directly influences which tasks are considered for execution. It should be carefully implemented to accurately reflect the conditions under which tasks are available for processing. -- Before calling `get_topology`, it is essential to ensure that the repository's objects and their references are correctly defined and that the repository's hierarchical structure accurately represents the relationships among objects. +**Note**: When utilizing the get_topology method, it is essential to ensure that the task_available_func is correctly defined to accurately reflect the availability of tasks. Additionally, the repository's hierarchical tree must be properly initialized to facilitate the parsing of references and the subsequent task management. -**Output Example**: -The output of `get_topology` is an instance of `TaskManager` populated with tasks corresponding to the documentation items that need to be processed. This `TaskManager` organizes tasks based on their dependencies, ensuring that documentation generation follows a logical and efficient sequence. For example, if object A depends on object B, the task associated with object B will be scheduled for execution before the task associated with object A. +**Output Example**: A possible return value from the get_topology method could be a TaskManager instance containing a series of tasks organized by their dependencies, ready for execution. *** ### FunctionDef _map(self, deal_func) -**_map**: The function of _map is to apply a specified operation to all nodes within a hierarchical structure. +**_map**: The function of _map is to apply a specified operation to all nodes in a hierarchical structure. -**Parameters**: -- **deal_func**: A Callable that defines the operation to be applied to each node. - -**Code Description**: -The `_map` function is designed to traverse a hierarchical structure, specifically a tree, and apply a given function (`deal_func`) to every node within this structure. The traversal is initiated from a specific starting point, referred to as `self.target_repo_hierarchical_tree`, which represents the root of the tree or the top-level node in the hierarchical structure. +**parameters**: The parameters of this Function. +· deal_func: A callable function that defines the operation to be performed on each node. -The core of this function is the `travel` inner function, which is defined to take a single parameter, `now_item`, representing the current node being visited during the traversal. The `travel` function applies the `deal_func` to `now_item`, effectively performing the desired operation on the current node. After applying the function to the current node, the `travel` function iterates over all children of `now_item`, recursively calling itself for each child. This recursive approach ensures that the `deal_func` is applied to every node in the tree, from the root down to the leaf nodes, following a depth-first traversal pattern. +**Code Description**: The _map function is designed to traverse a hierarchical structure represented by the target_repo_hierarchical_tree attribute of the class. 
It takes a single parameter, deal_func, which is a callable function that will be applied to each node (DocItem) in the tree. -The traversal and operation application process is initiated by calling the `travel` function with `self.target_repo_hierarchical_tree` as its argument, setting off the recursive traversal and operation application from the root of the tree. +The function defines an inner function named travel, which is responsible for the recursive traversal of the tree. The travel function takes a single argument, now_item, which represents the current node being processed. Upon invocation, travel first applies the deal_func to now_item, effectively performing the specified operation on that node. After processing the current node, the function iterates over the children of now_item, recursively calling travel for each child node. This ensures that the operation defined by deal_func is applied to every node in the entire hierarchical structure, starting from the root node (self.target_repo_hierarchical_tree) and proceeding down through all levels of the tree. -**Note**: -- The `deal_func` passed to `_map` must be capable of handling the type of objects stored within the hierarchical structure, typically instances of `DocItem` or a similar object with a `children` attribute. -- The hierarchical structure is expected to have a `children` attribute for each node, which is a dictionary where keys are identifiers and values are child nodes. This structure is essential for the traversal logic to function correctly. -- The `_map` function does not return any value. Its purpose is solely to apply the given `deal_func` to each node in the structure. -- Care should be taken when implementing `deal_func` to avoid modifying the structure in a way that could interfere with the traversal process, such as removing nodes currently being traversed. +**Note**: It is important to ensure that the deal_func provided is capable of handling the structure of DocItem objects, as it will be called for each node in the hierarchy. Additionally, care should be taken to avoid infinite recursion by ensuring that the tree structure is well-defined and that each node has a finite number of children. #### FunctionDef travel(now_item) -**travel**: The function of travel is to process a documentation item and recursively process all of its child items. +**travel**: The function of travel is to recursively process a documentation item and its children. -**Parameters**: -- `now_item`: The current documentation item being processed. - -**Code Description**: -The `travel` function is designed to operate on a `DocItem` object, which represents a documentation item within a software project's repository. This function performs two primary actions. Firstly, it processes the current documentation item by calling the `deal_func` function on it. The `deal_func` function is not defined within the provided code snippet, but based on the context, it is responsible for handling or modifying the documentation item in some way, such as generating or updating its documentation. +**parameters**: The parameters of this Function. +· now_item: An instance of DocItem representing the current documentation item to be processed. -After dealing with the current item, the function iterates over all the child items of the current documentation item. This is achieved through a for loop that accesses the `children` attribute of the `now_item`. 
The `children` attribute is a dictionary where each key-value pair represents a child documentation item and its associated key. For each child item, the `travel` function is called recursively. This recursive call ensures that not only the current documentation item but also all items in its hierarchy (i.e., all its descendants) are processed. +**Code Description**: The travel function is designed to perform a recursive traversal of a documentation item represented by the now_item parameter. It first invokes the deal_func function on the current item, which is responsible for handling the specific processing logic associated with that documentation item. Following this, the function iterates over all child items contained within the now_item's children attribute, which is a dictionary mapping child object names to their corresponding DocItem instances. For each child, the travel function calls itself, thereby ensuring that all descendants of the current documentation item are processed in a depth-first manner. -The recursion ends when a documentation item with no children is encountered, as the for loop will not execute and the function will simply return, unwinding the recursion stack. +This recursive approach allows for comprehensive handling of the entire documentation tree structure, starting from the specified now_item and extending to all of its children and their respective descendants. The relationship with the DocItem class is crucial, as the travel function relies on the hierarchical organization established by the DocItem instances, which encapsulate metadata and relationships among documentation items. The effective traversal of this structure is essential for tasks such as documentation generation, analysis, or any operation that requires a complete view of the documentation hierarchy. -**Note**: -It is important to ensure that the `DocItem` objects passed to the `travel` function are correctly instantiated and populated, especially the `children` attribute, to accurately reflect the hierarchical structure of the documentation items. Misrepresentation of the hierarchy could lead to incomplete processing of documentation items. Additionally, since this function involves recursion, care should be taken to avoid creating circular references in the `children` attribute of `DocItem` objects, as this would lead to infinite recursion and a stack overflow error. +**Note**: When using the travel function, it is important to ensure that the now_item passed to it is a valid instance of DocItem and that it has been properly initialized with its children. This will guarantee that the recursive traversal operates correctly and efficiently processes all relevant documentation items. *** *** ### FunctionDef load_doc_from_older_meta(self, older_meta) -**load_doc_from_older_meta**: The function of `load_doc_from_older_meta` is to merge documentation from an older version of meta information into the current version. +**load_doc_from_older_meta**: The function of load_doc_from_older_meta is to merge documentation from an older version of metadata into the current version, updating the status and content of documentation items as necessary. + +**parameters**: The parameters of this Function. +· older_meta: An instance of MetaInfo representing the older version of metadata that contains previously generated documentation. -**Parameters**: -- `older_meta`: An instance of `MetaInfo` representing the older version of meta information that has already generated documentation. 
+**Code Description**: The load_doc_from_older_meta function is designed to integrate documentation from an older version of metadata into the current metadata structure. It begins by logging the action of merging documentation from the older version. The function initializes the root item of the current repository's hierarchical tree and prepares a list to track any items that have been deleted in the new version. -**Code Description**: -The `load_doc_from_older_meta` function is designed to integrate documentation from an older version of a project's meta information into the current version. This process is crucial for maintaining continuity and accuracy in documentation across versions of a project. The function operates in several key steps: +The function defines a nested helper function, find_item, which is responsible for locating a corresponding documentation item in the new version based on the original item from the older version. This function recursively checks the parent items until it finds the root node, ensuring that the correct item is identified even if there are naming conflicts. -1. **Merging Documentation**: It starts by identifying the root item of the current project's hierarchical tree. It then recursively traverses the older meta information's hierarchical tree, attempting to find corresponding items in the current version. If an item from the older version does not exist in the current version, it is marked as deleted. +Another nested function, travel, is defined to traverse the older metadata's hierarchical tree. It utilizes the find_item function to locate each item in the new version. If an item from the older version cannot be found in the new version, it is added to the deleted_items list. If the item is found, its markdown content and status are updated. Additionally, if there is a change in the code content, the item's status is updated to reflect that the code has changed. -2. **Updating Documentation Content**: For items that exist in both versions, the function updates the current item's documentation content (`md_content`), status (`item_status`), and, if applicable, code content. This ensures that the documentation reflects any changes in the code or structure of the project. +After processing the items from the older metadata, the function calls self.parse_reference() to analyze and update the bidirectional reference relationships among documentation items. This ensures that any changes in references are accurately reflected in the new version. -3. **Handling Deleted Items**: The function keeps track of items that exist in the older version but not in the current version, considering them as deleted. This information is crucial for cleaning up or updating references in the documentation. +A second traversal function, travel2, is then defined to check if the references for each item have changed. It compares the new reference names with the old ones and updates the item status accordingly, indicating whether references have been added or removed. -4. **Parsing References**: After merging the documentation, the function calls `parse_reference` to update the bidirectional reference relationships among objects within the project. This step is essential for maintaining the accuracy of references in the documentation. +Finally, the function stores any deleted items from the older metadata in self.deleted_items_from_older_meta for further processing. -5. 
**Detecting Reference Changes**: Finally, the function checks if the references to and from an item have changed between the older and current versions. It updates the item's status based on whether new references have been added or existing references no longer exist.
+This function is called by the diff function in the repo_agent/main.py file, which is responsible for checking changes and determining which documents need to be updated or generated. The diff function creates a new instance of MetaInfo and invokes load_doc_from_older_meta to merge the older metadata into the new instance, ensuring that the documentation is up to date with the latest changes in the source code.
-**Note**:
-- The function assumes that both the older and current versions of meta information are organized into a hierarchical tree structure. This structure is critical for mapping items between versions.
-- The function uses assertions to ensure that certain conditions are met, such as the existence of real names for items and the presence of code content when expected. These assertions help maintain the integrity of the documentation merging process.
-- Deleted items and changes in references are tracked to ensure that the documentation accurately reflects the current state of the project, including any structural or content changes.
+**Note**: When using the load_doc_from_older_meta function, ensure that the older_meta parameter is a valid instance of MetaInfo containing the correct structure and data from the previous version to avoid inconsistencies during the merge process.
-**Output Example**:
-This function does not return a value but updates the current meta information instance (`self`) with merged documentation content, updated item statuses, and a list of deleted items from the older meta. The changes are reflected in the state of the `self` object after the function execution.
+**Output Example**: The function's operation could result in a list of deleted items such as:
+- ["path/to/deleted_item", "DocItemType._function"]
+indicating that a function item at the specified path has been removed in the current version.
#### FunctionDef find_item(now_item)
-**find_item**: The function of find_item is to locate an item in the new version of a metadata structure based on its counterpart in an older version.
+**find_item**: The function of find_item is to locate an item in the new version of metadata based on its original item.
-**Parameters**:
-- `now_item` (DocItem): The original item to be found in the new version of the metadata.
+**parameters**: The parameters of this Function.
+· now_item: DocItem - The original item to be found in the new version of meta.
+
+**Code Description**: The find_item function is designed to traverse a hierarchical structure of documentation items represented by the DocItem class. It takes a single parameter, now_item, which is an instance of DocItem that represents the original documentation item that needs to be located in the updated metadata.
-**Code Description**:
-The `find_item` function is designed to navigate through a hierarchical structure of documentation items, attempting to find a corresponding item in a newer version of the metadata based on an item from an older version. It operates recursively, leveraging the hierarchical nature of the documentation structure where each item can have a parent (except for the root) and children.
+The function begins by checking if the now_item has a parent (father).
If now_item is a root node (i.e., it has no parent), the function immediately returns the root_item, which is a reference to the top-level documentation item. This ensures that root nodes can always be found, as they are the starting point of the hierarchy. -The function starts by checking if the `now_item` is a root item, identified by having no parent (`father` attribute is None). If it is the root, the function returns the `root_item`, assuming the root item is always present and accessible through a nonlocal variable. +If the now_item has a parent, the function recursively calls itself to find the parent item in the new version of the metadata. The result of this recursive call is stored in the variable father_find_result. If the parent item cannot be found (i.e., father_find_result is None), the function returns None, indicating that the original item cannot be located in the new version. -If the item is not the root, the function attempts to find the parent of the current item in the new metadata structure by recursively calling itself with the parent of `now_item`. If the parent cannot be found (`father_find_result` is None), it implies that the current item does not exist in the new structure, and the function returns None. +Next, the function attempts to identify the actual name of the now_item within its parent's children. It iterates through the children of the now_item's father, checking for a match with the now_item itself. This is crucial because there may be multiple items with the same name, and the function needs to ensure it is referencing the correct instance. If a match is found, the real_name variable is set to the corresponding child name. -Upon successfully finding the parent in the new structure, the function then tries to identify the `now_item` among the children of the found parent. This involves matching the `now_item` with the correct child, taking into account the possibility of items having the same name (`obj_name`). It does so by iterating through the children of the original item's parent and comparing each child with `now_item` until a match is found. The real name of the item (`real_name`) is determined during this process. +An assertion is made to ensure that real_name is not None, which would indicate that the now_item was not found among its siblings. Following this, the function checks if the real_name exists in the children of the father_find_result. If it does, the corresponding item is returned as the result_item. If not, the function returns None, indicating that the item could not be found. -Finally, if the real name of the item is found among the children of the found parent in the new structure, the corresponding item is returned. If no matching child is found, the function returns None, indicating that the item does not exist in the new version of the metadata. +The find_item function is called by other functions within the MetaInfo class, specifically travel and travel2. These functions utilize find_item to locate corresponding items in the new version of the metadata while traversing the documentation tree. The travel function focuses on checking if the source code has been modified, while travel2 assesses changes in the references associated with the documentation items. Both functions rely on find_item to ensure they are working with the correct items in the updated structure. -**Note**: -- The function assumes that the root item of the documentation structure is always present and can be accessed through a nonlocal variable `root_item`. 
-- It is crucial to understand that the function relies on the hierarchical relationship between documentation items, specifically the parent-child relationship, to navigate the structure.
-- The function handles cases where items might have the same name but are different entities by ensuring it matches the correct child through direct comparison rather than just name comparison.
+**Note**: When using the find_item function, it is essential to maintain the integrity of the hierarchical relationships within the DocItem instances. This ensures accurate retrieval of items and prevents potential errors during the traversal of the documentation structure.
-**Output Example**:
-Assuming a documentation structure where items are organized hierarchically, and each item is an instance of `DocItem` with a unique relationship to its parent and children, an example return value of `find_item` could be another `DocItem` instance that corresponds to the `now_item` in the new metadata version. If no corresponding item is found, the function would return `None`.
+**Output Example**: A possible return value of the find_item function could be an instance of DocItem representing the corresponding item in the new version of the metadata, or None if the item is not found. For example, if the original item was located successfully, the output might look like this:
+`DocItem(obj_name="my_function", code_start_line=10, code_end_line=50)`
+This indicates that the item was found and provides details about the retrieved DocItem instance.
***
#### FunctionDef travel(now_older_item)
-**travel**: The function of travel is to update the documentation of an item based on its counterpart from an older version of the metadata, and recursively do the same for all its children.
+**travel**: The function of travel is to recursively traverse a documentation item and check for modifications in the source code compared to a newer version.
+
+**parameters**: The parameters of this Function.
+· now_older_item: An instance of DocItem representing the original documentation item that is being checked for modifications.
-**Parameters**:
-- `now_older_item`: This parameter is of type `DocItem`, representing the documentation item from the older version of the metadata that needs to be updated in the current version.
-**Code Description**:
-The `travel` function plays a crucial role in the process of updating documentation items when transitioning from an older version of the project's metadata to a newer one. It ensures that each item's documentation is carried over to the new version if the item still exists, and it updates the documentation status if the source code has changed.
+**Code Description**: The travel function is designed to navigate through a hierarchical structure of documentation items represented by the DocItem class. It takes a single parameter, now_older_item, which is the original documentation item that needs to be compared against its newer version.
+The function begins by calling the find_item function to locate the corresponding item in the new version of metadata. If the item cannot be found (i.e., result_item is None), it indicates that the original item has been deleted or is no longer present in the updated structure. In this case, the function appends the full name and type of the now_older_item to a list called deleted_items and returns, effectively marking the item as deleted.
-Initially, the function attempts to find the current version of the `now_older_item` by calling the `find_item` function.
If the item does not exist in the new version (indicated by `find_item` returning `None`), the item's full name and type are added to a list named `deleted_items`, signifying that the item has been deleted or cannot be found in the new version. +If the corresponding item is found, the function updates the md_content and item_status attributes of result_item with the values from now_older_item. This ensures that the metadata of the found item reflects the original item's content and status. -If the item is found in the new version, the function proceeds to update the markdown content (`md_content`) and the item status (`item_status`) of the found item with those from the older version. This ensures that the documentation content and status are preserved across versions. +Next, the function checks if the now_older_item contains a key "code_content" in its content dictionary. If it does, it asserts that the same key exists in result_item's content. The function then compares the code_content of both items. If they differ, it indicates that the source code has been modified, and the item_status of result_item is updated to DocItemStatus.code_changed, signaling that the documentation needs to be updated to reflect these changes. -Next, the function checks if the `code_content` key exists in the content of both the older and the newer item. If it does, and the code content has changed, the item status is updated to `DocItemStatus.code_changed`, indicating that the source code for this item has been modified and the documentation needs to be updated accordingly. +Finally, the function iterates over the children of now_older_item and recursively calls itself for each child, allowing it to traverse the entire documentation tree and check for modifications at all levels. -The function then recursively calls itself for each child of the `now_older_item`, ensuring that the entire hierarchy of documentation items is updated. This recursive approach allows the function to traverse and update the entire documentation structure starting from a given item. +The travel function is called within the context of the MetaInfo class, specifically in the load_doc_from_older_meta method. It plays a crucial role in ensuring that the documentation accurately reflects the current state of the source code by identifying changes and marking items accordingly. -**Note**: -- It is essential to ensure that the `deleted_items` list is accessible within the scope of the `travel` function, as it tracks items that no longer exist in the new version of the metadata. -- The function relies on the `find_item` function to locate the current version of an item based on its older version. The correct functioning of `find_item` is crucial for the `travel` function to work as intended. -- The preservation of documentation content and status across versions is vital for maintaining the integrity and continuity of the project's documentation as the codebase evolves. +**Note**: When using the travel function, it is essential to ensure that the hierarchical relationships between DocItem instances are maintained. This will facilitate accurate traversal and modification checks, preventing potential inconsistencies in the documentation. -**Output Example**: -There is no direct output from the `travel` function since its primary purpose is to update the documentation items in place. 
However, the effects of running this function include updated documentation items in the new version of the metadata, with preserved markdown content and appropriately updated item statuses. Additionally, the `deleted_items` list would contain the full names and types of any items that could not be found in the new version, indicating items that have been removed or significantly altered. +**Output Example**: A possible outcome of the travel function could be the updating of a DocItem instance's item_status to code_changed if modifications are detected. For example, if the original item was found and its code_content was altered, the output might reflect the updated status: +`result_item.item_status = DocItemStatus.code_changed` +This indicates that the source code has been modified and the documentation needs to be updated accordingly. *** #### FunctionDef travel2(now_older_item) -**travel2**: The function of travel2 is to update the documentation status of an item based on changes in its references and recursively apply these updates to its children in the documentation hierarchy. +**travel2**: The function of travel2 is to recursively traverse and analyze the relationships of documentation items, updating their statuses based on reference changes. + +**parameters**: The parameters of this Function. +· now_older_item: DocItem - The original documentation item that is being analyzed for reference changes. + +**Code Description**: The travel2 function is designed to perform a recursive traversal of documentation items represented by the DocItem class. It takes a single parameter, now_older_item, which is an instance of DocItem that represents the original documentation item to be analyzed. -**Parameters**: -- `now_older_item`: This parameter is an instance of `DocItem`, representing the current item in the older documentation hierarchy that needs to be updated based on the newer version. +The function begins by calling the find_item function to locate the corresponding item in the new version of the metadata based on the now_older_item. If the corresponding item cannot be found (i.e., result_item is None), the function returns early, indicating that there is no further processing required for this item. -**Code Description**: -The `travel2` function begins by attempting to find a corresponding item in the newer version of the documentation hierarchy for the given `now_older_item` using the `find_item` function. If no corresponding item is found, indicating that the `now_older_item` no longer exists in the newer documentation, the function returns immediately without making any updates. +Next, the function retrieves the list of names of items that reference the result_item in the new version by iterating over the who_reference_me attribute of result_item. It constructs a new list of reference names, new_reference_names. It also retrieves the list of reference names from the now_older_item, stored in the who_reference_me_name_list attribute. -If a corresponding item (`result_item`) is found, the function then compares the set of names of items that reference the `result_item` in the new documentation (`new_reference_names`) with the set of names of items that referenced `now_older_item` in the older version (`old_reference_names`). This comparison is used to determine if there have been any changes in the references to the item, which would necessitate an update to its documentation status. 
+The function then compares the two sets of reference names to determine if there have been any changes. If the sets are not equal and the result_item's status is doc_up_to_date, it proceeds to check the relationship between the old and new reference names. If the new references are a subset of the old references, it updates the result_item's status to referencer_not_exist, indicating that some references have been removed. Conversely, if the new references include additional references, it updates the status to add_new_referencer, indicating that new references have been added. -If the sets of reference names are not equal, and the `result_item`'s current documentation status is `DocItemStatus.doc_up_to_date`, indicating that its documentation was previously considered up to date, the function then checks if the new set of references is a subset of the old set. If it is, this implies that some references to the item have been removed, and the `result_item`'s status is updated to `DocItemStatus.referencer_not_exist`. If the new set of references is not a subset of the old set, indicating that new references have been added, the `result_item`'s status is updated to `DocItemStatus.add_new_referencer`. +Finally, the function recursively calls itself for each child of the now_older_item, allowing it to traverse the entire hierarchy of documentation items and apply the same analysis to each child. -After updating the `result_item`'s documentation status based on reference changes, the function recursively calls itself for each child of the `now_older_item`, ensuring that the entire hierarchy of documentation items is updated accordingly. +The travel2 function is closely related to the find_item function, which it uses to locate the corresponding documentation item in the new version. This relationship is crucial for ensuring that the analysis performed by travel2 is based on the most current metadata structure. -**Note**: -- It is crucial to ensure that the `now_older_item` parameter is correctly initialized with an instance of `DocItem` that represents an item from the older documentation hierarchy. -- The function relies on the `find_item` function to locate corresponding items in the newer documentation version. Therefore, the accuracy of `find_item` directly affects the effectiveness of `travel2`. -- The function does not return any value but instead updates the documentation status of items in place. This means that the effects of calling `travel2` are observed through changes in the documentation items themselves, rather than through the function's return value. +**Note**: When using the travel2 function, it is essential to ensure that the documentation items are properly structured and that the relationships between them are accurately maintained. This will facilitate the correct updating of item statuses and ensure that the documentation reflects the current state of the codebase. -**Output Example**: -Since `travel2` does not return a value but updates the documentation items' statuses in place, there's no direct output example. However, after executing `travel2` on an older documentation hierarchy, one might observe that certain `DocItem` instances have their `item_status` attribute updated to reflect changes in their references, such as transitioning from `DocItemStatus.doc_up_to_date` to `DocItemStatus.referencer_not_exist` or `DocItemStatus.add_new_referencer`, depending on whether references to them have been removed or added in the newer documentation version. 
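+The status update at the heart of travel2 can be sketched as follows (a simplified illustration: the attribute and status names follow the description above, but the helper shown is hypothetical rather than the verbatim implementation):
+```
+from repo_agent.doc_meta_info import DocItem, DocItemStatus  # assumed import path
+
+def update_reference_status(result_item: DocItem, now_older_item: DocItem) -> None:
+    """Sketch of the reference-change check described above."""
+    new_names = {ref.get_full_name() for ref in result_item.who_reference_me}
+    old_names = set(now_older_item.who_reference_me_name_list)
+    if new_names != old_names and result_item.item_status == DocItemStatus.doc_up_to_date:
+        if new_names.issubset(old_names):
+            # Some previous references no longer exist.
+            result_item.item_status = DocItemStatus.referencer_not_exist
+        else:
+            # New references have appeared.
+            result_item.item_status = DocItemStatus.add_new_referencer
+```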
+**Output Example**: A possible outcome of the travel2 function could be the updated status of a DocItem instance, such as:
+`result_item.item_status = DocItemStatus.add_new_referencer`
+This indicates that the item has been successfully analyzed and that new references have been added to the documentation item.
***
***
### FunctionDef from_project_hierarchy_path(repo_path)
-**from_project_hierarchy_path**: The function of `from_project_hierarchy_path` is to create a `MetaInfo` object from a project's hierarchical structure stored in a JSON file.
+**from_project_hierarchy_path**: The function of from_project_hierarchy_path is to convert a flattened JSON representation of a project's directory structure into a structured MetaInfo object.
-**Parameters**:
-- `repo_path` (str): The path to the repository containing the `project_hierarchy.json` file.
+**parameters**: The parameters of this Function.
+· repo_path: A string representing the path to the repository where the project_hierarchy.json file is located.
-**Code Description**:
-The `from_project_hierarchy_path` function is designed to parse a project's hierarchical structure from a JSON file named `project_hierarchy.json`, located within a specified repository path. This function is a crucial part of the process of converting a flat representation of a project's files and directories (as stored in the JSON file) into a structured `MetaInfo` object that accurately represents the project's hierarchy.
+**Code Description**: The from_project_hierarchy_path function begins by constructing the path to the project_hierarchy.json file located within the specified repository path. It logs the action of parsing this JSON file. The function then checks if the file exists; if it does not, it raises a NotImplementedError indicating that an invalid operation has been detected.
-Initially, the function constructs the full path to the `project_hierarchy.json` file by joining the provided repository path with the filename. It logs this action for tracking purposes. The existence of the JSON file is then checked, and if the file does not exist, a `NotImplementedError` is raised with a placeholder message.
+Upon confirming the existence of the file, the function opens it for reading with UTF-8 encoding and loads its content into a Python dictionary using the json.load method. This dictionary represents the hierarchical structure of the project, where keys are file names and values are their respective contents.
-Upon confirming the file's existence, the function opens and reads the JSON file, loading its content into a variable. This content is expected to be a dictionary representing the hierarchical structure of the project, where keys are paths to files and directories, and values provide additional metadata or content descriptions.
+The function subsequently calls the from_project_hierarchy_json method of the MetaInfo class, passing the loaded project_hierarchy_json dictionary as an argument. This method is responsible for transforming the JSON representation into a structured MetaInfo object, which encapsulates the project's documentation items in a hierarchical format.
-The core functionality of this function relies on another method, `from_project_hierarchy_json`, which is responsible for parsing the loaded JSON content and constructing the `MetaInfo` object. This method takes the project hierarchy as represented in the JSON and translates it into a structured format that encapsulates the project's files, directories, and their relationships.
The `from_project_hierarchy_json` method is detailed in its own documentation, highlighting its role in parsing the JSON structure, creating directory and file items, and establishing parent-child relationships among them. +The from_project_hierarchy_path function is typically invoked by other methods within the MetaInfo class, such as init_meta_info and from_checkpoint_path, which utilize it to initialize or load the MetaInfo object based on different sources of project structure data. This establishes a clear relationship between from_project_hierarchy_path and its callees, as it serves as a foundational step in constructing the MetaInfo object from a JSON representation. -**Note**: -- The JSON file `project_hierarchy.json` must be present in the root of the specified repository path and correctly formatted to represent the project's hierarchical structure. The function does not handle cases where the JSON structure is malformed or does not accurately reflect the project's file and directory layout. -- The error message "怪" in the `NotImplementedError` is a placeholder and should ideally be replaced with a more descriptive message indicating the absence of the required JSON file. +**Note**: When using this function, ensure that the repo_path parameter accurately points to a valid repository containing the project_hierarchy.json file to avoid errors during execution. -**Output Example**: -While the function itself does not directly return a visual output, it returns a `MetaInfo` object. This object encapsulates the hierarchical structure of the project as derived from the `project_hierarchy.json` file. The structure includes `DocItem` instances representing each file and directory, organized to reflect their relationships and hierarchy within the project. +**Output Example**: A possible appearance of the code's return value could be a MetaInfo object containing a structured representation of the project's documentation items, with a hierarchical tree of DocItem instances reflecting the project's organization. For instance: +``` +MetaInfo( + target_repo_hierarchical_tree=DocItem( + item_type=DocItemType._repo, + obj_name="full_repo", + children={ + "src": DocItem( + item_type=DocItemType._dir, + obj_name="src", + children={ + "main.py": DocItem( + item_type=DocItemType._file, + obj_name="main.py", + ... + ) + } + ) + } + ) +) +``` *** ### FunctionDef to_hierarchy_json(self, flash_reference_relation) -**to_hierarchy_json**: The function of to_hierarchy_json is to convert the document metadata into a hierarchical JSON representation. - -**Parameters**: -- `flash_reference_relation` (bool): Determines whether the latest bidirectional reference relations should be written back to the meta file. Default is False. +**to_hierarchy_json**: The function of to_hierarchy_json is to convert the document metadata to a hierarchical JSON representation. -**Code Description**: -The `to_hierarchy_json` function is designed to create a hierarchical JSON structure that represents the metadata of documents within a project. It operates by first retrieving a list of all file items in the project using the `get_all_files` method. For each file item, it initializes an empty list to hold its hierarchical content. +**parameters**: The parameters of this Function. +· flash_reference_relation: A boolean that determines whether the latest bidirectional reference relations will be included in the output JSON. 
-The core of this function is the `walk_file` nested function, which recursively traverses the document structure starting from a given document item (`DocItem`). For each document item, it constructs a temporary JSON object (`temp_json_obj`) containing the item's content, name, type, markdown content, and item status. The item's type is converted to a string representation using the `to_str` method of the item type.
+**Code Description**: The to_hierarchy_json function is designed to create a structured JSON representation of document metadata by traversing the hierarchical tree of documentation items. It begins by initializing an empty dictionary, hierachy_json, to store the resulting JSON structure. The function retrieves all file items from the documentation hierarchy by calling the get_all_files method, which collects all nodes of type DocItemType._file.
-If the `flash_reference_relation` parameter is set to True, the function also includes information about which items reference the current item (`who_reference_me`) and which items the current item references (`reference_who`), as well as any special reference types (`special_reference_type`). If `flash_reference_relation` is False, it instead includes lists of names for items that reference the current item and items that the current item references, without the special reference type.
+For each file item, the function initializes an empty list, file_hierarchy_content, to hold the metadata of the file and its children. A nested helper function, walk_file, is defined to recursively traverse each file's children. Within walk_file, the current document item (now_obj) is processed to extract its content, name, type, markdown content, and status. If flash_reference_relation is set to True, the function includes detailed reference information, such as who references the current item and whom it references, along with any special reference types. If it is False, only the names of the referencing items are included.
-After processing each document item, it appends the `temp_json_obj` to the `file_hierarchy_content` list. This process is repeated recursively for each child of the current document item, effectively walking through the entire document structure.
+The function appends the constructed JSON object for each file item to file_hierarchy_content and continues to traverse its children. After processing all children, the file_hierarchy_content is added to the hierachy_json dictionary under the full name of the file item, which is obtained by calling the get_full_name method. Finally, the function returns the complete hierachy_json dictionary, representing the hierarchical structure of the document metadata.
-Finally, the function constructs the `hierarchy_json` dictionary, where each key is the full name of a file item (obtained using the `get_full_name` method), and the value is the corresponding `file_hierarchy_content` list. This dictionary represents the hierarchical JSON structure of the document metadata and is returned as the function's output.
+This function is called by the checkpoint method of the MetaInfo class. In this context, it is used to generate a JSON representation of the document hierarchy that is then saved to a specified directory. The checkpoint method utilizes to_hierarchy_json to gather the necessary metadata before writing it to files, ensuring that the documentation structure is preserved and can be referenced later.
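+The following minimal sketch illustrates the traversal just described; the simplified DocItem and its reduced field set are assumptions for illustration, not the project's actual definitions:
+
+```python
+from dataclasses import dataclass, field
+
+@dataclass
+class DocItem:
+    obj_name: str
+    children: dict = field(default_factory=dict)
+    md_content: list = field(default_factory=list)
+
+def to_hierarchy_json(file_items: list) -> dict:
+    hierachy_json = {}  # spelling follows the variable name used in the source
+    for file_item in file_items:
+        file_hierarchy_content = []
+
+        def walk_file(now_obj):
+            # One JSON object per documentation item, then recurse into children.
+            file_hierarchy_content.append(
+                {"name": now_obj.obj_name, "md_content": now_obj.md_content})
+            for child in now_obj.children.values():
+                walk_file(child)
+
+        walk_file(file_item)
+        # The real method keys by get_full_name(); obj_name stands in here.
+        hierachy_json[file_item.obj_name] = file_hierarchy_content
+    return hierachy_json
+```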
-**Note**:
-- The function relies on the integrity and structure of the document items and their relationships within the project. It is crucial that these items are correctly defined and linked for the function to produce accurate and meaningful output.
-- The inclusion of bidirectional reference relations (when `flash_reference_relation` is True) can provide a more detailed view of the document metadata, especially useful for understanding the interconnections between different parts of the project.
+**Note**: When using the to_hierarchy_json function, ensure that the hierarchical structure of documentation items is properly established and that the flash_reference_relation parameter is set according to the desired level of detail in the output.
-**Output Example**:
-The output is a dictionary where each key is the full name of a file item, and the value is a list of dictionaries representing the hierarchical content of that file item. An example output might look like this:
+**Output Example**: A possible output of the to_hierarchy_json function could be a dictionary structured as follows:
```json
{
-    "project/module/file1": [
-        {
-            "name": "Section1",
-            "type": "section",
-            "md_content": "Content of Section1",
-            "item_status": "active",
-            "who_reference_me": ["project/module/file2"],
-            "reference_who": ["project/module/file3"],
-            "special_reference_type": null
-        }
-    ]
+    "folder1/file1.py": [
+        {
+            "name": "file1.py",
+            "type": "file",
+            "md_content": "Content of file1",
+            "item_status": "active",
+            "who_reference_me": ["folder2/file2.py"],
+            "reference_who": ["folder3/file3.py"],
+            "special_reference_type": "typeA"
+        }
+    ],
+    "folder2/file2.py": [
+        {
+            "name": "file2.py",
+            "type": "file",
+            "md_content": "Content of file2",
+            "item_status": "inactive",
+            "who_reference_me": [],
+            "reference_who": ["folder1/file1.py"],
+            "special_reference_type": null
+        }
+    ]
}
```
-This example represents a simplified view of the hierarchical JSON structure for a single file item, including references and content details.
#### FunctionDef walk_file(now_obj)
-**walk_file**: The function of `walk_file` is to traverse a documentation item and its children recursively, building a JSON representation of the documentation structure.
+**walk_file**: The function of walk_file is to recursively traverse a DocItem object and construct a JSON representation of its metadata and relationships.
-**Parameters**:
-- `now_obj`: The current documentation item (`DocItem`) being processed.
+**parameters**: The parameters of this Function.
+· now_obj: An instance of DocItem that represents the current documentation item being processed.
-**Code Description**:
-The `walk_file` function is a recursive method designed to traverse the documentation structure starting from a given documentation item (`DocItem`) and proceeding through all its children. This traversal is aimed at constructing a JSON object that represents the hierarchical structure of documentation items within a project.
+**Code Description**: The walk_file function is designed to build a hierarchical JSON representation of documentation items within a project. It takes a single parameter, now_obj, which is an instance of the DocItem class. The function utilizes nonlocal variables file_hierarchy_content and flash_reference_relation to store the generated JSON structure and manage reference relationships, respectively.
-Upon invocation, the function first accesses several nonlocal variables: `file_hierarchy_content`, which accumulates the JSON representation of the documentation structure, and `flash_reference_relation`, a flag indicating whether to include detailed reference information in the JSON object.
+Initially, the function extracts relevant metadata from the now_obj instance, including its name, type (converted to a string using the to_str method of the DocItemType enumeration), markdown content, and item status. This information is stored in a temporary JSON object, temp_json_obj.
-For the current documentation item (`now_obj`), the function populates a temporary JSON object (`temp_json_obj`) with various attributes of `now_obj`:
-The name of the documentation item (`obj_name`).
-The type of the documentation item, converted to a string representation through the `to_str` method of the `item_type` attribute. This method categorizes the documentation item as a class, function, or other types, enhancing the readability of the JSON structure.
-The markdown content (`md_content`) associated with the documentation item.
-The status of the documentation item (`item_status`), indicating whether the documentation has been generated or needs to be updated.
+If the flash_reference_relation variable is set to True, the function populates the temp_json_obj with additional reference information, including the names of items that reference the current item (who_reference_me) and the items that the current item references (reference_who). It also includes the special reference type associated with the current item. If flash_reference_relation is False, the function instead uses pre-existing name lists (who_reference_me_name_list and reference_who_name_list) to populate the corresponding fields in the JSON object.
-Depending on the state of `flash_reference_relation`, the function either includes detailed reference information (who references the current item and whom the current item references, along with any special reference types) or simply lists the names of these references. This flexibility allows for a more detailed or concise representation of reference relationships based on the needs of the project.
+After constructing the temp_json_obj, it is appended to the file_hierarchy_content list, which accumulates the JSON representations of all processed items.
-After populating the temporary JSON object with the current item's information, the function appends this object to `file_hierarchy_content`, gradually building up the JSON representation of the entire documentation structure.
+The function then iterates through the children of the now_obj instance, recursively calling itself for each child. This ensures that the entire hierarchy of documentation items is traversed and represented in the final JSON structure.
-The function then recursively calls itself for each child of the current documentation item, ensuring that the entire hierarchy beneath the current item is processed and included in the JSON structure.
+The walk_file function is integral to the overall documentation generation process, as it systematically collects and organizes metadata from DocItem instances, facilitating the creation of a comprehensive and structured JSON output that reflects the relationships and statuses of documentation items within the project.
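+A condensed sketch of this branching logic follows; it assumes now_obj exposes the attributes named above, and the enclosing function stands in for the real method context:
+
+```python
+def build_file_json(file_item, flash_reference_relation=False):
+    """Collect per-item JSON objects for one file, mirroring walk_file's logic."""
+    file_hierarchy_content = []
+
+    def walk_file(now_obj):
+        temp_json_obj = {
+            "name": now_obj.obj_name,
+            "type": now_obj.item_type.to_str(),
+            "md_content": now_obj.md_content,
+            "item_status": now_obj.item_status.name,
+        }
+        if flash_reference_relation:
+            # Detailed mode: resolve referencing items to their full names.
+            temp_json_obj["who_reference_me"] = [
+                ref.get_full_name() for ref in now_obj.who_reference_me]
+            temp_json_obj["reference_who"] = [
+                ref.get_full_name() for ref in now_obj.reference_who]
+            temp_json_obj["special_reference_type"] = now_obj.special_reference_type
+        else:
+            # Lightweight mode: reuse the pre-computed name lists.
+            temp_json_obj["who_reference_me"] = now_obj.who_reference_me_name_list
+            temp_json_obj["reference_who"] = now_obj.reference_who_name_list
+        file_hierarchy_content.append(temp_json_obj)
+        for child in now_obj.children.values():
+            walk_file(child)
+
+    walk_file(file_item)
+    return file_hierarchy_content
+```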
-**Note**:
-- The `walk_file` function is a critical component of the documentation generation process, enabling the construction of a detailed and navigable JSON representation of a project's documentation structure.
-- It is important to ensure that the `DocItem` objects passed to this function are correctly initialized and populated with accurate information, as this directly impacts the quality and accuracy of the generated documentation structure.
-- The use of nonlocal variables `file_hierarchy_content` and `flash_reference_relation` implies that this function is designed to be used within a larger context where these variables are defined and managed, typically within a method of a class that orchestrates the documentation generation process.
+**Note**: When using the walk_file function, ensure that the DocItem instances are properly initialized and that the hierarchical relationships are correctly established. This will guarantee accurate representation in the generated JSON structure. Additionally, be mindful of the flash_reference_relation variable, as its state will influence the inclusion of reference information in the output.
***
***
### FunctionDef from_project_hierarchy_json(project_hierarchy_json)
-**from_project_hierarchy_json**: The function of `from_project_hierarchy_json` is to construct a `MetaInfo` object from a JSON representation of a project's hierarchical structure.
+**from_project_hierarchy_json**: The function of from_project_hierarchy_json is to parse a JSON representation of a project's hierarchical structure and construct a corresponding MetaInfo object.
+
+**parameters**: The parameters of this Function.
+· project_hierarchy_json: A dictionary representing the hierarchical structure of the project, where keys are file names and values are their respective contents.
-**Parameters**:
-- `project_hierarchy_json` (dict): A dictionary representing the hierarchical structure of a project, where keys are file paths and values are content descriptions of those files.
+**Code Description**: The from_project_hierarchy_json function is responsible for transforming a JSON representation of a project's directory and file structure into a structured MetaInfo object. It begins by retrieving the current project settings using the SettingsManager class. The function initializes a target_meta_info object, which serves as the root of the hierarchical tree structure, represented by a DocItem instance.
-**Code Description**:
-The `from_project_hierarchy_json` function is responsible for parsing a JSON object that represents the hierarchical structure of a project and constructing a `MetaInfo` object that encapsulates this structure. The function starts by creating an instance of `MetaInfo`, initializing it with a root `DocItem` representing the full repository.
+The function then iterates over each file in the provided project_hierarchy_json. For each file, it checks if the file exists in the target repository and whether it has content. If the file does not exist or is empty, it logs an informational message and continues to the next file.
-The function iterates over each item in the `project_hierarchy_json`, using a progress bar (`tqdm`) to visually indicate the progress of parsing. For each file in the project hierarchy, it first checks if the file exists within the repository path specified in the configuration (`CONFIG["repo_path"]`). If the file does not exist or is empty, it logs this information and skips to the next file.
+For valid files, the function splits the file name into its directory components and navigates through the hierarchical structure, creating DocItem instances for directories and files as necessary. It ensures that the parent-child relationships are established correctly within the tree structure.
-For files that do exist and contain content, the function constructs a hierarchical path (`recursive_file_path`) by splitting the file path. It then traverses or constructs the necessary directory structure within the `MetaInfo` object to place the file correctly within the hierarchy. This involves creating `DocItem` instances for directories (`_dir`) and files (`_file`) as needed, ensuring that parent-child relationships are correctly established.
+After constructing the tree, the function processes the content of each file, which is expected to be a list of documentation items. It creates DocItem instances for each item, populating their attributes based on the content provided. The function also identifies potential parent-child relationships among these documentation items based on their code ranges.
-After constructing the directory and file structure, the function parses the content of each file. It asserts that the file content is a list, indicating a collection of documentation items (e.g., classes, functions). For each item in the file content, it creates a `DocItem` instance, setting various attributes based on the content, such as the item's name, markdown content, code start and end lines, and any references it has or is referenced by.
+Finally, the function invokes the change_items helper function to update the item types of the documentation items based on their content type (e.g., class, function). It concludes by parsing the tree paths and checking the depth of the hierarchical structure before returning the fully constructed target_meta_info object.
-The function also handles finding potential parent items for each documentation item based on code containment logic, ensuring that the hierarchical structure reflects the logical structure of the code. It resolves name duplication issues by renaming items as necessary.
+This function is called by several other methods within the MetaInfo class, including init_meta_info, from_checkpoint_path, and from_project_hierarchy_path. Each of these methods utilizes from_project_hierarchy_json to initialize or load the MetaInfo object based on different sources of project structure data.
-Finally, the function calls `parse_tree_path` and `check_depth` on the root `DocItem` to finalize the hierarchical structure, calculating the depth of each item and parsing the tree path for each item. The constructed `MetaInfo` object, now fully populated with the project's hierarchical structure, is returned.
+**Note**: When using this function, ensure that the project_hierarchy_json parameter accurately reflects the project's directory and file structure to avoid inconsistencies in the generated MetaInfo object.
-**Note**:
-- The JSON representation of the project hierarchy must accurately reflect the file and directory structure of the project for this function to work correctly.
-- The function assumes that the `CONFIG["repo_path"]` is correctly set to the root of the repository being parsed.
-- Name duplication handling ensures that each `DocItem` within the same parent has a unique name, even if this requires renaming some items.
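+To make the tree-building step concrete, here is a minimal sketch of folding flattened file paths into nested items; the simplified DocItem is an illustrative stand-in, not the real class:
+
+```python
+from dataclasses import dataclass, field
+
+@dataclass
+class DocItem:
+    obj_name: str
+    item_type: str = "_dir"
+    children: dict = field(default_factory=dict)
+
+def build_tree(file_names):
+    root = DocItem(obj_name="full_repo", item_type="_repo")
+    for file_name in file_names:
+        now = root
+        parts = file_name.split("/")
+        for depth, part in enumerate(parts):
+            if part not in now.children:
+                is_last = depth == len(parts) - 1
+                now.children[part] = DocItem(
+                    obj_name=part,
+                    item_type="_file" if is_last else "_dir")
+            now = now.children[part]
+    return root
+
+# build_tree(["src/main.py", "src/utils/helpers.py"]) yields a nested
+# _repo -> _dir -> _file structure like the output example below.
+```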
+**Output Example**: A possible appearance of the code's return value could be a MetaInfo object containing a structured representation of the project's documentation items, with a hierarchical tree of DocItem instances reflecting the project's organization. For instance:
+```
+MetaInfo(
+    target_repo_hierarchical_tree=DocItem(
+        item_type=DocItemType._repo,
+        obj_name="full_repo",
+        children={
+            "src": DocItem(
+                item_type=DocItemType._dir,
+                obj_name="src",
+                children={
+                    "main.py": DocItem(
+                        item_type=DocItemType._file,
+                        obj_name="main.py",
+                        ...
+                    )
+                }
+            )
+        }
+    )
+)
+```
+#### FunctionDef change_items(now_item)
+**change_items**: The function of change_items is to recursively update the item type of a DocItem based on its content type and its relationship with its parent item.
-**Output Example**:
-The function returns a `MetaInfo` object that encapsulates the hierarchical structure of the project, with `DocItem` instances representing files, directories, and documentation items such as classes and functions, all organized according to their logical and physical structure within the project.
-#### FunctionDef code_contain(item, other_item)
-**Function Name**: code_contain
+**parameters**: The parameters of this Function.
+· now_item: An instance of DocItem representing the current documentation item being processed.
-**Function Purpose**: The function `code_contain` determines whether one code segment is contained within another based on their start and end lines.
+**Code Description**: The change_items function is designed to traverse a hierarchy of DocItem instances and update their item types according to specific rules. It first checks that the provided now_item is not of type _file. If the item is a class definition (ClassDef), it updates the item type to _class. If it is a function definition (FunctionDef), it sets the item type to _function. Additionally, if the parent item (father) of now_item is classified as a _class, it further refines the item type to _class_function. Conversely, if the parent item is either a _function or a _sub_function, the item type is updated to _sub_function.
-**Parameters**:
-- `item`: The first code segment, which is checked to see if it contains the second code segment.
-- `other_item`: The second code segment, which is checked to see if it is contained within the first code segment.
+The function then iterates over all child items of now_item, recursively calling change_items on each child to ensure that the entire hierarchy is processed and updated accordingly. This recursion allows for a comprehensive update of item types throughout the documentation structure.
-**Code Description**:
-The `code_contain` function takes two parameters, `item` and `other_item`, each representing a code segment with properties `code_end_line` and `code_start_line`. These properties indicate the starting and ending lines of the code segments in a file.
+The change_items function relies on the DocItemType enumeration to define the various types of documentation items, ensuring that each item is categorized correctly based on its context within the codebase. The relationship with the DocItem class is crucial, as change_items operates on instances of DocItem, modifying their attributes based on the defined logic.
-The function first checks if both the `code_end_line` and `code_start_line` of `other_item` are equal to those of `item`. If this condition is true, it means that both code segments are exactly the same in terms of their line range, and the function returns `False`, indicating that `other_item` is not contained within `item` but rather they are identical.
+**Note**: It is important to ensure that the now_item passed to the change_items function is properly initialized and represents a valid documentation item within the hierarchy. The function assumes that the content attribute of now_item contains the necessary information to determine its type, and any modifications made will affect the documentation generation process.
+***
+#### FunctionDef code_contain(item, other_item)
+**code_contain**: The function of code_contain is to determine whether one code item's line range contains the other.
-Next, the function checks if the `code_end_line` of `other_item` is less than the `code_end_line` of `item` or if the `code_start_line` of `other_item` is greater than the `code_start_line` of `item`. If either of these conditions is true, it implies that `other_item` does not fall within the line range of `item`, and the function returns `False`.
+**parameters**: The parameters of this function are as follows:
+· parameter1: item - The first code item, carrying start- and end-line information.
+· parameter2: other_item - The second code item, carrying start- and end-line information.
-If neither of the above conditions is met, it indicates that `other_item` is indeed contained within the line range of `item`, and the function returns `True`.
+**Code Description**: The code_contain function compares the line ranges of two code items. It first checks whether the end line and start line of the two items are exactly identical; if so, it returns False, since two identical ranges do not count as containment. Next, it checks whether the end line of other_item is smaller than the end line of item, or the start line of other_item is greater than the start line of item; if either condition holds, it returns False. Finally, if none of these conditions is met, the function returns True, indicating that the range of other_item encloses the range of item.
-**Note**:
-This function is useful in scenarios where there is a need to determine the containment relationship between two segments of code, especially in tools related to code analysis, refactoring, or documentation generation. It assumes that the line numbers are accurately represented and that a smaller line number corresponds to an earlier position in the code.
+**Note**: When using this function, make sure that both the item and other_item objects carry code_start_line and code_end_line attributes, to avoid runtime errors.
-**Output Example**:
-- If `item` represents a code segment from lines 1 to 10 and `other_item` represents a code segment from lines 2 to 9, calling `code_contain(item, other_item)` will return `True`.
-- If `item` represents a code segment from lines 1 to 5 and `other_item` also represents a code segment from lines 1 to 5, calling `code_contain(item, other_item)` will return `False`, indicating they are identical rather than one containing the other.
+**Output Example**: If item has code_start_line 15 and code_end_line 18, while other_item has code_start_line 10 and code_end_line 20, the function returns True, indicating that the range of other_item encloses that of item.
***
***
diff --git a/markdown_docs/repo_agent/file_handler.md b/markdown_docs/repo_agent/file_handler.md
index 33bb352..7f5a586 100644
--- a/markdown_docs/repo_agent/file_handler.md
+++ b/markdown_docs/repo_agent/file_handler.md
@@ -1,373 +1,392 @@
## ClassDef FileHandler
-**FileHandler**: The function of FileHandler is to manage file operations related to a repository, including reading, writing, and analyzing file content, as well as generating file and repository structures for documentation purposes.
-
-**Attributes**:
-- `file_path`: The relative path of the file within the repository.
-- `repo_path`: The path to the root of the repository.
-- `project_hierarchy`: The path to the project hierarchy JSON file, which is used to store the structure of the project.
-
-**Code Description**:
-The FileHandler class is designed to facilitate various file operations within a software repository. It provides functionalities such as reading the content of files, writing content to files, extracting code information, and generating documentation structures based on the code.
-
-- The `read_file` method reads the content of the file specified by `file_path` and returns it as a string.
-- The `get_obj_code_info` method extracts information about a code object, such as a function or class, from a file. It returns a dictionary containing details like the type of the code object, its name, start and end line numbers, parameters, and whether it contains a return statement.
-- The `write_file` method writes a given content to a specified file path within the repository. It ensures the creation of necessary directories.
-- The `get_modified_file_versions` method retrieves the current and previous versions of a modified file, aiding in the analysis of changes.
-- The `get_end_lineno` method calculates the end line number of a code object, which is useful for documentation and analysis purposes.
-- The `add_parent_references` method enriches the abstract syntax tree (AST) by adding parent references to each node, facilitating hierarchical analysis of code objects.
-- The `get_functions_and_classes` method parses the content of a file to identify and list all functions and classes, including their parameters and hierarchical relationships.
-- The `generate_file_structure` method generates a structured representation of the functions and classes within a file, which can be used for documentation.
-- The `generate_overall_structure` method compiles the structures of all files within the repository that are not ignored by `.gitignore` or specified to be jumped over. It is crucial for generating comprehensive documentation of the repository.
-- The `convert_to_markdown_file` method converts the structured representation of a file into markdown format, which is ready for documentation purposes.
-
-This class is utilized in various parts of the project to handle file-related operations, especially in the context of generating and updating documentation. For instance, it is used to initialize meta information about the repository, process file changes, and generate documentation for individual code objects. Its functionalities support the automated generation of documentation by analyzing the codebase, identifying changes, and structuring the information in a human-readable format.
-
-**Note**:
-- It is important to ensure that the `repo_path` and `file_path` are correctly set to reflect the actual structure of the repository.
-- The methods that parse and analyze code (like `get_functions_and_classes`) rely on the Python AST module, which means they are primarily designed to work with Python source files.
-
-**Output Example**:
-An example output of the `get_obj_code_info` method might look like this:
+# Class `FileHandler`
+
+The `FileHandler` class provides a set of methods to interact with files within a Git repository, specifically for handling changes, reading file contents, extracting code information, and writing back changes to the repository. This class allows for tasks such as retrieving modified file versions, extracting function and class structures from code, and generating project file structures using Abstract Syntax Tree (AST) parsing.
+
+## Methods Overview
+
+### `__init__(self, repo_path, file_path)`
+Initializes a `FileHandler` instance with the given repository and file path.
+
+#### Parameters:
+- `repo_path` (str): The absolute path to the Git repository.
+- `file_path` (str): The relative path of the file within the repository.
+
+### `read_file(self)`
+Reads the contents of the file specified by `file_path`.
+
+#### Returns:
+- `str`: The content of the current file.
+
+### `get_obj_code_info(self, code_type, code_name, start_line, end_line, params, file_path=None)`
+Retrieves detailed information about a given code object (e.g., function or class) in the file.
+
+#### Parameters:
+- `code_type` (str): The type of the code object (e.g., 'FunctionDef', 'ClassDef').
+- `code_name` (str): The name of the code object.
+- `start_line` (int): The starting line number of the code object.
+- `end_line` (int): The ending line number of the code object.
+- `params` (list): A list of parameters associated with the code object.
+- `file_path` (str, optional): The path to the file containing the code object. Defaults to `None`, in which case the `file_path` provided during initialization is used.
+
+#### Returns:
+- `dict`: A dictionary containing information about the code object, including its content, line numbers, and parameters.
+
+### `write_file(self, file_path, content)`
+Writes the provided content to a file at the specified path.
+
+#### Parameters:
+- `file_path` (str): The relative path of the file to write to.
+- `content` (str): The content to write into the file.
+
+### `get_modified_file_versions(self)`
+Retrieves the current and previous versions of a modified file.
+
+#### Returns:
+- `tuple`: A tuple containing:
+  - `current_version` (str): The content of the current version of the file.
+  - `previous_version` (str): The content of the previous version of the file (from the last Git commit).
+
+### `get_end_lineno(self, node)`
+Gets the end line number of a given AST node.
+
+#### Parameters:
+- `node`: The AST node for which to determine the end line number.
+
+#### Returns:
+- `int`: The end line number of the node, or `-1` if no line number is available.
+
+### `add_parent_references(self, node, parent=None)`
+Recursively adds a reference to the parent node for all child nodes in an Abstract Syntax Tree (AST).
+
+#### Parameters:
+- `node`: The AST node to start from.
+- `parent` (optional): The parent node, which defaults to `None`.
+
+#### Returns:
+- `None`
+
+### `get_functions_and_classes(self, code_content)`
+Extracts all functions, classes, and their parameters from a given code content, including hierarchical relationships.
+
+#### Parameters:
+- `code_content` (str): The code content to parse.
+
+#### Returns:
+- `list`: A list of tuples, each containing:
+  - The type of the node (e.g., `FunctionDef`, `ClassDef`),
+  - The name of the node,
+  - The starting line number,
+  - The ending line number,
+  - The list of parameters (if any).
+
+### `generate_file_structure(self, file_path)`
+Generates the file structure of a given file, including all functions, classes, and their parameters.
+
+#### Parameters:
+- `file_path` (str): The relative path of the file to process.
+
+#### Returns:
+- `list`: A list of dictionaries, each containing code information for a function or class in the file.
+
+### `generate_overall_structure(self, file_path_reflections, jump_files)`
+Generates the overall file structure for a repository, parsing all relevant files and skipping files that are either ignored or not staged.
+
+#### Parameters:
+- `file_path_reflections` (dict): A dictionary mapping file paths to their corresponding reflections (for handling fake files or renamed files).
+- `jump_files` (list): A list of files to skip during processing.
+
+#### Returns:
+- `dict`: A dictionary representing the overall structure of the repository, with file paths as keys and lists of code object information as values.
+
+### `convert_to_markdown_file(self, file_path=None)`
+Converts the content of a file to markdown format.
+
+#### Parameters:
+- `file_path` (str, optional): The relative path of the file to convert. If not provided, the default `file_path` will be used.
+
+#### Returns:
+- `str`: The content of the file in markdown format.
+
+#### Raises:
+- `ValueError`: If no file object is found for the specified file path.
+
+---
+
+## Usage Example
+
```python
-{
-    'type': 'FunctionDef',
-    'name': 'my_function',
-    'md_content': [],
-    'code_start_line': 10,
-    'code_end_line': 20,
-    'params': ['param1', 'param2'],
-    'have_return': True,
-    'code_content': 'def my_function(param1, param2):\n    return param1 + param2',
-    'name_column': 4
-}
+# Initialize the FileHandler with the repository path and file path
+file_handler = FileHandler(repo_path="/path/to/repo", file_path="src/example.py")
+
+# Read the content of the file
+file_content = file_handler.read_file()
+
+# Get code information for a function named 'example_function'
+code_info = file_handler.get_obj_code_info(
+    code_type="FunctionDef",
+    code_name="example_function",
+    start_line=10,
+    end_line=20,
+    params=["param1", "param2"]
+)
+
+# Write new content to the file
+file_handler.write_file(file_path="src/example.py", content="new content")
+
+# Get the current and previous versions of the modified file
+current_version, previous_version = file_handler.get_modified_file_versions()
+
+# Generate the file structure for a given file
+file_structure = file_handler.generate_file_structure(file_path="src/example.py")
+
+# Generate the overall file structure for the repository, skipping specified files
+repo_structure = file_handler.generate_overall_structure(file_path_reflections={}, jump_files=["skip_file.py"])
+
+# Convert the file content to markdown
+markdown_content = file_handler.convert_to_markdown_file(file_path="src/example.py")
+· file_path: This parameter is the path to a specific file within the repository. It should be a path relative to the root directory of the repository. -**Parameters**: -- **repo_path**: The path to the root directory of the repository. This is the base path where the repository resides on the filesystem. -- **file_path**: The path to a specific file within the repository. This path is relative to the repository's root directory. +**Code Description**: The __init__ method serves as the constructor for the FileHandler class. It initializes the instance by setting two attributes: `file_path` and `repo_path`. The `file_path` attribute is assigned the value of the `file_path` parameter, which is intended to be relative to the root directory of the repository. The `repo_path` attribute is similarly assigned the value of the `repo_path` parameter, establishing a reference to the repository's location. -**Code Description**: -The `__init__` method is a constructor for the `FileHandler` class. It takes two arguments: `repo_path` and `file_path`. These parameters are essential for setting up the FileHandler object to work with files within a specific repository. +Additionally, the method retrieves the current project settings by invoking the `get_setting` method from the SettingsManager class. This call ensures that the FileHandler instance has access to the latest configuration settings defined for the project. The retrieved settings are then used to construct the `project_hierarchy` attribute, which combines the target repository path with the hierarchy name specified in the project settings. This hierarchical structure is essential for managing files and directories within the project context. -- `self.file_path` is assigned the value of `file_path`, which represents the path to a file relative to the root of the repository. This path is used by other methods of the `FileHandler` class to access or manipulate the specified file. -- `self.repo_path` is assigned the value of `repo_path`, which is the absolute path to the root directory of the repository. This path is crucial for constructing absolute paths to files and directories within the repository. -- `self.project_hierarchy` is constructed using `os.path.join` to concatenate `repo_path`, a configuration value `CONFIG["project_hierarchy"]`, and the filename `"project_hierarchy.json"`. This results in the absolute path to a JSON file that presumably contains information about the project's hierarchical structure. The `CONFIG` variable is assumed to be a global or otherwise accessible dictionary that contains configuration settings for the project, including the relative path to where project hierarchy information is stored. +The relationship with the SettingsManager is critical, as it centralizes the configuration management for the project. By utilizing the `get_setting` method, the FileHandler class ensures that it operates with the most up-to-date settings, which may include paths, logging configurations, and other project-specific parameters. This design promotes consistency and reduces the risk of errors that could arise from hardcoded values or outdated configurations. -**Note**: -- It is important to ensure that `repo_path` is an absolute path to avoid any ambiguity or errors when accessing files within the repository. -- The `file_path` should be carefully provided relative to the repository's root directory to ensure correct file access and manipulation. 
-- The `CONFIG` dictionary must be properly configured and accessible within the scope of this method, specifically containing a valid path under the key `"project_hierarchy"`, for the `self.project_hierarchy` attribute to be correctly set up. This configuration is essential for the `FileHandler` to locate and interact with the project hierarchy information.
+**Note**: It is important to ensure that the SettingsManager is properly configured before instantiating the FileHandler class. Any misconfiguration in the settings may lead to runtime errors or unexpected behavior when accessing the project hierarchy or file paths.
***
### FunctionDef read_file(self)
-**read_file**: The function of read_file is to read and return the content of a file associated with the FileHandler instance.
+**read_file**: The function of read_file is to read the content of the file currently being changed.
-**Parameters**: This function does not take any parameters apart from the implicit `self` parameter, which represents the instance of the `FileHandler` class through which the function is called.
+**parameters**: This function takes no parameters.
-**Code Description**: The `read_file` function is a crucial component of the FileHandler class, designed to handle the reading of file contents within a repository. It constructs an absolute file path by combining the repository path (`repo_path`) and the relative file path (`file_path`) stored within the FileHandler instance. The function then opens the file in read mode with UTF-8 encoding to ensure compatibility with a wide range of text formats and reads its entire content into a string. Finally, it returns this string to the caller.
+**Code Description**:
+The read_file function reads the content of the file at the specified path. It first combines the repository path (repo_path) and the file path (file_path) into an absolute file path (abs_file_path) using os.path.join. It then opens the file in read-only mode and reads its entire content using UTF-8 encoding. Once reading is complete, the function returns the file content as a string.
-This function plays a significant role in the project, particularly in scenarios where file contents need to be processed or analyzed. For instance, in the `Runner` class's `add_new_item` method, `read_file` is used to retrieve the content of a newly added file so that its structure can be analyzed and documented. Similarly, in the `process_file_changes` method, it is employed to fetch the content of files that have been modified, allowing the system to identify structural changes and update documentation accordingly. These use cases underscore the function's importance in enabling dynamic documentation generation and update processes based on file content within the project.
+Within the project, the read_file function is called from several places. Specifically, it is invoked by the add_new_item and process_file_changes methods in repo_agent/runner.py. The add_new_item method uses read_file to obtain a file's source code so that the functions and classes it contains can be extracted and the corresponding documentation generated. The process_file_changes method calls read_file while handling file changes, retrieving the code of the entire Python file in order to analyze what has changed. This shows that read_file plays a key role in file processing and documentation generation.
-**Note**: It is important to ensure that the `repo_path` and `file_path` attributes of the FileHandler instance are correctly set before calling `read_file`. The function assumes these paths are valid and will raise an error if the constructed file path does not exist or is inaccessible.
+**Note**: When using this function, make sure the provided repo_path and file_path are valid paths to avoid file reading errors.
-**Output Example**:
-Assuming the file located at the constructed absolute file path contains the text "Hello, world!", the `read_file` function would return:
-```
-"Hello, world!"
-```
+**Output Example**: Assuming the file content is "Hello, World!", the return value of this function will be the string "Hello, World!".
***
### FunctionDef get_obj_code_info(self, code_type, code_name, start_line, end_line, params, file_path)
-**get_obj_code_info**: The function of `get_obj_code_info` is to retrieve and compile detailed information about a specific segment of code within a file.
+**get_obj_code_info**: The function of get_obj_code_info is to retrieve detailed information about a specific code segment within a file.
-**Parameters**:
-- `code_type` (str): Specifies the type of the code (e.g., function, class).
-- `code_name` (str): The name identifier of the code segment.
-- `start_line` (int): The line number where the code segment begins.
-- `end_line` (int): The line number where the code segment ends.
-- `params` (str): Parameters associated with the code segment.
-- `file_path` (str, optional): The path to the file containing the code. If not provided, a default path associated with the `FileHandler` instance is used.
+**parameters**: The parameters of this Function.
+· code_type: A string representing the type of the code being analyzed.
+· code_name: A string indicating the name of the code object.
+· start_line: An integer specifying the starting line number of the code segment.
+· end_line: An integer specifying the ending line number of the code segment.
+· params: A collection of parameters associated with the code.
+· file_path: An optional string that provides the path to the file. If not specified, it defaults to None.
-**Code Description**:
-The `get_obj_code_info` function is designed to extract and organize information about a code segment, identified by its start and end lines within a file. This function is a critical component of the `FileHandler` class, facilitating the analysis and documentation of code structures within a repository.
+**Code Description**: The get_obj_code_info function is designed to extract and return information about a specific segment of code from a file. It takes in several parameters that define the characteristics of the code segment, including its type, name, and the range of lines it occupies. The function initializes a dictionary, code_info, to store various attributes related to the code segment.
-Upon invocation, the function initializes a dictionary to store the code information, including its type, name, parameters, and line numbers. It then reads the file, either from a specified path or a default path, and extracts the code segment's content. Additionally, it identifies the column position of the code name in its first line and checks for the presence of a return statement within the code segment.
+The function opens the specified file in read mode and reads all lines into a list. It then concatenates the lines from start_line to end_line to form the complete code content. Additionally, it checks for the presence of the code_name in the first line of the specified range to determine its column position. The function also checks if the code segment contains a return statement, which is a common indicator of a function's output.
-The function encapsulates this information in a dictionary, which includes the type, name, starting and ending line numbers, parameters, presence of a return statement, the actual code content, and the column position of the code name. This structured information is crucial for generating documentation and analyzing the code's structure and behavior.
+Finally, the function populates the code_info dictionary with the gathered information, including the type, name, start and end lines, parameters, the presence of a return statement, the code content, and the column position of the code name. The populated dictionary is then returned as the output of the function.
-**Relationship with Callers**:
-- In the `generate_file_structure` method of the `FileHandler` class, `get_obj_code_info` is used to gather information about each code segment identified within a file. This information is then compiled into a list that represents the file's structure, aiding in the documentation process.
-- The `add_new_item` method in the `Runner` class utilizes `get_obj_code_info` to extract information about code segments for new projects. This information is used to generate documentation and update project structure information stored in JSON format.
+**Note**: It is important to ensure that the specified start_line and end_line are valid and within the bounds of the file's total line count to avoid potential errors when reading the file. The file_path parameter should be correctly set to point to the desired file location.
-**Note**:
-- The function assumes that the file containing the code segment is accessible from the provided or default file path.
-- The line numbers (`start_line` and `end_line`) are inclusive and should accurately reflect the code segment's boundaries.
-- The function does not perform syntax or semantic analysis of the code content; it primarily focuses on structural and basic behavioral aspects (e.g., presence of a return statement).
-
-**Output Example**:
-```python
+**Output Example**: A possible return value of the function could look like this:
{
  "type": "function",
-  "name": "example_function",
+  "name": "calculate_sum",
  "md_content": [],
  "code_start_line": 10,
-  "code_end_line": 20,
-  "params": "param1, param2",
-  "have_return": True,
-  "code_content": "def example_function(param1, param2):\n    return param1 + param2",
+  "code_end_line": 15,
+  "params": ["a", "b"],
+  "have_return": true,
+  "code_content": "def calculate_sum(a, b):\n    return a + b\n",
  "name_column": 4
}
-```
-This example output represents the information dictionary for a hypothetical function `example_function`, detailing its structure and content within a file.
***
### FunctionDef write_file(self, file_path, content)
-**write_file**: The function of write_file is to write content to a file at a specified path.
+**write_file**: The function of write_file is to write content to a file at the specified path.
-**Parameters**:
-- **file_path** (str): The relative path of the file where the content will be written.
-- **content** (str): The content to be written to the file.
+**parameters**: The parameters of this function are as follows:
+· parameter1: file_path (str) - The relative path of the file.
+· parameter2: content (str) - The content to write into the file.
-**Code Description**:
-The `write_file` function is designed to facilitate the writing of content to a file within a repository. It first checks if the provided `file_path` is an absolute path (i.e., starts with "/"). If it is, the leading "/" is removed to ensure the path is relative. This is crucial because the function constructs an absolute path by joining the `file_path` with a base path stored in `self.repo_path`, which represents the root directory of the repository.
-
-After constructing the absolute file path, the function ensures that the directory structure needed to accommodate the file exists. This is done using `os.makedirs`, with `exist_ok=True` to avoid raising an error if the directory already exists. Finally, the content is written to the file using `open` with write ("w") mode and specifying UTF-8 encoding to support a wide range of characters.
+**Code Description**: The write_file function writes the specified content to the given file path. First, it checks whether file_path is an absolute path; if so, it strips the leading slash to ensure that file_path is relative. Next, it combines repo_path and file_path into an absolute path, abs_file_path, using os.path.join, and uses os.makedirs to ensure that the directory for this path exists, creating it if necessary. The function then opens the file in write mode and writes the content to it using UTF-8 encoding.
-This function plays a critical role in the project's file management system, particularly in the context of generating and updating documentation. It is called in two scenarios within the project:
-1. **Adding New Items**: When new projects are added to the JSON file and corresponding documentation needs to be generated, `write_file` is used to write markdown content to documentation files. This process involves generating documentation for all objects within a new file and updating the project's JSON structure to reflect these additions.
-2. **Processing File Changes**: When changes are detected in existing files, `write_file` is employed to update the markdown documentation corresponding to these files. This ensures that the project's documentation remains in sync with the codebase, reflecting any additions, deletions, or modifications to the code structures.
+Within the project, the write_file function is called by two methods of the Runner class: add_new_item and process_file_changes. In add_new_item, write_file is used to write the generated Markdown documentation to the specified .md file, ensuring that documentation for newly added items is saved correctly. In process_file_changes, write_file is likewise used to update the Markdown documentation, ensuring that after a file changes, the documentation promptly reflects the latest code structure. These two call sites show that write_file plays an important role in file handling and documentation generation.
-**Note**:
-- It is important to ensure that the `file_path` provided does not lead to unintended directory traversal. The function assumes that the path is relative to `self.repo_path`, and any leading "/" is removed to enforce this assumption.
-- The function overwrites the content of the file if it already exists. Care should be taken to avoid accidental data loss, especially when updating existing files.
+**Note**: When using this function, make sure the provided file_path is a relative path and that repo_path has been set correctly, to avoid file writing errors.
***
### FunctionDef get_modified_file_versions(self)
-**get_modified_file_versions**: The function retrieves the current and previous versions of a modified file within a repository.
-
-**Parameters**: This function does not accept any parameters directly as it is designed to be called on an instance of the FileHandler class, which should have `repo_path` and `file_path` attributes already set.
+**get_modified_file_versions**: The function of get_modified_file_versions is to obtain the current and previous versions of a modified file.
+**parameters**: This function takes no parameters.
-**Code Description**: The `get_modified_file_versions` function is a crucial component of the FileHandler class, designed to interact with a Git repository to fetch the current and previous versions of a specified file. It first initializes a Git repository object using the `repo_path` attribute of the FileHandler instance. This repository object is then used to perform operations related to Git.
+**Code Description**: The get_modified_file_versions function obtains the current and previous versions of the specified file. First, it uses the git library to read the content of the specified file in the current working directory as the current version. Then, it accesses the git commit history to obtain the file's content in the most recent commit as the previous version. If the file does not exist in the previous commit (for example, the file was newly added), the previous version is set to None. Finally, the function returns a tuple containing the current version and the previous version.
-The function proceeds to read the current version of the file directly from the working directory using the `file_path` attribute. It opens the file in read mode, ensuring it reads text by specifying the encoding as "utf-8", and stores the content in `current_version`.
+**Code Description**: get_modified_file_versions函数用于获取指定文件的当前版本和上一个版本。首先,它通过git库获取当前工作目录中指定文件的内容,作为当前版本。然后,它通过访问git提交历史记录,获取该文件在最近一次提交中的内容,作为之前版本。如果文件在之前的提交中不存在(例如,文件是新添加的),则之前版本将被设置为None。最终,该函数返回一个包含当前版本和之前版本的元组。 -To obtain the previous version of the file, the function retrieves the last commit affecting the specified file using the `iter_commits` method of the repository object, limited to the most recent commit (`max_count=1`) that modified the specified file path. If such a commit exists, it attempts to read the file's content from this commit. If the file does not exist in the commit (indicating it might be a new addition), `previous_version` is set to None, acknowledging that there is no previous version available. +该函数在项目中的调用场景主要出现在Runner类的get_new_objects方法中。在该方法中,get_modified_file_versions被用来获取当前和之前版本的文件内容,以便比较这两个版本之间的差异。具体来说,get_new_objects方法利用当前版本和之前版本的信息,解析出新增和删除的对象,从而实现对文件内容变化的检测。 -This function is integral to tracking changes in files within a Git repository, especially for applications that need to analyze or process these changes, such as determining new or deleted objects in the file across versions. +**Note**: 使用该函数时,请确保指定的文件路径正确,并且该文件在git仓库中存在,以避免KeyError异常。 -In the context of its calling situation within the project, specifically by the `get_new_objects` method in `runner.py`, `get_modified_file_versions` provides the necessary data to compare the current and previous versions of a Python file. This comparison is used to identify new and deleted objects (functions or classes) by parsing the content of these versions, highlighting its importance in enabling version-based analysis and processing within the project. - -**Note**: It is essential to ensure that the FileHandler instance calling this function has valid `repo_path` and `file_path` attributes that correctly point to the Git repository and the file of interest, respectively. Additionally, the function assumes that the file exists in the current working directory of the repository and that the repository's history is accessible for retrieving the previous version. - -**Output Example**: -```python -("", "") +**Output Example**: 可能的返回值示例为: +``` +( + "def new_function():\n pass\n", + "def old_function():\n pass\n" +) ``` -In this example, the first element of the tuple is a string containing the entire content of the file as it currently exists in the working directory, and the second element is a string containing the content of the file from the last commit in which it was modified, or `None` if the file was not present in the last commit. *** ### FunctionDef get_end_lineno(self, node) -**get_end_lineno**: The function of `get_end_lineno` is to determine the ending line number of a specified node within a file. - -**Parameters**: -- **node**: The node for which the end line number is to be found. This node is expected to be part of an Abstract Syntax Tree (AST) representing the structure of Python code. - -**Code Description**: -The `get_end_lineno` function plays a crucial role in analyzing Python code by providing the ability to find the end line number of a given node within an AST. This is particularly useful when dealing with nodes that represent constructs such as functions, classes, or any block of code that spans multiple lines. +**get_end_lineno**: get_end_lineno的功能是获取给定节点的结束行号。 -The function starts by checking if the provided node has the attribute `lineno`, which is a standard attribute in AST nodes representing the starting line number. 
If the node does not have this attribute, the function returns `-1`, indicating that the end line number could not be determined. +**parameters**: The parameters of this function. +· parameter1: node - The node whose end line number is to be found. -If the node has a `lineno` attribute, the function initializes `end_lineno` with this value. It then iterates over all child nodes of the given node using `ast.iter_child_nodes(node)`. For each child node, it attempts to find the child's end line number. This is done by first trying to directly access the `end_lineno` attribute of the child. If this attribute is not present, the function recursively calls itself with the child node as the argument to determine the child's end line number. +**Code Description**: The get_end_lineno function obtains the end line number of an AST (abstract syntax tree) node. It first checks whether the given node has a line-number attribute; if not, it returns -1 to indicate the node has no valid line number. Otherwise, it initializes a variable end_lineno to the node's own line number and iterates over all of the node's children. For each child it tries to read the child's end line number, and if the child has none, it calls get_end_lineno recursively on the child. Only when a child's end line number is valid is end_lineno updated, to the maximum of the child's end line number and the current value. The function finally returns the computed end line number. -During the iteration, if a child node has a valid end line number (`child_end > -1`), the function updates `end_lineno` to be the maximum of the current `end_lineno` and the child's end line number. This ensures that `end_lineno` represents the furthest line number reached by the node or any of its descendants. +This function is called from get_functions_and_classes to obtain the end line number of each function or class node. get_functions_and_classes parses the entire code content, walks every node in the AST, and collects information about each function and class (including start and end line numbers) into a list. By calling get_end_lineno, get_functions_and_classes can accurately determine each node's end line number and thus provide more complete node information. -This function is utilized within the project, specifically in the `get_functions_and_classes` method of the `FileHandler` class. In that context, `get_end_lineno` is used to determine the ending line numbers of functions and classes within a Python file. This information, along with the starting line number, name, and parameters of the functions and classes, is compiled into a list of tuples. This list serves as a comprehensive overview of the code structure, facilitating further analysis or manipulation of the code content. +**Note**: When using this code, make sure the node passed in is a valid AST node with the corresponding line-number attribute, to avoid a return value of -1. -**Note**: -- The function assumes that the input `node` is part of an AST generated by the `ast` module. Therefore, it is important to ensure that the node is correctly parsed and represents a valid structure within Python code before calling this function. -- The function's ability to accurately determine the end line number depends on the presence of `lineno` and `end_lineno` attributes in the AST nodes. These attributes are typically available in AST nodes generated for Python code but may vary based on the Python version and the specifics of the AST generation process. - -**Output Example**: -If the function is called with a node representing a Python function that starts on line 10 and ends on line 20, the function would return `20`. If the node does not have a line number, the function would return `-1`. +**Output Example**: If the given node starts at line 10 and its child's end line number is 15, the function returns 15. *** ### FunctionDef add_parent_references(self, node, parent) -**add_parent_references**: The function of add_parent_references is to recursively add a parent reference to each node in an Abstract Syntax Tree (AST). - -**Parameters**: -- **node**: The current node in the AST being processed. - -**parent** (Optional): The parent node of the current node. Defaults to None if the current node is the root of the AST.
+**add_parent_references**: add_parent_references adds a parent reference to every node in an abstract syntax tree (AST). -**Code Description**: -The `add_parent_references` function is a crucial component within the `FileHandler` class, designed to enhance the AST by adding parent references to each node. This augmentation allows for backward navigation within the tree, which is not natively supported by the AST structure provided by Python's `ast` module. +**parameters**: The parameters of this function are as follows: +· parameter1: node - The current node in the AST. +· parameter2: parent - The parent of the current node; defaults to None. -The function operates recursively, starting from a given node (typically the root of the AST) and traversing through all its child nodes. For each child node encountered, the function assigns the current node as its parent (`child.parent = node`) and then calls itself with the child node as the new current node and the current node as the new parent. This process continues until all nodes in the AST have been processed and have had parent references added to them. +**Code Description**: The add_parent_references function traverses the given AST node and gives each node a reference to its parent. It first obtains all children of the current node via ast.iter_child_nodes(node) and assigns the current node to each child's parent attribute; it then calls itself recursively on each child, ensuring that every node in the tree ends up with a correct parent reference. -This function is particularly useful in scenarios where understanding the hierarchical relationship between nodes is necessary. For example, in the context of the `get_functions_and_classes` method within the same `FileHandler` class, `add_parent_references` is invoked to preprocess the AST. This preprocessing step is critical for enabling the subsequent extraction of functions, classes, and their relationships, including hierarchical ones, from the code content being analyzed. By having parent references available, it becomes feasible to navigate the AST in both directions (towards the children and towards the parent), thereby facilitating more complex analyses such as determining the scope of variables or understanding the nesting of functions and classes. +This function is called from the get_functions_and_classes method, whose main job is to parse the given code content, extract all functions and classes along with their parameters, and establish the hierarchical relationships between them. While parsing the AST, add_parent_references is called first so that every node can access its parent's information, which is essential for the subsequent hierarchy analysis. In this way, get_functions_and_classes can accurately build the hierarchy of functions and classes and produce clearer parse results. -**Note**: -- It is important to ensure that the AST passed to this function is correctly formed and represents the structure of the code accurately. Malformed ASTs could lead to incorrect parent references being added or the function failing to process the tree entirely. -- The function modifies the AST in-place by adding parent references to the nodes. Therefore, if the original AST is needed for other purposes without these modifications, it should be cloned before passing it to this function. -- This function does not return any value as it modifies the AST in-place. +**Note**: When using this function, make sure the node passed in is a valid AST node, and be aware that the recursion can overflow the stack on deeply nested ASTs. *** ### FunctionDef get_functions_and_classes(self, code_content) -**get_functions_and_classes**: The function of get_functions_and_classes is to retrieve all functions and classes from a given code content, along with their parameters (if any), and their hierarchical relationships. +**get_functions_and_classes**: get_functions_and_classes retrieves all functions and classes, their parameters (if any), and the hierarchical relationships between them. -**Parameters**: -- **code_content**: The code content of the whole file to be parsed. This is a string containing the source code from which functions and classes are to be extracted.
+**parameters**: The parameters of this function are as follows: +· parameter1: code_content - The code content of the whole file to be parsed. -**Code Description**: -The `get_functions_and_classes` function is designed to parse Python code content and extract a comprehensive list of functions and classes defined within. It utilizes the `ast` module to parse the given code content into an Abstract Syntax Tree (AST). Before extracting functions and classes, it calls `add_parent_references` to enhance the AST by adding parent references to each node, enabling the identification of hierarchical relationships between nodes. +**Code Description**: The get_functions_and_classes function parses the given code content and extracts information about every function and class, including its name, start line, end line, parent node name, and parameter list. It first converts the code content into an abstract syntax tree (AST) with ast.parse, then calls add_parent_references to add a parent reference to each node so that parent information is available during later analysis. -The function iterates over all nodes in the AST using `ast.walk`, filtering for nodes that are instances of `ast.FunctionDef`, `ast.ClassDef`, or `ast.AsyncFunctionDef`. For each of these nodes, it determines the starting line number directly from the node's `lineno` attribute. The ending line number is obtained by calling the `get_end_lineno` method, which recursively finds the maximum line number reached by the node or any of its descendants. +Next, the function walks every node in the AST and checks whether it is a function definition (FunctionDef), a class definition (ClassDef), or an async function definition (AsyncFunctionDef). For each matching node it records the start and end line numbers and extracts the parameter list. All collected information is stored as tuples in a list, which is returned. -Parameters of functions are extracted by iterating over `node.args.args` and collecting the `arg` attribute of each parameter. The function compiles a list of tuples, each representing a function or class. Each tuple contains the type of the node (`FunctionDef`, `ClassDef`, `AsyncFunctionDef`), the name of the node, the starting line number, the ending line number, and a list of parameters. +This function is called in several places, for example by generate_file_structure when generating a file's structure and by add_new_item when handling newly added items. Through get_functions_and_classes, these callers obtain the structural information of the code for further processing and documentation generation. -**Note**: -- The function assumes the input `code_content` is valid Python code. Invalid or syntactically incorrect code may lead to parsing errors. -- The hierarchical relationships are determined based on the AST structure. However, this version of the function does not include the parent node's name in the output tuples due to commented-out sections of the code that would otherwise perform this task. -- The function modifies the AST in-place by adding parent references. If the original AST is needed for other purposes without these modifications, it should be cloned before passing it to this function. +**Note**: When using this function, make sure the code content passed in is valid Python code so that the AST can be parsed and the information extracted correctly. -**Output Example**: -An example output of this function could look like this: -```python +**Output Example**: Given code content containing the following function and class definitions, the return value might look like: [ - ('FunctionDef', 'my_function', 10, 20, ['param1', 'param2']), - ('ClassDef', 'MyClass', 22, 30, []), - ('FunctionDef', 'another_function', 32, 40, ['arg1']) + ('FunctionDef', 'AI_give_params', 86, 95, None, ['param1', 'param2']), + ('ClassDef', 'PipelineEngine', 97, 104, None, []), + ('FunctionDef', 'get_all_pys', 99, 104, 'PipelineEngine', ['param1']) ] -``` -This output indicates that the code contains a function named `my_function` starting at line 10 and ending at line 20 with two parameters, a class named `MyClass` between lines 22 and 30 with no parameters, and another function named `another_function` from line 32 to 40 with one parameter.
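Taken together, add_parent_references, get_end_lineno, and get_functions_and_classes amount to one small AST walk. The following is a minimal, self-contained sketch of the behavior these docs describe; the real methods live on FileHandler and take `self`, so the flat functions and simplified signatures here are illustrative assumptions:

```python
import ast

def add_parent_references(node, parent=None):
    # Attach a parent reference to every child so the hierarchy can be walked upward.
    for child in ast.iter_child_nodes(node):
        child.parent = node
        add_parent_references(child, node)

def get_end_lineno(node):
    if not hasattr(node, "lineno"):
        return -1  # No valid line number (e.g. the Module root).
    end_lineno = node.lineno
    for child in ast.iter_child_nodes(node):
        # Prefer the child's own end_lineno; fall back to recursion.
        child_end = getattr(child, "end_lineno", None) or get_end_lineno(child)
        if child_end > -1:
            end_lineno = max(end_lineno, child_end)
    return end_lineno

DEF_NODES = (ast.FunctionDef, ast.ClassDef, ast.AsyncFunctionDef)

def get_functions_and_classes(code_content):
    tree = ast.parse(code_content)
    add_parent_references(tree)
    results = []
    for node in ast.walk(tree):
        if isinstance(node, DEF_NODES):
            parent = getattr(node, "parent", None)
            parent_name = parent.name if isinstance(parent, DEF_NODES) else None
            params = [arg.arg for arg in node.args.args] if hasattr(node, "args") else []
            results.append((type(node).__name__, node.name, node.lineno,
                            get_end_lineno(node), parent_name, params))
    return results
```

For code like the output example above, this walk yields one tuple per function or class, with the parent name filled in only for nested definitions.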
*** ### FunctionDef generate_file_structure(self, file_path) -**generate_file_structure**: The function of `generate_file_structure` is to generate the file structure for a given file path. - -**Parameters**: -- `file_path` (str): The relative path of the file for which the structure is to be generated. +**generate_file_structure**: generate_file_structure generates the file structure for a given file path. -**Code Description**: -The `generate_file_structure` function is a method of the `FileHandler` class designed to analyze a file's content and extract its structural components, such as functions and classes, including their details like type, name, start and end lines, and parameters. It operates by reading the content of a file located at a specified path relative to the repository's root directory. This is achieved by combining the repository path stored in `self.repo_path` with the provided `file_path`. +**parameters**: The parameters of this function are as follows: +· parameter1: file_path (str): The relative path of the file. -Upon reading the file, the method utilizes another function, `get_functions_and_classes`, to parse the file's content and identify all functions and classes within it. This parsing process results in a list of structures, where each structure contains information about a specific code segment, such as its type (function or class), name, start line, end line, and parameters. +**Code Description**: The generate_file_structure function produces the structure information for the file at the given path. It first opens the file and reads its content, then calls get_functions_and_classes to parse that content and extract the relevant information about every function and class, including its name, start line, end line, and parameter list. The parsed structures are stored as tuples in a list. -For each identified structure, the method calls `get_obj_code_info`, which further processes each structure to compile detailed information, including the code segment's content and additional metadata like the presence of a return statement and the column position of the code name. This information is compiled into a dictionary for each code segment. +After all structures have been collected, the function iterates over them and calls get_obj_code_info to obtain the detailed code information for each object, including its type, name, start and end line numbers, and parameters. All collected object information is finally returned as a list. -The method returns a list of dictionaries, with each dictionary representing the detailed information of a code segment within the file. This list serves as a comprehensive representation of the file's structure, aiding in documentation and analysis tasks. +This function is called by generate_overall_structure to produce the structure information for every file in the target repository. generate_overall_structure iterates over all non-ignored files and calls generate_file_structure on each one, storing the result in the repo_structure dictionary. -**Note**: -- The function assumes that the file exists at the given path relative to the repository's root directory. If the file does not exist, an error will occur. -- The function relies on accurate parsing of the file's content. Therefore, the file content should be valid Python code for the parsing to be successful. -- The detailed information generated by this function is crucial for documentation purposes and for understanding the structure and components of the file.
+**Note**: When using this function, make sure the file path passed in is valid and the file contains valid Python code so that it can be parsed and the information extracted correctly. -**Output Example**: -An example output of this function could be a list of dictionaries, each representing a function or class within the file: -```python +**Output Example**: Given a file whose content contains the following function and class definitions, the return value might look like: [ { - "type": "function", - "name": "example_function", - "start_line": 10, - "end_line": 20, - "params": ["param1", "param2"], - "code_content": "def example_function(param1, param2):\n return param1 + param2", - "parent": "class_name" - }, - { - "type": "class", - "name": "ExampleClass", - "start_line": 5, - "end_line": 25, - "params": [], - "code_content": "class ExampleClass:\n def example_method(self):\n pass", - "parent": None + "function_name": { + "type": "function", + "start_line": 10, + "end_line": 20, + "parent": "class_name" + }, + "class_name": { + "type": "class", + "start_line": 5, + "end_line": 25, + "parent": None + } } ] -``` -This output indicates that the file contains a function named `example_function` with parameters `param1` and `param2`, and a class named `ExampleClass`. Each dictionary includes the type, name, start and end lines, parameters, and the actual code content of the code segment. *** ### FunctionDef generate_overall_structure(self, file_path_reflections, jump_files) -**generate_overall_structure**: The function of `generate_overall_structure` is to obtain the file structure of a target repository by walking through its Abstract Syntax Tree (AST) and identifying all objects, while excluding files specified in `jump_files`. +**generate_overall_structure**: The function of generate_overall_structure is to retrieve the file structure of a target repository by analyzing its contents while excluding certain files based on specified criteria. -**Parameters**: -- `file_path_reflections` (dict): A mapping of file paths that may include "fake" file paths used for reflection in the repository's structure. -- `jump_files` (list): A list of file paths that should not be parsed and are to be treated as non-existent. +**parameters**: The parameters of this Function. +· parameter1: file_path_reflections (dict) - A dictionary mapping original file paths to their reflections, used to identify files that may have been renamed or moved. +· parameter2: jump_files (list) - A list of file names that should be ignored during the processing, as they are not to be parsed. -**Code Description**: -The `generate_overall_structure` function is a method within the `FileHandler` class designed to analyze a repository's file structure, excluding files specified in `jump_files`. It initializes a `GitignoreChecker` instance with the repository path and its `.gitignore` file to filter out files that should be ignored according to the `.gitignore` rules. +**Code Description**: The generate_overall_structure method is designed to construct a comprehensive representation of the file structure within a specified repository. It begins by initializing an empty dictionary called repo_structure, which will ultimately hold the file paths and their corresponding structures. -The function iterates over the files in the repository that are not ignored by `.gitignore`, as determined by the `GitignoreChecker`. During the iteration, it checks if a file is in `jump_files` or ends with a specific substring indicating it is the latest version and should be skipped. If a file meets these conditions, it is ignored, and a message is printed to indicate this action.
+The method instantiates a GitignoreChecker object, which is responsible for checking the repository directory against patterns defined in a .gitignore file. This checker is crucial for filtering out files and folders that should be ignored based on the project's version control settings. -For each file that is not ignored, the function attempts to generate its file structure using the `generate_file_structure` method of the `FileHandler` class. This method analyzes the file's content to extract its structural components, such as functions and classes. If an error occurs during this process, an alert message is printed, and the file is skipped. +The method then utilizes the tqdm library to create a progress bar that reflects the ongoing process of checking files and folders. It iterates over the list of non-ignored files provided by the GitignoreChecker's check_files_and_folders method. For each file, the following checks are performed: -The function accumulates the structures of all processed files in a dictionary, `repo_structure`, mapping file names to their respective structures. This dictionary represents the overall structure of the repository, excluding ignored files and those specified in `jump_files`. +1. If the file is present in the jump_files list, it is skipped, and a message is printed to indicate that the file will not be processed. +2. If the file name ends with a specific substring indicating a "latest version," it is also skipped, with a corresponding message printed to the console. -**Note**: -- The function assumes that the `.gitignore` file is located at the root of the repository and that the repository path is correctly set in the `FileHandler` instance. -- Files in `jump_files` are completely excluded from the analysis, as if they do not exist in the repository. -- The function prints messages to the console to indicate files that are being ignored or skipped, providing feedback on the processing status. +If the file passes these checks, the method attempts to generate its structure by calling the generate_file_structure method, passing the file name as an argument. If an error occurs during this process, it is logged, and the method continues to the next file. -**Output Example**: -An example return value of the `generate_overall_structure` function could be a dictionary where each key is a file name (not ignored and not in `jump_files`) and each value is the file's structure as generated by `generate_file_structure`: -```python +The progress bar is updated to reflect the current file being processed, and once all files have been evaluated, the method returns the repo_structure dictionary, which contains the paths of the files and their respective structures. + +This method is integral to the FileHandler class, as it consolidates the information about the repository's file structure while adhering to the rules defined in the .gitignore file and respecting the files specified in the jump_files list. + +**Note**: It is essential to ensure that the .gitignore file is correctly formatted and accessible to avoid unintended exclusions of files. Additionally, the jump_files list should be accurately populated to ensure that the intended files are ignored during processing. 
+ +**Output Example**: An example output of the generate_overall_structure method might look like this: +``` { - "src/main.py": [ - { - "type": "function", - "name": "main", - "start_line": 1, - "end_line": 10, - "params": ["args"], - "code_content": "def main(args):\n print(args)", - "parent": None - } - ], - "src/utils.py": [ - { - "type": "class", - "name": "Helper", - "start_line": 1, - "end_line": 20, - "params": [], - "code_content": "class Helper:\n def assist(self):\n pass", - "parent": None - } - ] + "src/module1.py": { ... }, # Structure of module1.py + "src/module2.py": { ... }, # Structure of module2.py + "tests/test_module1.py": { ... } # Structure of test_module1.py } ``` -This output indicates that the repository contains two files, `src/main.py` and `src/utils.py`, not ignored by `.gitignore` or listed in `jump_files`, and provides a detailed structure of their contents. +This output indicates that the method has successfully generated the structures for the specified files, with each file path mapped to its corresponding structure representation. *** ### FunctionDef convert_to_markdown_file(self, file_path) -**convert_to_markdown_file**: The function of `convert_to_markdown_file` is to convert the content of a specified file into markdown format based on the project's hierarchical structure stored in a JSON file. - -**Parameters**: -- `file_path` (str, optional): The relative path of the file to be converted. If not provided, a default file path set within the object will be used. +**convert_to_markdown_file**: The function of convert_to_markdown_file is to convert the content of a specified file into markdown format. -**Code Description**: -The `convert_to_markdown_file` function begins by reading the project hierarchy from a JSON file specified by the `project_hierarchy` attribute of the `FileHandler` object. It then checks if a `file_path` is provided; if not, it uses a default path stored in the `FileHandler` object. +**parameters**: The parameters of this Function. +· file_path: (str, optional) The relative path of the file to be converted. If not provided, the default file path will be used. -The function searches the JSON data for the specified `file_path`. If the file path does not exist in the JSON data, it raises a `ValueError`, indicating the absence of the file object in `project_hierarchy.json`. +**Code Description**: The convert_to_markdown_file function is designed to read a file's metadata from a JSON structure and convert it into a markdown representation. The function begins by opening a JSON file that contains the project hierarchy, which is expected to be structured in a way that associates file paths with their corresponding metadata. If the file_path parameter is not provided, the function defaults to using an internal file path attribute. -For the found file object, the function iterates through its contents, which are expected to be structured data representing different code objects (like functions, classes, etc.) within the file. It sorts these objects by their starting line number to maintain their order as in the source file. +The function retrieves the relevant file object from the loaded JSON data using the specified or default file path. If no matching file object is found, it raises a ValueError, indicating that the specified file path does not exist in the project hierarchy. -The function then generates markdown content by iterating over each object. 
It determines the nesting level of each object based on its parent-child relationships, represented in the JSON data, to format the markdown correctly with appropriate heading levels. Special markdown separators are added between top-level objects for better readability. +Once the file object is successfully located, the function initializes an empty string to accumulate the markdown content. It sorts the objects associated with the file based on their starting line numbers in the code. The function then constructs a parent-child relationship mapping for the objects, which is crucial for determining the hierarchy levels in the markdown output. -The markdown content for each object includes its type (e.g., function, class), name, and parameters if applicable, followed by custom markdown content stored in the `md_content` field of each object. +For each object, the function calculates its level in the hierarchy by traversing the parent dictionary. It constructs the markdown string by appending the object's type, name, and parameters, formatted according to its level. The markdown content includes the last piece of markdown content associated with the object, if available. Finally, the function appends a closing separator to the markdown string and returns the complete markdown representation. -This function is integral to the project's documentation process, as seen in its usage within the `Runner` class methods `add_new_item` and `process_file_changes`. In both cases, after updating the project's JSON structure to reflect changes or additions, `convert_to_markdown_file` is called to generate updated markdown documentation for the affected file. This documentation is then written to a markdown file, ensuring that the project's documentation stays in sync with its codebase. - -**Note**: -- The function assumes that the JSON data structure correctly represents the project's file hierarchy and that each code object within the file has been accurately captured in this structure. -- The function's ability to generate accurate markdown documentation depends on the completeness and correctness of the `md_content` field for each code object in the JSON data. - -**Output Example**: -Assuming the JSON data contains information about a Python file with two functions, `func_a` and `func_b`, where `func_a` is the parent of `func_b`, the output might look like this: +**Note**: It is important to ensure that the project_hierarchy.json file is correctly formatted and accessible, as the function relies on this data to perform its operations. Additionally, the function expects the objects within the JSON to have specific attributes such as "type", "name", "params", and "md_content" for proper markdown generation. +**Output Example**: +A possible appearance of the code's return value could be: ``` -# Function func_a(): -This function does something. +# FunctionDef my_function(param1, param2): +This function does something important. + +# AsyncFunctionDef my_async_function(): +This async function handles asynchronous operations. -*** -## Function func_b(): -This function does something else. *** ``` - -This example demonstrates the markdown content with headings indicating the object types and names, followed by their descriptions, and separated by markdown separators for readability. 
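The heading-level logic described above reduces to walking each object's parent chain. Below is a condensed sketch, assuming each object is a dict carrying the documented fields ("type", "name", "params", "md_content") plus an assumed "code_start_line" key, and that parent_dict maps an object's name to its parent's name (field names are inferred from the docs, not confirmed):

```python
def to_markdown(objects, parent_dict):
    markdown = ""
    for obj in sorted(objects, key=lambda o: o["code_start_line"]):
        # Depth in the parent chain determines the markdown heading level.
        level, name = 1, obj["name"]
        while parent_dict.get(name) is not None:
            level += 1
            name = parent_dict[name]
        params = f"({', '.join(obj['params'])})" if obj.get("params") else ""
        markdown += f"{'#' * level} {obj['type']} {obj['name']}{params}:\n"
        if obj.get("md_content"):
            markdown += obj["md_content"][-1] + "\n"  # Latest generated doc for this object.
        markdown += "***\n"  # Separator between objects, as in the example output.
    return markdown
```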
*** diff --git a/markdown_docs/repo_agent/log.md b/markdown_docs/repo_agent/log.md index ef751c5..896d6c3 100644 --- a/markdown_docs/repo_agent/log.md +++ b/markdown_docs/repo_agent/log.md @@ -1,57 +1,45 @@ ## ClassDef InterceptHandler -**InterceptHandler**: The function of InterceptHandler is to intercept log messages from the standard Python logging module and redirect them to the Loguru logger. +**InterceptHandler**: The function of InterceptHandler is to redirect standard logging output to the Loguru logging system. -**Attributes**: This class does not explicitly define custom attributes; it inherits attributes from the logging.Handler class. +**attributes**: The attributes of this Class. +· record: logging.LogRecord - This parameter represents the log record containing all the information pertinent to the event being logged. -**Code Description**: The InterceptHandler class is a custom logging handler designed to integrate the standard Python logging module with the Loguru logging library. This integration is crucial for projects that wish to leverage Loguru's advanced logging capabilities while maintaining compatibility with libraries and modules that use the standard logging module. +**Code Description**: The InterceptHandler class extends the logging.Handler class to facilitate the integration of Python's standard logging module with the Loguru logging system. The primary method of this class is `emit`, which is responsible for processing log records. -The class overrides the `emit` method of the logging.Handler class. The `emit` method is called whenever a log message is emitted by the standard logging module. The method performs the following operations: +When a log record is received, the `emit` method first attempts to map the standard logging level (e.g., DEBUG, INFO, WARNING) to the corresponding Loguru level. If the mapping fails, it defaults to using the numeric level of the log record. This ensures that all log messages are appropriately categorized according to their severity. -1. **Level Conversion**: It attempts to map the log level from the standard logging module (e.g., INFO, WARNING) to the corresponding Loguru level. If the mapping fails (e.g., due to an invalid level name), it defaults to using the numeric log level. +Next, the method identifies the caller of the log message by traversing the call stack. It uses the `inspect.currentframe()` function to obtain the current stack frame and iterates through the frames to find the origin of the log message. This is particularly useful for debugging, as it provides context about where the log message was generated. -2. **Caller Identification**: The method identifies the caller's frame, i.e., the part of the code where the log message originated. This is necessary because Loguru supports automatic capturing of the caller's information, enhancing the log message with contextual details. The identification process involves traversing the call stack until it finds a frame outside of the standard logging module's file. This ensures that the log message accurately reflects the original source of the log call, rather than the InterceptHandler itself. +Finally, the method logs the message using Loguru's logging capabilities, including any exception information if present. The `logger.opt()` method is utilized to set the depth of the stack trace and to include exception details, ensuring that the log output is informative and relevant. -3. 
**Logging to Loguru**: Finally, the method logs the message to Loguru, using the determined log level and including any exception information (`exc_info`) if present. It specifies the depth in the call stack to ensure Loguru captures the correct caller information. +The InterceptHandler is specifically invoked within the `set_logger_level_from_config` function. This function configures the Loguru logger with a specified log level and integrates it with the standard logging module. By calling `logging.basicConfig()` with an instance of InterceptHandler, it effectively redirects all standard logging output to Loguru, allowing for a unified logging approach across the application. This integration is crucial for maintaining consistent logging behavior, especially in applications that utilize both standard logging and Loguru. -The integration with Loguru is facilitated through the `set_logger_level_from_config` function, which configures the logging system at runtime. This function sets the log level based on a configuration, adds the InterceptHandler to the basicConfig of the standard logging module, and ensures that all standard logging messages are intercepted and redirected to Loguru. This approach allows developers to benefit from Loguru's enhanced logging features, such as rich formatting and better exception handling, without modifying existing logging calls throughout their codebase. - -**Note**: When using InterceptHandler, it is important to ensure that Loguru is properly configured before intercepting log messages. This includes setting the appropriate log level and log targets (e.g., files, standard error). Failure to do so may result in lost log messages or unexpected logging behavior. Additionally, developers should be aware of the performance implications of traversing the call stack to identify the caller's frame, especially in applications with high logging volume. +**Note**: When using the InterceptHandler, it is important to ensure that the logging configuration is set up correctly to avoid conflicts between standard logging and Loguru. Additionally, developers should be aware of the performance implications of logging, particularly in multi-threaded environments, where the `enqueue=True` option in Loguru can help manage log messages safely. ### FunctionDef emit(self, record) -**emit**: The function of emit is to log messages using the Loguru logger with the appropriate logging level and caller information. - -**Parameters**: -- `self`: Represents the instance of the `InterceptHandler` class. -- `record`: An object containing all the information pertinent to the event being logged. - -**Code Description**: -The `emit` function is designed to integrate Python's standard logging module with the Loguru logging library. It performs this integration by intercepting log messages that are emitted by the standard logging handlers and then redirecting these messages to be logged by Loguru instead, ensuring that the log messages retain their original logging level and source information. +**emit**: The emit function sends log records to the Loguru logging system. -1. The function starts by attempting to map the log level from the `record` (provided by the standard logging module) to the corresponding Loguru log level. This is necessary because Loguru might use different names or levels for logging. If the mapping is successful, the Loguru level name is used; otherwise, the numeric level from the `record` is used. -2.
It then proceeds to find the caller's information. This is important for debugging purposes, as it allows the log messages to include information about where in the code the log call originated. The function iterates through the call stack using `logging.currentframe()` and skips frames that belong to the logging module itself. This is done to find the frame where the log message was generated, ensuring that the log message accurately reflects the source of the log call. +**parameters**: The parameters of this function. +· record: logging.LogRecord - The object containing the information of the log record. -3. Finally, the function logs the message to Loguru, using the determined log level and the message from the `record`. It also passes the `depth` parameter to Loguru's `opt` method, which helps Loguru in correctly identifying the call site of the log message. If the `record` contains exception information (`exc_info`), it is also passed to Loguru, allowing the logging library to handle and display exceptions appropriately. +**Code Description**: The emit function first tries to obtain the Loguru level corresponding to the level of the incoming log record; if the lookup succeeds, that level is used, otherwise the record's numeric level number is used. Next, the function uses the inspect module to obtain the current call-stack frames in order to determine where the log message originated; it walks the stack until it finds a frame outside the logging module, which fixes the depth of the log call. Finally, it logs the message through Loguru's logger object, passing along the depth and any exception information. -**Note**: -- This function is a critical component for applications that wish to leverage the advanced logging capabilities of Loguru while maintaining compatibility with Python's standard logging framework. -- Understanding the mapping between standard logging levels and Loguru's levels is essential for correctly interpreting the log messages. -- The accurate determination of the call site (`depth` calculation) is crucial for meaningful log messages, especially when diagnosing issues in complex applications. +The concrete steps are: +1. Use logger.level to look up the Loguru level name corresponding to record.levelname; if that level does not exist, use record.levelno instead. +2. Obtain the current frame via inspect.currentframe() and initialize the depth to 0, then walk the call stack in a while loop until a frame outside the logging module is found. +3. Log the message via logger.opt, passing the depth and exception info, and call record.getMessage() to obtain the message content. -**Note**: 使用该函数时,请确保传入的record对象是有效的logging.LogRecord实例,以避免潜在的错误。同时,确保Loguru库已正确配置,以便能够处理日志记录。 +**Note**: When using this function, make sure the record passed in is a valid logging.LogRecord instance to avoid potential errors, and make sure Loguru is configured correctly so it can handle the log records. *** -## FunctionDef set_logger_level_from_config -**set_logger_level_from_config**: The function of set_logger_level_from_config is to configure the logging level based on a predefined configuration and ensure that all log messages, including those from the standard Python logging module, are handled by Loguru. - -**Parameters**: This function does not accept any parameters. - -**Code Description**: The `set_logger_level_from_config` function is designed to initialize and configure the logging system for an application, specifically tailoring it to use the Loguru library for logging purposes. The function performs several key operations as follows: - -1. **Configuration Retrieval**: It begins by retrieving the desired log level from a configuration source, identified by the `CONFIG` dictionary with a default value of "INFO" if not specified. This allows for dynamic adjustment of the log level without altering the codebase, facilitating easier debugging and logging control. +## FunctionDef set_logger_level_from_config(log_level) +**set_logger_level_from_config**: The function of set_logger_level_from_config is to configure the loguru logger with a specified log level and integrate it with the standard logging module. -2. **Logger Reconfiguration**: The function then proceeds to remove any existing handlers attached to the Loguru logger to prevent duplicate logging. It adds a new handler that directs log messages to `sys.stderr`, with the log level set according to the retrieved configuration.
This ensures that all log messages are output to the standard error stream, making them easily visible and separable from standard output. +**parameters**: The parameters of this Function. +· log_level: str - The log level to set for loguru (e.g., "DEBUG", "INFO", "WARNING"). -3. **Standard Logging Interception**: To integrate the standard Python logging module with Loguru, the function employs an `InterceptHandler`. This custom handler is added to the `basicConfig` of the standard logging module, with a log level of 0 (which corresponds to `NOTSET`, ensuring that all messages regardless of level are captured) and `force=True` to override any existing configurations. This handler intercepts log messages from the standard logging system and redirects them to Loguru, leveraging its advanced formatting and handling capabilities. +**Code Description**: The set_logger_level_from_config function is designed to set the logging level for the loguru logger based on the provided log_level argument. It begins by removing any existing loguru handlers to ensure that there are no conflicts or duplications in logging output. Following this, it adds a new handler to the loguru logger that directs output to stderr at the specified log level. The parameters `enqueue=True`, `backtrace=False`, and `diagnose=False` are used to ensure that logging is thread-safe, minimizes detailed traceback information, and suppresses additional diagnostic information, respectively. -4. **Success and Error Handling**: Upon successful configuration, the function logs a success message indicating the set log level. If an invalid log level is specified in the configuration, resulting in a `ValueError`, a warning is logged indicating the use of the default log level instead. +Additionally, the function redirects the standard logging output to the loguru logger by utilizing the InterceptHandler class. This integration allows loguru to handle all logging consistently across the application, which is particularly useful in scenarios where both standard logging and loguru are used. The function concludes by logging a success message indicating that the log level has been set. -The relationship with its callees, particularly the `InterceptHandler`, is crucial for achieving seamless integration between the standard logging module and Loguru. The `InterceptHandler` plays a pivotal role by capturing log messages from the standard logging system and redirecting them to Loguru, thus centralizing log management and enhancing the logging capabilities available to the application. +The set_logger_level_from_config function is called within the run function located in the repo_agent/main.py file. In this context, it retrieves the logging configuration from the SettingsManager and applies it by calling set_logger_level_from_config with the appropriate log level. This ensures that the logging configuration is established before any tasks are executed, allowing for consistent logging behavior throughout the application. -**Note**: It is important to ensure that the `CONFIG` dictionary is properly initialized and accessible before calling this function to avoid errors due to undefined configuration. Additionally, since this function configures global logging behavior, it should be called at the application's startup before any logging occurs to ensure consistent logging behavior throughout the application's lifecycle. 
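A minimal sketch of the handler and configuration this section describes, close to loguru's documented interception recipe; the exact code in repo_agent/log.py may differ in detail:

```python
import inspect
import logging
import sys

from loguru import logger


class InterceptHandler(logging.Handler):
    def emit(self, record: logging.LogRecord) -> None:
        # Map the stdlib level name to a loguru level, falling back to the number.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno
        # Walk up the stack past the logging module to find the real caller.
        frame, depth = inspect.currentframe(), 0
        while frame and (depth == 0 or frame.f_code.co_filename == logging.__file__):
            frame = frame.f_back
            depth += 1
        logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())


def set_logger_level_from_config(log_level: str) -> None:
    logger.remove()
    logger.add(sys.stderr, level=log_level, enqueue=True, backtrace=False, diagnose=False)
    # Route all stdlib logging through loguru.
    logging.basicConfig(handlers=[InterceptHandler()], level=0, force=True)
    logger.success(f"Log level set to {log_level}!")
```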
+**Note**: When using the set_logger_level_from_config function, it is essential to ensure that the logging configuration is correctly set up to avoid conflicts between standard logging and loguru. Developers should also consider the implications of logging performance, especially in multi-threaded environments, where the `enqueue=True` option can help manage log messages safely. diff --git a/markdown_docs/repo_agent/main.md b/markdown_docs/repo_agent/main.md new file mode 100644 index 0000000..edb0cf5 --- /dev/null +++ b/markdown_docs/repo_agent/main.md @@ -0,0 +1,44 @@ +## FunctionDef cli +**cli**: The cli function provides repository-level code documentation generation for an LLM-based framework. + +**parameters**: This function takes no parameters. + +**Code Description**: The cli function is an empty function that currently implements no concrete behavior. According to its docstring, its purpose is to provide repository-level code documentation generation for a framework based on large language models (LLMs), which suggests the function is a foundation for future extension, intended to handle tasks related to documentation generation. + +Within the project structure, cli is invoked from the repo_agent/__main__.py file. Although __main__.py shows no further call details, that file is conventionally the entry point of a Python program, so cli is presumably called at program startup to initialize or configure the documentation-generation functionality. + +**Note**: Since cli currently implements no functionality, developers should be aware that it is unfinished and may require further development before it delivers the intended documentation-generation behavior. +## FunctionDef handle_setting_error(e) +**handle_setting_error**: handle_setting_error handles configuration errors in the settings. + +**parameters**: The parameters of this function. +· e: ValidationError - The exception object representing the validation error, containing detailed information about the configuration problem. + +**Code Description**: The handle_setting_error function deals with configuration errors encountered while the program is running. It is called whenever a ValidationError is raised while the program tries to obtain its settings. The function first prints a generic error message via the click library, prompting the user to check their settings. It then iterates over the errors carried by the ValidationError object and prints more detailed, color-coded information about each problematic field. + +If the error type is "missing", the function tells the user which required field is absent and suggests setting the corresponding environment variable; for other error types, it prints the error message directly. Finally, the function terminates the program gracefully by raising click.ClickException with a message stating that the program is being terminated. + +Within the project, handle_setting_error is called by several functions, including run, print_hierarchy, and diff. Whenever these functions hit a ValidationError while fetching settings, they delegate to handle_setting_error to report the problem, ensuring the user learns about configuration issues promptly and can correct them. + +**Note**: When using this function, make sure the argument passed in is a ValidationError exception object so that the error information can be handled and reported correctly. +## FunctionDef run +Doc is waiting to be generated... +## FunctionDef clean +**clean**: The function of clean is to remove the fake files generated by the documentation process. + +**parameters**: The parameters of this Function. +· No parameters are required for this function. + +**Code Description**: The clean function is designed to facilitate the cleanup of temporary files, referred to as "fake files," that are created during the documentation generation process. This function achieves its purpose by invoking the delete_fake_files function, which is responsible for identifying and removing these temporary files. + +When the clean function is called, it executes the delete_fake_files function, which performs a thorough search through the project's directory structure to locate and delete any files that match specific criteria indicative of temporary files. Upon successful completion of the deletion process, the clean function logs a success message indicating that the fake files have been cleaned up. + +The delete_fake_files function operates by first retrieving the project settings through the SettingsManager's get_setting method. It then utilizes a nested helper function, gci, to recursively traverse the specified directory. The gci function checks each file and directory, identifying those that are temporary based on their naming conventions. If a temporary file is found, it either deletes it if it is empty or renames it back to its original name if it contains content, as sketched below. + +The clean function is crucial in ensuring that the workspace remains free of unnecessary files after documentation tasks are completed. It is typically called at the end of the documentation process to maintain an organized project structure.
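A rough sketch of that traversal follows. The docs identify fake files only by a naming convention, so the FAKE_MARKER suffix below is a hypothetical placeholder, and the real implementation wraps this logic in a nested helper named gci together with SettingsManager lookups:

```python
import os

FAKE_MARKER = ".fake"  # Hypothetical suffix; the real convention lives in meta_info_utils.

def delete_fake_files(root: str) -> None:
    """Delete empty fake files and restore non-empty ones to their original names."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if FAKE_MARKER not in name:
                continue
            fake_path = os.path.join(dirpath, name)
            original_path = os.path.join(dirpath, name.replace(FAKE_MARKER, ""))
            if os.path.getsize(fake_path) == 0:
                os.remove(fake_path)  # Empty temporary file: just drop it.
            else:
                os.replace(fake_path, original_path)  # Non-empty: rename back to the original.
```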
+ +**Note**: It is important to ensure that the project settings are correctly configured and that the target repository is accessible before invoking the clean function. Any issues related to file permissions or incorrect paths may lead to errors during the cleanup process. +## FunctionDef print_hierarchy +Doc is waiting to be generated... +## FunctionDef diff +Doc is waiting to be generated... diff --git a/markdown_docs/repo_agent/multi_task_dispatch.md b/markdown_docs/repo_agent/multi_task_dispatch.md index 97d2e29..4e9d248 100644 --- a/markdown_docs/repo_agent/multi_task_dispatch.md +++ b/markdown_docs/repo_agent/multi_task_dispatch.md @@ -1,203 +1,162 @@ ## ClassDef Task -**Task**: The function of Task is to represent a unit of work within a multi-task dispatch system, including its dependencies and status. +**Task**: The function of Task is to represent a unit of work with its dependencies and status. -**Attributes**: -- `task_id`: An integer representing the unique identifier of the task. -- `dependencies`: A list of Task objects that the current task depends on to be completed before it can start. -- `extra_info`: An optional parameter that can hold any additional information related to the task. This could be of any type. -- `status`: An integer indicating the current status of the task. The status codes are as follows: 0 for not started, 1 for in progress, 2 for completed, and 3 for error. +**attributes**: The attributes of this Class. +· task_id: An integer that uniquely identifies the task. +· dependencies: A list of Task objects that this task depends on. +· extra_info: Any additional information associated with the task, which can be of any type. +· status: An integer representing the current status of the task (0 for not started, 1 for in progress, 2 for completed, 3 for error). -**Code Description**: -The `Task` class is designed to encapsulate all necessary details about a task within a task management or dispatch system. It is initialized with a unique `task_id`, a list of `dependencies` which are other Task instances that must be completed before this task can commence, and an optional `extra_info` parameter for any additional task-related information. The `status` attribute is used to track the progress of the task through its lifecycle, from not started, through in progress and completed, to error states if any issues arise. +**Code Description**: The Task class is designed to encapsulate the concept of a task within a multi-tasking framework. Each Task object is initialized with a unique identifier (task_id), a list of dependencies that must be completed before this task can start, and optional extra information that can provide context or metadata about the task. The status attribute tracks the current state of the task, allowing for management and monitoring of its progress. -In the context of the project, the Task class plays a crucial role in managing dependencies and execution order of tasks within the `TaskManager`. The `TaskManager` utilizes the `Task` class to create and manage tasks, including adding new tasks with dependencies through the `add_task` method. This method dynamically creates Task instances based on the provided dependencies and extra information, then assigns a unique task ID to each new Task instance. The `TaskManager` maintains a dictionary of these tasks, allowing for efficient lookup and status management. +The Task class is utilized within the MultiTaskDispatch system, where it plays a crucial role in task management. 
Specifically, the TaskManager class, which is responsible for managing multiple tasks, creates instances of the Task class when new tasks are added. The add_task method in TaskManager takes a list of dependency task IDs and creates a new Task object, linking it to its dependencies. This relationship ensures that tasks are executed in the correct order based on their dependencies. -The `print_task_list` method in `doc_meta_info.py/MetaInfo` interacts with Task instances by generating a human-readable table of tasks, including their IDs, dependency relationships, and any extra information. This showcases how Task instances are used across the system to track and display task information and dependencies. +Furthermore, the Task class is referenced in the print_task_list method of the MetaInfo class, which formats and displays a list of tasks along with their statuses and dependencies. This integration highlights the importance of the Task class in providing a structured way to manage and visualize tasks within the system. -**Note**: -When using the `Task` class, it is important to ensure that the dependencies are correctly managed to prevent deadlocks or circular dependencies, which could halt the execution of tasks. Additionally, the `extra_info` attribute's flexibility allows for storing a wide range of information, but care should be taken to maintain consistency in the type of data stored, especially if it's being used by other parts of the system like the `print_task_list` method for reporting or logging purposes. +**Note**: When using the Task class, it is important to ensure that the dependencies are properly managed to avoid circular dependencies, which could lead to errors in task execution. Additionally, the status attribute should be updated appropriately to reflect the current state of the task throughout its lifecycle. ### FunctionDef __init__(self, task_id, dependencies, extra_info) -**__init__**: The function of __init__ is to initialize a Task object with its unique identifier, dependencies, optional extra information, and set its initial status. - -**Parameters**: -- **task_id (int)**: A unique identifier for the task. This is used to distinguish the task from others in a system where multiple tasks are managed. -- **dependencies (List[Task])**: A list of Task objects that the current task depends on. This task cannot start until all its dependencies have been completed. -- **extra_info (Any, optional)**: Any additional information that needs to be associated with the task. This parameter is optional and can be used to store extra data relevant to the task. - -**Code Description**: -The `__init__` method is a special method in Python that is automatically called when a new instance of a class is created. In the context of the `Task` class, this method is used to initialize a new task with several important attributes: -- `task_id` is stored to uniquely identify the task. This is crucial for tracking and managing tasks within a system. -- `extra_info` allows for storing additional, potentially variable data associated with the task. This flexibility supports a wide range of applications where tasks might carry extra context or configuration. -- `dependencies` is a list that contains other tasks that must be completed before this task can start. This is essential for managing complex workflows where tasks are interdependent. -- `status` is initialized to `0`, which represents the initial state of the task. 
The status codes are defined as follows: `0` for not started, `1` for in progress, `2` for completed, and `3` for error. This status attribute is critical for tracking the progress and state of the task throughout its lifecycle. - -**Note**: -- It is important to ensure that the `task_id` provided is unique within the context it is used. Duplicate IDs can lead to confusion or errors in task management. -- The `dependencies` parameter expects a list of Task objects. This means that all dependencies must be instances of the Task class or a subclass thereof. Proper instantiation and management of dependencies are crucial for the correct execution of tasks. -- The `extra_info` parameter is highly flexible and can be used to attach any additional data to a task. However, users of the Task class should document and standardize the use of `extra_info` within their applications to maintain clarity and consistency. +**__init__**: The function of __init__ is to initialize a Task object. + +**parameters**: The parameters of this Function. +· parameter1: task_id (int) - The unique identifier of the task. +· parameter2: dependencies (List[Task]) - The list of other tasks this task depends on. +· parameter3: extra_info (Any, optional) - Additional information of any type; defaults to None. + +**Code Description**: This __init__ function is the constructor of the Task class and initializes the basic attributes of a task object. It receives an integer task_id that uniquely identifies the task. The dependencies parameter is a list of Task objects representing the other tasks this task depends on, which matters greatly for scheduling and execution order. The extra_info parameter is optional and can store any additional task-related data, defaulting to None. Finally, the status attribute is initialized to 0, meaning the task starts in the "not started" state. The status values are defined as follows: 0 means not started, 1 means in progress, 2 means completed, and 3 means an error occurred. + +**Note**: When using this constructor, make sure the dependencies argument is a valid list of tasks to avoid errors during later scheduling, and keep task_id unique so that tasks can be identified and managed correctly. *** ## ClassDef TaskManager -**TaskManager**: The function of TaskManager is to manage and dispatch tasks in a multi-threaded environment, ensuring thread-safe operations and dependencies handling among tasks. +**TaskManager**: The function of TaskManager is to manage and dispatch multiple tasks based on their dependencies. -**Attributes**: -- `task_dict`: A dictionary mapping task IDs to Task objects, facilitating task management and dependency resolution. -- `task_lock`: A threading lock used to ensure thread-safe access and modifications to the task dictionary. -- `now_id`: An integer representing the current task ID, used to assign unique IDs to new tasks. -- `query_id`: An integer representing the current query ID, used for internal management and possibly for optimization purposes. -- `sync_func`: A placeholder for a synchronization function that can be used to synchronize tasks or perform cleanup operations periodically. +**attributes**: The attributes of this Class. +· task_dict: A dictionary that maps task IDs to Task objects. +· task_lock: A threading.Lock used for thread synchronization when accessing the task_dict. +· now_id: An integer representing the current task ID. +· query_id: An integer representing the current query ID. +· sync_func: A placeholder for a synchronization function, initially set to None. -**Code Description**: -The `TaskManager` class is designed to handle the addition, execution, and completion of tasks in a multi-threaded environment. It uses a dictionary to store tasks, where each task is associated with a unique ID. This design allows for efficient task retrieval and management. The class provides methods to add new tasks, retrieve the next task for processing, and mark tasks as completed. +**Code Description**: The TaskManager class is designed to facilitate the management of multiple tasks in a concurrent environment.
It initializes with an empty task dictionary (task_dict) that will hold Task objects indexed by their unique IDs. The class employs a threading lock (task_lock) to ensure that access to the task dictionary is thread-safe, preventing race conditions when multiple threads attempt to modify the task list simultaneously. -Tasks can have dependencies, which are other tasks that must be completed before the task can start. The `add_task` method allows adding tasks with dependencies, ensuring that tasks are executed in the correct order. The method assigns a unique ID to each new task and stores it in the task dictionary. +The now_id attribute keeps track of the next available task ID, while query_id is used to track the number of queries made for tasks. The sync_func attribute is intended to hold a function that can be called for synchronization purposes, though it is not defined upon initialization. -The `get_next_task` method retrieves the next available task for a given process ID, considering task dependencies and ensuring that only ready tasks are returned. It also implements a mechanism to periodically invoke a synchronization function, which can be useful for maintaining the overall state of the task manager. +The class provides several key methods: +- **all_success**: A property that checks if all tasks have been completed by verifying if the task dictionary is empty. +- **add_task**: This method allows the addition of a new task to the task dictionary. It takes a list of dependency task IDs and optional extra information. The method locks the task dictionary during the addition process to ensure thread safety, creates a new Task object, and increments the now_id for the next task. +- **get_next_task**: This method retrieves the next available task for a specified process ID. It checks the task dictionary for tasks that have no dependencies and are not currently in progress. If a task is found, it updates its status to indicate that it is now being processed and may call the sync_func every ten queries. +- **mark_completed**: This method marks a specified task as completed and removes it from the task dictionary. It also updates the dependencies of other tasks that may rely on the completed task. -The `mark_completed` method marks a task as completed and removes it from the task dictionary. It also updates the dependencies of other tasks, ensuring that tasks dependent on the completed task are now eligible for execution. +The TaskManager class is utilized within the MetaInfo class in the repo_agent/doc_meta_info.py file. Specifically, it is called in the methods get_task_manager and get_topology. The get_task_manager method constructs a TaskManager instance and populates it with tasks based on the dependencies of document items in a hierarchical structure. The get_topology method orchestrates the overall process of calculating the topological order of all objects in a repository, leveraging the TaskManager to manage the tasks that arise from this calculation. -In the context of the project, the `TaskManager` is utilized to manage documentation tasks, as seen in the `get_task_manager` and `get_topology` methods of the `MetaInfo` class. These methods calculate the topology of objects in a repository and manage tasks related to document generation, ensuring that tasks are executed in an order that respects their dependencies. This is particularly useful for handling complex documentation projects where the generation of certain parts of the documentation depends on the completion of others. 
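A small usage sketch of the dispatch loop implied by the methods above. The import path matches the documented module, but the keyword names, the assumption that add_task returns the new task's ID, and the assumption that get_next_task returns a None task when nothing is ready are illustrative guesses rather than confirmed API details:

```python
from repo_agent.multi_task_dispatch import TaskManager  # documented module path

manager = TaskManager()
first = manager.add_task(dependency_task_id=[], extra_info="parse module")       # assumed to return 0
second = manager.add_task(dependency_task_id=[first], extra_info="write docs")   # depends on task 0

# Worker loop: pull ready tasks until everything is done.
while not manager.all_success:
    task, task_id = manager.get_next_task(process_id=0)
    if task is None:
        continue  # Dependencies still pending; real workers would back off briefly.
    print(f"running task {task_id}: {task.extra_info}")
    manager.mark_completed(task_id)
```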
+**Note**: When using the TaskManager, ensure that the sync_func is properly defined if synchronization is required during task processing. Additionally, be aware of potential circular dependencies in task management, which may complicate the task retrieval process. -**Note**: -- It is crucial to ensure that the `sync_func` attribute is properly set if synchronization or periodic cleanup operations are required. -- The `TaskManager` class assumes that tasks are independent or have clearly defined dependencies. Circular dependencies must be resolved or avoided in the task setup phase to prevent deadlocks. +**Output Example**: A possible return value from the get_next_task method could be a tuple containing a Task object and its ID, such as (Task(task_id=0, dependencies=[], extra_info=None), 0), indicating that the task with ID 0 is ready for processing. +### FunctionDef __init__(self) +**__init__**: The function of __init__ is to initialize a MultiTaskDispatch object. -**Output Example**: -An example of using the `TaskManager` might involve adding tasks with dependencies and then sequentially processing them. After adding tasks, the task dictionary could look like this: +**parameters**: The __init__ function does not take any parameters. -```python -{ - 0: Task(task_id=0, dependencies=[], extra_info=None), - 1: Task(task_id=1, dependencies=[Task(task_id=0)], extra_info='Extra Info for Task 1') -} -``` +**Code Description**: The __init__ method is responsible for setting up a new instance of the MultiTaskDispatch class. It initializes several key attributes that are essential for managing multiple tasks within a multi-tasking framework. -This indicates that there are two tasks, where Task 1 depends on Task 0. After processing Task 0 and marking it as completed, Task 1 becomes eligible for execution. -### FunctionDef __init__(self) -**__init__**: The function of __init__ is to initialize a MultiTaskDispatch object with necessary attributes for task management. +- `task_dict`: This attribute is a dictionary that maps integer task IDs to Task objects. It serves as a central repository for all tasks being managed, allowing for efficient retrieval and management of tasks based on their unique identifiers. -**Parameters**: This function does not take any parameters beyond the implicit `self`. +- `task_lock`: This attribute is an instance of `threading.Lock`, which is utilized for thread synchronization. It ensures that access to the `task_dict` is thread-safe, preventing race conditions that could occur when multiple threads attempt to modify or access the task dictionary simultaneously. -**Code Description**: The `__init__` method is crucial for setting up the foundational structure of the `MultiTaskDispatch` object within the task management system. Upon instantiation, it initializes several attributes essential for managing tasks: +- `now_id`: This integer attribute keeps track of the current task ID being processed. It is initialized to zero, indicating that no tasks have been processed yet. -- `task_dict`: A dictionary that serves as the central repository for all tasks managed by the `MultiTaskDispatch` object. It maps unique task IDs (integers) to their corresponding `Task` objects. This attribute is vital for tracking and accessing tasks efficiently. -- `task_lock`: A threading lock from the `threading` module, ensuring that operations on `task_dict` are thread-safe. 
This lock prevents race conditions and ensures that only one thread can modify `task_dict` at a time, which is crucial in a multi-threaded environment. -- `now_id`: An integer that keeps track of the current task ID. It is used to assign unique IDs to new tasks as they are added to the system. -- `query_id`: An integer used to track the current query ID. Its specific use may vary, but generally, it could be involved in operations that require identifying or tracking queries within the system. -- `sync_func`: Initially set to None, this attribute is a placeholder for a synchronization function that might be defined later. This function could be used for synchronizing tasks or data across different components or threads. +- `query_id`: Similar to `now_id`, this integer attribute is used to track the current query ID. It is also initialized to zero. -The `Task` class, which is referenced in the `task_dict` attribute, encapsulates details about individual tasks, such as their ID, dependencies, status, and additional information. This class is fundamental to the task management system, allowing the `MultiTaskDispatch` object to manage tasks' lifecycle, dependencies, and execution order. +- `sync_func`: This attribute is initialized to None and serves as a placeholder for a synchronization function that may be defined later. This allows for flexibility in managing task synchronization as needed. -**Note**: When working with the `MultiTaskDispatch` object, it is important to be aware of threading issues, especially when accessing or modifying the `task_dict`. The `task_lock` should be used appropriately to lock and unlock the dictionary during such operations to maintain data integrity. Additionally, while the `sync_func` attribute is initialized as None, it should be properly defined and utilized if synchronization functionalities are required in the system. The design of the `Task` class and its integration into the `MultiTaskDispatch` object highlight the importance of managing dependencies and execution order in a multi-task environment. Proper management of task IDs and query IDs is also crucial for the system's operation and integrity. +The initialization of these attributes is crucial for the proper functioning of the MultiTaskDispatch system, as they lay the groundwork for task management and synchronization. The MultiTaskDispatch class relies on the Task class to represent individual tasks, which are stored in `task_dict`. The relationship between MultiTaskDispatch and Task is fundamental, as MultiTaskDispatch orchestrates the execution and management of these Task objects, ensuring that tasks are executed in accordance with their dependencies and statuses. + +**Note**: When using the MultiTaskDispatch class, it is important to ensure that the task management system is properly configured, particularly with respect to thread safety and the handling of task dependencies. Proper initialization of the attributes is essential for the smooth operation of the task management framework. *** ### FunctionDef all_success(self) -**all_success**: The function of `all_success` is to determine if all tasks have been successfully completed. +**all_success**: The function of all_success is to check whether the task dictionary in the task manager is empty. -**Parameters**: This function does not take any parameters. +**parameters**: This function has no parameters. -**Code Description**: The `all_success` function is a method of the `TaskManager` class, designed to check if there are any pending tasks in the task management system.
It operates by comparing the length of the `task_dict` dictionary, which contains all the tasks that need to be processed, to zero. If the length is zero, this means that there are no tasks left to process, indicating that all tasks have been successfully completed. This function returns a boolean value: `True` if there are no tasks remaining (indicating success), and `False` otherwise. +**Code Description**: The all_success function determines whether the task dictionary (task_dict) in the task manager is empty. Specifically, it does so by checking the length of the task dictionary: if the length is zero, there are no pending tasks and the function returns True; otherwise, it returns False. -In the context of its usage within the project, particularly in the `run` method of the `Runner` class, the `all_success` function plays a crucial role in determining the flow of document generation and updating. After initiating the document generation process, checking for changes, and setting up tasks for document generation, the `all_success` method is called to check if the task queue is empty. If it returns `True`, indicating that there are no tasks in the queue and all documents are up to date, the process logs this information and proceeds to finalize the document generation process. This includes tasks like joining threads that were started for document generation, updating the document version, and refreshing the markdown documents. Thus, `all_success` serves as a checkpoint to ensure that all necessary document updates have been completed before moving on to the final steps of the process. +Within the project, all_success is called in the run method of the Runner class in repo_agent/runner.py. There, the state of the task manager is checked to determine whether all documentation generation tasks have finished. If all_success returns True, the task queue holds no tasks and all documents are complete and up to date, in which case a log entry is written noting that no tasks remain in the queue. -**Note**: It is important for developers to ensure that the `task_dict` is accurately maintained throughout the task management process. Any tasks that are added or removed should be reflected in this dictionary to ensure that the `all_success` function provides a reliable indication of task completion status. +**Note**: When using this function, make sure the state of the task dictionary has been updated correctly, to avoid misjudging whether tasks are complete. -**Output Example**: -- If there are no tasks left to process, the function will return `True`. -- If there are still tasks pending in the `task_dict`, the function will return `False`. +**Output Example**: Assuming the task dictionary is empty, calling all_success returns True. *** ### FunctionDef add_task(self, dependency_task_id, extra) -**add_task**: The function of add_task is to add a new task to the task management system with specified dependencies and optional extra information. +**add_task**: The function of add_task is to add a new task to the task dictionary while managing its dependencies. + +**parameters**: The parameters of this Function. +· dependency_task_id: List[int] - A list of task IDs that the new task depends on. +· extra: Any, optional - Extra information associated with the task. Defaults to None. + +**Code Description**: The add_task method is responsible for creating and adding a new task to the task manager's internal dictionary of tasks. It takes a list of dependency task IDs, which represent other tasks that must be completed before the new task can start. The method also accepts an optional parameter, extra, which can hold any additional information related to the task. + +When the add_task method is invoked, it first acquires a lock (self.task_lock) to ensure thread safety while modifying the task dictionary. It then retrieves the Task objects corresponding to the provided dependency_task_id list. These Task objects are stored in the depend_tasks list. -**Parameters**: -- `dependency_task_id`: A list of integers representing the IDs of tasks that the new task depends on.
This ensures that the task execution respects the specified dependencies. -- `extra`: An optional parameter that can hold any additional information associated with the task. This could be of any type and is defaulted to None if not provided. +Next, a new Task object is instantiated using the current task ID (self.now_id), the list of dependencies (depend_tasks), and the optional extra information. This new Task object is then added to the task dictionary with the current task ID as the key. After successfully adding the task, the method increments the now_id counter to ensure that the next task added will have a unique identifier. Finally, the method returns the ID of the newly added task. -**Code Description**: The `add_task` function is a critical component of the task management system, designed to handle the addition of new tasks with dependencies. It operates within a thread-safe block, ensuring that task addition is atomic and prevents race conditions in a multi-threaded environment. The function iterates over the list of dependency task IDs provided, retrieving each corresponding task from the task dictionary (`task_dict`) using the ID. It then creates a new `Task` instance with a unique ID (`now_id`), the list of dependency tasks, and any extra information provided. This new task is added to the task dictionary. The task ID is incremented after adding the task to ensure uniqueness for subsequent tasks. Finally, the function returns the ID of the newly added task, which can be used for further operations or reference. +The add_task method is called within the get_task_manager method of the MetaInfo class. In this context, get_task_manager is responsible for constructing a TaskManager instance and populating it with tasks based on the relationships between various document items. As it processes each document item, it determines the dependencies for the task to be created and invokes add_task to register the new task in the TaskManager. This integration highlights the role of add_task in establishing the task management framework, ensuring that tasks are created with the correct dependencies and are properly tracked within the system. -**Note**: It is crucial to ensure that the `dependency_task_id` list does not contain any invalid or non-existent task IDs, as this would raise a KeyError when attempting to retrieve the task from `task_dict`. Additionally, managing dependencies carefully is essential to avoid creating circular dependencies, which could lead to deadlocks or infinite loops in task execution. The optional `extra` parameter provides flexibility in associating additional information with a task, but it should be used consistently to maintain the integrity and readability of the task data. +**Note**: When using the add_task method, it is essential to ensure that the dependency_task_id list does not contain circular references, as this could lead to issues in task execution. Additionally, the extra parameter should be used judiciously to provide relevant context for the task without introducing unnecessary complexity. -**Output Example**: If the `add_task` function is called with a `dependency_task_id` list containing `[1, 2]` and no extra information, and assuming the current `now_id` is 10, the function would return `10`. The task dictionary would then include a new `Task` instance with an ID of 10, dependencies on tasks with IDs 1 and 2, and default extra information of `None`. 
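For instance, a call wiring a new task behind two existing ones might look like this (a sketch; the `extra` payload shown is a hypothetical value):

```python
# Hypothetical: the new task must wait for tasks 1 and 2 and carries a
# payload that the eventual handler will receive as extra_info.
new_id = task_manager.add_task(
    dependency_task_id=[1, 2],
    extra="repo_agent/runner.py",
)
print(new_id)  # e.g. 10 if now_id was 10 before the call
```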
+**Output Example**: A possible return value of the add_task method could be an integer representing the ID of the newly added task, such as 5, indicating that the task has been successfully added to the task manager with that identifier. *** ### FunctionDef get_next_task(self, process_id) -**get_next_task**: The function retrieves the next available task for a specified process ID. - -**Parameters**: -- `process_id` (int): The ID of the process requesting a task. - -**Code Description**: -The `get_next_task` function is designed to select and return the next task from a collection of tasks that is ready to be executed, based on a given process ID. It operates under the following logic: - -1. The function locks the task collection to ensure thread safety during task selection. -2. It increments an internal query ID each time the function is called, which is used for internal tracking and potentially for synchronization purposes. -3. The function iterates over the task dictionary (`task_dict`), which contains all tasks managed by the `TaskManager`. -4. For each task, it checks if the task has no dependencies (`len(self.task_dict[task_id].dependencies) == 0`) and if its status is `0` (indicating it is ready to be executed). -5. When a ready task is found, its status is updated to `1` (indicating it is in progress), and a message is printed to the console indicating the process ID that has acquired the task, the task ID, and the remaining number of tasks. -6. Every 10th query, a synchronization function (`self.sync_func()`) is called, which could be used for maintaining consistency or updating the state of the task collection. -7. The function then returns a tuple containing the task object and its ID, indicating the task has been successfully assigned. -8. If no ready tasks are found, the function returns `(None, -1)`, indicating there are no available tasks for execution. - -**Note**: -- The function assumes the existence of a task dictionary (`task_dict`), a query ID (`query_id`), and a synchronization function (`sync_func`) as part of the `TaskManager` class. These elements should be properly initialized and maintained within the class. -- The task status is represented by integers, where `0` indicates a task is ready, and `1` indicates a task is in progress. This status mechanism is crucial for the function's operation. -- The function uses a lock (`task_lock`) to ensure that task selection and status updates are thread-safe. This is important in a multi-threaded environment to prevent race conditions. -- The printed message uses color formatting for better visibility in the console, which requires the `colorama` module or a similar library. - -**Output Example**: -Assuming a task with ID `5` is ready and the process ID is `2`, the function might print the following message to the console: -``` -[process 2]: get task(5), remain(9) -``` -And return a tuple like `(TaskObject, 5)`, where `TaskObject` is a placeholder for the actual task object returned. If no tasks are ready, it would return `(None, -1)`. -*** -### FunctionDef mark_completed(self, task_id) -**Function**: mark_completed +**get_next_task**: The function of get_next_task is to fetch the next task for a given process ID. -**Parameters**: -- `task_id` (int): The ID of the task to mark as completed. +**parameters**: The parameters of this Function. +· parameter1: process_id (int) - The ID of the process. -**Function Description**: -The `mark_completed` function is designed to update the status of a specific task, identified by its `task_id`, to indicate that it has been completed.
This function operates within a multi-tasking environment where tasks may have dependencies on one another. The primary operations performed by this function include: +**Code Description**: +The get_next_task function retrieves the next available task for the given process ID. It first acquires self.task_lock to guarantee safe access to the tasks in a multi-threaded environment. It then increments query_id by one to track the number of queries made. The function iterates over every task ID in the task_dict dictionary, checking each task's dependencies and status; only when a task's dependency list is empty and its status is 0 (meaning the task is available) is it marked as acquired (status set to 1). When a task is acquired, the function prints the current process ID, the ID of the acquired task, and the number of remaining tasks. If query_id is a multiple of 10, sync_func is called to synchronize state. Finally, the function returns the acquired task object together with its ID; if no task is available, it returns (None, -1). -1. **Acquiring a Lock**: The function starts by acquiring a lock on the task data structure to ensure thread-safe modifications. This is crucial in a multi-threaded environment to prevent data corruption or inconsistencies. +**Note**: Before calling this function, make sure task_dict has been initialized correctly, and use task_lock in multi-threaded environments to avoid race conditions. -2. **Identifying the Target Task**: It retrieves the task object associated with the provided `task_id` from a dictionary (`task_dict`) that maps task IDs to task objects. This dictionary is a central repository where all tasks are stored and managed. +**Output Example**: +Assuming an available task with ID 5 exists, the return value might be: +(task_object, 5) # where task_object is the acquired task object. -3. **Updating Dependencies**: The function iterates over all tasks in the `task_dict` to check if any of them have the completed task as a dependency. If so, the completed task is removed from their dependency list. This step is essential to ensure that tasks waiting on the completed task can proceed if they no longer have any unmet dependencies. +If no task is available, the return value is: +(None, -1) +*** +### FunctionDef mark_completed(self, task_id) +**mark_completed**: The function of mark_completed is to mark the specified task as completed and remove it from the task dictionary. + +**parameters**: The parameters of this Function. +· parameter1: task_id (int) - The ID of the task to mark as completed. -4. **Removing the Task**: Finally, the task is removed from the `task_dict`, effectively marking it as completed and no longer active or pending in the system. +**Code Description**: The mark_completed function marks the specified task as completed and deletes it from the task manager's task dictionary. It receives an integer parameter, task_id, identifying the task to process. Internally, the function first acquires self.task_lock to guarantee safe access to the task dictionary in a multi-threaded environment. It then looks up the target task (target_task) in the task dictionary by task_id. Next, it iterates over every task in the dictionary and checks whether the target task appears in another task's dependency list; wherever it does, it is removed from that list. Finally, the function calls pop to delete the task from the task dictionary, ensuring it is no longer managed. -**Note**: -- It is important to ensure that the `task_id` provided to the function is valid and exists within the `task_dict`. If an invalid `task_id` is passed, the function may raise a `KeyError`. -- The function assumes that the `task_lock` and `task_dict` are accessible within the context of the function, typically as attributes of the class to which this function belongs. The `task_lock` is used to synchronize access to the `task_dict` across multiple threads. -- Removing a task's dependencies is a critical step in a task dependency management system, as it allows other tasks that were waiting on the completed task to move forward. This contributes to the overall efficiency and responsiveness of the task management system. +**Note**: Make sure the task_id passed in is valid and that the corresponding task exists in the task dictionary. Calling this function also updates the dependencies of other tasks, so consider inter-task dependencies before invoking it. *** ## FunctionDef worker(task_manager, process_id, handler) -**worker**: The function of worker is to execute tasks assigned by the task manager in a loop until all tasks are successfully completed. +**worker**: The worker function executes tasks assigned by the task manager. -**Parameters**: -- **task_manager**: The task manager object that assigns tasks to workers. -- **process_id (int)**: The ID of the current worker process. -- **handler (Callable)**: The function that handles the tasks.
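The loop this section documents can be reconstructed roughly as follows (a sketch based on the behavior described below; the 0.5-second back-off and the `extra_info` field follow that description):

```python
import time
from typing import Callable

def worker(task_manager, process_id: int, handler: Callable):
    # Poll the manager until it reports that every task has completed.
    while True:
        if task_manager.all_success:
            return
        task, task_id = task_manager.get_next_task(process_id)
        if task is None:
            time.sleep(0.5)  # nothing ready yet; back off briefly
            continue
        handler(task.extra_info)              # perform the actual work
        task_manager.mark_completed(task_id)  # unblock dependent tasks
```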
+**parameters**: The parameters of this function are as follows: +· parameter1: task_manager - The task manager object that assigns tasks to worker threads. +· parameter2: process_id (int) - The ID of the current worker process. +· parameter3: handler (Callable) - The function that processes a task. -**Code Description**: -The `worker` function is designed to operate within a multi-threaded environment, where each instance of the function acts as a separate worker process. The function enters a continuous loop, where it interacts with a task manager to receive and execute tasks. The loop continues until a condition is met, indicating that all tasks have been successfully completed. +**Code Description**: The worker function is a worker thread running an endless loop that fetches tasks from the task manager and executes them. It first checks the state of the task manager: if every task has completed successfully, the function returns and execution ends. Otherwise, worker calls the task manager's get_next_task method to obtain the next task and its ID for the current process ID. If no task is available, worker sleeps for 0.5 seconds and then continues the loop. -Upon each iteration, the worker checks if all tasks have been successfully completed through the `task_manager.all_success` flag. If all tasks are completed, the worker exits the loop and the function ends. If there are remaining tasks, the worker requests the next task from the task manager using its process ID. If a task is available, the worker proceeds to execute the task by calling the provided `handler` function with the task's extra information. After successfully handling the task, the worker informs the task manager that the task has been completed by calling `task_manager.mark_completed` with the task ID. +Once a task has been acquired, worker invokes the supplied handler function on the task's extra information. After processing finishes, worker calls the task manager's mark_completed method to mark the task as done. This design lets multiple workers process tasks in parallel, improving task throughput. -This function is utilized in a multi-threaded documentation generation process, as seen in its calls within the `Runner` class methods `first_generate` and `run`. In these contexts, the `worker` function is responsible for generating documentation for parts of a project in parallel. The `handler` function passed to the worker is responsible for the actual documentation generation for a single item. This parallel processing significantly speeds up the documentation generation process, especially for large projects with many components to document. +Within the project, the worker function is called by the first_generate and run methods in repo_agent/runner.py. Both methods spawn several threads during documentation generation, each running the worker function so that tasks are processed in parallel. first_generate initializes the task list and starts the worker threads, while run regenerates documentation when file changes are detected and likewise starts worker threads to process the tasks. -**Note**: -- The `worker` function is designed to be used in a multi-threaded environment. Care should be taken to ensure thread safety, especially in the manipulation of shared resources such as the task manager. -- The function relies on the task manager's implementation of task distribution, success tracking, and task completion marking. Any changes to these implementations may require corresponding adjustments in the `worker` function. +**Note**: When using this function, make sure the task manager's state is correct, to avoid unnecessary waiting when there are no tasks to execute. -**Output Example**: -The `worker` function does not produce a direct output since its primary role is to execute tasks. However, the successful execution of tasks will result in the task manager's state being updated to reflect the completion of tasks, and in the context of documentation generation, the creation or update of documentation files. +**Output Example**: Assuming the task manager assigned a task, the worker function might report something like the following after processing: +``` +Task ID: 12345 completed successfully. +``` ## FunctionDef some_function -**Function Name**: some_function - -**Function**: The function of some_function is to pause the execution of the program for a random duration of up to 3 seconds. +**some_function**: The function of some_function is to pause for a random period of time. -**Parameters**: This function does not take any parameters.
+**parameters**: This function has no parameters. -**Code Description**: The `some_function` is designed to introduce a delay in the execution flow of a program. It utilizes the `sleep` function from the `time` module and the `random` function from the `random` module. Specifically, `random.random()` generates a floating-point number between 0.0 and 1.0. This value is then multiplied by 3 to scale the range of possible delays from 0 to up to 3 seconds. The resulting value is passed to `time.sleep()`, which pauses the program's execution for that duration. This technique can be useful in simulating real-world scenarios where operations do not complete instantaneously or in testing to introduce variability in execution flow. +**Code Description**: some_function is a simple function whose main purpose is to pause the program for a random period. Internally it calls time.sleep(), passing a randomly generated float between 0 and 3 seconds. The random number comes from random.random(), which returns a float in the range [0.0, 1.0); multiplied by 3, the resulting pause therefore varies between 0 and 3 seconds. This kind of random pause is useful when delays or waiting need to be simulated, for example in multi-threaded or asynchronous code, where a random delay can help avoid resource contention or mimic the behavior of real users. -**Note**: When using `some_function`, it's important to be aware that the delay introduced is non-deterministic due to its reliance on random number generation. This means that each call to `some_function` will likely result in a different delay duration, within the specified range of 0 to 3 seconds. Additionally, the use of this function can affect the performance and responsiveness of your program, especially in time-sensitive applications. Therefore, it should be used judiciously and tested thoroughly in the context of the overall program behavior. +**Note**: Keep in mind that this function pauses the program, so use it with care in scenarios that require high performance or real-time responsiveness. Because the pause duration is random, it can also reduce the predictability of the program. diff --git a/markdown_docs/repo_agent/project_manager.md b/markdown_docs/repo_agent/project_manager.md index 8297989..55db4ac 100644 --- a/markdown_docs/repo_agent/project_manager.md +++ b/markdown_docs/repo_agent/project_manager.md @@ -1,111 +1,156 @@ ## ClassDef ProjectManager -**ProjectManager**: The function of ProjectManager is to manage and retrieve the structure of a project within a repository. +**ProjectManager**: The function of ProjectManager is to manage and retrieve the structure of a project repository. -**Attributes**: -- `repo_path`: The file system path to the repository containing the project. -- `project`: An instance of a jedi.Project, initialized with the repository path. -- `project_hierarchy`: The path to the project's hierarchy JSON file, which is constructed by joining the repository path, the project hierarchy directory, and the filename "project_hierarchy.json". +**attributes**: The attributes of this Class. +· repo_path: The file path to the project repository. +· project: An instance of the Jedi Project class, initialized with the repo_path. +· project_hierarchy: The file path to the project hierarchy JSON file, constructed from the repo_path and project_hierarchy parameter. -**Code Description**: -The `ProjectManager` class is designed to facilitate the management of a software project's structure within a repository. It is initialized with two parameters: `repo_path`, which specifies the path to the repository, and `project_hierarchy`, which denotes the relative path from the repository to the project's hierarchy directory. Upon initialization, it creates a `jedi.Project` instance for the given repository path. This is significant for understanding and navigating the project's codebase efficiently.
+**Code Description**: The ProjectManager class is designed to facilitate the management of a project repository by providing methods to retrieve the project's directory structure and build a reference path tree. Upon initialization, the class requires two parameters: `repo_path`, which specifies the location of the project repository, and `project_hierarchy`, which indicates the name of the hierarchy to be used. The class constructs the path to the project hierarchy JSON file by combining the repo_path with the project_hierarchy name. -The class features a method, `get_project_structure`, which returns the structure of the project by recursively walking through the directory tree of the repository. This method is particularly useful for generating a textual representation of the project's file and directory structure, focusing on Python files (`.py`). It ignores hidden files and directories (those starting with a dot) and constructs a string that represents the hierarchy of Python files and directories within the project. This functionality is crucial for developers and tools that need to understand or display the project's layout. +The `get_project_structure` method is responsible for returning the structure of the project by recursively traversing the directory tree starting from the repo_path. It constructs a string representation of the project structure, including all directories and Python files, while ignoring hidden files and directories. This method utilizes a nested function `walk_dir` to perform the recursive traversal. -In the context of the project, the `ProjectManager` is instantiated within the `Runner` class, which is part of the project's execution or management script. The `Runner` class uses the `ProjectManager` to access the project's structure, indicating that the `ProjectManager` plays a critical role in the broader system for managing and interacting with the project's codebase. The `Runner` class initializes the `ProjectManager` with configuration parameters that specify the repository path and the project hierarchy, demonstrating how the `ProjectManager` is integrated into the project's workflow. +The `build_path_tree` method creates a hierarchical tree structure based on two lists of paths: `who_reference_me` and `reference_who`, as well as a specific `doc_item_path`. It constructs a nested dictionary using `defaultdict` to represent the tree structure. The method modifies the last part of the `doc_item_path` to indicate a specific item with a star symbol. Finally, it converts the tree structure into a string format for easier visualization. -**Note**: -- It is essential to ensure that the `repo_path` and `project_hierarchy` parameters are correctly set to reflect the actual structure of the project within the repository. -- The `jedi.Project` instance requires the `jedi` library, which should be installed and properly configured in the project's environment. +The ProjectManager class is instantiated within the Runner class, where it is initialized with the target repository and hierarchy name obtained from the SettingsManager. This integration allows the Runner to leverage the ProjectManager's capabilities to manage and retrieve project structure information, which is essential for the overall functionality of the application. 
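A usage sketch under stated assumptions: the constructor arguments below are hypothetical placeholders for the values that SettingsManager normally supplies, and the import path follows the module documented here.

```python
from repo_agent.project_manager import ProjectManager  # assumed import path

# Hypothetical paths; in the application these come from SettingsManager.
pm = ProjectManager(repo_path="/path/to/repo", project_hierarchy="hierarchy_name")

print(pm.get_project_structure())  # indented listing of directories and .py files

print(pm.build_path_tree(
    who_reference_me=["folder1/fileA.txt"],
    reference_who=["folder3/fileC.txt"],
    doc_item_path="folder1/folder2/fileB.txt",
))
```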
-**Output Example**: -Assuming a project structure like this: -``` -repo_agent/ - project_manager.py - runner.py - utils/ - helper.py -``` -The output of `get_project_structure` might look like: +**Note**: When using the ProjectManager class, ensure that the provided repo_path is valid and accessible. The project_hierarchy should correspond to an existing hierarchy name to avoid file path errors. + +**Output Example**: A possible output of the `get_project_structure` method might look like this: ``` -repo_agent - project_manager.py - runner.py - utils - helper.py +project_root + src + main.py + utils.py + tests + test_main.py ``` -This output provides a clear, textual representation of the project's structure, focusing on Python files and directories, excluding hidden files and directories. ### FunctionDef __init__(self, repo_path, project_hierarchy) -**__init__**: The function of __init__ is to initialize a new instance of the ProjectManager class. - -**Parameters**: -- **repo_path**: The file system path to the repository that the project manager will operate on. -- **project_hierarchy**: The relative path from the repository to the project hierarchy file, excluding the file name. - -**Code Description**: -The `__init__` method is a special method in Python that is called when a new instance of a class is created. In the context of the ProjectManager class, this method serves to initialize the instance with specific attributes and configurations necessary for its operation. - -Upon instantiation, the `__init__` method takes two parameters: `repo_path` and `project_hierarchy`. The `repo_path` parameter is expected to be a string representing the path to the repository this instance of ProjectManager will manage. This path is then used to initialize a `jedi.Project` object, which is assigned to the instance's `project` attribute. The `jedi.Project` object is likely used for interacting with the project's codebase in a way that understands the structure and syntax of the code, leveraging the Jedi library's capabilities. +**__init__**: The function of __init__ is to initialize an instance of the ProjectManager class. -The `project_hierarchy` parameter is also a string, representing the relative path from the repository's root to the location where the project hierarchy file is (or will be) stored. However, it does not include the name of the file itself. The method constructs the full path to the project hierarchy file by joining the `repo_path` with the `project_hierarchy` and appending `"project_hierarchy.json"` to it. This full path is then stored in the instance's `project_hierarchy` attribute. +**parameters**: The parameters of this function are as follows: +· parameter1: repo_path - The path to the project repository. +· parameter2: project_hierarchy - The path of the project hierarchy. -This setup implies that the ProjectManager class is designed to work with projects that have a specific structure and configuration, part of which includes a JSON file named `project_hierarchy.json` that presumably contains information about the project's structure or configuration.
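The initializer itself is small; the following is a sketch consistent with the description below, assuming the documented `jedi` and `os.path.join` behavior:

```python
import os
import jedi

class ProjectManager:
    def __init__(self, repo_path, project_hierarchy):
        self.repo_path = repo_path
        # Jedi project used later for code analysis over the repository.
        self.project = jedi.Project(self.repo_path)
        # Full path to the hierarchy file: <repo>/<hierarchy>/project_hierarchy.json
        self.project_hierarchy = os.path.join(
            self.repo_path, project_hierarchy, "project_hierarchy.json"
        )
```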
+**Code Description**: The __init__ function initializes an instance of the ProjectManager class. Inside the function, the incoming repo_path argument is first assigned to the instance variable self.repo_path so that it can be used by the class's other methods. Next, a new Project object is created with the jedi library and assigned to self.project, passing repo_path as the argument; this lets ProjectManager use the functionality provided by the jedi library for tasks such as code analysis and auto-completion. Finally, the function builds the full path of the project hierarchy via os.path.join and assigns it to self.project_hierarchy. That path is composed of repo_path, the project_hierarchy parameter, and a file named "project_hierarchy.json", making the project's hierarchy data easy to access. -**Note**: -- It is important to ensure that the `repo_path` provided is valid and points to the intended repository, as this path is used for initializing the Jedi project and constructing the path to the project hierarchy file. -- The `project_hierarchy` should be provided relative to the `repo_path` and should correctly lead to the directory where the `project_hierarchy.json` file is expected to be found or created. Incorrect paths could lead to errors in locating or interacting with the project hierarchy file. +**Note**: When using this code, make sure the repo_path passed in is a valid file path and that a "project_hierarchy.json" file exists in the directory pointed to by the project_hierarchy parameter, to avoid errors during instantiation. *** ### FunctionDef get_project_structure(self) -**get_project_structure**: The function of `get_project_structure` is to return the structure of the project by recursively walking through the directory tree. +**get_project_structure**: The function of get_project_structure is to return the structure of the project by recursively walking through the directory tree. -**Parameters**: This function does not take any parameters except for the implicit `self` parameter, which is a reference to the instance of the class that contains the project's repository path. +**parameters**: The parameters of this Function. +· There are no parameters for this function. -**Code Description**: The `get_project_structure` function is designed to provide a textual representation of the project's directory structure, focusing specifically on Python files. It achieves this through a nested function named `walk_dir`, which performs the recursive directory traversal. +**Code Description**: The get_project_structure function is designed to generate a string representation of the project's directory structure. It does this by defining an inner function called walk_dir, which takes two arguments: root (the current directory being processed) and prefix (a string used to format the output). The function initializes an empty list called structure to hold the formatted directory and file names. -The `walk_dir` function takes two parameters: `root`, which is the directory path to start the traversal from, and `prefix`, which is used to maintain the indentation level for the visual representation of the directory structure. The `prefix` parameter is initialized with an empty string and gets incremented with two spaces for each level of recursion, enhancing the readability of the output. +The walk_dir function begins by appending the base name of the current directory (root) to the structure list, prefixed by the provided prefix. It then creates a new prefix by adding two spaces to the existing prefix to indicate a deeper level in the directory hierarchy. The function proceeds to iterate over the sorted list of items in the current directory, skipping any hidden files or directories (those starting with a dot). -The function begins by appending the name of the current directory (`root`) to a list named `structure`. It then iterates over all items in the directory, sorted alphabetically by their names. Hidden files and directories (those starting with a dot) are ignored to focus on relevant project files.
For each item, if it is a directory, `walk_dir` is called recursively with the updated `prefix`. If the item is a Python file (determined by the `.py` extension), its name is appended to the `structure` list with the current indentation level. +For each item, it constructs the full path and checks if it is a directory or a Python file (ending with ".py"). If it is a directory, the function calls itself recursively with the new prefix. If it is a Python file, it appends the file name to the structure list with the new prefix. -After the recursive traversal is complete, the `structure` list, which now contains the formatted project structure, is joined into a single string with newline characters separating each item. This string is then returned as the function's output. +Finally, after the walk_dir function has processed all directories and files, the get_project_structure function joins the elements of the structure list into a single string, separated by newline characters, and returns this string. -**Note**: The function relies on the `os` module for directory and file operations, including checking whether a path is a directory or a file, listing directory contents, and joining paths. It is essential that the `repo_path` attribute of the class instance calling this function is correctly set to the root directory of the project for accurate results. +**Note**: It is important to ensure that the repo_path attribute of the class instance is correctly set to the root directory of the project before calling this function. The function will only include Python files in the output, ignoring other file types. -**Output Example**: +**Output Example**: ``` -project_root - main.py - utils - __init__.py - helper.py - models - __init__.py - user.py +project_name + module1 + file1.py + file2.py + module2 + file3.py + README.md ``` -This example output represents a project with a `main.py` file in the root directory, and two subdirectories (`utils` and `models`), each containing an `__init__.py` file and another Python file. The indentation indicates the directory hierarchy. #### FunctionDef walk_dir(root, prefix) -**walk_dir**: The function of walk_dir is to recursively walk through a directory structure, listing all Python files and directories, while ignoring hidden files and directories. +**walk_dir**: The function of walk_dir is to traverse the specified directory and its subdirectories and collect structural information about all Python files. + +**parameters**: The parameters of this function are as follows: +· parameter1: root - The path of the root directory to traverse. +· parameter2: prefix - The prefix string used to format the output; defaults to an empty string. + +**Code Description**: +The walk_dir function recursively traverses the given directory (root) and all of its subdirectories. It first appends the name of the current directory (obtained via os.path.basename(root)) to the structure list and appends spaces to the prefix string (prefix) for formatting. It then uses os.listdir(root) to list every file and subdirectory in the current directory and sorts those names. -**Parameters**: -- **root**: The root directory from which the traversal begins. It is a string representing the path to the directory. -- **prefix**: An optional string parameter used to prefix directory names and filenames to indicate their level in the directory hierarchy. It defaults to an empty string. While iterating over each name, the function checks whether the name starts with a dot (.) in order to skip hidden files and directories. If the name is not hidden, the function builds the full path (path) for that name. If the path is a directory, the function calls walk_dir recursively with the new prefix (new_prefix). If the path is a file whose name ends with ".py", the function appends the file name, preceded by the new prefix, to the structure list. -**Code Description**: -The `walk_dir` function is designed to create a hierarchical representation of a directory structure, focusing specifically on Python files (`.py`). It takes two parameters: `root`, which specifies the starting point of the directory traversal, and `prefix`, which is used to visually represent the depth of directories and files in the structure.
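Putting the described pieces together, a sketch of the traversal (two-space indentation per level, dot-prefixed names skipped):

```python
import os

def get_project_structure(self) -> str:
    structure = []

    def walk_dir(root, prefix=""):
        structure.append(prefix + os.path.basename(root))
        new_prefix = prefix + "  "  # two extra spaces per nesting level
        for name in sorted(os.listdir(root)):
            if name.startswith("."):
                continue  # skip hidden files and directories
            path = os.path.join(root, name)
            if os.path.isdir(path):
                walk_dir(path, new_prefix)
            elif name.endswith(".py"):
                structure.append(new_prefix + name)

    walk_dir(self.repo_path)
    return "\n".join(structure)
```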
+This design allows the function to efficiently collect structural information about every Python file under the given directory and present it hierarchically. + +**Note**: When using this code, make sure the root directory path passed in is valid and readable. Also note that the function ignores every file and directory whose name starts with a dot, so the relevant logic must be adjusted if those files need to be processed. *** *** ### FunctionDef build_path_tree(self, who_reference_me, reference_who, doc_item_path) **build_path_tree**: The function of build_path_tree is to construct a hierarchical representation of file paths based on two reference lists and a specific document item path. -Upon invocation, the function first appends the basename of the `root` directory to a global list named `structure`, prefixed by the current `prefix`. This action marks the beginning of a new level in the directory hierarchy. **parameters**: The parameters of this Function. · who_reference_me: A list of file paths that reference the current object. · reference_who: A list of file paths that are referenced by the current object. · doc_item_path: A specific file path that needs to be highlighted in the tree structure. -The `prefix` is then extended by two spaces, creating a new `new_prefix` that will be used for items within the `root` directory, indicating their nested level. **Code Description**: The build_path_tree function creates a nested dictionary structure representing a tree of file paths. It utilizes the `defaultdict` from the `collections` module to facilitate the creation of this tree. The function begins by defining an inner function, `tree`, which initializes a new `defaultdict` that can recursively create nested dictionaries. -The function iterates over all items in the `root` directory, sorted alphabetically by their names. It ignores items that start with a dot (`.`), which are typically hidden files or directories in Unix-like systems. The function then processes the two input lists, `who_reference_me` and `reference_who`. For each path in these lists, it splits the path into its components using the operating system's path separator (`os.sep`). It traverses the tree structure, creating a new node for each part of the path. -For each item, the function checks if it is a directory or a file: -- If the item is a directory (not hidden), `walk_dir` is called recursively with the path to the directory and the updated `new_prefix`. This recursion allows the function to traverse the directory structure depth-first. -- If the item is a file and it ends with `.py`, indicating it is a Python file, the file's name, prefixed by the `new_prefix`, is appended to the `structure` list. This inclusion of Python files in the structure list is selective, based on the file extension. Next, the function processes the `doc_item_path`. It splits this path into components as well, but modifies the last component by prefixing it with a star symbol (✳️) to indicate that it is the item of interest. This modified path is then added to the tree in the same manner as the previous paths. -**Note**: -- The function relies on a global list named `structure` to accumulate the directory structure. Ensure this list is defined in the scope where `walk_dir` is called. -- The function is designed to ignore hidden files and directories, which may exclude relevant files if they are named accordingly. -- The `prefix` parameter is used internally to maintain the visual hierarchy of the directory structure. It is incremented with two spaces for each level of depth, but this can be adjusted if a different visual representation is desired. -- The function does not return a value; instead, it modifies the global `structure` list in place.
Ensure to process or print the `structure` list after calling `walk_dir` to view the directory structure. +Finally, the function defines another inner function, `tree_to_string`, which converts the nested dictionary structure into a formatted string representation. This function recursively traverses the tree, indenting each level of the hierarchy for clarity. The resulting string is returned as the output of the build_path_tree function. + +**Note**: It is important to ensure that the paths provided in `who_reference_me` and `reference_who` are valid and correctly formatted. The function assumes that the paths are well-structured and uses the operating system's path separator for splitting. + +**Output Example**: +Given the following inputs: +- who_reference_me: ["folder1/fileA.txt", "folder1/folder2/fileB.txt"] +- reference_who: ["folder3/fileC.txt"] +- doc_item_path: "folder1/folder2/fileB.txt" + +The output of the function might look like this: +``` +folder1 + fileA.txt + folder2 + ✳️fileB.txt +folder3 + fileC.txt +``` +#### FunctionDef tree +**tree**: The function of tree is to return a default dictionary whose default value is produced by calling tree again. + +**parameters**: This function has no parameters. + +**Code Description**: The tree function uses Python's defaultdict class. defaultdict is a dict subclass from the collections module that supplies a default value: when a missing key is accessed, a specified factory function is called automatically to generate that default. In this function, tree itself is used as the factory, which means that every time a missing key is accessed, the defaultdict creates a new tree object. This recursive structure lets the returned dictionary be used to build tree shapes in which each node can have any number of children, with the number and content of the children generated dynamically. + +**Note**: When using this function, take care to avoid unbounded recursion. Because tree returns a defaultdict whose default value is again produced by tree, accessing undefined keys keeps creating new defaultdicts, which may consume a large amount of memory. + +**Output Example**: A call to tree might return the following: +``` +defaultdict(<function tree at 0x...>, {}) +``` +This value is an empty defaultdict whose default factory is the tree function itself. Accessing a missing key, for example `my_tree['a']`, creates a new defaultdict as the value of 'a'. *** #### FunctionDef tree_to_string(tree, indent) **tree_to_string**: The function of tree_to_string is to convert a tree structure into string form for easy visualization. **parameters**: The parameters of this function are as follows: · parameter1: tree - A dictionary-shaped tree structure whose keys represent nodes and whose values may be dictionaries of child nodes or other types. · parameter2: indent - An integer giving the indentation level of the current node; defaults to 0. **Code Description**: tree_to_string converts the tree structure into a string recursively. The function first initializes an empty string s. It then sorts the incoming tree dictionary and iterates over each key-value pair. During iteration it appends the current key (node) to s, indented with a number of spaces determined by the indent parameter. If the current value is a dictionary, the node has children, and the function calls tree_to_string recursively to convert the children into a string and append it to s. Finally, the function returns the assembled string s. **Note**: When using this function, make sure the tree argument passed in is a valid dictionary structure, which may contain nested dictionaries. The indent parameter controls the output format and normally does not need to be set manually, unless the indentation must be adjusted in specific cases. **Output Example**: Assuming the input tree has the following structure: { "root": { "child1": {}, "child2": { "grandchild1": {} } } } calling tree_to_string(tree) returns: root child1 child2 grandchild1 *** *** diff --git a/markdown_docs/repo_agent/runner.md b/markdown_docs/repo_agent/runner.md index 6b9758c..76ef10f 100644 --- a/markdown_docs/repo_agent/runner.md +++ b/markdown_docs/repo_agent/runner.md @@ -1,430 +1,162 @@ -## FunctionDef make_fake_files -**make_fake_files**: The function of make_fake_files is to prepare the repository for documentation generation by handling unstaged changes and untracked files according to their status in git. - -**parameters**: This function does not take any parameters. - -**Code Description**: The `make_fake_files` function plays a crucial role in the documentation generation process within a version-controlled project. Initially, it calls the `delete_fake_files` function to clean up any existing fake or temporary files, ensuring a clean state before proceeding. It then utilizes the GitPython library to interact with the repository, identifying unstaged changes and untracked files. -For untracked files, particularly Python files, the function simply logs a message indicating that these files are being skipped.
This is because untracked files are considered out of scope for the current documentation update process. - -The function pays special attention to unstaged changes, categorizing them into files that have been added, modified, or deleted without being staged for commit. It filters out files based on a naming convention (denoted by `latest_verison_substring`) to avoid processing temporary or fake files that are part of the documentation workflow itself. - -For modified or deleted files, the function performs a series of steps: -- Renames the original file by appending a specific substring to its name, effectively marking it as a "fake" version. This step is skipped if the file does not exist in the repository path, which would be the case for deleted files. -- Creates a new file with the original name and fills it with the content from the unstaged changes. This ensures that the documentation generation process works with the latest changes made to the file. - -The function maintains a mapping (`file_path_reflections`) between the original file paths and their corresponding "fake" versions. This mapping, along with a list of files that were skipped (`jump_files`), is returned at the end of the function's execution. - -**Note**: It is important to ensure that the repository path (`CONFIG["repo_path"]`) and the naming convention for temporary files (`latest_verison_substring`) are correctly configured before invoking this function. The function is designed to be called in scenarios where the documentation needs to be generated or updated based on the latest changes in the repository, including during the initial setup by the `Runner` class and as part of the document update process. - -**Output Example**: -The function returns a tuple containing two elements: -1. A dictionary mapping original file paths to their "fake" versions, e.g., `{'src/my_module.py': 'src/my_module_fake.py'}`. -2. A list of file paths that were skipped during the process, e.g., `['tests/test_my_module.py']`. -## FunctionDef delete_fake_files -**delete_fake_files**: The function of delete_fake_files is to delete all temporary or fake files generated during the task execution process. - -**parameters**: This function does not take any parameters. - -**Code Description**: The `delete_fake_files` function is designed to navigate through the file system starting from a specified root directory, identified by the `CONFIG["repo_path"]` configuration. It recursively searches for files that are considered "fake" or temporary, based on a specific naming convention indicated by the `latest_verison_substring`. This naming convention is used to identify files that have been temporarily created or modified as part of the project's workflow, particularly in the context of document generation and version control. - -The function operates by traversing all files and directories under the given root path. For each file encountered, it checks if the file name ends with the `latest_verison_substring`. If a file matches this criterion, the function performs the following actions: -- It attempts to revert the file to its original state by renaming it, removing the `latest_verison_substring` from its name, effectively restoring the original file extension (typically `.py` for Python files). -- If the file with the `latest_verison_substring` is found to be empty (size 0), it indicates that the file is a temporary placeholder and should be deleted. A message is printed to the console to inform the user of the deletion. 
-- If the file is not empty, it is considered a backup of the latest version of a modified file. The function then renames this file to its original name, effectively recovering the latest version of the file. A message is printed to the console to inform the user of this recovery action. - -The function is called in two specific contexts within the project: -1. **During the fake file creation process**: In the `make_fake_files` function, `delete_fake_files` is invoked at the beginning to clean up any existing fake or temporary files before proceeding with the creation of new fake files based on the current state of the repository. This ensures that the repository is in a clean state and prevents the accumulation of unnecessary temporary files. -2. **After document generation and update process**: In the `run` method of the `Runner` class, `delete_fake_files` is called at the end of the document generation and update process. This call serves as a cleanup step to remove any temporary files that were created during the document update process, ensuring that the repository remains clean and only contains necessary files. - -**Note**: It is crucial to ensure that the `CONFIG["repo_path"]` is correctly configured before invoking this function, as it relies on this configuration to determine the root directory from which to start the file cleanup process. Additionally, the `latest_verison_substring` must be consistently used across the project to mark temporary or fake files, as the function specifically targets files ending with this substring for deletion or recovery. -### FunctionDef gci(filepath) -**gci**: The function of gci is to recursively delete specific temporary files and recover the latest version of files in a given directory. - -**Parameters**: -- `filepath`: The path of the directory to be traversed for file operations. - -**Code Description**: -The `gci` function is designed to traverse all files within the specified `filepath`, including its subdirectories. It performs a recursive search to identify and process files based on specific criteria. - -1. The function starts by listing all items in the given `filepath` using `os.listdir(filepath)`. -2. It iterates through each item (`fi`) in the list. For each item, it constructs the full path (`fi_d`) by joining the `filepath` with the item name. -3. If the item is a directory (`os.path.isdir(fi_d)` returns `True`), the function calls itself recursively with the directory's path, allowing it to traverse subdirectories. -4. If the item is not a directory, the function checks if its name ends with a predefined substring (`latest_verison_substring`). This substring is used to identify specific temporary files or versions of files that need to be processed. -5. For files ending with this substring, the function performs the following operations: - - It generates the original file name (`origin_name`) by replacing the `latest_verison_substring` with `.py`, assuming the original files are Python files. - - It deletes the original file by calling `os.remove(origin_name)`. - - If the size of the file (`fi_d`) is 0 (indicating an empty file), it prints a message indicating the deletion of a temporary file and removes `fi_d`. - - If the file is not empty, it prints a message indicating the recovery of the latest version of the file and renames `fi_d` to `origin_name`, effectively replacing the original file with this latest version. 
- -**Note**: -- The function uses `os.listdir`, `os.path.join`, `os.path.isdir`, `os.remove`, and `os.rename` from the `os` module to manipulate files and directories, making it platform-independent. -- It assumes that the temporary files or specific versions of files to be deleted are identified by a unique substring (`latest_verison_substring`) at the end of their names. -- The function prints messages to the console using `print` with formatted strings that include color codes (`Fore.LIGHTRED_EX`, `Style.RESET_ALL`) for highlighting. These color codes require the `colorama` module. -- The messages include the relative path of the processed files, calculated by trimming `CONFIG['repo_path']` from the full path of the files. This requires the `CONFIG` dictionary to be predefined and contain the key `repo_path` with the appropriate value. -- Care should be taken when specifying `latest_verison_substring` and `CONFIG['repo_path']` to ensure they accurately reflect the intended files to be processed and the base repository path, respectively. -*** -## FunctionDef need_to_generate(doc_item, ignore_list) -**need_to_generate**: The function of `need_to_generate` is to determine whether documentation needs to be generated for a given documentation item, based on its status and type, while also considering a list of paths to ignore. - -**Parameters**: -- `doc_item`: An instance of `DocItem` representing the documentation item to be evaluated. -- `ignore_list`: A list of file paths that should be ignored during documentation generation. - -**Code Description**: -The `need_to_generate` function plays a critical role in the documentation generation process by filtering out items that do not require documentation. It first checks if the documentation item's status is up to date (`DocItemStatus.doc_up_to_date`), in which case it immediately returns `False`, indicating that no documentation needs to be generated for this item. - -Next, it retrieves the full path of the documentation item using the `get_full_name` method. This path is used to determine if the item or any of its parent directories is listed in the `ignore_list`. If the item's type is either a file, directory, or repository (`DocItemType._file`, `DocItemType._dir`, or `DocItemType._repo`), the function returns `False`, as the current implementation is designed to skip documentation generation for these types of items. - -The function then iterates through the item's ancestors by accessing each item's `father` attribute. If any ancestor is a file (`DocItemType._file`) and is not in the `ignore_list` (or not a subpath of any path in the `ignore_list`), the function returns `True`, indicating that documentation should be generated for this item. If no such condition is met, the function ultimately returns `False`. - -**Note**: -- This function is crucial for optimizing the documentation generation process by ensuring that only necessary documentation is generated. This prevents unnecessary processing for items that are either up to date or not intended to be documented (e.g., files and directories). -- The `ignore_list` parameter allows for flexibility in excluding specific paths from documentation generation, which can be useful for skipping third-party libraries or other non-essential parts of the codebase. -- It is important to maintain the `DocItem` hierarchy accurately, as the function relies on traversing this hierarchy to make decisions. 
-
-**Output Example**:
-Assuming a documentation item for a function within a file that is not in the `ignore_list` and whose documentation is not up to date, the function would return `True`, indicating that documentation should be generated for this item. Conversely, if the item were a file listed in the `ignore_list`, the function would return `False`.
-## FunctionDef load_whitelist
-**load_whitelist**: The function of `load_whitelist` is to load a whitelist from a specified JSON file path defined in the configuration.
-
-**Parameters**: This function does not take any parameters.
-
-**Code Description**: The `load_whitelist` function is designed to read a whitelist from a JSON file whose path is specified in a global configuration object (`CONFIG`). It first checks if the `whitelist_path` key in the `CONFIG` dictionary is not `None`. If the path is valid and the file exists, it asserts this condition to ensure the file's presence. An error message is raised if the file does not exist, guiding the user to ensure that the `whitelist_path` points to a valid JSON file.
-
-The function then opens and reads the JSON file, loading its contents into a Python dictionary using the `json.load` method. This dictionary, which represents the whitelist data, is then returned to the caller.
-
-If the `whitelist_path` in the `CONFIG` is `None`, indicating that no whitelist path was configured, the function returns `None`. This behavior allows for optional use of a whitelist in the broader application.
-
-In the context of its calling situation within the project, specifically in the `__init__` method of a `Runner` class, the `load_whitelist` function is used to load whitelist data right after initializing various components of the `Runner`. The loaded whitelist is then assigned to the `white_list` attribute of a `MetaInfo` instance. This indicates that the whitelist is a crucial part of the metadata management within the application, potentially used to filter or allow certain operations based on the items listed in the whitelist. After loading and assigning the whitelist, the metadata information, including the whitelist, is checkpointed (saved) to a specified directory path, ensuring that the whitelist is part of the persistent state of the application.
-
-**Note**: It is important to ensure that the `whitelist_path` in the `CONFIG` points to a valid JSON file. The file must exist at the specified path for the `load_whitelist` function to work correctly. The function assumes the file is in a readable JSON format and will raise an error if it cannot find the file or if the file's format is incorrect.
-
-**Output Example**: Assuming the JSON file contains a list of allowed project names, the function might return a dictionary like the following:
-```python
-{
-    "allowed_projects": ["project1", "project2", "project3"]
-}
-```
 ## ClassDef Runner
 Doc is waiting to be generated...
 ### FunctionDef __init__(self)
 Doc is waiting to be generated...
 ***
 ### FunctionDef get_all_pys(self, directory)
-**get_all_pys**: The function of get_all_pys is to retrieve all Python files within a specified directory and its subdirectories.
+**get_all_pys**: The function of get_all_pys is to retrieve all Python files under a specified directory.
-**Parameters**:
-- **directory (str)**: The directory path where the search for Python files (.py) will be conducted.
+**parameters**: The parameters of this Function.
+· directory: The directory to search, as a string.
-**Code Description**:
-The `get_all_pys` function is designed to search through a given directory and all of its subdirectories to find files that end with the `.py` extension, which are Python files. It utilizes the `os.walk` method to traverse the directory tree. The `os.walk` method yields a tuple containing the root directory path (`root`), a list of directories (`dirs`), and a list of files (`files`) for each iteration.
+**Code Description**: The get_all_pys function traverses the given directory and returns the paths of all Python files in that directory and its subdirectories. The function first initializes an empty list, python_files, to store the paths of the Python files it finds. It then uses os.walk(directory) to recursively traverse the specified directory. os.walk returns a generator whose elements are three-tuples (root, dirs, files), where root is the path of the directory currently being visited, dirs is the list of its subdirectories, and files is the list of files it contains. The function then checks each file; if the file name ends with ".py", the file's full path (built with os.path.join(root, file)) is appended to the python_files list. Finally, the function returns the list containing the paths of all Python files found.
-For each file in the list of files, the function checks if the file name ends with the `.py` extension using the `endswith` method. If the condition is met, the file is considered a Python file, and its path is constructed by joining the root path with the file name using `os.path.join`. This path is then appended to the `python_files` list.
+**Note**: When using this function, make sure that the directory argument is a valid directory path. Also ensure that the os module has been imported before calling the function, to avoid runtime errors.
-After traversing all directories and files, the function returns the `python_files` list, which contains the paths to all the Python files found within the specified directory and its subdirectories.
+**Output Example**: Suppose the following Python files are found under the specified directory:
+- /path/to/directory/script1.py
+- /path/to/directory/subdirectory/script2.py
-**Note**:
-- The function assumes that the input `directory` is a valid directory path. If the directory does not exist or the path is invalid, `os.walk` may raise an error.
-- This function does not exclude Python files in hidden directories or files that start with a dot (.) on Unix-like systems, as `os.walk` includes them in its traversal.
-
-**Output Example**:
-Assuming the directory `/projects/my_project` contains two Python files: `/projects/my_project/app.py` and `/projects/my_project/utils/util.py`, the function call `get_all_pys('/projects/my_project')` would return:
-```
-['/projects/my_project/app.py', '/projects/my_project/utils/util.py']
+The return value of the function would then be:
+```python
+[
+    '/path/to/directory/script1.py',
+    '/path/to/directory/subdirectory/script2.py'
+]
 ```
 ***
 ### FunctionDef generate_doc_for_a_single_item(self, doc_item)
-**generate_doc_for_a_single_item**: The function of `generate_doc_for_a_single_item` is to generate documentation for a single item within a software project's repository.
-
-**Parameters**:
-- `doc_item`: An instance of `DocItem` representing the documentation item for which documentation is to be generated.
-
-**Code Description**:
-The `generate_doc_for_a_single_item` function is a crucial component of the documentation generation process, designed to handle the documentation of individual items such as classes, functions, or methods within a software project. The function operates as follows:
-
-1. It begins by retrieving the relative file path of the `doc_item` to be documented.
-2. It checks against a configurable ignore list to determine if the item should be skipped. This is useful for excluding certain files or directories from the documentation process.
-3. If the item is not to be ignored, the function proceeds to generate documentation. It does this by first printing a message indicating the start of the documentation generation process for the item.
-4.
A `FileHandler` instance is created with the repository path and the relative file path of the item. This handler facilitates file operations such as reading and writing. -5. The function then invokes the `chat_engine.generate_doc` method, passing the `doc_item` and the `FileHandler` instance. This method is responsible for generating the actual documentation content based on the item's code and metadata. -6. The generated documentation content is appended to the `md_content` attribute of the `doc_item`, and its status is updated to indicate that the documentation is up to date. -7. Finally, a checkpoint is created to save the current state of the documentation process. This is useful for resuming the process in case of interruptions. - -The function also includes error handling to log and skip items that fail to generate documentation after multiple attempts. This ensures that the documentation process can continue even if certain items present challenges. - -**Note**: -- This function is part of a larger system designed to automate the generation of documentation for software projects. It interacts with other components such as the `chat_engine` for content generation and `FileHandler` for file operations. -- The ignore list and other configurations are crucial for controlling the scope of the documentation process, allowing developers to exclude non-essential items or directories. -- The function's reliance on the `chat_engine` and `FileHandler` means that changes to these components could affect its behavior. Therefore, it's important to maintain compatibility across these components. -- The checkpoint mechanism is vital for maintaining progress and ensuring that the documentation process can be efficiently resumed or rolled back as needed. +Doc is waiting to be generated... *** ### FunctionDef first_generate(self) -**first_generate**: The function of first_generate is to initiate the process of generating documentation for all objects within a project repository. - -**Parameters**: This function does not accept any parameters. - -**Code Description**: The `first_generate` function is a critical component of the documentation generation system, designed to kick-start the documentation process for a software project. It operates under the assumption that no prior documentation exists or that a complete regeneration of documentation is required. The function performs several key operations as outlined below: - -1. **Logging Start**: It logs the initiation of the documentation generation process, indicating that the process has started. - -2. **Configuration and Ignoring Specific Paths**: The function retrieves a list of paths to ignore during documentation generation from the configuration. This list is used to filter out objects that should not be documented, such as third-party libraries or specific files and directories designated by the user. +Doc is waiting to be generated... +*** +### FunctionDef markdown_refresh(self) +**markdown_refresh**: The function of markdown_refresh is to write the latest document information into a markdown format folder, regardless of whether the markdown content has changed. -3. **Task Management**: It utilizes a partial function, `check_task_available_func`, which wraps around the `need_to_generate` function with the ignore list as an argument. This setup is used to filter tasks that need documentation generated. 
A task manager object is then created, which organizes tasks based on the project's topology, ensuring that documentation is generated in an appropriate order that respects dependencies among objects. +**parameters**: The parameters of this Function. +· None -4. **Initialization and Task List Loading**: The function checks if the documentation generation process is already in progress. If not, it marks the process as started and logs the initialization of a new task list. If the process was previously started and interrupted, it logs that an existing task list is being loaded. +**Code Description**: The markdown_refresh function is responsible for generating and updating markdown documentation for the project. It begins by acquiring a lock to ensure thread safety during the execution of the function. The first step is to delete any existing content in the markdown folder specified by the project settings. This is achieved using the shutil.rmtree method, which removes the directory and all its contents, followed by the creation of a new markdown folder. -5. **Printing Task List**: The current task list is printed to provide an overview of the tasks that will be processed. +Next, the function retrieves a list of all file items from the documentation hierarchy using the get_all_files method from the MetaInfo class. It iterates through each file item, checking whether it contains any documentation content using a recursive helper function named recursive_check. This function inspects the DocItem objects to determine if they have any markdown content or if their children contain markdown content. -6. **Parallel Documentation Generation**: The function sets up a synchronization mechanism, `sync_func`, to refresh markdown documentation. It then creates and starts multiple threads, each responsible for generating documentation for a portion of the tasks. This parallel processing accelerates the documentation generation process. +If a file item does not contain any documentation, it is skipped. For file items that do contain documentation, the function constructs the markdown content using another helper function called to_markdown. This function generates the markdown representation of the DocItem and its children, formatting the output according to the hierarchical structure of the documentation. -7. **Completion Handling**: Upon successful completion of all tasks, the function updates the `document_version` to reflect the current repository state, marks the generation process as finished, and performs a checkpoint operation. This operation saves the current state of the documentation process, including generated documentation and metadata, to the filesystem. +Once the markdown content is generated, it is written to a .md file in the markdown folder. The file path is constructed by replacing the .py extension of the file item with .md. The function ensures that the necessary directories are created before writing the markdown content to the file. -8. **Error Handling**: If an error occurs during the documentation generation process, the function logs the error and the number of documents that were successfully generated before the error occurred. +Finally, the function logs an informational message indicating that the markdown documents have been refreshed successfully. -**Note**: It is crucial to ensure that the target repository's code does not change during the documentation generation process. 
This requirement is because the documentation generation process must be bound to a specific version of the codebase to ensure accuracy and consistency of the generated documentation. The function is designed to be robust, capable of resuming the documentation generation process from where it left off in case of interruptions, and it leverages multi-threading to efficiently handle the generation of documentation for large projects. -*** -### FunctionDef markdown_refresh(self) -**markdown_refresh**: The function of markdown_refresh is to update the project's documentation by writing the latest document information into a markdown format, regardless of whether the markdown content has changed. +The markdown_refresh function is called within the first_generate method and the run method of the Runner class. In first_generate, it is used to refresh the markdown documentation after generating all documents for the first time. In the run method, it is invoked after processing changes to ensure that the markdown documentation is up to date with the latest changes in the project. -**Parameters**: This function does not accept any parameters. +**Note**: When using this function, ensure that the project settings are correctly configured, and that the target repository is accessible. The function assumes that the markdown folder is specified in the project settings and that the necessary permissions are in place for file operations. -**Code Description**: The `markdown_refresh` function plays a crucial role in maintaining the project documentation up-to-date. It operates within a locked context to ensure thread safety, indicated by the use of `self.runner_lock`. The function begins by identifying the markdown folder path using configurations from `CONFIG["repo_path"]` and `CONFIG["Markdown_Docs_folder"]`. If the markdown folder exists, it is removed along with all its contents to ensure a fresh start, and then recreated. +**Output Example**: A possible output of the markdown_refresh function could be a markdown file structured as follows: -The function retrieves a list of all file items in the project using `self.meta_info.get_all_files()`. For each file item, it checks if there is any documentation content available. This is done through a recursive function `recursive_check`, which traverses the documentation tree of each file to find any non-empty markdown content (`md_content`). Files without documentation are skipped. +# Class Example +This is the documentation for the Example class. -For files with documentation, the function constructs the markdown content. It does this by iterating over the children of the file item and converting each documentation item into markdown format. This conversion is handled by another recursive function `to_markdown`, which formats the documentation content into markdown, including the object's type, name, parameters, and documentation text. The markdown content is then written to a corresponding markdown file within the markdown folder, with the file name derived from the original Python file name but with a `.md` extension. +## Method example_method +This method does something important. -Finally, the function logs a message indicating the completion of the markdown document refresh process. +### Parameters +- param1: Description of parameter 1. +- param2: Description of parameter 2. -**Note**: -- The function assumes the existence of a configuration dictionary `CONFIG` that contains paths and settings for the repository and documentation folders. 
-- It is designed to work within a multi-threaded environment, as indicated by the use of a lock (`self.runner_lock`). -- The function relies on the correct implementation of `self.meta_info.get_all_files()`, `recursive_check`, and `to_markdown` to function properly. -- Error handling for file operations (e.g., file writing failures) is not explicitly covered in the function's implementation. +*** -**Output Example**: There is no direct output from this function as it performs file operations. However, after its execution, the markdown documentation folder will be populated with updated markdown files corresponding to the project's documentation state. Each markdown file will contain structured documentation for a specific file in the project, formatted according to the logic defined in the `to_markdown` function. +This structure would be repeated for each documented item, providing a clear and organized representation of the project's documentation in markdown format. #### FunctionDef recursive_check(doc_item) -**recursive_check**: The function of recursive_check is to check if a documentation item or any of its children contains markdown content. +**recursive_check**: The function of recursive_check is to determine whether a given documentation item contains any Markdown content or if any of its child items contain Markdown content. -**Parameters**: -- `doc_item`: A `DocItem` object representing a documentation item within a software project's repository. +**parameters**: The parameters of this Function. +· doc_item: An instance of the DocItem class, representing the documentation item to be checked. -**Code Description**: -The `recursive_check` function is designed to determine whether a given documentation item (`DocItem`), such as a class, function, or variable, contains any markdown content in its `md_content` attribute or in any of its descendant items. This function plays a crucial role in identifying documentation items that are documented or need documentation updates within a software project's repository. +**Code Description**: The recursive_check function operates by first checking if the provided DocItem instance, referred to as doc_item, has any Markdown content stored in its md_content attribute. If this attribute is not empty (i.e., it contains one or more Markdown entries), the function immediately returns True, indicating that the documentation item has associated content. -The function starts by checking the `md_content` attribute of the `doc_item` parameter. If this attribute is not empty, it indicates that the current documentation item contains markdown content, and the function immediately returns `True`, signifying that markdown content is present. +If the md_content attribute is empty, the function proceeds to iterate through the children of the doc_item. The children are stored in the children attribute, which is a dictionary mapping child object names to their corresponding DocItem instances. For each child DocItem, the recursive_check function is called recursively. If any child returns True, indicating that it contains Markdown content, the parent function will also return True. -If the `md_content` attribute is empty, the function proceeds to recursively check each child of the current documentation item. This is achieved by iterating over the `children` attribute of `doc_item`, which is a dictionary where keys are the names of the child items and values are the `DocItem` objects representing those children. 
For each child, the function calls itself (`recursive_check`) with the child as the argument. +If neither the doc_item nor any of its children contain Markdown content, the function ultimately returns False. This recursive approach allows the function to traverse the entire hierarchy of documentation items, ensuring that all levels are checked for content. -If any child (or descendant thereof) is found to contain markdown content, the recursive call to `recursive_check` will return `True`, causing the parent call to also return `True`. This process continues up the call stack until the original caller receives the result. +The recursive_check function is closely related to the DocItem class, which encapsulates the metadata and relationships of documentation items within a project. The function leverages the hierarchical structure established by the DocItem instances to perform its checks effectively. -If neither the documentation item nor any of its descendants contain markdown content, the function returns `False`, indicating the absence of markdown documentation. +**Note**: It is important to ensure that the doc_item passed to the recursive_check function is a valid instance of the DocItem class, as the function relies on the attributes defined within this class to perform its checks accurately. -**Note**: -This function is essential for automating the process of identifying which parts of a software project's codebase are documented and which parts may require documentation updates. It leverages the hierarchical structure of documentation items represented by the `DocItem` class, allowing for efficient traversal and checking of documentation status across a project. - -**Output Example**: -- If a `DocItem` or any of its children has markdown content, the function returns `True`. -- If neither the `DocItem` nor any of its children has markdown content, the function returns `False`. +**Output Example**: If a DocItem instance has Markdown content, the function would return True. Conversely, if it and all its children lack Markdown content, the function would return False. For instance, if doc_item.md_content is an empty list and all children also have empty md_content, the output would be: +False *** #### FunctionDef to_markdown(item, now_level) -**to_markdown**: The function of `to_markdown` is to convert a documentation item and its children into a markdown formatted string. +**to_markdown**: The function of to_markdown is to generate a Markdown representation of a documentation item and its children. -**Parameters**: -- `item`: A `DocItem` instance representing the documentation item to be converted into markdown format. -- `now_level`: An integer indicating the current markdown header level for the item. +**parameters**: The parameters of this Function. +· item: An instance of DocItem, representing the documentation item to be converted to Markdown. +· now_level: An integer indicating the current level of the documentation item in the hierarchy, which affects the Markdown heading level. -**Code Description**: -The `to_markdown` function starts by initializing an empty string, `markdown_content`, which will be populated with the markdown representation of the documentation item passed as the `item` parameter. It constructs a markdown header using the `now_level` parameter to determine the number of `#` symbols to prepend, indicating the header level. 
The header also includes the type of the documentation item (converted to a string using the `to_str` method of the `DocItemType` enumeration) and the name of the item (`obj_name`). +**Code Description**: The to_markdown function constructs a Markdown string that represents a given documentation item (DocItem) and its hierarchical children. It begins by initializing an empty string called markdown_content. The function then appends a header to this string, which consists of a number of hash symbols corresponding to the now_level parameter, followed by the string representation of the item's type (obtained by calling the to_str method on item.item_type) and the object's name (item.obj_name). -If the documentation item contains parameters (indicated by the presence of a "params" key in the `item.content` dictionary and the existence of at least one parameter), these parameters are appended to the header in a comma-separated list enclosed in parentheses. +If the item contains parameters (checked by verifying the presence of "params" in item.content and ensuring it has a length greater than zero), these parameters are formatted and appended to the markdown_content string in parentheses. Following this, the function adds the last entry from item.md_content to the markdown_content, or a placeholder message if md_content is empty. -Following the header, the function appends the last entry of the `item.md_content` list to `markdown_content`, which represents the markdown content of the documentation item. If `item.md_content` is empty, a placeholder string "Doc is waiting to be generated..." is appended instead. +The function then iterates over the children of the current item (item.children), recursively calling to_markdown for each child with an incremented now_level. Each child's Markdown output is appended to the markdown_content, separated by a line of asterisks for clarity. -The function then recursively calls itself for each child of the documentation item, incrementing `now_level` by 1 for each level of recursion to ensure that child items are represented as subheaders in the markdown content. After processing each child, a separator line (`"***\n"`) is appended to `markdown_content`. +Finally, the complete markdown_content string is returned, providing a structured Markdown representation of the documentation item and its children. -Finally, the function returns the `markdown_content` string, which contains the markdown representation of the documentation item and its hierarchical structure. +This function relies on the DocItem class, which encapsulates the metadata and relationships of documentation items, and the DocItemType class, which provides the to_str method to convert item types into string representations. The to_markdown function is essential for generating readable documentation in Markdown format, facilitating better understanding and accessibility of the project's documentation structure. -**Note**: -- It is important to ensure that the `item` parameter is a valid `DocItem` instance with correctly populated attributes, as the function relies on these attributes to generate the markdown content. -- The `now_level` parameter should be appropriately set based on the desired starting header level for the markdown content. Typically, this would be 1 for top-level documentation items. +**Note**: When using this function, ensure that the DocItem instances are properly structured and that their content is accurately populated to avoid incomplete or misleading documentation output. 
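+
+To make the recursion concrete, here is a minimal, illustrative sketch of the traversal described above. It is not the project's actual implementation: the Item dataclass is a simplified, hypothetical stand-in for DocItem, and item_type is reduced to a plain string instead of a DocItemType with a to_str method.
+```python
+from dataclasses import dataclass, field
+
+@dataclass
+class Item:
+    """Simplified stand-in for DocItem, for illustration only."""
+    item_type: str
+    obj_name: str
+    content: dict = field(default_factory=dict)
+    md_content: list = field(default_factory=list)
+    children: dict = field(default_factory=dict)
+
+def to_markdown_sketch(item: Item, now_level: int) -> str:
+    # Header: one "#" per level, then the item's type and name.
+    markdown = "#" * now_level + f" {item.item_type} {item.obj_name}"
+    params = item.content.get("params", [])
+    if params:
+        markdown += f"({', '.join(params)})"
+    # Latest generated doc, or the placeholder used throughout the project.
+    markdown += "\n" + (item.md_content[-1] if item.md_content else "Doc is waiting to be generated...") + "\n"
+    # Children become subsections one heading level deeper, separated by "***".
+    for child in item.children.values():
+        markdown += to_markdown_sketch(child, now_level + 1) + "***\n"
+    return markdown
+```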
-**Output Example**:
-Assuming a `DocItem` instance representing a function with one parameter and no children, and `now_level` set to 1, the output might look like this:
+**Output Example**: An example output of the to_markdown function for a DocItem representing a function might look like this:
 ```
-# FunctionDef example_function(param1)
-Doc is waiting to be generated...
+## FunctionDef my_function_name (param1, param2)
+This function does something important...
+***
+### FunctionDef my_child_function_name
+This child function does something else...
 ***
 ```
 ***
 ***
 ### FunctionDef git_commit(self, commit_message)
-**git_commit**: The function of `git_commit` is to commit changes to a Git repository with a specified commit message.
+**git_commit**: The function of git_commit is to perform a Git commit operation with the specified commit message.
-**Parameters**:
-- `commit_message`: A string that contains the message to be used for the Git commit.
+**parameters**: The parameters of this Function.
+· commit_message: The commit message, describing the content of this commit.
-**Code Description**:
-The `git_commit` function is designed to automate the process of committing changes to a Git repository using a specified commit message. It achieves this by executing a Git command through the `subprocess.check_call` method. The specific command executed is `git commit --no-verify -m`, followed by the commit message provided by the `commit_message` parameter.
+**Code Description**: The git_commit function performs a commit operation in the Git version control system. It accepts one parameter, commit_message, which describes the commit. Internally, the function uses the subprocess module to invoke the system command line and run the `git commit` command. Specifically, it uses `subprocess.check_call` to execute the command, passing the `--no-verify` option, which skips hook verification during the commit, and the `-m` option followed by the commit message. If an error occurs during execution, the function catches the subprocess.CalledProcessError exception and prints an error message explaining why the commit failed.
-The `--no-verify` option is used to bypass any pre-commit hooks, allowing the commit to proceed without being halted by any checks that might be configured to run before a commit is allowed. This can be useful in scenarios where the developer is confident about the changes and wishes to expedite the commit process.
-
-In the event that the commit operation encounters an error, such as failing to execute the Git command due to issues like unstaged changes or a misconfigured Git environment, the function catches the `subprocess.CalledProcessError` exception. Upon catching this exception, it prints an error message to the console, indicating that an error occurred during the commit operation. The error message includes the exception's string representation to provide insight into the nature of the error encountered.
-
-**Note**:
-- It is important to ensure that the working directory of the script executing this function is the root of the Git repository where the commit is intended to be made. This is because the Git command is executed without specifying a working directory, and it will default to the current working directory of the script.
-- Users should be cautious when using the `--no-verify` option, as it bypasses all checks provided by pre-commit hooks, which might include important validations or tests that help maintain code quality and prevent potential issues.
-- This function does not handle staging of changes. Therefore, all changes intended to be committed must be staged (added to the index) before calling this function.
+**Note**: When using this function, make sure you are in the correct Git repository directory and that there are uncommitted changes. Also note that commit_message should be a valid string that clearly describes the content of the commit.
 ***
 ### FunctionDef run(self)
 Doc is waiting to be generated...
 ***
 ### FunctionDef add_new_item(self, file_handler, json_data)
-**add_new_item**: The function of `add_new_item` is to add new projects to the JSON file and generate corresponding documentation.
- -**Parameters**: -- `file_handler` (FileHandler): The file handler object for reading and writing files. -- `json_data` (dict): The JSON data storing the project structure information. - -**Code Description**: -The `add_new_item` function is designed to handle the addition of new projects by updating a JSON file with the project's structure and generating markdown documentation for it. Initially, an empty dictionary named `file_dict` is created to store information about the new project. - -The function iterates over all functions and classes within the file, as retrieved by the `file_handler.get_functions_and_classes` method, which parses the file's content to extract these elements. For each function or class, detailed information including its type, name, start and end lines, parent, and parameters is obtained using the `file_handler.get_obj_code_info` method. - -For each item, the function then generates documentation using the `self.chat_engine.generate_doc` method, which takes the code information and file handler as arguments. The generated markdown content is added to the `code_info` dictionary under the key `md_content`. - -The `file_dict` dictionary is updated with a new entry for each function or class, using its name as the key and the `code_info` dictionary as the value. This updated dictionary represents the structure of the new project. - -The function updates the `json_data` with the new project's structure by assigning the `file_dict` to the key corresponding to the file's path. The updated `json_data` is then written back to the JSON file, effectively adding the new project's structure to it. - -Finally, the function generates a markdown file for the new project. It converts the JSON data to markdown format using the `file_handler.convert_to_markdown_file` method and writes the markdown content to a file using the `file_handler.write_file` method. The markdown file is saved in a directory specified by the `CONFIG["Markdown_Docs_folder"]` configuration, with its name derived from the original file path by replacing the `.py` extension with `.md`. - -**Note**: -- The function assumes that the `file_handler` object is correctly initialized with the repository path (`repo_path`) and the file path (`file_path`). -- It is crucial that the JSON file (`self.project_manager.project_hierarchy`) and the markdown documentation folder (`CONFIG["Markdown_Docs_folder"]`) are correctly configured and accessible. -- The function logs the completion of adding the new project's structure to the JSON file and the generation of the markdown documentation, providing feedback on the operation's success. -- This function is part of a larger system for managing project documentation, and it interacts with other components such as the `FileHandler` for file operations and the `ChatEngine` for generating documentation content. +Doc is waiting to be generated... *** ### FunctionDef process_file_changes(self, repo_path, file_path, is_new_file) -**process_file_changes**: The function of `process_file_changes` is to process changed files in a repository, handling both new and existing files, and updating documentation and project structure accordingly. - -**Parameters**: -- `repo_path` (str): The path to the repository. -- `file_path` (str): The relative path to the file. -- `is_new_file` (bool): Indicates whether the file is new or not. - -**Code Description**: -The `process_file_changes` function is a comprehensive method designed to handle changes detected in files within a repository. 
It operates by first initializing a `FileHandler` instance with the repository and file paths. This handler is then used to read the file's source code and to detect changes through a series of steps involving the `change_detector` object. - -The function retrieves the differences in the file content, whether it is a new file or an existing one, by calling `change_detector.get_file_diff`. It then parses these differences to identify added or removed lines using `change_detector.parse_diffs`. Subsequently, it identifies structural changes (e.g., additions or removals of functions and classes) within the file by comparing the parsed differences against the file's structure obtained from the `FileHandler`. - -Upon detecting changes, the function updates the project's JSON structure file if the file path exists within it. This involves either updating existing items in the JSON file with `update_existing_item` or adding a new item with `add_new_item` if the file is not already listed. These updates are crucial for maintaining an accurate representation of the project's structure. - -For documentation purposes, the function generates markdown content reflecting the current state of the file and writes this content to a markdown file. This step is essential for keeping the project's documentation in sync with its codebase. - -Finally, the function stages any updated markdown files for commit by adding them to the Git staging area using `change_detector.add_unstaged_files`. This ensures that documentation changes are ready to be committed to the repository. - -**Note**: -- It is critical to ensure that the repository path (`repo_path`) and file path (`file_path`) are correctly set and point to valid locations within the project. -- The function relies on the accurate detection and parsing of file changes. Therefore, the underlying methods and tools (e.g., Git) must be correctly configured and operational. -- The function's ability to update project documentation and structure dynamically makes it a key component in maintaining project integrity and consistency, especially in projects with frequent changes. -- The interaction with `FileHandler`, `change_detector`, and other components highlights the function's role in a larger system designed for automated project management and documentation. +Doc is waiting to be generated... *** ### FunctionDef update_existing_item(self, file_dict, file_handler, changes_in_pyfile) -**update_existing_item**: The function of `update_existing_item` is to update the documentation and structure information of existing objects in a project based on recent changes. - -**Parameters**: -- `file_dict`: A dictionary containing file structure information. -- `file_handler`: The file handler object, which provides methods for file operations. -- `changes_in_pyfile`: A dictionary containing information about the objects that have changed in the file. - -**Code Description**: -The `update_existing_item` function is designed to handle updates to existing items within a project's documentation and structure information. This process involves several key steps: - -1. **Identifying New and Deleted Objects**: The function starts by identifying new and deleted objects in the file using the `get_new_objects` method. This method compares the current and previous versions of the file to determine which objects have been added or removed. - -2. 
**Handling Deleted Objects**: For each object identified as deleted, the function removes the corresponding entry from the `file_dict`, effectively updating the project's structure information to reflect these deletions. - -3. **Generating File Structure Information**: The function generates the current file structure information by calling the `generate_file_structure` method of the `file_handler`. This step is crucial for understanding the current state of the file, including all existing objects and their details. - -4. **Updating Global File Structure Information**: The function updates the global file structure information stored in `file_dict` with the newly generated file structure information. This includes updating details such as the object type, code start and end lines, parent object, and name column for each object. - -5. **Handling Added Objects**: For objects identified as added, the function retrieves a list of referencers for each object. This is done by finding all objects that reference the newly added objects, which is essential for generating accurate documentation. - -6. **Concurrent Documentation Generation**: Using a thread pool executor, the function concurrently generates documentation for each added object by calling the `update_object` method. This method updates the corresponding field information and generates documentation content for each object. - -7. **Returning Updated File Structure Information**: Finally, the function returns the updated `file_dict`, which now contains the latest structure information and documentation for the file. - -**Note**: -- The function relies on accurate and up-to-date information provided by the `file_dict` and `changes_in_pyfile` parameters. It is crucial that these parameters accurately reflect the current state of the file and the changes that have occurred. -- The function uses concurrent execution to speed up the documentation generation process for added objects. It is important to ensure that the system running this code can handle the concurrency level specified. - -**Output Example**: -An updated `file_dict` might look like this after the function execution: -```python -{ - "function_name": { - "type": "function", - "code_start_line": 10, - "code_end_line": 20, - "parent": "class_name", - "name_column": 5 - }, - "class_name": { - "type": "class", - "code_start_line": 5, - "code_end_line": 25, - "parent": None, - "name_column": 1 - } -} -``` -This example shows the updated structure information for a function and a class within the file, including their types, start and end lines, parent objects, and name column positions. +Doc is waiting to be generated... *** ### FunctionDef update_object(self, file_dict, file_handler, obj_name, obj_referencer_list) -**update_object**: The function of `update_object` is to generate documentation content and update corresponding field information of an object. - -**Parameters**: -- `file_dict`: A dictionary containing old object information. -- `file_handler`: The file handler, which is an object that provides methods for file operations and access to repository paths. -- `obj_name`: The name of the object as a string. -- `obj_referencer_list`: A list of object referencers, which are objects that reference the current object. - -**Code Description**: -The `update_object` function plays a crucial role in maintaining and updating the documentation of objects within a software project. 
It is designed to work with a dictionary that holds information about objects (`file_dict`), a file handler for file operations, the name of the object to be updated (`obj_name`), and a list of objects that reference the current object (`obj_referencer_list`). - -Upon invocation, the function first checks if the object name exists within the `file_dict`. If it does, the function proceeds to retrieve the object's current information. It then calls the `generate_doc` method of the `chat_engine` object, passing the object, the file handler, and the list of object referencers as arguments. The `generate_doc` method is responsible for generating the documentation content based on the object's information and its relationship with other objects in the project. - -The response from `generate_doc` contains the generated documentation content, which is then used to update the `md_content` field of the object within `file_dict`. This process ensures that the documentation for the object is up-to-date, reflecting any changes or updates made to the object or its relationships within the project. - -**Note**: -The effectiveness of the `update_object` function relies heavily on the accuracy and completeness of the `file_dict`, the capabilities of the `file_handler`, and the correct identification of `obj_referencer_list`. It is essential that the `file_dict` accurately represents the current state of objects within the project, and that the `obj_referencer_list` correctly identifies all objects that reference the current object. This ensures that the documentation generated is comprehensive and accurately reflects the object's usage and relationships within the project. +Doc is waiting to be generated... *** ### FunctionDef get_new_objects(self, file_handler) -**get_new_objects**: The function of get_new_objects is to identify newly added and deleted objects in a Python file by comparing its current and previous versions. +**get_new_objects**: The function of get_new_objects is to identify and return the newly added and deleted objects by comparing the current and previous versions of a Python file. -**Parameters**: -- **file_handler (FileHandler)**: The file handler object used for accessing and manipulating file data. +**parameters**: The parameters of this Function. +· file_handler: An instance of the FileHandler class, responsible for managing file operations and retrieving file versions. -**Code Description**: -The `get_new_objects` function plays a crucial role in tracking changes within Python files, specifically focusing on the addition and deletion of objects such as functions and classes. It operates by leveraging two key functionalities provided by the `file_handler` object: `get_modified_file_versions` and `get_functions_and_classes`. +**Code Description**: The get_new_objects function is designed to analyze the differences between the current and previous versions of a Python file. It utilizes the file_handler parameter to access the modified file versions and extract the functions and classes defined in both versions. -Initially, the function retrieves the current and previous versions of the file content through `file_handler.get_modified_file_versions()`. This step is essential for identifying any modifications between these two versions. +The function begins by calling the method get_modified_file_versions on the file_handler object, which returns the current and previous versions of the file. 
It then retrieves the functions and classes from both versions using the get_functions_and_classes method. If there is no previous version, it initializes parse_previous_py as an empty list. -Subsequently, it parses both versions of the file to extract functions and classes using `file_handler.get_functions_and_classes()`. This parsing process results in lists of current and previous objects, where each object is represented by its name. +Next, the function constructs two sets: current_obj and previous_obj, which contain the names of the objects (functions and classes) from the current and previous versions, respectively. By performing set operations, it calculates the newly added objects (new_obj) and the deleted objects (del_obj). The function returns these two lists as a tuple. -The core of the function lies in comparing these lists to identify new and deleted objects. It accomplishes this by converting the lists into sets and performing set operations. Specifically, it calculates the difference between the current objects set and the previous objects set to identify newly added objects (`new_obj`). Conversely, it identifies deleted objects (`del_obj`) by calculating the difference in the opposite direction. +This function is called by the update_existing_item method within the same class. The update_existing_item method is responsible for updating the file structure information based on changes detected in the Python file. It utilizes the output of get_new_objects to determine which objects have been added or deleted, allowing it to update the file_dict accordingly. Specifically, it removes any deleted objects from the file_dict and updates the information of existing objects based on the current version of the file. -Finally, the function returns a tuple containing two lists: one for newly added objects and another for deleted objects. This output is crucial for further processing, such as updating documentation or refactoring code based on the changes detected. +**Note**: It is important to ensure that the file_handler object passed to this function is properly initialized and contains the necessary methods for retrieving file versions and parsing the file content. -**Note**: -- It is imperative that the `file_handler` object is correctly initialized with the path to the repository and the specific file to be analyzed. This ensures accurate retrieval of file versions and subsequent parsing. -- The function assumes that the file content is valid Python code and that the repository's history is accessible for retrieving the previous version of the file. - -**Output Example**: -```python -(['add_context_stack', '__init__'], []) -``` -This example output indicates that the functions `add_context_stack` and `__init__` were added to the Python file, with no objects being deleted. 
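+
+The set arithmetic is simple enough to sketch in a few lines. The tuples below are hypothetical stand-ins for the (name, start_line, ...) records that get_functions_and_classes is described as returning:
+```python
+# Objects parsed from the current and previous file versions (illustrative data).
+current_version = [("add_context_stack", 10), ("__init__", 1)]
+previous_version = []  # no previous version of the file
+
+current_obj = {name for name, *_ in current_version}
+previous_obj = {name for name, *_ in previous_version}
+
+new_obj = list(current_obj - previous_obj)  # newly added objects
+del_obj = list(previous_obj - current_obj)  # deleted objects
+print(new_obj, del_obj)  # e.g. ['add_context_stack', '__init__'] []
+```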
+**Output Example**: A possible return value of the function could be:
+new_obj: ['add_context_stack', '__init__']
+del_obj: []
 ***
diff --git a/markdown_docs/repo_agent/settings.md b/markdown_docs/repo_agent/settings.md
new file mode 100644
index 0000000..7ea9408
--- /dev/null
+++ b/markdown_docs/repo_agent/settings.md
@@ -0,0 +1,295 @@
+## ClassDef LogLevel
+**LogLevel**: The function of LogLevel is to define an enumeration of log levels.
+
+**attributes**: The attributes of this Class.
+· DEBUG: The log level for debugging information.
+· INFO: The log level for general information.
+· WARNING: The log level for warning messages.
+· ERROR: The log level for error messages.
+· CRITICAL: The log level for critical error messages.
+
+**Code Description**: The LogLevel class inherits from StrEnum and defines a set of constants representing the different log levels: DEBUG, INFO, WARNING, ERROR, and CRITICAL, each corresponding to a different logging severity. The benefit of using an enumeration is that it provides a clear, type-safe way to handle log levels and avoids the errors that string constants can introduce.
+
+In the project, the LogLevel class is referenced by the ProjectSettings class as the type of its log_level attribute. ProjectSettings is a configuration class responsible for managing the project's settings, and its log_level attribute defaults to LogLevel.INFO. This means that unless otherwise specified, the project's log level is the info level.
+
+In addition, the set_log_level method of the ProjectSettings class validates and sets the log level. The method converts the input string to uppercase and checks whether it is a valid log level. If the input value is not among those defined by LogLevel, a ValueError is raised. This ensures that the log levels used in the project are always valid and consistent.
+
+**Note**: When using LogLevel, make sure the log level is one of the predefined constants to avoid runtime errors. When setting the log level, uppercase input is recommended to match the enumeration definitions.
+## ClassDef ProjectSettings
+**ProjectSettings**: The function of ProjectSettings is to manage the configuration settings for the project.
+
+**attributes**: The attributes of this Class.
+· target_repo: DirectoryPath - Specifies the target repository directory path.
+· hierarchy_name: str - Defines the name of the hierarchy for project documentation.
+· markdown_docs_name: str - Indicates the name of the directory where markdown documentation is stored.
+· ignore_list: list[str] - A list of items to be ignored in the project settings.
+· language: str - Specifies the language used in the project, defaulting to "Chinese".
+· max_thread_count: PositiveInt - Sets the maximum number of threads allowed, defaulting to 4.
+· log_level: LogLevel - Defines the logging level for the project, defaulting to LogLevel.INFO.
+
+**Code Description**: The ProjectSettings class inherits from BaseSettings and serves as a configuration class that encapsulates various settings required for the project. It includes attributes that define the target repository, documentation hierarchy, language preferences, and logging configurations.
+
+The class utilizes field validators to ensure that the values assigned to certain attributes are valid. For instance, the `validate_language_code` method checks if the provided language code corresponds to a valid ISO 639 code or language name, raising a ValueError if the input is invalid. This ensures that only recognized language codes are accepted, enhancing the robustness of the configuration.
+
+Similarly, the `set_log_level` method validates the log level input, converting it to uppercase and checking its validity against the predefined LogLevel enumeration. If the input does not match any of the defined log levels, a ValueError is raised, ensuring that the logging configuration remains consistent and valid throughout the project.
+
+The ProjectSettings class is referenced by the Setting class, which aggregates various settings for the project, including ProjectSettings and ChatCompletionSettings. This hierarchical structure allows for organized management of project configurations, where ProjectSettings plays a crucial role in defining the core settings that govern the behavior of the application.
+
+**Note**: When using the ProjectSettings class, ensure that the values assigned to attributes like language and log_level are valid to avoid runtime errors.
It is recommended to use the predefined constants for log levels and valid ISO codes for languages to maintain consistency and reliability in the project's configuration.
+
+**Output Example**: An instance of ProjectSettings might look like this:
+```
+ProjectSettings(
+    target_repo="/path/to/repo",
+    hierarchy_name=".project_doc_record",
+    markdown_docs_name="markdown_docs",
+    ignore_list=["temp", "cache"],
+    language="English",
+    max_thread_count=4,
+    log_level=LogLevel.INFO
+)
+```
+### FunctionDef validate_language_code(cls, v)
+**validate_language_code**: The function of validate_language_code is to validate the input and return a valid language name.
+
+**parameters**: The parameters of this Function.
+· v: A string representing the language code or language name to be validated.
+
+**Code Description**: validate_language_code is a class method that validates whether the input language code or language name is valid. It accepts a string parameter v, representing the user-provided language code or name. Internally, the method uses Language.match(v) to try to match the input language. If the match succeeds, the corresponding language name is returned. If the input language code or name is invalid, a LanguageNotFoundError is raised, which in turn triggers a ValueError prompting the user to enter a valid ISO 639 code or language name.
+
+The main purpose of this function is to ensure that the language information provided by the user is valid and to give appropriate feedback so that input errors can be corrected.
+
+**Note**: When using this function, make sure the argument is a string that conforms to the ISO 639 standard or is a known language name. If the input is invalid, the function raises an exception, so handle exceptions appropriately at the call site.
+
+**Output Example**: Given the input "en", the function returns "English". Given the input "invalid_code", a ValueError is raised with the message "Invalid language input. Please enter a valid ISO 639 code or language name."
+***
+### FunctionDef set_log_level(cls, v)
+**set_log_level**: The function of set_log_level is to validate and set the logging level for the application.
+
+**parameters**: The parameters of this Function.
+· cls: This parameter refers to the class itself, allowing the method to be called on the class rather than an instance.
+· v: A string that represents the desired logging level to be set.
+
+**Code Description**: The set_log_level function is a class method designed to validate and convert a provided string input into a corresponding LogLevel enumeration value. The function first checks if the input value v is of type string. If it is, the function converts the string to uppercase to ensure consistency with the predefined log level constants.
+
+Next, the function checks if the uppercase version of v exists within the members of the LogLevel enumeration, specifically by referencing LogLevel._value2member_map_. This mapping allows the function to verify if the provided value corresponds to one of the valid log levels defined in the LogLevel class, which includes DEBUG, INFO, WARNING, ERROR, and CRITICAL.
+
+If the value is valid, the function returns the corresponding LogLevel enumeration member. However, if the value does not match any of the predefined log levels, the function raises a ValueError, indicating that the provided log level is invalid. This mechanism ensures that only valid log levels are accepted, maintaining the integrity of the logging configuration within the application.
+
+The set_log_level function is closely related to the LogLevel class, which defines the valid logging levels as an enumeration. This relationship is crucial as it ensures that the logging level set by the ProjectSettings class is always one of the predefined constants, thus preventing runtime errors associated with invalid log levels.
+
+**Note**: When using the set_log_level function, provide the log level as a string; lowercase input is accepted and converted to uppercase to match the enumeration definitions.
+
+**Output Example**: If the input value is "info", the function will convert it to "INFO" and return LogLevel.INFO. If the input value is "verbose", the function will raise a ValueError with the message "Invalid log level: VERBOSE".
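+
+A minimal sketch of the validation logic described above (assuming Python 3.11+ for StrEnum, and omitting the pydantic validator machinery around it):
+```python
+from enum import StrEnum
+
+class LogLevel(StrEnum):
+    DEBUG = "DEBUG"
+    INFO = "INFO"
+    WARNING = "WARNING"
+    ERROR = "ERROR"
+    CRITICAL = "CRITICAL"
+
+def set_log_level(v: str) -> LogLevel:
+    if isinstance(v, str):
+        v = v.upper()  # normalize case before validating
+    if v in LogLevel._value2member_map_:
+        return LogLevel(v)
+    raise ValueError(f"Invalid log level: {v}")
+
+print(set_log_level("info"))  # -> LogLevel.INFO ("INFO")
+# set_log_level("verbose")    # raises ValueError: Invalid log level: VERBOSE
+```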
+*** +## ClassDef MaxInputTokens +**MaxInputTokens**: The function of MaxInputTokens is to define and manage the token limits for various AI models. + +**attributes**: The attributes of this Class. +· gpt_4o_mini: int - Represents the token limit for the "gpt-4o-mini" model, defaulting to 128,000 tokens. +· gpt_4o: int - Represents the token limit for the "gpt-4o" model, defaulting to 128,000 tokens. +· o1_preview: int - Represents the token limit for the "o1-preview" model, defaulting to 128,000 tokens. +· o1_mini: int - Represents the token limit for the "o1-mini" model, defaulting to 128,000 tokens. + +**Code Description**: The MaxInputTokens class is a subclass of BaseModel, which is likely part of a data validation library such as Pydantic. This class is designed to encapsulate the configuration of token limits for different AI models. Each model has a predefined token limit set to 128,000 tokens. The class utilizes the `Field` function to define these attributes, allowing for the specification of aliases that can be used to refer to these fields in a more user-friendly manner. + +The class includes two class methods: `get_valid_models` and `get_token_limit`. The `get_valid_models` method returns a list of valid model names by iterating over the model fields and extracting their aliases. This is useful for validating model names against a known set of options. The `get_token_limit` method takes a model name as an argument, creates an instance of the MaxInputTokens class, and retrieves the corresponding token limit by accessing the attribute that matches the model name (with hyphens replaced by underscores). + +The MaxInputTokens class is utilized by other components in the project, specifically in the ChatCompletionSettings class. The `validate_model` method in ChatCompletionSettings calls `MaxInputTokens.get_valid_models()` to ensure that the provided model name is valid. If the model name is not found in the list of valid models, a ValueError is raised, ensuring that only acceptable model names are processed. + +Additionally, the `get_token_limit` method in ChatCompletionSettings leverages `MaxInputTokens.get_token_limit(self.model)` to retrieve the token limit for the model specified in the settings. This integration ensures that the token limits are consistently applied and validated across the application. + +**Note**: It is important to ensure that the model names used in the application match the aliases defined in the MaxInputTokens class to avoid validation errors. + +**Output Example**: For a valid model name "gpt-4o", calling `MaxInputTokens.get_token_limit("gpt-4o")` would return 128000, indicating the token limit for that model. 
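+
+A condensed sketch of the class as described, using pydantic v2 (the Field defaults and aliases follow the attribute list above; details such as model_config are omitted, so treat this as an illustration rather than the exact source):
+```python
+from pydantic import BaseModel, Field
+
+class MaxInputTokens(BaseModel):
+    gpt_4o_mini: int = Field(128_000, alias="gpt-4o-mini")
+    gpt_4o: int = Field(128_000, alias="gpt-4o")
+    o1_preview: int = Field(128_000, alias="o1-preview")
+    o1_mini: int = Field(128_000, alias="o1-mini")
+
+    @classmethod
+    def get_valid_models(cls) -> list[str]:
+        # Prefer the alias when one is defined, otherwise fall back to the field name.
+        return [info.alias or name for name, info in cls.model_fields.items()]
+
+    @classmethod
+    def get_token_limit(cls, model_name: str) -> int:
+        # Hyphenated model names map onto snake_case attribute names.
+        return getattr(cls(), model_name.replace("-", "_"))
+
+print(MaxInputTokens.get_valid_models())         # ['gpt-4o-mini', 'gpt-4o', 'o1-preview', 'o1-mini']
+print(MaxInputTokens.get_token_limit("gpt-4o"))  # 128000
+```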
+### FunctionDef get_valid_models(cls)
+**get_valid_models**: The function of get_valid_models is to return a list of the names or aliases of all valid models.
+
+**parameters**: This function takes no parameters.
+
+**Code Description**: get_valid_models is a class method used to collect the alias or name of every model-related field. It iterates over the class's model_fields attribute and, for each field, extracts the field's alias if one exists, or the field's name otherwise. The return value is a list of strings containing the names or aliases of all valid models.
+
+Within the project, get_valid_models is called by the validate_model method of the ChatCompletionSettings class. validate_model checks whether a given model name appears in the list of valid models; if it does not, validate_model raises a ValueError informing the user that the model is invalid and listing all valid models. This design ensures that only valid model names can be used, improving the robustness and maintainability of the code.
+
+**Note**: When using this code, make sure the model_fields attribute is correctly defined and contains the required field information to avoid runtime errors.
+
+**Output Example**: Given the fields defined on MaxInputTokens, the return value of get_valid_models would be:
+["gpt-4o-mini", "gpt-4o", "o1-preview", "o1-mini"]
+***
+### FunctionDef get_token_limit(cls, model_name)
+**get_token_limit**: The function of get_token_limit is to return the token limit corresponding to a given model name.
+
+**parameters**: The parameters of this Function.
+· model_name: A string representing the name of the model.
+
+**Code Description**: get_token_limit is a class method that accepts a string parameter model_name. The method first creates an instance of the current class and then looks up the corresponding attribute by replacing the hyphens (-) in model_name with underscores (_). It returns the value of that attribute, which represents the token limit associated with the specified model. This design makes it possible to retrieve the token limit for any supported model name dynamically.
+
+**Note**: When using this code, make sure the attribute corresponding to model_name exists on the class; otherwise an AttributeError will be raised. Ensure the model name is formatted correctly to avoid unnecessary errors.
+
+**Output Example**: Calling get_token_limit("gpt-4o") returns 128000, the token limit defined for that model.
+***
+## ClassDef ChatCompletionSettings
+**ChatCompletionSettings**: The function of ChatCompletionSettings is to manage and validate settings related to chat completion models used in the application.
+
+**attributes**: The attributes of this Class.
+· model: str - The model to be used for chat completion, defaulting to "gpt-4o-mini".
+· temperature: PositiveFloat - A float value that influences the randomness of the model's output, defaulting to 0.2.
+· request_timeout: PositiveFloat - The timeout duration for requests, defaulting to 5 seconds.
+· openai_base_url: str - The base URL for the OpenAI API, defaulting to "https://api.openai.com/v1".
+· openai_api_key: SecretStr - The API key required for authentication with the OpenAI service, marked to be excluded from certain outputs.
+
+**Code Description**: The ChatCompletionSettings class inherits from BaseSettings and is designed to encapsulate the configuration settings necessary for interacting with OpenAI's chat completion models. It includes attributes for specifying the model type, temperature, request timeout, base URL, and API key. The class employs field validators to ensure that the provided values for the model and base URL conform to expected formats and constraints.
+
+The `convert_base_url_to_str` method is a class method that converts the base URL into a string format before validation, ensuring that the URL is correctly formatted. The `validate_model` method checks if the specified model is valid by comparing it against a list of acceptable models obtained from the MaxInputTokens class. If the model is invalid, it raises a ValueError with a descriptive message.
+
+Additionally, the class includes a method `get_token_limit`, which retrieves the token limit based on the specified model. This method interacts with the MaxInputTokens class to determine the appropriate limit for the current model setting.
+
+In the context of the project, the ChatCompletionSettings class is instantiated within the Setting class, where it is used to define the chat completion settings for the application.
This relationship indicates that any instance of Setting will have a corresponding ChatCompletionSettings object, allowing for structured management of chat-related configurations.
+
+**Note**: It is important to ensure that the model specified is valid and that the API key is securely managed, as it is critical for authenticating requests to the OpenAI service.
+
+**Output Example**: An example of the output when retrieving the token limit for a valid model might look like this:
+```
+{
+    "model": "gpt-4o-mini",
+    "token_limit": 128000
+}
+```
+### FunctionDef convert_base_url_to_str(cls, openai_base_url)
+**convert_base_url_to_str**: The function of convert_base_url_to_str is to convert the given openai_base_url into string form.
+
+**parameters**: The parameters of this Function.
+· openai_base_url: A parameter of type HttpUrl representing the base URL of the OpenAI API.
+
+**Code Description**: convert_base_url_to_str is a class method that accepts an HttpUrl parameter, openai_base_url, and converts it to a string using Python's built-in str() function. HttpUrl is a type annotation commonly used to guarantee that the supplied URL has a valid format. The main purpose of this method is to ensure type consistency and correctness whenever the URL needs to be handled as a string.
+
+**Note**: When using this code, make sure the openai_base_url passed in is a valid HttpUrl to avoid type errors or exceptions.
+
+**Output Example**: Given an openai_base_url of "https://api.openai.com/v1/", the function returns "https://api.openai.com/v1/".
+***
+### FunctionDef validate_model(cls, value)
+**validate_model**: The function of validate_model is to ensure that a given model name is valid by checking it against a list of predefined valid models.
+
+**parameters**:
+· value: str - A string representing the model name to be validated.
+
+**Code Description**:
+The `validate_model` method is a class method that verifies if a given model name is part of the set of valid model names. This function accepts a single parameter, `value`, which is expected to be a string representing the model name.
+
+1. **Validation Process**:
+   The function calls the `get_valid_models` method from the `MaxInputTokens` class. This method returns a list of valid model names, which includes the aliases of the models defined in the `MaxInputTokens` class.
+
+2. **Comparison**:
+   The provided `value` (the model name to be validated) is then checked to see if it exists within the list of valid models. If the model name is not found, the function raises a `ValueError`, indicating that the provided model is invalid and listing the valid options.
+
+3. **Return**:
+   If the model name is valid (i.e., it exists in the list of valid models), the function returns the same model name (`value`).
+
+The `validate_model` function is used primarily to ensure that only models which are defined as valid in the system are accepted for further processing. By calling the `MaxInputTokens.get_valid_models()` method, the function directly leverages the list of predefined models to perform this check.
+
+**Note**:
+- It is important to ensure that the `MaxInputTokens.get_valid_models()` method correctly returns the list of valid model names, including any aliases or variations. If the model name provided to `validate_model` does not match a valid entry, a `ValueError` will be raised, which could interrupt the workflow.
+- This function expects the model names to be exactly as defined in the valid models list, and does not perform any automatic corrections or formatting on the input value.
+
+**Output Example**:
+For a valid input model name "gpt-4o", assuming this model is present in the valid models list returned by `MaxInputTokens.get_valid_models()`, the function would simply return "gpt-4o".
+ +In the case of an invalid model name like "gpt-5", the function would raise an exception: +``` +ValueError: Invalid model 'gpt-5'. Must be one of ['gpt-4o', 'gpt-4o-mini', 'o1-preview', 'o1-mini']. +``` +*** +### FunctionDef get_token_limit(self) +**get_token_limit**: The function of get_token_limit is to retrieve the token limit associated with a specified AI model. + +**parameters**: +· None. + +**Code Description**: +The `get_token_limit` function is a method defined within the `ChatCompletionSettings` class. It is responsible for retrieving the token limit corresponding to the model specified in the instance's `model` attribute. + +The function works by calling the `get_token_limit` method of the `MaxInputTokens` class, which is designed to return the token limit for a given AI model. The method passes the value of `self.model` (which represents the model name) to `MaxInputTokens.get_token_limit()`. The `get_token_limit` method in `MaxInputTokens` is a class method that accepts a model name as a string and returns the token limit for that model. It does this by accessing the appropriate attribute in the `MaxInputTokens` class, which corresponds to the given model name (with hyphens replaced by underscores). + +The relationship with other components in the project is as follows: +1. The `ChatCompletionSettings` class utilizes the `get_token_limit` method to dynamically fetch the token limit for the model specified in its settings. +2. The method relies on the `MaxInputTokens` class, which encapsulates predefined token limits for different models. This connection ensures that the `get_token_limit` function in `ChatCompletionSettings` accurately reflects the correct token limit based on the specified model. +3. In the `MaxInputTokens` class, the `get_token_limit` method is a class method that matches model names with their corresponding attributes and retrieves the token limit (defaulting to 128,000 tokens for each model). + +**Note**: +It is important to ensure that the model name specified in `self.model` matches one of the valid model names defined in the `MaxInputTokens` class, such as "gpt-4o" or "o1-mini", to avoid errors. If an invalid model name is provided, the method will raise an exception when attempting to fetch the token limit. + +**Output Example**: +If the `model` attribute of the `ChatCompletionSettings` instance is set to `"gpt-4o"`, calling `get_token_limit()` will return `128000`, which is the token limit for the "gpt-4o" model as defined in the `MaxInputTokens` class. +*** +## ClassDef Setting +**Setting**: The function of Setting is to aggregate and manage configuration settings for the project, including project-specific and chat completion settings. + +**attributes**: The attributes of this Class. +· project: ProjectSettings - An instance that holds the configuration settings related to the project, including repository paths, documentation hierarchy, language preferences, and logging configurations. +· chat_completion: ChatCompletionSettings - An instance that manages settings related to chat completion models, including model type, temperature, request timeout, and API key. + +**Code Description**: The Setting class inherits from BaseSettings and serves as a central configuration class that encapsulates various settings required for the project. It contains two primary attributes: `project`, which is an instance of the ProjectSettings class, and `chat_completion`, which is an instance of the ChatCompletionSettings class. 
+ +The ProjectSettings class is responsible for managing the configuration settings specific to the project, such as the target repository directory path, hierarchy name for documentation, language preferences, maximum thread count, and logging level. It ensures that the values assigned to these attributes are valid through field validators, enhancing the robustness of the configuration. + +The ChatCompletionSettings class, on the other hand, manages settings related to chat completion models used in the application. It includes attributes for specifying the model type, temperature, request timeout, base URL for the OpenAI API, and the API key required for authentication. This class also employs field validators to ensure that the provided values conform to expected formats and constraints. + +The Setting class is referenced by the SettingsManager class, which is responsible for managing the instantiation of the Setting object. The SettingsManager maintains a private class attribute `_setting_instance` that holds the instance of the Setting class. The `get_setting` class method checks if the `_setting_instance` has been initialized; if not, it creates a new instance of Setting. This design pattern ensures that there is a single instance of the Setting class throughout the application, promoting consistent access to configuration settings. + +**Note**: When using the Setting class, it is important to ensure that the values assigned to the attributes of ProjectSettings and ChatCompletionSettings are valid to avoid runtime errors. Proper management of the API key in ChatCompletionSettings is crucial for secure authentication with the OpenAI service. +## ClassDef SettingsManager +**SettingsManager**: The function of SettingsManager is to manage the instantiation and access to the configuration settings for the project. + +**attributes**: The attributes of this Class. +· _setting_instance: Optional[Setting] - A private class attribute that holds the singleton instance of the Setting class, initially set to None. + +**Code Description**: The SettingsManager class is designed to provide a centralized access point for the configuration settings of the project. It utilizes a class method, `get_setting`, to ensure that there is only one instance of the Setting class throughout the application, implementing the Singleton design pattern. + +The class maintains a private class attribute, `_setting_instance`, which is initially set to None. When the `get_setting` method is called, it first checks if `_setting_instance` is None, indicating that the Setting object has not yet been instantiated. If this is the case, it creates a new instance of the Setting class and assigns it to `_setting_instance`. This ensures that subsequent calls to `get_setting` return the same instance of the Setting class, thereby promoting consistent access to configuration settings across the application. + +The SettingsManager class is called by various components within the project, including the ChangeDetector, ChatEngine, and MetaInfo classes. For instance, in the `get_to_be_staged_files` method of the ChangeDetector class, the SettingsManager is invoked to retrieve the current settings, which are then used to determine the project hierarchy and manage file staging. Similarly, in the ChatEngine's `__init__` method, the SettingsManager is used to access the OpenAI API settings, ensuring that the chat engine is configured correctly with the necessary parameters. 
+ +This design allows for a clear separation of concerns, where the SettingsManager handles the instantiation and retrieval of settings, while other components focus on their specific functionalities. By centralizing the configuration management, the SettingsManager enhances the maintainability and scalability of the project. + +**Note**: It is important to ensure that the Setting class is properly configured before accessing its attributes through the SettingsManager. Any misconfiguration may lead to runtime errors when the application attempts to utilize the settings. + +**Output Example**: A possible appearance of the code's return value when calling `SettingsManager.get_setting()` could be an instance of the Setting class containing project-specific configurations such as project paths, logging levels, and chat completion settings. +### FunctionDef get_setting(cls) +**get_setting**: The function of get_setting is to provide a singleton instance of the Setting class, ensuring that configuration settings are consistently accessed throughout the application. + +**parameters**: The parameters of this Function. +· No parameters are required for this function. + +**Code Description**: The get_setting class method is a crucial component of the SettingsManager class, designed to manage the instantiation of the Setting object. This method first checks if the class attribute `_setting_instance` is None, indicating that the Setting instance has not yet been created. If it is None, the method initializes `_setting_instance` by creating a new instance of the Setting class. This ensures that only one instance of the Setting class exists, adhering to the singleton design pattern. The method then returns the `_setting_instance`, allowing other parts of the application to access the configuration settings encapsulated within the Setting instance. + +The Setting class itself is responsible for managing various configuration settings for the project, including project-specific settings and chat completion settings. It contains attributes that hold instances of ProjectSettings and ChatCompletionSettings, which further manage specific configurations related to the project and chat functionalities, respectively. + +The get_setting method is called by various components within the project, such as the ChangeDetector, ChatEngine, and MetaInfo classes. For instance, in the ChangeDetector's get_to_be_staged_files method, get_setting is invoked to retrieve the current project settings, which are then used to determine which files need to be staged based on the project's hierarchy and markdown documentation requirements. Similarly, in the ChatEngine's __init__ method, get_setting is called to configure the OpenAI API settings, ensuring that the chat functionalities are properly initialized with the correct parameters. + +This method plays a vital role in maintaining a centralized access point for configuration settings, promoting consistency and reducing the risk of errors that may arise from multiple instances of the Setting class. + +**Note**: It is important to ensure that the Setting class is properly configured before accessing its attributes through get_setting. Any misconfiguration may lead to runtime errors or unexpected behavior in the application. 
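+
+To make the lazy-initialization pattern concrete, a minimal sketch of the accessor follows; the Setting stand-in and its internals are simplified assumptions rather than the project's full configuration model:
+```python
+from typing import Optional
+
+class Setting:
+    """Stand-in for the real settings model."""
+
+class SettingsManager:
+    _setting_instance: Optional[Setting] = None
+
+    @classmethod
+    def get_setting(cls) -> Setting:
+        # Instantiate lazily on first access; reuse the same instance afterwards.
+        if cls._setting_instance is None:
+            cls._setting_instance = Setting()
+        return cls._setting_instance
+
+assert SettingsManager.get_setting() is SettingsManager.get_setting()  # single shared instance
+```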
+
+**Output Example**: A possible appearance of the code's return value could be an instance of the Setting class containing initialized attributes for project settings and chat completion settings, such as:
+```
+Setting(
+    project=ProjectSettings(
+        target_repo='path/to/repo',
+        hierarchy_name='documentation',
+        log_level='INFO',
+        ignore_list=['*.pyc', '__pycache__']
+    ),
+    chat_completion=ChatCompletionSettings(
+        openai_api_key='your_api_key',
+        openai_base_url='https://api.openai.com/v1',
+        request_timeout=30,
+        model='gpt-4o-mini',
+        temperature=0.7
+    )
+)
+```
+***
diff --git a/markdown_docs/repo_agent/utils/gitignore_checker.md b/markdown_docs/repo_agent/utils/gitignore_checker.md
index 7e97038..9ee6c7b 100644
--- a/markdown_docs/repo_agent/utils/gitignore_checker.md
+++ b/markdown_docs/repo_agent/utils/gitignore_checker.md
@@ -1,169 +1,143 @@
 ## ClassDef GitignoreChecker
-**GitignoreChecker**: The function of GitignoreChecker is to check files and folders against patterns specified in a `.gitignore` file to determine which ones should be ignored.
+**GitignoreChecker**: The function of GitignoreChecker is to check files and folders in a specified directory against patterns defined in a .gitignore file, identifying which files are not ignored and have a specific extension.
 
-**Attributes**:
-- `directory`: The directory to be checked against the `.gitignore` patterns.
-- `gitignore_path`: The path to the `.gitignore` file used for checking ignore patterns.
-- `folder_patterns`: A list of folder-specific patterns extracted from the `.gitignore` file.
-- `file_patterns`: A list of file-specific patterns extracted from the `.gitignore` file.
+**attributes**: The attributes of this Class.
+· directory: The directory to be checked for files and folders.
+· gitignore_path: The path to the .gitignore file.
+· folder_patterns: A list of folder patterns extracted from the .gitignore file.
+· file_patterns: A list of file patterns extracted from the .gitignore file.
 
-**Code Description**:
-The `GitignoreChecker` class is designed to facilitate the identification of files and directories that should be ignored based on the patterns specified in a `.gitignore` file. This is particularly useful in projects where certain files or directories are not meant to be processed or included in version control.
+**Code Description**: The GitignoreChecker class is designed to facilitate the checking of files and folders in a specified directory against the rules defined in a .gitignore file. Upon initialization, it requires two parameters: the directory to be checked and the path to the .gitignore file. The constructor reads the .gitignore file, parsing its contents to separate folder patterns from file patterns.
 
-Upon initialization, the class takes two parameters: the directory to check and the path to the `.gitignore` file. It then loads and parses the `.gitignore` file, separating the ignore patterns into those applicable to folders and those applicable to files.
+The class contains several methods:
+- `_load_gitignore_patterns`: This method attempts to load the .gitignore file from the specified path. If the file is not found, it falls back to a default .gitignore file located two directories up from the current file. It returns a tuple containing lists of folder and file patterns.
+- `_parse_gitignore`: This static method processes the content of the .gitignore file, extracting valid patterns while ignoring comments and empty lines.
+- `_split_gitignore_patterns`: This static method takes a list of patterns and categorizes them into folder patterns (ending with a '/') and file patterns. +- `_is_ignored`: This static method checks if a given path matches any of the provided patterns, determining if the path should be ignored based on whether it is a directory or a file. +- `check_files_and_folders`: This method walks through the specified directory, checking each file and folder against the extracted patterns. It returns a list of file paths that are not ignored and have a '.py' extension, with paths being relative to the specified directory. -The core functionality of the class is encapsulated in the `check_files_and_folders` method, which traverses the specified directory, checking each file and folder against the `.gitignore` patterns. Files that do not match any of the ignore patterns and have a `.py` extension are considered not ignored and are returned as a list of paths relative to the specified directory. +The GitignoreChecker is utilized in the `generate_overall_structure` method of the FileHandler class. In this context, it is instantiated to check the repository's directory for files that are not ignored by the .gitignore rules. The method iterates over the list of non-ignored files, performing additional checks and processing for each file, ultimately contributing to the generation of the repository's overall structure. -This class is utilized in the project within the `generate_overall_structure` method of a `FileHandler` object. In this context, the `GitignoreChecker` is used to filter out files that should be ignored based on the `.gitignore` file of a repository. This ensures that only relevant files are processed for generating the overall structure of the repository, excluding files specified in `.gitignore`. +**Note**: When using the GitignoreChecker, ensure that the specified .gitignore file is accessible and correctly formatted to avoid falling back to the default path unintentionally. -**Note**: -- The class provides a fallback mechanism for loading a default `.gitignore` file if the specified one is not found. This ensures that the class functions even in the absence of a specified `.gitignore` path. -- The ignore checking mechanism is sensitive to both folder-specific and file-specific patterns, accurately reflecting the behavior of `.gitignore` processing in version control systems. - -**Output Example**: -Assuming a directory structure with some Python files and a `.gitignore` file specifying to ignore all `.log` files, an example output of the `check_files_and_folders` method might look like this: -```python -['src/main.py', 'tests/test_main.py'] +**Output Example**: An example output of the `check_files_and_folders` method might look like this: ``` -This output indicates that `main.py` in the `src` directory and `test_main.py` in the `tests` directory are not ignored by the `.gitignore` file and have the `.py` extension, making them relevant for further processing. +[ + "src/module1.py", + "src/module2.py", + "tests/test_module1.py" +] +``` +This output indicates that the listed Python files are not ignored according to the rules defined in the .gitignore file. ### FunctionDef __init__(self, directory, gitignore_path) **__init__**: The function of __init__ is to initialize the GitignoreChecker with a specific directory and the path to a .gitignore file. -**Parameters**: -- **directory (str)**: The directory that will be checked against the patterns defined in the .gitignore file. 
-- **gitignore_path (str)**: The filesystem path to the .gitignore file that contains patterns to be used for checking. +**parameters**: The parameters of this Function. +· directory: The directory to be checked. +· gitignore_path: The path to the .gitignore file. + +**Code Description**: The __init__ method is the constructor for the GitignoreChecker class. It takes two parameters: `directory`, which specifies the directory that will be checked for files and folders to ignore, and `gitignore_path`, which indicates the location of the .gitignore file that contains the ignore patterns. Upon initialization, these parameters are assigned to instance variables `self.directory` and `self.gitignore_path`, respectively. -**Code Description**: The `__init__` method is the constructor for the GitignoreChecker class, setting up the initial state of an instance by storing the provided directory and .gitignore file path. Upon initialization, it also calls the `_load_gitignore_patterns` method, which is responsible for loading and parsing the .gitignore file specified by the `gitignore_path` parameter. This method categorizes the patterns found in the .gitignore file into two lists: one for folder patterns and another for file patterns. These lists are then stored as instance attributes (`self.folder_patterns` and `self.file_patterns`), making them accessible to other methods within the class. This setup allows the GitignoreChecker to efficiently determine whether files or directories within the specified directory should be ignored based on the patterns defined in the .gitignore file. +Additionally, the constructor calls the private method `_load_gitignore_patterns`, which is responsible for loading and parsing the .gitignore file. This method returns a tuple containing two lists: one for folder patterns and another for file patterns. These lists are then assigned to the instance variables `self.folder_patterns` and `self.file_patterns`. This structured approach ensures that the GitignoreChecker has immediate access to the relevant patterns for processing files and directories according to the rules defined in the .gitignore file. -The `_load_gitignore_patterns` method, which is called during initialization, plays a crucial role in the functionality of the GitignoreChecker. It ensures that the patterns from the .gitignore file are correctly loaded, parsed, and categorized, enabling the GitignoreChecker to perform its primary function of checking files and directories against these patterns. This method handles the reading of the .gitignore file, gracefully managing cases where the file might not be found at the specified path by attempting to fall back to a default path. After loading the content of the .gitignore file, it processes this content to extract and categorize the ignore patterns, which are then used throughout the lifetime of the GitignoreChecker instance to evaluate whether specific files or directories are ignored. +The `_load_gitignore_patterns` method is crucial for the initialization process, as it ensures that the patterns are correctly loaded and categorized. If the specified .gitignore file is not found, the method will attempt to load a default .gitignore file from a predetermined location, ensuring that the GitignoreChecker can still function even in the absence of a user-defined file. -**Note**: It is essential to provide valid paths for both the directory and the .gitignore file when creating an instance of GitignoreChecker. 
Incorrect paths or inaccessible files could lead to unexpected behavior, as the GitignoreChecker relies on these paths to function correctly. The effectiveness of the GitignoreChecker is directly related to the accuracy and comprehensiveness of the patterns defined in the .gitignore file, as these patterns determine which files and directories are ignored during checks. +**Note**: It is important to ensure that the provided .gitignore file is correctly formatted and accessible at the specified path to avoid falling back to the default file unintentionally. Proper handling of file paths and existence checks is essential for the reliable operation of the GitignoreChecker. *** ### FunctionDef _load_gitignore_patterns(self) -**_load_gitignore_patterns**: The function of _load_gitignore_patterns is to load and parse the .gitignore file, categorizing its patterns into folder and file patterns. +**_load_gitignore_patterns**: The function of _load_gitignore_patterns is to load and parse the .gitignore file, then split the patterns into folder and file patterns. -**Parameters**: This function does not accept any parameters as it is designed to be called within an instance of its class, utilizing instance attributes for its operations. +**parameters**: The parameters of this Function. +· self: An instance of the GitignoreChecker class, which contains the attributes necessary for loading the .gitignore file. -**Code Description**: The `_load_gitignore_patterns` function is a crucial component of the GitignoreChecker class, responsible for handling .gitignore files within a given project. It operates by attempting to open and read the .gitignore file specified by the `gitignore_path` attribute of the GitignoreChecker instance. If the file is not found at the specified path, the function falls back to a default path, which is constructed relative to the file location of the function itself. This ensures that the function has a robust mechanism for locating a .gitignore file even in cases where the specified path is incorrect or the file has been moved. +**Code Description**: The _load_gitignore_patterns method is responsible for reading the content of a .gitignore file from a specified path. If the specified file is not found, it falls back to a default .gitignore file located two directories up from the current file's directory. The method attempts to open the .gitignore file in read mode with UTF-8 encoding. If successful, it reads the entire content of the file into a string variable named gitignore_content. In the event of a FileNotFoundError, the method constructs a default path and attempts to read from that file instead. -Upon successfully opening the .gitignore file, the function reads its content as a string. This content is then passed to the `_parse_gitignore` method, which extracts individual patterns from the .gitignore file, ignoring comments and empty lines. The extracted patterns are then further processed by the `_split_gitignore_patterns` method, which categorizes them into two lists: one for folder patterns and another for file patterns. This categorization is based on the syntax used in .gitignore files, where patterns intended to match directories typically end with a forward slash. +Once the content of the .gitignore file is obtained, the method calls the _parse_gitignore function, passing the gitignore_content as an argument. This function processes the content and returns a list of patterns that are relevant for ignoring files and directories. 
Subsequently, the _load_gitignore_patterns method calls the _split_gitignore_patterns function, providing it with the list of patterns. This function categorizes the patterns into two separate lists: one for folder patterns and another for file patterns. Finally, _load_gitignore_patterns returns a tuple containing these two lists. -The function ultimately returns a tuple containing these two lists, providing a structured way to access and utilize the parsed .gitignore patterns within other parts of the GitignoreChecker class or by external callers. +This method is invoked during the initialization of the GitignoreChecker class, where it is used to populate the folder_patterns and file_patterns attributes with the relevant patterns extracted from the .gitignore file. This structured approach ensures that the patterns are readily available for further processing or application within the project. -**Note**: It is important to note that this function relies on the correct setting of the `gitignore_path` attribute during the initialization of the GitignoreChecker instance. Incorrect paths or inaccessible files will trigger the fallback mechanism, which may not always result in the desired .gitignore file being used. Additionally, the function assumes that the .gitignore file follows the standard syntax and conventions, as the parsing logic is designed with these assumptions in mind. +**Note**: It is essential to ensure that the .gitignore file is properly formatted and accessible at the specified path to avoid falling back to the default file unintentionally. -**Output Example**: While the function does not produce a visual output, an example return value could be as follows, given a .gitignore file containing patterns for ignoring log files and a node_modules directory: +**Output Example**: An example of the return value from _load_gitignore_patterns could be: ```python -(["node_modules"], ["*.log"]) +(['src', 'docs'], ['README.md', 'LICENSE']) ``` -This example demonstrates how the function separates directory-specific patterns (e.g., "node_modules/") from file-specific patterns (e.g., "*.log"), facilitating targeted application of these patterns in file system operations or checks performed by the GitignoreChecker. +In this example, the method would return a tuple where the first list contains folder patterns 'src' and 'docs', while the second list contains file patterns 'README.md' and 'LICENSE'. *** ### FunctionDef _parse_gitignore(gitignore_content) -**_parse_gitignore**: The function of _parse_gitignore is to parse the content of a .gitignore file and return a list of patterns found within it. +**_parse_gitignore**: The function of _parse_gitignore is to parse the content of a .gitignore file and return a list of patterns. -**Parameters**: -- **gitignore_content (str)**: The content of the .gitignore file as a string. +**parameters**: The parameters of this Function. +· gitignore_content: A string representing the content of the .gitignore file. -**Code Description**: -The `_parse_gitignore` function is designed to process the content of a .gitignore file, which is passed to it as a string. It aims to extract and return all the valid patterns specified within the file, ignoring any lines that are either empty or start with a hash symbol (`#`), which are considered comments in the context of a .gitignore file. 
+**Code Description**: The _parse_gitignore function is designed to process the content of a .gitignore file, which typically contains rules for ignoring files and directories in a Git repository. The function takes a single argument, gitignore_content, which is expected to be a string containing the raw text of the .gitignore file. -The function begins by initializing an empty list named `patterns`. It then splits the input string `gitignore_content` into individual lines and iterates over each line. Each line is stripped of leading and trailing whitespace using the `strip()` method to ensure that empty lines or lines with only whitespace are not processed further. +The function begins by initializing an empty list called patterns. It then splits the gitignore_content into individual lines using the splitlines() method. For each line, it performs the following operations: +1. It trims any leading or trailing whitespace using the strip() method. +2. It checks if the line is not empty and does not start with a "#" character, which denotes a comment in .gitignore files. +3. If the line meets these criteria, it appends the line to the patterns list. -For each line, if it is not empty and does not start with a `#`, it is considered a valid pattern and is appended to the `patterns` list. This process effectively filters out comments and empty lines from the .gitignore content, focusing only on the patterns meant to be used for ignoring files and directories in Git operations. +Once all lines have been processed, the function returns the patterns list, which contains only the relevant patterns extracted from the .gitignore content. -This function is called by `_load_gitignore_patterns` within the same `GitignoreChecker` class. The `_load_gitignore_patterns` function is responsible for reading the .gitignore file's content and then utilizing `_parse_gitignore` to parse this content into a list of patterns. After parsing, `_load_gitignore_patterns` further processes these patterns to categorize them into folder and file patterns, showcasing a practical application of the `_parse_gitignore` function within the project's workflow. +The _parse_gitignore function is called by the _load_gitignore_patterns method within the GitignoreChecker class. The _load_gitignore_patterns method is responsible for loading the content of a .gitignore file from a specified path. After reading the file content, it invokes _parse_gitignore to extract the patterns before further processing them. This relationship highlights the utility of _parse_gitignore as a helper function that simplifies the task of filtering out valid patterns from the potentially noisy content of a .gitignore file. -**Note**: -- The function assumes that the input string `gitignore_content` correctly represents the content of a .gitignore file. It does not perform any validation on the format of the .gitignore content itself. -- Lines that are purely whitespace or start with `#` are ignored, as they are considered comments or empty lines in the context of a .gitignore file. +**Note**: It is important to ensure that the input to _parse_gitignore is a properly formatted string representing the content of a .gitignore file. Lines that are empty or comments will be ignored in the output. 
-**Output Example**:
-If the content of a .gitignore file is as follows:
-```
-# This is a comment
-*.log
-temp/
-```
-Then, calling `_parse_gitignore` with this content as input would return the following list:
-```
-["*.log", "temp/"]
+**Output Example**: An example of the return value from _parse_gitignore could be:
+```python
+["*.log", "temp/", "build/"]
+```
+In this example, the function returns only the patterns relevant for ignoring files and directories; comments and empty lines in the source content are excluded.
***
### FunctionDef _split_gitignore_patterns(gitignore_patterns)
-**_split_gitignore_patterns**: The function of _split_gitignore_patterns is to divide .gitignore patterns into two distinct lists based on whether they apply to folders or files.
-
-**Parameters**:
-- **gitignore_patterns (list)**: A list of patterns extracted from a .gitignore file.
-
-**Code Description**:
-The `_split_gitignore_patterns` function plays a crucial role in handling .gitignore patterns within the GitignoreChecker class. It takes a single parameter, `gitignore_patterns`, which is expected to be a list of strings. Each string in this list represents a pattern found in a .gitignore file.
+**_split_gitignore_patterns**: The function of _split_gitignore_patterns is to separate .gitignore patterns into distinct lists for folder patterns and file patterns.
-The function categorizes these patterns into two types: folder patterns and file patterns. This categorization is based on the observation that folder patterns in .gitignore files typically end with a forward slash ("/"). Therefore, the function iterates over each pattern in the provided list, checking if it ends with a "/". If it does, the pattern is considered a folder pattern, and the trailing slash is removed before adding it to the `folder_patterns` list. Patterns that do not end with a slash are treated as file patterns and are added directly to the `file_patterns` list.
+**parameters**: The parameters of this Function.
+· gitignore_patterns: A list of patterns extracted from the .gitignore file.
-This separation is essential for the GitignoreChecker's functionality, as it allows the checker to apply the correct patterns to directories and files when determining what to ignore.
+**Code Description**: The _split_gitignore_patterns function takes a list of patterns from a .gitignore file as input. It iterates through each pattern and checks whether it ends with a forward slash ("/"). If a pattern ends with "/", it is identified as a folder pattern, and the trailing slash is removed before appending it to the folder_patterns list. If a pattern does not end with "/", it is treated as a file pattern and is added to the file_patterns list. The function ultimately returns a tuple containing two lists: the first list includes all folder patterns, while the second list contains all file patterns.
-The function is called by `_load_gitignore_patterns`, another method within the GitignoreChecker class.
+This function is called by the _load_gitignore_patterns method within the GitignoreChecker class.
The _load_gitignore_patterns method is responsible for loading and parsing the contents of a .gitignore file. After reading the file, it utilizes the _parse_gitignore method to extract the patterns from the content. Once the patterns are obtained, _load_gitignore_patterns calls _split_gitignore_patterns to categorize these patterns into folder and file patterns before returning them as a tuple. This structured approach ensures that the patterns are organized for further processing or application within the project. -**Note**: -- It is important to ensure that the `gitignore_patterns` list is correctly populated with patterns from a .gitignore file before calling this function. Incorrect or malformed patterns may lead to unexpected behavior. -- The function assumes that all folder patterns in the .gitignore file are correctly suffixed with a "/", which is a common convention but may not always be the case in every .gitignore file. +**Note**: It is important to ensure that the input list of gitignore_patterns is properly formatted according to .gitignore syntax to achieve accurate results when splitting the patterns. -**Output Example**: -Given an input list `[".DS_Store", "node_modules/", "*.log"]`, the function would return the following tuple: -- Folder patterns: `["node_modules"]` -- File patterns: `[".DS_Store", "*.log"]` - -This output demonstrates how the function effectively separates folder-specific patterns from file-specific patterns, allowing for targeted application of these patterns in subsequent operations. +**Output Example**: An example of the function's return value could be: +```python +(['src', 'docs'], ['README.md', 'LICENSE']) +``` +In this example, the first list contains folder patterns 'src' and 'docs', while the second list contains file patterns 'README.md' and 'LICENSE'. *** ### FunctionDef _is_ignored(path, patterns, is_dir) -**_is_ignored**: The function of _is_ignored is to determine if a given path matches any patterns specified, indicating whether it should be ignored or not. - -**Parameters**: -- **path (str)**: The path to check against the patterns. This could be a file or directory path. -- **patterns (list)**: A list of patterns to check the path against. These patterns can be simple strings or patterns that match filenames or directories. -- **is_dir (bool)**: A boolean flag indicating whether the path being checked is a directory. Defaults to False. +**_is_ignored**: The function of _is_ignored is to determine if a given path matches any specified patterns, indicating whether the path should be ignored. -**Code Description**: -The `_is_ignored` function is a utility designed to assess if a specific path should be ignored based on a list of patterns. It operates by iterating through each pattern provided in the `patterns` list and using the `fnmatch.fnmatch` method to check if the `path` matches any of these patterns. If a match is found, the function immediately returns `True`, indicating the path matches a pattern and should be ignored. +**parameters**: The parameters of this Function. +· parameter1: path (str) - The path to check against the patterns. +· parameter2: patterns (list) - A list of patterns that the path will be checked against. +· parameter3: is_dir (bool) - A boolean indicating if the path is a directory; defaults to False. -Additionally, the function has a special consideration for directories. 
If the `is_dir` flag is set to `True` and a pattern ends with a "/", indicating it is intended to match directories, the function also checks if the path matches the pattern without the trailing slash. This allows for more flexible directory matching, accommodating patterns that are specifically meant to apply to directories.
+The _is_ignored function checks if the provided path matches any of the patterns in the given list. It utilizes the fnmatch module to perform pattern matching. The function iterates through each pattern in the patterns list and checks if the path matches the pattern directly. If the path is a directory (indicated by the is_dir parameter being True), it also checks if the pattern ends with a slash ("/") and if the path matches the pattern without the trailing slash. If any match is found, the function returns True, indicating that the path should be ignored. If no matches are found after checking all patterns, it returns False.
-In the context of its usage within the project, specifically by the `check_files_and_folders` method of the `GitignoreChecker` class, `_is_ignored` plays a critical role in filtering out files and directories that should be ignored based on patterns derived from a `.gitignore` file or similar. This method utilizes `_is_ignored` to efficiently exclude ignored directories from further processing and to filter out individual files that do not match the desired criteria (in this case, files that are not ignored and have a '.py' extension).
+This function is called by the check_files_and_folders method within the GitignoreChecker class. The check_files_and_folders method is responsible for traversing a specified directory and checking each file and folder against the patterns defined for files and folders. It uses _is_ignored to filter out any directories and files that should be ignored based on the patterns provided. The result of this method is a list of files that are not ignored and have a '.py' extension, thus ensuring that only relevant files are returned for further processing.
-**Note**:
-- It is important to ensure that the patterns provided to `_is_ignored` are correctly formatted and meaningful in the context of the file system and naming conventions being used. Incorrect patterns may lead to unexpected behavior.
-The function assumes that the path and patterns are provided in a compatible format for `fnmatch.fnmatch`. Adjustments may be needed if working with raw patterns from a `.gitignore` file or similar sources.
+**Note**: It is important to ensure that the patterns provided are correctly formatted for fnmatch to work as expected. Additionally, the is_dir parameter should be set appropriately when checking directory paths to ensure accurate matching.
-**Output Example**:
-```python
-# Assuming the patterns list contains patterns to ignore Python bytecode and directory 'build/'
-patterns = ['*.pyc', 'build/']
-print(_is_ignored('example.pyc', patterns)) # Output: True
-print(_is_ignored('build', patterns, is_dir=True)) # Output: True
-print(_is_ignored('src/example.py', patterns)) # Output: False
-```
+**Output Example**: If the function is called with the path "main.py" and the patterns ["*.py", "test/"], the return value would be True, because "main.py" matches the "*.py" pattern and the path should therefore be ignored. If the path were "README.md" with the same patterns, the function would return False, indicating that it is not ignored.
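+
+As a rough sketch of the matching logic described above, assuming only the fnmatch-based behavior documented here rather than the exact project source:
+```python
+import fnmatch
+
+def _is_ignored(path: str, patterns: list[str], is_dir: bool = False) -> bool:
+    for pattern in patterns:
+        if fnmatch.fnmatch(path, pattern):
+            return True
+        # Directory patterns like "build/" should also match the bare name "build".
+        if is_dir and pattern.endswith("/") and fnmatch.fnmatch(path, pattern[:-1]):
+            return True
+    return False
+
+patterns = ["*.pyc", "build/"]
+print(_is_ignored("example.pyc", patterns))         # True
+print(_is_ignored("build", patterns, is_dir=True))  # True
+print(_is_ignored("main.py", patterns))             # False
+```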
*** ### FunctionDef check_files_and_folders(self) -**check_files_and_folders**: The function of check_files_and_folders is to identify and return a list of Python files in a given directory that are not ignored by gitignore patterns. +**check_files_and_folders**: The function of check_files_and_folders is to check all files and folders in the specified directory against the defined gitignore patterns and return a list of files that are not ignored and have the '.py' extension. -**Parameters**: This function does not accept any parameters except for the implicit `self` parameter, as it is a method of a class and operates on the class's instance variables. +**parameters**: The parameters of this Function. +· parameter1: None - This function does not take any parameters directly as it operates on the instance's attributes. -**Code Description**: The `check_files_and_folders` method is designed to traverse a directory structure recursively, starting from a base directory specified in the instance variable `self.directory`. It filters out both files and directories that match patterns specified in a `.gitignore` file or equivalent, focusing specifically on files with a `.py` extension that are not meant to be ignored according to these patterns. +**Code Description**: The check_files_and_folders method is responsible for traversing the directory specified by the instance variable self.directory. It utilizes the os.walk function to iterate through all directories and files within the specified path. For each directory, it filters out those that should be ignored based on the patterns defined in self.folder_patterns by calling the _is_ignored method with the is_dir parameter set to True. -The method utilizes the `os.walk` function to iterate through the directory tree. For each directory encountered, it filters out subdirectories that should be ignored, based on the `self._is_ignored` method, which checks against `self.folder_patterns`. This ensures that the traversal does not descend into ignored directories, optimizing the process. +For each file encountered, the method constructs the full file path and its relative path to the base directory. It then checks if the file should be ignored by calling the _is_ignored method again, this time with the file name and the patterns defined in self.file_patterns. Additionally, it checks if the file has a '.py' extension. If both conditions are satisfied (the file is not ignored and has a '.py' extension), the relative path of the file is added to the not_ignored_files list. -For each file encountered in the traversal, the method constructs its full path and then its relative path with respect to `self.directory`. It then checks if the file should be ignored, using the `self._is_ignored` method with `self.file_patterns`. If a file is not to be ignored and has a `.py` extension, its relative path is added to the list `not_ignored_files`. +The method ultimately returns a list of paths to Python files that are not ignored, allowing further processing of relevant files in the project. -Finally, the method returns the list of relative paths to Python files that are not ignored. This list can be used for further processing, such as analyzing the Python files' structure or content. +This method is called by the generate_overall_structure method in the FileHandler class. In this context, it is used to gather a list of files that should be processed from a repository, excluding any files that are ignored according to the gitignore patterns. 
The results from check_files_and_folders are then iterated over, and each file is further processed to generate the overall structure of the repository.
-**Note**:
-- The effectiveness of this method relies on the accurate specification of ignore patterns in `self.file_patterns` and `self.folder_patterns`, which should be derived from a `.gitignore` file or similar. Incorrect or incomplete patterns may result in unwanted files being included or desired files being excluded.
-The method assumes that the base directory (`self.directory`) is correctly set and accessible. Errors in setting this path could lead to incorrect results or runtime errors.
-The returned file paths are relative to `self.directory`, which may require conversion to absolute paths for certain operations outside the scope of this method.
+**Note**: It is essential to ensure that the gitignore patterns are correctly defined and formatted for accurate matching. The method relies on the _is_ignored function to determine which files and directories should be excluded based on these patterns.
-**Output Example**: Assuming the base directory contains several Python files and directories, some of which are ignored by gitignore patterns, an example output might look like this:
-```python
-['src/main.py', 'tests/test_main.py']
-```
-This output indicates that `main.py` in the `src` directory and `test_main.py` in the `tests` directory are the Python files not ignored by the specified patterns.
+**Output Example**: If the method is executed in a directory containing files such as "script.py", "test_script.py", and "README.md", and none of the gitignore patterns match the Python files, the return value would be ["script.py", "test_script.py"]; "README.md" is excluded because it does not have the '.py' extension.
***
diff --git a/markdown_docs/repo_agent/utils/meta_info_utils.md b/markdown_docs/repo_agent/utils/meta_info_utils.md
new file mode 100644
index 0000000..f19ece4
--- /dev/null
+++ b/markdown_docs/repo_agent/utils/meta_info_utils.md
@@ -0,0 +1,67 @@
+## FunctionDef make_fake_files
+**make_fake_files**: The function of make_fake_files is to analyze the git status of a repository and create temporary files that reflect the current state of the working directory, specifically for untracked and unstaged changes.
+
+**parameters**: The parameters of this Function.
+· No parameters are required for this function.
+
+**Code Description**: The make_fake_files function is designed to interact with a Git repository to detect changes in the working directory that have not been staged for commit. It performs the following key operations:
+
+1. **Delete Existing Fake Files**: The function begins by calling delete_fake_files to ensure that any previously created temporary files are removed before generating new ones.
+
+2. **Retrieve Project Settings**: It retrieves the current project settings using the SettingsManager's get_setting method, which ensures consistent access to configuration settings throughout the application.
+
+3. **Initialize Git Repository**: The function initializes a Git repository object using the target repository path specified in the project settings.
+
+4. **Detect Unstaged Changes**: It identifies unstaged changes in the repository using the index.diff method, which returns a list of modified files that have not been added to the staging area. Additionally, it collects untracked files that exist in the file system but are not tracked by Git.
+
+5.
**Skip Untracked Python Files**: The function iterates through the list of untracked files and skips any that have a ".py" extension, logging a message for each skipped file. + +6. **Handle New and Modified Files**: For files that have been modified (but not staged), the function checks if they end with a specific substring (latest_verison_substring). If they do, an error is logged, and the function exits. Otherwise, it renames the original file to include the latest version substring and creates a new file with the original name, writing the original content back into it. + +7. **Return Values**: Finally, the function returns a dictionary mapping the original file paths to their corresponding fake file paths, along with a list of files that were skipped during processing. + +The make_fake_files function is called within the diff function in the main.py file. This function is responsible for checking for changes in the repository and determining which documents need to be updated or generated. By calling make_fake_files, the diff function ensures that the current state of the repository is accurately reflected in the documentation process. + +**Note**: It is crucial to ensure that the target repository is properly configured and that the latest_verison_substring does not conflict with existing file names. Any misconfiguration may lead to runtime errors or unexpected behavior during the execution of this function. + +**Output Example**: A possible appearance of the code's return value when calling make_fake_files could be: +``` +({ + 'original_file_path.py': 'original_file_path.latest_version', + 'another_file.py': 'another_file.latest_version' +}, ['skipped_file.py']) +``` +## FunctionDef delete_fake_files +**delete_fake_files**: The function of delete_fake_files is to remove temporary files generated during the documentation process after the task execution is completed. + +**parameters**: The parameters of this Function. +· No parameters are required for this function. + +**Code Description**: The delete_fake_files function is responsible for cleaning up temporary files, referred to as "fake files," that are created during the documentation generation process. This function utilizes a nested helper function, gci, which performs a recursive traversal of the directory specified by the project settings to identify and delete or rename files based on specific criteria. + +The function begins by retrieving the project settings through the SettingsManager's get_setting method, which ensures that the configuration settings are consistently accessed throughout the application. The gci function is then called with the target repository path, which is obtained from the settings. + +Within the gci function, the following operations are performed: +1. The function lists all files and directories within the specified filepath. +2. For each file, it checks if it is a directory and recursively calls itself if it is. +3. If the file ends with a specific substring (latest_verison_substring), it indicates that it is a temporary file. The function then constructs the original file name by replacing the substring with ".py". +4. If the original file exists and is successfully deleted, the function prints a message indicating that the temporary file has been deleted. If the temporary file is empty, it is also deleted. +5. If the original file exists but is not empty, the temporary file is renamed back to the original file name, and a message is printed indicating that the latest version has been recovered. 
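+
+A minimal sketch of the recovery logic described in the steps above; the helper name gci follows this description, while the concrete value of latest_verison_substring is a placeholder assumption rather than the project's real suffix:
+```python
+import os
+
+# Placeholder suffix for illustration; the real value is defined in the project code.
+latest_verison_substring = "_latest_version.py"
+
+def gci(filepath: str) -> None:
+    for name in os.listdir(filepath):
+        full = os.path.join(filepath, name)
+        if os.path.isdir(full):
+            gci(full)  # recurse into subdirectories
+        elif full.endswith(latest_verison_substring):
+            origin = full.replace(latest_verison_substring, ".py")
+            if os.path.exists(origin):
+                os.remove(origin)  # drop the fake file that replaced the original
+            if os.path.getsize(full) == 0:
+                # Empty temporary file: nothing to recover, delete it.
+                print(f"Deleting temp file: {full}")
+                os.remove(full)
+            else:
+                # Non-empty: the temporary file holds the latest version; restore it.
+                print(f"Recovering latest version: {origin}")
+                os.rename(full, origin)
+```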
+ +The delete_fake_files function is called in various parts of the project, including the clean function in main.py, which explicitly invokes delete_fake_files to ensure that all temporary files are removed after the documentation process. Additionally, it is called within the diff function to clean up any fake files before checking for changes in the repository. The run method of the Runner class also calls delete_fake_files after completing the document update process, ensuring that any temporary files created during the run are cleaned up. + +**Note**: It is important to ensure that the target repository is correctly configured and accessible before invoking delete_fake_files. Any issues with file permissions or incorrect paths may lead to runtime errors during the deletion or renaming processes. +### FunctionDef gci(filepath) +**gci**: The function of gci is to traverse a specified directory and its subdirectories to delete or rename files based on specific criteria. + +**parameters**: The parameters of this Function. +· filepath: A string representing the path of the directory to be traversed. + +**Code Description**: The gci function begins by listing all files and directories within the specified filepath. It iterates through each item found in the directory. If an item is a directory, the function calls itself recursively to traverse that subdirectory. For files, it checks if the filename ends with a specific substring defined as `latest_verison_substring`. If this condition is met, the function constructs an original filename by replacing the substring with ".py". + +The function then checks the size of the file. If the file size is zero, it indicates that the file is empty, and the function proceeds to delete both the empty file and its corresponding original file. A message is printed to the console indicating the deletion of the temporary file. Conversely, if the file is not empty, the function renames the temporary file back to its original name and prints a message indicating that the latest version is being recovered. + +This function effectively manages temporary files by either deleting them if they are empty or restoring the original file if they contain data, ensuring that the directory remains clean and organized. + +**Note**: It is important to ensure that the `latest_verison_substring` variable is defined in the scope where this function is used, as it is crucial for determining which files to process. Additionally, the function relies on the presence of the `setting.project.target_repo` variable to format the output messages correctly. +*** diff --git a/markdown_docs/tests/test_change_detector.md b/markdown_docs/tests/test_change_detector.md index 8352c17..5139614 100644 --- a/markdown_docs/tests/test_change_detector.md +++ b/markdown_docs/tests/test_change_detector.md @@ -1,113 +1,104 @@ ## ClassDef TestChangeDetector -**TestChangeDetector**: The function of TestChangeDetector is to test the functionality of a change detection system within a Git repository environment. +**TestChangeDetector**: The function of TestChangeDetector is to perform unit tests on the ChangeDetector class, specifically focusing on the detection and management of staged and unstaged files in a Git repository. -**Attributes**: -- `test_repo_path`: Stores the path to the test repository used during the tests. -- `repo`: Represents the initialized Git repository at the test path. +**attributes**: The attributes of this Class. 
+· test_repo_path: The file path to the test repository created for the unit tests. +· repo: The initialized Git repository object used for testing. -**Code Description**: -The `TestChangeDetector` class is designed to validate the operations of a change detection mechanism in a Git repository, specifically focusing on staging and unstaging Python and Markdown files. It inherits from `unittest.TestCase`, enabling it to use a wide range of assertions for testing. +**Code Description**: The TestChangeDetector class is a unit test case that inherits from unittest.TestCase, providing a framework for testing the functionality of the ChangeDetector class. The class includes setup and teardown methods to prepare and clean up the test environment, specifically a Git repository used for testing file changes. -1. **Setup and Teardown**: - - `setUpClass`: This class method is executed once before running the tests. It prepares the environment by creating a test Git repository, configuring user information, and creating initial test files (`test_file.py` and `test_file.md`). This setup includes committing these files to simulate a real-world scenario where the repository already contains some committed files. - - `tearDownClass`: This class method is called after all tests have been executed. It cleans up by closing the repository and removing the test repository directory, ensuring no residual data affects subsequent tests. +The setUpClass method is a class method that initializes the test environment before any tests are run. It defines the path for the test repository, creates the directory if it does not exist, initializes a new Git repository, and configures user information for Git operations. It also creates two test files: a Python file and a Markdown file, and performs an initial commit to the repository. -2. **Test Methods**: - - `test_get_staged_pys`: Tests the detection of newly staged Python files. It simulates adding a new Python file to the staging area and asserts that the change detection system correctly identifies this file as staged. - - `test_get_unstaged_mds`: Focuses on detecting unstaged modifications in Markdown files. It modifies a Markdown file without staging the changes and verifies that the change detection system accurately identifies the file as unstaged. - - `test_add_unstaged_mds`: Tests the functionality of adding unstaged Markdown files to the staging area. It ensures that after the operation, no unstaged Markdown files remain, demonstrating the system's ability to stage files as expected. +The class contains three test methods: +1. test_get_staged_pys: This method tests the ChangeDetector's ability to identify staged Python files. It creates a new Python file, stages it, and asserts that the file is included in the list of staged files returned by the ChangeDetector. + +2. test_get_unstaged_mds: This method tests the ChangeDetector's ability to identify unstaged Markdown files. It modifies an existing Markdown file without staging it and asserts that the modified file is included in the list of unstaged files returned by the ChangeDetector. -**Note**: -- The tests rely on the filesystem and Git operations, making the test environment closely resemble a real-world project setup. -- The `ChangeDetector` class, which is tested by this suite, is assumed to provide methods like `get_staged_pys`, `get_to_be_staged_files`, and `add_unstaged_files` to interact with the Git repository. 
These methods are crucial for the tests but are not defined within this class, indicating that `ChangeDetector` is a separate component of the system. -- It is important to ensure that the test environment is isolated and does not interfere with actual project data or other tests. This isolation is achieved through the use of a dedicated test repository and cleanup procedures. -### FunctionDef setUpClass(cls) -**setUpClass**: The function of setUpClass is to initialize a test environment for a class of tests by setting up a test Git repository with initial configurations and test files. - -**Parameters**: -- `cls`: This parameter represents the class itself and is used to access class variables and methods. +3. test_add_unstaged_mds: This method ensures that there are unstaged Markdown files and then uses the ChangeDetector to stage them. It checks that after the staging operation, there are no remaining unstaged Markdown files, asserting that the operation was successful. -**Code Description**: -The `setUpClass` method is a class method, denoted by the `@classmethod` decorator in Python (not shown in the snippet but implied by the use of `cls`), which is executed once for the class before any tests are run. This method is specifically designed to set up a test environment that will be shared across all test cases in the class. The method performs the following operations: +The tearDownClass method is a class method that cleans up the test environment after all tests have been executed. It closes the Git repository and removes the test repository directory to ensure no residual files remain. -1. **Define the Path of the Test Repository**: It constructs the path to a test repository named 'test_repo' located in the same directory as the test script. This is achieved by joining the directory path of the current file (`__file__`) with the folder name 'test_repo'. - -2. **Create the Test Repository Directory**: It checks if the directory for the test repository exists. If it does not, the method creates the directory using `os.makedirs`. +**Note**: It is important to ensure that the ChangeDetector class is properly implemented and available in the testing environment for these tests to execute successfully. Additionally, the tests rely on the presence of the Git command-line tools and the appropriate permissions to create and manipulate files and directories. +### FunctionDef setUpClass(cls) +**setUpClass**: The function of setUpClass is to prepare a Git repository and related files for testing. -3. **Initialize a Git Repository**: It initializes a new Git repository in the test repository directory. This is done using the `Repo.init` method from a Git library (presumably GitPython), which returns a `Repo` object representing the newly created Git repository. +**parameters**: This function takes no parameters. -4. **Configure Git User Information**: The method configures the user email and name for the Git repository. This is essential for committing changes to the repository, as Git requires user identification for commits. +**Code Description**: +setUpClass is a class method used to set up the test environment before the test class runs. The method first defines the path of the test repository, setting it to the 'test_repo' folder under the directory containing the current file. If the folder does not exist, the method creates it. It then initializes a new Git repository with the GitPython library and associates it with the specified path. -5. **Create Test Files**: It creates two test files within the test repository: 'test_file.py' and 'test_file.md'. The Python test file contains a simple print statement, while the Markdown test file contains a header. This simulates the presence of actual code and documentation within the repository.
+After initializing the Git repository, the method configures the Git user information, including the user's email and name, for use in subsequent Git operations. Next, the method creates two test files: a Python file 'test_file.py' containing a single print statement, and a Markdown file 'test_file.md' containing a Markdown heading. -6. **Simulate Git Operations**: Finally, it simulates basic Git operations by adding all files to the staging area (`git.add(A=True)`) and committing them with a message 'Initial commit'. This sets up the repository in a state as if it has been actively used. +Finally, the method simulates Git operations by adding all files to the staging area and making an initial commit, completing the setup of the test environment. These operations ensure that the test environment is clean and controllable when the tests run. -**Note**: -- This method is crucial for tests that interact with a Git repository, as it ensures a consistent and isolated test environment. By performing setup at the class level, it minimizes the overhead of initializing the environment for each test case. -It is important that the test repository is properly cleaned up after the tests are run to avoid interference with subsequent tests. This cleanup is typically done in a corresponding `tearDownClass` method, which is not shown in the provided code snippet. +**Note**: When using this method, make sure setUpClass is invoked in the test class so that the test environment is set up correctly before all test cases run. Also, ensure that the GitPython library is installed to support Git operations. *** ### FunctionDef test_get_staged_pys(self) -**test_get_staged_pys**: The function of `test_get_staged_pys` is to test the retrieval of staged Python files in a Git repository using the `ChangeDetector` class. +**test_get_staged_pys**: The function of test_get_staged_pys is to verify that a newly created Python file is correctly identified as staged in the Git repository. -**Parameters**: This function does not accept any parameters as it is a test method within a test class, designed to operate on a predefined test environment setup. +**parameters**: The parameters of this Function. +· None -**Code Description**: The `test_get_staged_pys` function is a critical component of the testing suite for the `ChangeDetector` class, specifically focusing on the `get_staged_pys` method. The test follows a structured approach to validate the functionality of identifying staged Python files within a Git repository. Initially, it creates a new Python file in the test repository and writes a simple print statement into it. This file is then staged using Git commands, simulating a real-world scenario where a developer stages changes before committing them. +**Code Description**: The test_get_staged_pys function is a unit test designed to validate the functionality of the ChangeDetector class, specifically its ability to detect staged Python files within a Git repository. The function begins by creating a new Python file named 'new_test_file.py' in a specified test repository path. This file contains a simple print statement. Once the file is created, it is added to the staging area of the Git repository using the Git command `git add`. -Following the setup, the test instantiates the `ChangeDetector` class with the path to the test repository. This object is then used to call the `get_staged_pys` method, which returns a dictionary of staged Python files. The key aspect of this test is to assert that the newly created and staged Python file is correctly identified and listed in the returned dictionary. This assertion is made by checking if the filename of the newly created file is present in the list of staged files, which is derived by extracting the basename of each path in the returned dictionary. +Following the staging of the new file, an instance of the ChangeDetector class is instantiated with the test repository path.
The method get_staged_pys of the ChangeDetector instance is then called to retrieve a list of Python files that are currently staged for commit. This method is responsible for checking the differences between the staging area and the last commit (HEAD) to identify which files have been added or modified. -The function concludes by printing the list of staged Python files, providing a clear output of the test results for verification purposes. This test method directly interacts with the `ChangeDetector` class, specifically testing its ability to accurately identify and list Python files that have been staged for commit. It simulates a realistic use case of the `ChangeDetector` in a continuous integration/continuous deployment (CI/CD) pipeline or an automated script where changes to Python files need to be detected before committing them to the repository. +The test then asserts that 'new_test_file.py' is included in the list of staged files returned by get_staged_pys. This assertion confirms that the ChangeDetector class is functioning as expected, accurately tracking the newly staged Python file. Additionally, the function prints the list of staged Python files for verification purposes. -**Note**: This test function assumes the presence of a Git repository as part of the test environment setup. It also relies on the correct functioning of the GitPython library for staging files and retrieving staged file information. The test is designed to run in a controlled environment where the test repository's state can be manipulated without affecting actual development work. It is essential to ensure that the test repository path (`self.test_repo_path`) is correctly set up and points to a valid Git repository. +This test is crucial for ensuring that the ChangeDetector class operates correctly in identifying changes within a Git repository, particularly for Python files. It serves as a safeguard against potential regressions in the functionality of the change detection mechanism. + +**Note**: It is important to ensure that the test environment is properly set up, including the availability of a valid Git repository and the necessary permissions to create and stage files. The GitPython library must also be correctly configured to facilitate interaction with the Git repository. *** ### FunctionDef test_get_unstaged_mds(self) -**test_get_unstaged_mds**: The function of `test_get_unstaged_mds` is to verify that the ChangeDetector correctly identifies Markdown files that have been modified but not yet staged in a git repository. +**test_get_unstaged_mds**: The function of test_get_unstaged_mds is to verify that a modified Markdown file, which has not been staged, is correctly identified as an unstaged file by the ChangeDetector class. -**Parameters**: This function does not take any parameters as it is designed to be a test case within a testing framework, typically executed by the test runner. +**parameters**: The parameters of this Function. +· No parameters are required for this function. -**Code Description**: The `test_get_unstaged_mds` function is part of a suite of automated tests aimed at ensuring the reliability and correctness of the ChangeDetector's functionality, specifically its ability to detect unstaged Markdown (.md) files within a git repository. 
The test performs the following steps: +**Code Description**: The test_get_unstaged_mds function is a unit test designed to validate the functionality of the ChangeDetector class, specifically its ability to identify unstaged Markdown files in a Git repository. The function performs the following operations: -1. Modifies a Markdown file within a test repository by appending additional content to it, simulating a change that a developer might make during the documentation process. -2. Instantiates a ChangeDetector object with the path to the test repository. This object is responsible for identifying file changes within the repository. -3. Calls the `get_to_be_staged_files` method on the ChangeDetector instance to retrieve a list of files that have been modified but not yet staged for commit. -4. Asserts that the modified Markdown file is correctly identified and included in the list of unstaged files returned by the ChangeDetector. This assertion verifies that the ChangeDetector is accurately tracking changes to Markdown files that have not been staged. -5. Outputs the list of unstaged Markdown files to the console for verification and debugging purposes. +1. It begins by defining the path to a Markdown file named 'test_file.md' within a test repository directory specified by `self.test_repo_path`. +2. The function opens this Markdown file in append mode and writes additional content to it, simulating a modification that has not yet been staged. +3. An instance of the ChangeDetector class is then created, initialized with the path to the test repository. +4. The method `get_to_be_staged_files` of the ChangeDetector instance is called to retrieve a list of files that have been modified but not staged. +5. The function asserts that 'test_file.md' is included in the list of unstaged files by checking if its basename is present in the returned list. +6. Finally, it prints the list of unstaged Markdown files for verification. -The test case is critical for ensuring that documentation changes (in this case, modifications to Markdown files) are not overlooked and can be correctly identified and staged alongside code changes. This helps maintain consistency and accuracy in project documentation, especially in development environments where changes to documentation and code often occur simultaneously. +This function is called within the test_add_unstaged_mds function, which ensures that there is at least one unstaged Markdown file before attempting to add unstaged files to the staging area. The test_add_unstaged_mds function relies on the successful execution of test_get_unstaged_mds to confirm that the ChangeDetector can accurately identify unstaged files, thereby establishing a dependency between these two test functions. -**Note**: This test function relies on the correct initialization and configuration of a git repository within the test environment. It assumes that the repository is in a state where changes can be made and detected. Additionally, the test's effectiveness is contingent upon the ChangeDetector's reliance on the GitPython library for interacting with the git repository, which means that the library's functionality and the repository's state significantly influence the test outcome. +**Note**: It is essential to ensure that the test repository is correctly set up and that the necessary files exist before running this test. The test environment should be clean to avoid false positives or negatives in the test results. 
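As a rough illustration of the flow these docs describe, such a test might look like the following sketch (the ChangeDetector import path is an assumption, and `test_repo_path` is the fixture created in the setUpClass described earlier):

```python
import os
import unittest

from repo_agent.change_detector import ChangeDetector  # import path assumed


class TestChangeDetector(unittest.TestCase):
    def test_get_unstaged_mds(self):
        # Append content to a Markdown file without staging the change.
        md_path = os.path.join(self.test_repo_path, "test_file.md")
        with open(md_path, "a", encoding="utf-8") as f:
            f.write("\nAdditional unstaged content.")

        # The ChangeDetector should now report the file as unstaged.
        detector = ChangeDetector(self.test_repo_path)
        unstaged = detector.get_to_be_staged_files()
        self.assertIn("test_file.md", [os.path.basename(p) for p in unstaged])
```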
*** ### FunctionDef test_add_unstaged_mds(self) -**test_add_unstaged_mds**: The function of `test_add_unstaged_mds` is to validate that the ChangeDetector correctly stages previously unstaged Markdown files in a git repository. +**test_add_unstaged_mds**: The function of test_add_unstaged_mds is to verify that the ChangeDetector class correctly stages unstaged Markdown files in a Git repository. + +**parameters**: The parameters of this Function. +· No parameters are required for this function. -**Parameters**: This function does not take any parameters as it is designed to be a test case within a testing framework, typically executed by the test runner. +**Code Description**: The test_add_unstaged_mds function is a unit test designed to validate the functionality of the ChangeDetector class, specifically its ability to add unstaged Markdown files to the staging area of a Git repository. The function performs the following operations: -**Code Description**: The `test_add_unstaged_mds` function is a critical component of a suite of automated tests aimed at ensuring the reliability and correctness of the ChangeDetector's functionality, specifically its ability to stage unstaged Markdown (.md) files within a git repository. The test performs the following steps: +1. It first ensures that there is at least one unstaged Markdown file by invoking the test_get_unstaged_mds function. This function modifies a Markdown file in the test repository, ensuring that it is recognized as unstaged. -1. It first ensures the presence of an unstaged Markdown file by calling the `test_get_unstaged_mds` function. This step is crucial as it sets up the test environment by simulating a scenario where a Markdown file has been modified but not yet staged for commit. +2. An instance of the ChangeDetector class is created, initialized with the path to the test repository specified by `self.test_repo_path`. This instance will be used to manage the staging of files. -2. A ChangeDetector object is then instantiated with the path to the test repository. The ChangeDetector class is responsible for identifying and handling file changes within the repository, including staging files that meet specific criteria. +3. The add_unstaged_files method of the ChangeDetector instance is called. This method identifies all unstaged files that meet specific criteria and stages them in the Git repository. -3. The `add_unstaged_files` method of the ChangeDetector object is called to add the previously identified unstaged Markdown files to the staging area. This method internally uses the `get_to_be_staged_files` to identify files that need to be staged based on predefined conditions and stages them accordingly. +4. After attempting to stage the files, the function retrieves the list of files that are still unstaged by calling the get_to_be_staged_files method. This method checks for any files that remain unstaged after the add operation. -4. After the staging operation, the test verifies the outcome by calling the `get_to_be_staged_files` method again to retrieve a list of files that are still unstaged. The expectation is that the list will be empty, indicating that all previously unstaged Markdown files have been successfully staged. +5. The function asserts that the length of the list of unstaged files after the add operation is zero, indicating that all unstaged Markdown files have been successfully staged. -5. 
The test concludes by asserting that the number of unstaged files after the staging operation is zero, confirming that the ChangeDetector has correctly staged all relevant Markdown files. Additionally, it prints the number of remaining unstaged Markdown files for verification and debugging purposes. +6. Finally, it prints the number of remaining unstaged Markdown files, which should be zero if the test passes. -This test case is essential for ensuring that the ChangeDetector can accurately identify and stage documentation changes (in this case, modifications to Markdown files) alongside code changes. This helps maintain consistency and accuracy in project documentation, especially in development environments where changes to documentation and code often occur simultaneously. +This function is dependent on the successful execution of the test_get_unstaged_mds function, which ensures that there is at least one unstaged Markdown file before the add operation is attempted. The relationship between these two functions is crucial, as test_add_unstaged_mds relies on the outcome of test_get_unstaged_mds to validate the staging functionality of the ChangeDetector class. -**Note**: This test function relies on the correct initialization and configuration of a git repository within the test environment. It assumes that the repository is in a state where changes can be made and detected. The effectiveness of the test is contingent upon the ChangeDetector's reliance on the GitPython library for interacting with the git repository, which means that the library's functionality and the repository's state significantly influence the test outcome. +**Note**: It is essential to ensure that the test repository is correctly set up and that the necessary files exist before running this test. The test environment should be clean to avoid false positives or negatives in the test results. *** ### FunctionDef tearDownClass(cls) -**tearDownClass**: The function of tearDownClass is to clean up resources after all tests in the class have been run. - -**Parameters**: This function takes a single parameter: -- cls: A reference to the class on which the method is called. It is used to access class variables and methods. - -**Code Description**: The `tearDownClass` method is a class method, indicated by the `cls` parameter, which is a convention in Python to refer to the class itself rather than an instance of the class. This method is specifically designed to be executed after all the tests in the test case have been run. Its primary purpose is to perform any cleanup actions that are necessary to restore the system to a state before the test case was executed. In the provided code, the method performs two main actions: +**tearDownClass**: The function of tearDownClass is to clean up the test repository. -1. It calls the `close` method on the `repo` attribute of the class. This is presumably to close any open resources or connections associated with the `repo` object, which might have been used during the tests. This is an important step to release resources that are no longer needed and to prevent resource leaks which can lead to performance issues. +**parameters**: This function takes no parameters. -2. It executes a shell command to remove the test repository directory, as specified by the `test_repo_path` class attribute. This is done using the `os.system` function with the command `rm -rf` followed by the path to the directory. This step ensures that any files or data created during the tests are removed, leaving the environment clean for subsequent tests or operations.
The use of `rm -rf` is a powerful command that recursively removes a directory and all its contents without prompting for confirmation, so it should be used with caution. +**Code Description**: +tearDownClass is a class method used to perform cleanup after all test cases in the test class have finished executing. The method first calls cls.repo.close() to release the resources associated with the test repository, ensuring no connections or file handles are left open. It then removes the test repository folder and its contents with the command os.system('rm -rf ' + cls.test_repo_path). Here, cls.test_repo_path is a class attribute pointing to the path of the test repository. In this way, tearDownClass keeps the test environment clean and prevents later tests from being affected by earlier ones. -**Note**: It is crucial to ensure that the `tearDownClass` method is correctly implemented to avoid any side effects on the environment where the tests are run. This includes making sure that all resources are properly released and any changes to the environment are reverted. Additionally, since this method involves executing a shell command, it's important to validate the inputs to avoid any security vulnerabilities, such as command injection attacks. Lastly, this method should only be used in a test environment to prevent accidental deletion of important data or resources in a production environment. +**Note**: When using this function, make sure it is called after the test cases have executed, to avoid resource leaks or file conflicts. Also, be careful when deleting files with os.system, to avoid accidentally removing other important files. *** diff --git a/markdown_docs/tests/test_gradio_ui.md b/markdown_docs/tests/test_gradio_ui.md deleted file mode 100644 index b9a4821..0000000 --- a/markdown_docs/tests/test_gradio_ui.md +++ /dev/null @@ -1,58 +0,0 @@ -## ClassDef TestGradioInterface -**TestGradioInterface**: The function of TestGradioInterface is to test the integration and functionality of a GradioInterface within a unit test framework. - -**Attributes**: -- `mock_respond_function`: A MagicMock object that simulates the behavior of a response function to be used with the GradioInterface. -- `gradio_interface`: An instance of GradioInterface initialized with `mock_respond_function`. - -**Code Description**: -The `TestGradioInterface` class is designed to operate within a unit testing framework, specifically utilizing the `unittest.TestCase` class from Python's standard library. This class is structured to test the functionality and integration of a custom GradioInterface, which is assumed to be a wrapper or a component facilitating the creation of Gradio app interfaces. - -1. **Initialization (`setUp` method)**: This method is automatically called before each test method runs. It initializes a `MagicMock` object as `mock_respond_function` to simulate a response function. This mock function is then passed to the `GradioInterface` constructor, and an instance of `GradioInterface` is stored in `self.gradio_interface`. This setup ensures that each test starts with a fresh instance of the interface and a mock function. - -2. **Testing Gradio Interface Setup (`test_setup_gradio_interface` method)**: This test method uses the `patch` decorator from the `unittest.mock` module to mock the `gradio.Blocks` class, which is presumably a part of the Gradio interface setup. The method then calls `setup_gradio_interface` on the `gradio_interface` instance and verifies that `gradio.Blocks` was indeed called. This test ensures that the Gradio interface setup process integrates correctly with the expected Gradio components. - -3. **Testing Respond Function Integration (`test_respond_function_integration` method)**: This method tests the integration and correct functioning of the respond function within the GradioInterface. It calls the `respond` method on the `gradio_interface` instance with a test message and a system message. The test then verifies that the `mock_respond_function` was called with the correct arguments.
This ensures that the GradioInterface correctly integrates and utilizes the provided respond function. - -**Note**: -- It is crucial to ensure that the `GradioInterface` class and its methods (`setup_gradio_interface` and `respond`) are correctly implemented and integrated with the Gradio library. The tests in `TestGradioInterface` assume such an implementation. -- The use of `MagicMock` and the `patch` decorator are essential for isolating the unit tests from external dependencies and for testing the behavior of the system under test in a controlled environment. -### FunctionDef setUp(self) -**setUp**: The function of setUp is to initialize the necessary components for a test case in the TestGradioInterface class. - -**parameters**: This function does not take any parameters except for the implicit `self` parameter, which is a reference to the instance of the class that is being set up. - -**Code Description**: The `setUp` method is a crucial part of the TestGradioInterface class, designed to prepare the testing environment before each test method is executed. It performs the initialization of components that are required for the tests to run. Specifically, the method accomplishes two main tasks: - -1. It creates a `MagicMock` object and assigns it to `self.mock_respond_function`. The `MagicMock` class is a part of the `unittest.mock` module, which allows for the simulation of interface behaviors without relying on their actual implementations. This is particularly useful in testing scenarios where the actual behavior is either irrelevant to the test's purpose or cumbersome to replicate. In this context, `self.mock_respond_function` simulates a response function that would be used by the GradioInterface, allowing the tests to focus on the interface's behavior rather than the specifics of the response generation. - -2. It instantiates a `GradioInterface` object with `self.mock_respond_function` as its argument. This step is crucial as it sets up the GradioInterface, which is the subject under test. The GradioInterface is designed to create a user interface for interacting with a response function through Gradio, facilitating the building of machine learning and data science demos and applications. By passing the mocked response function to the GradioInterface, the `setUp` method ensures that the interface is initialized in a controlled testing environment, where the behavior of the response function is predictable and manipulable. - -The relationship between `setUp` and its callees, particularly the instantiation of `GradioInterface`, is foundational for testing the interface's functionality. It ensures that each test starts with a fresh instance of the interface, configured with a mock response function, thereby isolating the tests from each other and from external dependencies. This isolation is essential for achieving accurate and reliable test results. - -**Note**: It is important to understand that the `setUp` method is automatically called before each test method in the test case class. This ensures that the test environment is correctly initialized for every test, preventing side effects from one test affecting others. Additionally, the use of `MagicMock` for the response function allows testers to specify return values and assert calls to the function, enabling comprehensive testing of the GradioInterface's interaction with response functions. 
-*** -### FunctionDef test_setup_gradio_interface(self, MockBlocks) -**test_setup_gradio_interface**: The function of test_setup_gradio_interface is to verify the proper setup and initialization of the Gradio interface within a testing environment. - -**parameters**: -- `self`: Represents an instance of the class containing the test method. It allows access to the attributes and methods within the class. -- `MockBlocks`: A mock object passed to the function, simulating the behavior of Gradio's Blocks API for testing purposes. - -**Code Description**: This function is designed to test the `setup_gradio_interface` method of the `GradioInterface` class, ensuring that it correctly initializes the Gradio interface for the RepoAgent chat application. The test begins by invoking the `setup_gradio_interface` method on the `gradio_interface` attribute of the test class instance. This action is intended to simulate the setup process of the Gradio interface as it would occur in a live environment, but within a controlled test scenario. - -Following the setup invocation, the function checks if the `MockBlocks` object was called as expected during the setup process. This step is crucial as it verifies that the Gradio Blocks API, which is central to creating the interactive web interface of the RepoAgent chat application, is utilized correctly. The `MockBlocks.assert_called()` method is a part of the testing framework that confirms whether the mock object was invoked, ensuring that the interface setup involves the necessary calls to Gradio's Blocks API. - -The relationship between this test function and its callee, `setup_gradio_interface`, is direct and functional. The test is specifically designed to validate the behavior of `setup_gradio_interface`, ensuring that it performs its intended task of setting up the Gradio interface correctly. This includes initializing the interface with the appropriate components, layout, and event handlers as detailed in the `setup_gradio_interface` method documentation. The use of a mock object (`MockBlocks`) in this context allows for the isolation of the method's functionality from the external dependencies, providing a focused and reliable test scenario. - -**Note**: This test function is part of a larger suite of tests aimed at ensuring the reliability and correctness of the Gradio interface setup process within the RepoAgent chat application. It is important for developers to run this test after modifications to the `setup_gradio_interface` method or the Gradio interface configuration to ensure that changes do not break the expected behavior. The use of mock objects like `MockBlocks` is a common practice in testing to simulate external dependencies, allowing for more controlled and predictable test outcomes. -*** -### FunctionDef test_respond_function_integration(self) -**test_respond_function_integration**: The function of test_respond_function_integration is to verify the correct integration and invocation of the respond function within the Gradio interface. - -**parameters**: This function does not explicitly take parameters, as it is designed to be used within a test suite, specifically utilizing the self reference to access instance variables and methods. - -**Code Description**: The `test_respond_function_integration` function is a unit test designed to ensure that the `respond` function of a Gradio interface is correctly integrated and can be called with the expected parameters. 
The test begins by defining a test message, `test_msg`, with the value "Hello", and a system message, `test_system`, with the value "System Message". It then calls the `respond` method of the `gradio_interface`, which is presumably an instance variable of the test class, passing in the test and system messages as arguments. The crucial part of this test is the use of `self.mock_respond_function.assert_called_with(test_msg, test_system)`. This line verifies that the `respond` function, which has been mocked in the test setup, was called with the correct arguments (`test_msg` and `test_system`). This ensures that the integration between the Gradio interface and its respond function works as expected, and that the function is invoked correctly when the interface receives the specified input. - -**Note**: This test function is part of a larger test suite focused on the Gradio interface. It assumes that the `respond` function has been mocked (replaced with a mock object for testing purposes) before this test runs, which is a common practice in unit testing to isolate the functionality being tested. The test also relies on the `self.gradio_interface` being properly initialized and available, which suggests that there might be setup methods not shown here that prepare the test environment. This function is a critical component of ensuring the reliability and correctness of the Gradio interface's interaction with its respond function, highlighting the importance of thorough testing in software development. -*** diff --git a/markdown_docs/tests/test_json_handler.md b/markdown_docs/tests/test_json_handler.md index defe565..1736ca3 100644 --- a/markdown_docs/tests/test_json_handler.md +++ b/markdown_docs/tests/test_json_handler.md @@ -1,93 +1,86 @@ ## ClassDef TestJsonFileProcessor -**TestJsonFileProcessor**: The function of TestJsonFileProcessor is to test the functionality of the JsonFileProcessor class, ensuring its methods for reading, extracting data from, and searching within JSON files work as expected. +**TestJsonFileProcessor**: The function of TestJsonFileProcessor is to test the functionalities of the JsonFileProcessor class, specifically its methods for reading and extracting data from JSON files. -**Attributes**: -- `processor`: An instance of the JsonFileProcessor class initialized with a test JSON file path. +**attributes**: The attributes of this Class. +· processor: An instance of the JsonFileProcessor class initialized with the filename "test.json". -**Code Description**: -The TestJsonFileProcessor class is a collection of unit tests for the JsonFileProcessor class, designed to validate its core functionalities. It extends `unittest.TestCase`, a standard class in Python's unittest framework, providing a structure for writing test cases. The class tests three primary methods of the JsonFileProcessor: reading JSON files, extracting markdown contents, and performing nested searches within JSON data. +**Code Description**: The TestJsonFileProcessor class is a unit test case that inherits from unittest.TestCase. It is designed to validate the behavior of the JsonFileProcessor class, which is responsible for handling JSON file operations. The class contains several test methods that utilize the unittest framework's features, such as setup methods and mocking. -1. **Initialization and Setup**: The `setUp` method initializes a JsonFileProcessor instance with a predefined JSON file path ("test.json"). This setup runs before each test method, ensuring a fresh environment for every test case. 
+The setUp method initializes an instance of JsonFileProcessor with a test JSON file named "test.json". This setup is executed before each test method runs, ensuring that each test has a fresh instance of the processor. -2. **Testing JSON File Reading**: The `test_read_json_file` method uses the `@patch` decorator from Python's unittest.mock module to mock the built-in `open` function, simulating the reading of a JSON file. It verifies that the `read_json_file` method correctly reads and parses the mocked JSON data, asserting the equality of the returned data with the expected result. It also checks that the file was opened with the correct arguments. +The test_read_json_file method tests the read_json_file method of the JsonFileProcessor class. It uses the @patch decorator to mock the built-in open function, simulating the reading of a JSON file containing a specific structure. The test asserts that the data returned by read_json_file matches the expected dictionary structure and verifies that the open function was called with the correct parameters. -3. **Testing Markdown Content Extraction**: The `test_extract_md_contents` method mocks the `read_json_file` method of the JsonFileProcessor to return a predefined JSON structure. It tests the `extract_md_contents` method, ensuring it correctly extracts and includes "content1" from the mocked JSON data. +The test_extract_md_contents method tests the extract_md_contents method of the JsonFileProcessor class. It mocks the read_json_file method to return a predefined JSON structure. The test checks that the extracted markdown content includes the expected value "content1". -4. **Testing Nested JSON Search**: The `test_search_in_json_nested` method again mocks the built-in `open` function to simulate reading a different JSON structure. It tests the `search_in_json_nested` method's ability to find and return a nested JSON object by name. The method asserts the correctness of the search result and verifies that the file was opened with the correct parameters. +The test_search_in_json_nested method tests the search_in_json_nested method of the JsonFileProcessor class. Similar to the previous tests, it mocks the open function to provide a different JSON structure. The test asserts that the result of the search matches the expected dictionary for the specified file name and verifies the correct invocation of the open function. -**Relationship with Callees**: -The TestJsonFileProcessor class directly interacts with the JsonFileProcessor class by creating an instance of it and calling its methods. The tests simulate the environment and inputs JsonFileProcessor would encounter during its operation in the project, such as reading JSON files and extracting data. These tests ensure the reliability and correctness of the JsonFileProcessor's functionality before it is used in other parts of the project, such as the TextAnalysisTool and RepoAssistant, for processing JSON data. +**Note**: It is important to ensure that the JsonFileProcessor class is implemented correctly for these tests to pass. The tests rely on the structure of the JSON data being consistent with the expectations set in the test cases. -**Note**: -- It's essential to ensure that the JsonFileProcessor class and its methods are correctly implemented and that any changes to its functionality are reflected in the TestJsonFileProcessor tests to maintain the integrity of the testing suite. 
-- The use of the `@patch` decorator and `mock_open` function in the tests allows for the simulation of file operations without the need for actual files, enabling isolated testing of file processing logic. +**Output Example**: +For the test_read_json_file method, the expected output when read_json_file is called would be: +{"files": [{"objects": [{"md_content": "content1"}]}]} -**Output Example**: -An example output of the `test_read_json_file` method could be a successful assertion indicating that the method correctly reads and parses the JSON data as expected. For instance, after running the test suite, one might see a console output indicating that all tests passed, such as: -``` ----------------------------------------------------------------------- -Ran 3 tests in 0.002s +For the test_extract_md_contents method, the expected output for md_contents would include: +["content1"] -OK -``` +For the test_search_in_json_nested method, the expected output when searching for "file1" would be: +{"name": "file1"} ### FunctionDef setUp(self) -**setUp**: The function of setUp is to initialize the test environment for each test method within the test case. +**setUp**: The function of setUp is to initialize the test environment. -**parameters**: This function does not take any external parameters. +**parameters**: This function takes no parameters. -**Code Description**: The `setUp` method is a special method in the unittest framework. It is called before the execution of each test method in the test case class. In this specific implementation, the `setUp` method is used to create an instance of the `JsonFileProcessor` class, which is assigned to the `processor` attribute of the test case instance. The `JsonFileProcessor` is initialized with a file path, `"test.json"`, indicating that this processor is intended to operate on a JSON file named `test.json` located in the same directory or a specified path. The `JsonFileProcessor` class, as detailed in its documentation, is designed to handle operations related to JSON files, such as reading, extracting data, and searching within the JSON structure. By initializing this processor in the `setUp` method, each test method in the `TestJsonFileProcessor` class can use this pre-configured `JsonFileProcessor` instance to perform operations on the `test.json` file, facilitating the testing of the processor's functionality in a controlled environment. +**Code Description**: The setUp function is a test preparation function, typically used in unit testing frameworks. In this function, an instance named processor of type JsonFileProcessor is created with the string argument "test.json". This instantiation means that a fresh JsonFileProcessor object is created before each test case runs, ensuring that every test case starts from a clean state. The specific functionality and implementation details of the JsonFileProcessor class are not provided in this code snippet, but it can be inferred that it is related to processing JSON files. -**Relationship with Callees**: The `setUp` method directly interacts with the `JsonFileProcessor` class by creating its instance. This relationship is crucial for testing as it ensures that a fresh instance of `JsonFileProcessor` is available for each test, preventing the side effects of one test from affecting another. The `JsonFileProcessor` class, in turn, is responsible for performing various operations on JSON files, as described in its documentation. The initialization of `JsonFileProcessor` within `setUp` demonstrates the method's role in preparing the necessary objects and conditions for conducting tests effectively. - -**Note**: It is important to ensure that the `test.json` file exists and is accessible at the specified location before running the tests.
If the file is missing or the path is incorrect, the `JsonFileProcessor` instance will not be able to perform its intended operations, which could lead to test failures. Additionally, the use of `setUp` for initializing test prerequisites is a common practice in writing unit tests, as it helps in maintaining test isolation and reducing code duplication across test methods. +**Note**: When using the setUp function, make sure the JsonFileProcessor class is correctly implemented and that the "test.json" file exists at the expected path, to avoid file-not-found errors during test execution. *** ### FunctionDef test_read_json_file(self, mock_file) -**test_read_json_file**: The function of test_read_json_file is to verify the correct functionality of the read_json_file method in the JsonFileProcessor class by checking if it accurately reads and returns data from a JSON file. - -**Parameters**: -- `self`: Represents the instance of the class containing the test, allowing access to class attributes and methods. -- `mock_file`: A mock object used to simulate the behavior of the file handling functions without performing actual file operations. +**test_read_json_file**: The function of test_read_json_file is to test the functionality of the read_json_file method. -**Code Description**: This function is a unit test designed to ensure that the `read_json_file` method of the `JsonFileProcessor` class functions as expected. It does so by invoking the `read_json_file` method on an instance of `JsonFileProcessor` referred to by `self.processor`. The method is expected to read data from a JSON file and return it. The test then asserts that the returned data matches the expected data structure, `{"files": [{"objects": [{"md_content": "content1"}]}]}`, verifying the method's ability to correctly parse and return the contents of a JSON file. +**parameters**: The parameters of this Function. +· mock_file: A mock file object used to test file reading operations. -The `mock_file` parameter is utilized to mock the behavior of opening a file. This is crucial for unit testing because it allows the test to simulate the reading of a file without relying on an actual file being present on the filesystem. The mock ensures that when the `read_json_file` method attempts to open a file, it behaves as if a file named "test.json" was opened successfully with read mode and UTF-8 encoding. This mocking is validated by asserting that `mock_file` was called with the correct arguments. +**Code Description**: This function tests the correctness of the `read_json_file` method. First, it calls the `self.processor.read_json_file()` method to read the data from the JSON file. Next, it uses the `self.assertEqual` method to verify that the data read matches the expected dictionary structure, namely `{"files": [{"objects": [{"md_content": "content1"}]}]}`. Finally, `mock_file.assert_called_with("test.json", "r", encoding="utf-8")` confirms that the mocked file object was called with the correct arguments when the file was read, ensuring that the file name is "test.json", the mode is read-only ("r"), and UTF-8 encoding is used. -This test is directly related to the `read_json_file` method in the `JsonFileProcessor` class. The `read_json_file` method is responsible for opening a specified JSON file, reading its contents, and returning the data as a Python dictionary or list. The method's functionality is critical for the application's ability to process JSON files, and this test ensures that the method can correctly read and interpret JSON data as expected. - -**Note**: It is important to ensure that the `mock_file` is correctly set up to simulate the file reading operation accurately. This includes specifying the correct file name, mode, and encoding that the `read_json_file` method expects. Additionally, the expected data structure used in the assertion should accurately reflect the data structure that the `read_json_file` method is intended to return when reading the specific test JSON file.
This ensures that the test is a valid and effective verification of the method's functionality. +**Note**: When using this code, make sure the mock file object is set up correctly so that the file-reading functionality can be tested accurately. Also, ensure that the `read_json_file` method can handle the expected file format and content. *** ### FunctionDef test_extract_md_contents(self, mock_read_json) -**test_extract_md_contents**: The function of test_extract_md_contents is to verify that the `extract_md_contents` method correctly extracts Markdown content from a structured JSON input. - -**Parameters**: This function does not take any parameters directly. It uses a fixture `mock_read_json` to simulate the behavior of an external dependency that reads JSON data. - -**Code Description**: The `test_extract_md_contents` function is designed to test the `extract_md_contents` method within a class, presumably named something like `JsonFileProcessor` or similar, based on the context. The test begins by setting up a mock object `mock_read_json` to return a predefined JSON structure when called. This JSON structure mimics the expected format of the data that `extract_md_contents` would process, containing a list of files, each with a list of objects, and each object containing a `md_content` field with Markdown content. - -The mock setup is as follows: `mock_read_json.return_value = {"files": [{"objects": [{"md_content": "content1"}]}]}`. This means that when the `extract_md_contents` method attempts to read JSON data (presumably by calling a method that has been replaced by `mock_read_json`), it will receive this predefined structure. - -After setting up the mock, the test calls the `extract_md_contents` method on the `processor` instance (which is likely an instance of the class being tested, though the setup for this instance is not shown in the provided code snippet). The method is expected to process the mocked JSON data and extract the Markdown content. - -The test then uses `self.assertIn("content1", md_contents)` to verify that the Markdown content extracted by `extract_md_contents` includes the string "content1". This assertion checks that the method correctly navigates the JSON structure, extracts the Markdown content, and includes it in the returned value. +**test_extract_md_contents**: The function of test_extract_md_contents is to test the functionality of the extract_md_contents method. + +**parameters**: The parameters of this Function. +· mock_read_json: A mock function that replaces the actual JSON reading operation. + +**Code Description**: +This function mainly tests the correctness of the `extract_md_contents` method. First, `mock_read_json` is used to simulate reading a JSON file, returning a dictionary of file information that contains a list of objects, each of which has an `md_content` field. Specifically, the mocked JSON data structure is: +```json +{ + "files": [ + { + "objects": [ + { + "md_content": "content1" + } + ] + } + ] +} +``` +Next, the `self.processor.extract_md_contents()` method is called, whose purpose is to extract all `md_content` entries. Finally, the assertion `self.assertIn("content1", md_contents)` verifies that the extracted content includes "content1". If it does, the test passes, indicating that the `extract_md_contents` method correctly extracts the Markdown content from the JSON data. -**Note**: This test function relies on the `mock_read_json` fixture to simulate reading JSON data. It is important that the JSON structure returned by the mock accurately reflects the structure expected by the `extract_md_contents` method for the test to be valid. Additionally, the test assumes that the `extract_md_contents` method returns a collection (such as a list) of Markdown contents extracted from the JSON data.
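A condensed sketch of the mocking pattern described above (the `JsonFileProcessor` import path is an assumption, and `self.processor` is created in setUp as documented):

```python
import unittest
from unittest.mock import patch

from repo_agent.chat_with_repo.json_handler import JsonFileProcessor  # import path assumed


class TestJsonFileProcessor(unittest.TestCase):
    def setUp(self):
        self.processor = JsonFileProcessor("test.json")

    @patch.object(JsonFileProcessor, "read_json_file")
    def test_extract_md_contents(self, mock_read_json):
        # The mocked reader returns the nested structure shown above.
        mock_read_json.return_value = {
            "files": [{"objects": [{"md_content": "content1"}]}]
        }
        md_contents = self.processor.extract_md_contents()
        self.assertIn("content1", md_contents)
```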
+**Note**: When using this code, make sure the `extract_md_contents` method can handle the mocked JSON data structure and that `mock_read_json` is correctly configured in the test environment. -**Output Example**: While the function itself does not return a value (as it is a test function), the expected behavior of the `extract_md_contents` method being tested would be to return a list containing the Markdown strings extracted from the JSON data, such as `["content1"]` in this test scenario. +**Output Example**: The return value of this function may resemble the following structure: +```python +["content1"] +``` *** ### FunctionDef test_search_in_json_nested(self, mock_file) -**test_search_in_json_nested**: The function of test_search_in_json_nested is to verify the functionality of the `search_in_json_nested` method within a JSON file processor class, specifically testing its ability to correctly find and return a nested JSON object by key. - -**Parameters**: -- `self`: Represents the instance of the class in which this test function is defined, allowing access to the class attributes and methods. -- `mock_file`: A mock object provided to simulate the behavior of file operations, ensuring that the test does not depend on external files or the file system. +**test_search_in_json_nested**: The function of test_search_in_json_nested is to test the functionality of the search_in_json_nested method. -**Code Description**: -This function is designed to test the `search_in_json_nested` method of a JSON file processor class. It follows a structured approach to ensure the method functions as expected when searching for a nested JSON object by a specific key. The test is conducted as follows: +**parameters**: The parameters of this Function. +· parameter1: mock_file - A mock file object used to test file operations. -1. A call is made to the `search_in_json_nested` method of the processor instance, which is part of the test class. This method is expected to search within a JSON file named "test.json" for a nested object with the key "file1". -2. The result of this method call is then compared against the expected output, which in this case is a dictionary `{"name": "file1"}`. This comparison is done using the `assertEqual` method from the unittest framework, ensuring that the actual output matches the expected output exactly. -3. Finally, the test verifies that the `mock_file` object was called with the correct parameters. This is to ensure that the method attempted to open the file "test.json" in read mode with UTF-8 encoding. This step is crucial for validating that the method interacts with the file system as expected, albeit in a controlled, simulated environment provided by `mock_file`. +**Code Description**: This function tests the functionality of the `search_in_json_nested` method. First, it calls the `self.processor.search_in_json_nested` method with two arguments: the file name `"test.json"` and the keyword `"file1"` to search for. The expected result is a dictionary `{"name": "file1"}`, indicating that an entry matching the keyword was successfully found in the JSON file. Next, the `self.assertEqual` method verifies that the returned result matches the expected one; if it does, the test passes. Finally, `mock_file.assert_called_with` verifies that the file-opening method was called with the correct arguments during the test, ensuring that the file `"test.json"` was opened in read-only mode ("r") with UTF-8 encoding. -**Note**: -- This test function is part of a larger test suite designed to ensure the reliability and correctness of the JSON file processor's functionality. It specifically targets the ability of the processor to handle nested JSON structures, which is a common requirement in many applications. -- The use of a mock object for file operations is a best practice in unit testing, as it allows the test to be run in isolation without the need for actual files. This not only makes the test more reliable and faster to execute but also ensures that it does not inadvertently affect the file system.
+**Note**: When using this code, make sure `mock_file` is properly configured to simulate file operations so that real file reads and writes do not affect the test results. Also make sure the implementation of `search_in_json_nested` handles nested JSON data correctly so that it returns the expected result. *** diff --git a/markdown_docs/tests/test_main.md b/markdown_docs/tests/test_main.md deleted file mode 100644 index 80ade77..0000000 --- a/markdown_docs/tests/test_main.md +++ /dev/null @@ -1,36 +0,0 @@ -## ClassDef TestYourScript -Doc is waiting to be generated... -### FunctionDef test_load_config(self) -**test_load_config**: The function of test_load_config is to test the load_config function's ability to correctly parse and load configuration data from a YAML file. - -**Parameters**: This function does not accept any parameters as it is designed to be run as a part of a test suite, specifically using a testing framework that provides a context in which the function is executed. - -**Code Description**: The `test_load_config` function is designed to verify the correct functionality of the `load_config` function, which is responsible for loading and parsing configuration data from a YAML file. The test is conducted by creating a mock data string that simulates the content of a YAML configuration file, including keys and values for `api_key`, `api_base`, and `db_path`. - -The test employs the `patch` function from Python's `unittest.mock` module to temporarily replace the `open` function with a mock object. This mock object is configured to return the mock data string when read, simulating the reading of a YAML file named "dummy_config.yml". This approach allows the test to run without the need for an actual file on the filesystem, ensuring the test's independence and reliability. - -After setting up the mock, the `load_config` function is called with the argument "dummy_config.yml", and the returned configuration dictionary is checked to ensure it contains the correct values for `api_key`, `api_base`, and `db_path` as defined in the mock data. This is achieved using the `assertEqual` method provided by the testing framework, which compares the expected values to the actual values returned by the `load_config` function. - -**Note**: This test function assumes the existence of a `load_config` function that is capable of parsing YAML data into a Python dictionary. It also relies on the `unittest.mock` module for mocking the file reading process, which is a common practice in unit testing to isolate the function being tested from external dependencies. The test is designed to be part of a larger test suite, typically executed using a testing framework that provides the necessary context and utilities for running tests and reporting results. -*** -### FunctionDef test_main(self, mock_load_config, mock_gradio_interface, mock_repo_assistant) -**test_main**: The function of test_main is to test the main function's behavior in initializing components and setting up the application environment. - -**Parameters**: -- `self`: Represents the instance of the class in which the test_main function is defined, allowing access to the class attributes and methods. -- `mock_load_config`: A mock object for the function responsible for loading configuration settings. -- `mock_gradio_interface`: A mock object for the GradioInterface class, used to simulate the initialization of a Gradio interface without executing the actual Gradio code. -- `mock_repo_assistant`: A mock object for the RepoAssistant class, used to simulate the initialization of the repository assistant without executing the actual repository assistant code.
- -**Code Description**: The test_main function is designed to validate the correct initialization and setup of the application's main components, specifically focusing on the configuration loading, repository assistant, and Gradio interface initialization. Initially, it sets up mock responses for the configuration loading function to simulate the presence of an API key, API base URL, and database path. It then proceeds to execute the main function, which is expected to use these configuration settings to initialize the application components. - -The function checks if the RepoAssistant was initialized with the correct parameters (API key, API base URL, and database path) by verifying the arguments with which the mock_repo_assistant was called. This step ensures that the main function correctly uses the configuration settings to set up the repository assistant. - -Furthermore, the test_main function verifies that the GradioInterface was initialized with the correct function, specifically the respond method of the RepoAssistant instance. This check ensures that the main function correctly sets up the Gradio interface to use the repository assistant's respond method for handling user queries. - -The relationship with its callees in the project from a functional perspective is as follows: The main function, which is the target of this test, is responsible for initializing the application by setting up the repository assistant with the provided configuration settings and launching a Gradio interface for user interaction. The test_main function validates this behavior by simulating the configuration loading and checking the initialization of the RepoAssistant and GradioInterface with the expected parameters and methods. - -**Note**: This test function relies on the use of mock objects to simulate the behavior of external dependencies (configuration loading, RepoAssistant, and GradioInterface) without requiring actual external resources or services. This approach allows for isolated testing of the main function's behavior in a controlled environment. - -**Output Example**: Since this function is a test case designed to verify the behavior of the main function rather than produce a return value, there is no direct output example. However, the expected outcome is that all assertions pass, indicating that the main function correctly initializes the application components with the provided configuration settings. -*** diff --git a/markdown_docs/tests/test_prompt.md b/markdown_docs/tests/test_prompt.md deleted file mode 100644 index 0af7ce4..0000000 --- a/markdown_docs/tests/test_prompt.md +++ /dev/null @@ -1,131 +0,0 @@ -## ClassDef TestTextAnalysisTool -**TestTextAnalysisTool**: The function of TestTextAnalysisTool is to conduct unit tests on the TextAnalysisTool class, ensuring its methods function as expected. - -**Attributes**: -- `mock_llm`: A mocked instance of a language model used for generating text-based responses. -- `mock_json_processor`: A mocked instance of JsonFileProcessor for processing JSON files. -- `openai_patch`: A patch object for the OpenAI class to replace its behavior with `mock_llm`. -- `json_processor_patch`: A patch object for the JsonFileProcessor class to replace its behavior with `mock_json_processor`. -- `text_analysis_tool`: An instance of TextAnalysisTool initialized with mocked dependencies for testing. 
- -**Code Description**: -The TestTextAnalysisTool class is designed to validate the functionality of the TextAnalysisTool class, which is crucial for text analysis and processing within a software development context. This testing class uses the unittest framework to structure its tests, employing setUp and tearDown methods to prepare and clean up the testing environment, respectively. Mock objects are utilized to simulate the behavior of external dependencies, such as a language model and a JSON file processor, allowing for isolated testing of the TextAnalysisTool's logic. - -The class tests various functionalities of the TextAnalysisTool: -- `test_keyword` verifies that the keyword extraction method correctly identifies keywords from a given query. -- `test_tree` checks if the tree structure generation method accurately creates a hierarchical representation from text. -- `test_format_chat_prompt` ensures that chat prompts are formatted correctly with a given message and instruction. -- `test_queryblock` tests the ability to query code blocks from a JSON database based on a given message. -- `test_nerquery` assesses the named entity recognition query method for extracting relevant class or function names from a message. - -These tests are critical for ensuring that the TextAnalysisTool operates as intended, especially when integrated into larger systems or workflows. By mocking external dependencies, the tests focus solely on the logic within the TextAnalysisTool, ensuring its reliability and effectiveness in processing and analyzing text. - -**Note**: -- It is essential to ensure that all external dependencies are correctly mocked and that the patches are started before and stopped after each test to maintain test isolation and prevent side effects. -- The effectiveness of these tests relies on the accuracy and relevance of the mocked return values to simulate real-world scenarios as closely as possible. - -**Output Example**: -For a successful execution of `test_keyword`, assuming the mocked language model returns `"keyword1, keyword2, keyword3"` for a given query, the test would verify that `"keyword1"` is indeed part of the keywords extracted by the TextAnalysisTool, demonstrating the method's ability to extract relevant keywords from text. -### FunctionDef setUp(self) -**setUp**: The function of setUp is to prepare the necessary environment and mock objects before each test case in the TestTextAnalysisTool test suite. - -**parameters**: This function does not take any parameters as it is designed to set up the environment for test cases in a class. - -**Code Description**: The setUp function plays a crucial role in the unit testing of the TextAnalysisTool class by preparing a controlled test environment. It begins by creating mock objects for both the OpenAI language model and the JsonFileProcessor, which are essential components of the TextAnalysisTool. These mock objects are created using the MagicMock class, allowing the test cases to simulate interactions with these external dependencies without making actual calls to them. - -Following the creation of mock objects, the setUp function proceeds to patch the OpenAI and JsonFileProcessor classes within the 'your_module' namespace. This is achieved using the patch function from the unittest.mock module, which temporarily replaces these classes with the mock objects created earlier. 
By specifying the return_value parameter, any instantiation of these classes within the scope of the tests will return the predefined mock objects, ensuring that the tests are not dependent on the external behavior of these components. - -After starting the patches, the function initializes an instance of the TextAnalysisTool, passing the mock language model and a dummy database path ("db_path") as arguments. This instance is then available for use in the test cases, allowing them to interact with a version of the TextAnalysisTool that is isolated from its external dependencies. This isolation is key to achieving reliable and deterministic test outcomes. - -The relationship between the setUp function and its callees, specifically the TextAnalysisTool and the mocked dependencies, is foundational for testing the behavior of the TextAnalysisTool in a controlled environment. By mocking the dependencies, the tests can focus on the logic within the TextAnalysisTool without the unpredictability of external services. This approach ensures that the tests are both fast and reliable, as they do not require network calls or file system access. - -**Note**: It is important to stop the patches after the tests have completed to clean up the test environment and avoid side effects on other tests. This is typically done in a tearDown method, which was not shown in the provided code snippet. - -**Output Example**: Since setUp is a setup function for unit tests rather than a function that returns a value, there is no direct output example. However, after executing setUp, one would expect that the TextAnalysisTool instance is correctly initialized with mock objects, ready for testing. -*** -### FunctionDef tearDown(self) -**tearDown**: The function of tearDown is to stop the patches initiated during the test setup. - -**Parameters**: This function does not take any external parameters. - -**Code Description**: The `tearDown` method is a crucial part of the testing lifecycle in the `TestTextAnalysisTool` class, which is located within the `tests/test_prompt.py` file. This method is called after each test method in the class has been executed. Its primary role is to clean up or reset the environment to a stable state so that subsequent tests can run in a clean environment without being affected by the outcomes of previous tests. - -In the provided code snippet, the `tearDown` method performs the specific task of stopping two patches - `self.openai_patch` and `self.json_processor_patch`. These patches are presumably started in the setup phase of the test (often in a method named `setUp`) to mock or replace certain functionalities or external dependencies, such as interactions with the OpenAI API or JSON processing functionalities, for the duration of the test. By stopping these patches, the `tearDown` method ensures that the mocked or replaced functionalities are reverted back to their original state, thus preventing any side effects on other tests. - -**Note**: It is important to ensure that all patches or mock objects initiated during the test setup are properly stopped or cleaned up in the `tearDown` method. Failing to do so can lead to tests that are not isolated, potentially causing unpredictable test outcomes or interference between tests. This practice enhances the reliability and maintainability of the test suite. 
-*** -### FunctionDef test_keyword(self) -**test_keyword**: The function of test_keyword is to verify that the keyword extraction functionality correctly identifies and returns expected keywords from a given query. - -**Parameters**: This function does not accept any parameters directly as it is designed to be called without arguments in the context of a test suite. - -**Code Description**: The `test_keyword` function is a unit test designed to assess the accuracy and functionality of the `keyword` method within the `TextAnalysisTool` class. It begins by setting up a mock return value for the `complete` method of the `mock_llm` object, which simulates the behavior of a language model. This mock return value is a string containing three keywords, "keyword1, keyword2, keyword3", which serves as a predefined response to simulate the output of the language model when given a specific query. - -Following the setup, the `test_keyword` function invokes the `keyword` method of the `text_analysis_tool` instance, passing "test query" as the argument. This method is expected to construct a prompt based on the input query and obtain a response from the language model, which, in this test scenario, is mocked to return the predefined string of keywords. - -The core of the test lies in verifying that the keyword "keyword1" is included in the output of the `keyword` method. This is achieved through the use of the `assertIn` method, which checks if the first keyword from the mock response is part of the list of keywords returned by the `keyword` method. This assertion ensures that the `keyword` method correctly processes the language model's output and extracts the expected keywords from it. - -The relationship between the `test_keyword` function and its callee, the `keyword` method, is crucial for validating the text analysis capabilities of the system. By simulating the language model's response and verifying the extraction of keywords, this test plays a vital role in ensuring that the system can accurately identify and utilize keywords from user queries for further processing and analysis. - -**Note**: The effectiveness of this test is contingent upon the accurate simulation of the language model's response and the correct implementation of the `keyword` method. It is essential to update the test if the underlying logic of the `keyword` method changes, to ensure continued accuracy in testing. - -**Output Example**: While the `test_keyword` function does not return a value, a successful execution of this test would imply that the assertion passed, confirming that "keyword1" is correctly extracted from the simulated language model response "keyword1, keyword2, keyword3". -*** -### FunctionDef test_tree(self) -**test_tree**: The function of `test_tree` is to verify the correct functionality of the `tree` method within the `TextAnalysisTool` class, ensuring it properly interacts with a language model to generate a tree structure based on a given text query. - -**Parameters**: This function does not accept any parameters as it is designed to be executed within a test framework that automatically handles its invocation. - -**Code Description**: The `test_tree` function plays a crucial role in the testing phase of the `TextAnalysisTool` class, specifically focusing on the `tree` method. It begins by mocking the response of the language model to return a predefined string, "tree structure", when it receives a specific query, "test query". 
This setup is crucial for isolating the test from external dependencies, allowing for a controlled testing environment where the behavior of the language model is predictable and consistent. - -Following the mock setup, the `test_tree` function invokes the `tree` method of the `TextAnalysisTool` instance with a test query. The `tree` method, as described in its documentation, is responsible for generating a prompt based on the input query, which is then sent to the language model. The language model, mocked in this context, is expected to return a response that represents a tree structure of the analyzed text. - -The core of the `test_tree` function is the assertion that follows the method invocation. It asserts that the response from the `tree` method matches the mocked response ("tree structure"). This assertion is critical as it verifies that the `tree` method correctly communicates with the language model and accurately returns the model's response. A successful assertion indicates that the `tree` method functions as intended, capable of generating a tree structure based on the hierarchy of the input text. - -**Note**: It is important to understand that the effectiveness of this test is contingent upon the accuracy of the mock setup. The test assumes that the mocked response accurately represents a possible output of the language model. However, the actual functionality and performance of the `tree` method in a real-world scenario would depend on the implementation and capabilities of the language model it interacts with. - -**Output Example**: Since this function is designed for testing purposes, it does not return a value. However, the expected outcome of the `tree` method's invocation within this test is a string "tree structure", which represents the mocked response of the language model to the test query. This outcome is used to validate the method's functionality through assertion. -*** -### FunctionDef test_format_chat_prompt(self) -**test_format_chat_prompt**: The function of `test_format_chat_prompt` is to verify the correct formatting of chat prompts by the `TextAnalysisTool`. - -**Parameters**: This function does not accept any parameters directly as inputs since it is a test method within a test class. It operates on the instance of the test class it belongs to. - -**Code Description**: The `test_format_chat_prompt` function is a unit test designed to ensure the `format_chat_prompt` method of the `TextAnalysisTool` class correctly formats a chat prompt. The test does this by calling `format_chat_prompt` with predefined `message` and `instruction` arguments and then checks if the formatted prompt includes the expected "User: message" format. This is achieved using the `assertIn` method, which is part of the unittest framework, to verify that the substring "User: message" is indeed part of the formatted prompt returned by `format_chat_prompt`. The success of this test indicates that the `format_chat_prompt` method accurately incorporates the user's message into the formatted prompt, adhering to the expected dialogue structure. This is crucial for ensuring that the chat prompt formatting functionality of the `TextAnalysisTool` works as intended, facilitating its role in preparing inputs for natural language processing or chatbot applications. - -**Note**: It is essential for developers to maintain the integrity of this test when modifying the `format_chat_prompt` method or related components of the `TextAnalysisTool`. 
Changes that affect the formatting of the chat prompt should be accompanied by corresponding updates to this test to ensure continued accuracy and reliability of the testing process. This test plays a critical role in the automated testing framework, helping to catch regressions or unintended changes in the chat prompt formatting logic. -*** -### FunctionDef test_queryblock(self, mock_jsonsearch) -**test_queryblock**: The function of `test_queryblock` is to test the `queryblock` method of the `TextAnalysisTool` class, ensuring it correctly searches for and retrieves code content based on a specified message. - -**Parameters**: -- `self`: Represents an instance of the class containing the `test_queryblock` method. -- `mock_jsonsearch`: A mock object used to simulate the behavior of the `JsonFileProcessor` class, which is responsible for searching within a JSON file. - -**Code Description**: The `test_queryblock` function is designed to validate the functionality of the `queryblock` method within the `TextAnalysisTool` class. It begins by setting up a mock return value for the `search_in_json_nested` method of the `mock_jsonsearch` object. This mock setup is crucial as it simulates the scenario where the `queryblock` method successfully finds a match within the JSON file, returning a dictionary with a key `code_content` and a value `'test_code'`. - -Following the mock setup, the `test_queryblock` function invokes the `queryblock` method on an instance of the `TextAnalysisTool` class, passing a test message `"test message"` as an argument. The `queryblock` method, as described in its documentation, searches for this message within a JSON file and is expected to return the search results. - -The core of the `test_queryblock` function lies in its assertion statement. It asserts that the result of the `queryblock` method call should equal `'test_code'`, the mock value set up earlier. This assertion ensures that the `queryblock` method is correctly interfacing with the `JsonFileProcessor` (mocked as `mock_jsonsearch` in this test) and that it can retrieve the expected code content based on the input message. - -**Note**: This test function is crucial for ensuring the reliability and correctness of the `queryblock` method in the `TextAnalysisTool` class. It leverages mocking to simulate dependencies, allowing for isolated testing of the method's functionality. It is important for developers to understand that the success of this test relies on the accurate setup of the mock object and the expected behavior of the `queryblock` method. - -**Output Example**: Given the setup in this test function, the output of the `queryblock` method call would be `'test_code'`, which is a string representing the code content found in the JSON file based on the specified message. This output is what the test function expects to assert successfully. -*** -### FunctionDef test_nerquery(self) -**test_nerquery**: The function of test_nerquery is to verify the correct behavior of the nerquery method within the TextAnalysisTool class. - -**Parameters**: This function does not accept any parameters as it is designed to be executed within a test framework that automatically handles its invocation. - -**Code Description**: The test_nerquery function is a unit test designed to ensure that the nerquery method of the TextAnalysisTool class functions as expected. It begins by setting a return value "function_name" for the mock object self.mock_llm.complete, which simulates the behavior of a language model's response. 
This setup is crucial as it isolates the test from external dependencies, allowing for a controlled test environment. - -The function then proceeds to call the nerquery method on an instance of the TextAnalysisTool class, passing "test message" as an argument. This method is expected to construct a query based on the input message and use a language model to extract a relevant class or function name from the text. In this test scenario, the mocked language model is configured to return "function_name" as the extracted name. - -Following the method call, the test asserts that the return value of the nerquery method matches the expected mock return value "function_name". This assertion verifies that the nerquery method correctly processes the input message and retrieves the expected output from the language model. - -Additionally, the test checks that the debug method of the logger obtained from self.mock_logger_manager.get_logger() is called. This ensures that the nerquery method appropriately logs its operations, which is vital for debugging and monitoring the system's behavior. - -**Note**: This test function plays a critical role in the development process by ensuring that changes to the nerquery method or its dependencies do not inadvertently break its expected behavior. It is part of a larger suite of tests aimed at maintaining the reliability and stability of the TextAnalysisTool class. By mocking external dependencies, the test achieves isolation, making it robust against changes outside the scope of the nerquery method. - -**Output Example**: Since this function is a test, it does not return a value in the conventional sense. Instead, its execution will result in either a successful pass, indicating that the nerquery method behaves as expected, or a failure, highlighting a discrepancy between the expected and actual behavior of the method. -*** diff --git a/markdown_docs/tests/test_rag.md b/markdown_docs/tests/test_rag.md deleted file mode 100644 index 3b99041..0000000 --- a/markdown_docs/tests/test_rag.md +++ /dev/null @@ -1,87 +0,0 @@ -## ClassDef TestRepoAssistant -Doc is waiting to be generated... -### FunctionDef setUp(self) -Doc is waiting to be generated... -*** -### FunctionDef tearDown(self) -**Function Name**: tearDown - -**Function Purpose**: The function of `tearDown` is to stop the patches initiated during the test setup. - -**Parameters**: This function does not take any external parameters. - -**Code Description**: The `tearDown` method is a crucial part of the testing lifecycle in the `TestRepoAssistant` class, which is located within the `tests/test_rag.py` file. This method is automatically called after each test method in the class has been executed. Its primary role is to clean up or reset the environment to a known state before the next test runs. In the context of this specific implementation, the `tearDown` method is responsible for stopping a series of patches that were presumably started during the test setup phase. These patches include: - -- `self.openai_patch`: Likely patches interactions with the OpenAI API, ensuring that tests do not make actual calls to the OpenAI service. -- `self.text_analysis_tool_patch`: Presumably patches a text analysis tool, which could be used for processing or analyzing text within the tests without invoking the real tool. -- `self.json_file_processor_patch`: Suggests a patch for JSON file processing, possibly to mock the behavior of reading from or writing to JSON files during tests. 
-- `self.chroma_manager_patch`: Indicates a patch for the ChromaManager, which manages the ChromaDB vector store used for storing and retrieving document embeddings within the application being tested. - -By stopping these patches, the `tearDown` method ensures that any mocked or patched behavior does not persist beyond the scope of the individual test, thereby preventing unintended side effects on subsequent tests. - -**Note**: It is important for developers to ensure that all patches started during the test setup or within the test methods themselves are properly stopped in the `tearDown` method. This practice helps maintain test isolation and ensures that each test runs in a clean environment. Failure to stop patches can lead to tests that are flaky, difficult to understand, or misleading in their results. -*** -### FunctionDef test_generate_queries(self) -**test_generate_queries**: The function of test_generate_queries is to verify the correct functionality of the generate_queries method within the RepoAssistant class, ensuring it generates the expected number of queries based on a given input. - -**Parameters**: This function does not accept any parameters as it is a test method designed to be executed by a test runner. - -**Code Description**: The test_generate_queries function is a critical component of the test suite for the RepoAssistant class, specifically focusing on the generate_queries method. The function begins by setting a mock return value for the `complete` method of an OpenAI model to simulate the generation of three distinct queries ("Query1\nQuery2\nQuery3"). This setup is crucial for isolating the test environment from external dependencies, ensuring that the test's outcome is solely determined by the code under test and not by the current behavior or availability of the OpenAI API. - -Following the mock setup, the test invokes the generate_queries method of the RepoAssistant instance with a test query string and a specified number of queries to generate (in this case, 3). The generate_queries method, as described in its documentation, is designed to generate a specified number of search queries based on a single input query. It utilizes an underlying language model to produce these queries, which are then returned as a list. - -The core assertion of this test is to verify that the length of the returned list of queries matches the expected number of queries (3 in this test scenario). This assertion ensures that the generate_queries method correctly interprets the input parameters and that the integration with the mocked language model functions as intended. - -From a functional perspective within the project, this test plays a vital role in ensuring the reliability and correctness of the RepoAssistant's ability to generate search queries. These queries are fundamental in retrieving relevant documents and information, thereby supporting the assistant's response generation process. By validating the generate_queries method, the test contributes to the overall quality assurance of the system's search and response capabilities. - -**Note**: It is important to note that this test relies on the mocking of external dependencies (the OpenAI model in this case) to simulate the generation of queries. This approach allows for a controlled test environment but also necessitates that the mock setup accurately reflects the expected behavior of the external dependency.
- -**Output Example**: While the test itself does not return a value, the expected outcome of the generate_queries method being tested is a list of strings representing the generated queries, such as `["Query1", "Query2", "Query3"]`. This outcome is simulated in the test setup and verified through the assertion. -*** -### FunctionDef test_rag(self) -**test_rag**: The function of test_rag is to verify the correct behavior of the `rag` method within the RepoAssistant class, ensuring it generates the expected response based on a given query and a set of retrieved documents. - -**Parameters**: This function does not accept any parameters as it is a test method within a test class. - -**Code Description**: The `test_rag` function is a critical component of the testing suite for the RepoAssistant class, specifically designed to assess the functionality of the `rag` method. The method begins by setting up a mock response from an external language model API, which is expected to be called by the `rag` method. This is achieved by using the `mock_openai.complete.return_value` to simulate a language model's response, in this case, a simple text response "Response". - -Following the setup, the `test_rag` function invokes the `rag` method of the RepoAssistant instance with a test query and a list of mock documents. The purpose here is to simulate a real-world scenario where the RepoAssistant needs to generate a response based on a user's query and relevant documents retrieved from a repository. - -The core of the test lies in validating that the `rag` method behaves as expected. This is done through the `self.assertEqual` assertion, which compares the actual response from the `rag` method against the expected mock response set up earlier ("Response"). A successful test indicates that the `rag` method correctly interacts with the mocked language model API and returns the expected response. - -From a functional perspective, this test ensures that the `rag` method can effectively format the input query and documents, pass them to a language model, and return the model's generated response. It validates the integration point between the RepoAssistant's logic and the external language model, crucial for the assistant's ability to generate contextually relevant responses based on the repository's documents. - -**Note**: It is important to recognize that this test relies on mocking external dependencies, specifically the language model API. This approach isolates the test from external factors, focusing solely on the `rag` method's logic. However, it also means that changes in the actual behavior of the language model API or its interface could require adjustments to the test setup. - -**Output Example**: Given the mock setup in this test, the expected output when calling the `rag` method with any query and set of documents would be the string "Response". This output is a placeholder and does not reflect the complexity or content of a real response from a language model, which would typically generate a response based on the content of the provided documents and the query. -*** -### FunctionDef test_extract_and_format_documents(self) -**test_extract_and_format_documents**: The function of test_extract_and_format_documents is to verify the correct extraction and formatting of documents from a given result set. - -**Parameters**: This function does not take any parameters apart from the implicit `self` parameter, which represents an instance of the class containing this test method. 
- -**Code Description**: The `test_extract_and_format_documents` function is designed to test the `extract_and_format_documents` method of an assistant object, ensuring it correctly processes and formats document data. The test begins by defining a `test_results` variable, which simulates the output that might be returned from a hypothetical document retrieval process. This output is structured as a list of dictionaries, with each dictionary containing a key `documents` that maps to a list of document strings. - -The test proceeds by calling the `extract_and_format_documents` method on the `assistant` object, passing `test_results` as the argument. This method is expected to process the input and return a formatted version of the documents contained within. The exact nature of the formatting is not specified in the test code itself but is implied to involve extracting the document strings from their nested structure and possibly performing additional processing. - -After the call to `extract_and_format_documents`, the test checks that the returned value (`formatted_docs`) contains the expected document strings (`"doc1"` and `"doc2"`). This is done using the `assertIn` method from the testing framework, which verifies that each specified document string is indeed present in the `formatted_docs` collection. If either of the document strings is not found, the test will fail, indicating a problem with the `extract_and_format_documents` method's implementation. - -**Note**: This test function assumes that the `extract_and_format_documents` method is already implemented in the `assistant` object. It is designed to validate the correctness of that method's behavior rather than to test its performance or how it handles edge cases. Developers should ensure that additional tests are written to cover a wider range of scenarios, including invalid inputs and empty result sets, to fully validate the method's robustness and error handling capabilities. -*** -### FunctionDef test_respond(self) -**test_respond**: The function of test_respond is to verify the behavior of the respond method within the TestRepoAssistant class. - -**Parameters**: This function does not accept any parameters as it is a test method designed to run within a test suite. - -**Code Description**: The test_respond function is crafted to assess the functionality of the `respond` method in the `RepoAssistant` class, ensuring it operates as expected. The test simulates a scenario where a user sends a message along with an instruction, and the system is expected to generate a response. To achieve this, the test sets up mock objects and their return values to mimic the behavior of external dependencies involved in the respond method's execution. These dependencies include a text analysis tool, a chroma manager, and an OpenAI completion service. - -The test begins by configuring the mock objects to return predefined values when their respective methods are called. This setup includes formatting a chat prompt, extracting keywords, querying a document collection, and generating a text response. Following the setup, the test invokes the `respond` method with a test message and instruction, capturing the bot's response. - -The core of the test lies in verifying that the bot's response contains the expected text, which is mocked to be "Response" in this case. 
This verification is crucial as it confirms the method's ability to integrate various components (text analysis, document querying, and response generation) to produce a coherent and relevant reply to the user's query. - -The relationship between the test_respond function and its callees (mocked methods and the `respond` method) is foundational to understanding how the `RepoAssistant` processes user queries. The mocked methods represent external dependencies that the `respond` method relies on to perform text analysis, document retrieval, and response generation. By mocking these dependencies, the test isolates the `respond` method's logic, ensuring that the test's outcome is solely dependent on the method's internal logic and not on external factors. - -**Note**: It is important to understand that this test function uses mock objects to simulate the behavior of external dependencies. This approach allows for the isolation of the `respond` method's functionality, making the test more reliable and focused. However, it also means that the test's effectiveness is contingent on the accuracy of the mock objects' configurations. Therefore, changes in the external dependencies' behavior may necessitate adjustments to the test setup. - -**Output Example**: While the test function itself does not return a value, it is designed to assert that the bot's response contains the expected text. An example outcome of this test could be a successful assertion indicating that the bot's message indeed includes the text "Response", thereby passing the test. -*** diff --git a/markdown_docs/tests/test_structure_tree.md b/markdown_docs/tests/test_structure_tree.md index b940560..eed6808 100644 --- a/markdown_docs/tests/test_structure_tree.md +++ b/markdown_docs/tests/test_structure_tree.md @@ -1,96 +1,80 @@ ## FunctionDef build_path_tree(who_reference_me, reference_who, doc_item_path) -**build_path_tree**: The function of `build_path_tree` is to generate a hierarchical representation of paths and document item paths, marking the specified document item path with a special symbol. +**build_path_tree**: The function of build_path_tree is to create a hierarchical representation of file paths based on provided references and a specific document item path. -**Parameters**: -- `who_reference_me`: A list of paths that reference the document item. -- `reference_who`: A list of paths that the document item references. -- `doc_item_path`: The specific path of the document item to be marked in the tree. +**parameters**: The parameters of this Function. +· parameter1: who_reference_me - A list of file paths that reference the current entity. +· parameter2: reference_who - A list of file paths that reference another entity. +· parameter3: doc_item_path - A specific file path that needs to be highlighted in the output. -**Code Description**: -The `build_path_tree` function creates a hierarchical tree structure to represent the relationships between different paths, including those that reference a document item and those that are referenced by it. It uses a nested `defaultdict` to dynamically create the tree as paths are added. The function processes two lists of paths (`who_reference_me` and `reference_who`) to build the initial tree structure. Each path is split into parts using the OS-specific path separator, and these parts are used to navigate and populate the tree. +**Code Description**: The build_path_tree function constructs a nested dictionary structure representing a tree of file paths. 
It begins by defining an inner function, tree, which initializes a defaultdict that allows for the creation of nested dictionaries automatically. The variable path_tree is then assigned the result of calling this inner function. -After constructing the tree with references, the function processes the `doc_item_path` by splitting it into parts and appending a special symbol (`✳️`) to the last part, which represents the document item itself. This modified path is then added to the tree, ensuring the document item is distinctly marked. +The function processes two lists of paths: who_reference_me and reference_who. For each path in these lists, it splits the path into its components using the operating system's path separator (os.sep). It then traverses the path_tree structure, creating nested dictionaries for each part of the path. -The `tree_to_string` inner function is used to convert the tree into a string representation. It recursively traverses the tree, converting each level to a string with appropriate indentation to reflect the hierarchical structure. The keys (path parts) are sorted alphabetically at each level to ensure a consistent order in the output. +After processing the reference paths, the function handles the doc_item_path. It splits this path into components as well, but modifies the last component by prefixing it with a star symbol (✳️) to indicate it as a special item. The function again traverses the path_tree to include this modified path. -**Note**: -- The function uses the `os.sep` constant for splitting paths, which makes it platform-independent (works on Unix/Linux, Windows, etc.). -- The special symbol `✳️` is used to mark the document item in the tree. Ensure that this symbol does not conflict with any actual path names. -- The tree is represented as a nested `defaultdict`, which might not be immediately intuitive for those unfamiliar with this data structure. It allows for the dynamic addition of nodes without explicitly checking if they exist. +Finally, the function defines another inner function, tree_to_string, which recursively converts the tree structure into a string representation. This function sorts the keys at each level and adds indentation based on the depth of the tree. The resulting string representation of the path_tree is returned as the output of the build_path_tree function. -**Output Example**: -Given the following inputs: -- `who_reference_me`: `["tests/test_structure_tree.py", "tests/test_other.py"]` -- `reference_who`: `["src/module.py"]` -- `doc_item_path`: `"tests/test_structure_tree.py/build_path_tree"` +**Note**: It is important to ensure that the input paths are formatted correctly and that the os module is imported for the path separator to function properly. The output string will visually represent the hierarchy of paths, with the doc_item_path clearly marked. -The possible output could look like this: -``` -src - module.py -tests - test_other.py - test_structure_tree.py - build_path_tree - ✳️build_path_tree -``` -This output represents a tree where the `build_path_tree` document item is marked within its path, showing both its references and what references it in a structured and readable format. -### FunctionDef tree -**Function**: tree - -**Function of tree**: The function creates a recursive defaultdict structure. - -**Parameters**: This function does not take any parameters. 
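+Before the output example, here is a hedged sketch — consistent with the description above, but not necessarily the project's exact code — of how tree, the path insertion, and tree_to_string fit together:
+```python
+import os
+from collections import defaultdict
+
+def build_path_tree(who_reference_me, reference_who, doc_item_path):
+    # A recursive defaultdict: indexing a missing key creates another level.
+    def tree():
+        return defaultdict(tree)
+
+    path_tree = tree()
+    # Insert every referencing and referenced path into the tree.
+    for path in [*who_reference_me, *reference_who]:
+        node = path_tree
+        for part in path.split(os.sep):
+            node = node[part]
+    # Mark the document item by prefixing its last component with a star.
+    parts = doc_item_path.split(os.sep)
+    parts[-1] = "✳️" + parts[-1]
+    node = path_tree
+    for part in parts:
+        node = node[part]
+
+    def tree_to_string(tree, indent=0):
+        # Sort keys at each level and indent by depth.
+        s = ""
+        for key, value in sorted(tree.items()):
+            s += "    " * indent + key + "\n"
+            if isinstance(value, dict):
+                s += tree_to_string(value, indent + 1)
+        return s
+
+    return tree_to_string(path_tree)
+```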
+**Output Example**: +Assuming the following inputs: +who_reference_me = ["folder1/fileA.txt", "folder1/folder2/fileB.txt"] +reference_who = ["folder3/fileC.txt"] +doc_item_path = "folder1/folder2/fileB.txt" -**Code Description**: The `tree` function is designed to generate a recursive data structure using Python's `defaultdict` from the `collections` module. The unique aspect of this function is its self-referencing nature, which allows it to create an infinitely nested dictionary. When the `tree` function is called, it returns a `defaultdict` object where the default factory is the `tree` function itself. This means that any attempt to access a non-existent key in the dictionary will result in the creation of another `defaultdict` with the same properties, allowing for dynamic and virtually unlimited nesting. - -**Note**: The use of the `tree` function can be particularly useful in scenarios where one needs to build a dynamically expanding tree-like data structure, such as in the representation of file systems, organizational hierarchies, or any scenario requiring nested mappings. However, users should be cautious of the potential for creating deeply nested structures that could lead to issues like maximum recursion depth errors if not handled properly. - -**Output Example**: -```python -from collections import defaultdict +The output of the function could look like this: +folder1 + fileA.txt + folder2 + ✳️fileB.txt +folder3 + fileC.txt ### FunctionDef tree +**tree**: The function of tree is to return a defaultdict whose default value is a new tree. -# Assuming the tree function is defined as per the given code -nested_dict = tree() +**parameters**: This function takes no parameters. -# Adding items to the nested dictionary -nested_dict['level1']['level2']['level3'] = 'deep value' +**Code Description**: The tree function uses defaultdict from Python's collections module. defaultdict is a dict subclass that supplies a default value: when a missing key is accessed, a new value is created automatically. In this implementation, tree returns a defaultdict whose default factory is the tree function itself, so every access to a missing key automatically creates another defaultdict. This recursive structure can be used to build tree-shaped data in which each node may have any number of dynamically named child nodes. -# Accessing the nested value -print(nested_dict['level1']['level2']['level3']) # Output: deep value +**Note**: When using this code, be aware that every access to a missing key silently creates another nested defaultdict, so arbitrarily deep structures can grow unchecked unless some condition or limit is imposed. -# Attempting to access a non-existent key creates a new defaultdict at that level -print(nested_dict['level1']['level2']['new_level']) # Output: defaultdict(<function tree at 0x...>, {}) +**Output Example**: After calling the tree function, a structure like the following may result: ``` - -In the output example, a nested dictionary structure is created and manipulated. The example demonstrates how accessing or setting keys at any depth automatically creates the necessary nested dictionaries without explicitly initializing them. This behavior is facilitated by the recursive nature of the `tree` function. +defaultdict(<function tree at 0x...>, { + 'key1': defaultdict(<function tree at 0x...>, { + 'subkey1': defaultdict(<function tree at 0x...>, {}), + 'subkey2': defaultdict(<function tree at 0x...>, {}) + }), + 'key2': defaultdict(<function tree at 0x...>, {}) +}) +``` +In this example, 'key1' and 'key2' are top-level keys, while 'subkey1' and 'subkey2' are child keys under 'key1'. *** ### FunctionDef tree_to_string(tree, indent) -**tree_to_string**: The function of `tree_to_string` is to convert a hierarchical tree structure into a formatted string representation. - -**Parameters**: -- `tree`: A dictionary representing the tree structure where each key is a node, and its value is either another dictionary (representing a subtree) or an end node. -- `indent`: An integer representing the current indentation level for formatting the output string. It defaults to 0, meaning no indentation for the root level.
- -**Code Description**: -The `tree_to_string` function iterates over the items of the provided `tree` dictionary. The items are sorted by keys to ensure a consistent output order. For each key-value pair, the function appends to a string `s` the key followed by a newline character. The key is prefixed with a series of spaces that correspond to the current `indent` level, where each level adds four spaces to the indentation, simulating a tree structure visually. - -If the value associated with a key is itself a dictionary, indicating a subtree, the function recursively calls itself with the subtree and an incremented `indent` value. This process adds the subtree's string representation to `s` with the appropriate indentation, effectively traversing the entire tree depth-first and building a hierarchical string representation of it. - -**Note**: - The function assumes that the input `tree` is a dictionary and will not work correctly if passed other types without modification. - The output string does not have a trailing newline character at the end. If needed, this should be added after the function call. - The function uses recursion, which means it could reach the Python recursion limit with very deep trees. - -**Output Example**: -Given a tree structure like `{'a': {'b': {}, 'c': {'d': {}}}}`, the `tree_to_string` function would return the following string representation: - -``` -a - b - c - d -``` - -This output demonstrates the hierarchical nature of the input tree, with each level of depth indented further to visually represent the structure. +**tree_to_string**: The function of tree_to_string is to convert a tree structure into string form for easy visual display. + +**parameters**: The parameters of this function are as follows: +· parameter1: tree - A dictionary-shaped tree structure whose keys are node names and whose values are child nodes (dictionaries or other types). +· parameter2: indent - An integer giving the indentation level of the current node; it defaults to 0. + +**Code Description**: The tree_to_string function recursively walks the given tree structure and formats it as a string. It first initializes an empty string s to hold the final result. It then sorts the tree's key-value pairs and processes each key in turn: for each key it appends the appropriate number of spaces (controlled by the indent parameter), then the key name, then a newline. If the value for a key is itself a dictionary, the function calls itself recursively with an increased indentation level (indent + 1) to handle the subtree. The final string, showing the hierarchy of the tree, is returned. + +**Note**: When using this function, make sure the tree argument is a dictionary whose values may be dictionaries or other types. The indent argument should be a non-negative integer so that the output is formatted correctly. + +**Output Example**: Suppose the input tree structure is: +{ + "root": { + "child1": {}, + "child2": { + "grandchild1": {} + } + }, + "another_root": {} +} +After calling tree_to_string, the returned string might look like: +root + child1 + child2 + grandchild1 +another_root *** diff --git a/markdown_docs/tests/test_vectordb.md b/markdown_docs/tests/test_vectordb.md deleted file mode 100644 index 1d34b7d..0000000 --- a/markdown_docs/tests/test_vectordb.md +++ /dev/null @@ -1,106 +0,0 @@ -## ClassDef TestChromaManager -**TestChromaManager**: The function of TestChromaManager is to provide a suite of unit tests for the ChromaManager class, ensuring its methods function as expected. - -**Attributes**: -- `mock_client`: A mocked instance of the ChromaDB Client used for testing without the need for a real database connection. -- `mock_collection`: A MagicMock object simulating a collection within the ChromaDB Client. -- `chroma_manager`: An instance of the ChromaManager class initialized with dummy API credentials, used for testing its methods. - -**Code Description**: -The TestChromaManager class is designed to test the functionality of the ChromaManager class, which is part of a system for managing chroma collections in a database. It inherits from `unittest.TestCase`, allowing it to use a wide range of assertions and testing capabilities provided by the unittest framework.
- -- The `setUp` method is decorated with `@patch('chromadb.Client')`, indicating that it temporarily replaces the `Client` class from the `chromadb` module with a mock object during the test. This method initializes the mock objects and the `chroma_manager` instance with dummy parameters, ensuring that tests run in a controlled environment without external dependencies like a real database connection. - -- The `test_init` method verifies that the `chroma_manager` object is initialized correctly, specifically checking that its `chroma_collection` attribute is not `None`. This ensures that the ChromaManager class can successfully initialize its internal state. - -- The `test_init_chroma_collection` method tests the `init_chroma_collection` method of the `chroma_manager`. It checks that the appropriate methods on the mock client are called to create and retrieve a collection, and also verifies that the `chroma_collection` attribute of the `chroma_manager` is correctly initialized. - -- The `test_create_vector_store` method is decorated with `@patch('chromadb.utils.embedding_functions.OpenAIEmbeddingFunction')`, which mocks the embedding function used to generate vector embeddings for documents. This test verifies that the `create_vector_store` method correctly processes a list of documents, generates embeddings using the mocked embedding function, and calls the `add` method on the mock collection with the correct parameters. - -**Note**: -- The use of mock objects and the `patch` decorator is crucial for isolating the unit tests from external dependencies, allowing for more reliable and faster tests. -- These tests do not interact with a real database or external services, making them suitable for continuous integration environments. -- The tests assume the existence of certain methods (`create_collection`, `get_collection`, `add`) on the mocked objects, which should match the interface of the actual objects they represent. - -**Output Example**: -Since this documentation describes a test class, there is no direct output from running the class itself. However, when the test suite is executed, it might produce an output similar to the following if all tests pass: -``` -.... ----------------------------------------------------------------------- -Ran 3 tests in 0.002s - -OK -``` -This indicates that three tests were run successfully without any failures or errors. -### FunctionDef setUp(self, MockClient) -**setUp**: The function of setUp is to initialize the test environment for the TestChromaManager class by setting up a mock ChromaDB client and creating an instance of ChromaManager with predefined API key and base URL. - -**Parameters**: -- MockClient: A mock object used to simulate the behavior of the ChromaDB client. - -**Code Description**: -The `setUp` function is designed to prepare the testing environment for the `TestChromaManager` class. It begins by creating a mock instance of the ChromaDB client using the `MockClient` parameter. This mock client is essential for isolating the unit tests from external dependencies, ensuring that the tests are deterministic and can run in any environment without requiring access to a live database. - -Once the mock client is set up, the function proceeds to create a mock collection object using `MagicMock()`. This mock collection simulates the behavior of a collection within the ChromaDB, allowing for the testing of collection-related operations without interacting with an actual database. 
The mock client is then configured to return this mock collection whenever its `create_collection` or `get_collection` methods are called. This setup ensures that any collection operations performed during the tests are routed to the mock collection, further isolating the tests from external dependencies. - -After setting up the mock ChromaDB client and collection, the function initializes an instance of the `ChromaManager` class with a dummy API key and base URL. The `ChromaManager` is a crucial component of the system, responsible for managing interactions with a vector database for storing and retrieving document embeddings. By initializing it within the `setUp` function, the tests can verify the behavior of the `ChromaManager` under controlled conditions, ensuring that it correctly interacts with the mock ChromaDB client and performs its intended functions. - -The relationship between the `setUp` function and its callees, particularly the `ChromaManager`, is foundational for testing the integration and functionality of the `ChromaManager` within the system. By using mock objects and a controlled environment, the `setUp` function allows for thorough testing of the `ChromaManager`'s behavior, ensuring that it correctly manages the vector database interactions as intended. - -**Note**: -- It is important to ensure that the mock objects used in the `setUp` function accurately simulate the behavior of their real counterparts to ensure the validity of the tests. -- The dummy API key and base URL used to initialize the `ChromaManager` are placeholders and should be replaced with actual values when testing against a live system. - -**Output Example**: -Since `setUp` is a setup function for unit tests and does not return a value, there is no direct output example. However, after executing `setUp`, the test environment will have a mock ChromaDB client and a `ChromaManager` instance ready for testing. -*** -### FunctionDef test_init(self) -**test_init**: The function of test_init is to verify that the ChromaManager object is initialized correctly. - -**Parameters**: This function does not take any external parameters, as it is designed to operate on the instance variables of its class context. - -**Code Description**: The `test_init` function is a unit test designed to ensure that the `chroma_manager` object, presumably a part of the test class, is initialized properly. It specifically checks that the `chroma_collection` attribute of the `chroma_manager` object is not `None` after initialization. This is achieved through the use of the `assertIsNotNone` method from a testing framework, which is called on `self.chroma_manager.chroma_collection`. The method `assertIsNotNone` is used to make sure that the `chroma_collection` attribute exists and has been set to a value other than `None`, indicating that the initialization process of the `chroma_manager` object has successfully completed and it is ready for further operations. This kind of test is crucial in test-driven development (TDD) environments or any scenario where ensuring the correct initialization of objects is necessary for the stability and reliability of the software. - -**Note**: It is important to ensure that the `chroma_manager` object and its `chroma_collection` attribute are correctly defined and accessible within the scope of this test function. 
-
-**Note**: Ensure that the `chroma_manager` object and its `chroma_collection` attribute are correctly defined and accessible within the scope of this test. The test assumes that `chroma_manager` has already been instantiated and that `chroma_collection` is set during instantiation; a failure here points to the object's constructor or to the setup phase of the test environment.
-***
-### FunctionDef test_init_chroma_collection(self)
-**test_init_chroma_collection**: The function of test_init_chroma_collection is to verify the initialization of a chroma collection within the `ChromaManager`.
-
-**Parameters**: This function accepts no parameters; as a test method, it operates on the instance's state and behavior.
-
-**Code Description**: The `test_init_chroma_collection` function validates the `init_chroma_collection` method of the `ChromaManager` class in three steps:
-
-1. **Initialization Call**: It invokes the `init_chroma_collection` method on the `chroma_manager` instance. This is the primary action under test: it should either initialize a new chroma collection named "test" or retrieve it if it already exists in the database.
-
-2. **Verification of Method Calls**: It verifies that the `create_collection` and `get_collection` methods of the mock client (`mock_client`) are each called exactly once, confirming that `init_chroma_collection` interacts with the database as expected, attempting to create the collection and then retrieving it.
-
-3. **State Assertion**: It asserts that the `chroma_collection` attribute of the `chroma_manager` is not `None` afterwards, confirming that `init_chroma_collection` leaves a usable collection in place.
-
-**Relationship with Callees**: The test directly exercises `init_chroma_collection` and, by verifying the interactions with the mock database client, confirms that the `ChromaManager` can correctly initialize or retrieve the "test" collection in isolation.
-
-**Note**: This test relies on a mock database client (`mock_client`) to simulate database interactions without affecting a real database, which keeps the test reliable and repeatable.
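A sketch of these three steps, reusing the mocks from the `setUp` sketch above (the call-count assertions assume the mocks were not already exercised during construction):

```python
    def test_init_chroma_collection(self):
        self.chroma_manager.init_chroma_collection()

        # The manager should create the "test" collection and fetch it back.
        self.mock_client.create_collection.assert_called_once()
        self.mock_client.get_collection.assert_called_once()

        # A usable collection handle must be in place afterwards.
        self.assertIsNotNone(self.chroma_manager.chroma_collection)
```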
-***
-### FunctionDef test_create_vector_store(self, MockEmbeddingFunction)
-**test_create_vector_store**: The function of test_create_vector_store is to verify the behavior of the `create_vector_store` method of the `ChromaManager` class, specifically that it processes and stores Markdown content in the vector database accurately.
-
-**Parameters**:
-- `self`: Represents an instance of the test class, allowing access to its attributes and methods.
-- `MockEmbeddingFunction`: A mock object injected by the `patch` decorator, simulating the external embedding function that would normally generate embeddings for the Markdown content.
-
-**Code Description**:
-The `test_create_vector_store` function begins by setting up a mock embedding function, `mock_embedding_function`, from the provided `MockEmbeddingFunction` parameter. This mock is configured to return a predefined list of embeddings, `[0.1, 0.2, 0.3]`, when called, simulating the step in which Markdown documents are converted into embeddings by an external function before storage in a vector database.
-
-The test then defines a list of Markdown contents, `md_contents`, containing three sample documents, and calls the `create_vector_store` method of the `ChromaManager` instance, `self.chroma_manager`, passing `md_contents` as the argument. This call is the core action under test: it is responsible for processing the Markdown content and storing it in the vector database.
-
-After the call, the test verifies two behaviors:
-1. The mock embedding function was called with the correct argument, namely the `md_contents` list. This confirms that `create_vector_store` generates embeddings for the provided Markdown content before storing it.
-2. The `add` method of the mock collection, `self.mock_collection`, was called with the correct parameters: a list of string identifiers (`['0', '1', '2']`), the original `md_contents`, and the mock embeddings. This confirms that `create_vector_store` stores the documents and their embeddings in the database under a generated identifier for each document.
-
-By simulating the embedding generation and verifying the interactions with the mock collection, the test ensures that `create_vector_store` accurately processes and stores Markdown content in the vector database.
-
-**Note**:
-- The use of mock objects isolates the behavior of `create_vector_store`, eliminating dependencies on external systems or functions.
-- Checking that the mock embedding function and the mock collection are called with the correct arguments is what validates the correctness of the `create_vector_store` implementation.
-
-**Output Example**: Not applicable; this function does not return a value but verifies method calls and interactions with mock objects.
-***
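Taken together, the verifications above can be sketched as the final method of the `TestChromaManager` sketch. The keyword names in the `add` call, and the assumption that `create_vector_store` instantiates the embedding function at call time (so the patch takes effect), are illustrative rather than confirmed by this documentation:

```python
    @patch("chromadb.utils.embedding_functions.OpenAIEmbeddingFunction")
    def test_create_vector_store(self, MockEmbeddingFunction):
        # The mocked embedding function returns a fixed embedding list.
        mock_embedding_function = MockEmbeddingFunction.return_value
        mock_embedding_function.return_value = [0.1, 0.2, 0.3]

        md_contents = ["# Doc 0", "# Doc 1", "# Doc 2"]
        self.chroma_manager.create_vector_store(md_contents)

        # The Markdown contents are handed to the embedding function ...
        mock_embedding_function.assert_called_with(md_contents)
        # ... and stored with generated string ids alongside their embeddings.
        self.mock_collection.add.assert_called_with(
            ids=["0", "1", "2"],
            documents=md_contents,
            embeddings=[0.1, 0.2, 0.3],
        )
```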
diff --git a/pdm.lock b/pdm.lock index 41d2fd7..ee7eb05 100644 --- a/pdm.lock +++ b/pdm.lock @@ -2,20 +2,20 @@ # It is not intended for manual editing. [metadata] -groups = ["default", "dev", "test"] -strategy = ["cross_platform", "inherit_metadata"] +groups = ["default", "chat_with_repo", "dev", "lint", "test"] +strategy = ["inherit_metadata"] lock_version = "4.5.0" -content_hash = "sha256:876695fb810cf4656cadeddb35e5010c8cc214ad96d675375c7024da1b2d7ed1" +content_hash = "sha256:766ee25c5b22cc82969d2853c150cb00c44d39eacbad7867be439c5912377b47" [[metadata.targets]] -requires_python = "~=3.10" +requires_python = "~=3.11" [[package]] name = "aiofiles" version = "23.2.1" requires_python = ">=3.7" summary = "File support for asyncio." -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107"}, {file = "aiofiles-23.2.1.tar.gz", hash = "sha256:84ec2218d8419404abcb9f0c02df3f34c6e0a68ed41072acfb1cef5cbc29051a"}, @@ -26,7 +26,7 @@ name = "aiohappyeyeballs" version = "2.4.3" requires_python = ">=3.8" summary = "Happy Eyeballs for asyncio" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "aiohappyeyeballs-2.4.3-py3-none-any.whl", hash = "sha256:8a7a83727b2756f394ab2895ea0765a0a8c475e3c71e98d43d76f22b4b435572"}, {file = "aiohappyeyeballs-2.4.3.tar.gz", hash = "sha256:75cf88a15106a5002a8eb1dab212525c00d1f4c0fa96e551c9fbe6f09a621586"}, @@ -34,81 +34,67 @@ files = [ [[package]] name = "aiohttp" -version = "3.10.9" -requires_python = ">=3.8" +version = "3.11.2" +requires_python = ">=3.9" summary = "Async http client/server framework (asyncio)" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "aiohappyeyeballs>=2.3.0", "aiosignal>=1.1.2", - "async-timeout<5.0,>=4.0; python_version < \"3.11\"", + "async-timeout<6.0,>=4.0; python_version < \"3.11\"", "attrs>=17.3.0", "frozenlist>=1.1.1", "multidict<7.0,>=4.5", - "yarl<2.0,>=1.12.0", -] -files = [ - {file = "aiohttp-3.10.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8b3fb28a9ac8f2558760d8e637dbf27aef1e8b7f1d221e8669a1074d1a266bb2"}, - {file = "aiohttp-3.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:91aa966858593f64c8a65cdefa3d6dc8fe3c2768b159da84c1ddbbb2c01ab4ef"}, - {file = "aiohttp-3.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:63649309da83277f06a15bbdc2a54fbe75efb92caa2c25bb57ca37762789c746"}, - {file = "aiohttp-3.10.9-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3e7fabedb3fe06933f47f1538df7b3a8d78e13d7167195f51ca47ee12690373"}, - {file = "aiohttp-3.10.9-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5c070430fda1a550a1c3a4c2d7281d3b8cfc0c6715f616e40e3332201a253067"}, - {file = "aiohttp-3.10.9-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:51d0a4901b27272ae54e42067bc4b9a90e619a690b4dc43ea5950eb3070afc32"}, - {file = "aiohttp-3.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fec5fac7aea6c060f317f07494961236434928e6f4374e170ef50b3001e14581"}, - {file = "aiohttp-3.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:172ad884bb61ad31ed7beed8be776eb17e7fb423f1c1be836d5cb357a096bf12"}, - {file = "aiohttp-3.10.9-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d646fdd74c25bbdd4a055414f0fe32896c400f38ffbdfc78c68e62812a9e0257"}, - {file = "aiohttp-3.10.9-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e86260b76786c28acf0b5fe31c8dca4c2add95098c709b11e8c35b424ebd4f5b"}, - {file = 
"aiohttp-3.10.9-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:c7d7cafc11d70fdd8801abfc2ff276744ae4cb39d8060b6b542c7e44e5f2cfc2"}, - {file = "aiohttp-3.10.9-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:fc262c3df78c8ff6020c782d9ce02e4bcffe4900ad71c0ecdad59943cba54442"}, - {file = "aiohttp-3.10.9-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:482c85cf3d429844396d939b22bc2a03849cb9ad33344689ad1c85697bcba33a"}, - {file = "aiohttp-3.10.9-cp310-cp310-win32.whl", hash = "sha256:aeebd3061f6f1747c011e1d0b0b5f04f9f54ad1a2ca183e687e7277bef2e0da2"}, - {file = "aiohttp-3.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:fa430b871220dc62572cef9c69b41e0d70fcb9d486a4a207a5de4c1f25d82593"}, - {file = "aiohttp-3.10.9-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:16e6a51d8bc96b77f04a6764b4ad03eeef43baa32014fce71e882bd71302c7e4"}, - {file = "aiohttp-3.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:8bd9125dd0cc8ebd84bff2be64b10fdba7dc6fd7be431b5eaf67723557de3a31"}, - {file = "aiohttp-3.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dcf354661f54e6a49193d0b5653a1b011ba856e0b7a76bda2c33e4c6892f34ea"}, - {file = "aiohttp-3.10.9-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42775de0ca04f90c10c5c46291535ec08e9bcc4756f1b48f02a0657febe89b10"}, - {file = "aiohttp-3.10.9-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87d1e4185c5d7187684d41ebb50c9aeaaaa06ca1875f4c57593071b0409d2444"}, - {file = "aiohttp-3.10.9-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2695c61cf53a5d4345a43d689f37fc0f6d3a2dc520660aec27ec0f06288d1f9"}, - {file = "aiohttp-3.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a3f063b41cc06e8d0b3fcbbfc9c05b7420f41287e0cd4f75ce0a1f3d80729e6"}, - {file = "aiohttp-3.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2d37f4718002863b82c6f391c8efd4d3a817da37030a29e2682a94d2716209de"}, - {file = "aiohttp-3.10.9-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:2746d8994ebca1bdc55a1e998feff4e94222da709623bb18f6e5cfec8ec01baf"}, - {file = "aiohttp-3.10.9-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:6f3c6648aa123bcd73d6f26607d59967b607b0da8ffcc27d418a4b59f4c98c7c"}, - {file = "aiohttp-3.10.9-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:558b3d223fd631ad134d89adea876e7fdb4c93c849ef195049c063ada82b7d08"}, - {file = "aiohttp-3.10.9-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:4e6cb75f8ddd9c2132d00bc03c9716add57f4beff1263463724f6398b813e7eb"}, - {file = "aiohttp-3.10.9-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:608cecd8d58d285bfd52dbca5b6251ca8d6ea567022c8a0eaae03c2589cd9af9"}, - {file = "aiohttp-3.10.9-cp311-cp311-win32.whl", hash = "sha256:36d4fba838be5f083f5490ddd281813b44d69685db910907636bc5dca6322316"}, - {file = "aiohttp-3.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:8be1a65487bdfc285bd5e9baf3208c2132ca92a9b4020e9f27df1b16fab998a9"}, - {file = "aiohttp-3.10.9-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4fd16b30567c5b8e167923be6e027eeae0f20cf2b8a26b98a25115f28ad48ee0"}, - {file = "aiohttp-3.10.9-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:40ff5b7660f903dc587ed36ef08a88d46840182d9d4b5694e7607877ced698a1"}, - {file = "aiohttp-3.10.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4edc3fd701e2b9a0d605a7b23d3de4ad23137d23fc0dbab726aa71d92f11aaaf"}, - {file = 
"aiohttp-3.10.9-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e525b69ee8a92c146ae5b4da9ecd15e518df4d40003b01b454ad694a27f498b5"}, - {file = "aiohttp-3.10.9-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5002a02c17fcfd796d20bac719981d2fca9c006aac0797eb8f430a58e9d12431"}, - {file = "aiohttp-3.10.9-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd4ceeae2fb8cabdd1b71c82bfdd39662473d3433ec95b962200e9e752fb70d0"}, - {file = "aiohttp-3.10.9-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6e395c3d1f773cf0651cd3559e25182eb0c03a2777b53b4575d8adc1149c6e9"}, - {file = "aiohttp-3.10.9-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbdb8def5268f3f9cd753a265756f49228a20ed14a480d151df727808b4531dd"}, - {file = "aiohttp-3.10.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f82ace0ec57c94aaf5b0e118d4366cff5889097412c75aa14b4fd5fc0c44ee3e"}, - {file = "aiohttp-3.10.9-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6ebdc3b3714afe1b134b3bbeb5f745eed3ecbcff92ab25d80e4ef299e83a5465"}, - {file = "aiohttp-3.10.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:f9ca09414003c0e96a735daa1f071f7d7ed06962ef4fa29ceb6c80d06696d900"}, - {file = "aiohttp-3.10.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:1298b854fd31d0567cbb916091be9d3278168064fca88e70b8468875ef9ff7e7"}, - {file = "aiohttp-3.10.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:60ad5b8a7452c0f5645c73d4dad7490afd6119d453d302cd5b72b678a85d6044"}, - {file = "aiohttp-3.10.9-cp312-cp312-win32.whl", hash = "sha256:1a0ee6c0d590c917f1b9629371fce5f3d3f22c317aa96fbdcce3260754d7ea21"}, - {file = "aiohttp-3.10.9-cp312-cp312-win_amd64.whl", hash = "sha256:c46131c6112b534b178d4e002abe450a0a29840b61413ac25243f1291613806a"}, - {file = "aiohttp-3.10.9-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2bd9f3eac515c16c4360a6a00c38119333901b8590fe93c3257a9b536026594d"}, - {file = "aiohttp-3.10.9-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8cc0d13b4e3b1362d424ce3f4e8c79e1f7247a00d792823ffd640878abf28e56"}, - {file = "aiohttp-3.10.9-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ba1a599255ad6a41022e261e31bc2f6f9355a419575b391f9655c4d9e5df5ff5"}, - {file = "aiohttp-3.10.9-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:776e9f3c9b377fcf097c4a04b241b15691e6662d850168642ff976780609303c"}, - {file = "aiohttp-3.10.9-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8debb45545ad95b58cc16c3c1cc19ad82cffcb106db12b437885dbee265f0ab5"}, - {file = "aiohttp-3.10.9-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2555e4949c8d8782f18ef20e9d39730d2656e218a6f1a21a4c4c0b56546a02e"}, - {file = "aiohttp-3.10.9-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c54dc329cd44f7f7883a9f4baaefe686e8b9662e2c6c184ea15cceee587d8d69"}, - {file = "aiohttp-3.10.9-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e709d6ac598c5416f879bb1bae3fd751366120ac3fa235a01de763537385d036"}, - {file = "aiohttp-3.10.9-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:17c272cfe7b07a5bb0c6ad3f234e0c336fb53f3bf17840f66bd77b5815ab3d16"}, - {file = "aiohttp-3.10.9-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0c21c82df33b264216abffff9f8370f303dab65d8eee3767efbbd2734363f677"}, - {file = 
"aiohttp-3.10.9-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9331dd34145ff105177855017920dde140b447049cd62bb589de320fd6ddd582"}, - {file = "aiohttp-3.10.9-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:ac3196952c673822ebed8871cf8802e17254fff2a2ed4835d9c045d9b88c5ec7"}, - {file = "aiohttp-3.10.9-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2c33fa6e10bb7ed262e3ff03cc69d52869514f16558db0626a7c5c61dde3c29f"}, - {file = "aiohttp-3.10.9-cp313-cp313-win32.whl", hash = "sha256:a14e4b672c257a6b94fe934ee62666bacbc8e45b7876f9dd9502d0f0fe69db16"}, - {file = "aiohttp-3.10.9-cp313-cp313-win_amd64.whl", hash = "sha256:a35ed3d03910785f7d9d6f5381f0c24002b2b888b298e6f941b2fc94c5055fcd"}, - {file = "aiohttp-3.10.9.tar.gz", hash = "sha256:143b0026a9dab07a05ad2dd9e46aa859bffdd6348ddc5967b42161168c24f857"}, + "propcache>=0.2.0", + "yarl<2.0,>=1.17.0", +] +files = [ + {file = "aiohttp-3.11.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:50e0aee4adc9abcd2109c618a8d1b2c93b85ac277b24a003ab147d91e068b06d"}, + {file = "aiohttp-3.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9aa4e68f1e4f303971ec42976fb170204fb5092de199034b57199a1747e78a2d"}, + {file = "aiohttp-3.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d84930b4145991214602372edd7305fc76b700220db79ac0dd57d3afd0f0a1ca"}, + {file = "aiohttp-3.11.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b4ec8afd362356b8798c8caa806e91deb3f0602d8ffae8e91d2d3ced2a90c35e"}, + {file = "aiohttp-3.11.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fb0544a0e8294a5a5e20d3cacdaaa9a911d7c0a9150f5264aef36e7d8fdfa07e"}, + {file = "aiohttp-3.11.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7b0a1618060e3f5aa73d3526ca2108a16a1b6bf86612cd0bb2ddcbef9879d06"}, + {file = "aiohttp-3.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d878a0186023ac391861958035174d0486f3259cabf8fd94e591985468da3ea"}, + {file = "aiohttp-3.11.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e33a7eddcd07545ccf5c3ab230f60314a17dc33e285475e8405e26e21f02660"}, + {file = "aiohttp-3.11.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4d7fad8c456d180a6d2f44c41cfab4b80e2e81451815825097db48b8293f59d5"}, + {file = "aiohttp-3.11.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d954ba0eae7f33884d27dc00629ca4389d249eb8d26ca07c30911257cae8c96"}, + {file = "aiohttp-3.11.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:afa55e863224e664a782effa62245df73fdfc55aee539bed6efacf35f6d4e4b7"}, + {file = "aiohttp-3.11.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:10a5f91c319d9d4afba812f72984816b5fcd20742232ff7ecc1610ffbf3fc64d"}, + {file = "aiohttp-3.11.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6e8e19a80ba194db5c06915a9df23c0c06e0e9ca9a4db9386a6056cca555a027"}, + {file = "aiohttp-3.11.2-cp311-cp311-win32.whl", hash = "sha256:9c8d1db4f65bbc9d75b7b271d68fb996f1c8c81a525263862477d93611856c2d"}, + {file = "aiohttp-3.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:2adb967454e10e69478ba4a8d8afbba48a7c7a8619216b7c807f8481cc66ddfb"}, + {file = "aiohttp-3.11.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f833a80d9de9307d736b6af58c235b17ef7f90ebea7b9c49cd274dec7a66a2f1"}, + {file = "aiohttp-3.11.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:382f853516664d2ebfc75dc01da4a10fdef5edcb335fe7b45cf471ce758ecb18"}, + {file = 
"aiohttp-3.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d3a2bcf6c81639a165da93469e1e0aff67c956721f3fa9c0560f07dd1e505116"}, + {file = "aiohttp-3.11.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de3b4d5fb5d69749104b880a157f38baeea7765c93d9cd3837cedd5b84729e10"}, + {file = "aiohttp-3.11.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0a90a0dc4b054b5af299a900bf950fe8f9e3e54322bc405005f30aa5cacc5c98"}, + {file = "aiohttp-3.11.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:32334f35824811dd20a12cc90825d000e6b50faaeaa71408d42269151a66140d"}, + {file = "aiohttp-3.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cba0b8d25aa2d450762f3dd6df85498f5e7c3ad0ddeb516ef2b03510f0eea32"}, + {file = "aiohttp-3.11.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bbb2dbc2701ab7e9307ca3a8fa4999c5b28246968e0a0202a5afabf48a42e22"}, + {file = "aiohttp-3.11.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97fba98fc5d9ccd3d33909e898d00f2494d6a9eec7cbda3d030632e2c8bb4d00"}, + {file = "aiohttp-3.11.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0ebdf5087e2ce903d8220cc45dcece90c2199ae4395fd83ca616fcc81010db2c"}, + {file = "aiohttp-3.11.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:122768e3ae9ce74f981b46edefea9c6e5a40aea38aba3ac50168e6370459bf20"}, + {file = "aiohttp-3.11.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5587da333b7d280a312715b843d43e734652aa382cba824a84a67c81f75b338b"}, + {file = "aiohttp-3.11.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:85de9904bc360fd29a98885d2bfcbd4e02ab33c53353cb70607f2bea2cb92468"}, + {file = "aiohttp-3.11.2-cp312-cp312-win32.whl", hash = "sha256:b470de64d17156c37e91effc109d3b032b39867000e2c126732fe01d034441f9"}, + {file = "aiohttp-3.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:3f617a48b70f4843d54f52440ea1e58da6bdab07b391a3a6aed8d3b311a4cc04"}, + {file = "aiohttp-3.11.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d90b5a3b0f32a5fecf5dd83d828713986c019585f5cddf40d288ff77f366615"}, + {file = "aiohttp-3.11.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d23854e5867650d40cba54d49956aad8081452aa80b2cf0d8c310633f4f48510"}, + {file = "aiohttp-3.11.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:486273d3b5af75a80c31c311988931bdd2a4b96a74d5c7f422bad948f99988ef"}, + {file = "aiohttp-3.11.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9075313f8e41b481e4cb10af405054564b0247dc335db5398ed05f8ec38787e2"}, + {file = "aiohttp-3.11.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44b69c69c194ffacbc50165911cf023a4b1b06422d1e1199d3aea82eac17004e"}, + {file = "aiohttp-3.11.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b339d91ac9060bd6ecdc595a82dc151045e5d74f566e0864ef3f2ba0887fec42"}, + {file = "aiohttp-3.11.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64e8f5178958a9954043bc8cd10a5ae97352c3f2fc99aa01f2aebb0026010910"}, + {file = "aiohttp-3.11.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3129151378f858cdc4a0a4df355c9a0d060ab49e2eea7e62e9f085bac100551b"}, + {file = "aiohttp-3.11.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:14eb6c628432720e41b4fab1ada879d56cfe7034159849e083eb536b4c2afa99"}, + {file = 
"aiohttp-3.11.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:e57a10aacedcf24666f4c90d03e599f71d172d1c5e00dcf48205c445806745b0"}, + {file = "aiohttp-3.11.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:66e58a2e8c7609a3545c4b38fb8b01a6b8346c4862e529534f7674c5265a97b8"}, + {file = "aiohttp-3.11.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9b6d15adc9768ff167614ca853f7eeb6ee5f1d55d5660e3af85ce6744fed2b82"}, + {file = "aiohttp-3.11.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:2914061f5ca573f990ec14191e6998752fa8fe50d518e3405410353c3f44aa5d"}, + {file = "aiohttp-3.11.2-cp313-cp313-win32.whl", hash = "sha256:1c2496182e577042e0e07a328d91c949da9e77a2047c7291071e734cd7a6e780"}, + {file = "aiohttp-3.11.2-cp313-cp313-win_amd64.whl", hash = "sha256:cccb2937bece1310c5c0163d0406aba170a2e5fb1f0444d7b0e7fdc9bd6bb713"}, + {file = "aiohttp-3.11.2.tar.gz", hash = "sha256:68d1f46f9387db3785508f5225d3acbc5825ca13d9c29f2b5cce203d5863eb79"}, ] [[package]] @@ -116,7 +102,7 @@ name = "aiosignal" version = "1.3.1" requires_python = ">=3.7" summary = "aiosignal: a list of registered asynchronous callbacks" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "frozenlist>=1.1.0", ] @@ -130,7 +116,7 @@ name = "annotated-types" version = "0.7.0" requires_python = ">=3.8" summary = "Reusable constraint types to use with typing.Annotated" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "typing-extensions>=4.0.0; python_version < \"3.9\"", ] @@ -141,10 +127,10 @@ files = [ [[package]] name = "anyio" -version = "4.6.0" +version = "4.6.2.post1" requires_python = ">=3.9" summary = "High level compatibility layer for multiple asynchronous event loop implementations" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "exceptiongroup>=1.0.2; python_version < \"3.11\"", "idna>=2.8", @@ -152,8 +138,8 @@ dependencies = [ "typing-extensions>=4.1; python_version < \"3.11\"", ] files = [ - {file = "anyio-4.6.0-py3-none-any.whl", hash = "sha256:c7d2e9d63e31599eeb636c8c5c03a7e108d73b345f064f1c19fdc87b79036a9a"}, - {file = "anyio-4.6.0.tar.gz", hash = "sha256:137b4559cbb034c477165047febb6ff83f390fc3b20bf181c1fc0a728cb8beeb"}, + {file = "anyio-4.6.2.post1-py3-none-any.whl", hash = "sha256:6d170c36fba3bdd840c73d3868c1e777e33676a69c3a72cf0a0d5d6d8009b61d"}, + {file = "anyio-4.6.2.post1.tar.gz", hash = "sha256:4c8bc31ccdb51c7f7bd251f51c609e038d63e34219b44aa86e47576389880b4c"}, ] [[package]] @@ -161,7 +147,7 @@ name = "asgiref" version = "3.8.1" requires_python = ">=3.8" summary = "ASGI specs, helper code, and adapters" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "typing-extensions>=4; python_version < \"3.11\"", ] @@ -170,27 +156,12 @@ files = [ {file = "asgiref-3.8.1.tar.gz", hash = "sha256:c343bd80a0bec947a9860adb4c432ffa7db769836c64238fc34bdc3fec84d590"}, ] -[[package]] -name = "async-timeout" -version = "4.0.3" -requires_python = ">=3.7" -summary = "Timeout context manager for asyncio programs" -groups = ["default"] -marker = "python_version < \"3.11\"" -dependencies = [ - "typing-extensions>=3.6.5; python_version < \"3.8\"", -] -files = [ - {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, - {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"}, -] - [[package]] name = "attrs" version = "24.2.0" requires_python = ">=3.7" summary = "Classes Without 
Boilerplate" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "importlib-metadata; python_version < \"3.8\"", ] @@ -199,12 +170,55 @@ files = [ {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, ] +[[package]] +name = "audioop-lts" +version = "0.2.1" +requires_python = ">=3.13" +summary = "LTS Port of Python audioop" +groups = ["chat_with_repo"] +marker = "python_version >= \"3.13\"" +files = [ + {file = "audioop_lts-0.2.1-cp313-abi3-macosx_10_13_universal2.whl", hash = "sha256:fd1345ae99e17e6910f47ce7d52673c6a1a70820d78b67de1b7abb3af29c426a"}, + {file = "audioop_lts-0.2.1-cp313-abi3-macosx_10_13_x86_64.whl", hash = "sha256:e175350da05d2087e12cea8e72a70a1a8b14a17e92ed2022952a4419689ede5e"}, + {file = "audioop_lts-0.2.1-cp313-abi3-macosx_11_0_arm64.whl", hash = "sha256:4a8dd6a81770f6ecf019c4b6d659e000dc26571b273953cef7cd1d5ce2ff3ae6"}, + {file = "audioop_lts-0.2.1-cp313-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1cd3c0b6f2ca25c7d2b1c3adeecbe23e65689839ba73331ebc7d893fcda7ffe"}, + {file = "audioop_lts-0.2.1-cp313-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff3f97b3372c97782e9c6d3d7fdbe83bce8f70de719605bd7ee1839cd1ab360a"}, + {file = "audioop_lts-0.2.1-cp313-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a351af79edefc2a1bd2234bfd8b339935f389209943043913a919df4b0f13300"}, + {file = "audioop_lts-0.2.1-cp313-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2aeb6f96f7f6da80354330470b9134d81b4cf544cdd1c549f2f45fe964d28059"}, + {file = "audioop_lts-0.2.1-cp313-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c589f06407e8340e81962575fcffbba1e92671879a221186c3d4662de9fe804e"}, + {file = "audioop_lts-0.2.1-cp313-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fbae5d6925d7c26e712f0beda5ed69ebb40e14212c185d129b8dfbfcc335eb48"}, + {file = "audioop_lts-0.2.1-cp313-abi3-musllinux_1_2_i686.whl", hash = "sha256:d2d5434717f33117f29b5691fbdf142d36573d751716249a288fbb96ba26a281"}, + {file = "audioop_lts-0.2.1-cp313-abi3-musllinux_1_2_ppc64le.whl", hash = "sha256:f626a01c0a186b08f7ff61431c01c055961ee28769591efa8800beadd27a2959"}, + {file = "audioop_lts-0.2.1-cp313-abi3-musllinux_1_2_s390x.whl", hash = "sha256:05da64e73837f88ee5c6217d732d2584cf638003ac72df124740460531e95e47"}, + {file = "audioop_lts-0.2.1-cp313-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:56b7a0a4dba8e353436f31a932f3045d108a67b5943b30f85a5563f4d8488d77"}, + {file = "audioop_lts-0.2.1-cp313-abi3-win32.whl", hash = "sha256:6e899eb8874dc2413b11926b5fb3857ec0ab55222840e38016a6ba2ea9b7d5e3"}, + {file = "audioop_lts-0.2.1-cp313-abi3-win_amd64.whl", hash = "sha256:64562c5c771fb0a8b6262829b9b4f37a7b886c01b4d3ecdbae1d629717db08b4"}, + {file = "audioop_lts-0.2.1-cp313-abi3-win_arm64.whl", hash = "sha256:c45317debeb64002e980077642afbd977773a25fa3dfd7ed0c84dccfc1fafcb0"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:3827e3fce6fee4d69d96a3d00cd2ab07f3c0d844cb1e44e26f719b34a5b15455"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:161249db9343b3c9780ca92c0be0d1ccbfecdbccac6844f3d0d44b9c4a00a17f"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5b7b4ff9de7a44e0ad2618afdc2ac920b91f4a6d3509520ee65339d4acde5abf"}, + {file = 
"audioop_lts-0.2.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72e37f416adb43b0ced93419de0122b42753ee74e87070777b53c5d2241e7fab"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:534ce808e6bab6adb65548723c8cbe189a3379245db89b9d555c4210b4aaa9b6"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2de9b6fb8b1cf9f03990b299a9112bfdf8b86b6987003ca9e8a6c4f56d39543"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f24865991b5ed4b038add5edbf424639d1358144f4e2a3e7a84bc6ba23e35074"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bdb3b7912ccd57ea53197943f1bbc67262dcf29802c4a6df79ec1c715d45a78"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:120678b208cca1158f0a12d667af592e067f7a50df9adc4dc8f6ad8d065a93fb"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:54cd4520fc830b23c7d223693ed3e1b4d464997dd3abc7c15dce9a1f9bd76ab2"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:d6bd20c7a10abcb0fb3d8aaa7508c0bf3d40dfad7515c572014da4b979d3310a"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:f0ed1ad9bd862539ea875fb339ecb18fcc4148f8d9908f4502df28f94d23491a"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e1af3ff32b8c38a7d900382646e91f2fc515fd19dea37e9392275a5cbfdbff63"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-win32.whl", hash = "sha256:f51bb55122a89f7a0817d7ac2319744b4640b5b446c4c3efcea5764ea99ae509"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:f0f2f336aa2aee2bce0b0dcc32bbba9178995454c7b979cf6ce086a8801e14c7"}, + {file = "audioop_lts-0.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:78bfb3703388c780edf900be66e07de5a3d4105ca8e8720c5c4d67927e0b15d0"}, + {file = "audioop_lts-0.2.1.tar.gz", hash = "sha256:e81268da0baa880431b68b1308ab7257eb33f356e57a5f9b1f915dfb13dd1387"}, +] + [[package]] name = "backoff" version = "2.2.1" requires_python = ">=3.7,<4.0" summary = "Function decoration for backoff and retry" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"}, @@ -215,7 +229,7 @@ name = "bcrypt" version = "4.2.0" requires_python = ">=3.7" summary = "Modern password hashing for your software and your servers" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "bcrypt-4.2.0-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:096a15d26ed6ce37a14c1ac1e48119660f21b24cba457f160a4b830f3fe6b5cb"}, {file = "bcrypt-4.2.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c02d944ca89d9b1922ceb8a46460dd17df1ba37ab66feac4870f6862a1533c00"}, @@ -239,8 +253,6 @@ files = [ {file = "bcrypt-4.2.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:cb2a8ec2bc07d3553ccebf0746bbf3d19426d1c6d1adbd4fa48925f66af7b9e8"}, {file = "bcrypt-4.2.0-cp39-abi3-win32.whl", hash = "sha256:77800b7147c9dc905db1cba26abe31e504d8247ac73580b4aa179f98e6608f34"}, {file = "bcrypt-4.2.0-cp39-abi3-win_amd64.whl", hash = 
"sha256:61ed14326ee023917ecd093ee6ef422a72f3aec6f07e21ea5f10622b735538a9"}, - {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:39e1d30c7233cfc54f5c3f2c825156fe044efdd3e0b9d309512cc514a263ec2a"}, - {file = "bcrypt-4.2.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:f4f4acf526fcd1c34e7ce851147deedd4e26e6402369304220250598b26448db"}, {file = "bcrypt-4.2.0.tar.gz", hash = "sha256:cf69eaf5185fd58f268f805b505ce31f9b9fc2d64b376642164e9244540c1221"}, ] @@ -249,7 +261,7 @@ name = "build" version = "1.2.2.post1" requires_python = ">=3.8" summary = "A simple, correct Python build frontend" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "colorama; os_name == \"nt\"", "importlib-metadata>=4.6; python_full_version < \"3.10.2\"", @@ -267,7 +279,7 @@ name = "cachetools" version = "5.5.0" requires_python = ">=3.7" summary = "Extensible memoizing collections and decorators" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, @@ -278,7 +290,7 @@ name = "certifi" version = "2024.8.30" requires_python = ">=3.6" summary = "Python package for providing Mozilla's CA Bundle." -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, @@ -286,74 +298,69 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" requires_python = ">=3.7.0" summary = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-groups = ["default"] -files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +groups = ["default", "chat_with_repo"] +files = [ + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = 
"charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] name = "chroma-hnswlib" version = "0.7.6" summary = "Chromas fork of hnswlib" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "numpy", ] files = [ - {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f35192fbbeadc8c0633f0a69c3d3e9f1a4eab3a46b65458bbcbcabdd9e895c36"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6f007b608c96362b8f0c8b6b2ac94f67f83fcbabd857c378ae82007ec92f4d82"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:456fd88fa0d14e6b385358515aef69fc89b3c2191706fd9aee62087b62aad09c"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5dfaae825499c2beaa3b75a12d7ec713b64226df72a5c4097203e3ed532680da"}, - {file = "chroma_hnswlib-0.7.6-cp310-cp310-win_amd64.whl", hash = "sha256:2487201982241fb1581be26524145092c95902cb09fc2646ccfbc407de3328ec"}, {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:81181d54a2b1e4727369486a631f977ffc53c5533d26e3d366dda243fb0998ca"}, {file = "chroma_hnswlib-0.7.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4b4ab4e11f1083dd0a11ee4f0e0b183ca9f0f2ed63ededba1935b13ce2b3606f"}, {file = "chroma_hnswlib-0.7.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:53db45cd9173d95b4b0bdccb4dbff4c54a42b51420599c32267f3abbeb795170"}, @@ -368,10 +375,10 @@ files = [ [[package]] name = "chromadb" -version = "0.5.11" +version = "0.5.18" requires_python = ">=3.8" summary = "Chroma." 
-groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "PyYAML>=6.0.0", "bcrypt>=4.0.1", @@ -404,8 +411,8 @@ dependencies = [ "uvicorn[standard]>=0.18.3", ] files = [ - {file = "chromadb-0.5.11-py3-none-any.whl", hash = "sha256:f02d9326869cea926f980bd6c9a0150a0ef2e151072f325998c16a9502fb4b25"}, - {file = "chromadb-0.5.11.tar.gz", hash = "sha256:252e970b3e1a27b594cc7b3685238691bf8eaa232225d4dee9e33ec83580775f"}, + {file = "chromadb-0.5.18-py3-none-any.whl", hash = "sha256:9dd3827b5e04b4ff0a5ea0df28a78bac88a09f45be37fcd7fe20f879b57c43cf"}, + {file = "chromadb-0.5.18.tar.gz", hash = "sha256:cfbb3e5aeeb1dd532b47d80ed9185e8a9886c09af41c8e6123edf94395d76aec"}, ] [[package]] @@ -413,7 +420,7 @@ name = "click" version = "8.1.7" requires_python = ">=3.7" summary = "Composable command line interface toolkit" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "colorama; platform_system == \"Windows\"", "importlib-metadata; python_version < \"3.8\"", @@ -428,7 +435,7 @@ name = "colorama" version = "0.4.6" requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" summary = "Cross-platform colored terminal text." -groups = ["default", "test"] +groups = ["default", "chat_with_repo", "test"] files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -439,7 +446,7 @@ name = "coloredlogs" version = "15.0.1" requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" summary = "Colored terminal output for Python's logging module" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "humanfriendly>=9.1", ] @@ -448,87 +455,12 @@ files = [ {file = "coloredlogs-15.0.1.tar.gz", hash = "sha256:7c991aa71a4577af2f82600d8f8f3a89f936baeaf9b50a9c197da014e5bf16b0"}, ] -[[package]] -name = "contourpy" -version = "1.3.0" -requires_python = ">=3.9" -summary = "Python library for calculating contours of 2D quadrilateral grids" -groups = ["default"] -dependencies = [ - "numpy>=1.23", -] -files = [ - {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, - {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, - {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, - {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, - {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = 
"sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, - {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, - {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, - {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, - {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, - {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, - {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, - {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, - {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, - {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, - {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, - {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, - {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, - {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, - {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, - {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, - {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, - {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, - {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, - {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, - {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, - {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, -] - -[[package]] -name = "cycler" -version = "0.12.1" -requires_python = ">=3.8" -summary = "Composable style cycles" -groups = ["default"] -files = [ - {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, - {file = "cycler-0.12.1.tar.gz", hash = 
"sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c"}, -] - [[package]] name = "dataclasses-json" version = "0.6.7" requires_python = "<4.0,>=3.7" summary = "Easily serialize dataclasses to and from JSON." -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "marshmallow<4.0.0,>=3.18.0", "typing-inspect<1,>=0.4.0", @@ -540,23 +472,23 @@ files = [ [[package]] name = "deprecated" -version = "1.2.14" -requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +version = "1.2.15" +requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" summary = "Python @deprecated decorator to deprecate old python classes, functions or methods." -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "wrapt<2,>=1.10", ] files = [ - {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"}, - {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"}, + {file = "Deprecated-1.2.15-py2.py3-none-any.whl", hash = "sha256:353bc4a8ac4bfc96800ddab349d89c25dec1079f65fd53acdcc1e0b975b21320"}, + {file = "deprecated-1.2.15.tar.gz", hash = "sha256:683e561a90de76239796e6b6feac66b99030d2dd3fcf61ef996330f14bbb9b0d"}, ] [[package]] name = "dirtyjson" version = "1.0.8" summary = "JSON decoder for Python that can extract data from the muck" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "dirtyjson-1.0.8-py3-none-any.whl", hash = "sha256:125e27248435a58acace26d5c2c4c11a1c0de0a9c5124c5a94ba78e517d74f53"}, {file = "dirtyjson-1.0.8.tar.gz", hash = "sha256:90ca4a18f3ff30ce849d100dcf4a003953c79d3a2348ef056f1d9c22231a25fd"}, @@ -567,7 +499,7 @@ name = "distro" version = "1.9.0" requires_python = ">=3.6" summary = "Distro - an OS platform information API" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"}, {file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"}, @@ -577,38 +509,26 @@ files = [ name = "durationpy" version = "0.9" summary = "Module for converting between datetime.timedelta and Go's Duration strings." 
-groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "durationpy-0.9-py3-none-any.whl", hash = "sha256:e65359a7af5cedad07fb77a2dd3f390f8eb0b74cb845589fa6c057086834dd38"}, {file = "durationpy-0.9.tar.gz", hash = "sha256:fd3feb0a69a0057d582ef643c355c40d2fa1c942191f914d12203b1a01ac722a"}, ] -[[package]] -name = "exceptiongroup" -version = "1.2.2" -requires_python = ">=3.7" -summary = "Backport of PEP 654 (exception groups)" -groups = ["default", "test"] -marker = "python_version < \"3.11\"" -files = [ - {file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"}, - {file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"}, -] - [[package]] name = "fastapi" -version = "0.115.0" +version = "0.115.5" requires_python = ">=3.8" summary = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "pydantic!=1.8,!=1.8.1,!=2.0.0,!=2.0.1,!=2.1.0,<3.0.0,>=1.7.4", - "starlette<0.39.0,>=0.37.2", + "starlette<0.42.0,>=0.40.0", "typing-extensions>=4.8.0", ] files = [ - {file = "fastapi-0.115.0-py3-none-any.whl", hash = "sha256:17ea427674467486e997206a5ab25760f6b09e069f099b96f5b55a32fb6f1631"}, - {file = "fastapi-0.115.0.tar.gz", hash = "sha256:f93b4ca3529a8ebc6fc3fcf710e5efa8de3df9b41570958abf1d97d843138004"}, + {file = "fastapi-0.115.5-py3-none-any.whl", hash = "sha256:596b95adbe1474da47049e802f9a65ab2ffa9c2b07e7efee70eb8a66c9f2f796"}, + {file = "fastapi-0.115.5.tar.gz", hash = "sha256:0e7a4d0dc0d01c68df21887cce0945e72d3c48b9f4f79dfe7a7d53aa08fbb289"}, ] [[package]] @@ -616,7 +536,7 @@ name = "ffmpy" version = "0.4.0" requires_python = "<4.0.0,>=3.8.1" summary = "A simple Python wrapper for FFmpeg" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "ffmpy-0.4.0-py3-none-any.whl", hash = "sha256:39c0f20c5b465e7f8d29a5191f3a7d7675a8c546d9d985de8921151cd9b59e14"}, {file = "ffmpy-0.4.0.tar.gz", hash = "sha256:131b57794e802ad555f579007497f7a3d0cab0583d37496c685b8acae4837b1d"}, @@ -627,128 +547,97 @@ name = "filelock" version = "3.16.1" requires_python = ">=3.8" summary = "A platform independent file lock." -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0"}, {file = "filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435"}, ] +[[package]] +name = "filetype" +version = "1.2.0" +summary = "Infer file type and MIME type of any file/buffer. No external dependencies." 
+groups = ["default", "chat_with_repo"] +files = [ + {file = "filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25"}, + {file = "filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb"}, +] + [[package]] name = "flatbuffers" version = "24.3.25" summary = "The FlatBuffers serialization format for Python" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "flatbuffers-24.3.25-py2.py3-none-any.whl", hash = "sha256:8dbdec58f935f3765e4f7f3cf635ac3a77f83568138d6a2311f524ec96364812"}, {file = "flatbuffers-24.3.25.tar.gz", hash = "sha256:de2ec5b203f21441716617f38443e0a8ebf3d25bf0d9c0bb0ce68fa00ad546a4"}, ] -[[package]] -name = "fonttools" -version = "4.54.1" -requires_python = ">=3.8" -summary = "Tools to manipulate font files" -groups = ["default"] -files = [ - {file = "fonttools-4.54.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7ed7ee041ff7b34cc62f07545e55e1468808691dddfd315d51dd82a6b37ddef2"}, - {file = "fonttools-4.54.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:41bb0b250c8132b2fcac148e2e9198e62ff06f3cc472065dff839327945c5882"}, - {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7965af9b67dd546e52afcf2e38641b5be956d68c425bef2158e95af11d229f10"}, - {file = "fonttools-4.54.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:278913a168f90d53378c20c23b80f4e599dca62fbffae4cc620c8eed476b723e"}, - {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0e88e3018ac809b9662615072dcd6b84dca4c2d991c6d66e1970a112503bba7e"}, - {file = "fonttools-4.54.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:4aa4817f0031206e637d1e685251ac61be64d1adef111060df84fdcbc6ab6c44"}, - {file = "fonttools-4.54.1-cp310-cp310-win32.whl", hash = "sha256:7e3b7d44e18c085fd8c16dcc6f1ad6c61b71ff463636fcb13df7b1b818bd0c02"}, - {file = "fonttools-4.54.1-cp310-cp310-win_amd64.whl", hash = "sha256:dd9cc95b8d6e27d01e1e1f1fae8559ef3c02c76317da650a19047f249acd519d"}, - {file = "fonttools-4.54.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5419771b64248484299fa77689d4f3aeed643ea6630b2ea750eeab219588ba20"}, - {file = "fonttools-4.54.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:301540e89cf4ce89d462eb23a89464fef50915255ece765d10eee8b2bf9d75b2"}, - {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ae5091547e74e7efecc3cbf8e75200bc92daaeb88e5433c5e3e95ea8ce5aa7"}, - {file = "fonttools-4.54.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82834962b3d7c5ca98cb56001c33cf20eb110ecf442725dc5fdf36d16ed1ab07"}, - {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d26732ae002cc3d2ecab04897bb02ae3f11f06dd7575d1df46acd2f7c012a8d8"}, - {file = "fonttools-4.54.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:58974b4987b2a71ee08ade1e7f47f410c367cdfc5a94fabd599c88165f56213a"}, - {file = "fonttools-4.54.1-cp311-cp311-win32.whl", hash = "sha256:ab774fa225238986218a463f3fe151e04d8c25d7de09df7f0f5fce27b1243dbc"}, - {file = "fonttools-4.54.1-cp311-cp311-win_amd64.whl", hash = "sha256:07e005dc454eee1cc60105d6a29593459a06321c21897f769a281ff2d08939f6"}, - {file = "fonttools-4.54.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:54471032f7cb5fca694b5f1a0aaeba4af6e10ae989df408e0216f7fd6cdc405d"}, - {file = "fonttools-4.54.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:8fa92cb248e573daab8d032919623cc309c005086d743afb014c836636166f08"}, - {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a911591200114969befa7f2cb74ac148bce5a91df5645443371aba6d222e263"}, - {file = "fonttools-4.54.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93d458c8a6a354dc8b48fc78d66d2a8a90b941f7fec30e94c7ad9982b1fa6bab"}, - {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5eb2474a7c5be8a5331146758debb2669bf5635c021aee00fd7c353558fc659d"}, - {file = "fonttools-4.54.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c9c563351ddc230725c4bdf7d9e1e92cbe6ae8553942bd1fb2b2ff0884e8b714"}, - {file = "fonttools-4.54.1-cp312-cp312-win32.whl", hash = "sha256:fdb062893fd6d47b527d39346e0c5578b7957dcea6d6a3b6794569370013d9ac"}, - {file = "fonttools-4.54.1-cp312-cp312-win_amd64.whl", hash = "sha256:e4564cf40cebcb53f3dc825e85910bf54835e8a8b6880d59e5159f0f325e637e"}, - {file = "fonttools-4.54.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6e37561751b017cf5c40fce0d90fd9e8274716de327ec4ffb0df957160be3bff"}, - {file = "fonttools-4.54.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357cacb988a18aace66e5e55fe1247f2ee706e01debc4b1a20d77400354cddeb"}, - {file = "fonttools-4.54.1-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f8e953cc0bddc2beaf3a3c3b5dd9ab7554677da72dfaf46951e193c9653e515a"}, - {file = "fonttools-4.54.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:58d29b9a294573d8319f16f2f79e42428ba9b6480442fa1836e4eb89c4d9d61c"}, - {file = "fonttools-4.54.1-cp313-cp313-win32.whl", hash = "sha256:9ef1b167e22709b46bf8168368b7b5d3efeaaa746c6d39661c1b4405b6352e58"}, - {file = "fonttools-4.54.1-cp313-cp313-win_amd64.whl", hash = "sha256:262705b1663f18c04250bd1242b0515d3bbae177bee7752be67c979b7d47f43d"}, - {file = "fonttools-4.54.1-py3-none-any.whl", hash = "sha256:37cddd62d83dc4f72f7c3f3c2bcf2697e89a30efb152079896544a93907733bd"}, - {file = "fonttools-4.54.1.tar.gz", hash = "sha256:957f669d4922f92c171ba01bef7f29410668db09f6c02111e22b2bce446f3285"}, -] - [[package]] name = "frozenlist" -version = "1.4.1" +version = "1.5.0" requires_python = ">=3.8" summary = "A list-like structure which implements collections.abc.MutableSequence" -groups = ["default"] -files = [ - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f9aa1878d1083b276b0196f2dfbe00c9b7e752475ed3b682025ff20c1c1f51ac"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:29acab3f66f0f24674b7dc4736477bcd4bc3ad4b896f5f45379a67bce8b96868"}, - {file = "frozenlist-1.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:74fb4bee6880b529a0c6560885fce4dc95936920f9f20f53d99a213f7bf66776"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:590344787a90ae57d62511dd7c736ed56b428f04cd8c161fcc5e7232c130c69a"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:068b63f23b17df8569b7fdca5517edef76171cf3897eb68beb01341131fbd2ad"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c849d495bf5154cd8da18a9eb15db127d4dba2968d88831aff6f0331ea9bd4c"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9750cc7fe1ae3b1611bb8cfc3f9ec11d532244235d75901fb6b8e42ce9229dfe"}, - {file = "frozenlist-1.4.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9b2de4cf0cdd5bd2dee4c4f63a653c61d2408055ab77b151c1957f221cabf2a"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0633c8d5337cb5c77acbccc6357ac49a1770b8c487e5b3505c57b949b4b82e98"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:27657df69e8801be6c3638054e202a135c7f299267f1a55ed3a598934f6c0d75"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:f9a3ea26252bd92f570600098783d1371354d89d5f6b7dfd87359d669f2109b5"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4f57dab5fe3407b6c0c1cc907ac98e8a189f9e418f3b6e54d65a718aaafe3950"}, - {file = "frozenlist-1.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e02a0e11cf6597299b9f3bbd3f93d79217cb90cfd1411aec33848b13f5c656cc"}, - {file = "frozenlist-1.4.1-cp310-cp310-win32.whl", hash = "sha256:a828c57f00f729620a442881cc60e57cfcec6842ba38e1b19fd3e47ac0ff8dc1"}, - {file = "frozenlist-1.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:f56e2333dda1fe0f909e7cc59f021eba0d2307bc6f012a1ccf2beca6ba362439"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:a0cb6f11204443f27a1628b0e460f37fb30f624be6051d490fa7d7e26d4af3d0"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b46c8ae3a8f1f41a0d2ef350c0b6e65822d80772fe46b653ab6b6274f61d4a49"}, - {file = "frozenlist-1.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:fde5bd59ab5357e3853313127f4d3565fc7dad314a74d7b5d43c22c6a5ed2ced"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:722e1124aec435320ae01ee3ac7bec11a5d47f25d0ed6328f2273d287bc3abb0"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2471c201b70d58a0f0c1f91261542a03d9a5e088ed3dc6c160d614c01649c106"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c757a9dd70d72b076d6f68efdbb9bc943665ae954dad2801b874c8c69e185068"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f146e0911cb2f1da549fc58fc7bcd2b836a44b79ef871980d605ec392ff6b0d2"}, - {file = "frozenlist-1.4.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f9c515e7914626b2a2e1e311794b4c35720a0be87af52b79ff8e1429fc25f19"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c302220494f5c1ebeb0912ea782bcd5e2f8308037b3c7553fad0e48ebad6ad82"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:442acde1e068288a4ba7acfe05f5f343e19fac87bfc96d89eb886b0363e977ec"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:1b280e6507ea8a4fa0c0a7150b4e526a8d113989e28eaaef946cc77ffd7efc0a"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:fe1a06da377e3a1062ae5fe0926e12b84eceb8a50b350ddca72dc85015873f74"}, - {file = "frozenlist-1.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:db9e724bebd621d9beca794f2a4ff1d26eed5965b004a97f1f1685a173b869c2"}, - {file = "frozenlist-1.4.1-cp311-cp311-win32.whl", hash = 
"sha256:e774d53b1a477a67838a904131c4b0eef6b3d8a651f8b138b04f748fccfefe17"}, - {file = "frozenlist-1.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:fb3c2db03683b5767dedb5769b8a40ebb47d6f7f45b1b3e3b4b51ec8ad9d9825"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:1979bc0aeb89b33b588c51c54ab0161791149f2461ea7c7c946d95d5f93b56ae"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:cc7b01b3754ea68a62bd77ce6020afaffb44a590c2289089289363472d13aedb"}, - {file = "frozenlist-1.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c9c92be9fd329ac801cc420e08452b70e7aeab94ea4233a4804f0915c14eba9b"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c3894db91f5a489fc8fa6a9991820f368f0b3cbdb9cd8849547ccfab3392d86"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ba60bb19387e13597fb059f32cd4d59445d7b18b69a745b8f8e5db0346f33480"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8aefbba5f69d42246543407ed2461db31006b0f76c4e32dfd6f42215a2c41d09"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780d3a35680ced9ce682fbcf4cb9c2bad3136eeff760ab33707b71db84664e3a"}, - {file = "frozenlist-1.4.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9acbb16f06fe7f52f441bb6f413ebae6c37baa6ef9edd49cdd567216da8600cd"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:23b701e65c7b36e4bf15546a89279bd4d8675faabc287d06bbcfac7d3c33e1e6"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:3e0153a805a98f5ada7e09826255ba99fb4f7524bb81bf6b47fb702666484ae1"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:dd9b1baec094d91bf36ec729445f7769d0d0cf6b64d04d86e45baf89e2b9059b"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:1a4471094e146b6790f61b98616ab8e44f72661879cc63fa1049d13ef711e71e"}, - {file = "frozenlist-1.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5667ed53d68d91920defdf4035d1cdaa3c3121dc0b113255124bcfada1cfa1b8"}, - {file = "frozenlist-1.4.1-cp312-cp312-win32.whl", hash = "sha256:beee944ae828747fd7cb216a70f120767fc9f4f00bacae8543c14a6831673f89"}, - {file = "frozenlist-1.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:64536573d0a2cb6e625cf309984e2d873979709f2cf22839bf2d61790b448ad5"}, - {file = "frozenlist-1.4.1-py3-none-any.whl", hash = "sha256:04ced3e6a46b4cfffe20f9ae482818e34eba9b5fb0ce4056e4cc9b6e212d09b7"}, - {file = "frozenlist-1.4.1.tar.gz", hash = "sha256:c037a86e8513059a2613aaba4d817bb90b9d9b6b69aace3ce9c877e8c8ed402b"}, +groups = ["default", "chat_with_repo"] +files = [ + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:fd74520371c3c4175142d02a976aee0b4cb4a7cc912a60586ffd8d5929979b30"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2f3f7a0fbc219fb4455264cae4d9f01ad41ae6ee8524500f381de64ffaa077d5"}, + {file = "frozenlist-1.5.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f47c9c9028f55a04ac254346e92977bf0f166c483c74b4232bee19a6697e4778"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0996c66760924da6e88922756d99b47512a71cfd45215f3570bf1e0b694c206a"}, + {file = 
"frozenlist-1.5.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2fe128eb4edeabe11896cb6af88fca5346059f6c8d807e3b910069f39157869"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1a8ea951bbb6cacd492e3948b8da8c502a3f814f5d20935aae74b5df2b19cf3d"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:de537c11e4aa01d37db0d403b57bd6f0546e71a82347a97c6a9f0dcc532b3a45"}, + {file = "frozenlist-1.5.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c2623347b933fcb9095841f1cc5d4ff0b278addd743e0e966cb3d460278840d"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:cee6798eaf8b1416ef6909b06f7dc04b60755206bddc599f52232606e18179d3"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f5f9da7f5dbc00a604fe74aa02ae7c98bcede8a3b8b9666f9f86fc13993bc71a"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:90646abbc7a5d5c7c19461d2e3eeb76eb0b204919e6ece342feb6032c9325ae9"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:bdac3c7d9b705d253b2ce370fde941836a5f8b3c5c2b8fd70940a3ea3af7f4f2"}, + {file = "frozenlist-1.5.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03d33c2ddbc1816237a67f66336616416e2bbb6beb306e5f890f2eb22b959cdf"}, + {file = "frozenlist-1.5.0-cp311-cp311-win32.whl", hash = "sha256:237f6b23ee0f44066219dae14c70ae38a63f0440ce6750f868ee08775073f942"}, + {file = "frozenlist-1.5.0-cp311-cp311-win_amd64.whl", hash = "sha256:0cc974cc93d32c42e7b0f6cf242a6bd941c57c61b618e78b6c0a96cb72788c1d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:31115ba75889723431aa9a4e77d5f398f5cf976eea3bdf61749731f62d4a4a21"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7437601c4d89d070eac8323f121fcf25f88674627505334654fd027b091db09d"}, + {file = "frozenlist-1.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7948140d9f8ece1745be806f2bfdf390127cf1a763b925c4a805c603df5e697e"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:feeb64bc9bcc6b45c6311c9e9b99406660a9c05ca8a5b30d14a78555088b0b3a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:683173d371daad49cffb8309779e886e59c2f369430ad28fe715f66d08d4ab1a"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d57d8f702221405a9d9b40f9da8ac2e4a1a8b5285aac6100f3393675f0a85ee"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:30c72000fbcc35b129cb09956836c7d7abf78ab5416595e4857d1cae8d6251a6"}, + {file = "frozenlist-1.5.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:000a77d6034fbad9b6bb880f7ec073027908f1b40254b5d6f26210d2dab1240e"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5d7f5a50342475962eb18b740f3beecc685a15b52c91f7d975257e13e029eca9"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:87f724d055eb4785d9be84e9ebf0f24e392ddfad00b3fe036e43f489fafc9039"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = 
"sha256:6e9080bb2fb195a046e5177f10d9d82b8a204c0736a97a153c2466127de87784"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9b93d7aaa36c966fa42efcaf716e6b3900438632a626fb09c049f6a2f09fc631"}, + {file = "frozenlist-1.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:52ef692a4bc60a6dd57f507429636c2af8b6046db8b31b18dac02cbc8f507f7f"}, + {file = "frozenlist-1.5.0-cp312-cp312-win32.whl", hash = "sha256:29d94c256679247b33a3dc96cce0f93cbc69c23bf75ff715919332fdbb6a32b8"}, + {file = "frozenlist-1.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:8969190d709e7c48ea386db202d708eb94bdb29207a1f269bab1196ce0dcca1f"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7a1a048f9215c90973402e26c01d1cff8a209e1f1b53f72b95c13db61b00f953"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:dd47a5181ce5fcb463b5d9e17ecfdb02b678cca31280639255ce9d0e5aa67af0"}, + {file = "frozenlist-1.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:1431d60b36d15cda188ea222033eec8e0eab488f39a272461f2e6d9e1a8e63c2"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6482a5851f5d72767fbd0e507e80737f9c8646ae7fd303def99bfe813f76cf7f"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44c49271a937625619e862baacbd037a7ef86dd1ee215afc298a417ff3270608"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:12f78f98c2f1c2429d42e6a485f433722b0061d5c0b0139efa64f396efb5886b"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce3aa154c452d2467487765e3adc730a8c153af77ad84096bc19ce19a2400840"}, + {file = "frozenlist-1.5.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b7dc0c4338e6b8b091e8faf0db3168a37101943e687f373dce00959583f7439"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:45e0896250900b5aa25180f9aec243e84e92ac84bd4a74d9ad4138ef3f5c97de"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:561eb1c9579d495fddb6da8959fd2a1fca2c6d060d4113f5844b433fc02f2641"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:df6e2f325bfee1f49f81aaac97d2aa757c7646534a06f8f577ce184afe2f0a9e"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:140228863501b44b809fb39ec56b5d4071f4d0aa6d216c19cbb08b8c5a7eadb9"}, + {file = "frozenlist-1.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7707a25d6a77f5d27ea7dc7d1fc608aa0a478193823f88511ef5e6b8a48f9d03"}, + {file = "frozenlist-1.5.0-cp313-cp313-win32.whl", hash = "sha256:31a9ac2b38ab9b5a8933b693db4939764ad3f299fcaa931a3e605bc3460e693c"}, + {file = "frozenlist-1.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:11aabdd62b8b9c4b84081a3c246506d1cddd2dd93ff0ad53ede5defec7886b28"}, + {file = "frozenlist-1.5.0-py3-none-any.whl", hash = "sha256:d994863bba198a4a518b467bb971c56e1db3f180a25c6cf7bb1949c267f748c3"}, + {file = "frozenlist-1.5.0.tar.gz", hash = "sha256:81d5af29e61b9c8348e876d442253723928dce6433e0e76cd925cd83f1b4b817"}, ] [[package]] name = "fsspec" -version = "2024.9.0" +version = "2024.10.0" requires_python = ">=3.8" summary = "File-system specification" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = 
"fsspec-2024.9.0-py3-none-any.whl", hash = "sha256:a0947d552d8a6efa72cc2c730b12c41d043509156966cca4fb157b0f2a0c574b"}, - {file = "fsspec-2024.9.0.tar.gz", hash = "sha256:4b0afb90c2f21832df142f292649035d80b421f60a9e1c027802e5a0da2b04e8"}, + {file = "fsspec-2024.10.0-py3-none-any.whl", hash = "sha256:03b9a6785766a4de40368b88906366755e2819e758b83705c88cd7cb5fe81871"}, + {file = "fsspec-2024.10.0.tar.gz", hash = "sha256:eda2d8a4116d4f2429db8550f2457da57279247dd930bb12f821b58391359493"}, ] [[package]] @@ -782,80 +671,80 @@ files = [ [[package]] name = "google-auth" -version = "2.35.0" +version = "2.36.0" requires_python = ">=3.7" summary = "Google Authentication Library" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "cachetools<6.0,>=2.0.0", "pyasn1-modules>=0.2.1", "rsa<5,>=3.1.4", ] files = [ - {file = "google_auth-2.35.0-py2.py3-none-any.whl", hash = "sha256:25df55f327ef021de8be50bad0dfd4a916ad0de96da86cd05661c9297723ad3f"}, - {file = "google_auth-2.35.0.tar.gz", hash = "sha256:f4c64ed4e01e8e8b646ef34c018f8bf3338df0c8e37d8b3bba40e7f574a3278a"}, + {file = "google_auth-2.36.0-py2.py3-none-any.whl", hash = "sha256:51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb"}, + {file = "google_auth-2.36.0.tar.gz", hash = "sha256:545e9618f2df0bcbb7dcbc45a546485b1212624716975a1ea5ae8149ce769ab1"}, ] [[package]] name = "googleapis-common-protos" -version = "1.65.0" +version = "1.66.0" requires_python = ">=3.7" summary = "Common protobufs used in Google APIs" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "protobuf!=3.20.0,!=3.20.1,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0.dev0,>=3.20.2", ] files = [ - {file = "googleapis_common_protos-1.65.0-py2.py3-none-any.whl", hash = "sha256:2972e6c496f435b92590fd54045060867f3fe9be2c82ab148fc8885035479a63"}, - {file = "googleapis_common_protos-1.65.0.tar.gz", hash = "sha256:334a29d07cddc3aa01dee4988f9afd9b2916ee2ff49d6b757155dc0d197852c0"}, + {file = "googleapis_common_protos-1.66.0-py2.py3-none-any.whl", hash = "sha256:d7abcd75fabb2e0ec9f74466401f6c119a0b498e27370e9be4c94cb7e382b8ed"}, + {file = "googleapis_common_protos-1.66.0.tar.gz", hash = "sha256:c3e7b33d15fdca5374cc0a7346dd92ffa847425cc4ea941d970f13680052ec8c"}, ] [[package]] name = "gradio" -version = "4.44.1" -requires_python = ">=3.8" +version = "5.6.0" +requires_python = ">=3.10" summary = "Python library for easily interacting with trained machine learning models" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "aiofiles<24.0,>=22.0", "anyio<5.0,>=3.0", - "fastapi<1.0", + "audioop-lts<1.0; python_version >= \"3.13\"", + "fastapi<1.0,>=0.115.2", "ffmpy", - "gradio-client==1.3.0", + "gradio-client==1.4.3", "httpx>=0.24.1", - "huggingface-hub>=0.19.3", - "importlib-resources<7.0,>=1.3", + "huggingface-hub>=0.25.1", "jinja2<4.0", "markupsafe~=2.0", - "matplotlib~=3.0", "numpy<3.0,>=1.0", "orjson~=3.0", "packaging", "pandas<3.0,>=1.0", - "pillow<11.0,>=8.0", + "pillow<12.0,>=8.0", "pydantic>=2.0", "pydub", - "python-multipart>=0.0.9", + "python-multipart==0.0.12", "pyyaml<7.0,>=5.0", "ruff>=0.2.2; sys_platform != \"emscripten\"", + "safehttpx<1.0,>=0.1.1", "semantic-version~=2.0", + "starlette<1.0,>=0.40.0; sys_platform != \"emscripten\"", "tomlkit==0.12.0", "typer<1.0,>=0.12; sys_platform != \"emscripten\"", "typing-extensions~=4.0", - "urllib3~=2.0", + "urllib3~=2.0; sys_platform == \"emscripten\"", "uvicorn>=0.14.0; sys_platform != \"emscripten\"", ] files = [ - {file = "gradio-4.44.1-py3-none-any.whl", hash = 
"sha256:c908850c638e4a176b22f95a758ce6a63ffbc2a7a5a74b23186ceeeedc23f4d9"}, - {file = "gradio-4.44.1.tar.gz", hash = "sha256:a68a52498ac6b63f8864ef84bf7866a70e7d07ebe913edf921e1d2a3708ad5ae"}, + {file = "gradio-5.6.0-py3-none-any.whl", hash = "sha256:6eba135b32fb2fcb5d83fa845f6ad57c033894d5218fd3010de8fb4d735b9b6d"}, ] [[package]] name = "gradio-client" -version = "1.3.0" -requires_python = ">=3.8" +version = "1.4.3" +requires_python = ">=3.10" summary = "Python library for easily interacting with trained machine learning models" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "fsspec", "httpx>=0.24.1", @@ -865,8 +754,8 @@ dependencies = [ "websockets<13.0,>=10.0", ] files = [ - {file = "gradio_client-1.3.0-py3-none-any.whl", hash = "sha256:20c40cb4d56e18de1a025ccf58079f08a304e4fb2dfbcf7c2352815b2cb31091"}, - {file = "gradio_client-1.3.0.tar.gz", hash = "sha256:d904afeae4f5682add0a6a263542c10e7669ff6c9de0a53a5c2fc9b719a24bb8"}, + {file = "gradio_client-1.4.3-py3-none-any.whl", hash = "sha256:a76990277cab011f1c49391bdf33670bbbbb32b3503971ae5d91281e7946bdc1"}, + {file = "gradio_client-1.4.3.tar.gz", hash = "sha256:55fd446ae80a5320f164b6913e8862b398fca9097d33b76bd98efb61cbd449c3"}, ] [[package]] @@ -874,17 +763,8 @@ name = "greenlet" version = "3.1.1" requires_python = ">=3.7" summary = "Lightweight in-process concurrent programming" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = "greenlet-3.1.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:0bbae94a29c9e5c7e4a2b7f0aae5c17e8e90acbfd3bf6270eeba60c39fce3563"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0fde093fb93f35ca72a556cf72c92ea3ebfda3d79fc35bb19fbe685853869a83"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36b89d13c49216cadb828db8dfa6ce86bbbc476a82d3a6c397f0efae0525bdd0"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:94b6150a85e1b33b40b1464a3f9988dcc5251d6ed06842abff82e42632fac120"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93147c513fac16385d1036b7e5b102c7fbbdb163d556b791f0f11eada7ba65dc"}, - {file = "greenlet-3.1.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da7a9bff22ce038e19bf62c4dd1ec8391062878710ded0a845bcf47cc0200617"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:b2795058c23988728eec1f36a4e5e4ebad22f8320c85f3587b539b9ac84128d7"}, - {file = "greenlet-3.1.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ed10eac5830befbdd0c32f83e8aa6288361597550ba669b04c48f0f9a2c843c6"}, - {file = "greenlet-3.1.1-cp310-cp310-win_amd64.whl", hash = "sha256:77c386de38a60d1dfb8e55b8c1101d68c79dfdd25c7095d51fec2dd800892b80"}, {file = "greenlet-3.1.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:e4d333e558953648ca09d64f13e6d8f0523fa705f51cae3f03b5983489958c70"}, {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09fc016b73c94e98e29af67ab7b9a879c307c6731a2c9da0db5a7d9b7edd1159"}, {file = "greenlet-3.1.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d5e975ca70269d66d17dd995dafc06f1b06e8cb1ec1e9ed54c1d1e4a7c4cf26e"}, @@ -924,48 +804,39 @@ files = [ [[package]] name = "grpcio" -version = "1.66.2" +version = "1.68.0" requires_python = ">=3.8" summary = "HTTP/2-based RPC framework" -groups = 
["default"] -files = [ - {file = "grpcio-1.66.2-cp310-cp310-linux_armv7l.whl", hash = "sha256:fe96281713168a3270878255983d2cb1a97e034325c8c2c25169a69289d3ecfa"}, - {file = "grpcio-1.66.2-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:73fc8f8b9b5c4a03e802b3cd0c18b2b06b410d3c1dcbef989fdeb943bd44aff7"}, - {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:03b0b307ba26fae695e067b94cbb014e27390f8bc5ac7a3a39b7723fed085604"}, - {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d69ce1f324dc2d71e40c9261d3fdbe7d4c9d60f332069ff9b2a4d8a257c7b2b"}, - {file = "grpcio-1.66.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05bc2ceadc2529ab0b227b1310d249d95d9001cd106aa4d31e8871ad3c428d73"}, - {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8ac475e8da31484efa25abb774674d837b343afb78bb3bcdef10f81a93e3d6bf"}, - {file = "grpcio-1.66.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0be4e0490c28da5377283861bed2941d1d20ec017ca397a5df4394d1c31a9b50"}, - {file = "grpcio-1.66.2-cp310-cp310-win32.whl", hash = "sha256:4e504572433f4e72b12394977679161d495c4c9581ba34a88d843eaf0f2fbd39"}, - {file = "grpcio-1.66.2-cp310-cp310-win_amd64.whl", hash = "sha256:2018b053aa15782db2541ca01a7edb56a0bf18c77efed975392583725974b249"}, - {file = "grpcio-1.66.2-cp311-cp311-linux_armv7l.whl", hash = "sha256:2335c58560a9e92ac58ff2bc5649952f9b37d0735608242973c7a8b94a6437d8"}, - {file = "grpcio-1.66.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:45a3d462826f4868b442a6b8fdbe8b87b45eb4f5b5308168c156b21eca43f61c"}, - {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:a9539f01cb04950fd4b5ab458e64a15f84c2acc273670072abe49a3f29bbad54"}, - {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ce89f5876662f146d4c1f695dda29d4433a5d01c8681fbd2539afff535da14d4"}, - {file = "grpcio-1.66.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d25a14af966438cddf498b2e338f88d1c9706f3493b1d73b93f695c99c5f0e2a"}, - {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:6001e575b8bbd89eee11960bb640b6da6ae110cf08113a075f1e2051cc596cae"}, - {file = "grpcio-1.66.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4ea1d062c9230278793820146c95d038dc0f468cbdd172eec3363e42ff1c7d01"}, - {file = "grpcio-1.66.2-cp311-cp311-win32.whl", hash = "sha256:38b68498ff579a3b1ee8f93a05eb48dc2595795f2f62716e797dc24774c1aaa8"}, - {file = "grpcio-1.66.2-cp311-cp311-win_amd64.whl", hash = "sha256:6851de821249340bdb100df5eacfecfc4e6075fa85c6df7ee0eb213170ec8e5d"}, - {file = "grpcio-1.66.2-cp312-cp312-linux_armv7l.whl", hash = "sha256:802d84fd3d50614170649853d121baaaa305de7b65b3e01759247e768d691ddf"}, - {file = "grpcio-1.66.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:80fd702ba7e432994df208f27514280b4b5c6843e12a48759c9255679ad38db8"}, - {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:12fda97ffae55e6526825daf25ad0fa37483685952b5d0f910d6405c87e3adb6"}, - {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:950da58d7d80abd0ea68757769c9db0a95b31163e53e5bb60438d263f4bed7b7"}, - {file = "grpcio-1.66.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e636ce23273683b00410f1971d209bf3689238cf5538d960adc3cdfe80dd0dbd"}, - {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_i686.whl", hash = 
"sha256:a917d26e0fe980b0ac7bfcc1a3c4ad6a9a4612c911d33efb55ed7833c749b0ee"}, - {file = "grpcio-1.66.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:49f0ca7ae850f59f828a723a9064cadbed90f1ece179d375966546499b8a2c9c"}, - {file = "grpcio-1.66.2-cp312-cp312-win32.whl", hash = "sha256:31fd163105464797a72d901a06472860845ac157389e10f12631025b3e4d0453"}, - {file = "grpcio-1.66.2-cp312-cp312-win_amd64.whl", hash = "sha256:ff1f7882e56c40b0d33c4922c15dfa30612f05fb785074a012f7cda74d1c3679"}, - {file = "grpcio-1.66.2-cp313-cp313-linux_armv7l.whl", hash = "sha256:3b00efc473b20d8bf83e0e1ae661b98951ca56111feb9b9611df8efc4fe5d55d"}, - {file = "grpcio-1.66.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:1caa38fb22a8578ab8393da99d4b8641e3a80abc8fd52646f1ecc92bcb8dee34"}, - {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:c408f5ef75cfffa113cacd8b0c0e3611cbfd47701ca3cdc090594109b9fcbaed"}, - {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c806852deaedee9ce8280fe98955c9103f62912a5b2d5ee7e3eaa284a6d8d8e7"}, - {file = "grpcio-1.66.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f145cc21836c332c67baa6fc81099d1d27e266401565bf481948010d6ea32d46"}, - {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:73e3b425c1e155730273f73e419de3074aa5c5e936771ee0e4af0814631fb30a"}, - {file = "grpcio-1.66.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:9c509a4f78114cbc5f0740eb3d7a74985fd2eff022971bc9bc31f8bc93e66a3b"}, - {file = "grpcio-1.66.2-cp313-cp313-win32.whl", hash = "sha256:20657d6b8cfed7db5e11b62ff7dfe2e12064ea78e93f1434d61888834bc86d75"}, - {file = "grpcio-1.66.2-cp313-cp313-win_amd64.whl", hash = "sha256:fb70487c95786e345af5e854ffec8cb8cc781bcc5df7930c4fbb7feaa72e1cdf"}, - {file = "grpcio-1.66.2.tar.gz", hash = "sha256:563588c587b75c34b928bc428548e5b00ea38c46972181a4d8b75ba7e3f24231"}, +groups = ["chat_with_repo"] +files = [ + {file = "grpcio-1.68.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:3b2b559beb2d433129441783e5f42e3be40a9e1a89ec906efabf26591c5cd415"}, + {file = "grpcio-1.68.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e46541de8425a4d6829ac6c5d9b16c03c292105fe9ebf78cb1c31e8d242f9155"}, + {file = "grpcio-1.68.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:c1245651f3c9ea92a2db4f95d37b7597db6b246d5892bca6ee8c0e90d76fb73c"}, + {file = "grpcio-1.68.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f1931c7aa85be0fa6cea6af388e576f3bf6baee9e5d481c586980c774debcb4"}, + {file = "grpcio-1.68.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b0ff09c81e3aded7a183bc6473639b46b6caa9c1901d6f5e2cba24b95e59e30"}, + {file = "grpcio-1.68.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8c73f9fbbaee1a132487e31585aa83987ddf626426d703ebcb9a528cf231c9b1"}, + {file = "grpcio-1.68.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6b2f98165ea2790ea159393a2246b56f580d24d7da0d0342c18a085299c40a75"}, + {file = "grpcio-1.68.0-cp311-cp311-win32.whl", hash = "sha256:e1e7ed311afb351ff0d0e583a66fcb39675be112d61e7cfd6c8269884a98afbc"}, + {file = "grpcio-1.68.0-cp311-cp311-win_amd64.whl", hash = "sha256:e0d2f68eaa0a755edd9a47d40e50dba6df2bceda66960dee1218da81a2834d27"}, + {file = "grpcio-1.68.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8af6137cc4ae8e421690d276e7627cfc726d4293f6607acf9ea7260bd8fc3d7d"}, + {file = "grpcio-1.68.0-cp312-cp312-macosx_10_9_universal2.whl", hash = 
"sha256:4028b8e9a3bff6f377698587d642e24bd221810c06579a18420a17688e421af7"}, + {file = "grpcio-1.68.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:f60fa2adf281fd73ae3a50677572521edca34ba373a45b457b5ebe87c2d01e1d"}, + {file = "grpcio-1.68.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e18589e747c1e70b60fab6767ff99b2d0c359ea1db8a2cb524477f93cdbedf5b"}, + {file = "grpcio-1.68.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e0d30f3fee9372796f54d3100b31ee70972eaadcc87314be369360248a3dcffe"}, + {file = "grpcio-1.68.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:7e0a3e72c0e9a1acab77bef14a73a416630b7fd2cbd893c0a873edc47c42c8cd"}, + {file = "grpcio-1.68.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a831dcc343440969aaa812004685ed322cdb526cd197112d0db303b0da1e8659"}, + {file = "grpcio-1.68.0-cp312-cp312-win32.whl", hash = "sha256:5a180328e92b9a0050958ced34dddcb86fec5a8b332f5a229e353dafc16cd332"}, + {file = "grpcio-1.68.0-cp312-cp312-win_amd64.whl", hash = "sha256:2bddd04a790b69f7a7385f6a112f46ea0b34c4746f361ebafe9ca0be567c78e9"}, + {file = "grpcio-1.68.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:fc05759ffbd7875e0ff2bd877be1438dfe97c9312bbc558c8284a9afa1d0f40e"}, + {file = "grpcio-1.68.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:15fa1fe25d365a13bc6d52fcac0e3ee1f9baebdde2c9b3b2425f8a4979fccea1"}, + {file = "grpcio-1.68.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:32a9cb4686eb2e89d97022ecb9e1606d132f85c444354c17a7dbde4a455e4a3b"}, + {file = "grpcio-1.68.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dba037ff8d284c8e7ea9a510c8ae0f5b016004f13c3648f72411c464b67ff2fb"}, + {file = "grpcio-1.68.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0efbbd849867e0e569af09e165363ade75cf84f5229b2698d53cf22c7a4f9e21"}, + {file = "grpcio-1.68.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:4e300e6978df0b65cc2d100c54e097c10dfc7018b9bd890bbbf08022d47f766d"}, + {file = "grpcio-1.68.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:6f9c7ad1a23e1047f827385f4713b5b8c6c7d325705be1dd3e31fb00dcb2f665"}, + {file = "grpcio-1.68.0-cp313-cp313-win32.whl", hash = "sha256:3ac7f10850fd0487fcce169c3c55509101c3bde2a3b454869639df2176b60a03"}, + {file = "grpcio-1.68.0-cp313-cp313-win_amd64.whl", hash = "sha256:afbf45a62ba85a720491bfe9b2642f8761ff348006f5ef67e4622621f116b04a"}, + {file = "grpcio-1.68.0.tar.gz", hash = "sha256:7e7483d39b4a4fddb9906671e9ea21aaad4f031cdfc349fec76bdfa1e404543a"}, ] [[package]] @@ -973,7 +844,7 @@ name = "h11" version = "0.14.0" requires_python = ">=3.7" summary = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "typing-extensions; python_version < \"3.8\"", ] @@ -984,48 +855,48 @@ files = [ [[package]] name = "httpcore" -version = "1.0.6" +version = "1.0.7" requires_python = ">=3.8" summary = "A minimal low-level HTTP client." 
-groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "certifi", "h11<0.15,>=0.13", ] files = [ - {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, - {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, + {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"}, + {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"}, ] [[package]] name = "httptools" -version = "0.6.1" +version = "0.6.4" requires_python = ">=3.8.0" summary = "A collection of framework independent HTTP protocol utils." -groups = ["default"] -files = [ - {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"}, - {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"}, - {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"}, - {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"}, - {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"}, - {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"}, - {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"}, - {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"}, - {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"}, - {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"}, - {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"}, - {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"}, - {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"}, - {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"}, - {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"}, - {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"}, - {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"}, - {file = 
"httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"}, - {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"}, - {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"}, - {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"}, - {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"}, +groups = ["chat_with_repo"] +files = [ + {file = "httptools-0.6.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f47f8ed67cc0ff862b84a1189831d1d33c963fb3ce1ee0c65d3b0cbe7b711069"}, + {file = "httptools-0.6.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0614154d5454c21b6410fdf5262b4a3ddb0f53f1e1721cfd59d55f32138c578a"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8787367fbdfccae38e35abf7641dafc5310310a5987b689f4c32cc8cc3ee975"}, + {file = "httptools-0.6.4-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b0f7fe4fd38e6a507bdb751db0379df1e99120c65fbdc8ee6c1d044897a636"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:40a5ec98d3f49904b9fe36827dcf1aadfef3b89e2bd05b0e35e94f97c2b14721"}, + {file = "httptools-0.6.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:dacdd3d10ea1b4ca9df97a0a303cbacafc04b5cd375fa98732678151643d4988"}, + {file = "httptools-0.6.4-cp311-cp311-win_amd64.whl", hash = "sha256:288cd628406cc53f9a541cfaf06041b4c71d751856bab45e3702191f931ccd17"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:df017d6c780287d5c80601dafa31f17bddb170232d85c066604d8558683711a2"}, + {file = "httptools-0.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:85071a1e8c2d051b507161f6c3e26155b5c790e4e28d7f236422dbacc2a9cc44"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69422b7f458c5af875922cdb5bd586cc1f1033295aa9ff63ee196a87519ac8e1"}, + {file = "httptools-0.6.4-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:16e603a3bff50db08cd578d54f07032ca1631450ceb972c2f834c2b860c28ea2"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec4f178901fa1834d4a060320d2f3abc5c9e39766953d038f1458cb885f47e81"}, + {file = "httptools-0.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f9eb89ecf8b290f2e293325c646a211ff1c2493222798bb80a530c5e7502494f"}, + {file = "httptools-0.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:db78cb9ca56b59b016e64b6031eda5653be0589dba2b1b43453f6e8b405a0970"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ade273d7e767d5fae13fa637f4d53b6e961fb7fd93c7797562663f0171c26660"}, + {file = "httptools-0.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:856f4bc0478ae143bad54a4242fccb1f3f86a6e1be5548fecfd4102061b3a083"}, + {file = "httptools-0.6.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:322d20ea9cdd1fa98bd6a74b77e2ec5b818abdc3d36695ab402a0de8ef2865a3"}, + {file = 
"httptools-0.6.4-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d87b29bd4486c0093fc64dea80231f7c7f7eb4dc70ae394d70a495ab8436071"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:342dd6946aa6bda4b8f18c734576106b8a31f2fe31492881a9a160ec84ff4bd5"}, + {file = "httptools-0.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b36913ba52008249223042dca46e69967985fb4051951f94357ea681e1f5dc0"}, + {file = "httptools-0.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:28908df1b9bb8187393d5b5db91435ccc9c8e891657f9cbb42a2541b44c82fc8"}, + {file = "httptools-0.6.4.tar.gz", hash = "sha256:4e93eee4add6493b59a5c514da98c939b244fce4a0d8879cd3f466562f4b7d5c"}, ] [[package]] @@ -1033,7 +904,7 @@ name = "httpx" version = "0.27.2" requires_python = ">=3.8" summary = "The next generation HTTP client." -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "anyio", "certifi", @@ -1048,10 +919,10 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.25.1" +version = "0.26.2" requires_python = ">=3.8.0" summary = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "filelock", "fsspec>=2023.5.0", @@ -1062,8 +933,8 @@ dependencies = [ "typing-extensions>=3.7.4.3", ] files = [ - {file = "huggingface_hub-0.25.1-py3-none-any.whl", hash = "sha256:a5158ded931b3188f54ea9028097312cb0acd50bffaaa2612014c3c526b44972"}, - {file = "huggingface_hub-0.25.1.tar.gz", hash = "sha256:9ff7cb327343211fbd06e2b149b8f362fd1e389454f3f14c6db75a4999ee20ff"}, + {file = "huggingface_hub-0.26.2-py3-none-any.whl", hash = "sha256:98c2a5a8e786c7b2cb6fdeb2740893cba4d53e312572ed3d8afafda65b128c46"}, + {file = "huggingface_hub-0.26.2.tar.gz", hash = "sha256:b100d853465d965733964d123939ba287da60a547087783ddff8a323f340332b"}, ] [[package]] @@ -1071,7 +942,7 @@ name = "humanfriendly" version = "10.0" requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" summary = "Human friendly output for text interfaces using Python" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "monotonic; python_version == \"2.7\"", "pyreadline3; sys_platform == \"win32\" and python_version >= \"3.8\"", @@ -1087,7 +958,7 @@ name = "idna" version = "3.10" requires_python = ">=3.6" summary = "Internationalized Domain Names in Applications (IDNA)" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -1095,17 +966,17 @@ files = [ [[package]] name = "importlib-metadata" -version = "8.4.0" +version = "8.5.0" requires_python = ">=3.8" summary = "Read metadata from Python packages" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "typing-extensions>=3.6.4; python_version < \"3.8\"", - "zipp>=0.5", + "zipp>=3.20", ] files = [ - {file = "importlib_metadata-8.4.0-py3-none-any.whl", hash = "sha256:66f342cc6ac9818fc6ff340576acd24d65ba0b3efabb2b4ac08b598965a4a2f1"}, - {file = "importlib_metadata-8.4.0.tar.gz", hash = "sha256:9a547d3bc3608b025f93d403fdd1aae741c24fbb8314df4b155675742ce303c5"}, + {file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"}, + {file = 
"importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"}, ] [[package]] @@ -1113,7 +984,7 @@ name = "importlib-resources" version = "6.4.5" requires_python = ">=3.8" summary = "Read resources from Python packages" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "zipp>=3.1.0; python_version < \"3.10\"", ] @@ -1135,16 +1006,16 @@ files = [ [[package]] name = "jedi" -version = "0.19.1" +version = "0.19.2" requires_python = ">=3.6" summary = "An autocompletion tool for Python that can be used for text editors." groups = ["default"] dependencies = [ - "parso<0.9.0,>=0.8.3", + "parso<0.9.0,>=0.8.4", ] files = [ - {file = "jedi-0.19.1-py2.py3-none-any.whl", hash = "sha256:e983c654fe5c02867aef4cdfce5a2fbb4a50adc0af145f70504238f18ef5e7e0"}, - {file = "jedi-0.19.1.tar.gz", hash = "sha256:cf0496f3651bc65d7174ac1b7d043eff454892c708a87d1b683e57b569927ffd"}, + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, ] [[package]] @@ -1152,7 +1023,7 @@ name = "jinja2" version = "3.1.4" requires_python = ">=3.7" summary = "A very fast and expressive template engine." -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "MarkupSafe>=2.0", ] @@ -1163,60 +1034,48 @@ files = [ [[package]] name = "jiter" -version = "0.6.1" +version = "0.7.1" requires_python = ">=3.8" summary = "Fast iterable JSON parser." -groups = ["default"] -files = [ - {file = "jiter-0.6.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d08510593cb57296851080018006dfc394070178d238b767b1879dc1013b106c"}, - {file = "jiter-0.6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adef59d5e2394ebbad13b7ed5e0306cceb1df92e2de688824232a91588e77aa7"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b3e02f7a27f2bcc15b7d455c9df05df8ffffcc596a2a541eeda9a3110326e7a3"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed69a7971d67b08f152c17c638f0e8c2aa207e9dd3a5fcd3cba294d39b5a8d2d"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2019d966e98f7c6df24b3b8363998575f47d26471bfb14aade37630fae836a1"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36c0b51a285b68311e207a76c385650322734c8717d16c2eb8af75c9d69506e7"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:220e0963b4fb507c525c8f58cde3da6b1be0bfddb7ffd6798fb8f2531226cdb1"}, - {file = "jiter-0.6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:aa25c7a9bf7875a141182b9c95aed487add635da01942ef7ca726e42a0c09058"}, - {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e90552109ca8ccd07f47ca99c8a1509ced93920d271bb81780a973279974c5ab"}, - {file = "jiter-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:67723a011964971864e0b484b0ecfee6a14de1533cff7ffd71189e92103b38a8"}, - {file = "jiter-0.6.1-cp310-none-win32.whl", hash = "sha256:33af2b7d2bf310fdfec2da0177eab2fedab8679d1538d5b86a633ebfbbac4edd"}, - {file = "jiter-0.6.1-cp310-none-win_amd64.whl", hash = "sha256:7cea41c4c673353799906d940eee8f2d8fd1d9561d734aa921ae0f75cb9732f4"}, - {file = "jiter-0.6.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = 
"sha256:b03c24e7da7e75b170c7b2b172d9c5e463aa4b5c95696a368d52c295b3f6847f"}, - {file = "jiter-0.6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:47fee1be677b25d0ef79d687e238dc6ac91a8e553e1a68d0839f38c69e0ee491"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25f0d2f6e01a8a0fb0eab6d0e469058dab2be46ff3139ed2d1543475b5a1d8e7"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b809e39e342c346df454b29bfcc7bca3d957f5d7b60e33dae42b0e5ec13e027"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e9ac7c2f092f231f5620bef23ce2e530bd218fc046098747cc390b21b8738a7a"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e51a2d80d5fe0ffb10ed2c82b6004458be4a3f2b9c7d09ed85baa2fbf033f54b"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3343d4706a2b7140e8bd49b6c8b0a82abf9194b3f0f5925a78fc69359f8fc33c"}, - {file = "jiter-0.6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82521000d18c71e41c96960cb36e915a357bc83d63a8bed63154b89d95d05ad1"}, - {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3c843e7c1633470708a3987e8ce617ee2979ee18542d6eb25ae92861af3f1d62"}, - {file = "jiter-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a2e861658c3fe849efc39b06ebb98d042e4a4c51a8d7d1c3ddc3b1ea091d0784"}, - {file = "jiter-0.6.1-cp311-none-win32.whl", hash = "sha256:7d72fc86474862c9c6d1f87b921b70c362f2b7e8b2e3c798bb7d58e419a6bc0f"}, - {file = "jiter-0.6.1-cp311-none-win_amd64.whl", hash = "sha256:3e36a320634f33a07794bb15b8da995dccb94f944d298c8cfe2bd99b1b8a574a"}, - {file = "jiter-0.6.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1fad93654d5a7dcce0809aff66e883c98e2618b86656aeb2129db2cd6f26f867"}, - {file = "jiter-0.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4e6e340e8cd92edab7f6a3a904dbbc8137e7f4b347c49a27da9814015cc0420c"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:691352e5653af84ed71763c3c427cff05e4d658c508172e01e9c956dfe004aba"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:defee3949313c1f5b55e18be45089970cdb936eb2a0063f5020c4185db1b63c9"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26d2bdd5da097e624081c6b5d416d3ee73e5b13f1703bcdadbb1881f0caa1933"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18aa9d1626b61c0734b973ed7088f8a3d690d0b7f5384a5270cd04f4d9f26c86"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a3567c8228afa5ddcce950631c6b17397ed178003dc9ee7e567c4c4dcae9fa0"}, - {file = "jiter-0.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5c0507131c922defe3f04c527d6838932fcdfd69facebafd7d3574fa3395314"}, - {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:540fcb224d7dc1bcf82f90f2ffb652df96f2851c031adca3c8741cb91877143b"}, - {file = "jiter-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e7b75436d4fa2032b2530ad989e4cb0ca74c655975e3ff49f91a1a3d7f4e1df2"}, - {file = "jiter-0.6.1-cp312-none-win32.whl", hash = "sha256:883d2ced7c21bf06874fdeecab15014c1c6d82216765ca6deef08e335fa719e0"}, - {file = "jiter-0.6.1-cp312-none-win_amd64.whl", hash = 
"sha256:91e63273563401aadc6c52cca64a7921c50b29372441adc104127b910e98a5b6"}, - {file = "jiter-0.6.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:852508a54fe3228432e56019da8b69208ea622a3069458252f725d634e955b31"}, - {file = "jiter-0.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f491cc69ff44e5a1e8bc6bf2b94c1f98d179e1aaf4a554493c171a5b2316b701"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc56c8f0b2a28ad4d8047f3ae62d25d0e9ae01b99940ec0283263a04724de1f3"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:51b58f7a0d9e084a43b28b23da2b09fc5e8df6aa2b6a27de43f991293cab85fd"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f79ce15099154c90ef900d69c6b4c686b64dfe23b0114e0971f2fecd306ec6c"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:03a025b52009f47e53ea619175d17e4ded7c035c6fbd44935cb3ada11e1fd592"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c74a8d93718137c021d9295248a87c2f9fdc0dcafead12d2930bc459ad40f885"}, - {file = "jiter-0.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:40b03b75f903975f68199fc4ec73d546150919cb7e534f3b51e727c4d6ccca5a"}, - {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:825651a3f04cf92a661d22cad61fc913400e33aa89b3e3ad9a6aa9dc8a1f5a71"}, - {file = "jiter-0.6.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:928bf25eb69ddb292ab8177fe69d3fbf76c7feab5fce1c09265a7dccf25d3991"}, - {file = "jiter-0.6.1-cp313-none-win32.whl", hash = "sha256:352cd24121e80d3d053fab1cc9806258cad27c53cad99b7a3cac57cf934b12e4"}, - {file = "jiter-0.6.1-cp313-none-win_amd64.whl", hash = "sha256:be7503dd6f4bf02c2a9bacb5cc9335bc59132e7eee9d3e931b13d76fd80d7fda"}, - {file = "jiter-0.6.1.tar.gz", hash = "sha256:e19cd21221fc139fb032e4112986656cb2739e9fe6d84c13956ab30ccc7d4449"}, +groups = ["default", "chat_with_repo"] +files = [ + {file = "jiter-0.7.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ad04a23a91f3d10d69d6c87a5f4471b61c2c5cd6e112e85136594a02043f462c"}, + {file = "jiter-0.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e47a554de88dff701226bb5722b7f1b6bccd0b98f1748459b7e56acac2707a5"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e44fff69c814a2e96a20b4ecee3e2365e9b15cf5fe4e00869d18396daa91dab"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:df0a1d05081541b45743c965436f8b5a1048d6fd726e4a030113a2699a6046ea"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f22cf8f236a645cb6d8ffe2a64edb5d2b66fb148bf7c75eea0cb36d17014a7bc"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:da8589f50b728ea4bf22e0632eefa125c8aa9c38ed202a5ee6ca371f05eeb3ff"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f20de711224f2ca2dbb166a8d512f6ff48c9c38cc06b51f796520eb4722cc2ce"}, + {file = "jiter-0.7.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8a9803396032117b85ec8cbf008a54590644a062fedd0425cbdb95e4b2b60479"}, + {file = "jiter-0.7.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:3d8bae77c82741032e9d89a4026479061aba6e646de3bf5f2fc1ae2bbd9d06e0"}, + {file = 
"jiter-0.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3dc9939e576bbc68c813fc82f6620353ed68c194c7bcf3d58dc822591ec12490"}, + {file = "jiter-0.7.1-cp311-none-win32.whl", hash = "sha256:f7605d24cd6fab156ec89e7924578e21604feee9c4f1e9da34d8b67f63e54892"}, + {file = "jiter-0.7.1-cp311-none-win_amd64.whl", hash = "sha256:f3ea649e7751a1a29ea5ecc03c4ada0a833846c59c6da75d747899f9b48b7282"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ad36a1155cbd92e7a084a568f7dc6023497df781adf2390c345dd77a120905ca"}, + {file = "jiter-0.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7ba52e6aaed2dc5c81a3d9b5e4ab95b039c4592c66ac973879ba57c3506492bb"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b7de0b6f6728b678540c7927587e23f715284596724be203af952418acb8a2d"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9463b62bd53c2fb85529c700c6a3beb2ee54fde8bef714b150601616dcb184a6"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:627164ec01d28af56e1f549da84caf0fe06da3880ebc7b7ee1ca15df106ae172"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25d0e5bf64e368b0aa9e0a559c3ab2f9b67e35fe7269e8a0d81f48bbd10e8963"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c244261306f08f8008b3087059601997016549cb8bb23cf4317a4827f07b7d74"}, + {file = "jiter-0.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7ded4e4b75b68b843b7cea5cd7c55f738c20e1394c68c2cb10adb655526c5f1b"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:80dae4f1889b9d09e5f4de6b58c490d9c8ce7730e35e0b8643ab62b1538f095c"}, + {file = "jiter-0.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5970cf8ec943b51bce7f4b98d2e1ed3ada170c2a789e2db3cb484486591a176a"}, + {file = "jiter-0.7.1-cp312-none-win32.whl", hash = "sha256:701d90220d6ecb3125d46853c8ca8a5bc158de8c49af60fd706475a49fee157e"}, + {file = "jiter-0.7.1-cp312-none-win_amd64.whl", hash = "sha256:7824c3ecf9ecf3321c37f4e4d4411aad49c666ee5bc2a937071bdd80917e4533"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:097676a37778ba3c80cb53f34abd6943ceb0848263c21bf423ae98b090f6c6ba"}, + {file = "jiter-0.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3298af506d4271257c0a8f48668b0f47048d69351675dd8500f22420d4eec378"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:12fd88cfe6067e2199964839c19bd2b422ca3fd792949b8f44bb8a4e7d21946a"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dacca921efcd21939123c8ea8883a54b9fa7f6545c8019ffcf4f762985b6d0c8"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de3674a5fe1f6713a746d25ad9c32cd32fadc824e64b9d6159b3b34fd9134143"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65df9dbae6d67e0788a05b4bad5706ad40f6f911e0137eb416b9eead6ba6f044"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7ba9a358d59a0a55cccaa4957e6ae10b1a25ffdabda863c0343c51817610501d"}, + {file = "jiter-0.7.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:576eb0f0c6207e9ede2b11ec01d9c2182973986514f9c60bc3b3b5d5798c8f50"}, + {file = 
"jiter-0.7.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:e550e29cdf3577d2c970a18f3959e6b8646fd60ef1b0507e5947dc73703b5627"}, + {file = "jiter-0.7.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:81d968dbf3ce0db2e0e4dec6b0a0d5d94f846ee84caf779b07cab49f5325ae43"}, + {file = "jiter-0.7.1-cp313-none-win32.whl", hash = "sha256:f892e547e6e79a1506eb571a676cf2f480a4533675f834e9ae98de84f9b941ac"}, + {file = "jiter-0.7.1-cp313-none-win_amd64.whl", hash = "sha256:0302f0940b1455b2a7fb0409b8d5b31183db70d2b07fd177906d83bf941385d1"}, + {file = "jiter-0.7.1.tar.gz", hash = "sha256:448cf4f74f7363c34cdef26214da527e8eeffd88ba06d0b80b485ad0667baf5d"}, ] [[package]] @@ -1224,98 +1083,18 @@ name = "joblib" version = "1.4.2" requires_python = ">=3.8" summary = "Lightweight pipelining with Python functions" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "joblib-1.4.2-py3-none-any.whl", hash = "sha256:06d478d5674cbc267e7496a410ee875abd68e4340feff4490bcb7afb88060ae6"}, {file = "joblib-1.4.2.tar.gz", hash = "sha256:2382c5816b2636fbd20a09e0f4e9dad4736765fdfb7dca582943b9c1366b3f0e"}, ] -[[package]] -name = "kiwisolver" -version = "1.4.7" -requires_python = ">=3.8" -summary = "A fast implementation of the Cassowary constraint solver" -groups = ["default"] -files = [ - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8a9c83f75223d5e48b0bc9cb1bf2776cf01563e00ade8775ffe13b0b6e1af3a6"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:58370b1ffbd35407444d57057b57da5d6549d2d854fa30249771775c63b5fe17"}, - {file = "kiwisolver-1.4.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:aa0abdf853e09aff551db11fce173e2177d00786c688203f52c87ad7fcd91ef9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:8d53103597a252fb3ab8b5845af04c7a26d5e7ea8122303dd7a021176a87e8b9"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:88f17c5ffa8e9462fb79f62746428dd57b46eb931698e42e990ad63103f35e6c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88a9ca9c710d598fd75ee5de59d5bda2684d9db36a9f50b6125eaea3969c2599"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4d742cb7af1c28303a51b7a27aaee540e71bb8e24f68c736f6f2ffc82f2bf05"}, - {file = "kiwisolver-1.4.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e28c7fea2196bf4c2f8d46a0415c77a1c480cc0724722f23d7410ffe9842c407"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e968b84db54f9d42046cf154e02911e39c0435c9801681e3fc9ce8a3c4130278"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0c18ec74c0472de033e1bebb2911c3c310eef5649133dd0bedf2a169a1b269e5"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8f0ea6da6d393d8b2e187e6a5e3fb81f5862010a40c3945e2c6d12ae45cfb2ad"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:f106407dda69ae456dd1227966bf445b157ccc80ba0dff3802bb63f30b74e895"}, - {file = "kiwisolver-1.4.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:84ec80df401cfee1457063732d90022f93951944b5b58975d34ab56bb150dfb3"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win32.whl", hash = "sha256:71bb308552200fb2c195e35ef05de12f0c878c07fc91c270eb3d6e41698c3bcc"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_amd64.whl", 
hash = "sha256:44756f9fd339de0fb6ee4f8c1696cfd19b2422e0d70b4cefc1cc7f1f64045a8c"}, - {file = "kiwisolver-1.4.7-cp310-cp310-win_arm64.whl", hash = "sha256:78a42513018c41c2ffd262eb676442315cbfe3c44eed82385c2ed043bc63210a"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d2b0e12a42fb4e72d509fc994713d099cbb15ebf1103545e8a45f14da2dfca54"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2a8781ac3edc42ea4b90bc23e7d37b665d89423818e26eb6df90698aa2287c95"}, - {file = "kiwisolver-1.4.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:46707a10836894b559e04b0fd143e343945c97fd170d69a2d26d640b4e297935"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef97b8df011141c9b0f6caf23b29379f87dd13183c978a30a3c546d2c47314cb"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab58c12a2cd0fc769089e6d38466c46d7f76aced0a1f54c77652446733d2d02"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:803b8e1459341c1bb56d1c5c010406d5edec8a0713a0945851290a7930679b51"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f9a9e8a507420fe35992ee9ecb302dab68550dedc0da9e2880dd88071c5fb052"}, - {file = "kiwisolver-1.4.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18077b53dc3bb490e330669a99920c5e6a496889ae8c63b58fbc57c3d7f33a18"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6af936f79086a89b3680a280c47ea90b4df7047b5bdf3aa5c524bbedddb9e545"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3abc5b19d24af4b77d1598a585b8a719beb8569a71568b66f4ebe1fb0449460b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:933d4de052939d90afbe6e9d5273ae05fb836cc86c15b686edd4b3560cc0ee36"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:65e720d2ab2b53f1f72fb5da5fb477455905ce2c88aaa671ff0a447c2c80e8e3"}, - {file = "kiwisolver-1.4.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3bf1ed55088f214ba6427484c59553123fdd9b218a42bbc8c6496d6754b1e523"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win32.whl", hash = "sha256:4c00336b9dd5ad96d0a558fd18a8b6f711b7449acce4c157e7343ba92dd0cf3d"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_amd64.whl", hash = "sha256:929e294c1ac1e9f615c62a4e4313ca1823ba37326c164ec720a803287c4c499b"}, - {file = "kiwisolver-1.4.7-cp311-cp311-win_arm64.whl", hash = "sha256:e33e8fbd440c917106b237ef1a2f1449dfbb9b6f6e1ce17c94cd6a1e0d438376"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:5360cc32706dab3931f738d3079652d20982511f7c0ac5711483e6eab08efff2"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:942216596dc64ddb25adb215c3c783215b23626f8d84e8eff8d6d45c3f29f75a"}, - {file = "kiwisolver-1.4.7-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48b571ecd8bae15702e4f22d3ff6a0f13e54d3d00cd25216d5e7f658242065ee"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ad42ba922c67c5f219097b28fae965e10045ddf145d2928bfac2eb2e17673640"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:612a10bdae23404a72941a0fc8fa2660c6ea1217c4ce0dbcab8a8f6543ea9e7f"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e838bba3a3bac0fe06d849d29772eb1afb9745a59710762e4ba3f4cb8424483"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:22f499f6157236c19f4bbbd472fa55b063db77a16cd74d49afe28992dff8c258"}, - {file = "kiwisolver-1.4.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:693902d433cf585133699972b6d7c42a8b9f8f826ebcaf0132ff55200afc599e"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4e77f2126c3e0b0d055f44513ed349038ac180371ed9b52fe96a32aa071a5107"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:657a05857bda581c3656bfc3b20e353c232e9193eb167766ad2dc58b56504948"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:4bfa75a048c056a411f9705856abfc872558e33c055d80af6a380e3658766038"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:34ea1de54beef1c104422d210c47c7d2a4999bdecf42c7b5718fbe59a4cac383"}, - {file = "kiwisolver-1.4.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:90da3b5f694b85231cf93586dad5e90e2d71b9428f9aad96952c99055582f520"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win32.whl", hash = "sha256:18e0cca3e008e17fe9b164b55735a325140a5a35faad8de92dd80265cd5eb80b"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_amd64.whl", hash = "sha256:58cb20602b18f86f83a5c87d3ee1c766a79c0d452f8def86d925e6c60fbf7bfb"}, - {file = "kiwisolver-1.4.7-cp312-cp312-win_arm64.whl", hash = "sha256:f5a8b53bdc0b3961f8b6125e198617c40aeed638b387913bf1ce78afb1b0be2a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2e6039dcbe79a8e0f044f1c39db1986a1b8071051efba3ee4d74f5b365f5226e"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a1ecf0ac1c518487d9d23b1cd7139a6a65bc460cd101ab01f1be82ecf09794b6"}, - {file = "kiwisolver-1.4.7-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7ab9ccab2b5bd5702ab0803676a580fffa2aa178c2badc5557a84cc943fcf750"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f816dd2277f8d63d79f9c8473a79fe54047bc0467754962840782c575522224d"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf8bcc23ceb5a1b624572a1623b9f79d2c3b337c8c455405ef231933a10da379"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dea0bf229319828467d7fca8c7c189780aa9ff679c94539eed7532ebe33ed37c"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c06a4c7cf15ec739ce0e5971b26c93638730090add60e183530d70848ebdd34"}, - {file = "kiwisolver-1.4.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:913983ad2deb14e66d83c28b632fd35ba2b825031f2fa4ca29675e665dfecbe1"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5337ec7809bcd0f424c6b705ecf97941c46279cf5ed92311782c7c9c2026f07f"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4c26ed10c4f6fa6ddb329a5120ba3b6db349ca192ae211e882970bfc9d91420b"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c619b101e6de2222c1fcb0531e1b17bbffbe54294bfba43ea0d411d428618c27"}, - 
{file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:073a36c8273647592ea332e816e75ef8da5c303236ec0167196793eb1e34657a"}, - {file = "kiwisolver-1.4.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3ce6b2b0231bda412463e152fc18335ba32faf4e8c23a754ad50ffa70e4091ee"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win32.whl", hash = "sha256:f4c9aee212bc89d4e13f58be11a56cc8036cabad119259d12ace14b34476fd07"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_amd64.whl", hash = "sha256:8a3ec5aa8e38fc4c8af308917ce12c536f1c88452ce554027e55b22cbbfbff76"}, - {file = "kiwisolver-1.4.7-cp313-cp313-win_arm64.whl", hash = "sha256:76c8094ac20ec259471ac53e774623eb62e6e1f56cd8690c67ce6ce4fcb05650"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:94252291e3fe68001b1dd747b4c0b3be12582839b95ad4d1b641924d68fd4643"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5b7dfa3b546da08a9f622bb6becdb14b3e24aaa30adba66749d38f3cc7ea9706"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bd3de6481f4ed8b734da5df134cd5a6a64fe32124fe83dde1e5b5f29fe30b1e6"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a91b5f9f1205845d488c928e8570dcb62b893372f63b8b6e98b863ebd2368ff2"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40fa14dbd66b8b8f470d5fc79c089a66185619d31645f9b0773b88b19f7223c4"}, - {file = "kiwisolver-1.4.7-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:eb542fe7933aa09d8d8f9d9097ef37532a7df6497819d16efe4359890a2f417a"}, - {file = "kiwisolver-1.4.7.tar.gz", hash = "sha256:9893ff81bd7107f7b685d3017cc6583daadb4fc26e4a888350df530e41980a60"}, -] - [[package]] name = "kubernetes" version = "31.0.0" requires_python = ">=3.6" summary = "Kubernetes python client" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "certifi>=14.05.14", "durationpy>=0.7", @@ -1336,10 +1115,10 @@ files = [ [[package]] name = "llama-index-core" -version = "0.11.16" +version = "0.11.23" requires_python = "<4.0,>=3.8.1" summary = "Interface between LLMs and your data" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "PyYAML>=6.0.1", "SQLAlchemy[asyncio]>=1.4.49", @@ -1347,6 +1126,7 @@ dependencies = [ "dataclasses-json", "deprecated>=1.2.9.3", "dirtyjson<2.0.0,>=1.0.8", + "filetype<2.0.0,>=1.2.0", "fsspec>=2023.5.0", "httpx", "nest-asyncio<2.0.0,>=1.5.8", @@ -1364,13 +1144,28 @@ dependencies = [ "wrapt", ] files = [ - {file = "llama_index_core-0.11.16-py3-none-any.whl", hash = "sha256:099ba785e357506fd5a24c1a6b8fa5286366d6c71637649fab0f9126dcea842c"}, - {file = "llama_index_core-0.11.16.tar.gz", hash = "sha256:232a5cebcc73b951d9c663bd30ed59de5356dbd8f9ab88024d19c88bdd1b3254"}, + {file = "llama_index_core-0.11.23-py3-none-any.whl", hash = "sha256:25a0cb4a055bfb348655ca4acd1b475529bd8537a7b81874ef14ed13f56e06c1"}, + {file = "llama_index_core-0.11.23.tar.gz", hash = "sha256:e150859696a0eae169fe19323f46e9a31af2c12c3182012e4d0353ea8eb06d24"}, +] + +[[package]] +name = "llama-index-embeddings-openai" +version = "0.2.5" +requires_python = "<4.0,>=3.8.1" +summary = "llama-index embeddings openai integration" +groups = ["chat_with_repo"] +dependencies = [ + "llama-index-core<0.12.0,>=0.11.0", + "openai>=1.1.0", +] +files = [ + {file = "llama_index_embeddings_openai-0.2.5-py3-none-any.whl", hash 
= "sha256:823c8311e556349ba19dda408a64a314fa3dafe0e5759709c54d33a0269aa6ba"}, + {file = "llama_index_embeddings_openai-0.2.5.tar.gz", hash = "sha256:0047dd71d747068645ed728c29312aa91b65bbe4c6142180034c64dfc5c6f6e8"}, ] [[package]] name = "llama-index-llms-openai" -version = "0.2.12" +version = "0.2.16" requires_python = "<4.0,>=3.8.1" summary = "llama-index llms openai integration" groups = ["default"] @@ -1379,8 +1174,23 @@ dependencies = [ "openai<2.0.0,>=1.40.0", ] files = [ - {file = "llama_index_llms_openai-0.2.12-py3-none-any.whl", hash = "sha256:a4dbfdd1504aecd875406b65bbc108c0e281f56b7f82019c7660b86b8a4eef50"}, - {file = "llama_index_llms_openai-0.2.12.tar.gz", hash = "sha256:4721bc1a765cc9f896b92824bb4ed14f289c19fc15765a66809078b2563a7411"}, + {file = "llama_index_llms_openai-0.2.16-py3-none-any.whl", hash = "sha256:413466acbb894bd81f8dab2037f595e92392d869eec6d8274a16d43123cac8b6"}, + {file = "llama_index_llms_openai-0.2.16.tar.gz", hash = "sha256:7c666dd27056c278a079ff45d53f1fbfc8ed363764aa7baeee2e03df47f9072a"}, +] + +[[package]] +name = "llama-index-vector-stores-chroma" +version = "0.3.0" +requires_python = "<4.0,>=3.8.1" +summary = "llama-index vector_stores chroma integration" +groups = ["chat_with_repo"] +dependencies = [ + "chromadb>=0.5.17", + "llama-index-core<0.12.0,>=0.11.0", +] +files = [ + {file = "llama_index_vector_stores_chroma-0.3.0-py3-none-any.whl", hash = "sha256:45d6283ac1e36b5793fd74633bce2de455adcd17aac8f1eab90c09285e9eec8c"}, + {file = "llama_index_vector_stores_chroma-0.3.0.tar.gz", hash = "sha256:54059216a23bd6bd1dc3af4c5986db38b2933586b042e8ee126385d7a5dece23"}, ] [[package]] @@ -1404,7 +1214,7 @@ name = "markdown" version = "3.7" requires_python = ">=3.8" summary = "Python implementation of John Gruber's Markdown." -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "importlib-metadata>=4.4; python_version < \"3.10\"", ] @@ -1418,7 +1228,7 @@ name = "markdown-it-py" version = "3.0.0" requires_python = ">=3.8" summary = "Python port of markdown-it. Markdown parsing, done right!" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "mdurl~=0.1", ] @@ -1432,18 +1242,8 @@ name = "markupsafe" version = "2.1.5" requires_python = ">=3.7" summary = "Safely add untrusted strings to HTML/XML markup." 
-groups = ["default"] +groups = ["chat_with_repo"] files = [ - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a17a92de5231666cfbe003f0e4b9b3a7ae3afb1ec2845aadc2bacc93ff85febc"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72b6be590cc35924b02c78ef34b467da4ba07e4e0f0454a2c5907f473fc50ce5"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e61659ba32cf2cf1481e575d0462554625196a1f2fc06a1c777d3f48e8865d46"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2174c595a0d73a3080ca3257b40096db99799265e1c27cc5a610743acd86d62f"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ae2ad8ae6ebee9d2d94b17fb62763125f3f374c25618198f40cbb8b525411900"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:075202fa5b72c86ad32dc7d0b56024ebdbcf2048c0ba09f1cde31bfdd57bcfff"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:598e3276b64aff0e7b3451b72e94fa3c238d452e7ddcd893c3ab324717456bad"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fce659a462a1be54d2ffcacea5e3ba2d74daa74f30f5f143fe0c58636e355fdd"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win32.whl", hash = "sha256:d9fad5155d72433c921b782e58892377c44bd6252b5af2f67f16b194987338a4"}, - {file = "MarkupSafe-2.1.5-cp310-cp310-win_amd64.whl", hash = "sha256:bf50cd79a75d181c9181df03572cdce0fbb75cc353bc350712073108cba98de5"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:629ddd2ca402ae6dbedfceeba9c46d5f7b2a61d9749597d4307f943ef198fc1f"}, {file = "MarkupSafe-2.1.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5b7b716f97b52c5a14bffdf688f971b2d5ef4029127f1ad7a513973cfd818df2"}, {file = "MarkupSafe-2.1.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ec585f69cec0aa07d945b20805be741395e28ac1627333b1c5b0105962ffced"}, @@ -1469,67 +1269,16 @@ files = [ [[package]] name = "marshmallow" -version = "3.22.0" -requires_python = ">=3.8" +version = "3.23.1" +requires_python = ">=3.9" summary = "A lightweight library for converting complex datatypes to and from native Python datatypes." 
-groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "packaging>=17.0", ] files = [ - {file = "marshmallow-3.22.0-py3-none-any.whl", hash = "sha256:71a2dce49ef901c3f97ed296ae5051135fd3febd2bf43afe0ae9a82143a494d9"}, - {file = "marshmallow-3.22.0.tar.gz", hash = "sha256:4972f529104a220bb8637d595aa4c9762afbe7f7a77d82dc58c1615d70c5823e"}, -] - -[[package]] -name = "matplotlib" -version = "3.9.2" -requires_python = ">=3.9" -summary = "Python plotting package" -groups = ["default"] -dependencies = [ - "contourpy>=1.0.1", - "cycler>=0.10", - "fonttools>=4.22.0", - "importlib-resources>=3.2.0; python_version < \"3.10\"", - "kiwisolver>=1.3.1", - "numpy>=1.23", - "packaging>=20.0", - "pillow>=8", - "pyparsing>=2.3.1", - "python-dateutil>=2.7", -] -files = [ - {file = "matplotlib-3.9.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:9d78bbc0cbc891ad55b4f39a48c22182e9bdaea7fc0e5dbd364f49f729ca1bbb"}, - {file = "matplotlib-3.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c375cc72229614632c87355366bdf2570c2dac01ac66b8ad048d2dabadf2d0d4"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d94ff717eb2bd0b58fe66380bd8b14ac35f48a98e7c6765117fe67fb7684e64"}, - {file = "matplotlib-3.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab68d50c06938ef28681073327795c5db99bb4666214d2d5f880ed11aeaded66"}, - {file = "matplotlib-3.9.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:65aacf95b62272d568044531e41de26285d54aec8cb859031f511f84bd8b495a"}, - {file = "matplotlib-3.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:3fd595f34aa8a55b7fc8bf9ebea8aa665a84c82d275190a61118d33fbc82ccae"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d8dd059447824eec055e829258ab092b56bb0579fc3164fa09c64f3acd478772"}, - {file = "matplotlib-3.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c797dac8bb9c7a3fd3382b16fe8f215b4cf0f22adccea36f1545a6d7be310b41"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d719465db13267bcef19ea8954a971db03b9f48b4647e3860e4bc8e6ed86610f"}, - {file = "matplotlib-3.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8912ef7c2362f7193b5819d17dae8629b34a95c58603d781329712ada83f9447"}, - {file = "matplotlib-3.9.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:7741f26a58a240f43bee74965c4882b6c93df3e7eb3de160126d8c8f53a6ae6e"}, - {file = "matplotlib-3.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:ae82a14dab96fbfad7965403c643cafe6515e386de723e498cf3eeb1e0b70cc7"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ac43031375a65c3196bee99f6001e7fa5bdfb00ddf43379d3c0609bdca042df9"}, - {file = "matplotlib-3.9.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:be0fc24a5e4531ae4d8e858a1a548c1fe33b176bb13eff7f9d0d38ce5112a27d"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf81de2926c2db243c9b2cbc3917619a0fc85796c6ba4e58f541df814bbf83c7"}, - {file = "matplotlib-3.9.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6ee45bc4245533111ced13f1f2cace1e7f89d1c793390392a80c139d6cf0e6c"}, - {file = "matplotlib-3.9.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:306c8dfc73239f0e72ac50e5a9cf19cc4e8e331dd0c54f5e69ca8758550f1e1e"}, - {file = "matplotlib-3.9.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:5413401594cfaff0052f9d8b1aafc6d305b4bd7c4331dccd18f561ff7e1d3bd3"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:18128cc08f0d3cfff10b76baa2f296fc28c4607368a8402de61bb3f2eb33c7d9"}, - {file = "matplotlib-3.9.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4876d7d40219e8ae8bb70f9263bcbe5714415acfdf781086601211335e24f8aa"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d9f07a80deab4bb0b82858a9e9ad53d1382fd122be8cde11080f4e7dfedb38b"}, - {file = "matplotlib-3.9.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7c0410f181a531ec4e93bbc27692f2c71a15c2da16766f5ba9761e7ae518413"}, - {file = "matplotlib-3.9.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:909645cce2dc28b735674ce0931a4ac94e12f5b13f6bb0b5a5e65e7cea2c192b"}, - {file = "matplotlib-3.9.2-cp313-cp313-win_amd64.whl", hash = "sha256:f32c7410c7f246838a77d6d1eff0c0f87f3cb0e7c4247aebea71a6d5a68cab49"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:37e51dd1c2db16ede9cfd7b5cabdfc818b2c6397c83f8b10e0e797501c963a03"}, - {file = "matplotlib-3.9.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b82c5045cebcecd8496a4d694d43f9cc84aeeb49fe2133e036b207abe73f4d30"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f053c40f94bc51bc03832a41b4f153d83f2062d88c72b5e79997072594e97e51"}, - {file = "matplotlib-3.9.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dbe196377a8248972f5cede786d4c5508ed5f5ca4a1e09b44bda889958b33f8c"}, - {file = "matplotlib-3.9.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:5816b1e1fe8c192cbc013f8f3e3368ac56fbecf02fb41b8f8559303f24c5015e"}, - {file = "matplotlib-3.9.2.tar.gz", hash = "sha256:96ab43906269ca64a6366934106fa01534454a69e471b7bf3d79083981aaab92"}, + {file = "marshmallow-3.23.1-py3-none-any.whl", hash = "sha256:fece2eb2c941180ea1b7fcbd4a83c51bfdd50093fdd3ad2585ee5e1df2508491"}, + {file = "marshmallow-3.23.1.tar.gz", hash = "sha256:3a8dfda6edd8dcdbf216c0ede1d1e78d230a6dc9c5a088f58c4083b974a0d468"}, ] [[package]] @@ -1537,7 +1286,7 @@ name = "mdurl" version = "0.1.2" requires_python = ">=3.7" summary = "Markdown URL utilities" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, @@ -1548,24 +1297,8 @@ name = "mmh3" version = "5.0.1" requires_python = ">=3.8" summary = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." 
-groups = ["default"] +groups = ["chat_with_repo"] files = [ - {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f0a4b4bf05778ed77d820d6e7d0e9bd6beb0c01af10e1ce9233f5d2f814fcafa"}, - {file = "mmh3-5.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac7a391039aeab95810c2d020b69a94eb6b4b37d4e2374831e92db3a0cdf71c6"}, - {file = "mmh3-5.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3a2583b5521ca49756d8d8bceba80627a9cc295f255dcab4e3df7ccc2f09679a"}, - {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:081a8423fe53c1ac94f87165f3e4c500125d343410c1a0c5f1703e898a3ef038"}, - {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8b4d72713799755dc8954a7d36d5c20a6c8de7b233c82404d122c7c7c1707cc"}, - {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:389a6fd51efc76d3182d36ec306448559c1244f11227d2bb771bdd0e6cc91321"}, - {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:39f4128edaa074bff721b1d31a72508cba4d2887ee7867f22082e1fe9d4edea0"}, - {file = "mmh3-5.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5d23a94d91aabba3386b3769048d5f4210fdfef80393fece2f34ba5a7b466c"}, - {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:16347d038361f8b8f24fd2b7ef378c9b68ddee9f7706e46269b6e0d322814713"}, - {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:6e299408565af7d61f2d20a5ffdd77cf2ed902460fe4e6726839d59ba4b72316"}, - {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:42050af21ddfc5445ee5a66e73a8fc758c71790305e3ee9e4a85a8e69e810f94"}, - {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2ae9b1f5ef27ec54659920f0404b7ceb39966e28867c461bfe83a05e8d18ddb0"}, - {file = "mmh3-5.0.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:50c2495a02045f3047d71d4ae9cdd7a15efc0bcbb7ff17a18346834a8e2d1d19"}, - {file = "mmh3-5.0.1-cp310-cp310-win32.whl", hash = "sha256:c028fa77cddf351ca13b4a56d43c1775652cde0764cadb39120b68f02a23ecf6"}, - {file = "mmh3-5.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:c5e741e421ec14400c4aae30890515c201f518403bdef29ae1e00d375bb4bbb5"}, - {file = "mmh3-5.0.1-cp310-cp310-win_arm64.whl", hash = "sha256:b17156d56fabc73dbf41bca677ceb6faed435cc8544f6566d72ea77d8a17e9d0"}, {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a6d5a9b1b923f1643559ba1fc0bf7a5076c90cbb558878d3bf3641ce458f25d"}, {file = "mmh3-5.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3349b968be555f7334bbcce839da98f50e1e80b1c615d8e2aa847ea4a964a012"}, {file = "mmh3-5.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1bd3c94b110e55db02ab9b605029f48a2f7f677c6e58c09d44e42402d438b7e1"}, @@ -1621,7 +1354,7 @@ files = [ name = "monotonic" version = "1.6" summary = "An implementation of time.monotonic() for Python 2 & < 3.3" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"}, @@ -1631,7 +1364,7 @@ files = [ name = "mpmath" version = "1.3.0" summary = "Python library for arbitrary-precision floating-point arithmetic" -groups = ["default"] +groups = 
["chat_with_repo"] files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, {file = "mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f"}, @@ -1642,26 +1375,11 @@ name = "multidict" version = "6.1.0" requires_python = ">=3.8" summary = "multidict implementation" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "typing-extensions>=4.1.0; python_version < \"3.11\"", ] files = [ - {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3380252550e372e8511d49481bd836264c009adb826b23fefcc5dd3c69692f60"}, - {file = "multidict-6.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99f826cbf970077383d7de805c0681799491cb939c25450b9b5b3ced03ca99f1"}, - {file = "multidict-6.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a114d03b938376557927ab23f1e950827c3b893ccb94b62fd95d430fd0e5cf53"}, - {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1c416351ee6271b2f49b56ad7f308072f6f44b37118d69c2cad94f3fa8a40d5"}, - {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6b5d83030255983181005e6cfbac1617ce9746b219bc2aad52201ad121226581"}, - {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3e97b5e938051226dc025ec80980c285b053ffb1e25a3db2a3aa3bc046bf7f56"}, - {file = "multidict-6.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d618649d4e70ac6efcbba75be98b26ef5078faad23592f9b51ca492953012429"}, - {file = "multidict-6.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10524ebd769727ac77ef2278390fb0068d83f3acb7773792a5080f2b0abf7748"}, - {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:ff3827aef427c89a25cc96ded1759271a93603aba9fb977a6d264648ebf989db"}, - {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:06809f4f0f7ab7ea2cabf9caca7d79c22c0758b58a71f9d32943ae13c7ace056"}, - {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:f179dee3b863ab1c59580ff60f9d99f632f34ccb38bf67a33ec6b3ecadd0fd76"}, - {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:aaed8b0562be4a0876ee3b6946f6869b7bcdb571a5d1496683505944e268b160"}, - {file = "multidict-6.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3c8b88a2ccf5493b6c8da9076fb151ba106960a2df90c2633f342f120751a9e7"}, - {file = "multidict-6.1.0-cp310-cp310-win32.whl", hash = "sha256:4a9cb68166a34117d6646c0023c7b759bf197bee5ad4272f420a0141d7eb03a0"}, - {file = "multidict-6.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:20b9b5fbe0b88d0bdef2012ef7dee867f874b72528cf1d08f1d59b0e3850129d"}, {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:3efe2c2cb5763f2f1b275ad2bf7a287d3f7ebbef35648a9726e3b69284a4f3d6"}, {file = "multidict-6.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c7053d3b0353a8b9de430a4f4b4268ac9a4fb3481af37dfe49825bf45ca24156"}, {file = "multidict-6.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27e5fc84ccef8dfaabb09d82b7d179c7cf1a3fbc8a966f8274fcb4ab2eb4cadb"}, @@ -1716,7 +1434,7 @@ name = "mypy-extensions" version = "1.0.0" requires_python = ">=3.5" summary = "Type system extensions for programs checked with the mypy type checker." 
-groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"}, {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, @@ -1727,7 +1445,7 @@ name = "nest-asyncio" version = "1.6.0" requires_python = ">=3.5" summary = "Patch asyncio to allow nested event loops" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c"}, {file = "nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe"}, @@ -1735,13 +1453,13 @@ files = [ [[package]] name = "networkx" -version = "3.3" +version = "3.4.2" requires_python = ">=3.10" summary = "Python package for creating and manipulating graphs and networks" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = "networkx-3.3-py3-none-any.whl", hash = "sha256:28575580c6ebdaf4505b22c6256a2b9de86b316dc63ba9e93abde3d78dfdbcf2"}, - {file = "networkx-3.3.tar.gz", hash = "sha256:0c127d8b2f4865f59ae9cb8aafcd60b5c70f3241ebd66f7defad7c4ab90126c9"}, + {file = "networkx-3.4.2-py3-none-any.whl", hash = "sha256:df5d4365b724cf81b8c6a7312509d0c22386097011ad1abe274afd5e9d3bbc5f"}, + {file = "networkx-3.4.2.tar.gz", hash = "sha256:307c3669428c5362aab27c8a1260aa8f47c4e91d3891f48be0141738d8d053e1"}, ] [[package]] @@ -1749,7 +1467,7 @@ name = "nltk" version = "3.9.1" requires_python = ">=3.8" summary = "Natural Language Toolkit" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "click", "joblib", @@ -1766,16 +1484,8 @@ name = "numpy" version = "1.26.4" requires_python = ">=3.9" summary = "Fundamental package for array computing in Python" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", 
hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, @@ -1800,7 +1510,7 @@ name = "oauthlib" version = "3.2.2" requires_python = ">=3.6" summary = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, {file = "oauthlib-3.2.2.tar.gz", hash = "sha256:9859c40929662bec5d64f34d01c99e093149682a3f38915dc0655d5a633dd918"}, @@ -1808,9 +1518,9 @@ files = [ [[package]] name = "onnxruntime" -version = "1.19.2" +version = "1.20.0" summary = "ONNX Runtime is a runtime accelerator for Machine Learning models" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "coloredlogs", "flatbuffers", @@ -1820,29 +1530,30 @@ dependencies = [ "sympy", ] files = [ - {file = "onnxruntime-1.19.2-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:84fa57369c06cadd3c2a538ae2a26d76d583e7c34bdecd5769d71ca5c0fc750e"}, - {file = "onnxruntime-1.19.2-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bdc471a66df0c1cdef774accef69e9f2ca168c851ab5e4f2f3341512c7ef4666"}, - {file = "onnxruntime-1.19.2-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e3a4ce906105d99ebbe817f536d50a91ed8a4d1592553f49b3c23c4be2560ae6"}, - {file = "onnxruntime-1.19.2-cp310-cp310-win32.whl", hash = "sha256:4b3d723cc154c8ddeb9f6d0a8c0d6243774c6b5930847cc83170bfe4678fafb3"}, - {file = "onnxruntime-1.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:17ed7382d2c58d4b7354fb2b301ff30b9bf308a1c7eac9546449cd122d21cae5"}, - {file = "onnxruntime-1.19.2-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:d863e8acdc7232d705d49e41087e10b274c42f09e259016a46f32c34e06dc4fd"}, - {file = "onnxruntime-1.19.2-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c1dfe4f660a71b31caa81fc298a25f9612815215a47b286236e61d540350d7b6"}, - {file = "onnxruntime-1.19.2-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a36511dc07c5c964b916697e42e366fa43c48cdb3d3503578d78cef30417cb84"}, - {file = "onnxruntime-1.19.2-cp311-cp311-win32.whl", hash = "sha256:50cbb8dc69d6befad4746a69760e5b00cc3ff0a59c6c3fb27f8afa20e2cab7e7"}, - {file = "onnxruntime-1.19.2-cp311-cp311-win_amd64.whl", hash = "sha256:1c3e5d415b78337fa0b1b75291e9ea9fb2a4c1f148eb5811e7212fed02cfffa8"}, - {file = "onnxruntime-1.19.2-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:68e7051bef9cfefcbb858d2d2646536829894d72a4130c24019219442b1dd2ed"}, - {file = "onnxruntime-1.19.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d2d366fbcc205ce68a8a3bde2185fd15c604d9645888703785b61ef174265168"}, - {file = "onnxruntime-1.19.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:477b93df4db467e9cbf34051662a4b27c18e131fa1836e05974eae0d6e4cf29b"}, - {file = "onnxruntime-1.19.2-cp312-cp312-win32.whl", hash = "sha256:9a174073dc5608fad05f7cf7f320b52e8035e73d80b0a23c80f840e5a97c0147"}, - {file = "onnxruntime-1.19.2-cp312-cp312-win_amd64.whl", hash = "sha256:190103273ea4507638ffc31d66a980594b237874b65379e273125150eb044857"}, + {file = "onnxruntime-1.20.0-cp311-cp311-macosx_13_0_universal2.whl", hash = "sha256:3398354e9145c68edc09dbc72265401150027e76716ae758e8d9b52e6a7ddca0"}, + {file = "onnxruntime-1.20.0-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:8a831b720d0a7be8241a230cb06f592e8bb66652d7cea54ce02d83769651fdee"}, + {file = "onnxruntime-1.20.0-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:041fefe60af844ebd90f344c84f908201490555cd0a6d78dd0a7acdc27b59972"}, + {file = "onnxruntime-1.20.0-cp311-cp311-win32.whl", hash = "sha256:83da64d2824809d0f6977db8bfc5091f742c26f09dfd66a3934e673780f5f87a"}, + {file = "onnxruntime-1.20.0-cp311-cp311-win_amd64.whl", hash = "sha256:bfa390046332f5fca6f8af8c9d17164621ac52e66b11518e187278b19364800c"}, + {file = "onnxruntime-1.20.0-cp312-cp312-macosx_13_0_universal2.whl", hash = "sha256:97c2b91bfea063f9c3457422d28a336bfd2859001cd880645adfa7184e29dd79"}, + {file = "onnxruntime-1.20.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:51e7b34e398089c4ed8d0f50722d7a64a4d5f11b38c4a42576458a03c6dbc72e"}, + {file = "onnxruntime-1.20.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e259378ff2843321e0bf4552adcbee48822c91d77d42dde78b87dcdf10ad01f"}, + {file = "onnxruntime-1.20.0-cp312-cp312-win32.whl", hash = "sha256:428abc1f7d8eb425887e2b7726044f2af7b5a098359455e7d2d92343f04ad0ff"}, + {file = "onnxruntime-1.20.0-cp312-cp312-win_amd64.whl", hash = "sha256:d5f23cbfeb546e16ffea81c28d2e796a53197fdc6c92540648e2aa53a7c7a637"}, + {file = "onnxruntime-1.20.0-cp313-cp313-macosx_13_0_universal2.whl", hash = "sha256:95b91126bc3e1754868da1d3d2d08a7a10279b8ff5cea5e34e92fbe3fd691dcf"}, + {file = "onnxruntime-1.20.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d57c10d7729347d6663f32b3f569f33d69a95e150d37ff6af4be9b9ab1ffdc25"}, + {file = "onnxruntime-1.20.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b9c38735dac127d0eeb957ec312c8f1ae90ecae2779a55b2fa279aa7bd116cbd"}, + {file = "onnxruntime-1.20.0-cp313-cp313-win_amd64.whl", hash = "sha256:25514cec4ea251d492aa1e38a7395d8801e64a4c940a154aef84cfad97ae4628"}, + {file = "onnxruntime-1.20.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:640ad9ea72d322f0325a51544eddb54f4fa843c4348573c88a9cb44f46678f3f"}, + {file = "onnxruntime-1.20.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dc4e7c10c98c1f407835448c26a7e14ebff3234f131e1fbc53bd9500c828df89"}, ] [[package]] name = "openai" -version = "1.51.2" -requires_python = ">=3.7.1" +version = "1.54.4" +requires_python = ">=3.8" summary = "The official Python library for the openai API" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "anyio<5,>=3.5.0", "cached-property; python_version < \"3.8\"", @@ -1855,211 +1566,203 @@ dependencies = [ "typing-extensions<5,>=4.11", ] files = [ - {file = "openai-1.51.2-py3-none-any.whl", hash = "sha256:5c5954711cba931423e471c37ff22ae0fd3892be9b083eee36459865fbbb83fa"}, - {file = "openai-1.51.2.tar.gz", hash = "sha256:c6a51fac62a1ca9df85a522e462918f6bb6bc51a8897032217e453a0730123a6"}, + {file = "openai-1.54.4-py3-none-any.whl", hash = "sha256:0d95cef99346bf9b6d7fbf57faf61a673924c3e34fa8af84c9ffe04660673a7e"}, + {file = "openai-1.54.4.tar.gz", hash = "sha256:50f3656e45401c54e973fa05dc29f3f0b0d19348d685b2f7ddb4d92bf7b1b6bf"}, ] [[package]] name = "opentelemetry-api" -version = "1.27.0" +version = "1.28.1" requires_python = ">=3.8" summary = "OpenTelemetry Python API" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "deprecated>=1.2.6", - "importlib-metadata<=8.4.0,>=6.0", + "importlib-metadata<=8.5.0,>=6.0", ] files = [ - {file = 
"opentelemetry_api-1.27.0-py3-none-any.whl", hash = "sha256:953d5871815e7c30c81b56d910c707588000fff7a3ca1c73e6531911d53065e7"}, - {file = "opentelemetry_api-1.27.0.tar.gz", hash = "sha256:ed673583eaa5f81b5ce5e86ef7cdaf622f88ef65f0b9aab40b843dcae5bef342"}, + {file = "opentelemetry_api-1.28.1-py3-none-any.whl", hash = "sha256:bfe86c95576cf19a914497f439fd79c9553a38de0adbdc26f7cfc46b0c00b16c"}, + {file = "opentelemetry_api-1.28.1.tar.gz", hash = "sha256:6fa7295a12c707f5aebef82da3d9ec5afe6992f3e42bfe7bec0339a44b3518e7"}, ] [[package]] name = "opentelemetry-exporter-otlp-proto-common" -version = "1.27.0" +version = "1.28.1" requires_python = ">=3.8" summary = "OpenTelemetry Protobuf encoding" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ - "opentelemetry-proto==1.27.0", + "opentelemetry-proto==1.28.1", ] files = [ - {file = "opentelemetry_exporter_otlp_proto_common-1.27.0-py3-none-any.whl", hash = "sha256:675db7fffcb60946f3a5c43e17d1168a3307a94a930ecf8d2ea1f286f3d4f79a"}, - {file = "opentelemetry_exporter_otlp_proto_common-1.27.0.tar.gz", hash = "sha256:159d27cf49f359e3798c4c3eb8da6ef4020e292571bd8c5604a2a573231dd5c8"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.28.1-py3-none-any.whl", hash = "sha256:56ea6cf28c90f767733f046a54525dc7271a25faff86b1955e5252b55f4e007f"}, + {file = "opentelemetry_exporter_otlp_proto_common-1.28.1.tar.gz", hash = "sha256:6e55e7f5d59296cc87a74c08b8e0ddf87403f73a62302ec7ee042c1a1f4a8f70"}, ] [[package]] name = "opentelemetry-exporter-otlp-proto-grpc" -version = "1.27.0" +version = "1.28.1" requires_python = ">=3.8" summary = "OpenTelemetry Collector Protobuf over gRPC Exporter" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "deprecated>=1.2.6", "googleapis-common-protos~=1.52", - "grpcio<2.0.0,>=1.0.0", + "grpcio<2.0.0,>=1.63.2", "opentelemetry-api~=1.15", - "opentelemetry-exporter-otlp-proto-common==1.27.0", - "opentelemetry-proto==1.27.0", - "opentelemetry-sdk~=1.27.0", + "opentelemetry-exporter-otlp-proto-common==1.28.1", + "opentelemetry-proto==1.28.1", + "opentelemetry-sdk~=1.28.1", ] files = [ - {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0-py3-none-any.whl", hash = "sha256:56b5bbd5d61aab05e300d9d62a6b3c134827bbd28d0b12f2649c2da368006c9e"}, - {file = "opentelemetry_exporter_otlp_proto_grpc-1.27.0.tar.gz", hash = "sha256:af6f72f76bcf425dfb5ad11c1a6d6eca2863b91e63575f89bb7b4b55099d968f"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.28.1-py3-none-any.whl", hash = "sha256:fd494b9dd7869975138cef68d52ed45b9ca584c1fa31bef2d01ecfd537445dfa"}, + {file = "opentelemetry_exporter_otlp_proto_grpc-1.28.1.tar.gz", hash = "sha256:9c84a103734d0c9cf9a4ba973d9c15c21996a554ab2bbd6208b3925873912642"}, ] [[package]] name = "opentelemetry-instrumentation" -version = "0.48b0" +version = "0.49b1" requires_python = ">=3.8" summary = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "opentelemetry-api~=1.4", - "setuptools>=16.0", + "opentelemetry-semantic-conventions==0.49b1", + "packaging>=18.0", "wrapt<2.0.0,>=1.0.0", ] files = [ - {file = "opentelemetry_instrumentation-0.48b0-py3-none-any.whl", hash = "sha256:a69750dc4ba6a5c3eb67986a337185a25b739966d80479befe37b546fc870b44"}, - {file = "opentelemetry_instrumentation-0.48b0.tar.gz", hash = "sha256:94929685d906380743a71c3970f76b5f07476eea1834abd5dd9d17abfe23cc35"}, + {file = "opentelemetry_instrumentation-0.49b1-py3-none-any.whl", hash = 
"sha256:0a9d3821736104013693ef3b8a9d29b41f2f3a81ee2d8c9288b52d62bae5747c"}, + {file = "opentelemetry_instrumentation-0.49b1.tar.gz", hash = "sha256:2d0e41181b7957ba061bb436b969ad90545ac3eba65f290830009b4264d2824e"}, ] [[package]] name = "opentelemetry-instrumentation-asgi" -version = "0.48b0" +version = "0.49b1" requires_python = ">=3.8" summary = "ASGI instrumentation for OpenTelemetry" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "asgiref~=3.0", "opentelemetry-api~=1.12", - "opentelemetry-instrumentation==0.48b0", - "opentelemetry-semantic-conventions==0.48b0", - "opentelemetry-util-http==0.48b0", + "opentelemetry-instrumentation==0.49b1", + "opentelemetry-semantic-conventions==0.49b1", + "opentelemetry-util-http==0.49b1", ] files = [ - {file = "opentelemetry_instrumentation_asgi-0.48b0-py3-none-any.whl", hash = "sha256:ddb1b5fc800ae66e85a4e2eca4d9ecd66367a8c7b556169d9e7b57e10676e44d"}, - {file = "opentelemetry_instrumentation_asgi-0.48b0.tar.gz", hash = "sha256:04c32174b23c7fa72ddfe192dad874954968a6a924608079af9952964ecdf785"}, + {file = "opentelemetry_instrumentation_asgi-0.49b1-py3-none-any.whl", hash = "sha256:8dcbc438cb138789fcb20ae38b6e7f23088e066d77b54bae205c5744856603c6"}, + {file = "opentelemetry_instrumentation_asgi-0.49b1.tar.gz", hash = "sha256:d1a2b4cb76490be28bcad3c0f562c4b3c84157148c922ca298bb04ed9e36c005"}, ] [[package]] name = "opentelemetry-instrumentation-fastapi" -version = "0.48b0" +version = "0.49b1" requires_python = ">=3.8" summary = "OpenTelemetry FastAPI Instrumentation" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "opentelemetry-api~=1.12", - "opentelemetry-instrumentation-asgi==0.48b0", - "opentelemetry-instrumentation==0.48b0", - "opentelemetry-semantic-conventions==0.48b0", - "opentelemetry-util-http==0.48b0", + "opentelemetry-instrumentation-asgi==0.49b1", + "opentelemetry-instrumentation==0.49b1", + "opentelemetry-semantic-conventions==0.49b1", + "opentelemetry-util-http==0.49b1", ] files = [ - {file = "opentelemetry_instrumentation_fastapi-0.48b0-py3-none-any.whl", hash = "sha256:afeb820a59e139d3e5d96619600f11ce0187658b8ae9e3480857dd790bc024f2"}, - {file = "opentelemetry_instrumentation_fastapi-0.48b0.tar.gz", hash = "sha256:21a72563ea412c0b535815aeed75fc580240f1f02ebc72381cfab672648637a2"}, + {file = "opentelemetry_instrumentation_fastapi-0.49b1-py3-none-any.whl", hash = "sha256:3398940102c8ef613b9c55fc4f179cc92413de456f6bec6eeb1995270de2b087"}, + {file = "opentelemetry_instrumentation_fastapi-0.49b1.tar.gz", hash = "sha256:13d9d4d70b4bb831468b8e40807353731cad7fbfaeedde0070d93bcb2c417b07"}, ] [[package]] name = "opentelemetry-proto" -version = "1.27.0" +version = "1.28.1" requires_python = ">=3.8" summary = "OpenTelemetry Python Proto" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ - "protobuf<5.0,>=3.19", + "protobuf<6.0,>=5.0", ] files = [ - {file = "opentelemetry_proto-1.27.0-py3-none-any.whl", hash = "sha256:b133873de5581a50063e1e4b29cdcf0c5e253a8c2d8dc1229add20a4c3830ace"}, - {file = "opentelemetry_proto-1.27.0.tar.gz", hash = "sha256:33c9345d91dafd8a74fc3d7576c5a38f18b7fdf8d02983ac67485386132aedd6"}, + {file = "opentelemetry_proto-1.28.1-py3-none-any.whl", hash = "sha256:cb406ec69f1d11439e60fb43c6b744783fc8ee4deecdab61b3e29f112b0602f9"}, + {file = "opentelemetry_proto-1.28.1.tar.gz", hash = "sha256:6f9e9d9958822ab3e3cdcd2a24806d62aa10282349fd4338aafe32c69c87fc15"}, ] [[package]] name = "opentelemetry-sdk" -version = "1.27.0" +version = "1.28.1" requires_python = ">=3.8" summary = 
"OpenTelemetry Python SDK" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ - "opentelemetry-api==1.27.0", - "opentelemetry-semantic-conventions==0.48b0", + "opentelemetry-api==1.28.1", + "opentelemetry-semantic-conventions==0.49b1", "typing-extensions>=3.7.4", ] files = [ - {file = "opentelemetry_sdk-1.27.0-py3-none-any.whl", hash = "sha256:365f5e32f920faf0fd9e14fdfd92c086e317eaa5f860edba9cdc17a380d9197d"}, - {file = "opentelemetry_sdk-1.27.0.tar.gz", hash = "sha256:d525017dea0ccce9ba4e0245100ec46ecdc043f2d7b8315d56b19aff0904fa6f"}, + {file = "opentelemetry_sdk-1.28.1-py3-none-any.whl", hash = "sha256:72aad7f5fcbe37113c4ab4899f6cdeb6ac77ed3e62f25a85e3627b12583dad0f"}, + {file = "opentelemetry_sdk-1.28.1.tar.gz", hash = "sha256:100fa371b2046ffba6a340c18f0b2a0463acad7461e5177e126693b613a6ca57"}, ] [[package]] name = "opentelemetry-semantic-conventions" -version = "0.48b0" +version = "0.49b1" requires_python = ">=3.8" summary = "OpenTelemetry Semantic Conventions" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "deprecated>=1.2.6", - "opentelemetry-api==1.27.0", + "opentelemetry-api==1.28.1", ] files = [ - {file = "opentelemetry_semantic_conventions-0.48b0-py3-none-any.whl", hash = "sha256:a0de9f45c413a8669788a38569c7e0a11ce6ce97861a628cca785deecdc32a1f"}, - {file = "opentelemetry_semantic_conventions-0.48b0.tar.gz", hash = "sha256:12d74983783b6878162208be57c9effcb89dc88691c64992d70bb89dc00daa1a"}, + {file = "opentelemetry_semantic_conventions-0.49b1-py3-none-any.whl", hash = "sha256:dd6f3ac8169d2198c752e1a63f827e5f5e110ae9b0ce33f2aad9a3baf0739743"}, + {file = "opentelemetry_semantic_conventions-0.49b1.tar.gz", hash = "sha256:91817883b159ffb94c2ca9548509c4fe0aafce7c24f437aa6ac3fc613aa9a758"}, ] [[package]] name = "opentelemetry-util-http" -version = "0.48b0" +version = "0.49b1" requires_python = ">=3.8" summary = "Web util for OpenTelemetry" -groups = ["default"] +groups = ["chat_with_repo"] files = [ - {file = "opentelemetry_util_http-0.48b0-py3-none-any.whl", hash = "sha256:76f598af93aab50328d2a69c786beaedc8b6a7770f7a818cc307eb353debfffb"}, - {file = "opentelemetry_util_http-0.48b0.tar.gz", hash = "sha256:60312015153580cc20f322e5cdc3d3ecad80a71743235bdb77716e742814623c"}, + {file = "opentelemetry_util_http-0.49b1-py3-none-any.whl", hash = "sha256:0290b942f7888b6310df6803e52e12f4043b8f224db0659f62dc7b70059eb94f"}, + {file = "opentelemetry_util_http-0.49b1.tar.gz", hash = "sha256:6c2bc6f7e20e286dbdfcccb9d895fa290ec9d7c596cdf2e06bf1d8e434b2edd0"}, ] [[package]] name = "orjson" -version = "3.10.7" +version = "3.10.11" requires_python = ">=3.8" summary = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" -groups = ["default"] -files = [ - {file = "orjson-3.10.7-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:74f4544f5a6405b90da8ea724d15ac9c36da4d72a738c64685003337401f5c12"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34a566f22c28222b08875b18b0dfbf8a947e69df21a9ed5c51a6bf91cfb944ac"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bf6ba8ebc8ef5792e2337fb0419f8009729335bb400ece005606336b7fd7bab7"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac7cf6222b29fbda9e3a472b41e6a5538b48f2c8f99261eecd60aafbdb60690c"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:de817e2f5fc75a9e7dd350c4b0f54617b280e26d1631811a43e7e968fa71e3e9"}, - {file = "orjson-3.10.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:348bdd16b32556cf8d7257b17cf2bdb7ab7976af4af41ebe79f9796c218f7e91"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:479fd0844ddc3ca77e0fd99644c7fe2de8e8be1efcd57705b5c92e5186e8a250"}, - {file = "orjson-3.10.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:fdf5197a21dd660cf19dfd2a3ce79574588f8f5e2dbf21bda9ee2d2b46924d84"}, - {file = "orjson-3.10.7-cp310-none-win32.whl", hash = "sha256:d374d36726746c81a49f3ff8daa2898dccab6596864ebe43d50733275c629175"}, - {file = "orjson-3.10.7-cp310-none-win_amd64.whl", hash = "sha256:cb61938aec8b0ffb6eef484d480188a1777e67b05d58e41b435c74b9d84e0b9c"}, - {file = "orjson-3.10.7-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:7db8539039698ddfb9a524b4dd19508256107568cdad24f3682d5773e60504a2"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:480f455222cb7a1dea35c57a67578848537d2602b46c464472c995297117fa09"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a9c9b168b3a19e37fe2778c0003359f07822c90fdff8f98d9d2a91b3144d8e0"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8de062de550f63185e4c1c54151bdddfc5625e37daf0aa1e75d2a1293e3b7d9a"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6b0dd04483499d1de9c8f6203f8975caf17a6000b9c0c54630cef02e44ee624e"}, - {file = "orjson-3.10.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b58d3795dafa334fc8fd46f7c5dc013e6ad06fd5b9a4cc98cb1456e7d3558bd6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:33cfb96c24034a878d83d1a9415799a73dc77480e6c40417e5dda0710d559ee6"}, - {file = "orjson-3.10.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e724cebe1fadc2b23c6f7415bad5ee6239e00a69f30ee423f319c6af70e2a5c0"}, - {file = "orjson-3.10.7-cp311-none-win32.whl", hash = "sha256:82763b46053727a7168d29c772ed5c870fdae2f61aa8a25994c7984a19b1021f"}, - {file = "orjson-3.10.7-cp311-none-win_amd64.whl", hash = "sha256:eb8d384a24778abf29afb8e41d68fdd9a156cf6e5390c04cc07bbc24b89e98b5"}, - {file = "orjson-3.10.7-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:44a96f2d4c3af51bfac6bc4ef7b182aa33f2f054fd7f34cc0ee9a320d051d41f"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76ac14cd57df0572453543f8f2575e2d01ae9e790c21f57627803f5e79b0d3c3"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bdbb61dcc365dd9be94e8f7df91975edc9364d6a78c8f7adb69c1cdff318ec93"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b48b3db6bb6e0a08fa8c83b47bc169623f801e5cc4f24442ab2b6617da3b5313"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23820a1563a1d386414fef15c249040042b8e5d07b40ab3fe3efbfbbcbcb8864"}, - {file = "orjson-3.10.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0c6a008e91d10a2564edbb6ee5069a9e66df3fbe11c9a005cb411f441fd2c09"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:d352ee8ac1926d6193f602cbe36b1643bbd1bbcb25e3c1a657a4390f3000c9a5"}, - {file = "orjson-3.10.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d2d9f990623f15c0ae7ac608103c33dfe1486d2ed974ac3f40b693bad1a22a7b"}, - {file = "orjson-3.10.7-cp312-none-win32.whl", hash = "sha256:7c4c17f8157bd520cdb7195f75ddbd31671997cbe10aee559c2d613592e7d7eb"}, - {file = "orjson-3.10.7-cp312-none-win_amd64.whl", hash = "sha256:1d9c0e733e02ada3ed6098a10a8ee0052dd55774de3d9110d29868d24b17faa1"}, - {file = "orjson-3.10.7-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:77d325ed866876c0fa6492598ec01fe30e803272a6e8b10e992288b009cbe149"}, - {file = "orjson-3.10.7-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ea2c232deedcb605e853ae1db2cc94f7390ac776743b699b50b071b02bea6fe"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:3dcfbede6737fdbef3ce9c37af3fb6142e8e1ebc10336daa05872bfb1d87839c"}, - {file = "orjson-3.10.7-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:11748c135f281203f4ee695b7f80bb1358a82a63905f9f0b794769483ea854ad"}, - {file = "orjson-3.10.7-cp313-none-win32.whl", hash = "sha256:a7e19150d215c7a13f39eb787d84db274298d3f83d85463e61d277bbd7f401d2"}, - {file = "orjson-3.10.7-cp313-none-win_amd64.whl", hash = "sha256:eef44224729e9525d5261cc8d28d6b11cafc90e6bd0be2157bde69a52ec83024"}, - {file = "orjson-3.10.7.tar.gz", hash = "sha256:75ef0640403f945f3a1f9f6400686560dbfb0fb5b16589ad62cd477043c4eee3"}, +groups = ["chat_with_repo"] +files = [ + {file = "orjson-3.10.11-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:1444f9cb7c14055d595de1036f74ecd6ce15f04a715e73f33bb6326c9cef01b6"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdec57fe3b4bdebcc08a946db3365630332dbe575125ff3d80a3272ebd0ddafe"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eed32f33a0ea6ef36ccc1d37f8d17f28a1d6e8eefae5928f76aff8f1df85e67"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80df27dd8697242b904f4ea54820e2d98d3f51f91e97e358fc13359721233e4b"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:705f03cee0cb797256d54de6695ef219e5bc8c8120b6654dd460848d57a9af3d"}, + {file = "orjson-3.10.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:03246774131701de8e7059b2e382597da43144a9a7400f178b2a32feafc54bd5"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8b5759063a6c940a69c728ea70d7c33583991c6982915a839c8da5f957e0103a"}, + {file = "orjson-3.10.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:677f23e32491520eebb19c99bb34675daf5410c449c13416f7f0d93e2cf5f981"}, + {file = "orjson-3.10.11-cp311-none-win32.whl", hash = "sha256:a11225d7b30468dcb099498296ffac36b4673a8398ca30fdaec1e6c20df6aa55"}, + {file = "orjson-3.10.11-cp311-none-win_amd64.whl", hash = "sha256:df8c677df2f9f385fcc85ab859704045fa88d4668bc9991a527c86e710392bec"}, + {file = "orjson-3.10.11-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:360a4e2c0943da7c21505e47cf6bd725588962ff1d739b99b14e2f7f3545ba51"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:496e2cb45de21c369079ef2d662670a4892c81573bcc143c4205cae98282ba97"}, + {file = 
"orjson-3.10.11-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7dfa8db55c9792d53c5952900c6a919cfa377b4f4534c7a786484a6a4a350c19"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:51f3382415747e0dbda9dade6f1e1a01a9d37f630d8c9049a8ed0e385b7a90c0"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f35a1b9f50a219f470e0e497ca30b285c9f34948d3c8160d5ad3a755d9299433"}, + {file = "orjson-3.10.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2f3b7c5803138e67028dde33450e054c87e0703afbe730c105f1fcd873496d5"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f91d9eb554310472bd09f5347950b24442600594c2edc1421403d7610a0998fd"}, + {file = "orjson-3.10.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dfbb2d460a855c9744bbc8e36f9c3a997c4b27d842f3d5559ed54326e6911f9b"}, + {file = "orjson-3.10.11-cp312-none-win32.whl", hash = "sha256:d4a62c49c506d4d73f59514986cadebb7e8d186ad510c518f439176cf8d5359d"}, + {file = "orjson-3.10.11-cp312-none-win_amd64.whl", hash = "sha256:f1eec3421a558ff7a9b010a6c7effcfa0ade65327a71bb9b02a1c3b77a247284"}, + {file = "orjson-3.10.11-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c46294faa4e4d0eb73ab68f1a794d2cbf7bab33b1dda2ac2959ffb7c61591899"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:52e5834d7d6e58a36846e059d00559cb9ed20410664f3ad156cd2cc239a11230"}, + {file = "orjson-3.10.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a2fc947e5350fdce548bfc94f434e8760d5cafa97fb9c495d2fef6757aa02ec0"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0efabbf839388a1dab5b72b5d3baedbd6039ac83f3b55736eb9934ea5494d258"}, + {file = "orjson-3.10.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a3f29634260708c200c4fe148e42b4aae97d7b9fee417fbdd74f8cfc265f15b0"}, + {file = "orjson-3.10.11-cp313-none-win32.whl", hash = "sha256:1a1222ffcee8a09476bbdd5d4f6f33d06d0d6642df2a3d78b7a195ca880d669b"}, + {file = "orjson-3.10.11-cp313-none-win_amd64.whl", hash = "sha256:bc274ac261cc69260913b2d1610760e55d3c0801bb3457ba7b9004420b6b4270"}, + {file = "orjson-3.10.11.tar.gz", hash = "sha256:e35b6d730de6384d5b2dab5fd23f0d76fae8bbc8c353c2f78210aa5fa4beb3ef"}, ] [[package]] @@ -2067,7 +1770,7 @@ name = "overrides" version = "7.7.0" requires_python = ">=3.6" summary = "A decorator to automatically detect mismatch when overriding a method." 
-groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "typing; python_version < \"3.5\"", ] @@ -2078,13 +1781,13 @@ files = [ [[package]] name = "packaging" -version = "24.1" +version = "24.2" requires_python = ">=3.8" summary = "Core utilities for Python packages" -groups = ["default", "test"] +groups = ["default", "chat_with_repo", "test"] files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] @@ -2092,7 +1795,7 @@ name = "pandas" version = "2.2.3" requires_python = ">=3.9" summary = "Powerful data structures for data analysis, time series, and statistics" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "numpy>=1.22.4; python_version < \"3.11\"", "numpy>=1.23.2; python_version == \"3.11\"", @@ -2102,13 +1805,6 @@ dependencies = [ "tzdata>=2022.7", ] files = [ - {file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"}, - {file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"}, - {file = "pandas-2.2.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d9c45366def9a3dd85a6454c0e7908f2b3b8e9c138f5dc38fed7ce720d8453ed"}, - {file = "pandas-2.2.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86976a1c5b25ae3f8ccae3a5306e443569ee3c3faf444dfd0f41cda24667ad57"}, - {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:b8661b0238a69d7aafe156b7fa86c44b881387509653fdf857bebc5e4008ad42"}, - {file = "pandas-2.2.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37e0aced3e8f539eccf2e099f65cdb9c8aa85109b0be6e93e2baff94264bdc6f"}, - {file = "pandas-2.2.3-cp310-cp310-win_amd64.whl", hash = "sha256:56534ce0746a58afaf7942ba4863e0ef81c9c50d3f0ae93e9497d6a41a057645"}, {file = "pandas-2.2.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:66108071e1b935240e74525006034333f98bcdb87ea116de573a6a0dccb6c039"}, {file = "pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7c2875855b0ff77b2a64a0365e24455d9990730d6431b9e0ee18ad8acee13dbd"}, {file = "pandas-2.2.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd8d0c3be0515c12fed0bdbae072551c8b54b7192c7b1fda0ba56059a0179698"}, @@ -2152,63 +1848,53 @@ files = [ [[package]] name = "pillow" -version = "10.4.0" -requires_python = ">=3.8" +version = "11.0.0" +requires_python = ">=3.9" summary = "Python Imaging Library (Fork)" -groups = ["default"] -files = [ - {file = "pillow-10.4.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:4d9667937cfa347525b319ae34375c37b9ee6b525440f3ef48542fcf66f2731e"}, - {file = "pillow-10.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:543f3dc61c18dafb755773efc89aae60d06b6596a63914107f75459cf984164d"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7928ecbf1ece13956b95d9cbcfc77137652b02763ba384d9ab508099a2eca856"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash 
= "sha256:e4d49b85c4348ea0b31ea63bc75a9f3857869174e2bf17e7aba02945cd218e6f"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:6c762a5b0997f5659a5ef2266abc1d8851ad7749ad9a6a5506eb23d314e4f46b"}, - {file = "pillow-10.4.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:a985e028fc183bf12a77a8bbf36318db4238a3ded7fa9df1b9a133f1cb79f8fc"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:812f7342b0eee081eaec84d91423d1b4650bb9828eb53d8511bcef8ce5aecf1e"}, - {file = "pillow-10.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ac1452d2fbe4978c2eec89fb5a23b8387aba707ac72810d9490118817d9c0b46"}, - {file = "pillow-10.4.0-cp310-cp310-win32.whl", hash = "sha256:bcd5e41a859bf2e84fdc42f4edb7d9aba0a13d29a2abadccafad99de3feff984"}, - {file = "pillow-10.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:ecd85a8d3e79cd7158dec1c9e5808e821feea088e2f69a974db5edf84dc53141"}, - {file = "pillow-10.4.0-cp310-cp310-win_arm64.whl", hash = "sha256:ff337c552345e95702c5fde3158acb0625111017d0e5f24bf3acdb9cc16b90d1"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:0a9ec697746f268507404647e531e92889890a087e03681a3606d9b920fbee3c"}, - {file = "pillow-10.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dfe91cb65544a1321e631e696759491ae04a2ea11d36715eca01ce07284738be"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5dc6761a6efc781e6a1544206f22c80c3af4c8cf461206d46a1e6006e4429ff3"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e84b6cc6a4a3d76c153a6b19270b3526a5a8ed6b09501d3af891daa2a9de7d6"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:bbc527b519bd3aa9d7f429d152fea69f9ad37c95f0b02aebddff592688998abe"}, - {file = "pillow-10.4.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:76a911dfe51a36041f2e756b00f96ed84677cdeb75d25c767f296c1c1eda1319"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59291fb29317122398786c2d44427bbd1a6d7ff54017075b22be9d21aa59bd8d"}, - {file = "pillow-10.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:416d3a5d0e8cfe4f27f574362435bc9bae57f679a7158e0096ad2beb427b8696"}, - {file = "pillow-10.4.0-cp311-cp311-win32.whl", hash = "sha256:7086cc1d5eebb91ad24ded9f58bec6c688e9f0ed7eb3dbbf1e4800280a896496"}, - {file = "pillow-10.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cbed61494057c0f83b83eb3a310f0bf774b09513307c434d4366ed64f4128a91"}, - {file = "pillow-10.4.0-cp311-cp311-win_arm64.whl", hash = "sha256:f5f0c3e969c8f12dd2bb7e0b15d5c468b51e5017e01e2e867335c81903046a22"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:673655af3eadf4df6b5457033f086e90299fdd7a47983a13827acf7459c15d94"}, - {file = "pillow-10.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:866b6942a92f56300012f5fbac71f2d610312ee65e22f1aa2609e491284e5597"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29dbdc4207642ea6aad70fbde1a9338753d33fb23ed6956e706936706f52dd80"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf2342ac639c4cf38799a44950bbc2dfcb685f052b9e262f446482afaf4bffca"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:f5b92f4d70791b4a67157321c4e8225d60b119c5cc9aee8ecf153aace4aad4ef"}, - {file = "pillow-10.4.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = 
"sha256:86dcb5a1eb778d8b25659d5e4341269e8590ad6b4e8b44d9f4b07f8d136c414a"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:780c072c2e11c9b2c7ca37f9a2ee8ba66f44367ac3e5c7832afcfe5104fd6d1b"}, - {file = "pillow-10.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:37fb69d905be665f68f28a8bba3c6d3223c8efe1edf14cc4cfa06c241f8c81d9"}, - {file = "pillow-10.4.0-cp312-cp312-win32.whl", hash = "sha256:7dfecdbad5c301d7b5bde160150b4db4c659cee2b69589705b6f8a0c509d9f42"}, - {file = "pillow-10.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:1d846aea995ad352d4bdcc847535bd56e0fd88d36829d2c90be880ef1ee4668a"}, - {file = "pillow-10.4.0-cp312-cp312-win_arm64.whl", hash = "sha256:e553cad5179a66ba15bb18b353a19020e73a7921296a7979c4a2b7f6a5cd57f9"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8bc1a764ed8c957a2e9cacf97c8b2b053b70307cf2996aafd70e91a082e70df3"}, - {file = "pillow-10.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:6209bb41dc692ddfee4942517c19ee81b86c864b626dbfca272ec0f7cff5d9fb"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bee197b30783295d2eb680b311af15a20a8b24024a19c3a26431ff83eb8d1f70"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ef61f5dd14c300786318482456481463b9d6b91ebe5ef12f405afbba77ed0be"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:297e388da6e248c98bc4a02e018966af0c5f92dfacf5a5ca22fa01cb3179bca0"}, - {file = "pillow-10.4.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:e4db64794ccdf6cb83a59d73405f63adbe2a1887012e308828596100a0b2f6cc"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd2880a07482090a3bcb01f4265f1936a903d70bc740bfcb1fd4e8a2ffe5cf5a"}, - {file = "pillow-10.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4b35b21b819ac1dbd1233317adeecd63495f6babf21b7b2512d244ff6c6ce309"}, - {file = "pillow-10.4.0-cp313-cp313-win32.whl", hash = "sha256:551d3fd6e9dc15e4c1eb6fc4ba2b39c0c7933fa113b220057a34f4bb3268a060"}, - {file = "pillow-10.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:030abdbe43ee02e0de642aee345efa443740aa4d828bfe8e2eb11922ea6a21ea"}, - {file = "pillow-10.4.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b001114dd152cfd6b23befeb28d7aee43553e2402c9f159807bf55f33af8a8d"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5b4815f2e65b30f5fbae9dfffa8636d992d49705723fe86a3661806e069352d4"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:8f0aef4ef59694b12cadee839e2ba6afeab89c0f39a3adc02ed51d109117b8da"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f4727572e2918acaa9077c919cbbeb73bd2b3ebcfe033b72f858fc9fbef0026"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ff25afb18123cea58a591ea0244b92eb1e61a1fd497bf6d6384f09bc3262ec3e"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:dc3e2db6ba09ffd7d02ae9141cfa0ae23393ee7687248d46a7507b75d610f4f5"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:02a2be69f9c9b8c1e97cf2713e789d4e398c751ecfd9967c18d0ce304efbf885"}, - {file = "pillow-10.4.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:0755ffd4a0c6f267cccbae2e9903d95477ca2f77c4fcf3a3a09570001856c8a5"}, - {file = "pillow-10.4.0.tar.gz", hash = 
"sha256:166c1cd4d24309b30d61f79f4a9114b7b2313d7450912277855ff5dfd7cd4a06"}, +groups = ["default", "chat_with_repo"] +files = [ + {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, + {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, + {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, + {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, + {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = "sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, + {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, + {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, + {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, + {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, + {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, + {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, + {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, + {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, + {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, + {file = 
"pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, + {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, + {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, + {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, + {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, + {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, + {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, + {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, + {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, + {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, + {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, + {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, + {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, ] [[package]] @@ -2226,7 +1912,7 @@ files = [ name = "posthog" version = "3.7.0" summary = "Integrate PostHog into any python application." 
-groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "backoff>=1.10.0", "monotonic>=1.5", @@ -2241,16 +1927,16 @@ files = [ [[package]] name = "prettytable" -version = "3.11.0" -requires_python = ">=3.8" +version = "3.12.0" +requires_python = ">=3.9" summary = "A simple Python library for easily displaying tabular data in a visually appealing ASCII table format" groups = ["default"] dependencies = [ "wcwidth", ] files = [ - {file = "prettytable-3.11.0-py3-none-any.whl", hash = "sha256:aa17083feb6c71da11a68b2c213b04675c4af4ce9c541762632ca3f2cb3546dd"}, - {file = "prettytable-3.11.0.tar.gz", hash = "sha256:7e23ca1e68bbfd06ba8de98bf553bf3493264c96d5e8a615c0471025deeba722"}, + {file = "prettytable-3.12.0-py3-none-any.whl", hash = "sha256:77ca0ad1c435b6e363d7e8623d7cc4fcf2cf15513bf77a1c1b2e814930ac57cc"}, + {file = "prettytable-3.12.0.tar.gz", hash = "sha256:f04b3e1ba35747ac86e96ec33e3bb9748ce08e254dc2a1c6253945901beec804"}, ] [[package]] @@ -2258,24 +1944,8 @@ name = "propcache" version = "0.2.0" requires_python = ">=3.8" summary = "Accelerated property cache" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c5869b8fd70b81835a6f187c5fdbe67917a04d7e52b6e7cc4e5fe39d55c39d58"}, - {file = "propcache-0.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:952e0d9d07609d9c5be361f33b0d6d650cd2bae393aabb11d9b719364521984b"}, - {file = "propcache-0.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:33ac8f098df0585c0b53009f039dfd913b38c1d2edafed0cedcc0c32a05aa110"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:97e48e8875e6c13909c800fa344cd54cc4b2b0db1d5f911f840458a500fde2c2"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:388f3217649d6d59292b722d940d4d2e1e6a7003259eb835724092a1cca0203a"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f571aea50ba5623c308aa146eb650eebf7dbe0fd8c5d946e28343cb3b5aad577"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3dfafb44f7bb35c0c06eda6b2ab4bfd58f02729e7c4045e179f9a861b07c9850"}, - {file = "propcache-0.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3ebe9a75be7ab0b7da2464a77bb27febcb4fab46a34f9288f39d74833db7f61"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d2f0d0f976985f85dfb5f3d685697ef769faa6b71993b46b295cdbbd6be8cc37"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:a3dc1a4b165283bd865e8f8cb5f0c64c05001e0718ed06250d8cac9bec115b48"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9e0f07b42d2a50c7dd2d8675d50f7343d998c64008f1da5fef888396b7f84630"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:e63e3e1e0271f374ed489ff5ee73d4b6e7c60710e1f76af5f0e1a6117cd26394"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:56bb5c98f058a41bb58eead194b4db8c05b088c93d94d5161728515bd52b052b"}, - {file = "propcache-0.2.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7665f04d0c7f26ff8bb534e1c65068409bf4687aa2534faf7104d7182debb336"}, - {file = "propcache-0.2.0-cp310-cp310-win32.whl", hash = "sha256:7cf18abf9764746b9c8704774d8b06714bcb0a63641518a3a89c7f85cc02c2ad"}, - {file = 
"propcache-0.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:cfac69017ef97db2438efb854edf24f5a29fd09a536ff3a992b75990720cdc99"}, {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:63f13bf09cc3336eb04a837490b8f332e0db41da66995c9fd1ba04552e516354"}, {file = "propcache-0.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:608cce1da6f2672a56b24a015b42db4ac612ee709f3d29f27a00c943d9e851de"}, {file = "propcache-0.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:466c219deee4536fbc83c08d09115249db301550625c7fef1c5563a584c9bc87"}, @@ -2330,18 +2000,18 @@ files = [ [[package]] name = "protobuf" -version = "4.25.5" +version = "5.28.3" requires_python = ">=3.8" summary = "" -groups = ["default"] +groups = ["chat_with_repo"] files = [ - {file = "protobuf-4.25.5-cp310-abi3-win32.whl", hash = "sha256:5e61fd921603f58d2f5acb2806a929b4675f8874ff5f330b7d6f7e2e784bbcd8"}, - {file = "protobuf-4.25.5-cp310-abi3-win_amd64.whl", hash = "sha256:4be0571adcbe712b282a330c6e89eae24281344429ae95c6d85e79e84780f5ea"}, - {file = "protobuf-4.25.5-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:b2fde3d805354df675ea4c7c6338c1aecd254dfc9925e88c6d31a2bcb97eb173"}, - {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:919ad92d9b0310070f8356c24b855c98df2b8bd207ebc1c0c6fcc9ab1e007f3d"}, - {file = "protobuf-4.25.5-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:fe14e16c22be926d3abfcb500e60cab068baf10b542b8c858fa27e098123e331"}, - {file = "protobuf-4.25.5-py3-none-any.whl", hash = "sha256:0aebecb809cae990f8129ada5ca273d9d670b76d9bfc9b1809f0a9c02b7dbf41"}, - {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"}, + {file = "protobuf-5.28.3-cp310-abi3-win32.whl", hash = "sha256:0c4eec6f987338617072592b97943fdbe30d019c56126493111cf24344c1cc24"}, + {file = "protobuf-5.28.3-cp310-abi3-win_amd64.whl", hash = "sha256:91fba8f445723fcf400fdbe9ca796b19d3b1242cd873907979b9ed71e4afe868"}, + {file = "protobuf-5.28.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:a3f6857551e53ce35e60b403b8a27b0295f7d6eb63d10484f12bc6879c715687"}, + {file = "protobuf-5.28.3-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:3fa2de6b8b29d12c61911505d893afe7320ce7ccba4df913e2971461fa36d584"}, + {file = "protobuf-5.28.3-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:712319fbdddb46f21abb66cd33cb9e491a5763b2febd8f228251add221981135"}, + {file = "protobuf-5.28.3-py3-none-any.whl", hash = "sha256:cee1757663fa32a1ee673434fcf3bf24dd54763c79690201208bafec62f19eed"}, + {file = "protobuf-5.28.3.tar.gz", hash = "sha256:64badbc49180a5e401f373f9ce7ab1d18b63f7dd4a9cdc43c92b9f0b481cef7b"}, ] [[package]] @@ -2349,7 +2019,7 @@ name = "pyasn1" version = "0.6.1" requires_python = ">=3.8" summary = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629"}, {file = "pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034"}, @@ -2360,7 +2030,7 @@ name = "pyasn1-modules" version = "0.4.1" requires_python = ">=3.8" summary = "A collection of ASN.1-based protocols modules" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "pyasn1<0.7.0,>=0.4.6", ] @@ -2374,7 +2044,7 @@ name = "pydantic" version = "2.9.2" requires_python = ">=3.8" summary = "Data validation using Python type hints" 
-groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "annotated-types>=0.6.0", "pydantic-core==2.23.4", @@ -2391,23 +2061,11 @@ name = "pydantic-core" version = "2.23.4" requires_python = ">=3.8" summary = "Core functionality for Pydantic validation and serialization" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "typing-extensions!=4.7.0,>=4.6.0", ] files = [ - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:b10bd51f823d891193d4717448fab065733958bdb6a6b351967bd349d48d5c9b"}, - {file = "pydantic_core-2.23.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4fc714bdbfb534f94034efaa6eadd74e5b93c8fa6315565a222f7b6f42ca1166"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63e46b3169866bd62849936de036f901a9356e36376079b05efa83caeaa02ceb"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed1a53de42fbe34853ba90513cea21673481cd81ed1be739f7f2efb931b24916"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cfdd16ab5e59fc31b5e906d1a3f666571abc367598e3e02c83403acabc092e07"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255a8ef062cbf6674450e668482456abac99a5583bbafb73f9ad469540a3a232"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a7cd62e831afe623fbb7aabbb4fe583212115b3ef38a9f6b71869ba644624a2"}, - {file = "pydantic_core-2.23.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f09e2ff1f17c2b51f2bc76d1cc33da96298f0a036a137f5440ab3ec5360b624f"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:e38e63e6f3d1cec5a27e0afe90a085af8b6806ee208b33030e65b6516353f1a3"}, - {file = "pydantic_core-2.23.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0dbd8dbed2085ed23b5c04afa29d8fd2771674223135dc9bc937f3c09284d071"}, - {file = "pydantic_core-2.23.4-cp310-none-win32.whl", hash = "sha256:6531b7ca5f951d663c339002e91aaebda765ec7d61b7d1e3991051906ddde119"}, - {file = "pydantic_core-2.23.4-cp310-none-win_amd64.whl", hash = "sha256:7c9129eb40958b3d4500fa2467e6a83356b3b61bfff1b414c7361d9220f9ae8f"}, {file = "pydantic_core-2.23.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:77733e3892bb0a7fa797826361ce8a9184d25c8dffaec60b7ffe928153680ba8"}, {file = "pydantic_core-2.23.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b84d168f6c48fabd1f2027a3d1bdfe62f92cade1fb273a5d68e621da0e44e6d"}, {file = "pydantic_core-2.23.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:df49e7a0861a8c36d089c1ed57d308623d60416dab2647a4a17fe050ba85de0e"}, @@ -2444,20 +2102,12 @@ files = [ {file = "pydantic_core-2.23.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ed541d70698978a20eb63d8c5d72f2cc6d7079d9d90f6b50bad07826f1320f5f"}, {file = "pydantic_core-2.23.4-cp313-none-win32.whl", hash = "sha256:3d5639516376dce1940ea36edf408c554475369f5da2abd45d44621cb616f769"}, {file = "pydantic_core-2.23.4-cp313-none-win_amd64.whl", hash = "sha256:5a1504ad17ba4210df3a045132a7baeeba5a200e930f57512ee02909fc5c4cb5"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f455ee30a9d61d3e1a15abd5068827773d6e4dc513e795f380cdd59932c782d5"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:1e90d2e3bd2c3863d48525d297cd143fe541be8bbf6f579504b9712cb6b643ec"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e203fdf807ac7e12ab59ca2bfcabb38c7cf0b33c41efeb00f8e5da1d86af480"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e08277a400de01bc72436a0ccd02bdf596631411f592ad985dcee21445bd0068"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f220b0eea5965dec25480b6333c788fb72ce5f9129e8759ef876a1d805d00801"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d06b0c8da4f16d1d1e352134427cb194a0a6e19ad5db9161bf32b2113409e728"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:ba1a0996f6c2773bd83e63f18914c1de3c9dd26d55f4ac302a7efe93fb8e7433"}, - {file = "pydantic_core-2.23.4-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:9a5bce9d23aac8f0cf0836ecfc033896aa8443b501c58d0602dbfd5bd5b37753"}, {file = "pydantic_core-2.23.4.tar.gz", hash = "sha256:2584f7cf844ac4d970fba483a717dbe10c1c1c96a969bf65d61ffe94df1b2863"}, ] [[package]] name = "pydantic-settings" -version = "2.5.2" +version = "2.6.1" requires_python = ">=3.8" summary = "Settings management using Pydantic" groups = ["default"] @@ -2466,15 +2116,15 @@ dependencies = [ "python-dotenv>=0.21.0", ] files = [ - {file = "pydantic_settings-2.5.2-py3-none-any.whl", hash = "sha256:2c912e55fd5794a59bf8c832b9de832dcfdf4778d79ff79b708744eed499a907"}, - {file = "pydantic_settings-2.5.2.tar.gz", hash = "sha256:f90b139682bee4d2065273d5185d71d37ea46cfe57e1b5ae184fc6a0b2484ca0"}, + {file = "pydantic_settings-2.6.1-py3-none-any.whl", hash = "sha256:7fb0637c786a558d3103436278a7c4f1cfd29ba8973238a50c5bb9a55387da87"}, + {file = "pydantic_settings-2.6.1.tar.gz", hash = "sha256:e0f92546d8a9923cb8941689abf85d6601a8c19a23e97a34b2964a2e3f813ca0"}, ] [[package]] name = "pydub" version = "0.25.1" summary = "Manipulate audio with an simple and easy high level interface" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6"}, {file = "pydub-0.25.1.tar.gz", hash = "sha256:980a33ce9949cab2a569606b65674d748ecbca4f0796887fd6f46173a7b0d30f"}, @@ -2485,28 +2135,17 @@ name = "pygments" version = "2.18.0" requires_python = ">=3.8" summary = "Pygments is a syntax highlighting package written in Python." 
-groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "pygments-2.18.0-py3-none-any.whl", hash = "sha256:b8e6aca0523f3ab76fee51799c488e38782ac06eafcf95e7ba832985c8e7b13a"}, {file = "pygments-2.18.0.tar.gz", hash = "sha256:786ff802f32e91311bff3889f6e9a86e81505fe99f2735bb6d60ae0c5004f199"}, ] -[[package]] -name = "pyparsing" -version = "3.1.4" -requires_python = ">=3.6.8" -summary = "pyparsing module - Classes and methods to define and execute parsing grammars" -groups = ["default"] -files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, -] - [[package]] name = "pypika" version = "0.48.9" summary = "A SQL query builder API for Python" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, ] @@ -2516,7 +2155,7 @@ name = "pyproject-hooks" version = "1.2.0" requires_python = ">=3.7" summary = "Wrappers to call pyproject.toml-based build backend hooks." -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, @@ -2527,7 +2166,7 @@ name = "pyreadline3" version = "3.5.4" requires_python = ">=3.8" summary = "A python implementation of GNU readline." -groups = ["default"] +groups = ["chat_with_repo"] marker = "sys_platform == \"win32\" and python_version >= \"3.8\"" files = [ {file = "pyreadline3-3.5.4-py3-none-any.whl", hash = "sha256:eaf8e6cc3c49bcccf145fc6067ba8643d1df34d604a1ec0eccbf7a18e6d3fae6"}, @@ -2573,7 +2212,7 @@ name = "python-dateutil" version = "2.9.0.post0" requires_python = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" summary = "Extensions to the standard Python datetime module" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "six>=1.5", ] @@ -2587,7 +2226,7 @@ name = "python-dotenv" version = "1.0.1" requires_python = ">=3.8" summary = "Read key-value pairs from a .env file and set them as environment variables" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "python-dotenv-1.0.1.tar.gz", hash = "sha256:e324ee90a023d808f1959c46bcbc04446a10ced277783dc6ee09987c37ec10ca"}, {file = "python_dotenv-1.0.1-py3-none-any.whl", hash = "sha256:f7b63ef50f1b690dddf550d03497b66d609393b40b564ed0d674909a68ebf16a"}, @@ -2595,13 +2234,13 @@ files = [ [[package]] name = "python-iso639" -version = "2024.4.27" +version = "2024.10.22" requires_python = ">=3.8" summary = "ISO 639 language codes, names, and other associated information" groups = ["default"] files = [ - {file = "python_iso639-2024.4.27-py3-none-any.whl", hash = "sha256:27526a84cebc4c4d53fea9d1ebbc7209c8d279bebaa343e6765a1fc8780565ab"}, - {file = "python_iso639-2024.4.27.tar.gz", hash = "sha256:97e63b5603e085c6a56a12a95740010e75d9134e0aab767e0978b53fd8824f13"}, + {file = "python_iso639-2024.10.22-py3-none-any.whl", hash = "sha256:02d3ce2e01c6896b30b9cbbd3e1c8ee0d7221250b5d63ea9803e0d2a81fd1047"}, + {file = "python_iso639-2024.10.22.tar.gz", hash = "sha256:750f21b6a0bc6baa24253a3d8aae92b582bf93aa40988361cd96852c2c6d9a52"}, ] [[package]] @@ -2609,7 +2248,7 @@ name = "python-multipart" version = "0.0.12" requires_python 
= ">=3.8" summary = "A streaming multipart parser for Python" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"}, {file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"}, @@ -2619,7 +2258,7 @@ files = [ name = "pytz" version = "2024.2" summary = "World timezone definitions, modern and historical" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"}, {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"}, @@ -2630,17 +2269,8 @@ name = "pyyaml" version = "6.0.2" requires_python = ">=3.8" summary = "YAML parser and emitter for Python" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, - {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, - {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, - {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, - {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, - {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, @@ -2673,73 +2303,57 @@ files = [ [[package]] name = "regex" -version = "2024.9.11" +version = "2024.11.6" requires_python = ">=3.8" summary = "Alternative regular expression module, to replace re." 
-groups = ["default"] -files = [ - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1494fa8725c285a81d01dc8c06b55287a1ee5e0e382d8413adc0a9197aac6408"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0e12c481ad92d129c78f13a2a3662317e46ee7ef96c94fd332e1c29131875b7d"}, - {file = "regex-2024.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:16e13a7929791ac1216afde26f712802e3df7bf0360b32e4914dca3ab8baeea5"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:46989629904bad940bbec2106528140a218b4a36bb3042d8406980be1941429c"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a906ed5e47a0ce5f04b2c981af1c9acf9e8696066900bf03b9d7879a6f679fc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e9a091b0550b3b0207784a7d6d0f1a00d1d1c8a11699c1a4d93db3fbefc3ad35"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ddcd9a179c0a6fa8add279a4444015acddcd7f232a49071ae57fa6e278f1f71"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6b41e1adc61fa347662b09398e31ad446afadff932a24807d3ceb955ed865cc8"}, - {file = "regex-2024.9.11-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:ced479f601cd2f8ca1fd7b23925a7e0ad512a56d6e9476f79b8f381d9d37090a"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:635a1d96665f84b292e401c3d62775851aedc31d4f8784117b3c68c4fcd4118d"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c0256beda696edcf7d97ef16b2a33a8e5a875affd6fa6567b54f7c577b30a137"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:3ce4f1185db3fbde8ed8aa223fc9620f276c58de8b0d4f8cc86fd1360829edb6"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:09d77559e80dcc9d24570da3745ab859a9cf91953062e4ab126ba9d5993688ca"}, - {file = "regex-2024.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7a22ccefd4db3f12b526eccb129390942fe874a3a9fdbdd24cf55773a1faab1a"}, - {file = "regex-2024.9.11-cp310-cp310-win32.whl", hash = "sha256:f745ec09bc1b0bd15cfc73df6fa4f726dcc26bb16c23a03f9e3367d357eeedd0"}, - {file = "regex-2024.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:01c2acb51f8a7d6494c8c5eafe3d8e06d76563d8a8a4643b37e9b2dd8a2ff623"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2cce2449e5927a0bf084d346da6cd5eb016b2beca10d0013ab50e3c226ffc0df"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3b37fa423beefa44919e009745ccbf353d8c981516e807995b2bd11c2c77d268"}, - {file = "regex-2024.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:64ce2799bd75039b480cc0360907c4fb2f50022f030bf9e7a8705b636e408fad"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4cc92bb6db56ab0c1cbd17294e14f5e9224f0cc6521167ef388332604e92679"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d05ac6fa06959c4172eccd99a222e1fbf17b5670c4d596cb1e5cde99600674c4"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:040562757795eeea356394a7fb13076ad4f99d3c62ab0f8bdfb21f99a1f85664"}, - 
{file = "regex-2024.9.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6113c008a7780792efc80f9dfe10ba0cd043cbf8dc9a76ef757850f51b4edc50"}, - {file = "regex-2024.9.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8e5fb5f77c8745a60105403a774fe2c1759b71d3e7b4ca237a5e67ad066c7199"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:54d9ff35d4515debf14bc27f1e3b38bfc453eff3220f5bce159642fa762fe5d4"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:df5cbb1fbc74a8305b6065d4ade43b993be03dbe0f8b30032cced0d7740994bd"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:7fb89ee5d106e4a7a51bce305ac4efb981536301895f7bdcf93ec92ae0d91c7f"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:a738b937d512b30bf75995c0159c0ddf9eec0775c9d72ac0202076c72f24aa96"}, - {file = "regex-2024.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e28f9faeb14b6f23ac55bfbbfd3643f5c7c18ede093977f1df249f73fd22c7b1"}, - {file = "regex-2024.9.11-cp311-cp311-win32.whl", hash = "sha256:18e707ce6c92d7282dfce370cd205098384b8ee21544e7cb29b8aab955b66fa9"}, - {file = "regex-2024.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:313ea15e5ff2a8cbbad96ccef6be638393041b0a7863183c2d31e0c6116688cf"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:b0d0a6c64fcc4ef9c69bd5b3b3626cc3776520a1637d8abaa62b9edc147a58f7"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:49b0e06786ea663f933f3710a51e9385ce0cba0ea56b67107fd841a55d56a231"}, - {file = "regex-2024.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5b513b6997a0b2f10e4fd3a1313568e373926e8c252bd76c960f96fd039cd28d"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee439691d8c23e76f9802c42a95cfeebf9d47cf4ffd06f18489122dbb0a7ad64"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a8f877c89719d759e52783f7fe6e1c67121076b87b40542966c02de5503ace42"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23b30c62d0f16827f2ae9f2bb87619bc4fba2044911e2e6c2eb1af0161cdb766"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85ab7824093d8f10d44330fe1e6493f756f252d145323dd17ab6b48733ff6c0a"}, - {file = "regex-2024.9.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8dee5b4810a89447151999428fe096977346cf2f29f4d5e29609d2e19e0199c9"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:98eeee2f2e63edae2181c886d7911ce502e1292794f4c5ee71e60e23e8d26b5d"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:57fdd2e0b2694ce6fc2e5ccf189789c3e2962916fb38779d3e3521ff8fe7a822"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d552c78411f60b1fdaafd117a1fca2f02e562e309223b9d44b7de8be451ec5e0"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a0b2b80321c2ed3fcf0385ec9e51a12253c50f146fddb2abbb10f033fe3d049a"}, - {file = "regex-2024.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:18406efb2f5a0e57e3a5881cd9354c1512d3bb4f5c45d96d110a66114d84d23a"}, - {file = "regex-2024.9.11-cp312-cp312-win32.whl", hash = 
"sha256:e464b467f1588e2c42d26814231edecbcfe77f5ac414d92cbf4e7b55b2c2a776"}, - {file = "regex-2024.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:9e8719792ca63c6b8340380352c24dcb8cd7ec49dae36e963742a275dfae6009"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c157bb447303070f256e084668b702073db99bbb61d44f85d811025fcf38f784"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4db21ece84dfeefc5d8a3863f101995de646c6cb0536952c321a2650aa202c36"}, - {file = "regex-2024.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:220e92a30b426daf23bb67a7962900ed4613589bab80382be09b48896d211e92"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb1ae19e64c14c7ec1995f40bd932448713d3c73509e82d8cd7744dc00e29e86"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f47cd43a5bfa48f86925fe26fbdd0a488ff15b62468abb5d2a1e092a4fb10e85"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9d4a76b96f398697fe01117093613166e6aa8195d63f1b4ec3f21ab637632963"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ea51dcc0835eea2ea31d66456210a4e01a076d820e9039b04ae8d17ac11dee6"}, - {file = "regex-2024.9.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7aaa315101c6567a9a45d2839322c51c8d6e81f67683d529512f5bcfb99c802"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c57d08ad67aba97af57a7263c2d9006d5c404d721c5f7542f077f109ec2a4a29"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:f8404bf61298bb6f8224bb9176c1424548ee1181130818fcd2cbffddc768bed8"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dd4490a33eb909ef5078ab20f5f000087afa2a4daa27b4c072ccb3cb3050ad84"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:eee9130eaad130649fd73e5cd92f60e55708952260ede70da64de420cdcad554"}, - {file = "regex-2024.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6a2644a93da36c784e546de579ec1806bfd2763ef47babc1b03d765fe560c9f8"}, - {file = "regex-2024.9.11-cp313-cp313-win32.whl", hash = "sha256:e997fd30430c57138adc06bba4c7c2968fb13d101e57dd5bb9355bf8ce3fa7e8"}, - {file = "regex-2024.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:042c55879cfeb21a8adacc84ea347721d3d83a159da6acdf1116859e2427c43f"}, - {file = "regex-2024.9.11.tar.gz", hash = "sha256:6c188c307e8433bcb63dc1915022deb553b4203a70722fc542c363bf120a01fd"}, +groups = ["default", "chat_with_repo"] +files = [ + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5478c6962ad548b54a591778e93cd7c456a7a29f8eca9c49e4f9a806dcc5d638"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:2c89a8cc122b25ce6945f0423dc1352cb9593c68abd19223eebbd4e56612c5b7"}, + {file = "regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:94d87b689cdd831934fa3ce16cc15cd65748e6d689f5d2b8f4f4df2065c9fa20"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1062b39a0a2b75a9c694f7a08e7183a80c63c0d62b301418ffd9c35f55aaa114"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:167ed4852351d8a750da48712c3930b031f6efdaa0f22fa1933716bfcd6bf4a3"}, + {file = 
"regex-2024.11.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d548dafee61f06ebdb584080621f3e0c23fff312f0de1afc776e2a2ba99a74f"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2a19f302cd1ce5dd01a9099aaa19cae6173306d1302a43b627f62e21cf18ac0"}, + {file = "regex-2024.11.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bec9931dfb61ddd8ef2ebc05646293812cb6b16b60cf7c9511a832b6f1854b55"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9714398225f299aa85267fd222f7142fcb5c769e73d7733344efc46f2ef5cf89"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:202eb32e89f60fc147a41e55cb086db2a3f8cb82f9a9a88440dcfc5d37faae8d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:4181b814e56078e9b00427ca358ec44333765f5ca1b45597ec7446d3a1ef6e34"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:068376da5a7e4da51968ce4c122a7cd31afaaec4fccc7856c92f63876e57b51d"}, + {file = "regex-2024.11.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ac10f2c4184420d881a3475fb2c6f4d95d53a8d50209a2500723d831036f7c45"}, + {file = "regex-2024.11.6-cp311-cp311-win32.whl", hash = "sha256:c36f9b6f5f8649bb251a5f3f66564438977b7ef8386a52460ae77e6070d309d9"}, + {file = "regex-2024.11.6-cp311-cp311-win_amd64.whl", hash = "sha256:02e28184be537f0e75c1f9b2f8847dc51e08e6e171c6bde130b2687e0c33cf60"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:52fb28f528778f184f870b7cf8f225f5eef0a8f6e3778529bdd40c7b3920796a"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdd6028445d2460f33136c55eeb1f601ab06d74cb3347132e1c24250187500d9"}, + {file = "regex-2024.11.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:805e6b60c54bf766b251e94526ebad60b7de0c70f70a4e6210ee2891acb70bf2"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b85c2530be953a890eaffde05485238f07029600e8f098cdf1848d414a8b45e4"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bb26437975da7dc36b7efad18aa9dd4ea569d2357ae6b783bf1118dabd9ea577"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:abfa5080c374a76a251ba60683242bc17eeb2c9818d0d30117b4486be10c59d3"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b7fa6606c2881c1db9479b0eaa11ed5dfa11c8d60a474ff0e095099f39d98e"}, + {file = "regex-2024.11.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c32f75920cf99fe6b6c539c399a4a128452eaf1af27f39bce8909c9a3fd8cbe"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:982e6d21414e78e1f51cf595d7f321dcd14de1f2881c5dc6a6e23bbbbd68435e"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a7c2155f790e2fb448faed6dd241386719802296ec588a8b9051c1f5c481bc29"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149f5008d286636e48cd0b1dd65018548944e495b0265b45e1bffecce1ef7f39"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:e5364a4502efca094731680e80009632ad6624084aff9a23ce8c8c6820de3e51"}, + {file = "regex-2024.11.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:0a86e7eeca091c09e021db8eb72d54751e527fa47b8d5787caf96d9831bd02ad"}, + {file = "regex-2024.11.6-cp312-cp312-win32.whl", hash = "sha256:32f9a4c643baad4efa81d549c2aadefaeba12249b2adc5af541759237eee1c54"}, + {file = "regex-2024.11.6-cp312-cp312-win_amd64.whl", hash = "sha256:a93c194e2df18f7d264092dc8539b8ffb86b45b899ab976aa15d48214138e81b"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a6ba92c0bcdf96cbf43a12c717eae4bc98325ca3730f6b130ffa2e3c3c723d84"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:525eab0b789891ac3be914d36893bdf972d483fe66551f79d3e27146191a37d4"}, + {file = "regex-2024.11.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:086a27a0b4ca227941700e0b31425e7a28ef1ae8e5e05a33826e17e47fbfdba0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bde01f35767c4a7899b7eb6e823b125a64de314a8ee9791367c9a34d56af18d0"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b583904576650166b3d920d2bcce13971f6f9e9a396c673187f49811b2769dc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c4de13f06a0d54fa0d5ab1b7138bfa0d883220965a29616e3ea61b35d5f5fc7"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cde6e9f2580eb1665965ce9bf17ff4952f34f5b126beb509fee8f4e994f143c"}, + {file = "regex-2024.11.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0d7f453dca13f40a02b79636a339c5b62b670141e63efd511d3f8f73fba162b3"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:59dfe1ed21aea057a65c6b586afd2a945de04fc7db3de0a6e3ed5397ad491b07"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b97c1e0bd37c5cd7902e65f410779d39eeda155800b65fc4d04cc432efa9bc6e"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:f9d1e379028e0fc2ae3654bac3cbbef81bf3fd571272a42d56c24007979bafb6"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:13291b39131e2d002a7940fb176e120bec5145f3aeb7621be6534e46251912c4"}, + {file = "regex-2024.11.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4f51f88c126370dcec4908576c5a627220da6c09d0bff31cfa89f2523843316d"}, + {file = "regex-2024.11.6-cp313-cp313-win32.whl", hash = "sha256:63b13cfd72e9601125027202cad74995ab26921d8cd935c25f09c630436348ff"}, + {file = "regex-2024.11.6-cp313-cp313-win_amd64.whl", hash = "sha256:2b3361af3198667e99927da8b84c1b010752fa4b1115ee30beaa332cabc3ef1a"}, + {file = "regex-2024.11.6.tar.gz", hash = "sha256:7ab159b063c52a0333c884e4679f8d7a85112ee3078fe3d9004b2dd875585519"}, ] [[package]] @@ -2747,7 +2361,7 @@ name = "requests" version = "2.32.3" requires_python = ">=3.8" summary = "Python HTTP for Humans." -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "certifi>=2017.4.17", "charset-normalizer<4,>=2", @@ -2764,7 +2378,7 @@ name = "requests-oauthlib" version = "2.0.0" requires_python = ">=3.4" summary = "OAuthlib authentication support for Requests." 
-groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "oauthlib>=3.0.0", "requests>=2.0.0", @@ -2776,18 +2390,18 @@ files = [ [[package]] name = "rich" -version = "13.9.2" +version = "13.9.4" requires_python = ">=3.8.0" summary = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "markdown-it-py>=2.2.0", "pygments<3.0.0,>=2.13.0", "typing-extensions<5.0,>=4.0.0; python_version < \"3.11\"", ] files = [ - {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, - {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, ] [[package]] @@ -2795,7 +2409,7 @@ name = "rsa" version = "4.9" requires_python = ">=3.6,<4" summary = "Pure-Python RSA implementation" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "pyasn1>=0.1.3", ] @@ -2806,29 +2420,43 @@ files = [ [[package]] name = "ruff" -version = "0.6.9" +version = "0.7.4" requires_python = ">=3.7" summary = "An extremely fast Python linter and code formatter, written in Rust." -groups = ["default", "dev"] -files = [ - {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, - {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, - {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, - {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, - {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, - {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, - {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, +groups = ["chat_with_repo", "lint"] +files = [ + {file = "ruff-0.7.4-py3-none-linux_armv6l.whl", hash = "sha256:a4919925e7684a3f18e18243cd6bea7cfb8e968a6eaa8437971f681b7ec51478"}, + {file = "ruff-0.7.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:cfb365c135b830778dda8c04fb7d4280ed0b984e1aec27f574445231e20d6c63"}, + {file = "ruff-0.7.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:63a569b36bc66fbadec5beaa539dd81e0527cb258b94e29e0531ce41bacc1f20"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d06218747d361d06fd2fdac734e7fa92df36df93035db3dc2ad7aa9852cb109"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e0cea28d0944f74ebc33e9f934238f15c758841f9f5edd180b5315c203293452"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:80094ecd4793c68b2571b128f91754d60f692d64bc0d7272ec9197fdd09bf9ea"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:997512325c6620d1c4c2b15db49ef59543ef9cd0f4aa8065ec2ae5103cedc7e7"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00b4cf3a6b5fad6d1a66e7574d78956bbd09abfd6c8a997798f01f5da3d46a05"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7dbdc7d8274e1422722933d1edddfdc65b4336abf0b16dfcb9dedd6e6a517d06"}, + {file = "ruff-0.7.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e92dfb5f00eaedb1501b2f906ccabfd67b2355bdf117fea9719fc99ac2145bc"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3bd726099f277d735dc38900b6a8d6cf070f80828877941983a57bca1cd92172"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2e32829c429dd081ee5ba39aef436603e5b22335c3d3fff013cd585806a6486a"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:662a63b4971807623f6f90c1fb664613f67cc182dc4d991471c23c541fee62dd"}, + {file = "ruff-0.7.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:876f5e09eaae3eb76814c1d3b68879891d6fde4824c015d48e7a7da4cf066a3a"}, + {file = "ruff-0.7.4-py3-none-win32.whl", hash = "sha256:75c53f54904be42dd52a548728a5b572344b50d9b2873d13a3f8c5e3b91f5cac"}, + {file = "ruff-0.7.4-py3-none-win_amd64.whl", hash = "sha256:745775c7b39f914238ed1f1b0bebed0b9155a17cd8bc0b08d3c87e4703b990d6"}, + {file = "ruff-0.7.4-py3-none-win_arm64.whl", hash = "sha256:11bff065102c3ae9d3ea4dc9ecdfe5a5171349cdd0787c1fc64761212fc9cf1f"}, + {file = "ruff-0.7.4.tar.gz", hash = "sha256:cd12e35031f5af6b9b93715d8c4f40360070b2041f81273d0527683d5708fce2"}, +] + +[[package]] +name = "safehttpx" +version = "0.1.1" +requires_python = ">=3.9" +summary = "A small Python library created to help developers protect their applications from Server Side Request Forgery (SSRF) attacks." 
+groups = ["chat_with_repo"] +dependencies = [ + "httpx", +] +files = [ + {file = "safehttpx-0.1.1-py3-none-any.whl", hash = "sha256:1d93b64023c00d5c53ea70ea36e773b8a0dba5eaf1a1eb188856584a0a4cf4d1"}, + {file = "safehttpx-0.1.1.tar.gz", hash = "sha256:6e1bedf7767213300da5e4cb7e823e98edc934f17ca192d2e585111a2b899149"}, ] [[package]] @@ -2836,29 +2464,18 @@ name = "semantic-version" version = "2.10.0" requires_python = ">=2.7" summary = "A library implementing the 'SemVer' scheme." -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, ] -[[package]] -name = "setuptools" -version = "75.1.0" -requires_python = ">=3.8" -summary = "Easily download, build, install, upgrade, and uninstall Python packages" -groups = ["default"] -files = [ - {file = "setuptools-75.1.0-py3-none-any.whl", hash = "sha256:35ab7fd3bcd95e6b7fd704e4a1539513edad446c097797f2985e0e4b960772f2"}, - {file = "setuptools-75.1.0.tar.gz", hash = "sha256:d59a21b17a275fb872a9c3dae73963160ae079f1049ed956880cd7c09b120538"}, -] - [[package]] name = "shellingham" version = "1.5.4" requires_python = ">=3.7" summary = "Tool to Detect Surrounding Shell" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, {file = "shellingham-1.5.4.tar.gz", hash = "sha256:8dbca0739d487e5bd35ab3ca4b36e11c4078f3a234bfce294b0a0291363404de"}, @@ -2869,7 +2486,7 @@ name = "six" version = "1.16.0" requires_python = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" summary = "Python 2 and 3 compatibility utilities" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, @@ -2891,7 +2508,7 @@ name = "sniffio" version = "1.3.1" requires_python = ">=3.7" summary = "Sniff out which async library your code is running under" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, @@ -2899,97 +2516,97 @@ files = [ [[package]] name = "sqlalchemy" -version = "2.0.35" +version = "2.0.36" requires_python = ">=3.7" summary = "Database Abstraction Library" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "greenlet!=0.4.17; (platform_machine == \"win32\" or platform_machine == \"WIN32\" or platform_machine == \"AMD64\" or platform_machine == \"amd64\" or platform_machine == \"x86_64\" or platform_machine == \"ppc64le\" or platform_machine == \"aarch64\") and python_version < \"3.13\"", "importlib-metadata; python_version < \"3.8\"", "typing-extensions>=4.6.0", ] files = [ - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, - {file 
= "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, - {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, - {file = "sqlalchemy-2.0.35.tar.gz", hash = 
"sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = 
"sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] [[package]] name = "sqlalchemy" -version = "2.0.35" +version = "2.0.36" extras = ["asyncio"] requires_python = ">=3.7" summary = "Database Abstraction Library" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ - "SQLAlchemy==2.0.35", + "SQLAlchemy==2.0.36", "greenlet!=0.4.17", ] files = [ - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:67219632be22f14750f0d1c70e62f204ba69d28f62fd6432ba05ab295853de9b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4668bd8faf7e5b71c0319407b608f278f279668f358857dbfd10ef1954ac9f90"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb8bea573863762bbf45d1e13f87c2d2fd32cee2dbd50d050f83f87429c9e1ea"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f552023710d4b93d8fb29a91fadf97de89c5926c6bd758897875435f2a939f33"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:016b2e665f778f13d3c438651dd4de244214b527a275e0acf1d44c05bc6026a9"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7befc148de64b6060937231cbff8d01ccf0bfd75aa26383ffdf8d82b12ec04ff"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win32.whl", hash = "sha256:22b83aed390e3099584b839b93f80a0f4a95ee7f48270c97c90acd40ee646f0b"}, - {file = "SQLAlchemy-2.0.35-cp310-cp310-win_amd64.whl", hash = "sha256:a29762cd3d116585278ffb2e5b8cc311fb095ea278b96feef28d0b423154858e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e21f66748ab725ade40fa7af8ec8b5019c68ab00b929f6643e1b1af461eddb60"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8a6219108a15fc6d24de499d0d515c7235c617b2540d97116b663dade1a54d62"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042622a5306c23b972192283f4e22372da3b8ddf5f7aac1cc5d9c9b222ab3ff6"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:627dee0c280eea91aed87b20a1f849e9ae2fe719d52cbf847c0e0ea34464b3f7"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4fdcd72a789c1c31ed242fd8c1bcd9ea186a98ee8e5408a50e610edfef980d71"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:89b64cd8898a3a6f642db4eb7b26d1b28a497d4022eccd7717ca066823e9fb01"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win32.whl", hash = "sha256:6a93c5a0dfe8d34951e8a6f499a9479ffb9258123551fa007fc708ae2ac2bc5e"}, - {file = "SQLAlchemy-2.0.35-cp311-cp311-win_amd64.whl", hash = "sha256:c68fe3fcde03920c46697585620135b4ecfdfc1ed23e75cc2c2ae9f8502c10b8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:eb60b026d8ad0c97917cb81d3662d0b39b8ff1335e3fabb24984c6acd0c900a2"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6921ee01caf375363be5e9ae70d08ce7ca9d7e0e8983183080211a062d299468"}, - {file = 
"SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8cdf1a0dbe5ced887a9b127da4ffd7354e9c1a3b9bb330dce84df6b70ccb3a8d"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93a71c8601e823236ac0e5d087e4f397874a421017b3318fd92c0b14acf2b6db"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e04b622bb8a88f10e439084486f2f6349bf4d50605ac3e445869c7ea5cf0fa8c"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1b56961e2d31389aaadf4906d453859f35302b4eb818d34a26fab72596076bb8"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win32.whl", hash = "sha256:0f9f3f9a3763b9c4deb8c5d09c4cc52ffe49f9876af41cc1b2ad0138878453cf"}, - {file = "SQLAlchemy-2.0.35-cp312-cp312-win_amd64.whl", hash = "sha256:25b0f63e7fcc2a6290cb5f7f5b4fc4047843504983a28856ce9b35d8f7de03cc"}, - {file = "SQLAlchemy-2.0.35-py3-none-any.whl", hash = "sha256:2ab3f0336c0387662ce6221ad30ab3a5e6499aab01b9790879b6578fd9b8faa1"}, - {file = "sqlalchemy-2.0.35.tar.gz", hash = "sha256:e11d7ea4d24f0a262bccf9a7cd6284c976c5369dac21db237cff59586045ab9f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"}, + {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"}, + {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = 
"sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = "sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, + {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, + {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, + {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, ] [[package]] name = "starlette" -version = "0.38.6" +version = "0.41.2" requires_python = ">=3.8" summary = "The little ASGI library that shines." -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "anyio<5,>=3.4.0", "typing-extensions>=3.10.0; python_version < \"3.10\"", ] files = [ - {file = "starlette-0.38.6-py3-none-any.whl", hash = "sha256:4517a1409e2e73ee4951214ba012052b9e16f60e90d73cfb06192c19203bbb05"}, - {file = "starlette-0.38.6.tar.gz", hash = "sha256:863a1588f5574e70a821dadefb41e4881ea451a47a3cd1b4df359d4ffefe5ead"}, + {file = "starlette-0.41.2-py3-none-any.whl", hash = "sha256:fbc189474b4731cf30fcef52f18a8d070e3f3b46c6a04c97579e85e6ffca942d"}, + {file = "starlette-0.41.2.tar.gz", hash = "sha256:9834fd799d1a87fd346deb76158668cfa0b0d56f85caefe8268e2d97c3468b62"}, ] [[package]] @@ -2997,7 +2614,7 @@ name = "sympy" version = "1.13.3" requires_python = ">=3.8" summary = "Computer algebra system (CAS) in Python" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "mpmath<1.4,>=1.1.0", ] @@ -3011,7 +2628,7 @@ name = "tenacity" version = "8.5.0" requires_python = ">=3.8" summary = "Retry code until it succeeds" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "tenacity-8.5.0-py3-none-any.whl", hash = "sha256:b594c2a5945830c267ce6b79a166228323ed52718f30302c1359836112346687"}, {file = "tenacity-8.5.0.tar.gz", hash = "sha256:8bc6c0c8a09b31e6cad13c47afbed1a567518250a9a171418582ed8d9c20ca78"}, @@ -3022,18 +2639,12 @@ name = "tiktoken" version = "0.8.0" requires_python = ">=3.9" summary = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "regex>=2022.1.18", "requests>=2.26.0", ] files = [ - {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, - {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = 
"sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, - {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, - {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, - {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, - {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, @@ -3057,80 +2668,51 @@ files = [ [[package]] name = "tokenizers" -version = "0.20.0" +version = "0.20.3" requires_python = ">=3.7" summary = "" -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "huggingface-hub<1.0,>=0.16.4", ] files = [ - {file = "tokenizers-0.20.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:6cff5c5e37c41bc5faa519d6f3df0679e4b37da54ea1f42121719c5e2b4905c0"}, - {file = "tokenizers-0.20.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:62a56bf75c27443432456f4ca5ca055befa95e25be8a28141cc495cac8ae4d6d"}, - {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68cc7de6a63f09c4a86909c2597b995aa66e19df852a23aea894929c74369929"}, - {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:053c37ecee482cc958fdee53af3c6534286a86f5d35aac476f7c246830e53ae5"}, - {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3d7074aaabc151a6363fa03db5493fc95b423b2a1874456783989e96d541c7b6"}, - {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a11435780f2acd89e8fefe5e81cecf01776f6edb9b3ac95bcb76baee76b30b90"}, - {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9a81cd2712973b007d84268d45fc3f6f90a79c31dfe7f1925e6732f8d2959987"}, - {file = "tokenizers-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7dfd796ab9d909f76fb93080e1c7c8309f196ecb316eb130718cd5e34231c69"}, - {file = "tokenizers-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8029ad2aa8cb00605c9374566034c1cc1b15130713e0eb5afcef6cface8255c9"}, - {file = "tokenizers-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ca4d54260ebe97d59dfa9a30baa20d0c4dd9137d99a8801700055c561145c24e"}, - {file = "tokenizers-0.20.0-cp310-none-win32.whl", hash = "sha256:95ee16b57cec11b86a7940174ec5197d506439b0f415ab3859f254b1dffe9df0"}, - {file = "tokenizers-0.20.0-cp310-none-win_amd64.whl", hash = "sha256:0a61a11e93eeadbf02aea082ffc75241c4198e0608bbbac4f65a9026851dcf37"}, - {file = "tokenizers-0.20.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:6636b798b3c4d6c9b1af1a918bd07c867808e5a21c64324e95318a237e6366c3"}, - {file = "tokenizers-0.20.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:5ec603e42eaf499ffd58b9258162add948717cf21372458132f14e13a6bc7172"}, - {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cce124264903a8ea6f8f48e1cc7669e5ef638c18bd4ab0a88769d5f92debdf7f"}, - {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07bbeba0231cf8de07aa6b9e33e9779ff103d47042eeeb859a8c432e3292fb98"}, - {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:06c0ca8397b35d38b83a44a9c6929790c1692957d88541df061cb34d82ebbf08"}, - {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ca6557ac3b83d912dfbb1f70ab56bd4b0594043916688e906ede09f42e192401"}, - {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a5ad94c9e80ac6098328bee2e3264dbced4c6faa34429994d473f795ec58ef4"}, - {file = "tokenizers-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b5c7f906ee6bec30a9dc20268a8b80f3b9584de1c9f051671cb057dc6ce28f6"}, - {file = "tokenizers-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:31e087e9ee1b8f075b002bfee257e858dc695f955b43903e1bb4aa9f170e37fe"}, - {file = "tokenizers-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c3124fb6f3346cb3d8d775375d3b429bf4dcfc24f739822702009d20a4297990"}, - {file = "tokenizers-0.20.0-cp311-none-win32.whl", hash = "sha256:a4bb8b40ba9eefa621fdcabf04a74aa6038ae3be0c614c6458bd91a4697a452f"}, - {file = "tokenizers-0.20.0-cp311-none-win_amd64.whl", hash = "sha256:2b709d371f1fe60a28ef0c5c67815952d455ca7f34dbe7197eaaed3cc54b658e"}, - {file = "tokenizers-0.20.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:15c81a17d0d66f4987c6ca16f4bea7ec253b8c7ed1bb00fdc5d038b1bb56e714"}, - {file = "tokenizers-0.20.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a531cdf1fb6dc41c984c785a3b299cb0586de0b35683842a3afbb1e5207f910"}, - {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06caabeb4587f8404e0cd9d40f458e9cba3e815c8155a38e579a74ff3e2a4301"}, - {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8768f964f23f5b9f50546c0369c75ab3262de926983888bbe8b98be05392a79c"}, - {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:626403860152c816f97b649fd279bd622c3d417678c93b4b1a8909b6380b69a8"}, - {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c1b88fa9e5ff062326f4bf82681da5a96fca7104d921a6bd7b1e6fcf224af26"}, - {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d7e559436a07dc547f22ce1101f26d8b2fad387e28ec8e7e1e3b11695d681d8"}, - {file = "tokenizers-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e48afb75e50449848964e4a67b0da01261dd3aa8df8daecf10db8fd7f5b076eb"}, - {file = "tokenizers-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:baf5d0e1ff44710a95eefc196dd87666ffc609fd447c5e5b68272a7c3d342a1d"}, - {file = "tokenizers-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:e5e56df0e8ed23ba60ae3848c3f069a0710c4b197218fe4f89e27eba38510768"}, - {file = "tokenizers-0.20.0-cp312-none-win32.whl", hash = "sha256:ec53e5ecc142a82432f9c6c677dbbe5a2bfee92b8abf409a9ecb0d425ee0ce75"}, - {file = "tokenizers-0.20.0-cp312-none-win_amd64.whl", hash = 
"sha256:f18661ece72e39c0dfaa174d6223248a15b457dbd4b0fc07809b8e6d3ca1a234"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:d68e15f1815357b059ec266062340c343ea7f98f7f330602df81ffa3474b6122"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:23f9ecec637b9bc80da5f703808d29ed5329e56b5aa8d791d1088014f48afadc"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f830b318ee599e3d0665b3e325f85bc75ee2d2ca6285f52e439dc22b64691580"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3dc750def789cb1de1b5a37657919545e1d9ffa667658b3fa9cb7862407a1b8"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e26e6c755ae884c2ea6135cd215bdd0fccafe4ee62405014b8c3cd19954e3ab9"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a1158c7174f427182e08baa2a8ded2940f2b4a3e94969a85cc9cfd16004cbcea"}, - {file = "tokenizers-0.20.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:6324826287a3fc198898d3dcf758fe4a8479e42d6039f4c59e2cedd3cf92f64e"}, - {file = "tokenizers-0.20.0.tar.gz", hash = "sha256:39d7acc43f564c274085cafcd1dae9d36f332456de1a31970296a6b8da4eac8d"}, -] - -[[package]] -name = "tomli" -version = "2.0.2" -requires_python = ">=3.8" -summary = "A lil' TOML parser" -groups = ["default", "test"] -files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, -] - -[[package]] -name = "tomli-w" -version = "1.1.0" -requires_python = ">=3.9" -summary = "A lil' TOML writer" -groups = ["default"] -files = [ - {file = "tomli_w-1.1.0-py3-none-any.whl", hash = "sha256:1403179c78193e3184bfaade390ddbd071cba48a32a2e62ba11aae47490c63f7"}, - {file = "tomli_w-1.1.0.tar.gz", hash = "sha256:49e847a3a304d516a169a601184932ef0f6b61623fe680f836a2aa7128ed0d33"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:585b51e06ca1f4839ce7759941e66766d7b060dccfdc57c4ca1e5b9a33013a90"}, + {file = "tokenizers-0.20.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:61cbf11954f3b481d08723ebd048ba4b11e582986f9be74d2c3bdd9293a4538d"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef820880d5e4e8484e2fa54ff8d297bb32519eaa7815694dc835ace9130a3eea"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:67ef4dcb8841a4988cd00dd288fb95dfc8e22ed021f01f37348fd51c2b055ba9"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff1ef8bd47a02b0dc191688ccb4da53600df5d4c9a05a4b68e1e3de4823e78eb"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:444d188186eab3148baf0615b522461b41b1f0cd58cd57b862ec94b6ac9780f1"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:37c04c032c1442740b2c2d925f1857885c07619224a533123ac7ea71ca5713da"}, + {file = "tokenizers-0.20.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453c7769d22231960ee0e883d1005c93c68015025a5e4ae56275406d94a3c907"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_aarch64.whl", 
hash = "sha256:4bb31f7b2847e439766aaa9cc7bccf7ac7088052deccdb2275c952d96f691c6a"}, + {file = "tokenizers-0.20.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:843729bf0f991b29655a069a2ff58a4c24375a553c70955e15e37a90dd4e045c"}, + {file = "tokenizers-0.20.3-cp311-none-win32.whl", hash = "sha256:efcce3a927b1e20ca694ba13f7a68c59b0bd859ef71e441db68ee42cf20c2442"}, + {file = "tokenizers-0.20.3-cp311-none-win_amd64.whl", hash = "sha256:88301aa0801f225725b6df5dea3d77c80365ff2362ca7e252583f2b4809c4cc0"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:49d12a32e190fad0e79e5bdb788d05da2f20d8e006b13a70859ac47fecf6ab2f"}, + {file = "tokenizers-0.20.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:282848cacfb9c06d5e51489f38ec5aa0b3cd1e247a023061945f71f41d949d73"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:abe4e08c7d0cd6154c795deb5bf81d2122f36daf075e0c12a8b050d824ef0a64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ca94fc1b73b3883c98f0c88c77700b13d55b49f1071dfd57df2b06f3ff7afd64"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef279c7e239f95c8bdd6ff319d9870f30f0d24915b04895f55b1adcf96d6c60d"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:16384073973f6ccbde9852157a4fdfe632bb65208139c9d0c0bd0176a71fd67f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:312d522caeb8a1a42ebdec87118d99b22667782b67898a76c963c058a7e41d4f"}, + {file = "tokenizers-0.20.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2b7cb962564785a83dafbba0144ecb7f579f1d57d8c406cdaa7f32fe32f18ad"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:124c5882ebb88dadae1fc788a582299fcd3a8bd84fc3e260b9918cf28b8751f5"}, + {file = "tokenizers-0.20.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2b6e54e71f84c4202111a489879005cb14b92616a87417f6c102c833af961ea2"}, + {file = "tokenizers-0.20.3-cp312-none-win32.whl", hash = "sha256:83d9bfbe9af86f2d9df4833c22e94d94750f1d0cd9bfb22a7bb90a86f61cdb1c"}, + {file = "tokenizers-0.20.3-cp312-none-win_amd64.whl", hash = "sha256:44def74cee574d609a36e17c8914311d1b5dbcfe37c55fd29369d42591b91cf2"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0b630e0b536ef0e3c8b42c685c1bc93bd19e98c0f1543db52911f8ede42cf84"}, + {file = "tokenizers-0.20.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a02d160d2b19bcbfdf28bd9a4bf11be4cb97d0499c000d95d4c4b1a4312740b6"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e3d80d89b068bc30034034b5319218c7c0a91b00af19679833f55f3becb6945"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:174a54910bed1b089226512b4458ea60d6d6fd93060254734d3bc3540953c51c"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:098b8a632b8656aa5802c46689462c5c48f02510f24029d71c208ec2c822e771"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:78c8c143e3ae41e718588281eb3e212c2b31623c9d6d40410ec464d7d6221fb5"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2b26b0aadb18cd8701077362ba359a06683662d5cafe3e8e8aba10eb05c037f1"}, + {file = "tokenizers-0.20.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07d7851a72717321022f3774e84aa9d595a041d643fafa2e87fbc9b18711dac0"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:bd44e48a430ada902c6266a8245f5036c4fe744fcb51f699999fbe82aa438797"}, + {file = "tokenizers-0.20.3-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:a4c186bb006ccbe1f5cc4e0380d1ce7806f5955c244074fd96abc55e27b77f01"}, + {file = "tokenizers-0.20.3-cp313-none-win32.whl", hash = "sha256:6e19e0f1d854d6ab7ea0c743d06e764d1d9a546932be0a67f33087645f00fe13"}, + {file = "tokenizers-0.20.3-cp313-none-win_amd64.whl", hash = "sha256:d50ede425c7e60966a9680d41b58b3a0950afa1bb570488e2972fa61662c4273"}, + {file = "tokenizers-0.20.3.tar.gz", hash = "sha256:2278b34c5d0dd78e087e1ca7f9b1dcbf129d80211afa645f214bd6e051037539"}, ] [[package]] @@ -3138,7 +2720,7 @@ name = "tomlkit" version = "0.12.0" requires_python = ">=3.7" summary = "Style preserving TOML library" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "tomlkit-0.12.0-py3-none-any.whl", hash = "sha256:926f1f37a1587c7a4f6c7484dae538f1345d96d793d9adab5d3675957b1d0766"}, {file = "tomlkit-0.12.0.tar.gz", hash = "sha256:01f0477981119c7d8ee0f67ebe0297a7c95b14cf9f4b102b45486deb77018716"}, @@ -3146,24 +2728,24 @@ files = [ [[package]] name = "tqdm" -version = "4.66.5" +version = "4.67.0" requires_python = ">=3.7" summary = "Fast, Extensible Progress Meter" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "colorama; platform_system == \"Windows\"", ] files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, + {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, + {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, ] [[package]] name = "typer" -version = "0.12.5" +version = "0.13.0" requires_python = ">=3.7" summary = "Typer, build great CLIs. Easy to code. Based on Python type hints." 
-groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "click>=8.0.0", "rich>=10.11.0", @@ -3171,8 +2753,8 @@ dependencies = [ "typing-extensions>=3.7.4.3", ] files = [ - {file = "typer-0.12.5-py3-none-any.whl", hash = "sha256:62fe4e471711b147e3365034133904df3e235698399bc4de2b36c8579298d52b"}, - {file = "typer-0.12.5.tar.gz", hash = "sha256:f592f089bedcc8ec1b974125d64851029c3b1af145f04aca64d69410f0c9b722"}, + {file = "typer-0.13.0-py3-none-any.whl", hash = "sha256:d85fe0b777b2517cc99c8055ed735452f2659cd45e451507c76f48ce5c1d00e2"}, + {file = "typer-0.13.0.tar.gz", hash = "sha256:f1c7198347939361eec90139ffa0fd8b3df3a2259d5852a0f7400e476d95985c"}, ] [[package]] @@ -3180,7 +2762,7 @@ name = "typing-extensions" version = "4.12.2" requires_python = ">=3.8" summary = "Backported and Experimental Type Hints for Python 3.8+" -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"}, {file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"}, @@ -3190,7 +2772,7 @@ files = [ name = "typing-inspect" version = "0.9.0" summary = "Runtime inspection utilities for typing module." -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "mypy-extensions>=0.3.0", "typing-extensions>=3.7.4", @@ -3206,7 +2788,7 @@ name = "tzdata" version = "2024.2" requires_python = ">=2" summary = "Provider of IANA time zone data" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "tzdata-2024.2-py2.py3-none-any.whl", hash = "sha256:a48093786cdcde33cad18c2555e8532f34422074448fbc874186f0abd79565cd"}, {file = "tzdata-2024.2.tar.gz", hash = "sha256:7d85cc416e9382e69095b7bdf4afd9e3880418a2413feec7069d533d6b4e31cc"}, @@ -3217,7 +2799,7 @@ name = "urllib3" version = "2.2.3" requires_python = ">=3.8" summary = "HTTP library with thread-safe connection pooling, file post, and more." -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, @@ -3225,69 +2807,69 @@ files = [ [[package]] name = "uvicorn" -version = "0.31.0" +version = "0.32.0" requires_python = ">=3.8" summary = "The lightning-fast ASGI server." -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "click>=7.0", "h11>=0.8", "typing-extensions>=4.0; python_version < \"3.11\"", ] files = [ - {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, - {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, + {file = "uvicorn-0.32.0-py3-none-any.whl", hash = "sha256:60b8f3a5ac027dcd31448f411ced12b5ef452c646f76f02f8cc3f25d8d26fd82"}, + {file = "uvicorn-0.32.0.tar.gz", hash = "sha256:f78b36b143c16f54ccdb8190d0a26b5f1901fe5a3c777e1ab29f26391af8551e"}, ] [[package]] name = "uvicorn" -version = "0.31.0" +version = "0.32.0" extras = ["standard"] requires_python = ">=3.8" summary = "The lightning-fast ASGI server." 
-groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "colorama>=0.4; sys_platform == \"win32\"", "httptools>=0.5.0", "python-dotenv>=0.13", "pyyaml>=5.1", - "uvicorn==0.31.0", + "uvicorn==0.32.0", "uvloop!=0.15.0,!=0.15.1,>=0.14.0; (sys_platform != \"cygwin\" and sys_platform != \"win32\") and platform_python_implementation != \"PyPy\"", "watchfiles>=0.13", "websockets>=10.4", ] files = [ - {file = "uvicorn-0.31.0-py3-none-any.whl", hash = "sha256:cac7be4dd4d891c363cd942160a7b02e69150dcbc7a36be04d5f4af4b17c8ced"}, - {file = "uvicorn-0.31.0.tar.gz", hash = "sha256:13bc21373d103859f68fe739608e2eb054a816dea79189bc3ca08ea89a275906"}, + {file = "uvicorn-0.32.0-py3-none-any.whl", hash = "sha256:60b8f3a5ac027dcd31448f411ced12b5ef452c646f76f02f8cc3f25d8d26fd82"}, + {file = "uvicorn-0.32.0.tar.gz", hash = "sha256:f78b36b143c16f54ccdb8190d0a26b5f1901fe5a3c777e1ab29f26391af8551e"}, ] [[package]] name = "uvloop" -version = "0.20.0" +version = "0.21.0" requires_python = ">=3.8.0" summary = "Fast implementation of asyncio event loop on top of libuv" -groups = ["default"] +groups = ["chat_with_repo"] marker = "(sys_platform != \"cygwin\" and sys_platform != \"win32\") and platform_python_implementation != \"PyPy\"" files = [ - {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:9ebafa0b96c62881d5cafa02d9da2e44c23f9f0cd829f3a32a6aff771449c996"}, - {file = "uvloop-0.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:35968fc697b0527a06e134999eef859b4034b37aebca537daeb598b9d45a137b"}, - {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b16696f10e59d7580979b420eedf6650010a4a9c3bd8113f24a103dfdb770b10"}, - {file = "uvloop-0.20.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b04d96188d365151d1af41fa2d23257b674e7ead68cfd61c725a422764062ae"}, - {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:94707205efbe809dfa3a0d09c08bef1352f5d3d6612a506f10a319933757c006"}, - {file = "uvloop-0.20.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:89e8d33bb88d7263f74dc57d69f0063e06b5a5ce50bb9a6b32f5fcbe655f9e73"}, - {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e50289c101495e0d1bb0bfcb4a60adde56e32f4449a67216a1ab2750aa84f037"}, - {file = "uvloop-0.20.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e237f9c1e8a00e7d9ddaa288e535dc337a39bcbf679f290aee9d26df9e72bce9"}, - {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:746242cd703dc2b37f9d8b9f173749c15e9a918ddb021575a0205ec29a38d31e"}, - {file = "uvloop-0.20.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82edbfd3df39fb3d108fc079ebc461330f7c2e33dbd002d146bf7c445ba6e756"}, - {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:80dc1b139516be2077b3e57ce1cb65bfed09149e1d175e0478e7a987863b68f0"}, - {file = "uvloop-0.20.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:4f44af67bf39af25db4c1ac27e82e9665717f9c26af2369c404be865c8818dcf"}, - {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4b75f2950ddb6feed85336412b9a0c310a2edbcf4cf931aa5cfe29034829676d"}, - {file = "uvloop-0.20.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:77fbc69c287596880ecec2d4c7a62346bef08b6209749bf6ce8c22bbaca0239e"}, - {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:6462c95f48e2d8d4c993a2950cd3d31ab061864d1c226bbf0ee2f1a8f36674b9"}, - {file = "uvloop-0.20.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:649c33034979273fa71aa25d0fe120ad1777c551d8c4cd2c0c9851d88fcb13ab"}, - {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3a609780e942d43a275a617c0839d85f95c334bad29c4c0918252085113285b5"}, - {file = "uvloop-0.20.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:aea15c78e0d9ad6555ed201344ae36db5c63d428818b4b2a42842b3870127c00"}, - {file = "uvloop-0.20.0.tar.gz", hash = "sha256:4603ca714a754fc8d9b197e325db25b2ea045385e8a3ad05d3463de725fdf469"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:c0f3fa6200b3108919f8bdabb9a7f87f20e7097ea3c543754cabc7d717d95cf8"}, + {file = "uvloop-0.21.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0878c2640cf341b269b7e128b1a5fed890adc4455513ca710d77d5e93aa6d6a0"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9fb766bb57b7388745d8bcc53a359b116b8a04c83a2288069809d2b3466c37e"}, + {file = "uvloop-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8a375441696e2eda1c43c44ccb66e04d61ceeffcd76e4929e527b7fa401b90fb"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:baa0e6291d91649c6ba4ed4b2f982f9fa165b5bbd50a9e203c416a2797bab3c6"}, + {file = "uvloop-0.21.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:4509360fcc4c3bd2c70d87573ad472de40c13387f5fda8cb58350a1d7475e58d"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:359ec2c888397b9e592a889c4d72ba3d6befba8b2bb01743f72fffbde663b59c"}, + {file = "uvloop-0.21.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7089d2dc73179ce5ac255bdf37c236a9f914b264825fdaacaded6990a7fb4c2"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baa4dcdbd9ae0a372f2167a207cd98c9f9a1ea1188a8a526431eef2f8116cc8d"}, + {file = "uvloop-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86975dca1c773a2c9864f4c52c5a55631038e387b47eaf56210f873887b6c8dc"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:461d9ae6660fbbafedd07559c6a2e57cd553b34b0065b6550685f6653a98c1cb"}, + {file = "uvloop-0.21.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:183aef7c8730e54c9a3ee3227464daed66e37ba13040bb3f350bc2ddc040f22f"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bfd55dfcc2a512316e65f16e503e9e450cab148ef11df4e4e679b5e8253a5281"}, + {file = "uvloop-0.21.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:787ae31ad8a2856fc4e7c095341cccc7209bd657d0e71ad0dc2ea83c4a6fa8af"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ee4d4ef48036ff6e5cfffb09dd192c7a5027153948d85b8da7ff705065bacc6"}, + {file = "uvloop-0.21.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3df876acd7ec037a3d005b3ab85a7e4110422e4d9c1571d4fc89b0fc41b6816"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:bd53ecc9a0f3d87ab847503c2e1552b690362e005ab54e8a48ba97da3924c0dc"}, + {file = "uvloop-0.21.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a5c39f217ab3c663dc699c04cbd50c13813e31d917642d459fdcec07555cc553"}, + {file = "uvloop-0.21.0.tar.gz", hash = 
"sha256:3bf12b0fda68447806a7ad847bfa591613177275d35b6724b1ee573faa3704e3"}, ] [[package]] @@ -3295,23 +2877,11 @@ name = "watchfiles" version = "0.24.0" requires_python = ">=3.8" summary = "Simple, modern and high performance file watching and code reload in python." -groups = ["default"] +groups = ["chat_with_repo"] dependencies = [ "anyio>=3.0.0", ] files = [ - {file = "watchfiles-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:083dc77dbdeef09fa44bb0f4d1df571d2e12d8a8f985dccde71ac3ac9ac067a0"}, - {file = "watchfiles-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e94e98c7cb94cfa6e071d401ea3342767f28eb5a06a58fafdc0d2a4974f4f35c"}, - {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:82ae557a8c037c42a6ef26c494d0631cacca040934b101d001100ed93d43f361"}, - {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:acbfa31e315a8f14fe33e3542cbcafc55703b8f5dcbb7c1eecd30f141df50db3"}, - {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b74fdffce9dfcf2dc296dec8743e5b0332d15df19ae464f0e249aa871fc1c571"}, - {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:449f43f49c8ddca87c6b3980c9284cab6bd1f5c9d9a2b00012adaaccd5e7decd"}, - {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4abf4ad269856618f82dee296ac66b0cd1d71450fc3c98532d93798e73399b7a"}, - {file = "watchfiles-0.24.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9f895d785eb6164678ff4bb5cc60c5996b3ee6df3edb28dcdeba86a13ea0465e"}, - {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:7ae3e208b31be8ce7f4c2c0034f33406dd24fbce3467f77223d10cd86778471c"}, - {file = "watchfiles-0.24.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2efec17819b0046dde35d13fb8ac7a3ad877af41ae4640f4109d9154ed30a188"}, - {file = "watchfiles-0.24.0-cp310-none-win32.whl", hash = "sha256:6bdcfa3cd6fdbdd1a068a52820f46a815401cbc2cb187dd006cb076675e7b735"}, - {file = "watchfiles-0.24.0-cp310-none-win_amd64.whl", hash = "sha256:54ca90a9ae6597ae6dc00e7ed0a040ef723f84ec517d3e7ce13e63e4bc82fa04"}, {file = "watchfiles-0.24.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:bdcd5538e27f188dd3c804b4a8d5f52a7fc7f87e7fd6b374b8e36a4ca03db428"}, {file = "watchfiles-0.24.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2dadf8a8014fde6addfd3c379e6ed1a981c8f0a48292d662e27cabfe4239c83c"}, {file = "watchfiles-0.24.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6509ed3f467b79d95fc62a98229f79b1a60d1b93f101e1c61d10c95a46a84f43"}, @@ -3350,10 +2920,6 @@ files = [ {file = "watchfiles-0.24.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:edf71b01dec9f766fb285b73930f95f730bb0943500ba0566ae234b5c1618c18"}, {file = "watchfiles-0.24.0-cp313-none-win32.whl", hash = "sha256:f4c96283fca3ee09fb044f02156d9570d156698bc3734252175a38f0e8975f07"}, {file = "watchfiles-0.24.0-cp313-none-win_amd64.whl", hash = "sha256:a974231b4fdd1bb7f62064a0565a6b107d27d21d9acb50c484d2cdba515b9366"}, - {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:632676574429bee8c26be8af52af20e0c718cc7f5f67f3fb658c71928ccd4f7f"}, - {file = "watchfiles-0.24.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:a2a9891723a735d3e2540651184be6fd5b96880c08ffe1a98bae5017e65b544b"}, - {file = 
"watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a7fa2bc0efef3e209a8199fd111b8969fe9db9c711acc46636686331eda7dd4"}, - {file = "watchfiles-0.24.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:01550ccf1d0aed6ea375ef259706af76ad009ef5b0203a3a4cce0f6024f9b68a"}, {file = "watchfiles-0.24.0.tar.gz", hash = "sha256:afb72325b74fa7a428c009c1b8be4b4d7c2afedafb2982827ef2156646df2fe1"}, ] @@ -3375,7 +2941,7 @@ name = "websocket-client" version = "1.8.0" requires_python = ">=3.8" summary = "WebSocket client for Python with low level API options" -groups = ["default"] +groups = ["chat_with_repo"] files = [ {file = "websocket_client-1.8.0-py3-none-any.whl", hash = "sha256:17b44cc997f5c498e809b22cdf2d9c7a9e71c02c8cc2b6c56e7c2d1239bfa526"}, {file = "websocket_client-1.8.0.tar.gz", hash = "sha256:3239df9f44da632f96012472805d40a23281a991027ce11d2f45a6f24ac4c3da"}, @@ -3386,19 +2952,8 @@ name = "websockets" version = "12.0" requires_python = ">=3.8" summary = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -groups = ["default"] +groups = ["chat_with_repo"] files = [ - {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"}, - {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"}, - {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"}, - {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"}, - {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"}, - {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"}, - {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"}, - {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"}, - {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"}, - {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"}, - {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"}, {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"}, {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"}, {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"}, @@ -3421,11 +2976,6 @@ files = [ {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"}, {file = 
"websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"}, {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"}, - {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"}, - {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"}, - {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"}, - {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"}, - {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"}, {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"}, {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"}, ] @@ -3447,18 +2997,8 @@ name = "wrapt" version = "1.16.0" requires_python = ">=3.6" summary = "Module for decorators, wrappers and monkey patching." -groups = ["default"] +groups = ["default", "chat_with_repo"] files = [ - {file = "wrapt-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ffa565331890b90056c01db69c0fe634a776f8019c143a5ae265f9c6bc4bd6d4"}, - {file = "wrapt-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e4fdb9275308292e880dcbeb12546df7f3e0f96c6b41197e0cf37d2826359020"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb2dee3874a500de01c93d5c71415fcaef1d858370d405824783e7a8ef5db440"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a88e6010048489cda82b1326889ec075a8c856c2e6a256072b28eaee3ccf487"}, - {file = "wrapt-1.16.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac83a914ebaf589b69f7d0a1277602ff494e21f4c2f743313414378f8f50a4cf"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:73aa7d98215d39b8455f103de64391cb79dfcad601701a3aa0dddacf74911d72"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:807cc8543a477ab7422f1120a217054f958a66ef7314f76dd9e77d3f02cdccd0"}, - {file = "wrapt-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:bf5703fdeb350e36885f2875d853ce13172ae281c56e509f4e6eca049bdfb136"}, - {file = "wrapt-1.16.0-cp310-cp310-win32.whl", hash = "sha256:f6b2d0c6703c988d334f297aa5df18c45e97b0af3679bb75059e0e0bd8b1069d"}, - {file = "wrapt-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:decbfa2f618fa8ed81c95ee18a387ff973143c656ef800c9f24fb7e9c16054e2"}, {file = "wrapt-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1a5db485fe2de4403f13fafdc231b0dbae5eca4359232d2efc79025527375b09"}, {file = "wrapt-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:75ea7d0ee2a15733684badb16de6794894ed9c55aa5e9903260922f0482e687d"}, {file = 
"wrapt-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a452f9ca3e3267cd4d0fcf2edd0d035b1934ac2bd7e0e57ac91ad6b95c0c6389"}, @@ -3485,87 +3025,75 @@ files = [ [[package]] name = "yarl" -version = "1.14.0" -requires_python = ">=3.8" +version = "1.17.1" +requires_python = ">=3.9" summary = "Yet another URL library" -groups = ["default"] +groups = ["default", "chat_with_repo"] dependencies = [ "idna>=2.0", "multidict>=4.0", "propcache>=0.2.0", ] files = [ - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1bfc25aa6a7c99cf86564210f79a0b7d4484159c67e01232b116e445b3036547"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0cf21f46a15d445417de8fc89f2568852cf57fe8ca1ab3d19ddb24d45c0383ae"}, - {file = "yarl-1.14.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1dda53508df0de87b6e6b0a52d6718ff6c62a5aca8f5552748404963df639269"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:587c3cc59bc148a9b1c07a019346eda2549bc9f468acd2f9824d185749acf0a6"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3007a5b75cb50140708420fe688c393e71139324df599434633019314ceb8b59"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:06ff23462398333c78b6f4f8d3d70410d657a471c2c5bbe6086133be43fc8f1a"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:689a99a42ee4583fcb0d3a67a0204664aa1539684aed72bdafcbd505197a91c4"}, - {file = "yarl-1.14.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0547ab1e9345dc468cac8368d88ea4c5bd473ebc1d8d755347d7401982b5dd8"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:742aef0a99844faaac200564ea6f5e08facb285d37ea18bd1a5acf2771f3255a"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:176110bff341b6730f64a1eb3a7070e12b373cf1c910a9337e7c3240497db76f"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:46a9772a1efa93f9cd170ad33101c1817c77e0e9914d4fe33e2da299d7cf0f9b"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:ee2c68e4f2dd1b1c15b849ba1c96fac105fca6ffdb7c1e8be51da6fabbdeafb9"}, - {file = "yarl-1.14.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:047b258e00b99091b6f90355521f026238c63bd76dcf996d93527bb13320eefd"}, - {file = "yarl-1.14.0-cp310-cp310-win32.whl", hash = "sha256:0aa92e3e30a04f9462a25077db689c4ac5ea9ab6cc68a2e563881b987d42f16d"}, - {file = "yarl-1.14.0-cp310-cp310-win_amd64.whl", hash = "sha256:d9baec588f015d0ee564057aa7574313c53a530662ffad930b7886becc85abdf"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:07f9eaf57719d6721ab15805d85f4b01a5b509a0868d7320134371bcb652152d"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c14b504a74e58e2deb0378b3eca10f3d076635c100f45b113c18c770b4a47a50"}, - {file = "yarl-1.14.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:16a682a127930f3fc4e42583becca6049e1d7214bcad23520c590edd741d2114"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73bedd2be05f48af19f0f2e9e1353921ce0c83f4a1c9e8556ecdcf1f1eae4892"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3ab950f8814f3b7b5e3eebc117986f817ec933676f68f0a6c5b2137dd7c9c69"}, - 
{file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b693c63e7e64b524f54aa4888403c680342d1ad0d97be1707c531584d6aeeb4f"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:85cb3e40eaa98489f1e2e8b29f5ad02ee1ee40d6ce6b88d50cf0f205de1d9d2c"}, - {file = "yarl-1.14.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4f24f08b6c9b9818fd80612c97857d28f9779f0d1211653ece9844fc7b414df2"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:29a84a46ec3ebae7a1c024c055612b11e9363a8a23238b3e905552d77a2bc51b"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5cd5dad8366e0168e0fd23d10705a603790484a6dbb9eb272b33673b8f2cce72"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:a152751af7ef7b5d5fa6d215756e508dd05eb07d0cf2ba51f3e740076aa74373"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:3d569f877ed9a708e4c71a2d13d2940cb0791da309f70bd970ac1a5c088a0a92"}, - {file = "yarl-1.14.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6a615cad11ec3428020fb3c5a88d85ce1b5c69fd66e9fcb91a7daa5e855325dd"}, - {file = "yarl-1.14.0-cp311-cp311-win32.whl", hash = "sha256:bab03192091681d54e8225c53f270b0517637915d9297028409a2a5114ff4634"}, - {file = "yarl-1.14.0-cp311-cp311-win_amd64.whl", hash = "sha256:985623575e5c4ea763056ffe0e2d63836f771a8c294b3de06d09480538316b13"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:fc2c80bc87fba076e6cbb926216c27fba274dae7100a7b9a0983b53132dd99f2"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:55c144d363ad4626ca744556c049c94e2b95096041ac87098bb363dcc8635e8d"}, - {file = "yarl-1.14.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b03384eed107dbeb5f625a99dc3a7de8be04fc8480c9ad42fccbc73434170b20"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f72a0d746d38cb299b79ce3d4d60ba0892c84bbc905d0d49c13df5bace1b65f8"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8648180b34faaea4aa5b5ca7e871d9eb1277033fa439693855cf0ea9195f85f1"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9557c9322aaa33174d285b0c1961fb32499d65ad1866155b7845edc876c3c835"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f50eb3837012a937a2b649ec872b66ba9541ad9d6f103ddcafb8231cfcafd22"}, - {file = "yarl-1.14.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8892fa575ac9b1b25fae7b221bc4792a273877b9b56a99ee2d8d03eeb3dbb1d2"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e6a2c5c5bb2556dfbfffffc2bcfb9c235fd2b566d5006dfb2a37afc7e3278a07"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ab3abc0b78a5dfaa4795a6afbe7b282b6aa88d81cf8c1bb5e394993d7cae3457"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:47eede5d11d669ab3759b63afb70d28d5328c14744b8edba3323e27dc52d298d"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:fe4d2536c827f508348d7b40c08767e8c7071614250927233bf0c92170451c0a"}, - {file = "yarl-1.14.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0fd7b941dd1b00b5f0acb97455fea2c4b7aac2dd31ea43fb9d155e9bc7b78664"}, - {file = 
"yarl-1.14.0-cp312-cp312-win32.whl", hash = "sha256:99ff3744f5fe48288be6bc402533b38e89749623a43208e1d57091fc96b783b9"}, - {file = "yarl-1.14.0-cp312-cp312-win_amd64.whl", hash = "sha256:1ca3894e9e9f72da93544f64988d9c052254a338a9f855165f37f51edb6591de"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d02d700705d67e09e1f57681f758f0b9d4412eeb70b2eb8d96ca6200b486db3"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:30600ba5db60f7c0820ef38a2568bb7379e1418ecc947a0f76fd8b2ff4257a97"}, - {file = "yarl-1.14.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e85d86527baebb41a214cc3b45c17177177d900a2ad5783dbe6f291642d4906f"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37001e5d4621cef710c8dc1429ca04e189e572f128ab12312eab4e04cf007132"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f4f4547944d4f5cfcdc03f3f097d6f05bbbc915eaaf80a2ee120d0e756de377d"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75ff4c819757f9bdb35de049a509814d6ce851fe26f06eb95a392a5640052482"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68ac1a09392ed6e3fd14be880d39b951d7b981fd135416db7d18a6208c536561"}, - {file = "yarl-1.14.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:96952f642ac69075e44c7d0284528938fdff39422a1d90d3e45ce40b72e5e2d9"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a56fbe3d7f3bce1d060ea18d2413a2ca9ca814eea7cedc4d247b5f338d54844e"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:7e2637d75e92763d1322cb5041573279ec43a80c0f7fbbd2d64f5aee98447b17"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:9abe80ae2c9d37c17599557b712e6515f4100a80efb2cda15f5f070306477cd2"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:217a782020b875538eebf3948fac3a7f9bbbd0fd9bf8538f7c2ad7489e80f4e8"}, - {file = "yarl-1.14.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b9cfef3f14f75bf6aba73a76caf61f9d00865912a04a4393c468a7ce0981b519"}, - {file = "yarl-1.14.0-cp313-cp313-win32.whl", hash = "sha256:d8361c7d04e6a264481f0b802e395f647cd3f8bbe27acfa7c12049efea675bd1"}, - {file = "yarl-1.14.0-cp313-cp313-win_amd64.whl", hash = "sha256:bc24f968b82455f336b79bf37dbb243b7d76cd40897489888d663d4e028f5069"}, - {file = "yarl-1.14.0-py3-none-any.whl", hash = "sha256:c8ed4034f0765f8861620c1f2f2364d2e58520ea288497084dae880424fc0d9f"}, - {file = "yarl-1.14.0.tar.gz", hash = "sha256:88c7d9d58aab0724b979ab5617330acb1c7030b79379c8138c1c8c94e121d1b3"}, + {file = "yarl-1.17.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:cbad927ea8ed814622305d842c93412cb47bd39a496ed0f96bfd42b922b4a217"}, + {file = "yarl-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fca4b4307ebe9c3ec77a084da3a9d1999d164693d16492ca2b64594340999988"}, + {file = "yarl-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff5c6771c7e3511a06555afa317879b7db8d640137ba55d6ab0d0c50425cab75"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b29beab10211a746f9846baa39275e80034e065460d99eb51e45c9a9495bcca"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a52a1ffdd824fb1835272e125385c32fd8b17fbdefeedcb4d543cc23b332d74"}, 
+ {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:58c8e9620eb82a189c6c40cb6b59b4e35b2ee68b1f2afa6597732a2b467d7e8f"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d216e5d9b8749563c7f2c6f7a0831057ec844c68b4c11cb10fc62d4fd373c26d"}, + {file = "yarl-1.17.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:881764d610e3269964fc4bb3c19bb6fce55422828e152b885609ec176b41cf11"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8c79e9d7e3d8a32d4824250a9c6401194fb4c2ad9a0cec8f6a96e09a582c2cc0"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:299f11b44d8d3a588234adbe01112126010bd96d9139c3ba7b3badd9829261c3"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:cc7d768260f4ba4ea01741c1b5fe3d3a6c70eb91c87f4c8761bbcce5181beafe"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:de599af166970d6a61accde358ec9ded821234cbbc8c6413acfec06056b8e860"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:2b24ec55fad43e476905eceaf14f41f6478780b870eda5d08b4d6de9a60b65b4"}, + {file = "yarl-1.17.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9fb815155aac6bfa8d86184079652c9715c812d506b22cfa369196ef4e99d1b4"}, + {file = "yarl-1.17.1-cp311-cp311-win32.whl", hash = "sha256:7615058aabad54416ddac99ade09a5510cf77039a3b903e94e8922f25ed203d7"}, + {file = "yarl-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:14bc88baa44e1f84164a392827b5defb4fa8e56b93fecac3d15315e7c8e5d8b3"}, + {file = "yarl-1.17.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:327828786da2006085a4d1feb2594de6f6d26f8af48b81eb1ae950c788d97f61"}, + {file = "yarl-1.17.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cc353841428d56b683a123a813e6a686e07026d6b1c5757970a877195f880c2d"}, + {file = "yarl-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c73df5b6e8fabe2ddb74876fb82d9dd44cbace0ca12e8861ce9155ad3c886139"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0bdff5e0995522706c53078f531fb586f56de9c4c81c243865dd5c66c132c3b5"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:06157fb3c58f2736a5e47c8fcbe1afc8b5de6fb28b14d25574af9e62150fcaac"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1654ec814b18be1af2c857aa9000de7a601400bd4c9ca24629b18486c2e35463"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f6595c852ca544aaeeb32d357e62c9c780eac69dcd34e40cae7b55bc4fb1147"}, + {file = "yarl-1.17.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:459e81c2fb920b5f5df744262d1498ec2c8081acdcfe18181da44c50f51312f7"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7e48cdb8226644e2fbd0bdb0a0f87906a3db07087f4de77a1b1b1ccfd9e93685"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:d9b6b28a57feb51605d6ae5e61a9044a31742db557a3b851a74c13bc61de5172"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e594b22688d5747b06e957f1ef822060cb5cb35b493066e33ceac0cf882188b7"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5f236cb5999ccd23a0ab1bd219cfe0ee3e1c1b65aaf6dd3320e972f7ec3a39da"}, + {file = 
"yarl-1.17.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a2a64e62c7a0edd07c1c917b0586655f3362d2c2d37d474db1a509efb96fea1c"}, + {file = "yarl-1.17.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:d0eea830b591dbc68e030c86a9569826145df485b2b4554874b07fea1275a199"}, + {file = "yarl-1.17.1-cp312-cp312-win32.whl", hash = "sha256:46ddf6e0b975cd680eb83318aa1d321cb2bf8d288d50f1754526230fcf59ba96"}, + {file = "yarl-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:117ed8b3732528a1e41af3aa6d4e08483c2f0f2e3d3d7dca7cf538b3516d93df"}, + {file = "yarl-1.17.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5d1d42556b063d579cae59e37a38c61f4402b47d70c29f0ef15cee1acaa64488"}, + {file = "yarl-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c0167540094838ee9093ef6cc2c69d0074bbf84a432b4995835e8e5a0d984374"}, + {file = "yarl-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:2f0a6423295a0d282d00e8701fe763eeefba8037e984ad5de44aa349002562ac"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5b078134f48552c4d9527db2f7da0b5359abd49393cdf9794017baec7506170"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d401f07261dc5aa36c2e4efc308548f6ae943bfff20fcadb0a07517a26b196d8"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b5f1ac7359e17efe0b6e5fec21de34145caef22b260e978336f325d5c84e6938"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f63d176a81555984e91f2c84c2a574a61cab7111cc907e176f0f01538e9ff6e"}, + {file = "yarl-1.17.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e275792097c9f7e80741c36de3b61917aebecc08a67ae62899b074566ff8556"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:81713b70bea5c1386dc2f32a8f0dab4148a2928c7495c808c541ee0aae614d67"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:aa46dce75078fceaf7cecac5817422febb4355fbdda440db55206e3bd288cfb8"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1ce36ded585f45b1e9bb36d0ae94765c6608b43bd2e7f5f88079f7a85c61a4d3"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:2d374d70fdc36f5863b84e54775452f68639bc862918602d028f89310a034ab0"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2d9f0606baaec5dd54cb99667fcf85183a7477f3766fbddbe3f385e7fc253299"}, + {file = "yarl-1.17.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b0341e6d9a0c0e3cdc65857ef518bb05b410dbd70d749a0d33ac0f39e81a4258"}, + {file = "yarl-1.17.1-cp313-cp313-win32.whl", hash = "sha256:2e7ba4c9377e48fb7b20dedbd473cbcbc13e72e1826917c185157a137dac9df2"}, + {file = "yarl-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:949681f68e0e3c25377462be4b658500e85ca24323d9619fdc41f68d46a1ffda"}, + {file = "yarl-1.17.1-py3-none-any.whl", hash = "sha256:f1790a4b1e8e8e028c391175433b9c8122c39b46e1663228158e61e6f915bf06"}, + {file = "yarl-1.17.1.tar.gz", hash = "sha256:067a63fcfda82da6b198fa73079b1ca40b7c9b7994995b6ee38acda728b64d47"}, ] [[package]] name = "zipp" -version = "3.20.2" -requires_python = ">=3.8" +version = "3.21.0" +requires_python = ">=3.9" summary = "Backport of pathlib-compatible object wrapper for zip files" -groups = ["default"] +groups = ["chat_with_repo"] files = [ - {file = "zipp-3.20.2-py3-none-any.whl", hash = 
"sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"}, - {file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"}, + {file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"}, + {file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"}, ] diff --git a/pyproject.toml b/pyproject.toml index f5fa955..1ba806e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,23 +9,18 @@ license = {text = "Apache-2.0"} requires-python = ">=3.11,<4.0" dependencies = [ "loguru>=0.7.2", - "openai>=1.10.0", - "gradio>=4.16.0", - "markdown>=3.5.2", - "pyyaml>=6.0.1", "jedi>=0.19.1", "GitPython>=3.1.41", - "chromadb>=0.4.22", "prettytable>=3.9.0", "python-iso639>=2024.2.7", "pydantic-settings>=2.2.1", - "tomli>=2.0.1", - "tomli-w>=1.0.0", "colorama>=0.4.6", - "llama-index-llms-openai>=0.2.12", + "click>=8.1.7", + "python-iso639>=2024.10.22", + "llama-index-llms-openai>=0.2.16", ] name = "repoagent" -version = "0.1.4" +version = "0.1.5" description = "An LLM-Powered Framework for Repository-level Code Documentation Generation." readme = "README.md" classifiers = [ @@ -41,26 +36,38 @@ repository = "https://github.com/LOGIC-10/RepoAgent" [project.scripts] repoagent = "repo_agent.main:cli" +[project.optional-dependencies] +chat_with_repo = [ + "markdown>=3.7", + "llama-index-embeddings-openai>=0.2.5", + "llama-index-vector-stores-chroma>=0.3.0", + "gradio>=5.6.0", +] + +[build-system] +requires = ["pdm-backend"] +build-backend = "pdm.backend" + +[tool.pyright] +reportCallIssue="none" + +[tool.ruff] +# General ruff settings can stay here. + +[tool.ruff.lint] +select = ["I001"] + [tool.pdm] [tool.pdm.dev-dependencies] -dev = [ - "ruff>=0.1.14", -] test = [ "pytest<8.0.0,>=7.4.4", "pytest-mock<4.0.0,>=3.12.0", ] +lint = [ + "ruff>=0.7.4", +] [tool.pdm.build] includes = [ "repo_agent", -] - - -[build-system] -requires = ["pdm-backend"] -build-backend = "pdm.backend" - - -[tool.pyright] -reportCallIssue="none" \ No newline at end of file +] \ No newline at end of file diff --git a/repo_agent/__main__.py b/repo_agent/__main__.py index 05ccb0b..a011062 100755 --- a/repo_agent/__main__.py +++ b/repo_agent/__main__.py @@ -1,4 +1,4 @@ from .main import cli if __name__ == "__main__": - cli() \ No newline at end of file + cli() diff --git a/repo_agent/change_detector.py b/repo_agent/change_detector.py index bf0d7d0..34930b5 100644 --- a/repo_agent/change_detector.py +++ b/repo_agent/change_detector.py @@ -6,7 +6,7 @@ from colorama import Fore, Style from repo_agent.file_handler import FileHandler -from repo_agent.settings import setting +from repo_agent.settings import SettingsManager class ChangeDetector: @@ -67,7 +67,7 @@ def get_file_diff(self, file_path, is_new_file): if is_new_file: # For new files, first add them to the staging area. - add_command = f'git -C {repo.working_dir} add {file_path}' + add_command = f"git -C {repo.working_dir} add {file_path}" subprocess.run(add_command, shell=True, check=True) # Get the diff from the staging area. 
@@ -165,8 +165,14 @@ def get_to_be_staged_files(self): to_be_staged_files = [] # staged_files are the already-staged files, typically the .py (or other) files that the author changed and then git-added staged_files = [item.a_path for item in self.repo.index.diff("HEAD")] - print(f"{Fore.LIGHTYELLOW_EX}target_repo_path{Style.RESET_ALL}: {self.repo_path}") - print(f"{Fore.LIGHTMAGENTA_EX}already_staged_files{Style.RESET_ALL}:{staged_files}") + print( + f"{Fore.LIGHTYELLOW_EX}target_repo_path{Style.RESET_ALL}: {self.repo_path}" + ) + print( + f"{Fore.LIGHTMAGENTA_EX}already_staged_files{Style.RESET_ALL}:{staged_files}" + ) + + setting = SettingsManager.get_setting() project_hierarchy = setting.project.hierarchy_name # diffs is the list of all files with unstaged changes; these changes are relative to the working directory, i.e. made since the last commit but not yet added to the staging area @@ -213,12 +219,14 @@ def get_to_be_staged_files(self): for unstaged_file in unstaged_files: # Join repo_path and unstaged_file to get the full absolute path - if unstaged_file.startswith(setting.project.markdown_docs_name) or unstaged_file.startswith(setting.project.hierarchy_name): + if unstaged_file.startswith( + setting.project.markdown_docs_name + ) or unstaged_file.startswith(setting.project.hierarchy_name): # abs_unstaged_file = os.path.join(self.repo_path, unstaged_file) # # # Get the path relative to the repository root # # rel_unstaged_file = os.path.relpath(abs_unstaged_file, self.repo_path) to_be_staged_files.append(unstaged_file) - elif unstaged_file == project_hierarchy: #project_hierarchy is always added to_be_staged_files.append(unstaged_file) continue abs_unstaged_file = os.path.join(self.repo_path, unstaged_file) @@ -242,9 +250,11 @@ def get_to_be_staged_files(self): rel_unstaged_file, ) ) - elif unstaged_file == project_hierarchy: #project_hierarchy is always added + elif unstaged_file == project_hierarchy: # project_hierarchy is always added to_be_staged_files.append(unstaged_file) - print(f"{Fore.LIGHTRED_EX}newly_staged_files{Style.RESET_ALL}: {to_be_staged_files}") + print( + f"{Fore.LIGHTRED_EX}newly_staged_files{Style.RESET_ALL}: {to_be_staged_files}" + ) return to_be_staged_files def add_unstaged_files(self): @@ -253,7 +263,7 @@ def add_unstaged_files(self): """ unstaged_files_meeting_conditions = self.get_to_be_staged_files() for file_path in unstaged_files_meeting_conditions: - add_command = f'git -C {self.repo.working_dir} add {file_path}' + add_command = f"git -C {self.repo.working_dir} add {file_path}" subprocess.run(add_command, shell=True, check=True) return unstaged_files_meeting_conditions diff --git a/repo_agent/chat_engine.py b/repo_agent/chat_engine.py index 380fae6..b9a93ac 100644 --- a/repo_agent/chat_engine.py +++ b/repo_agent/chat_engine.py @@ -1,30 +1,9 @@ -import inspect -import os -import sys -import time -from dataclasses import dataclass - -import tiktoken -from openai import APIConnectionError, OpenAI +from llama_index.llms.openai import OpenAI from repo_agent.doc_meta_info import DocItem from repo_agent.log import logger -from repo_agent.prompt import SYS_PROMPT, USR_PROMPT -from repo_agent.settings import max_input_tokens_map, setting - - -def get_import_statements(): - source_lines = inspect.getsourcelines(sys.modules[__name__])[0] - import_lines = [ - line - for line in source_lines - if line.strip().startswith("import") or line.strip().startswith("from") - ] - return import_lines - -@dataclass -class ResponseMessage: - content: str +from repo_agent.prompt import chat_template +from repo_agent.settings import SettingsManager class ChatEngine: @@ -33,106 +12,22 @@ class ChatEngine: """ def __init__(self,
project_manager): - self.project_manager = project_manager - - def num_tokens_from_string(self, string: str, encoding_name="cl100k_base") -> int: - """Returns the number of tokens in a text string.""" - encoding = tiktoken.get_encoding(encoding_name) - num_tokens = len(encoding.encode(string)) - return num_tokens - - def reduce_input_length(self, shorten_attempt, prompt_data): - """ - Reduces the length of the input prompts by modifying the sys_prompt contents. - """ - - logger.info( - f"Attempt {shorten_attempt + 1} / 2 to reduce the length of the messages." - ) - if shorten_attempt == 0: - # First attempt, remove project_structure and project_structure_prefix - prompt_data.project_structure = "" - prompt_data.project_structure_prefix = "" - elif shorten_attempt == 1: - # Second attempt, further remove caller and callee (reference) information - prompt_data.project_structure = "" - prompt_data.project_structure_prefix = "" - - prompt_data.referenced = False - prompt_data.referencer_content = "" - prompt_data.reference_letter = "" - prompt_data.combine_ref_situation = "" + setting = SettingsManager.get_setting() - # Update sys_prompt - sys_prompt = SYS_PROMPT.format(**prompt_data) - - return sys_prompt - - def generate_response(self, model, sys_prompt, usr_prompt, max_tokens): - client = OpenAI( + self.llm = OpenAI( api_key=setting.chat_completion.openai_api_key.get_secret_value(), - base_url=str(setting.chat_completion.base_url), + api_base=setting.chat_completion.openai_base_url, timeout=setting.chat_completion.request_timeout, - ) - - messages = [ - {"role": "system", "content": sys_prompt}, - {"role": "user", "content": usr_prompt}, - ] - - response = client.chat.completions.create( - model=model, - messages=messages, + model=setting.chat_completion.model, temperature=setting.chat_completion.temperature, - max_tokens=max_tokens, + max_retries=1, + is_chat_model=True, ) - response_message = response.choices[0].message - - return response_message - - def attempt_generate_response( - self, model, sys_prompt, usr_prompt, max_tokens, max_attempts=5 - ): - attempt = 0 - while attempt < max_attempts: - try: - response_message = self.generate_response( - model, sys_prompt, usr_prompt, max_tokens - ) - - # If response_message is None, continue to the next iteration - if response_message is None: - attempt += 1 - continue - return response_message + def build_prompt(self, doc_item: DocItem): + """Builds and returns the system and user prompts based on the DocItem.""" + setting = SettingsManager.get_setting() - except APIConnectionError as e: - logger.error( - f"Connection error: {e}. Attempt {attempt + 1} of {max_attempts}" - ) - # Retry after 7 seconds - time.sleep(7) - attempt += 1 - if attempt == max_attempts: - raise - else: - continue # Try to request again - - except Exception as e: - logger.error( - f"An unknown error occurred: {e}. \nAttempt {attempt + 1} of {max_attempts}" - ) - # Retry after 10 seconds - time.sleep(10) - attempt += 1 - if attempt == max_attempts: - response_message = ResponseMessage( - "An unknown error occurred while generating this documentation after many tries."
- ) - return response_message - - def generate_doc(self, doc_item: DocItem, file_handler): code_info = doc_item.content referenced = len(doc_item.who_reference_me) > 0 @@ -140,23 +35,7 @@ def generate_doc(self, doc_item: DocItem, file_handler): code_name = code_info["name"] code_content = code_info["code_content"] have_return = code_info["have_return"] - who_reference_me = doc_item.who_reference_me_name_list - reference_who = doc_item.reference_who_name_list file_path = doc_item.get_full_name() - doc_item_path = os.path.join(file_path, code_name) - - # The tree-structure path is derived from the global "who reference me" and "reference who" information plus the item's own file_path - # Use the ProjectManager instance to obtain the project structure - project_structure = self.project_manager.build_path_tree( - who_reference_me, reference_who, doc_item_path - ) - - # project_manager = ProjectManager(repo_path=file_handler.repo_path, project_hierarchy=file_handler.project_hierarchy) - # project_structure = project_manager.get_project_structure() - # file_path = os.path.join(file_handler.repo_path, file_handler.file_path) - # code_from_referencer = get_code_from_json(project_manager.project_hierarchy, referencer) # - # referenced = True if len(code_from_referencer) > 0 else False - # referencer_content = '\n'.join([f'File_Path:{file_path}\n' + '\n'.join([f'Corresponding code as follows:\n{code}\n[End of this part of code]' for code in codes]) + f'\n[End of {file_path}]' for file_path, codes in code_from_referencer.items()]) def get_referenced_prompt(doc_item: DocItem) -> str: if len(doc_item.reference_who) == 0: @@ -164,7 +43,7 @@ def get_referenced_prompt(doc_item: DocItem) -> str: prompt = [ """As you can see, the code calls the following objects, their code and docs are as following:""" ] - for k, reference_item in enumerate(doc_item.reference_who): + for reference_item in doc_item.reference_who: instance_prompt = ( f"""obj: {reference_item.get_full_name()}\nDocument: \n{reference_item.md_content[-1] if len(reference_item.md_content) > 0 else 'None'}\nRaw code:```\n{reference_item.content['code_content'] if 'code_content' in reference_item.content.keys() else ''}\n```""" + "=" * 10 @@ -178,7 +57,7 @@ def get_referencer_prompt(doc_item: DocItem) -> str: prompt = [ """Also, the code has been called by the following objects, their code and docs are as following:""" ] - for k, referencer_item in enumerate(doc_item.who_reference_me): + for referencer_item in doc_item.who_reference_me: instance_prompt = ( f"""obj: {referencer_item.get_full_name()}\nDocument: \n{referencer_item.md_content[-1] if len(referencer_item.md_content) > 0 else 'None'}\nRaw code:```\n{referencer_item.content['code_content'] if 'code_content' in referencer_item.content.keys() else 'None'}\n```""" + "=" * 10 @@ -196,8 +75,6 @@ def get_relationship_description(referencer_content, reference_letter): else: return "" - max_tokens = setting.project.max_document_tokens - code_type_tell = "Class" if code_type == "ClassDef" else "Function" parameters_or_attribute = ( "attributes" if code_type == "ClassDef" else "parameters" @@ -207,7 +84,6 @@ def get_relationship_description(referencer_content, reference_letter): if have_return else "" ) - # reference_letter = "This object is called in the following files, the file paths and corresponding calling parts of the code are as follows:" if referenced else "" combine_ref_situation = ( "and combine it with its calling situation in the project," if referenced @@ -222,85 +98,35 @@ def get_relationship_description(referencer_content, reference_letter): project_structure_prefix = ", and the
related hierarchical structure of this project is as follows (The current object is marked with an *):" - # First attempt: build the complete prompt - prompt_data = { - "combine_ref_situation": combine_ref_situation, - "file_path": file_path, - "project_structure_prefix": project_structure_prefix, - "project_structure": project_structure, - "code_type_tell": code_type_tell, - "code_name": code_name, - "code_content": code_content, - "have_return_tell": have_return_tell, - "has_relationship": has_relationship, - "reference_letter": reference_letter, - "referencer_content": referencer_content, - "parameters_or_attribute": parameters_or_attribute, - "language": setting.project.language, - } - - sys_prompt = SYS_PROMPT.format(**prompt_data) - - usr_prompt = USR_PROMPT.format(language=setting.project.language) - - model = setting.chat_completion.model - max_input_length = max_input_tokens_map.get(model, 4096) - max_tokens - - total_tokens = self.num_tokens_from_string( - sys_prompt - ) + self.num_tokens_from_string(usr_prompt) - - # If the total tokens exceed the current model's limit, try to find a larger model or shorten the input - if total_tokens >= max_input_length: - # Look for a model with a larger input limit - larger_models = { - k: v - for k, v in max_input_tokens_map.items() - if (v - max_tokens) > total_tokens - } # Select all models whose context length exceeds the current total input tokens - for model_name, max_input_length in larger_models.items(): - if max_input_length - max_tokens > total_tokens: - try: - # Attempt to make a request with the larger model - logger.info( - f"Trying model {model_name} for large-context processing." - ) - response_message = self.attempt_generate_response( - model_name, sys_prompt, usr_prompt, max_tokens - ) # response_message has already been validated inside attempt_generate_response - return response_message - except Exception as e: - # Otherwise just skip it and try the next model - continue # Try the next model - # If no larger models succeed, fallback to original model - # For the original model, try shortening the input length - for shorten_attempt in range(2): - shorten_success = False - sys_prompt = self.reduce_input_length(shorten_attempt, prompt_data) - # Recompute the tokens - total_tokens = self.num_tokens_from_string( - sys_prompt - ) + self.num_tokens_from_string(usr_prompt) - # Check whether the limit is now satisfied - if total_tokens < max_input_length: - shorten_success = True - # If it fits, send the request directly to generate the documentation - response_message = self.attempt_generate_response( - model, sys_prompt, usr_prompt, max_tokens - ) + return chat_template.format_messages( + combine_ref_situation=combine_ref_situation, + file_path=file_path, + project_structure_prefix=project_structure_prefix, + code_type_tell=code_type_tell, + code_name=code_name, + code_content=code_content, + have_return_tell=have_return_tell, + has_relationship=has_relationship, + reference_letter=reference_letter, + referencer_content=referencer_content, + parameters_or_attribute=parameters_or_attribute, + language=setting.project.language, + ) - if not shorten_success: - # This means no documentation can be generated for this doc_item (the code itself exceeds the model's limit) - # Return a custom response_message object whose content is "Tried to generate the document, but the code is too long to process." - # Callers read response_message.content, so content must be retrievable from response_message in exactly this way - response_message = ResponseMessage( - "Tried to generate the document, but the code is too long to process."
- ) - return response_message + def generate_doc(self, doc_item: DocItem): + """Generates documentation for a given DocItem.""" + messages = self.build_prompt(doc_item) - else: # If the total tokens are within the model's limit, send the request directly - response_message = self.attempt_generate_response( - model, sys_prompt, usr_prompt, max_tokens + try: + response = self.llm.chat(messages) + logger.debug(f"LLM Prompt Tokens: {response.raw.usage.prompt_tokens}") # type: ignore + logger.debug( + f"LLM Completion Tokens: {response.raw.usage.completion_tokens}" # type: ignore ) - - return response_message + logger.debug( + f"Total LLM Token Count: {response.raw.usage.total_tokens}" # type: ignore + ) + return response.message.content + except Exception as e: + logger.error(f"Error in llamaindex chat call: {e}") + raise diff --git a/repo_agent/chat_with_repo/gradio_interface.py b/repo_agent/chat_with_repo/gradio_interface.py index 634ec21..e06fd17 100644 --- a/repo_agent/chat_with_repo/gradio_interface.py +++ b/repo_agent/chat_with_repo/gradio_interface.py @@ -7,7 +7,7 @@ class GradioInterface: def __init__(self, respond_function): self.respond = respond_function - self.cssa = """ + self.cssa = """