diff --git a/.cursorrules b/.cursorrules new file mode 120000 index 00000000..580a6313 --- /dev/null +++ b/.cursorrules @@ -0,0 +1 @@ +LLM.md \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 00000000..7cf6a8a2 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +/.github/ @taiyangc @hyacinthus \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md new file mode 100644 index 00000000..38c4ce1f --- /dev/null +++ b/.github/ISSUE_TEMPLATE.md @@ -0,0 +1,18 @@ +# Issue Template + +## Description +Please provide a clear and concise description of the issue. + +## Steps to Reproduce +1. Step one +2. Step two +3. Step three + +## Expected Behavior +Describe what you expected to happen. + +## Actual Behavior +Describe what actually happened. + +## Additional Context +Add any other context about the problem here. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..82464155 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,20 @@ +## Description + +Please include a summary of the changes and the related issue. + +## Type of Change + +- [ ] Bugfix +- [ ] New Feature +- [ ] Improvement +- [ ] Documentation Update + +## Checklist + +- [ ] I have read the contributing guidelines. +- [ ] I have added tests to cover my changes. +- [ ] All new and existing tests passed. + +## Related Issue + +Closes #[issue number] diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 00000000..9d866e39 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located.
+# Please see the documentation for all configuration options: +# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file + +version: 2 +updates: + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..1c0e6e62 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,230 @@ +name: Build Docker Image and Deploy + +on: + push: + branches: [ main ] + paths-ignore: + - '.github/**' + - 'docs/**' + - '*.md' + release: + types: [ prereleased, released ] + +jobs: + publish-package: + runs-on: ubuntu-latest + if: ${{ github.event_name == 'release' }} + permissions: + contents: read + id-token: write + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install uv + uses: astral-sh/setup-uv@v4 + + - name: Extract version from tag + id: version + run: | + VERSION=${{ github.event.release.tag_name }} + VERSION_NO_V=${VERSION#v} + echo "version=${VERSION_NO_V}" >> $GITHUB_OUTPUT + + - name: Update version in pyproject.toml + run: | + cd intentkit + sed -i 's/version = ".*"/version = "${{ steps.version.outputs.version }}"/' pyproject.toml + + - name: Update version in __init__.py + run: | + cd intentkit + sed -i 's/__version__ = ".*"/__version__ = "${{ steps.version.outputs.version }}"/' __init__.py + + - name: Build package + run: | + cd intentkit + uv build + + - name: Publish to PyPI + run: | + uv publish --token ${{ secrets.UV_PUBLISH_TOKEN }} dist/* + + docker: + runs-on: ubuntu-latest + permissions: + contents: read + id-token: write + steps: + - name: Build Start + id: ci_start + uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d + with: + method: chat.postMessage + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.SLACK_CHANNEL }} + text: "Build started 👀" + attachments: + - color: "dbab09" + fields: + - title: "Repository" + short: true + value: ${{ github.repository }} + - title: "Status" + short: true + value: "In Progress" + - title: "Branch" + short: true + value: ${{ github.ref }} + - title: "Author" + short: true + value: ${{ github.actor }} + - title: "Action" + value: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + - title: "Diff" + value: ${{ github.event.head_commit.url }} + - title: "Changes" + value: ${{ toJSON(github.event.head_commit.message) }} + + - name: Docker meta + id: meta + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 + with: + # list of Docker images to use as base name for tags + images: | + crestal/intentkit + # generate Docker tags based on the following events/attributes + tags: | + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + type=raw,value=latest,enable={{is_default_branch}} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 + + - name: Login to Docker Hub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 + with: + username: crestal + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Build and push + uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 + with: + build-args: | + RELEASE=${{ github.event.release.tag_name ||
'latest'}} + push: true + tags: ${{ steps.meta.outputs.tags }} + labels: ${{ steps.meta.outputs.labels }} + cache-from: type=gha + cache-to: type=gha,mode=max + + - name: Configure AWS Credentials + if: ${{ github.event_name == 'release' }} + uses: aws-actions/configure-aws-credentials@e3dd6a429d7300a6a4c196c26e071d42e0343502 + with: + aws-region: ${{ secrets.AWS_REGION }} + role-to-assume: arn:aws:iam::${{ secrets.AWS_ID }}:role/GithubActions + + - name: Deploy to Amazon EKS Dev + if: ${{ github.event_name == 'release' && github.event.action == 'prereleased' }} + run: | + VERSION=${{ github.event.release.tag_name }} + VERSION_NO_V=${VERSION#v} + aws eks update-kubeconfig --region ${{ secrets.AWS_REGION }} --name ${{ secrets.EKS_DEV_CLUSTER }} + kubectl version + kubectl set image -n testnet-dev deployment/intent-api intent-api=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/intent-readonly intent-readonly=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/intent-autonomous intent-autonomous=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/intent-tg intent-tg=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/intent-scheduler intent-scheduler=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/intent-checker intent-checker=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/intent-singleton intent-singleton=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-dev deployment/sandbox-open sandbox-open=crestal/intentkit:${VERSION_NO_V} + + - name: Deploy to Amazon EKS Prod + if: ${{ github.event_name == 'release' && github.event.action == 'released' }} + run: | + VERSION=${{ github.event.release.tag_name }} + VERSION_NO_V=${VERSION#v} + aws eks update-kubeconfig --region ${{ secrets.AWS_REGION }} --name ${{ secrets.EKS_PROD_CLUSTER }} + kubectl version + kubectl set image -n testnet-prod deployment/intent-api intent-api=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/intent-readonly intent-readonly=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/intent-autonomous intent-autonomous=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/intent-tg intent-tg=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/intent-scheduler intent-scheduler=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/intent-checker intent-checker=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/intent-singleton intent-singleton=crestal/intentkit:${VERSION_NO_V} + kubectl set image -n testnet-prod deployment/sandbox-open sandbox-open=crestal/intentkit:${VERSION_NO_V} + + - name: Build Success + if: ${{ success() }} + uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d + with: + method: chat.update + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.SLACK_CHANNEL }} + ts: "${{ steps.ci_start.outputs.ts }}" + text: "Build Succeeded ✅" + attachments: + - color: "28a745" + fields: + - title: "Repository" + short: true + value: ${{ github.repository }} + - title: "Status" + short: true + value: "Completed" + - title: "Branch" + short: true + value: ${{ github.ref }} + - title: "Author" + short: true + value: ${{ github.actor }} + - title: "Image" + value: https://hub.docker.com/r/crestal/intentkit/tags + - title:
"Diff" + value: ${{ github.event.head_commit.url }} + - title: "Changes" + value: ${{ toJSON(github.event.head_commit.message) }} + + - name: Build Failure + if: ${{ failure() }} + uses: slackapi/slack-github-action@485a9d42d3a73031f12ec201c457e2162c45d02d + with: + method: chat.update + token: ${{ secrets.SLACK_BOT_TOKEN }} + payload: | + channel: ${{ secrets.SLACK_CHANNEL }} + ts: "${{ steps.ci_start.outputs.ts }}" + text: "Build Failed ❌" + attachments: + - color: "dc3545" + fields: + - title: "Repository" + short: true + value: ${{ github.repository }} + - title: "Status" + short: true + value: "Failed" + - title: "Branch" + short: true + value: ${{ github.ref }} + - title: "Author" + short: true + value: ${{ github.actor }} + - title: "Action" + value: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + - title: "Diff" + value: ${{ github.event.head_commit.url }} + - title: "Changes" + value: ${{ toJSON(github.event.head_commit.message) }} diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 00000000..247b6080 --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,100 @@ +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +# +# ******** NOTE ******** +# We have attempted to detect the languages in your repository. Please check +# the `language` matrix defined below to confirm you have the correct set of +# supported CodeQL languages. +# +name: "CodeQL Advanced" + +on: + push: + branches: [ "main" ] + pull_request: + branches: [ "main" ] + schedule: + - cron: '35 18 * * 0' + +jobs: + analyze: + name: Analyze (${{ matrix.language }}) + # Runner size impacts CodeQL analysis time. To learn more, please see: + # - https://gh.io/recommended-hardware-resources-for-running-codeql + # - https://gh.io/supported-runners-and-hardware-resources + # - https://gh.io/using-larger-runners (GitHub.com only) + # Consider using larger runners or machines with greater resources for possible analysis time improvements. + runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }} + permissions: + # required for all workflows + security-events: write + + # required to fetch internal or private CodeQL packs + packages: read + + # only required for workflows in private repositories + actions: read + contents: read + + strategy: + fail-fast: false + matrix: + include: + - language: actions + build-mode: none + - language: python + build-mode: none + # CodeQL supports the following values keywords for 'language': 'actions', 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' + # Use `c-cpp` to analyze code written in C, C++ or both + # Use 'java-kotlin' to analyze code written in Java, Kotlin or both + # Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both + # To learn more about changing the languages that are analyzed or customizing the build mode for your analysis, + # see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning. 
+ # If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how + # your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + # Add any setup steps before running the `github/codeql-action/init` action. + # This includes steps like installing compilers or runtimes (`actions/setup-node` + # or others). This is typically only required for manual builds. + # - name: Setup runtime (example) + # uses: actions/setup-example@v1 + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v3 + with: + languages: ${{ matrix.language }} + build-mode: ${{ matrix.build-mode }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + + # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs + # queries: security-extended,security-and-quality + + # If the analyze step fails for one of the languages you are analyzing with + # "We were unable to automatically build your code", modify the matrix above + # to set the build mode to "manual" for that language. Then modify this step + # to build your code. + # ℹ️ Command-line programs to run using the OS shell. + # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun + - if: matrix.build-mode == 'manual' + shell: bash + run: | + echo 'If you are using a "manual" build mode for one or more of the' \ + 'languages you are analyzing, replace this with the commands to build' \ + 'your code, for example:' + echo ' make bootstrap' + echo ' make release' + exit 1 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v3 + with: + category: "/language:${{matrix.language}}" diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..6935b2e8 --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,29 @@ +name: Lint + +on: + pull_request: + branches: [ main ] + +jobs: + security-scan: + permissions: + contents: read + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v4 + with: + python-version-file: "pyproject.toml" + + - name: Install uv + uses: astral-sh/setup-uv@v5 + + - name: Install dependencies + run: | + uv sync --locked --all-extras --dev + + - name: Ruff Check + run: | + sh lint.sh ci diff --git a/.gitignore b/.gitignore index 82f92755..76f29a7a 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,9 @@ __pycache__/ *.py[cod] *$py.class +# MacOS +.DS_Store + # C extensions *.so @@ -24,7 +27,6 @@ share/python-wheels/ *.egg-info/ .installed.cfg *.egg -MANIFEST # PyInstaller # Usually these files are written by a python script from a template @@ -122,7 +124,9 @@ celerybeat.pid *.sage.py # Environments +*.env .env +!example.env .venv env/ venv/ @@ -160,3 +164,16 @@ cython_debug/ # and can be added to the global gitignore or merged into this file.
For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ + +# IDE +.idea +.vscode +.cursor + +# web3 +wallet_data.txt + +# export/import data +*.response +scripts/*.yaml +skills.csv diff --git a/.trae/rules/project_rules.md b/.trae/rules/project_rules.md new file mode 120000 index 00000000..95bbedca --- /dev/null +++ b/.trae/rules/project_rules.md @@ -0,0 +1 @@ +../../LLM.md \ No newline at end of file diff --git a/.windsurfrules b/.windsurfrules new file mode 120000 index 00000000..580a6313 --- /dev/null +++ b/.windsurfrules @@ -0,0 +1 @@ +LLM.md \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..329f2d26 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,877 @@ +## v0.7.0 + +### Features +- **Config Enhancement**: Add intentkit_prompt to config and prompt system for better customization +- **Credit Management**: Add comprehensive credit event consistency checker with base validation +- **Migration Tools**: Add script to migrate credit accounts from transactions +- **Optimization**: Optimize credit event consistency checking scripts for better performance + +### Fixes +- **Model Update**: Change default model to gpt-5-mini for improved performance +- **Credit Events**: Update and improve credit event consistency check script +- **Workflow**: Update pypi publish workflow and changelog + +### Refactoring +- **Credit Event Logic**: Improve readability of credit type distribution logic +- **Performance**: Remove redundant logs and add batch stats tracking for better monitoring + +### Chores +- **Documentation**: Update LLM rules and guidelines +- **Migration Scripts**: Fix and improve migration scripts + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.26...v0.7.0 + +## v0.6.26 + +### Refactoring +- Move asyncio import to top of file in account_checking.py + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.25...v0.6.26 + +## v0.6.25 + +### Refactoring +- Simplified Dockerfile dependency installation process +- Removed unnecessary await from sync get_system_config calls in Twitter module + +### Build & Configuration +- Updated project name and added workspace configuration + +### Documentation +- Updated changelog for v0.6.23 release + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.23...v0.6.25 + +## v0.6.23 + +### Features +- Add reasoning_effort parameter for gpt-5 models + +### Documentation +- Update changelog + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.22...v0.6.23 + +## v0.6.22 + +### Features +- **XMTP Skills Enhancement**: Expanded XMTP skills to support multiple networks, improving cross-chain communication capabilities +- **DexScreener Integration**: Added comprehensive DexScreener skills for enhanced token and pair information retrieval + - New `get_pair_info` skill for detailed trading pair data + - New `get_token_pairs` skill for token pair discovery + - New `get_tokens_info` skill for comprehensive token information + - Enhanced search functionality with improved utilities + +### Technical Improvements +- Added new Web3 client utilities for better blockchain interaction +- Enhanced chat functionality in core system +- Updated agent schema with improved configuration options +- Improved skill base classes with better error handling + +### Dependencies +- Updated project dependencies for better compatibility and security + +**Full Changelog**: 
https://github.com/crestalnetwork/intentkit/compare/v0.6.21...v0.6.22 + +## v0.6.21 + +### Features +- Added agent onchain fields support +- Added web3 client and updated skill base class +- Added clean thread memory functionality + +### Improvements +- Package upgrade and maintenance + +### Bug Fixes +- Fixed typo in intentkit package info + +### Documentation +- Updated changelog documentation + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.20...v0.6.21 + +## v0.6.20 + +### Features +- **Firecrawl Integration**: Enhanced firecrawl scraping capabilities by consolidating logic into a single `firecrawl_scrape` skill, removing the redundant `firecrawl_replace_scrape` skill +- **Web3 Client**: Added web3 client support to skills for better blockchain integration +- **XMTP Transfer**: Improved XMTP transfer validation and checking mechanisms + +### Bug Fixes +- Fixed Supabase integration bugs +- Better XMTP transfer validation and error handling +- Removed deprecated skill context to improve performance + +### Documentation +- Updated Firecrawl skill documentation +- Enhanced changelog maintenance + +### Technical Improvements +- Code quality improvements and lint fixes +- Minor performance optimizations + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.19...v0.6.20 + +## v0.6.19 + +### Features +- **Credit System**: Add base credit type amount fields and migration script +- **Credit Events**: Enhance consistency checker and add fixer script +- **Event System**: Add event check functionality +- **Transaction Details**: Add fee detail in event and tx + +### Bug Fixes +- **CDP Networks**: Add network id mapping hack for cdp mainnet networks +- **UI**: Always hide skill details +- **Onchain Options**: Better onchain options description + +### Technical Improvements +- Enhanced credit event consistency checking and fixing capabilities +- Improved network compatibility for CDP mainnet operations +- Better transaction fee tracking and reporting + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.18...v0.6.19 + +## v0.6.18 + +### New Features +- **Casino Skills**: Added comprehensive gambling and gaming skill set for interactive agent entertainment + - **Deck Shuffling**: Multi-deck support with customizable jokers for Blackjack and card games + - **Card Drawing**: Visual card display with PNG/SVG images for interactive gameplay + - **Quantum Dice Rolling**: True quantum randomness using QRandom API for authentic dice games + - **State Management**: Persistent game sessions with deck tracking and rate limiting + - **Gaming APIs**: Integration with Deck of Cards API and QRandom quantum random number generator + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.17...v0.6.18 + +## v0.6.17 + +### ✨ New Features +- **Error Tracking**: Add error_type field to chat message model for better error tracking + +### 🔧 Improvements +- **Core Engine**: Refactor core engine and update models for better performance +- **System Messages**: Refactor system messages handling +- **Error Handling**: Refactor error handling system + +### 🐛 Bug Fixes +- **Wallet Provider**: Fix wallet provider JSON configuration +- **Linting**: Fix linting issues + +### 📚 Documentation +- Update changelog documentation + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.16...v0.6.17 + +## v0.6.16 + +### 🐛 Bug Fixes +- **Agent Generator**: Fixed missing wallet_provider default configuration in
agent schema generation +- **Schema Updates**: Updated agent schema JSON to reflect latest configuration requirements + +### 🔧 Improvements +- Enhanced agent generator to include CDP wallet provider as default +- Improved agent configuration consistency + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.15...v0.6.16 + +## v0.6.15 + +### 🔧 Improvements +- **Validation Logging**: Enhanced error logging in schema validation for better debugging +- **Documentation**: Updated changelog with v0.6.14 release notes + +### 🐛 Bug Fixes +- Improved error handling and logging in generator validation + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.14...v0.6.15 + +## v0.6.14 + +### 🐛 Bug Fixes +- **Readonly Wallet Address**: Fixed readonly_wallet_address issue + +### 🔧 Changes +- Fixed readonly wallet address handling + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.13...v0.6.14 + +## v0.6.13 + +### ✨ New Features +- **Readonly Wallet Support**: Added readonly wallet provider and functionality +- **Agent API Streaming**: Implemented SSE (Server-Sent Events) for chat stream mode in agent API +- **Internal Stream Client**: Added internal streaming client capabilities +- **Entrypoint System Prompts**: Added system prompt support for entrypoints, including XMTP entrypoint prompts +- **Agent Model Configuration**: Updated agent model configuration system + +### 🔧 Improvements +- **Documentation**: Updated changelog and LLM documentation +- **Twitter Entrypoint**: Removed deprecated Twitter entrypoint + +### 🐛 Bug Fixes +- **Agent Context Type**: Fixed agent context type issues +- **Error Messages**: Improved error message handling + +### Diff +[Compare v0.6.12...v0.6.13](https://github.com/crestalnetwork/intentkit/compare/v0.6.12...v0.6.13) + +## v0.6.12 + +### 🔧 Improvements +- **Skill Messages**: Consolidated artifact attachments into skill messages for better organization +- **Documentation**: Updated changelog entries + +### Diff +[Compare v0.6.11...v0.6.12](https://github.com/crestalnetwork/intentkit/compare/v0.6.11...v0.6.12) + +## v0.6.11 + +### ✨ New Features +- **XMTP Integration**: Added new XMTP features including swap and price skills +- **User Wallet Info**: Enhanced user wallet information display +- **DeepSeek Integration**: Updated DeepSeek integration with improved functionality + +### 🐛 Bug Fixes +- **Search Functionality**: Temporarily disabled search for GPT-5 to resolve issues +- **Configuration**: Better handling of integer config loading and number type validation +- **Fee Agent Account**: Fixed fee_agent_account assignment in expense_summarize function +- **Security**: Fixed clear-text logging of sensitive information (CodeQL alerts #31, #32) +- **XMTP Schema**: Added missing XMTP schema files +- **DeepSeek Bug**: Resolved DeepSeek-related bugs + +### 🔧 Improvements +- **Prompt System**: Refactored prompt system for better performance +- **Code Quality**: Improved formatting and code organization +- **Build Configuration**: Updated GitHub workflow build configuration +- **Dependencies**: Updated uv sync and dependency management + +### 📚 Documentation +- Updated changelog entries throughout development cycle +- Enhanced documentation for new features + +### Diff +[Compare v0.6.10...v0.6.11](https://github.com/crestalnetwork/intentkit/compare/v0.6.10...v0.6.11) + +## v0.6.10 + +### ✨ New Features +- **XMTP Integration**: Added new XMTP message transfer skill with attachment support +-
**LangGraph 6.0 Upgrade**: Updated to LangGraph 6.0 for improved agent capabilities + +### 🔧 Improvements +- **API Key Management**: Standardized API key retrieval across all skills for better consistency +- **Skill Context**: Refactored skill context handling for improved performance and maintainability +- **Skill Architecture**: Enhanced base skill classes with better API key management patterns +- **XMTP Skill**: Updated XMTP skill image format and schema configuration +- **Dependencies**: Added jsonref dependency for JSON reference handling +- **Build Workflow**: Updated GitHub Actions build workflow configuration + +### 🐛 Bug Fixes +- **XMTP Skill**: Align state typing and schema enum/titles for public/private options +- **GPT-5 Features**: Fixed GPT-5 model features and capabilities implementation +- **CI Improvements**: Fixed continuous integration workflow issues +- **Agent & LLM Model Validation**: Enhanced agent and LLM models with improved validation capabilities and error handling + +### 🛠️ Technical Changes +- Updated 169 files with comprehensive refactoring +- Added XMTP skill category with transfer capabilities +- Improved skill base classes across all categories +- Enhanced context handling in core engine and nodes +- Updated dependencies and lock files +- Enhanced XMTP skill metadata and configuration files +- Updated skill image format for better compatibility +- Updated `intentkit/pyproject.toml` with jsonref dependency +- Enhanced `.github/workflows/build.yml` configuration +- Updated `intentkit/uv.lock` with new dependency + +### 📚 Documentation +- **Changelog**: Updated changelog documentation with comprehensive release notes + +### Diff +[Compare v0.6.9...v0.6.10](https://github.com/crestalnetwork/intentkit/compare/v0.6.9...v0.6.10) + +## v0.6.9 + +### 📚 Documentation +- **API Documentation**: Updated API documentation URLs to use localhost for development + +### 🔧 Maintenance +- **Sentry Configuration**: Updated sentry configuration settings + +### Diff +[Compare v0.6.8...v0.6.9](https://github.com/crestalnetwork/intentkit/compare/v0.6.8...v0.6.9) + +## v0.6.8 + +### 🚀 Features & Improvements + +#### 🔧 Dependency Updates +- **LangGraph SDK & LangMem**: Updated to latest versions for improved performance +- **FastAPI**: Updated core dependencies for better stability + +#### 📚 Documentation +- **LLM Integration Guide**: Enhanced guide with better examples and updated instructions +- **Cursor Rules**: Converted to symlink for better maintainability + +#### 💾 Database +- **Connection Pooling**: Enhanced database connection pooling configuration with new parameters for better performance and resource management + +### 🐛 Bug Fixes +- **Twitter**: Fixed rate limit handling for improved reliability + +### 🔧 Maintenance +- **Elfa**: Migrated to v2 API for better functionality +- **Documentation**: Various changelog and documentation updates + +### Diff +[Compare v0.6.7...v0.6.8](https://github.com/crestalnetwork/intentkit/compare/v0.6.7...v0.6.8) + +## v0.6.7 + +### 🚀 Features +- **Autonomous Task Management System**: Added comprehensive autonomous task management capabilities with new skills for creating, updating, and managing autonomous tasks +- **Agent Information Endpoint**: New endpoint to retrieve current agent information including EVM and Solana wallet addresses +- **Enhanced Agent Model**: Added EVM and Solana wallet address fields to AgentResponse model +- **Configurable Payment Settings**: Added configurable free_quota and refill_amount to payment settings +
+### 🔧 Improvements +- **Simplified Autonomous Tasks**: Removed enabled parameter from add_autonomous_task skill - tasks are now always enabled by default +- **Better Task Integration**: Autonomous task information is now included in entrypoint rules system prompt +- **Code Organization**: Refactored quota reset functions to AgentQuota class and moved update_agent_action_cost function to agent module + +### 🐛 Bug Fixes +- Fixed autonomous skill bugs and ensured proper serialization of autonomous tasks in agent operations +- Improved code formatting and removed unused files + +### 📚 Documentation +- Updated changelog with comprehensive release notes + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.6...v0.6.7 + +## v0.6.6 + +### 🚀 Features +- **Twitter Timeline Enhancement**: Exclude replies from twitter timeline by default to improve content quality and relevance + +### 🔧 Technical Details +- Modified twitter timeline skill to filter out reply tweets by default +- This change improves the signal-to-noise ratio when fetching timeline data + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.5...v0.6.6 + +## v0.6.5 + +### 🚀 Features +- Add sanitize_privacy method to ChatMessage model for better privacy handling +- Add redis_db parameter to all redis connections for improved database management + +### 🔧 Improvements +- Prevent twitter reply skill from replying to own tweets to avoid self-loops +- Better agent API documentation with improved clarity and examples +- Enhanced agent documentation with clearer explanations + +### 🐛 Bug Fixes +- Fix agent data types for better type safety +- Fix bug in agent schema validation +- Remove number field in agent model to simplify structure +- Use separate connection for langgraph migration setup to prevent conflicts +- Fix typo in documentation + +### 📚 Documentation +- Improved agent API documentation +- Updated changelog entries +- Better agent documentation structure + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.4...v0.6.5 + +## v0.6.4 + +### 🔧 Maintenance +- **Dependency Management**: Rollback langgraph-checkpoint-postgres version for stability +- **Package Updates**: Update dependencies in pyproject.toml +- **Documentation**: Documentation improvements + +### 🐛 Bug Fixes +- **Compatibility**: Fixed dependency compatibility issues + +### 🚀 Improvements +- **Stability**: Enhanced system stability with dependency rollbacks + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.3...v0.6.4 + +## v0.6.3 + +### 🚀 Features +- **CDP Swap Skill**: Added CDP swap skill for token swapping functionality + +### 🐛 Bug Fixes +- Fixed lint error +- Fixed a type error + +### 🔧 Maintenance +- Updated dependencies in pyproject.toml +- Fixed dependency error +- Updated package versions +- Documentation changelog updates + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.2...v0.6.3 + +## v0.6.2 + +### 🚀 Features +- **Agent API Enhancement**: Added comprehensive agent API sub-application with CORS support and improved error handling +- **Authentication Improvements**: Implemented token-based authentication for agent API endpoints +- **Credit Tracking**: Enhanced credit event tracking with agent_wallet_address field for better monitoring +- **Chat API Flexibility**: Made user_id optional in chat API with automatic fallback to agent.owner +- **Documentation Updates**: Restructured and updated API documentation for better clarity +
+### 🔧 Improvements +- **Twitter Service**: Refactored twitter service for better maintainability +- **Text Processing**: Improved formatting in extract_text_and_images function +- **Agent Authentication**: Streamlined agent and admin authentication systems +- **Supabase Integration**: Fixed supabase link issues +- **API Key Skills**: Enhanced description for get API key skills + +### 📚 Documentation +- Updated README with latest information +- Restructured API documentation files +- Added comprehensive agent API documentation + +### 🛠️ Technical Changes +- Updated dependencies with uv sync +- Various code refactoring for better code quality +- Fixed typos in chat message handling + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.1...v0.6.2 + +## v0.6.1 + +### Features +- feat: add public key to supabase + +### Bug Fixes +- fix: node log level +- fix: cdp get balance bug +- fix: close some default skills + +### Documentation +- doc: changelog + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.6.0...v0.6.1 + +## v0.6.0 + +### 🚀 Features +- **IntentKit Package Publishing**: The intentkit package is now published and available for installation +- **Web Scraper Skills**: Added comprehensive web scraping capabilities to scrape entire sites in one prompt +- **Firecrawl Integration**: New Firecrawl skill for advanced web content extraction +- **Supabase Skills**: Complete Supabase integration with data operations and error handling +- **HTTP Skills**: Generic HTTP request capabilities for external API interactions +- **Enhanced Skill Context**: More contextual information available to skills during execution + +### 🔧 Improvements +- **Core Refactoring**: Major refactoring of the intentkit core system for better performance +- **Stream Executor**: Improved streaming capabilities for real-time responses +- **Agent Creation**: Streamlined agent creation process +- **Memory Management**: Better memory handling with SQLite support for testing +- **CDP Wallet Integration**: Enhanced CDP wallet functionality with automatic wallet creation +- **Skill Schema Updates**: Improved skill schemas with conditional validation +- **LangGraph Integration**: Better PostgreSQL saver initialization for LangGraph + +### 🐛 Bug Fixes +- Fixed import issues in core modules +- Corrected skills path and added webp support in admin schema +- Fixed CDP balance retrieval functionality +- Resolved wallet creation issues during agent initialization +- Various lint and formatting fixes + +### 📚 Documentation +- Updated LLM integration guide +- Enhanced skill development documentation +- Improved changelog maintenance + +### Breaking Changes +- Core intentkit package structure has been refactored +- Some skill interfaces may have changed due to enhanced context support + +### Migration Guide +- Update your intentkit package installation to use the new published version +- Review skill implementations if using custom skills +- Check agent creation code for any compatibility issues + +**Full Changelog**: https://github.com/crestalnetwork/intentkit/compare/v0.5.9...v0.6.0 + +## v0.5.0 + +### Breaking Changes +- Switch to uv as package manager + +## v0.4.0 + +### New Features +- Support Payment + +## 2025-02-26 + +### New Features +- Chat entity and API + +## 2025-02-25 + +### New Features +- Elfa integration + +## 2025-02-24 + +### New Features +- Add input token limit to config +- Auto clean memory after agent update + +## 2025-02-23 + +### New Features +- Defillama
skills + +## 2025-02-21 + +### New Features +- AgentKit upgrade to new package + +## 2025-02-20 + +### New Features +- Add new skill config model +- Introduce json schema for skill config + +## 2025-02-18 + +### New Features +- Introduce json schema for agent model +- Chain provider abstraction and quicknode + +## 2025-02-17 + +### New Features +- Check and get the telegram bot info when creating an agent + +## 2025-02-16 + +### New Features +- Chat History API +- Introduce the Chat ID concept + +## 2025-02-15 + +### New Features +- GOAT Integration +- CrossMint Wallet Integration + +## 2025-02-14 + +### New Features +- Auto create cdp wallet when creating an agent +- CryptoCompare skills + +## 2025-02-13 + +### New Features +- All chats will be saved in the db table chat_messages + +### Breaking Changes +- Remove the config.debug_resp flag; you can only use the debug endpoint for debugging +- Remove config.autonomous_memory_public; the autonomous task will always use chat id "autonomous" + +## 2025-02-11 + +### Improvements +- Twitter account linking supports redirect after authorization + +## 2025-02-05 + +### New Features +- Acolyt integration + +## 2025-02-04 + +### Improvements +- Split scheduler into a new service +- Split singleton into a new service + +## 2025-02-03 + +### Breaking Changes +- Use async everywhere + +## 2025-02-02 + +### Bug Fixes +- Fix bugs in twitter account binding + +## 2025-02-01 + +### New Features +- Readonly API for better performance + +## 2025-01-30 + +### New Features +- LLM creativity in agent config +- Agent memory cleanup by token count + +## 2025-01-28 + +### New Features +- Enso tx CDP wallet broadcast + +## 2025-01-27 + +### New Features +- Sentry Error Tracking + +### Improvements +- Better short memory management, based on token count now +- Better logs + +## 2025-01-26 + +### Improvements +- If you enable JWT verification for the admin API, it now ignores requests coming from the internal network +- Improve the docker compose tutorial; the twitter and tg entrypoint services are commented out by default + +### Breaking Changes +- The new docker-compose.yml changes the service names, adding an "intent-" prefix to all services + +## 2025-01-25 + +### New Features +- DeepSeek LLM Support! +- Enso skills now use CDP wallet +- Add an API for frontend to link twitter account to an agent + +## 2025-01-24 + +### Improvements +- Refactor telegram services +- Save telegram user info to db when it is linked to an agent + +### Bug Fixes +- Fix bug where some skills stopped working after twitter token refresh + +## 2025-01-23 + +### Features +- Chat API released; you can use it to support a web UI + +### Improvements +- Admin API: + - When creating an agent, id is not required now; we will generate a random id if not provided + - All agent response data is improved; it contains more data now +- ENSO Skills improved + +## 2025-01-22 + +### Features +- If the admin API enables JWT authentication, an agent can only be updated by its owner +- Add upstream_id to Agent; when other services call the admin API, they can use this field for idempotency or to track the agent + +## 2025-01-21 + +### Features +- Enso: add network skill + +### Improvements +- Enso skills behavior improved + +## 2025-01-20 + +### Features +- Twitter skills now get more context; the agent can know the author of the tweet, the thread of the tweet, and more.
+ +## 2025-01-19 + +### Improvements +- Twitter skills will not reply to your own tweets +- Twitter docs improved + +## 2025-01-18 + +### Improvements +- Twitter rate limits now only apply when using OAuth +- Better twitter rate limit numbers +- Slack notifications improved + +## 2025-01-17 + +### New Features +- Add twitter skill rate limit + +### Improvements +- Better doc/create_agent.sh +- OAuth 2.0 refresh token failure handling + +### Bug Fixes +- Fix bug in twitter search skill + +## 2025-01-16 + +### New Features +- Twitter Follow User +- Twitter Like Tweet +- Twitter Retweet +- Twitter Search Tweets + +## 2025-01-15 + +### New Features +- Twitter OAuth 2.0 Authorization Code Flow with PKCE +- Twitter access token auto refresh +- AgentData table and AgentStore interface + +## 2025-01-14 + +### New Features +- ENSO Skills + +## 2025-01-12 + +### Improvements +- Better architecture doc: [Architecture](docs/architecture.md) + +## 2025-01-09 + +### New Features +- Add the IntentKitSkill abstract class; for now, it has a skill store interface out of the box +- Use the skill store in Twitter skills; fetch skills store the last processed tweet ID to prevent duplicate processing +- CDP skills filter in Agent: choose only the skills you want; the fewer skills, the better the performance + +### Improvements +- Add a document for skill contributors: [How to add a new skill](docs/contributing/skills.md) + +## 2025-01-08 + +### New Features +- Add `prompt_append` to Agent; it will be appended to the entire prompt as a system role and has stronger priority +- When you use web debug mode, you can see the entire prompt sent to the AI model +- You can use the new query param `thread` to debug any conversation thread + +## 2025-01-07 + +### New Features +- Memory Management + +### Improvements +- Refactor the core AI agent creation + +### Bug Fixes +- Fix bug where the debug response model was incorrect + +## 2025-01-06 + +### New Features +- Optional JWT Authentication for admin API + +### Improvements +- Refactor the core AI agent engine for better architecture +- Telegram entrypoint greeting message + +### Bug Fixes +- Fix bug where agent config updates sometimes did not take effect + +## 2025-01-05 + +### Improvements +- Telegram entrypoint supports token regeneration +- More robust Telegram entrypoint error handling + +## 2025-01-03 + +### Improvements +- Telegram entrypoint supports dynamic enable and disable +- Better conversation behavior about the wallet + +## 2025-01-02 + +### New Features +- System prompt; it will affect all agents in a deployment.
+- Nation number in Agent model + +### Improvements +- Share agent memory between all public entrypoints +- Auto timestamp in db model + +### Bug Fixes +- Fix bug in db create from scratch + +## 2025-01-01 + +### Bug Fixes +- Fix Telegram group bug + +## 2024-12-31 + +### New Features +- Telegram Entrypoint + +## 2024-12-30 + +### Improvements +- Twitter Integration Enhancement + +## 2024-12-28 + +### New Features +- Twitter Entrypoint +- Admin cron for quota clear +- Admin API get all agents + +### Improvements +- Change lint tools to ruff +- Improve CI +- Improve twitter skills + +### Bug Fixes +- Fix bug in db base code + +## 2024-12-27 + +### New Features +- Twitter Skills + - Get Mentions + - Get Timeline + - Post Tweet + - Reply Tweet + +### Improvements +- CI/CD refactoring for better security + +## 2024-12-26 + +### Improvements +- Change the default plan from "free" to "self-hosted"; new agents now have a 9999 message limit for testing +- Add a "DEBUG_RESP" flag; when set to true, the Agent will respond with thought processes and time costs +- Better DB session management + +## 2024-12-25 + +### Improvements +- Use Poetry as package manager +- Docker Compose tutorial in readme + +## 2024-12-24 + +### New Features +- Multiple Agent Support +- Autonomous Agent Management +- Blockchain Integration (CDP for now, will add more) +- Extensible Skill System +- Extensible Plugin System + +### Improvements +- Change lint tools to ruff +- Improve CI +- Improve twitter skills + +### Bug Fixes +- Fix bug in db base code \ No newline at end of file diff --git a/CLAUDE.md b/CLAUDE.md new file mode 120000 index 00000000..580a6313 --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1 @@ +LLM.md \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..8911e89a --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Contributing to IntentKit + +We love your input! We want to make contributing to IntentKit as easy and transparent as possible, whether it's: + +- Reporting a bug +- Discussing the current state of the code +- Submitting a fix +- Proposing new features +- Becoming a maintainer + +## We Develop with GitHub + +We use GitHub to host code, to track issues and feature requests, as well as accept pull requests. + +## Pull Request Process + +1. Fork the repo and create your branch from `main`. +2. If you've added code that should be tested, add tests. +3. If you've changed APIs, update the documentation. +4. Ensure the test suite passes. +5. Make sure your code lints. +6. Issue that pull request! + +## Any contributions you make will be under the MIT Software License + +In short, when you submit code changes, your submissions are understood to be under the same [MIT License](http://choosealicense.com/licenses/mit/) that covers the project. Feel free to contact the maintainers if that's a concern. + +## Report bugs using GitHub's [issue tracker](https://github.com/crestalnetwork/intentkit/issues) + +We use GitHub issues to track public bugs. Report a bug by [opening a new issue](https://github.com/crestalnetwork/intentkit/issues/new); it's that easy! + +## Write bug reports with detail, background, and sample code + +**Great Bug Reports** tend to have: + +- A quick summary and/or background +- Steps to reproduce + - Be specific! + - Give sample code if you can.
+- What you expected would happen +- What actually happens +- Notes (possibly including why you think this might be happening, or stuff you tried that didn't work) + +## License + +By contributing, you agree that your contributions will be licensed under the MIT License. diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md new file mode 100644 index 00000000..676c6652 --- /dev/null +++ b/DEVELOPMENT.md @@ -0,0 +1,115 @@ +# Development + +## Quick Start + +### Docker (when you just want to give it a quick try) +> If you decide you want to contribute to IntentKit, skip this section and run the code in your local development environment. + +0. Install [Docker](https://docs.docker.com/get-started/get-docker/). + +1. Create a new directory and navigate into it: +```bash +mkdir intentkit && cd intentkit +``` + +2. Download the required files: +```bash +# Download docker-compose.yml +curl -O https://raw.githubusercontent.com/crestalnetwork/intentkit/main/docker-compose.yml + +# Download example environment file +curl -O https://raw.githubusercontent.com/crestalnetwork/intentkit/main/example.env +``` + +3. Set up environment: +```bash +# Rename example.env to .env +mv example.env .env + +# Edit .env file and add your configuration +# Make sure to set OPENAI_API_KEY +``` + +4. Start the services: +```bash +docker compose up +``` +This will block the current terminal to show logs; you can press Ctrl+C to stop it. When you want to run another command, open another terminal. + +5. Try it out: +```bash +curl "http://127.0.0.1:8000/debug/example/chat?q=Hello" +``` +In the terminal, curl cannot automatically escape special characters, so you can use a browser to test: just copy the URL into your browser and replace "Hello" with your own words. + +6. Manage your agent: +When intentkit first starts, it will create an example agent for you. You can manage your agent by using the scripts in the `scripts` directory. +```bash +cd scripts +# Export agent +sh export.sh example +# Import agent +sh import.sh example +# Create another agent +sh create.sh my_agent +``` + +### Local Development +1. Clone the repository: +```bash +git clone https://github.com/crestalnetwork/intentkit.git +cd intentkit +``` + +2. Set up your environment: + +If you haven't installed [uv](https://docs.astral.sh/uv/), please [install](https://docs.astral.sh/uv/getting-started/installation/) it first. +You don't need to worry about your Python version and venv; uv will automatically handle everything for you. +```bash +uv sync +``` + +3. Configure your environment: + +Read [Configuration](docs/configuration.md) for detailed settings. Then create your local .env file. +```bash +cp example.env .env +# Edit .env with your configuration +# OPENAI_API_KEY and DB_* are required +``` + +4. Run the application: +```bash +# Run the API server in development mode +uvicorn app.api:app --reload + +# There are many other services, like the autonomous agent scheduler; you can try them later +# python -m app.autonomous +``` + +5. Try it out: +```bash +curl "http://127.0.0.1:8000/debug/example/chat?q=Hello" +``` +In the terminal, curl cannot automatically escape special characters, so you can use a browser to test: just copy the URL into your browser and replace "Hello" with your own words. + +6. Manage your agent: +When intentkit first starts, it will create an example agent for you. You can manage your agent by using the scripts in the `scripts` directory.
```bash +cd scripts +# Export agent +sh export.sh example +# Import agent +sh import.sh example +# Create another agent +sh create.sh my_agent +``` + +## What's Next + +For more about agent management, check out [Agent Management](docs/agent.md). + +You can visit the [API Docs](http://localhost:8000/redoc#tag/Agent) to learn more. + +If you want to contribute skills, check out [Skill Contributing](docs/contributing/skills.md). diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 00000000..2b991426 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,31 @@ +# Use Python slim image as the base +FROM python:3.12-slim + +# Install uv +COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/ + +# Install system dependencies +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + curl \ + build-essential \ + libpq-dev \ + && apt-get purge -y --auto-remove curl \ + && rm -rf /var/lib/apt/lists/* + +# Set the working directory in the container +WORKDIR /app + +# Copy the project into the image +ADD . /app + +# Install dependencies and sync the project +RUN --mount=type=cache,target=/root/.cache/uv \ + uv sync --locked + +ARG RELEASE +ENV RELEASE=$RELEASE +ENV PATH="/app/.venv/bin:$PATH" + +# Command to run the application +CMD ["uvicorn", "app.api:app", "--host", "0.0.0.0", "--port", "80"] diff --git a/GEMINI.md b/GEMINI.md new file mode 120000 index 00000000..580a6313 --- /dev/null +++ b/GEMINI.md @@ -0,0 +1 @@ +LLM.md \ No newline at end of file diff --git a/LLM.md b/LLM.md new file mode 100644 index 00000000..06714b72 --- /dev/null +++ b/LLM.md @@ -0,0 +1,89 @@ +# LLM Integration Guide for IntentKit + +This guide provides comprehensive information for Large Language Models (LLMs) working with the IntentKit autonomous agent framework. + +## Project Overview + +IntentKit is an autonomous agent framework that enables the creation and management of AI agents with a wide range of capabilities. + +## Architecture Understanding + +1. **IntentKit Package** (`intentkit/`) + - The intentkit/ folder is published as a pip package. + - The core/ folder contains the agent system, driven by LangGraph. + - The models/ folder houses the entity models; most entities have both Pydantic models for in-memory use and SQLAlchemy models for storage. + - The config/ folder contains the system-level config, like database config, LLM provider API keys, and skill provider API keys. + - The skills/ folder contains the skills system, driven by LangChain's BaseTool. The LLM can call skills to fetch data, perform actions, or interact with the environment. + - The utils/ folder contains the utility functions, like logging, formatting, etc. + - The abstracts/ folder contains interfaces for core/ and skills/ use. + +2. **IntentKit App** (`app/`) + - The app/ folder contains the API server, autonomous runner, and background scheduler. + - Users can use the intentkit package in their own projects for customization, or just start the intentkit app for the default features. + +3. **Operation or Temporary Scripts** (`scripts/`) + - Agent management scripts + - Manual scripts for potential use + - Migration scripts + +4. **Integration Tests** (`tests/`) + - Core package testing in `tests/core/` + - API server testing in `tests/api/` + - Skill integration testing in `tests/skills/` + +## Technology Stack +- Package manager: uv. Please use native `uv` commands; do not use `uv pip`. If you want to run or test a Python script, use `uv run` instead of invoking python directly.
+- Lint: ruff; run `uv run ruff format && uv run ruff check --fix` after your final edit. +- API framework: fastapi. Docs: https://fastapi.tiangolo.com/ +- DB ORM: SQLAlchemy 2.0; please use the 2.0 API, not the legacy one. Docs: https://docs.sqlalchemy.org/en/20/ +- Model: Pydantic V2; be careful not to use the obsolete V1 interface. Docs: https://docs.pydantic.dev/latest/ +- Testing Framework: pytest + +## Rules + +1. Always use the latest version when adding a new package. +2. Always use English for code comments. +3. Always use English to search. +4. Unless I specifically ask you to do so, do not git commit after coding. +5. Always place imports at the beginning of the file in your new code. + +## Dev Guide + +### Skills Development + +1. Skills are in the `intentkit/skills/` folder. Each folder is a category. Each skill category can contain multiple skills. A category can be a theme or a brand. +2. To avoid circular dependencies, skills can only depend on the contents of models, abstracts, utils, and clients. +3. The necessary elements in a skill category folder are as follows. For the paradigm of each element, you can refer to existing skills, such as skills/twitter + - `base.py`: A base class that inherits `IntentKitSkill`. Functions common to this category can also be written in the base class; a common example is get_api_key + - Then every skill has its own file, with the same name as the skill. Key points: + - The skill class inherits the base class created in base.py + - The `name` attribute needs the same prefix as the category name, such as `twitter_`, for uniqueness in the system. + - The `description` attribute is the description of the skill, which will be used by the LLM to select the skill. + - The `args_schema` attribute is the pydantic model for the skill arguments. + - The `_arun` method is the main logic of the skill. There is a special parameter `config: RunnableConfig`, which is used to pass the LangChain runnable config. The function `context_from_config` in IntentKitSkill can be used to get the context from the runnable config. In the _arun method, if there is any exception, just raise it, and the exception will be handled by the Agent. If the return value is not a string, you can document it in the description attribute. + - The `__init__.py` must have the function `async def get_skills( config: "Config", is_private: bool, store: SkillStoreABC, **_,) -> list[OpenAIBaseTool]` + - Config inherits from `SkillConfig`, and `states` is a dict whose key is the skill name and whose value is the skill state. If the skill category has any other config fields the agent creator needs to set, they can be added to Config. + - If the skill is stateless, you can add a global _cache for it, to avoid re-creating the skill object every time. + - A square image is needed in the category folder. + - Add a schema.json file for the config; since Config inherits from SkillConfig, you can check examples in existing skill categories to find the pattern. + - There is no need to catch exceptions in skills, because the agent has a dedicated module to catch skill exceptions. If you need to add more information to the exception, you can catch it and re-throw the appropriate exception. + +## Ops Guide + +### Git Commit +1. Run `uv run ruff format && uv run ruff check --fix` before committing. +2. When you generate a git commit message, always start with one of feat/fix/chore/docs/test/refactor/improve. Title format: `<type>: <subject>`; the subject should start with lowercase.
Only one line is needed; do not generate a commit message body. + +### GitHub Pull Request +1. If there are uncommitted changes, add and commit them. +2. Push to the remote branch. +3. Pull origin/main, so you can summarize the changes for the pull request title and description. +4. Create a pull request with MCP tools. + +### GitHub Release +1. Make a `git pull` first. +2. Release version numbers follow Semantic Versioning: a pre-release is `vA.B.C-devD`, a release is `vA.B.C`. When you calculate the next version number, a release adds 1 to the patch version `C`, and a pre-release adds 1 to the `D` of `-devD`; but if vA.B.C has already been released, the next pre-release restarts from the next patch version `vA.B.(C+1)-dev1`. For example, the next pre-release after v0.1.2-dev3 is v0.1.2-dev4, but if the v0.1.2 production release has already been published, the next pre-release restarts from v0.1.3-dev1. +3. Find the last release or pre-release version number using `git tag --sort=-version:refname | head -15`, diff origin/main against it, and summarize the release notes into build/changelog.md for later use. Add a diff link to the release notes too; the from and to should be the version numbers. +4. If the release is not a pre-release, also insert the release notes at the beginning of CHANGELOG.md (this file contains all historical release notes; don't use it in the gh command). Leave the changed CHANGELOG.md local; don't commit and push it, we will commit it together with the next changes. +5. Construct the `gh release create` command, using changelog.md as the notes file. +6. Use gh for the release only; don't create branches, tags, or pull requests, and don't switch the local branch. diff --git a/README.md b/README.md index 5737dd05..e0266726 100644 --- a/README.md +++ b/README.md @@ -1 +1,122 @@ -# intentkit \ No newline at end of file +# IntentKit +
+ IntentKit by Crestal +
+
+ +IntentKit is an autonomous agent framework that enables the creation and management of AI agents with various capabilities including blockchain interaction, social media management, and custom skill integration. + +## Package Manager Migration Warning + +We just migrated to uv from poetry. +You need to delete the .venv folder and run `uv sync` to create a new virtual environment. (one time) +```bash +rm -rf .venv +uv sync +``` + +## Features + +- 🤖 Multiple Agent Support +- 🔄 Autonomous Agent Management +- 🔗 Blockchain Integration (EVM chains first) +- đŸĻ Social Media Integration (Twitter, Telegram, and more) +- đŸ› ī¸ Extensible Skill System +- 🔌 MCP (WIP) + +## Architecture + +``` + + Entrypoints + │ │ + │ Twitter/Telegram & more │ + └──────────────â”Ŧ──────────────┘ + │ + Storage: ────┐ │ ┌──── Skills: + │ │ │ + Agent Config │ ┌───────────────â–ŧ────────────────┐ │ Chain Integration + │ │ │ │ + Credentials │ │ │ │ Wallet Management + │ │ The Agent │ │ + Personality │ │ │ │ On-Chain Actions + │ │ │ │ + Memory │ │ Powered by LangGraph │ │ Internet Search + │ │ │ │ + Skill State │ └────────────────────────────────┘ │ Image Processing + ────┘ └──── + + More and More... + ┌──────────────────────────┐ + │ │ + │ Agent Config & Memory │ + │ │ + └──────────────────────────┘ + +``` + +The architecture is a simplified view, and more details can be found in the [Architecture](docs/architecture.md) section. + +## Development + +Read [Development Guide](DEVELOPMENT.md) to get started with your setup. + +## Documentation + +Check out [Documentation](docs/) before you start. + +## Project Structure + +The project is divided into the core package and the application: + +- **[intentkit/](intentkit/)**: The IntentKit package (published as a pip package) + - [abstracts/](intentkit/abstracts/): Abstract classes and interfaces for core and skills + - [clients/](intentkit/clients/): Clients for external services + - [config/](intentkit/config/): System level configurations + - [core/](intentkit/core/): Core agent system, driven by LangGraph + - [models/](intentkit/models/): Entity models using Pydantic and SQLAlchemy + - [skills/](intentkit/skills/): Extensible skills system, based on LangChain tools + - [utils/](intentkit/utils/): Utility functions + +- **[app/](app/)**: The IntentKit app (API server, autonomous runner, and background scheduler) + - [admin/](app/admin/): Admin APIs, agent generators, and related functionality + - [entrypoints/](app/entrypoints/): Entrypoints for interacting with agents (web, Telegram, Twitter, etc.) + - [services/](app/services/): Service implementations for Telegram, Twitter, etc. + - [api.py](app/api.py): REST API server + - [autonomous.py](app/autonomous.py): Autonomous agent runner + - [checker.py](app/checker.py): Health and credit checking logic + - [readonly.py](app/readonly.py): Readonly entrypoint + - [scheduler.py](app/scheduler.py): Background task scheduler + - [singleton.py](app/singleton.py): Singleton agent manager + - [telegram.py](app/telegram.py): Telegram integration + - [twitter.py](app/twitter.py): Twitter integration + +- [docs/](docs/): Documentation +- [scripts/](scripts/): Operation and temporary scripts for management and migrations + +## Agent API + +IntentKit provides a comprehensive REST API for programmatic access to your agents. Build applications, integrate with existing systems, or create custom interfaces using our Agent API. 
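+
+As a quick illustration, an integration call could look like the sketch below. The host, route, and auth header are placeholders, not the real API surface; the actual endpoints are specified in the Agent API documentation linked underneath.
+
+```python
+# Hypothetical sketch only -- consult docs/agent_api.md for the real routes.
+import httpx
+
+resp = httpx.post(
+    "https://your-intentkit-host/agents/<agent_id>/chat",  # placeholder route
+    headers={"Authorization": "Bearer <your-api-key>"},  # placeholder auth
+    json={"message": "What can you do?"},
+)
+resp.raise_for_status()
+print(resp.json())
+```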
+
+**Get Started:** [Agent API Documentation](docs/agent_api.md)
+
+## Contributing
+
+Contributions are welcome! Please read our [Contributing Guidelines](CONTRIBUTING.md) before submitting a pull request.
+
+### Contribute Skills
+
+First check [Wishlist](docs/contributing/wishlist.md) for active requests.
+
+Once you are ready to start, see [Skill Development Guide](docs/contributing/skills.md) for more information.
+
+### Developer Chat
+
+Join our [Discord](https://discord.com/invite/crestal) and open a support ticket to apply for an intentkit dev role.
+
+We have a discussion channel there where you can join up with the rest of the developers.
+
+## License
+
+This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
diff --git a/SECURITY.md b/SECURITY.md
new file mode 100644
index 00000000..2b4d7cb9
--- /dev/null
+++ b/SECURITY.md
@@ -0,0 +1,139 @@
+# Security Policy
+
+## Alpha Stage Warning
+
+âš ī¸ IntentKit is currently in the alpha stage. While we take security seriously, the software may contain unknown vulnerabilities. Use it at your own risk; it is not recommended for production environments without a thorough security review.
+
+## Reporting a Vulnerability
+
+We take the security of IntentKit seriously. If you believe you have found a security vulnerability, please report it to us as described below.
+
+**Please do NOT report security vulnerabilities through public GitHub issues.**
+
+Instead, please report them via email to [security@crestal.network](mailto:security@crestal.network) with the following information:
+
+1. Description of the vulnerability
+2. Steps to reproduce the issue
+3. Potential impact
+4. Suggested fix (if any)
+
+You should receive a response within 48 hours. If for some reason you do not, please follow up via email to ensure we received your original message.
+
+## Security Best Practices
+
+### API Keys and Credentials
+
+1. **Environment Variables**
+   - Never commit API keys or credentials to version control
+   - Use environment variables or secure secret management
+   - Follow the example in `example.env`
+
+2. **Access Control**
+   - Implement proper authentication for your deployment
+   - Use secure session management
+   - Regularly rotate API keys and credentials
+
+3. **Network Security**
+   - Deploy behind a reverse proxy with SSL/TLS
+   - Use firewalls to restrict access
+   - Monitor for unusual traffic patterns
+
+### Agent Security
+
+1. **Quota Management**
+   - Always implement rate limiting
+   - Monitor agent usage patterns
+   - Set appropriate quotas for your use case
+
+2. **Tool Access**
+   - Carefully review tool permissions
+   - Implement tool-specific rate limiting
+   - Monitor tool usage and audit logs
+
+3. **Autonomous Execution**
+   - Review autonomous prompts carefully
+   - Implement safeguards for autonomous actions
+   - Monitor autonomous agent behavior
+
+### Database Security
+
+1. **Connection Security**
+   - Use strong passwords
+   - Enable SSL for database connections
+   - Restrict database access to necessary operations
+
+2. **Data Protection**
+   - Encrypt sensitive data at rest
+   - Implement proper backup procedures
+   - Perform regular security audits
+
+### Deployment Security
+
+1. **Container Security**
+   - Keep base images updated
+   - Run containers as non-root
+   - Scan containers for vulnerabilities
+
+2. **Infrastructure**
+   - Use secure infrastructure configurations
+   - Implement logging and monitoring
+   - Apply regular security updates
+
+## Known Limitations
+
+1.
**Alpha Stage Limitations** + - Security features may be incomplete + - APIs may change without notice + - Some security controls are still in development + +2. **Integration Security** + - Third-party integrations may have their own security considerations + - Review security implications of enabled integrations + - Monitor integration access patterns + +## Security Updates + +Security updates will be released as soon as possible after a vulnerability is confirmed. Updates will be published: + +1. As GitHub releases with security notes +2. Via security advisories for critical issues +3. Through our notification system for registered users + +## Secure Development + +When contributing to IntentKit, please follow these security guidelines: + +1. **Code Review** + - All code must be reviewed before merging + - Security-sensitive changes require additional review + - Follow secure coding practices + +2. **Dependencies** + - Keep dependencies up to date + - Review security advisories for dependencies + - Use dependency scanning tools + +3. **Testing** + - Include security tests where applicable + - Test for common vulnerabilities + - Validate input and output handling + +## Version Support + +Given the alpha stage of the project, we currently: +- Support only the latest release +- Provide security updates for critical vulnerabilities +- Recommend frequent updates to the latest version + +## Acknowledgments + +We would like to thank the following for their contributions to our security: + +- All security researchers who responsibly disclose vulnerabilities +- Our community members who help improve our security +- Contributors who help implement security features + +## Contact + +For any questions about this security policy, please contact: +- Email: [security@crestal.network](mailto:security@crestal.network) diff --git a/__init__.py b/__init__.py new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/__init__.py @@ -0,0 +1 @@ + diff --git a/app/__init__.py b/app/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/admin/__init__.py b/app/admin/__init__.py new file mode 100644 index 00000000..f022e838 --- /dev/null +++ b/app/admin/__init__.py @@ -0,0 +1,20 @@ +from app.admin.agent_generator_api import router as agent_generator_router +from app.admin.api import admin_router, admin_router_readonly +from app.admin.credit import credit_router, credit_router_readonly +from app.admin.health import health_router +from app.admin.metadata import metadata_router_readonly +from app.admin.schema import schema_router_readonly +from app.admin.user import user_router, user_router_readonly + +__all__ = [ + "admin_router", + "admin_router_readonly", + "health_router", + "schema_router_readonly", + "credit_router", + "credit_router_readonly", + "metadata_router_readonly", + "user_router", + "user_router_readonly", + "agent_generator_router", +] diff --git a/app/admin/account_checking.py b/app/admin/account_checking.py new file mode 100644 index 00000000..1002dfac --- /dev/null +++ b/app/admin/account_checking.py @@ -0,0 +1,804 @@ +import asyncio +import logging +from datetime import datetime, timedelta, timezone +from decimal import Decimal +from typing import Dict, List, Optional + +from sqlalchemy import select, text + +from intentkit.config.config import config +from intentkit.models.credit import ( + CreditAccount, + CreditAccountTable, + CreditEvent, + CreditEventTable, + CreditTransaction, + CreditTransactionTable, +) +from intentkit.models.db import get_session, init_db + 
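+# Consistency checks for the credit ledger. Each check opens its own database
+# session, pages through rows by ID to bound memory use, and sleeps briefly
+# between rows to reduce load; findings are returned as AccountCheckingResult
+# objects and surfaced through logs and Slack alerts.
+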
+logger = logging.getLogger(__name__) + + +class AccountCheckingResult: + """Result of an account checking operation.""" + + def __init__(self, check_type: str, status: bool, details: Optional[Dict] = None): + self.check_type = check_type + self.status = status # True if check passed, False if failed + self.details = details or {} + self.timestamp = datetime.now(timezone.utc) + + def __str__(self) -> str: + status_str = "PASSED" if self.status else "FAILED" + return f"[{self.timestamp.isoformat()}] {self.check_type}: {status_str} - {self.details}" + + +async def check_account_balance_consistency( + check_recent_only: bool = True, recent_hours: int = 24 +) -> List[AccountCheckingResult]: + """Check if all account balances are consistent with their transactions. + + This verifies that the total balance in each account matches the sum of all transactions + for that account, properly accounting for credits and debits. + + To ensure consistency during system operation, this function processes accounts in batches + using ID-based pagination and uses the last_event_id from each account to limit + transaction queries, ensuring that only transactions from events up to and including + the last recorded event for that account are considered. + + Args: + check_recent_only: If True, only check accounts updated within recent_hours. Default True. + recent_hours: Number of hours to look back for recent updates. Default 24. + + Returns: + List of checking results + """ + results = [] + batch_size = 1000 # Process 1000 accounts at a time + total_processed = 0 + batch_count = 0 + last_id = "" # Starting ID for pagination (empty string comes before all valid IDs) + + # Calculate time threshold for recent updates if needed + time_threshold = None + if check_recent_only: + time_threshold = datetime.now(timezone.utc) - timedelta(hours=recent_hours) + + while True: + # Create a new session for each batch to prevent timeouts + async with get_session() as session: + # Get accounts in batches using ID-based pagination + query = ( + select(CreditAccountTable) + .where(CreditAccountTable.id > last_id) # ID-based pagination + .order_by(CreditAccountTable.id) + .limit(batch_size) + ) + + # Add time filter if checking recent updates only + if check_recent_only and time_threshold: + query = query.where(CreditAccountTable.updated_at >= time_threshold) + accounts_result = await session.execute(query) + batch_accounts = [ + CreditAccount.model_validate(acc) + for acc in accounts_result.scalars().all() + ] + + # If no more accounts to process, break the loop + if not batch_accounts: + break + + # Update counters and last_id for next iteration + batch_count += 1 + current_batch_size = len(batch_accounts) + total_processed += current_batch_size + last_id = batch_accounts[-1].id # Update last_id for next batch + + logger.info( + f"Processing account balance batch: {batch_count}, accounts: {current_batch_size}" + ) + + # Process each account in the batch + for account in batch_accounts: + # Sleep for 10ms to reduce database load + await asyncio.sleep(0.01) + + # Calculate the total balance across all credit types + total_balance = ( + account.free_credits + account.reward_credits + account.credits + ) + + # Calculate the expected balance from all transactions, regardless of credit type + # If account has last_event_id, only include transactions from events up to and including that event + # If no last_event_id, include all transactions for the account + if account.last_event_id: + query = text(""" + SELECT + SUM(CASE WHEN 
ct.credit_debit = 'credit' THEN ct.change_amount ELSE 0 END) as credits, + SUM(CASE WHEN ct.credit_debit = 'debit' THEN ct.change_amount ELSE 0 END) as debits, + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.free_amount ELSE -ct.free_amount END) as free_credits_sum, + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.reward_amount ELSE -ct.reward_amount END) as reward_credits_sum, + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.permanent_amount ELSE -ct.permanent_amount END) as permanent_credits_sum + FROM credit_transactions ct + JOIN credit_events ce ON ct.event_id = ce.id + WHERE ct.account_id = :account_id + AND ce.id <= :last_event_id + """) + + tx_result = await session.execute( + query, + { + "account_id": account.id, + "last_event_id": account.last_event_id, + }, + ) + else: + query = text(""" + SELECT + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.change_amount ELSE 0 END) as credits, + SUM(CASE WHEN ct.credit_debit = 'debit' THEN ct.change_amount ELSE 0 END) as debits, + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.free_amount ELSE -ct.free_amount END) as free_credits_sum, + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.reward_amount ELSE -ct.reward_amount END) as reward_credits_sum, + SUM(CASE WHEN ct.credit_debit = 'credit' THEN ct.permanent_amount ELSE -ct.permanent_amount END) as permanent_credits_sum + FROM credit_transactions ct + WHERE ct.account_id = :account_id + """) + + tx_result = await session.execute( + query, + {"account_id": account.id}, + ) + tx_data = tx_result.fetchone() + + credits = tx_data.credits or Decimal("0") + debits = tx_data.debits or Decimal("0") + expected_balance = credits - debits + + # Calculate expected balances for each credit type + expected_free_credits = tx_data.free_credits_sum or Decimal("0") + expected_reward_credits = tx_data.reward_credits_sum or Decimal("0") + expected_permanent_credits = tx_data.permanent_credits_sum or Decimal( + "0" + ) + + # Compare total balances and individual credit type balances + is_total_consistent = total_balance == expected_balance + is_free_consistent = account.free_credits == expected_free_credits + is_reward_consistent = account.reward_credits == expected_reward_credits + is_permanent_consistent = account.credits == expected_permanent_credits + + is_consistent = ( + is_total_consistent + and is_free_consistent + and is_reward_consistent + and is_permanent_consistent + ) + + result = AccountCheckingResult( + check_type="account_balance_consistency", + status=is_consistent, + details={ + "account_id": account.id, + "owner_type": account.owner_type, + "owner_id": account.owner_id, + "current_total_balance": float(total_balance), + "free_credits": float(account.free_credits), + "reward_credits": float(account.reward_credits), + "permanent_credits": float(account.credits), + "expected_total_balance": float(expected_balance), + "expected_free_credits": float(expected_free_credits), + "expected_reward_credits": float(expected_reward_credits), + "expected_permanent_credits": float(expected_permanent_credits), + "total_credits": float(credits), + "total_debits": float(debits), + "total_balance_difference": float( + total_balance - expected_balance + ), + "free_credits_difference": float( + account.free_credits - expected_free_credits + ), + "reward_credits_difference": float( + account.reward_credits - expected_reward_credits + ), + "permanent_credits_difference": float( + account.credits - expected_permanent_credits + ), + "is_total_consistent": is_total_consistent, + "is_free_consistent": 
is_free_consistent,
+                        "is_reward_consistent": is_reward_consistent,
+                        "is_permanent_consistent": is_permanent_consistent,
+                        "last_event_id": account.last_event_id,
+                        "batch": batch_count,
+                        "check_recent_only": check_recent_only,
+                        "recent_hours": recent_hours if check_recent_only else None,
+                    },
+                )
+                results.append(result)
+
+                if not is_consistent:
+                    inconsistency_details = []
+                    if not is_total_consistent:
+                        inconsistency_details.append(
+                            f"Total: {total_balance} vs {expected_balance}"
+                        )
+                    if not is_free_consistent:
+                        inconsistency_details.append(
+                            f"Free: {account.free_credits} vs {expected_free_credits}"
+                        )
+                    if not is_reward_consistent:
+                        inconsistency_details.append(
+                            f"Reward: {account.reward_credits} vs {expected_reward_credits}"
+                        )
+                    if not is_permanent_consistent:
+                        inconsistency_details.append(
+                            f"Permanent: {account.credits} vs {expected_permanent_credits}"
+                        )
+
+                    logger.warning(
+                        f"Account balance inconsistency detected: {account.id} ({account.owner_type}:{account.owner_id}) - "
+                        f"{'; '.join(inconsistency_details)}"
+                    )
+
+    filter_info = (
+        f" (recent {recent_hours}h only)" if check_recent_only else " (all accounts)"
+    )
+    logger.info(
+        f"Completed account balance consistency check{filter_info}: processed {total_processed} accounts in {batch_count} batches"
+    )
+
+    return results
+
+
+async def check_transaction_balance() -> List[AccountCheckingResult]:
+    """Check if all credit events have balanced transactions.
+
+    For each credit event, the sum of all credit transactions should equal the sum of all debit transactions.
+    Events are processed in batches using ID-based pagination to prevent memory overflow and keep queries fast.
+
+    Returns:
+        List of checking results
+    """
+    results = []
+    batch_size = 1000  # Process 1000 events at a time
+    total_processed = 0
+    batch_count = 0
+    last_id = ""  # Starting ID for pagination (empty string comes before all valid IDs)
+
+    # Time window for events (only check the last 4 hours for performance)
+    time_threshold = datetime.now(timezone.utc) - timedelta(hours=4)
+
+    while True:
+        # Create a new session for each batch to prevent timeouts
+        async with get_session() as session:
+            # Get events in batches using ID-based pagination
+            query = (
+                select(CreditEventTable)
+                .where(CreditEventTable.created_at >= time_threshold)
+                .where(
+                    CreditEventTable.id > last_id
+                )  # ID-based pagination with string comparison
+                .order_by(CreditEventTable.id)
+                .limit(batch_size)
+            )
+            events_result = await session.execute(query)
+            batch_events = [
+                CreditEvent.model_validate(event)
+                for event in events_result.scalars().all()
+            ]
+
+            # If no more events to process, break the loop
+            if not batch_events:
+                break
+
+            # Update counters and last_id for next iteration
+            batch_count += 1
+            current_batch_size = len(batch_events)
+            total_processed += current_batch_size
+            last_id = batch_events[-1].id  # Update last_id for next batch
+
+            logger.info(
+                f"Processing transaction balance batch: {batch_count}, events: {current_batch_size}"
+            )
+
+            # Process each event in the batch
+            for event in batch_events:
+                # Sleep for 10ms to reduce database load
+                await asyncio.sleep(0.01)
+
+                # Get all transactions for this event
+                tx_query = select(CreditTransactionTable).where(
+                    CreditTransactionTable.event_id == event.id
+                )
+                tx_result = await session.execute(tx_query)
+                transactions = [
+                    CreditTransaction.model_validate(tx)
+                    for tx in tx_result.scalars().all()
+                ]
+
+                # Calculate credit and debit sums
+                credit_sum = sum(
+                    tx.change_amount
+                    for tx in
transactions + if tx.credit_debit == "credit" + ) + debit_sum = sum( + tx.change_amount + for tx in transactions + if tx.credit_debit == "debit" + ) + + # Check if they balance + is_balanced = credit_sum == debit_sum + + result = AccountCheckingResult( + check_type="transaction_balance", + status=is_balanced, + details={ + "event_id": event.id, + "event_type": event.event_type, + "credit_sum": float(credit_sum), + "debit_sum": float(debit_sum), + "difference": float(credit_sum - debit_sum), + "created_at": event.created_at.isoformat() + if event.created_at + else None, + "batch": batch_count, + }, + ) + results.append(result) + + if not is_balanced: + logger.warning( + f"Transaction imbalance detected for event {event.id} ({event.event_type}). " + f"Credit: {credit_sum}, Debit: {debit_sum}" + ) + + logger.info( + f"Completed transaction balance check: processed {total_processed} events in {batch_count} batches" + ) + + return results + + +async def check_orphaned_transactions() -> List[AccountCheckingResult]: + """Check for orphaned transactions that don't have a corresponding event. + + Returns: + List of checking results + """ + # Create a new session for this function + async with get_session() as session: + # Find transactions with event_ids that don't exist in the events table + query = text(""" + SELECT t.id, t.account_id, t.event_id, t.tx_type, t.credit_debit, t.change_amount, t.credit_type, t.created_at + FROM credit_transactions t + LEFT JOIN credit_events e ON t.event_id = e.id + WHERE e.id IS NULL + """) + + result = await session.execute(query) + orphaned_txs = result.fetchall() + + # Process orphaned transactions with a sleep to reduce database load + orphaned_tx_details = [] + for tx in orphaned_txs[:100]: # Limit to first 100 for report size + # Sleep for 10ms to reduce database load + await asyncio.sleep(0.01) + + # Add transaction details to the list + orphaned_tx_details.append( + { + "id": tx.id, + "account_id": tx.account_id, + "event_id": tx.event_id, + "tx_type": tx.tx_type, + "credit_debit": tx.credit_debit, + "change_amount": float(tx.change_amount), + "credit_type": tx.credit_type, + "created_at": tx.created_at.isoformat() if tx.created_at else None, + } + ) + + check_result = AccountCheckingResult( + check_type="orphaned_transactions", + status=(len(orphaned_txs) == 0), + details={ + "orphaned_count": len(orphaned_txs), + "orphaned_transactions": orphaned_tx_details, + }, + ) + + if orphaned_txs: + logger.warning( + f"Found {len(orphaned_txs)} orphaned transactions without corresponding events" + ) + + return [check_result] + + +async def check_orphaned_events() -> List[AccountCheckingResult]: + """Check for orphaned events that don't have any transactions. 
+ + Returns: + List of checking results + """ + # Create a new session for this function + async with get_session() as session: + # Find events that don't have any transactions + query = text(""" + SELECT e.id, e.event_type, e.account_id, e.total_amount, e.credit_type, e.created_at + FROM credit_events e + LEFT JOIN credit_transactions t ON e.id = t.event_id + WHERE t.id IS NULL + """) + + result = await session.execute(query) + orphaned_events = result.fetchall() + + if not orphaned_events: + return [ + AccountCheckingResult( + check_type="orphaned_events", + status=True, + details={"message": "No orphaned events found"}, + ) + ] + + # If we found orphaned events, report them + orphaned_event_ids = [event.id for event in orphaned_events] + orphaned_event_details = [] + for event in orphaned_events: + # Sleep for 10ms to reduce database load + await asyncio.sleep(0.01) + + # Add event details to the list + orphaned_event_details.append( + { + "event_id": event.id, + "event_type": event.event_type, + "account_id": event.account_id, + "total_amount": float(event.total_amount), + "credit_type": event.credit_type, + "created_at": event.created_at.isoformat() + if event.created_at + else None, + } + ) + + logger.warning( + f"Found {len(orphaned_events)} orphaned events with no transactions: {orphaned_event_ids}" + ) + + return [ + AccountCheckingResult( + check_type="orphaned_events", + status=False, + details={ + "orphaned_count": len(orphaned_events), + "orphaned_events": orphaned_event_details, + }, + ) + ] + + +async def check_total_credit_balance() -> List[AccountCheckingResult]: + """Check if the sum of all free_credits, reward_credits, and credits across all accounts is 0. + + This verifies that the overall credit system is balanced, with all credits accounted for. + + Returns: + List of checking results + """ + # Create a new session for this function + async with get_session() as session: + # Query to sum all credit types across all accounts + query = text(""" + SELECT + SUM(free_credits) as total_free_credits, + SUM(reward_credits) as total_reward_credits, + SUM(credits) as total_permanent_credits, + SUM(free_credits) + SUM(reward_credits) + SUM(credits) as grand_total + FROM credit_accounts + """) + + result = await session.execute(query) + balance_data = result.fetchone() + + total_free_credits = balance_data.total_free_credits or Decimal("0") + total_reward_credits = balance_data.total_reward_credits or Decimal("0") + total_permanent_credits = balance_data.total_permanent_credits or Decimal("0") + grand_total = balance_data.grand_total or Decimal("0") + + # Check if the grand total is zero (or very close to zero due to potential floating point issues) + is_balanced = grand_total == Decimal("0") + + # If not exactly zero but very close (due to potential rounding issues), log a warning but still consider it balanced + if not is_balanced and abs(grand_total) < Decimal("0.001"): + logger.warning( + f"Total credit balance is very close to zero but not exact: {grand_total}. " + f"This might be due to rounding issues." + ) + is_balanced = True + + result = AccountCheckingResult( + check_type="total_credit_balance", + status=is_balanced, + details={ + "total_free_credits": float(total_free_credits), + "total_reward_credits": float(total_reward_credits), + "total_permanent_credits": float(total_permanent_credits), + "grand_total": float(grand_total), + }, + ) + + if not is_balanced: + logger.warning( + f"Total credit balance inconsistency detected. System is not balanced. 
" + f"Total: {grand_total} (Free: {total_free_credits}, Reward: {total_reward_credits}, " + f"Permanent: {total_permanent_credits})" + ) + + return [result] + + +async def check_transaction_total_balance() -> List[AccountCheckingResult]: + """Check if the total credit and debit amounts in the CreditTransaction table are balanced. + + This verifies that across all transactions in the system, the total credits equal the total debits. + + Returns: + List of checking results + """ + # Create a new session for this function + async with get_session() as session: + # Query to sum all credit and debit transactions + query = text(""" + SELECT + SUM(CASE WHEN credit_debit = 'credit' THEN change_amount ELSE 0 END) as total_credits, + SUM(CASE WHEN credit_debit = 'debit' THEN change_amount ELSE 0 END) as total_debits + FROM credit_transactions + """) + + result = await session.execute(query) + balance_data = result.fetchone() + + total_credits = balance_data.total_credits or Decimal("0") + total_debits = balance_data.total_debits or Decimal("0") + difference = total_credits - total_debits + + # Check if credits and debits are balanced (difference should be zero) + is_balanced = difference == Decimal("0") + + # If not exactly zero but very close (due to potential rounding issues), log a warning but still consider it balanced + if not is_balanced and abs(difference) < Decimal("0.001"): + logger.warning( + f"Transaction total balance is very close to zero but not exact: {difference}. " + f"This might be due to rounding issues." + ) + is_balanced = True + + result = AccountCheckingResult( + check_type="transaction_total_balance", + status=is_balanced, + details={ + "total_credits": float(total_credits), + "total_debits": float(total_debits), + "difference": float(difference), + }, + ) + + if not is_balanced: + logger.warning( + f"Transaction total balance inconsistency detected. System is not balanced. " + f"Credits: {total_credits}, Debits: {total_debits}, Difference: {difference}" + ) + + return [result] + + +async def run_quick_checks() -> Dict[str, List[AccountCheckingResult]]: + """Run quick account checking procedures and return results. + + These checks are designed to be fast and can be run frequently. 
+
+    Returns:
+        Dictionary mapping check names to their results
+    """
+    logger.info("Starting quick account checking procedures")
+
+    results = {}
+    # Quick checks don't need a session at this level as each function creates its own session
+    results["transaction_balance"] = await check_transaction_balance()
+    results["orphaned_transactions"] = await check_orphaned_transactions()
+    results["orphaned_events"] = await check_orphaned_events()
+    results["total_credit_balance"] = await check_total_credit_balance()
+    results["transaction_total_balance"] = await check_transaction_total_balance()
+
+    # Log summary
+    all_passed = True
+    failed_count = 0
+    for check_name, check_results in results.items():
+        check_failed_count = sum(1 for result in check_results if not result.status)
+        failed_count += check_failed_count
+
+        if check_failed_count > 0:
+            logger.warning(
+                f"{check_name}: {check_failed_count} of {len(check_results)} checks failed"
+            )
+            all_passed = False
+        else:
+            logger.info(f"{check_name}: All {len(check_results)} checks passed")
+
+    if all_passed:
+        logger.info("All quick account checks passed successfully")
+    else:
+        logger.warning(
+            f"Quick account checking summary: {failed_count} checks failed - see logs for details"
+        )
+
+    # Send summary to Slack
+    from intentkit.utils.slack_alert import send_slack_message
+
+    # Create a summary message with color based on status
+    total_checks = sum(len(check_results) for check_results in results.values())
+
+    if all_passed:
+        color = "good"  # Green color
+        title = "✅ Quick Account Checking Completed Successfully"
+        text = f"All {total_checks} quick account checks passed successfully."
+        notify = ""  # No notification needed for success
+    else:
+        color = "danger"  # Red color
+        title = "❌ Quick Account Checking Found Issues"
+        text = f"Quick account checking found {failed_count} issues out of {total_checks} checks."
+        notify = "<!channel> "  # Notify channel for failures
+
+    # Create attachments with check details
+    attachments = [{"color": color, "title": title, "text": text, "fields": []}]
+
+    # Add fields for each check type
+    for check_name, check_results in results.items():
+        check_failed_count = sum(1 for result in check_results if not result.status)
+        check_status = (
+            "✅ Passed"
+            if check_failed_count == 0
+            else f"❌ Failed ({check_failed_count} issues)"
+        )
+
+        attachments[0]["fields"].append(
+            {
+                "title": check_name.replace("_", " ").title(),
+                "value": check_status,
+                "short": True,
+            }
+        )
+
+    # Send the message
+    send_slack_message(
+        message=f"{notify}Quick Account Checking Results", attachments=attachments
+    )
+
+    return results
+
+
+async def run_slow_checks() -> Dict[str, List[AccountCheckingResult]]:
+    """Run slow account checking procedures and return results.
+
+    These checks are more resource-intensive and should be run less frequently.
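+    Currently this covers the account balance consistency check, which verifies
+    each account's balances against its recorded transaction history.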
+
+    Returns:
+        Dictionary mapping check names to their results
+    """
+    logger.info("Starting slow account checking procedures")
+
+    results = {}
+    # Slow checks don't need a session at this level as each function creates its own session
+    results["account_balance"] = await check_account_balance_consistency()
+
+    # Log summary
+    all_passed = True
+    failed_count = 0
+    for check_name, check_results in results.items():
+        check_failed_count = sum(1 for result in check_results if not result.status)
+        failed_count += check_failed_count
+
+        if check_failed_count > 0:
+            logger.warning(
+                f"{check_name}: {check_failed_count} of {len(check_results)} checks failed"
+            )
+            all_passed = False
+        else:
+            logger.info(f"{check_name}: All {len(check_results)} checks passed")
+
+    if all_passed:
+        logger.info("All slow account checks passed successfully")
+    else:
+        logger.warning(
+            f"Slow account checking summary: {failed_count} checks failed - see logs for details"
+        )
+
+    # Send summary to Slack
+    from intentkit.utils.slack_alert import send_slack_message
+
+    # Create a summary message with color based on status
+    total_checks = sum(len(check_results) for check_results in results.values())
+
+    if all_passed:
+        color = "good"  # Green color
+        title = "✅ Slow Account Checking Completed Successfully"
+        text = f"All {total_checks} slow account checks passed successfully."
+        notify = ""  # No notification needed for success
+    else:
+        color = "danger"  # Red color
+        title = "❌ Slow Account Checking Found Issues"
+        text = f"Slow account checking found {failed_count} issues out of {total_checks} checks."
+        notify = "<!channel> "  # Notify channel for failures
+
+    # Create attachments with check details
+    attachments = [{"color": color, "title": title, "text": text, "fields": []}]
+
+    # Add fields for each check type
+    for check_name, check_results in results.items():
+        check_failed_count = sum(1 for result in check_results if not result.status)
+        check_status = (
+            "✅ Passed"
+            if check_failed_count == 0
+            else f"❌ Failed ({check_failed_count} issues)"
+        )
+
+        attachments[0]["fields"].append(
+            {
+                "title": check_name.replace("_", " ").title(),
+                "value": check_status,
+                "short": True,
+            }
+        )
+
+    # Send the message
+    send_slack_message(
+        message=f"{notify}Slow Account Checking Results", attachments=attachments
+    )
+
+    return results
+
+
+async def main():
+    """Main entry point for running account checks."""
+    await init_db(**config.db)
+    logger.info("Starting account balance consistency check (permanent mode)")
+
+    # Run the full account balance consistency check across all accounts
+    results = await check_account_balance_consistency(check_recent_only=False)
+
+    # Print summary of results
+    total_accounts = len(results)
+    failed_accounts = sum(1 for result in results if not result.status)
+    passed_accounts = total_accounts - failed_accounts
+
+    logger.info("Account balance consistency check completed:")
+    logger.info(f" Total accounts checked: {total_accounts}")
+    logger.info(f" Passed: {passed_accounts}")
+    logger.info(f" Failed: {failed_accounts}")
+
+    if failed_accounts > 0:
+        logger.warning(f"Found {failed_accounts} accounts with balance inconsistencies")
+        # Log details of first few failed accounts for debugging
+        for i, result in enumerate([r for r in results if not r.status][:5]):
+            details = result.details
+            logger.warning(
+                f" Account {i + 1}: {details['account_id']} - "
+                f"Total: {details['current_total_balance']} vs {details['expected_total_balance']}, "
+                f"Free: {details['free_credits']} vs {details['expected_free_credits']}, "
+                f"Reward: {details['reward_credits']} vs {details['expected_reward_credits']}, "
+                f"Permanent: {details['permanent_credits']} vs {details['expected_permanent_credits']}"
+            )
+    else:
+        logger.info("All accounts have consistent balances!")
+
+    return results
+
+
+if __name__ == "__main__":
+    # Run the main function
+    asyncio.run(main())
diff --git a/app/admin/agent_generator_api.py b/app/admin/agent_generator_api.py
new file mode 100644
index 00000000..1181f4de
--- /dev/null
+++ b/app/admin/agent_generator_api.py
@@ -0,0 +1,414 @@
+"""Agent Generator API.
+
+FastAPI endpoints for generating agent schemas from natural language prompts.
+"""
+
+import logging
+from datetime import datetime
+from typing import Any, Dict, List, Optional
+
+from fastapi import APIRouter, HTTPException
+from pydantic import BaseModel, Field, field_validator
+
+from app.admin.generator import generate_validated_agent_schema
+from app.admin.generator.conversation_service import (
+    get_conversation_history,
+    get_project_metadata,
+    get_projects_by_user,
+)
+from app.admin.generator.llm_logger import (
+    LLMLogger,
+    create_llm_logger,
+)
+from app.admin.generator.utils import generate_tags_from_nation_api
+from intentkit.models.agent import AgentUpdate
+
+logger = logging.getLogger(__name__)
+
+# Create router
+router = APIRouter(
+    prefix="/agent",
+    tags=["Agent"],
+)
+
+
+class AgentGenerateRequest(BaseModel):
+    """Request model for agent generation."""
+
+    prompt: str = Field(
+        ...,
+        description="Natural language description of the agent's desired capabilities",
+        min_length=10,
+        max_length=1000,
+    )
+
+    existing_agent: Optional[AgentUpdate] = Field(
+        None,
+        description="Existing agent to update. If provided, the LLM will make minimal changes to this agent based on the prompt. If null, a new agent will be created.",
+    )
+
+    user_id: str = Field(
+        ..., description="User ID for logging and rate limiting purposes"
+    )
+
+    project_id: Optional[str] = Field(
+        None,
+        description="Project ID for conversation history. If not provided, a new project will be created.",
+    )
+
+    # Pydantic V2 field_validator is used here, per the repo rule against the
+    # obsolete V1 `validator` interface.
+    @field_validator("prompt")
+    @classmethod
+    def validate_prompt_length(cls, v):
+        if len(v) < 10:
+            raise ValueError(
+                "Prompt is too short. Please provide at least 10 characters describing the agent's capabilities."
+            )
+        if len(v) > 1000:
+            raise ValueError(
+                "Prompt is too long. Please keep your description under 1000 characters to ensure efficient processing."
+            )
+        return v
+
+    @field_validator("user_id")
+    @classmethod
+    def validate_user_id(cls, v):
+        if not v or not v.strip():
+            raise ValueError(
+                "User ID is required and cannot be empty. Please provide a valid user identifier."
+ ) + return v.strip() + + +class AgentGenerateResponse(BaseModel): + """Response model for agent generation.""" + + agent: Dict[str, Any] = Field(..., description="The generated agent schema") + + project_id: str = Field(..., description="Project ID for this conversation session") + + summary: str = Field( + ..., description="Human-readable summary of the generated agent" + ) + + tags: List[Dict[str, int]] = Field( + default_factory=list, + description="Generated tags for the agent as ID objects: [{'id': 1}, {'id': 2}]", + ) + + autonomous_tasks: List[Dict[str, Any]] = Field( + default_factory=list, + description="List of autonomous tasks generated for the agent", + ) + + activated_skills: List[str] = Field( + default_factory=list, + description="List of skills that were activated based on the prompt", + ) + + +class GenerationsListRequest(BaseModel): + """Request model for getting generations list.""" + + user_id: Optional[str] = Field(None, description="User ID to filter generations") + + limit: int = Field( + default=50, + description="Maximum number of recent projects to return", + ge=1, + le=100, + ) + + +class GenerationsListResponse(BaseModel): + """Response model for generations list.""" + + projects: List[Dict[str, Any]] = Field( + ..., description="List of recent projects with their conversation history" + ) + + +class GenerationDetailResponse(BaseModel): + """Response model for single generation detail.""" + + project_id: str = Field(..., description="Project ID") + user_id: Optional[str] = Field(None, description="User ID who owns this project") + created_at: Optional[str] = Field(None, description="Project creation timestamp") + last_activity: Optional[str] = Field(None, description="Last activity timestamp") + message_count: int = Field(..., description="Number of messages in conversation") + last_message: Optional[Dict[str, Any]] = Field( + None, description="Last message in conversation" + ) + first_message: Optional[Dict[str, Any]] = Field( + None, description="First message in conversation" + ) + conversation_history: List[Dict[str, Any]] = Field( + ..., description="Full conversation history" + ) + + +@router.post( + "/generate", + summary="Generate Agent from Natural Language Prompt", + response_model=AgentGenerateResponse, +) +async def generate_agent( + request: AgentGenerateRequest, +) -> AgentGenerateResponse: + """Generate an agent schema from a natural language prompt. + + Converts plain English descriptions into complete, validated agent configurations. + Automatically identifies required skills, sets up configurations, detects autonomous + task patterns, and ensures everything works correctly with intelligent error correction. 
+ + **Autonomous Task Detection:** + The API can automatically detect scheduling patterns in prompts like: + - "Buy 0.1 ETH every hour" → Creates 60-minute autonomous task with CDP trade skill + - "Check portfolio daily" → Creates 24-hour autonomous task with portfolio skills + - "Post tweet every 30 minutes" → Creates 30-minute autonomous task with Twitter skill + + **Request Body:** + * `prompt` - Natural language description of the agent's desired capabilities and schedule + * `existing_agent` - Optional existing agent to update (preserves current setup while adding capabilities) + * `user_id` - Required user ID for logging and rate limiting + * `project_id` - Optional project ID for conversation history + + **Returns:** + * `AgentGenerateResponse` - Contains agent schema, autonomous tasks, activated skills, project ID, and summary + + **Response Fields:** + * `agent` - Complete agent schema with skills and autonomous configurations + * `autonomous_tasks` - List of autonomous tasks detected and configured + * `activated_skills` - List of skills that were activated based on the prompt + * `project_id` - Project ID for conversation tracking + * `summary` - Human-readable summary of the generated agent + * `tags` - Generated tags for categorization + + **Raises:** + * `HTTPException`: + - 400: Invalid request (missing user_id, invalid prompt format or length) + - 500: Agent generation failed after retries + """ + # Create or reuse LLM logger based on project_id + if request.project_id: + llm_logger = LLMLogger(request_id=request.project_id, user_id=request.user_id) + project_id = request.project_id + logger.info(f"Using existing project_id: {project_id}") + else: + llm_logger = create_llm_logger(user_id=request.user_id) + project_id = llm_logger.request_id + logger.info(f"Created new project_id: {project_id}") + + logger.info( + f"Agent generation request received: {request.prompt[:100]}... 
" + f"(project_id={project_id})" + ) + + # Determine if this is an update operation + is_update = request.existing_agent is not None + + if is_update: + logger.info( + f"Processing agent update with existing agent data (project_id={project_id})" + ) + + try: + # Generate agent schema with automatic validation and AI self-correction + ( + agent_schema, + identified_skills, + summary, + ) = await generate_validated_agent_schema( + prompt=request.prompt, + user_id=request.user_id, + existing_agent=request.existing_agent, + llm_logger=llm_logger, + ) + + # Generate tags using Nation API + tags = await generate_tags_from_nation_api(agent_schema, request.prompt) + + logger.info( + f"Agent generation completed successfully (project_id={project_id})" + ) + if is_update: + logger.info( + f"Agent schema updated via minimal changes with AI self-correction (project_id={project_id})" + ) + else: + logger.info( + f"New agent schema generated successfully with validation (project_id={project_id})" + ) + + # Extract autonomous tasks and activated skills from the schema + autonomous_tasks = agent_schema.get("autonomous", []) + activated_skills = list(agent_schema.get("skills", {}).keys()) + + # Enhanced logging for autonomous functionality + if autonomous_tasks: + logger.info(f" Autonomous tasks detected: {len(autonomous_tasks)} tasks") + for task in autonomous_tasks: + schedule_info = ( + f"{task.get('minutes')} minutes" + if task.get("minutes") + else task.get("cron", "unknown") + ) + logger.info(f" '{task.get('name', 'Unnamed Task')}' - {schedule_info}") + else: + logger.info(" No autonomous tasks in generated agent") + + logger.info( + f" Activated skills: {len(activated_skills)} skills - {activated_skills}" + ) + + return AgentGenerateResponse( + agent=agent_schema, + project_id=project_id, + summary=summary, + tags=tags, + autonomous_tasks=autonomous_tasks, + activated_skills=activated_skills, + ) + + except Exception as e: + # All internal retries and AI self-correction failed + logger.error( + f"Agent generation failed after all attempts (project_id={project_id}): {str(e)}", + exc_info=True, + ) + raise HTTPException( + status_code=500, + detail={ + "error": "AgentGenerationFailed", + "msg": f"Failed to generate valid agent: {str(e)}", + "project_id": project_id, + }, + ) + + +@router.get( + "/generations", + summary="Get Generations List by User", + response_model=GenerationsListResponse, +) +async def get_generations( + user_id: Optional[str] = None, limit: int = 50 +) -> GenerationsListResponse: + """Get all projects/generations for a user. 
+ + **Query Parameters:** + * `user_id` - Optional user ID to filter projects + * `limit` - Maximum number of recent projects to return (default: 50, max: 100) + + **Returns:** + * `GenerationsListResponse` - Contains list of projects with their conversation history + + **Raises:** + * `HTTPException`: + - 400: Invalid parameters + - 500: Failed to retrieve generations + """ + if limit < 1 or limit > 100: + raise HTTPException(status_code=400, detail="Limit must be between 1 and 100") + + logger.info(f"Getting generations for user_id={user_id}, limit={limit}") + + try: + # Get recent projects with their conversation history + projects = await get_projects_by_user(user_id=user_id, limit=limit) + + logger.info(f"Retrieved {len(projects)} projects for user {user_id}") + return GenerationsListResponse(projects=projects) + + except Exception as e: + logger.error(f"Failed to retrieve generations: {str(e)}", exc_info=True) + raise HTTPException( + status_code=500, + detail={ + "error": "GenerationsRetrievalFailed", + "msg": f"Failed to retrieve generations: {str(e)}", + }, + ) + + +@router.get( + "/generations/{project_id}", + summary="Get Generation Detail by Project ID", + response_model=GenerationDetailResponse, +) +async def get_generation_detail( + project_id: str, user_id: Optional[str] = None +) -> GenerationDetailResponse: + """Get specific project conversation history. + + **Path Parameters:** + * `project_id` - Project ID to get conversation history for + + **Query Parameters:** + * `user_id` - Optional user ID for access validation + + **Returns:** + * `GenerationDetailResponse` - Contains full conversation history for the project + + **Raises:** + * `HTTPException`: + - 404: Project not found or access denied + - 500: Failed to retrieve generation detail + """ + logger.info( + f"Getting generation detail for project_id={project_id}, user_id={user_id}" + ) + + try: + # Get conversation history for the specific project + try: + conversation_history = await get_conversation_history( + project_id=project_id, + user_id=user_id, # Used for additional access validation + ) + except ValueError as ve: + logger.warning(f"Access denied or project not found: {ve}") + raise HTTPException(status_code=404, detail=str(ve)) + + if not conversation_history: + logger.warning(f"No conversation history found for project {project_id}") + raise HTTPException( + status_code=404, + detail=f"No conversation history found for project {project_id}", + ) + + # Get project metadata for additional information + project_metadata = await get_project_metadata(project_id) + + logger.info( + f"Retrieved conversation with {len(conversation_history)} messages for project {project_id}" + ) + + return GenerationDetailResponse( + project_id=project_id, + user_id=project_metadata.get("user_id") if project_metadata else user_id, + created_at=datetime.fromtimestamp( + project_metadata.get("created_at") + ).isoformat() + if project_metadata and project_metadata.get("created_at") + else None, + last_activity=datetime.fromtimestamp( + project_metadata.get("last_activity") + ).isoformat() + if project_metadata and project_metadata.get("last_activity") + else None, + message_count=len(conversation_history), + last_message=conversation_history[-1] if conversation_history else None, + first_message=conversation_history[0] if conversation_history else None, + conversation_history=conversation_history, + ) + + except HTTPException: + # Re-raise HTTP exceptions as-is + raise + except Exception as e: + logger.error(f"Failed to retrieve 
generation detail: {str(e)}", exc_info=True) + raise HTTPException( + status_code=500, + detail={ + "error": "GenerationDetailRetrievalFailed", + "msg": f"Failed to retrieve generation detail: {str(e)}", + }, + ) diff --git a/app/admin/api.py b/app/admin/api.py new file mode 100644 index 00000000..7b24aa13 --- /dev/null +++ b/app/admin/api.py @@ -0,0 +1,936 @@ +import importlib +import logging +from typing import Annotated, Optional, TypedDict + +from aiogram import Bot +from aiogram.exceptions import TelegramConflictError, TelegramUnauthorizedError +from aiogram.utils.token import TokenValidationError +from fastapi import ( + APIRouter, + Body, + Depends, + File, + HTTPException, + Path, + Query, + Response, + UploadFile, +) +from fastapi.responses import PlainTextResponse +from pydantic import BaseModel, Field, ValidationError +from sqlalchemy import select +from sqlalchemy.exc import SQLAlchemyError +from sqlalchemy.ext.asyncio import AsyncSession +from sqlalchemy.orm.exc import NoResultFound +from yaml import safe_load + +from app.auth import verify_admin_jwt +from intentkit.clients.cdp import get_cdp_client +from intentkit.clients.twitter import unlink_twitter +from intentkit.config.config import config +from intentkit.core.engine import clean_agent_memory +from intentkit.core.skill import skill_store +from intentkit.models.agent import ( + Agent, + AgentCreate, + AgentResponse, + AgentTable, + AgentUpdate, +) +from intentkit.models.agent_data import AgentData, AgentDataTable +from intentkit.models.db import get_db +from intentkit.models.user import User +from intentkit.skills import __all__ as skill_categories +from intentkit.utils.slack_alert import send_slack_message + +admin_router_readonly = APIRouter() +admin_router = APIRouter() + +logger = logging.getLogger(__name__) + + +async def _process_agent( + agent: AgentCreate, subject: str | None = None, slack_message: str | None = None +) -> tuple[Agent, AgentData]: + """Shared function to process agent creation or update. + + Args: + agent: Agent configuration to process + subject: Optional subject from JWT token + slack_message: Optional custom message for Slack notification + + Returns: + tuple[Agent, AgentData]: Tuple of (processed agent, agent data) + """ + logger.info(f"Processing agent: {agent}") + if subject: + agent.owner = subject + + # Get the latest agent from create_or_update + latest_agent, is_new = await agent.create_or_update() + + # Process common post-creation/update steps + agent_data = await _process_agent_post_actions(latest_agent, is_new, slack_message) + + return latest_agent, agent_data + + +async def _process_agent_post_actions( + agent: Agent, is_new: bool = True, slack_message: str | None = None +) -> AgentData: + """Process common actions after agent creation or update. 
+ + Args: + agent: The agent that was created or updated + is_new: Whether the agent is newly created + slack_message: Optional custom message for Slack notification + + Returns: + AgentData: The processed agent data + """ + if config.cdp_api_key_id and agent.wallet_provider == "cdp": + cdp_client = await get_cdp_client(agent.id, skill_store) + await cdp_client.get_wallet_provider() + + # Get new agent data + # FIXME: refuse to change wallet provider + if agent.wallet_provider == "readonly": + agent_data = await AgentData.patch( + agent.id, + { + "evm_wallet_address": agent.readonly_wallet_address, + }, + ) + else: + agent_data = await AgentData.get(agent.id) + + # Send Slack notification + slack_message = slack_message or ("Agent Created" if is_new else "Agent Updated") + try: + _send_agent_notification(agent, agent_data, slack_message) + except Exception as e: + logger.error("Failed to send Slack notification: %s", e) + + return agent_data + + +async def _process_telegram_config( + agent: AgentUpdate, existing_agent: Optional[Agent], agent_data: AgentData +) -> AgentData: + """Process telegram configuration for an agent. + + Args: + agent: The agent with telegram configuration + agent_data: The agent data to update + + Returns: + AgentData: The updated agent data + """ + changes = agent.model_dump(exclude_unset=True) + if not changes.get("telegram_entrypoint_enabled"): + return agent_data + + if not changes.get("telegram_config") or not changes.get("telegram_config").get( + "token" + ): + return agent_data + + tg_bot_token = changes.get("telegram_config").get("token") + + if existing_agent and existing_agent.telegram_config.get("token") == tg_bot_token: + return agent_data + + try: + bot = Bot(token=tg_bot_token) + bot_info = await bot.get_me() + agent_data.telegram_id = str(bot_info.id) + agent_data.telegram_username = bot_info.username + agent_data.telegram_name = bot_info.first_name + if bot_info.last_name: + agent_data.telegram_name = f"{bot_info.first_name} {bot_info.last_name}" + await agent_data.save() + try: + await bot.close() + except Exception: + pass + return agent_data + except ( + TelegramUnauthorizedError, + TelegramConflictError, + TokenValidationError, + ) as req_err: + logger.error( + f"Unauthorized err getting telegram bot username with token {tg_bot_token}: {req_err}", + ) + return agent_data + except Exception as e: + logger.error( + f"Error getting telegram bot username with token {tg_bot_token}: {e}", + ) + return agent_data + + +def _send_agent_notification(agent: Agent, agent_data: AgentData, message: str) -> None: + """Send a notification about agent creation or update. 
+ + Args: + agent: The agent that was created or updated + agent_data: The agent data to update + message: The notification message + """ + # Format autonomous configurations - show only enabled ones with their id, name, and schedule + autonomous_formatted = "" + if agent.autonomous: + enabled_autonomous = [auto for auto in agent.autonomous if auto.enabled] + if enabled_autonomous: + autonomous_items = [] + for auto in enabled_autonomous: + schedule = ( + f"cron: {auto.cron}" if auto.cron else f"minutes: {auto.minutes}" + ) + autonomous_items.append( + f"â€ĸ {auto.id}: {auto.name or 'Unnamed'} ({schedule})" + ) + autonomous_formatted = "\n".join(autonomous_items) + else: + autonomous_formatted = "No enabled autonomous configurations" + else: + autonomous_formatted = "None" + + # Format skills - find categories with enabled: true and list skills in public/private states + skills_formatted = "" + if agent.skills: + enabled_categories = [] + for category, skill_config in agent.skills.items(): + if skill_config and skill_config.get("enabled") is True: + skills_list = [] + states = skill_config.get("states", {}) + public_skills = [ + skill for skill, state in states.items() if state == "public" + ] + private_skills = [ + skill for skill, state in states.items() if state == "private" + ] + + if public_skills: + skills_list.append(f" Public: {', '.join(public_skills)}") + if private_skills: + skills_list.append(f" Private: {', '.join(private_skills)}") + + if skills_list: + enabled_categories.append( + f"â€ĸ {category}:\n{chr(10).join(skills_list)}" + ) + + if enabled_categories: + skills_formatted = "\n".join(enabled_categories) + else: + skills_formatted = "No enabled skills" + else: + skills_formatted = "None" + + send_slack_message( + message, + attachments=[ + { + "color": "good", + "fields": [ + {"title": "ID", "short": True, "value": agent.id}, + {"title": "Name", "short": True, "value": agent.name}, + {"title": "Model", "short": True, "value": agent.model}, + { + "title": "Network", + "short": True, + "value": agent.network_id or agent.cdp_network_id or "Default", + }, + { + "title": "X Username", + "short": True, + "value": agent_data.twitter_username, + }, + { + "title": "Telegram Enabled", + "short": True, + "value": str(agent.telegram_entrypoint_enabled), + }, + { + "title": "Telegram Username", + "short": True, + "value": agent_data.telegram_username, + }, + { + "title": "Wallet Address", + "value": agent_data.evm_wallet_address, + }, + { + "title": "Autonomous", + "value": autonomous_formatted, + }, + { + "title": "Skills", + "value": skills_formatted, + }, + ], + } + ], + ) + + +@admin_router.post( + "/agents", + tags=["Agent"], + status_code=201, + operation_id="post_agent_deprecated", + deprecated=True, +) +async def create_or_update_agent( + agent: AgentCreate = Body(AgentCreate, description="Agent configuration"), + subject: str = Depends(verify_admin_jwt), +) -> Response: + """Create or update an agent. + + THIS ENDPOINT IS DEPRECATED. Please use POST /agents/v2 for creating new agents. + + This endpoint: + 1. Validates agent ID format + 2. Creates or updates agent configuration + 3. Reinitializes agent if already in cache + 4. 
Masks sensitive data in response + + **Request Body:** + * `agent` - Agent configuration + + **Returns:** + * `AgentResponse` - Updated agent configuration with additional processed data + + **Raises:** + * `HTTPException`: + - 400: Invalid agent ID format + - 500: Database error + """ + latest_agent, agent_data = await _process_agent(agent, subject) + agent_response = await AgentResponse.from_agent(latest_agent, agent_data) + + # Return Response with ETag header + return Response( + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) + + +@admin_router_readonly.post( + "/agent/validate", + tags=["Agent"], + status_code=204, + operation_id="validate_agent_create", +) +async def validate_agent_create( + user_id: Annotated[ + Optional[str], Query(description="Optional user ID for authorization check") + ] = None, + input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"), +) -> Response: + """Validate agent configuration. + + **Request Body:** + * `agent` - Agent configuration + + **Returns:** + * `204 No Content` - Agent configuration is valid + + **Raises:** + * `HTTPException`: + - 400: Invalid agent configuration + - 422: Invalid agent configuration from intentkit core + - 500: Server error + """ + if not input.owner: + raise HTTPException(status_code=400, detail="Owner is required") + max_fee = 100 + if user_id: + if input.owner != user_id: + raise HTTPException(status_code=400, detail="Owner does not match user ID") + user = await User.get(user_id) + if user: + max_fee += user.nft_count * 10 + if input.fee_percentage and input.fee_percentage > max_fee: + raise HTTPException(status_code=400, detail="Fee percentage too high") + input.validate_autonomous_schedule() + return Response(status_code=204) + + +@admin_router_readonly.post( + "/agents/{agent_id}/validate", + tags=["Agent"], + status_code=204, + operation_id="validate_agent_update", +) +async def validate_agent_update( + agent_id: Annotated[str, Path(description="Agent ID")], + user_id: Annotated[ + Optional[str], Query(description="Optional user ID for authorization check") + ] = None, + input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"), +) -> Response: + """Validate agent configuration. 
+ + **Request Body:** + * `agent` - Agent configuration + + **Returns:** + * `204 No Content` - Agent configuration is valid + + **Raises:** + * `HTTPException`: + - 400: Invalid agent configuration + - 422: Invalid agent configuration from intentkit core + - 500: Server error + """ + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + max_fee = 100 + if user_id: + if agent.owner != user_id: + raise HTTPException(status_code=400, detail="Owner does not match user ID") + user = await User.get(user_id) + if user: + max_fee += user.nft_count * 10 + if input.fee_percentage and input.fee_percentage > max_fee: + raise HTTPException(status_code=400, detail="Fee percentage too high") + input.validate_autonomous_schedule() + return Response(status_code=204) + + +@admin_router.post( + "/agents/v2", + tags=["Agent"], + operation_id="create_agent", + summary="Create Agent", + response_model=AgentResponse, + responses={ + 200: {"model": AgentResponse, "description": "Agent already exists"}, + 201: {"model": AgentResponse, "description": "Agent created"}, + 400: {"description": "Other client errors except format error"}, + 422: {"description": "Invalid agent configuration"}, + 500: {"description": "Server error"}, + }, +) +async def create_agent( + input: AgentUpdate = Body(AgentUpdate, description="Agent configuration"), + subject: str = Depends(verify_admin_jwt), +) -> Response: + """Create a new agent. + + **Request Body:** + * `agent` - Agent configuration + + **Returns:** + * `AgentResponse` - Created agent configuration with additional processed data + + **Raises:** + * `HTTPException`: + - 400: Invalid agent ID format or agent ID already exists + - 500: Database error + """ + agent = AgentCreate.model_validate(input) + if subject: + agent.owner = subject + + # Check for existing agent by upstream_id + existing = await agent.get_by_upstream_id() + if existing: + agent_data = await AgentData.get(existing.id) + agent_response = await AgentResponse.from_agent(existing, agent_data) + return Response( + status_code=200, + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) + # Create new agent + latest_agent = await agent.create() + # Process common post-creation actions + agent_data = await _process_agent_post_actions(latest_agent, True, "Agent Created") + agent_data = await _process_telegram_config(input, None, agent_data) + agent_response = await AgentResponse.from_agent(latest_agent, agent_data) + + # Return Response with ETag header + return Response( + status_code=201, + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) + + +@admin_router.patch( + "/agents/{agent_id}", tags=["Agent"], status_code=200, operation_id="update_agent" +) +async def update_agent( + agent_id: str = Path(..., description="ID of the agent to update"), + agent: AgentUpdate = Body(AgentUpdate, description="Agent update configuration"), + subject: str = Depends(verify_admin_jwt), +) -> Response: + """Update an existing agent. + + Use input to update agent configuration. If some fields are not provided, they will not be changed. 
+ + **Path Parameters:** + * `agent_id` - ID of the agent to update + + **Request Body:** + * `agent` - Agent update configuration + + **Returns:** + * `AgentResponse` - Updated agent configuration with additional processed data + + **Raises:** + * `HTTPException`: + - 400: Invalid agent ID format + - 404: Agent not found + - 403: Permission denied (if owner mismatch) + - 500: Database error + """ + if subject: + agent.owner = subject + + existing_agent = await Agent.get(agent_id) + if not existing_agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Update agent + latest_agent = await agent.update(agent_id) + + # Process common post-update actions + agent_data = await _process_agent_post_actions(latest_agent, False, "Agent Updated") + + agent_data = await _process_telegram_config(agent, existing_agent, agent_data) + + agent_response = await AgentResponse.from_agent(latest_agent, agent_data) + + # Return Response with ETag header + return Response( + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) + + +@admin_router.put( + "/agents/{agent_id}", tags=["Agent"], status_code=200, operation_id="override_agent" +) +async def override_agent( + agent_id: str = Path(..., description="ID of the agent to update"), + agent: AgentUpdate = Body(AgentUpdate, description="Agent update configuration"), + subject: str = Depends(verify_admin_jwt), +) -> Response: + """Override an existing agent. + + Use input to override agent configuration. If some fields are not provided, they will be reset to default values. + + **Path Parameters:** + * `agent_id` - ID of the agent to update + + **Request Body:** + * `agent` - Agent update configuration + + **Returns:** + * `AgentResponse` - Updated agent configuration with additional processed data + + **Raises:** + * `HTTPException`: + - 400: Invalid agent ID format + - 404: Agent not found + - 403: Permission denied (if owner mismatch) + - 500: Database error + """ + if subject: + agent.owner = subject + + if not agent.owner: + raise HTTPException(status_code=400, detail="Owner is required") + + existing_agent = await Agent.get(agent_id) + if not existing_agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Update agent + latest_agent = await agent.override(agent_id) + + # Process common post-update actions + agent_data = await _process_agent_post_actions( + latest_agent, False, "Agent Overridden" + ) + + agent_data = await _process_telegram_config(agent, existing_agent, agent_data) + + agent_response = await AgentResponse.from_agent(latest_agent, agent_data) + + # Return Response with ETag header + return Response( + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) + + +@admin_router_readonly.get( + "/agents", + tags=["Agent"], + dependencies=[Depends(verify_admin_jwt)], + operation_id="get_agents", +) +async def get_agents(db: AsyncSession = Depends(get_db)) -> list[AgentResponse]: + """Get all agents with their quota information. 
+
+    **Returns:**
+    * `list[AgentResponse]` - List of agents with their quota information and additional processed data
+    """
+    # Query all agents first
+    agents = (await db.scalars(select(AgentTable))).all()
+
+    # Batch get agent data
+    agent_ids = [agent.id for agent in agents]
+    agent_data_list = await db.scalars(
+        select(AgentDataTable).where(AgentDataTable.id.in_(agent_ids))
+    )
+    agent_data_map = {data.id: data for data in agent_data_list}
+
+    # Convert to AgentResponse objects
+    return [
+        await AgentResponse.from_agent(
+            Agent.model_validate(agent),
+            AgentData.model_validate(agent_data_map.get(agent.id))
+            if agent.id in agent_data_map
+            else None,
+        )
+        for agent in agents
+    ]
+
+
+@admin_router_readonly.get(
+    "/agents/{agent_id}",
+    tags=["Agent"],
+    dependencies=[Depends(verify_admin_jwt)],
+    operation_id="get_agent",
+)
+async def get_agent(
+    agent_id: str = Path(..., description="ID of the agent to retrieve"),
+) -> Response:
+    """Get a single agent by ID.
+
+    **Path Parameters:**
+    * `agent_id` - ID of the agent to retrieve
+
+    **Returns:**
+    * `AgentResponse` - Agent configuration with additional processed data
+
+    **Raises:**
+    * `HTTPException`:
+        - 404: Agent not found
+    """
+    agent = await Agent.get(agent_id)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+
+    # Get agent data
+    agent_data = await AgentData.get(agent_id)
+
+    agent_response = await AgentResponse.from_agent(agent, agent_data)
+
+    # Return Response with ETag header
+    return Response(
+        content=agent_response.model_dump_json(),
+        media_type="application/json",
+        headers={"ETag": agent_response.etag()},
+    )
+
+
+class MemCleanRequest(BaseModel):
+    """Request model for agent memory cleanup endpoint.
+
+    Attributes:
+        agent_id (str): ID of the agent to clean
+        clean_agent_memory (bool): Whether to clean the agent memory
+        clean_skills_memory (bool): Whether to clean the skills data
+        chat_id (str | None): Optional chat ID to clean
+    """
+
+    agent_id: str
+    clean_agent_memory: bool
+    clean_skills_memory: bool
+    chat_id: str | None = Field("")
+
+
+@admin_router.post(
+    "/agent/clean-memory",
+    tags=["Agent"],
+    status_code=204,
+    dependencies=[Depends(verify_admin_jwt)],
+    operation_id="clean_agent_memory",
+)
+@admin_router.post(
+    "/agents/clean-memory",
+    tags=["Agent"],
+    status_code=201,
+    dependencies=[Depends(verify_admin_jwt)],
+    operation_id="clean_agent_memory_deprecated",
+    deprecated=True,
+)
+async def clean_memory(
+    request: MemCleanRequest = Body(
+        MemCleanRequest, description="Agent memory cleanup request"
+    ),
+):
+    """Clear an agent's memory.
+
+    **Request Body:**
+    * `request` - The cleanup request containing the agent ID, an optional chat ID, and flags for which memory to clean
+
+    **Returns:**
+    * `204 No Content` - Agent memory cleaned successfully
+
+    **Raises:**
+    * `HTTPException`:
+        - 400: If input parameters are invalid (empty agent_id)
+        - 404: If agent not found
+        - 500: For other server-side errors
+    """
+    # Validate input parameters
+    if not request.agent_id or not request.agent_id.strip():
+        raise HTTPException(status_code=400, detail="Agent ID cannot be empty")
+
+    try:
+        agent = await Agent.get(request.agent_id)
+        if not agent:
+            raise HTTPException(
+                status_code=404,
+                detail=f"Agent with id {request.agent_id} not found",
+            )
+
+        await clean_agent_memory(
+            request.agent_id,
+            request.chat_id,
+            clean_agent=request.clean_agent_memory,
+            clean_skill=request.clean_skills_memory,
+        )
+    except NoResultFound:
+        raise HTTPException(
+            status_code=404, detail=f"Agent {request.agent_id} not found"
+        )
+    except SQLAlchemyError as e:
+        raise HTTPException(status_code=500, detail=f"Database error: {str(e)}")
+    except ValueError as e:
+        raise HTTPException(status_code=400, detail=str(e))
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Server error: {str(e)}")
+
+
+@admin_router_readonly.get(
+    "/agents/{agent_id}/export",
+    tags=["Agent"],
+    operation_id="export_agent",
+    dependencies=[Depends(verify_admin_jwt)],
+)
+async def export_agent(
+    agent_id: str = Path(..., description="ID of the agent to export"),
+) -> str:
+    """Export agent configuration as YAML.
+
+    **Path Parameters:**
+    * `agent_id` - ID of the agent to export
+
+    **Returns:**
+    * `str` - YAML configuration of the agent
+
+    **Raises:**
+    * `HTTPException`:
+        - 404: Agent not found
+    """
+    agent = await Agent.get(agent_id)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+    # Ensure agent.skills is initialized
+    if agent.skills is None:
+        agent.skills = {}
+
+    # Process all skill categories
+    for category in skill_categories:
+        try:
+            # Dynamically import the skill module
+            skill_module = importlib.import_module(f"intentkit.skills.{category}")
+
+            # Check if the module has a Config class and get_skills function
+            if hasattr(skill_module, "Config") and hasattr(skill_module, "get_skills"):
+                # Get or create the config for this category
+                category_config = agent.skills.get(category, {})
+
+                # Ensure 'enabled' field exists (required by SkillConfig)
+                if "enabled" not in category_config:
+                    category_config["enabled"] = False
+
+                # Ensure states dict exists
+                if "states" not in category_config:
+                    category_config["states"] = {}
+
+                # Get all available skill states from the module
+                available_skills = []
+                if hasattr(skill_module, "SkillStates") and hasattr(
+                    skill_module.SkillStates, "__annotations__"
+                ):
+                    available_skills = list(
+                        skill_module.SkillStates.__annotations__.keys()
+                    )
+                # Add missing skills with disabled state
+                for skill_name in available_skills:
+                    if skill_name not in category_config["states"]:
+                        category_config["states"][skill_name] = "disabled"
+
+                # Get all required fields from Config class and its base classes
+                config_class = skill_module.Config
+                # Get all base classes of Config
+                all_bases = [config_class]
+                for base in config_class.__mro__[1:]:
+                    if base is TypedDict or base is dict or base is object:
+                        continue
+                    all_bases.append(base)
+
+                # Collect all required fields from Config and its base classes
+                for base in all_bases:
+                    if hasattr(base, "__annotations__"):
+                        for field_name, 
field_type in base.__annotations__.items(): + # Skip fields already set or marked as NotRequired + if field_name in category_config or "NotRequired" in str( + field_type + ): + continue + # Add default value based on type + if field_name != "states": # states already handled above + if "str" in str(field_type): + category_config[field_name] = "" + elif "bool" in str(field_type): + category_config[field_name] = False + elif "int" in str(field_type): + category_config[field_name] = 0 + elif "float" in str(field_type): + category_config[field_name] = 0.0 + elif "list" in str(field_type) or "List" in str( + field_type + ): + category_config[field_name] = [] + elif "dict" in str(field_type) or "Dict" in str( + field_type + ): + category_config[field_name] = {} + + # Update the agent's skills config + agent.skills[category] = category_config + except (ImportError, AttributeError): + # Skip if module import fails or doesn't have required components + pass + yaml_content = agent.to_yaml() + return Response( + content=yaml_content, + media_type="application/x-yaml", + headers={"Content-Disposition": f'attachment; filename="{agent_id}.yaml"'}, + ) + + +@admin_router.put( + "/agents/{agent_id}/import", + tags=["Agent"], + operation_id="import_agent", + response_class=PlainTextResponse, +) +async def import_agent( + agent_id: str = Path(...), + file: UploadFile = File( + ..., description="YAML file containing agent configuration" + ), + subject: str = Depends(verify_admin_jwt), +) -> str: + """Import agent configuration from YAML file. + Only updates existing agents, will not create new ones. + + **Path Parameters:** + * `agent_id` - ID of the agent to update + + **Request Body:** + * `file` - YAML file containing agent configuration + + **Returns:** + * `str` - Success message + + **Raises:** + * `HTTPException`: + - 400: Invalid YAML or agent configuration + - 404: Agent not found + - 500: Server error + """ + # First check if agent exists + existing_agent = await Agent.get(agent_id) + if not existing_agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Read and parse YAML + content = await file.read() + try: + yaml_data = safe_load(content) + except Exception as e: + raise HTTPException(status_code=400, detail=f"Invalid YAML format: {e}") + + # Create Agent instance from YAML + try: + agent = AgentUpdate.model_validate(yaml_data) + except ValidationError as e: + raise HTTPException(status_code=400, detail=f"Invalid agent configuration: {e}") + + # Get the latest agent from create_or_update + latest_agent = await agent.update(agent_id) + + # Process common post-creation/update steps + agent_data = await _process_agent_post_actions( + latest_agent, False, "Agent Updated via YAML Import" + ) + + await _process_telegram_config(agent, existing_agent, agent_data) + + return "Agent import successful" + + +@admin_router.put( + "/agents/{agent_id}/twitter/unlink", + tags=["Agent"], + operation_id="unlink_twitter", + dependencies=[Depends(verify_admin_jwt)], + response_class=Response, +) +async def unlink_twitter_endpoint( + agent_id: str = Path(..., description="ID of the agent to unlink from X"), +) -> Response: + """Unlink X from an agent. 
+ + **Path Parameters:** + * `agent_id` - ID of the agent to unlink from X + + **Raises:** + * `HTTPException`: + - 404: Agent not found + """ + # Check if agent exists + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Call the unlink_twitter function from clients.twitter + agent_data = await unlink_twitter(agent_id) + + agent_response = await AgentResponse.from_agent(agent, agent_data) + + return Response( + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) diff --git a/app/admin/credit.py b/app/admin/credit.py new file mode 100644 index 00000000..f0bf85c7 --- /dev/null +++ b/app/admin/credit.py @@ -0,0 +1,879 @@ +import logging +from datetime import datetime, timedelta +from decimal import Decimal +from typing import Annotated, List, Optional + +from fastapi import APIRouter, Depends, HTTPException, Path, Query, status +from pydantic import BaseModel, Field, model_validator +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession + +from app.auth import verify_admin_jwt +from intentkit.core.credit import ( + fetch_credit_event_by_id, + fetch_credit_event_by_upstream_tx_id, + list_credit_events, + list_credit_events_by_user, + list_fee_events_by_agent, + recharge, + reward, + update_credit_event_note, + update_daily_quota, +) +from intentkit.models.agent_data import AgentQuota +from intentkit.models.credit import ( + CreditAccount, + CreditAccountTable, + CreditDebit, + CreditEvent, + CreditEventTable, + CreditTransaction, + CreditTransactionTable, + Direction, + EventType, + OwnerType, + RewardType, + TransactionType, +) +from intentkit.models.db import get_db + +logger = logging.getLogger(__name__) + +credit_router = APIRouter(prefix="/credit", tags=["Credit"]) +credit_router_readonly = APIRouter(prefix="/credit", tags=["Credit"]) + + +# ===== Models ===== +class CreditEventsResponse(BaseModel): + """Response model for credit events with pagination.""" + + data: List[CreditEvent] = Field(description="List of credit events") + has_more: bool = Field(description="Indicates if there are more items") + next_cursor: Optional[str] = Field(None, description="Cursor for next page") + + +class CreditTransactionResp(CreditTransaction): + """Credit transaction response model with event data.""" + + event: Optional[CreditEvent] = Field(None, description="Associated credit event") + + +class CreditTransactionsResponse(BaseModel): + """Response model for credit transactions with pagination.""" + + data: List[CreditTransactionResp] = Field(description="List of credit transactions") + has_more: bool = Field(description="Indicates if there are more items") + next_cursor: Optional[str] = Field(None, description="Cursor for next page") + + +# ===== Input models ===== +class RechargeRequest(BaseModel): + """Request model for recharging a user account.""" + + upstream_tx_id: Annotated[ + str, Field(str, description="Upstream transaction ID, idempotence Check") + ] + user_id: Annotated[str, Field(description="ID of the user to recharge")] + amount: Annotated[Decimal, Field(gt=Decimal("0"), description="Amount to recharge")] + note: Annotated[ + Optional[str], Field(None, description="Optional note for the recharge") + ] + + +class RewardRequest(BaseModel): + """Request model for rewarding a user account.""" + + upstream_tx_id: Annotated[ + str, Field(str, description="Upstream transaction ID, idempotence Check") + ] + user_id: 
Annotated[str, Field(description="ID of the user to reward")] + amount: Annotated[Decimal, Field(gt=Decimal("0"), description="Amount to reward")] + note: Annotated[ + Optional[str], Field(None, description="Optional note for the reward") + ] + reward_type: Annotated[ + Optional[RewardType], + Field(RewardType.REWARD, description="Type of reward event"), + ] + + +# class AdjustmentRequest(BaseModel): +# """Request model for adjusting a user account.""" + +# upstream_tx_id: Annotated[ +# str, Field(str, description="Upstream transaction ID, idempotence Check") +# ] +# user_id: Annotated[str, Field(description="ID of the user to adjust")] +# credit_type: Annotated[CreditType, Field(description="Type of credit to adjust")] +# amount: Annotated[ +# Decimal, Field(description="Amount to adjust (positive or negative)") +# ] +# note: Annotated[str, Field(description="Required explanation for the adjustment")] + + +class UpdateDailyQuotaRequest(BaseModel): + """Request model for updating account daily quota and refill amount.""" + + upstream_tx_id: Annotated[ + str, Field(str, description="Upstream transaction ID, idempotence Check") + ] + free_quota: Annotated[ + Optional[Decimal], + Field( + None, gt=Decimal("0"), description="New daily quota value for the account" + ), + ] + refill_amount: Annotated[ + Optional[Decimal], + Field( + None, + ge=Decimal("0"), + description="Amount to refill hourly, not exceeding free_quota", + ), + ] + note: Annotated[ + str, + Field(description="Explanation for changing the daily quota and refill amount"), + ] + + @model_validator(mode="after") + def validate_at_least_one_field(self) -> "UpdateDailyQuotaRequest": + """Validate that at least one of free_quota or refill_amount is provided.""" + if self.free_quota is None and self.refill_amount is None: + raise ValueError( + "At least one of free_quota or refill_amount must be provided" + ) + return self + + +class UpdateEventNoteRequest(BaseModel): + """Request model for updating event note.""" + + note: Annotated[Optional[str], Field(None, description="New note for the event")] + + +# ===== API Endpoints ===== +@credit_router.get( + "/accounts/{owner_type}/{owner_id}", + response_model=CreditAccount, + operation_id="get_account", + summary="Get Account", + dependencies=[Depends(verify_admin_jwt)], +) +async def get_account(owner_type: OwnerType, owner_id: str) -> CreditAccount: + """Get a credit account by owner type and ID. It will create a new account if it does not exist. + + This endpoint is not in readonly router, because it may create a new account. + + Args: + owner_type: Type of the owner (user, agent, company) + owner_id: ID of the owner + + Returns: + The credit account + """ + return await CreditAccount.get_or_create(owner_type, owner_id) + + +@credit_router.post( + "/recharge", + response_model=CreditAccount, + status_code=status.HTTP_201_CREATED, + operation_id="recharge_account", + summary="Recharge", + dependencies=[Depends(verify_admin_jwt)], +) +async def recharge_user_account( + request: RechargeRequest, + db: AsyncSession = Depends(get_db), +) -> CreditAccount: + """Recharge a user account with credits. 
+ + Args: + request: Recharge request details + + Returns: + The updated credit account + """ + return await recharge( + db, request.user_id, request.amount, request.upstream_tx_id, request.note + ) + + +@credit_router.post( + "/reward", + response_model=CreditAccount, + status_code=status.HTTP_201_CREATED, + operation_id="reward_account", + summary="Reward", + dependencies=[Depends(verify_admin_jwt)], +) +async def reward_user_account( + request: RewardRequest, + db: AsyncSession = Depends(get_db), +) -> CreditAccount: + """Reward a user account with credits. + + Args: + request: Reward request details + db: Database session + + Returns: + The updated credit account + """ + return await reward( + db, + request.user_id, + request.amount, + request.upstream_tx_id, + request.note, + request.reward_type, + ) + + +# @credit_router.post( +# "/adjust", +# response_model=CreditAccount, +# status_code=status.HTTP_201_CREATED, +# operation_id="adjust_account", +# summary="Adjust", +# dependencies=[Depends(verify_admin_jwt)], +# ) +# async def adjust_user_account( +# request: AdjustmentRequest, +# db: AsyncSession = Depends(get_db), +# ) -> CreditAccount: +# """Adjust a user account's credits. + +# Args: +# request: Adjustment request details +# db: Database session + +# Returns: +# The updated credit account +# """ +# return await adjustment( +# db, +# request.user_id, +# request.credit_type, +# request.amount, +# request.upstream_tx_id, +# request.note, +# ) + + +@credit_router.put( + "/accounts/users/{user_id}/daily-quota", + response_model=CreditAccount, + status_code=status.HTTP_200_OK, + operation_id="update_account_free_quota", + summary="Update Daily Quota and Refill Amount", + dependencies=[Depends(verify_admin_jwt)], +) +async def update_account_free_quota( + user_id: str, request: UpdateDailyQuotaRequest, db: AsyncSession = Depends(get_db) +) -> CreditAccount: + """Update the daily quota and refill amount of a credit account. 
+ + Args: + user_id: ID of the user + request: Update request details including optional free_quota, optional refill_amount, and explanation note + db: Database session + + Returns: + The updated credit account + """ + # At least one of free_quota or refill_amount must be provided (validated in the request model) + return await update_daily_quota( + session=db, + user_id=user_id, + free_quota=request.free_quota, + refill_amount=request.refill_amount, + upstream_tx_id=request.upstream_tx_id, + note=request.note, + ) + + +class AgentStatisticsResponse(BaseModel): + """Response model for agent statistics.""" + + agent_id: str = Field(description="ID of the agent") + account_id: str = Field(description="ID of the agent's credit account") + balance: Decimal = Field(description="Total balance of the agent's account") + total_income: Decimal = Field(description="Total income from all credit events") + net_income: Decimal = Field(description="Net income from all credit events") + permanent_income: Decimal = Field( + description="Permanent income from all credit events" + ) + permanent_profit: Decimal = Field( + description="Permanent profit from all credit events" + ) + last_24h_income: Decimal = Field(description="Income from last 24 hours") + last_24h_permanent_income: Decimal = Field( + description="Permanent income from last 24 hours" + ) + avg_action_cost: Decimal = Field(description="Average action cost") + min_action_cost: Decimal = Field(description="Minimum action cost") + max_action_cost: Decimal = Field(description="Maximum action cost") + low_action_cost: Decimal = Field(description="Low action cost") + medium_action_cost: Decimal = Field(description="Medium action cost") + high_action_cost: Decimal = Field(description="High action cost") + + +@credit_router.get( + "/accounts/agent/{agent_id}/statistics", + response_model=AgentStatisticsResponse, + operation_id="get_agent_statistics", + summary="Get Agent Statistics", + dependencies=[Depends(verify_admin_jwt)], +) +async def get_agent_statistics( + agent_id: Annotated[str, Path(description="ID of the agent")], + db: AsyncSession = Depends(get_db), +) -> AgentStatisticsResponse: + """Get statistics for an agent account. + + This endpoint is not in readonly router, because it may create a new account. 
+ + Args: + agent_id: ID of the agent + db: Database session + + Returns: + Agent statistics including balance, total income, and net income + + Raises: + 404: If the agent account is not found + """ + # Get the agent account + agent_account = await CreditAccount.get_or_create_in_session( + db, OwnerType.AGENT, agent_id + ) + + # Calculate the total balance + balance = ( + agent_account.free_credits + + agent_account.reward_credits + + agent_account.credits + ) + + # Calculate total income (sum of total_amount) and net income (sum of fee_agent_amount) at SQL level + # Query to get the sum of total_amount and fee_agent_amount + stmt = select( + func.sum(CreditEventTable.total_amount).label("total_income"), + func.sum(CreditEventTable.fee_agent_amount).label("net_income"), + func.sum(CreditEventTable.permanent_amount).label("permanent_income"), + func.sum(CreditEventTable.fee_agent_permanent_amount).label("permanent_profit"), + ).where(CreditEventTable.agent_id == agent_id) + result = await db.execute(stmt) + row = result.first() + + # Extract the sums, defaulting to 0 if None + total_income = row.total_income if row.total_income is not None else Decimal("0") + net_income = row.net_income if row.net_income is not None else Decimal("0") + permanent_income = ( + row.permanent_income if row.permanent_income is not None else Decimal("0") + ) + permanent_profit = ( + row.permanent_profit if row.permanent_profit is not None else Decimal("0") + ) + + # Calculate last 24h income + stmt = select( + func.sum(CreditEventTable.total_amount).label("last_24h_income"), + func.sum(CreditEventTable.permanent_amount).label("last_24h_permanent_income"), + ).where( + CreditEventTable.agent_id == agent_id, + CreditEventTable.created_at >= datetime.now() - timedelta(hours=24), + ) + result = await db.execute(stmt) + row = result.first() + last_24h_income = ( + row.last_24h_income if row.last_24h_income is not None else Decimal("0") + ) + last_24h_permanent_income = ( + row.last_24h_permanent_income + if row.last_24h_permanent_income is not None + else Decimal("0") + ) + quota = await AgentQuota.get(agent_id) + return AgentStatisticsResponse( + agent_id=agent_id, + account_id=agent_account.id, + balance=balance, + total_income=total_income, + net_income=net_income, + permanent_income=permanent_income, + permanent_profit=permanent_profit, + last_24h_income=last_24h_income, + last_24h_permanent_income=last_24h_permanent_income, + avg_action_cost=quota.avg_action_cost, + min_action_cost=quota.min_action_cost, + max_action_cost=quota.max_action_cost, + low_action_cost=quota.low_action_cost, + medium_action_cost=quota.medium_action_cost, + high_action_cost=quota.high_action_cost, + ) + + +@credit_router_readonly.get( + "/users/{user_id}/events", + response_model=CreditEventsResponse, + operation_id="list_user_events", + summary="List User Events", + dependencies=[Depends(verify_admin_jwt)], +) +async def list_user_events( + user_id: str, + event_type: Annotated[Optional[EventType], Query(description="Event type")] = None, + cursor: Annotated[Optional[str], Query(description="Cursor for pagination")] = None, + limit: Annotated[ + int, Query(description="Maximum number of events to return", ge=1, le=100) + ] = 20, + db: AsyncSession = Depends(get_db), +) -> CreditEventsResponse: + """List all events for a user account with optional event type filtering. 
+ + Args: + user_id: ID of the user + event_type: Optional filter for specific event type + cursor: Cursor for pagination + limit: Maximum number of events to return + db: Database session + + Returns: + Response with list of events and pagination information + """ + events, next_cursor, has_more = await list_credit_events_by_user( + session=db, + user_id=user_id, + cursor=cursor, + limit=limit, + event_type=event_type, + ) + + return CreditEventsResponse( + data=events, + has_more=has_more, + next_cursor=next_cursor, + ) + + +@credit_router.patch( + "/events/{event_id}", + response_model=CreditEvent, + operation_id="update_event_note", + summary="Update Event Note", + dependencies=[Depends(verify_admin_jwt)], +) +async def update_event_note( + event_id: Annotated[str, Path(description="ID of the event to update")], + request: UpdateEventNoteRequest, + db: AsyncSession = Depends(get_db), +) -> CreditEvent: + """Update the note of a credit event. + + Args: + event_id: ID of the event to update + request: Request containing the new note + db: Database session + + Returns: + The updated credit event + + Raises: + 404: If the event is not found + """ + return await update_credit_event_note( + session=db, + event_id=event_id, + note=request.note, + ) + + +@credit_router_readonly.get( + "/event/users/{user_id}/expense", + response_model=CreditEventsResponse, + operation_id="list_user_expense_events", + summary="List User Expense", + dependencies=[Depends(verify_admin_jwt)], +) +async def list_user_expense_events( + user_id: str, + cursor: Annotated[Optional[str], Query(description="Cursor for pagination")] = None, + limit: Annotated[ + int, Query(description="Maximum number of events to return", ge=1, le=100) + ] = 20, + db: AsyncSession = Depends(get_db), +) -> CreditEventsResponse: + """List all expense events for a user account. + + Args: + user_id: ID of the user + cursor: Cursor for pagination + limit: Maximum number of events to return + db: Database session + + Returns: + Response with list of expense events and pagination information + """ + events, next_cursor, has_more = await list_credit_events_by_user( + session=db, + user_id=user_id, + direction=Direction.EXPENSE, + cursor=cursor, + limit=limit, + ) + + return CreditEventsResponse( + data=events, + has_more=has_more, + next_cursor=next_cursor, + ) + + +@credit_router_readonly.get( + "/transactions", + response_model=CreditTransactionsResponse, + operation_id="list_transactions", + summary="List Transactions", + dependencies=[Depends(verify_admin_jwt)], +) +async def list_transactions( + user_id: Annotated[str, Query(description="ID of the user")], + tx_type: Annotated[ + Optional[List[TransactionType]], Query(description="Transaction types") + ] = None, + credit_debit: Annotated[ + Optional[CreditDebit], Query(description="Credit or debit") + ] = None, + cursor: Annotated[Optional[str], Query(description="Cursor for pagination")] = None, + limit: Annotated[ + int, Query(description="Maximum number of transactions to return", ge=1, le=100) + ] = 20, + db: AsyncSession = Depends(get_db), +) -> CreditTransactionsResponse: + """List transactions with optional filtering by transaction type and credit/debit. + + You can use the `credit_debit` field to filter for debits or credits. + Alternatively, you can use `tx_type` to directly specify the transaction types you need. + For example, selecting `receive_fee_dev` and `reward` will query only those two types + of rewards; this way, topup will not be included. 
+
+    Args:
+        user_id: ID of the user
+        tx_type: Optional filter for transaction type
+        credit_debit: Optional filter for credit or debit
+        cursor: Cursor for pagination
+        limit: Maximum number of transactions to return
+        db: Database session
+
+    Returns:
+        Response with list of transactions and pagination information
+    """
+    # First get the account ID for the user
+    account_query = select(CreditAccountTable.id).where(
+        CreditAccountTable.owner_type == OwnerType.USER,
+        CreditAccountTable.owner_id == user_id,
+    )
+    account_result = await db.execute(account_query)
+    account_id = account_result.scalar_one_or_none()
+
+    if not account_id:
+        # Return empty response if account doesn't exist
+        return CreditTransactionsResponse(
+            data=[],
+            has_more=False,
+            next_cursor=None,
+        )
+
+    # Build query for transactions
+    query = select(CreditTransactionTable).where(
+        CreditTransactionTable.account_id == account_id
+    )
+
+    # Apply optional filters
+    if tx_type:
+        query = query.where(CreditTransactionTable.tx_type.in_(tx_type))
+
+    if credit_debit:
+        query = query.where(CreditTransactionTable.credit_debit == credit_debit)
+
+    # Apply pagination
+    if cursor:
+        # Use ID directly as cursor since IDs are time-ordered
+        query = query.where(CreditTransactionTable.id < cursor)
+
+    # Order by id desc (IDs are time-ordered) for consistent pagination
+    query = query.order_by(CreditTransactionTable.id.desc()).limit(
+        limit + 1
+    )  # Fetch one extra to determine if there are more
+
+    result = await db.execute(query)
+    transactions = result.scalars().all()
+
+    # Check if there are more results
+    has_more = len(transactions) > limit
+    if has_more:
+        transactions = transactions[:-1]  # Remove the extra item
+
+    # Generate next cursor
+    next_cursor = None
+    if has_more and transactions:
+        last_tx = transactions[-1]
+        next_cursor = last_tx.id
+
+    # Convert SQLAlchemy models to Pydantic models
+    tx_models = [CreditTransaction.model_validate(tx) for tx in transactions]
+
+    # Get all unique event IDs
+    event_ids = {tx.event_id for tx in tx_models}
+
+    # Fetch all related events in a single query
+    events_map = {}
+    if event_ids:
+        events_query = select(CreditEventTable).where(
+            CreditEventTable.id.in_(event_ids)
+        )
+        events_result = await db.execute(events_query)
+        events = events_result.scalars().all()
+
+        # Create a map of event_id to CreditEvent
+        events_map = {event.id: CreditEvent.model_validate(event) for event in events}
+
+    # Create response objects with associated events
+    tx_resp_models = []
+    for tx in tx_models:
+        tx_resp = CreditTransactionResp(
+            **tx.model_dump(), event=events_map.get(tx.event_id)
+        )
+        tx_resp_models.append(tx_resp)
+
+    return CreditTransactionsResponse(
+        data=tx_resp_models,
+        has_more=has_more,
+        next_cursor=next_cursor,
+    )
+
+
+@credit_router_readonly.get(
+    "/event/users/{user_id}/income",
+    response_model=CreditEventsResponse,
+    operation_id="list_user_income_events",
+    summary="List User Income",
+    dependencies=[Depends(verify_admin_jwt)],
+)
+async def list_user_income_events(
+    user_id: str,
+    event_type: Annotated[Optional[EventType], Query(description="Event type")] = None,
+    cursor: Annotated[Optional[str], Query(description="Cursor for pagination")] = None,
+    limit: Annotated[
+        int, Query(description="Maximum number of events to return", ge=1, le=100)
+    ] = 20,
+    db: AsyncSession = Depends(get_db),
+) -> CreditEventsResponse:
+    """List all income events for a user account.
+ + Args: + user_id: ID of the user + event_type: Event type + cursor: Cursor for pagination + limit: Maximum number of events to return + db: Database session + + Returns: + Response with list of income events and pagination information + """ + events, next_cursor, has_more = await list_credit_events_by_user( + session=db, + user_id=user_id, + direction=Direction.INCOME, + cursor=cursor, + limit=limit, + event_type=event_type, + ) + + return CreditEventsResponse( + data=events, + has_more=has_more, + next_cursor=next_cursor, + ) + + +@credit_router_readonly.get( + "/event/agents/{agent_id}/income", + response_model=CreditEventsResponse, + operation_id="list_agent_income_events", + summary="List Agent Income", + dependencies=[Depends(verify_admin_jwt)], +) +async def list_agent_income_events( + agent_id: str, + cursor: Annotated[Optional[str], Query(description="Cursor for pagination")] = None, + limit: Annotated[ + int, Query(description="Maximum number of events to return", ge=1, le=100) + ] = 20, + db: AsyncSession = Depends(get_db), +) -> CreditEventsResponse: + """List all income events for an agent account. + + Args: + agent_id: ID of the agent + cursor: Cursor for pagination + limit: Maximum number of events to return + db: Database session + + Returns: + Response with list of income events and pagination information + """ + events, next_cursor, has_more = await list_fee_events_by_agent( + session=db, + agent_id=agent_id, + cursor=cursor, + limit=limit, + ) + + return CreditEventsResponse( + data=events, + has_more=has_more, + next_cursor=next_cursor, + ) + + +@credit_router_readonly.get( + "/event", + response_model=CreditEvent, + operation_id="fetch_credit_event_by_upstream_tx_id", + summary="Credit Event by Upstream ID", + dependencies=[Depends(verify_admin_jwt)], +) +async def fetch_credit_event( + upstream_tx_id: Annotated[str, Query(description="Upstream transaction ID")], + db: AsyncSession = Depends(get_db), +) -> CreditEvent: + """Fetch a credit event by its upstream transaction ID. + + Args: + upstream_tx_id: ID of the upstream transaction + db: Database session + + Returns: + Credit event + + Raises: + 404: If the credit event is not found + """ + return await fetch_credit_event_by_upstream_tx_id(db, upstream_tx_id) + + +@credit_router_readonly.get( + "/events/{event_id}", + response_model=CreditEvent, + operation_id="fetch_credit_event_by_id", + summary="Credit Event by ID", + dependencies=[Depends(verify_admin_jwt)], + responses={ + 200: {"description": "Credit event found and returned successfully"}, + 403: { + "description": "Forbidden: Credit event does not belong to the specified user" + }, + 404: { + "description": "Not Found: Credit event with the specified ID does not exist" + }, + }, +) +async def fetch_credit_event_by_id_endpoint( + event_id: Annotated[str, Path(description="Credit event ID")], + user_id: Annotated[ + Optional[str], Query(description="Optional user ID for authorization check") + ] = None, + db: AsyncSession = Depends(get_db), +) -> CreditEvent: + """Fetch a credit event by its ID. 
+
+    Args:
+        event_id: ID of the credit event
+        user_id: Optional user ID for authorization check
+        db: Database session
+
+    Returns:
+        Credit event
+
+    Raises:
+        404: If the credit event is not found
+        403: If the event's account does not belong to the provided user_id
+    """
+    event = await fetch_credit_event_by_id(db, event_id)
+
+    # If user_id is provided, check if the event's account belongs to this user
+    if user_id:
+        # Query to find the account by ID
+        stmt = select(CreditAccountTable).where(
+            CreditAccountTable.id == event.account_id,
+            CreditAccountTable.owner_type == "user",
+            CreditAccountTable.owner_id == user_id,
+        )
+
+        # Execute query
+        account = await db.scalar(stmt)
+
+        # If no matching account found, the event doesn't belong to this user
+        if not account:
+            raise HTTPException(
+                status_code=403,
+                detail=f"Credit event with ID '{event_id}' does not belong to user '{user_id}'",
+            )
+
+    return event
+
+
+@credit_router_readonly.get(
+    "/events",
+    operation_id="list_credit_events",
+    summary="List Credit Events",
+    response_model=CreditEventsResponse,
+    dependencies=[Depends(verify_admin_jwt)],
+)
+async def list_all_credit_events(
+    direction: Annotated[
+        Optional[Direction],
+        Query(description="Direction of credit events (income or expense)"),
+    ] = Direction.EXPENSE,
+    event_type: Annotated[Optional[EventType], Query(description="Event type")] = None,
+    cursor: Annotated[Optional[str], Query(description="Cursor for pagination")] = None,
+    limit: Annotated[
+        int, Query(description="Maximum number of events to return", ge=1, le=100)
+    ] = 20,
+    start_at: Annotated[
+        Optional[datetime],
+        Query(description="Start datetime for filtering events, inclusive"),
+    ] = None,
+    end_at: Annotated[
+        Optional[datetime],
+        Query(description="End datetime for filtering events, exclusive"),
+    ] = None,
+    db: AsyncSession = Depends(get_db),
+) -> CreditEventsResponse:
+    """
+    List all credit events for admin monitoring with cursor pagination.
+
+    This endpoint is designed for admin use to monitor all credit events in the system.
+    Only the first request may omit the cursor; every subsequent request should pass the
+    cursor returned by the previous response. Even when there are no new records, the
+    response still includes a cursor for the next request, so you can poll this endpoint
+    (for example, once per second) and pick up new events as they are created.
+    """
+    events, next_cursor, has_more = await list_credit_events(
+        session=db,
+        direction=direction,
+        cursor=cursor,
+        limit=limit,
+        event_type=event_type,
+        start_at=start_at,
+        end_at=end_at,
+    )
+
+    return CreditEventsResponse(
+        data=events,
+        next_cursor=next_cursor if next_cursor else cursor,
+        has_more=has_more,
+    )
diff --git a/app/admin/generator/README.md b/app/admin/generator/README.md
new file mode 100644
index 00000000..0be33bb8
--- /dev/null
+++ b/app/admin/generator/README.md
@@ -0,0 +1,263 @@
+# Agent Generator Package
+
+AI-powered system for generating IntentKit agent schemas from natural language prompts with project-based conversation history and automatic tag generation.
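+
+The package can also be called directly from Python. The sketch below is illustrative
+(it assumes `OPENAI_API_KEY` is configured, as the generator requires) and uses the
+`generate_validated_agent_schema` entry point exported from this package:
+
+```python
+# Minimal usage sketch: generate and validate an agent schema from a prompt.
+import asyncio
+
+from app.admin.generator import generate_validated_agent_schema
+
+
+async def main() -> None:
+    # Returns a tuple of (agent_schema, identified_skills, summary_message)
+    schema, skills, summary = await generate_validated_agent_schema(
+        prompt="Create a Twitter bot that posts crypto analysis",
+        user_id="user123",
+    )
+    print(schema["name"], sorted(skills))
+    print(summary)
+
+
+asyncio.run(main())
+```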
+
+## Architecture
+
+```
+generator/
+├── agent_generator.py   # Main orchestrator
+├── skill_processor.py   # Skill identification
+├── validation.py        # Schema validation
+├── ai_assistant.py      # AI operations + conversation history
+├── llm_logger.py        # Individual LLM call tracking
+└── __init__.py          # Package interface
+```
+
+## API Usage
+
+### Generate New Agent
+```bash
+curl -X POST "http://localhost:8000/agent/generate" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "prompt": "Create a Twitter bot that posts crypto analysis",
+    "user_id": "user123"
+  }'
+```
+
+### Generate Agent with Project Context
+```bash
+curl -X POST "http://localhost:8000/agent/generate" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "prompt": "Now add web search capabilities for research",
+    "user_id": "user123",
+    "project_id": "existing_project_id"
+  }'
+```
+
+### Update Existing Agent
+```bash
+curl -X POST "http://localhost:8000/agent/generate" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "prompt": "Change the name to CryptoBot Pro",
+    "existing_agent": {"name": "MyBot", "skills": {}},
+    "user_id": "user123"
+  }'
+```
+
+### Get Generations List - All Projects for User
+```bash
+curl -X GET "http://localhost:8000/agent/generations?user_id=user123&limit=20"
+```
+
+### Get Generation Detail - Specific Project
+```bash
+curl -X GET "http://localhost:8000/agent/generations/bkj49k3nt2hc73jbdnp0?user_id=user123"
+```
+
+### Get Generation Detail - Specific Project (No User Validation)
+```bash
+curl -X GET "http://localhost:8000/agent/generations/bkj49k3nt2hc73jbdnp0"
+```
+
+### Get All Recent Projects (No User Filter)
+```bash
+curl -X GET "http://localhost:8000/agent/generations?limit=10"
+```
+
+## Request/Response Format
+
+**Agent Generation Request:**
+- `prompt`: Description (10-1000 chars)
+- `existing_agent`: Optional agent to update
+- `user_id`: Optional user ID
+- `project_id`: Optional project ID for conversation history
+
+**Agent Generation Response:**
+```json
+{
+  "agent": {
+    "name": "CryptoBot",
+    "purpose": "Automated crypto analysis and posting",
+    "personality": "Professional and analytical",
+    "principles": "• Provide accurate analysis\n• Post regularly\n• Stay updated",
+    "model": "gpt-4.1-nano",
+    "temperature": 0.7,
+    "skills": {
+      "twitter": {
+        "enabled": true,
+        "states": {"post_tweet": "public"},
+        "api_key_provider": "platform"
+      },
+      "tavily": {
+        "enabled": true,
+        "states": {"search": "public"},
+        "api_key_provider": "platform"
+      }
+    },
+    "owner": "user123"
+  },
+  "project_id": "bkj49k3nt2hc73jbdnp0",
+  "summary": "Congratulations! You've successfully created CryptoBot...",
+  "tags": [{"id": 3}, {"id": 11}, {"id": 25}]
+}
+```
+
+**Generations List Response:**
+```json
+{
+  "projects": [
+    {
+      "project_id": "bkj49k3nt2hc73jbdnp0",
+      "user_id": "user123",
+      "created_at": 1703123456.789,
+      "last_activity": 1703123556.789,
+      "message_count": 4,
+      "first_message": {
+        "role": "user",
+        "content": "Create a Twitter bot that posts crypto analysis"
+      },
+      "last_message": {
+        "role": "assistant",
+        "content": "I've created CryptoBot with Twitter and research capabilities..."
+      },
+      "conversation_history": [
+        {"role": "user", "content": "Create a Twitter bot..."},
+        {"role": "assistant", "content": "I've created..."},
+        {"role": "user", "content": "Now add web search..."},
+        {"role": "assistant", "content": "I've updated..."}
+      ]
+    }
+  ]
+}
+```
+
+## Testing the Generations API
+
+1. 
**Create an Initial Agent (Get Project ID)** +```bash +curl -X POST "http://localhost:8000/agent/generate" \ + -H "Content-Type: application/json" \ + -d '{ + "prompt": "Create a trading bot for crypto analysis", + "user_id": "test_user_123" + }' +``` +*Save the `project_id` from the response for next steps* + +2. **Continue Conversation (Use Same Project ID)** +```bash +curl -X POST "http://localhost:8000/agent/generate" \ + -H "Content-Type: application/json" \ + -d '{ + "prompt": "Add social media posting capabilities", + "user_id": "test_user_123", + "project_id": "YOUR_PROJECT_ID_FROM_STEP_1" + }' +``` + +3. **Get All Projects for User** +```bash +curl -X GET "http://localhost:8000/agent/generations?user_id=test_user_123" +``` + +4. **Get Specific Project Conversation** +```bash +curl -X GET "http://localhost:8000/agent/generations/YOUR_PROJECT_ID?user_id=test_user_123" +``` + +5. **Test Access Control (Should Return 404)** +```bash +curl -X GET "http://localhost:8000/agent/generations/YOUR_PROJECT_ID?user_id=different_user" +``` + +### API Response Modes + +**Mode 1: All Projects for User** (`user_id` only) +- Returns: List of all projects for the user +- Sorted by: Last activity (most recent first) +- Includes: Full conversation history for each project + +**Mode 2: Specific Project** (`project_id` provided) +- Returns: Single project with full conversation history +- Access Control: Validates `user_id` matches project owner (if provided) +- Error: 404 if project not found or access denied + +### Expected Response Structure + +**All Projects Response:** +```json +{ + "projects": [ + { + "project_id": "bkj49k3nt2hc73jbdnp0", + "user_id": "test_user_123", + "created_at": 1703123456.789, + "last_activity": 1703123556.789, + "message_count": 4, + "conversation_history": [...] + } + ] +} +``` + +**Specific Project Response:** +```json +{ + "projects": [ + { + "project_id": "bkj49k3nt2hc73jbdnp0", + "user_id": "test_user_123", + "created_at": 1703123456.789, + "last_activity": 1703123556.789, + "message_count": 4, + "first_message": {"role": "user", "content": "..."}, + "last_message": {"role": "assistant", "content": "..."}, + "conversation_history": [ + {"role": "user", "content": "Create a trading bot..."}, + {"role": "assistant", "content": "I've created..."}, + {"role": "user", "content": "Add social media..."}, + {"role": "assistant", "content": "I've updated..."} + ] + } + ] +} +``` + +## Features + +### Project Conversation History + +The system maintains conversation history per project_id: + +1. **First Request**: Creates new project_id if not provided +2. **Subsequent Requests**: Use same project_id to maintain context +3. **LLM Context**: Previous conversations guide new generations +4. **Conversation Flow**: + - System prompt + - User message 1 → AI response 1 + - User message 2 → AI response 2 + - Current user message + +This enables iterative agent refinement with context awareness. + +### Automatic Tag Generation + +The system automatically generates exactly 3 relevant tags using Nation API + LLM selection. Always returns 3 tags, never empty. 
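+
+A minimal client-side sketch of consuming the tags is shown below. The endpoint and
+payload shapes follow the examples above; the use of the `requests` library is
+illustrative and not part of this package:
+
+```python
+# Sketch: call the generation endpoint and read the 3 generated tags.
+import requests
+
+resp = requests.post(
+    "http://localhost:8000/agent/generate",
+    json={
+        "prompt": "Create a Twitter bot that posts crypto analysis",
+        "user_id": "user123",
+    },
+)
+resp.raise_for_status()
+body = resp.json()
+
+tag_ids = [tag["id"] for tag in body["tags"]]
+assert len(tag_ids) == 3  # the generator always returns exactly 3 tags
+print(body["project_id"], tag_ids)
+```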
+ + +### API Endpoints + +**List Endpoint**: `/agent/generations` +- Get all projects for a user +- Query parameters: `user_id`, `limit` +- Returns: List of projects with conversation history + +**Detail Endpoint**: `/agent/generations/{project_id}` +- Get specific project conversation history +- Path parameter: `project_id` +- Query parameter: `user_id` (for access validation) +- Returns: Single project with full conversation details \ No newline at end of file diff --git a/app/admin/generator/__init__.py b/app/admin/generator/__init__.py new file mode 100644 index 00000000..ede3b7f3 --- /dev/null +++ b/app/admin/generator/__init__.py @@ -0,0 +1,70 @@ +"""Agent Generator Package. + +AI-powered system for generating IntentKit agent schemas from natural language prompts. +Each LLM call is individually tracked with request ID and retry count for cost analysis. +""" + +from .agent_generator import ( + generate_agent_schema, + generate_validated_agent_schema, +) +from .ai_assistant import ( + enhance_agent, + generate_agent_attributes, + generate_validated_agent, +) +from .conversation_service import ( + ConversationService, + get_conversation_history, + get_project_metadata, + get_projects_by_user, +) +from .llm_logger import ( + LLMLogger, + create_llm_logger, +) +from .skill_processor import ( + filter_skills_for_auto_generation, + identify_skills, +) +from .utils import ( + ALLOWED_MODELS, + extract_token_usage, + generate_agent_summary, + generate_request_id, +) +from .validation import ( + ValidationResult, + validate_agent_create, + validate_schema, +) + +__all__ = [ + # Main generation functions + "generate_agent_schema", + "generate_validated_agent_schema", + "generate_validated_agent", + # AI operations + "enhance_agent", + "generate_agent_attributes", + "generate_agent_summary", + # Conversation history + "ConversationService", + "get_conversation_history", + "get_project_metadata", + "get_projects_by_user", + # LLM logging + "create_llm_logger", + "generate_request_id", + "LLMLogger", + # Skill processing + "identify_skills", + "filter_skills_for_auto_generation", + # Utilities + "extract_token_usage", + "ALLOWED_MODELS", + # Validation + "validate_schema", + "validate_agent_create", + "ValidationResult", +] diff --git a/app/admin/generator/agent_generator.py b/app/admin/generator/agent_generator.py new file mode 100644 index 00000000..143978bf --- /dev/null +++ b/app/admin/generator/agent_generator.py @@ -0,0 +1,206 @@ +"""Agent Generator Module. + +Main orchestrator for AI-powered agent generation from natural language prompts. +This module coordinates the skill processing, validation, and AI assistance modules. 
+""" + +import logging +from typing import TYPE_CHECKING, Any, Dict, Optional, Set, Tuple + +from openai import OpenAI + +from intentkit.config.config import config +from intentkit.models.agent import AgentUpdate + +from .ai_assistant import ( + enhance_agent, + generate_agent_attributes, + generate_validated_agent, +) +from .autonomous_generator import generate_autonomous_configuration +from .skill_processor import ( + filter_skills_for_auto_generation, + identify_skills, + merge_autonomous_skills, +) + +if TYPE_CHECKING: + from .llm_logger import LLMLogger + +logger = logging.getLogger(__name__) + + +async def generate_agent_schema( + prompt: str, + user_id: Optional[str] = None, + existing_agent: Optional[AgentUpdate] = None, + llm_logger: Optional["LLMLogger"] = None, +) -> Tuple[Dict[str, Any], Set[str], Dict[str, Any]]: + """Generate agent schema from a natural language prompt. + + This is the main entry point for agent generation. It handles both new agent + creation and existing agent updates with minimal changes. + + Args: + prompt: Natural language description of the desired agent + user_id: Optional user ID for ownership and validation + existing_agent: Optional existing agent to update (preserves configuration) + llm_logger: Optional LLM logger for tracking individual API calls + + Returns: + A tuple of (agent_schema, identified_skills, token_usage) + """ + logger.info( + f"Generating agent schema from prompt: '{prompt[:50]}{'...' if len(prompt) > 50 else ''}'" + ) + + # Get OpenAI API key from config + api_key = config.openai_api_key + if not api_key: + raise ValueError("OPENAI_API_KEY is not set in configuration") + + # Create OpenAI client + client = OpenAI(api_key=api_key) + + if existing_agent: + # Update existing agent - preserves configuration, makes minimal changes + logger.info(" Updating existing agent with minimal changes") + schema, skills, token_usage = await enhance_agent( + prompt=prompt, + existing_agent=existing_agent, + client=client, + user_id=user_id, + llm_logger=llm_logger, + ) + else: + # Create new agent from scratch + logger.info(" Creating new agent from scratch") + schema, skills, token_usage = await _generate_new_agent_schema( + prompt=prompt, + client=client, + user_id=user_id, + llm_logger=llm_logger, + ) + + logger.info(f"Generated agent schema with {len(skills)} skills: {list(skills)}") + return schema, skills, token_usage + + +async def _generate_new_agent_schema( + prompt: str, + client: OpenAI, + user_id: Optional[str] = None, + llm_logger: Optional["LLMLogger"] = None, +) -> Tuple[Dict[str, Any], Set[str], Dict[str, Any]]: + """Generate a completely new agent schema from a prompt. 
+ + Args: + prompt: Natural language prompt + client: OpenAI client + user_id: Optional user ID + llm_logger: Optional LLM logger for tracking API calls + + Returns: + A tuple of (agent_schema, identified_skills, token_usage) + """ + # Step 1: Check for autonomous patterns first + logger.info(" Step 1: Checking for autonomous task patterns") + autonomous_result = await generate_autonomous_configuration( + prompt, client, llm_logger=llm_logger + ) + + autonomous_configs = [] + autonomous_skills = [] + if autonomous_result: + autonomous_configs, autonomous_skills = autonomous_result + logger.info(f"Generated {len(autonomous_configs)} autonomous tasks") + logger.info(f"Autonomous tasks require skills: {autonomous_skills}") + else: + logger.info( + " No autonomous patterns detected, proceeding with standard agent generation" + ) + + # Step 2: Identify required skills from the prompt + logger.info(" Step 2: Identifying skills from prompt") + skills_config = await identify_skills(prompt, client, llm_logger=llm_logger) + + # Merge autonomous skills with identified skills + if autonomous_skills: + logger.info( + f"Merging {len(autonomous_skills)} autonomous skills with identified skills" + ) + skills_config = merge_autonomous_skills(skills_config, autonomous_skills) + + # Filter out skills that require agent owner API keys + skills_config = await filter_skills_for_auto_generation(skills_config) + + logger.info(f"Final identified skills: {list(skills_config.keys())}") + + # Step 3: Generate agent attributes (name, purpose, personality, etc.) + logger.info(" Step 3: Generating agent attributes") + attributes, token_usage = await generate_agent_attributes( + prompt, skills_config, client, llm_logger=llm_logger, user_id=user_id + ) + + # Step 4: Combine into complete agent schema + logger.info(" Step 4: Assembling complete agent schema") + schema = { + **attributes, + "skills": skills_config, + "model": "gpt-4.1-nano", # Default model + "temperature": 0.7, + "wallet_provider": "cdp", + } + + # Add autonomous configuration if detected + if autonomous_configs: + schema["autonomous"] = [config.model_dump() for config in autonomous_configs] + logger.info( + f"Added {len(autonomous_configs)} autonomous configurations to schema" + ) + + # Log details of each autonomous task + for config in autonomous_configs: + schedule_info = ( + f"{config.minutes} minutes" if config.minutes else config.cron + ) + logger.info(f"Task: '{config.name}' - {schedule_info}") + + # Set user ID if provided + if user_id: + schema["owner"] = user_id + logger.debug(f"Set agent owner: {user_id}") + + identified_skills = set(skills_config.keys()) + autonomous_count = len(autonomous_configs) + logger.info( + f"New agent schema generated with {len(identified_skills)} skills and {autonomous_count} autonomous tasks" + ) + + return schema, identified_skills, token_usage + + +# Main generation function with validation and self-correction +async def generate_validated_agent_schema( + prompt: str, + user_id: Optional[str] = None, + existing_agent: Optional[AgentUpdate] = None, + llm_logger: Optional["LLMLogger"] = None, +) -> Tuple[Dict[str, Any], Set[str], str]: + """Generate and validate agent schema with summary. 
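+
+    Thin wrapper that delegates to ai_assistant.generate_validated_agent, which
+    runs the generate-validate-fix retry loop (up to three attempts by default).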
+ + Args: + prompt: Natural language description of the desired agent + user_id: Optional user ID for ownership and validation + existing_agent: Optional existing agent to update + llm_logger: Optional LLM logger for tracking individual API calls + + Returns: + A tuple of (agent_schema, identified_skills, summary_message) + """ + return await generate_validated_agent( + prompt=prompt, + user_id=user_id, + existing_agent=existing_agent, + llm_logger=llm_logger, + ) diff --git a/app/admin/generator/ai_assistant.py b/app/admin/generator/ai_assistant.py new file mode 100644 index 00000000..7a6a07d5 --- /dev/null +++ b/app/admin/generator/ai_assistant.py @@ -0,0 +1,904 @@ +"""AI Assistant Module. + +This module handles core AI operations for agent generation including: +- Agent enhancement and updates using LLM +- Attribute generation from prompts +- AI-powered error correction and schema fixing +""" + +import json +import logging +import time +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set, Tuple + +from openai import OpenAI + +from intentkit.config.config import config +from intentkit.models.agent import AgentUpdate +from intentkit.models.db import get_session +from intentkit.models.generator import ( + AgentGenerationLog, + AgentGenerationLogCreate, +) + +from .autonomous_generator import generate_autonomous_configuration +from .conversation_service import ConversationService, get_conversation_history +from .skill_processor import ( + filter_skills_for_auto_generation, + identify_skills, + merge_autonomous_skills, +) +from .utils import extract_token_usage, generate_agent_summary +from .validation import ( + validate_agent_create, + validate_schema, +) + +if TYPE_CHECKING: + from .llm_logger import LLMLogger + +logger = logging.getLogger(__name__) + + +async def enhance_agent( + prompt: str, + existing_agent: "AgentUpdate", + client: OpenAI, + user_id: Optional[str] = None, + llm_logger: Optional["LLMLogger"] = None, +) -> Tuple[Dict[str, Any], Set[str], Dict[str, Any]]: + """Generate minimal updates to an existing agent based on a prompt. + + This function preserves the existing agent configuration and only makes + targeted changes based on the prompt, ensuring stability and user customizations. 
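+
+    Newly identified skills are merged into the existing skill set, while the
+    name/purpose/personality/principles text is rewritten only when the prompt
+    explicitly asks for such changes.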
+ + Args: + prompt: The natural language prompt describing desired changes + existing_agent: The current agent configuration + client: OpenAI client for API calls + user_id: Optional user ID for validation + llm_logger: Optional LLM logger for tracking API calls + + Returns: + A tuple of (updated_schema, identified_skills, token_usage) + """ + logger.info("Generating minimal agent update based on existing configuration") + + # Initialize conversation service + conversation_service = None + if llm_logger: + conversation_service = ConversationService( + project_id=llm_logger.request_id, user_id=llm_logger.user_id + ) + # Store the initial user prompt for updates + await conversation_service.add_user_message(prompt) + + # Initialize token usage tracking + total_token_usage = { + "total_tokens": 0, + "input_tokens": 0, + "output_tokens": 0, + "input_tokens_details": None, + "completion_tokens_details": None, + } + + # Convert existing agent to dictionary format + existing_schema = existing_agent.model_dump(exclude_unset=True) + + # Check for autonomous patterns first + logger.info("Checking for autonomous patterns in update prompt") + autonomous_result = await generate_autonomous_configuration( + prompt, client, llm_logger=llm_logger + ) + + autonomous_configs = [] + autonomous_skills = [] + if autonomous_result: + autonomous_configs, autonomous_skills = autonomous_result + logger.info(f"Generated {len(autonomous_configs)} autonomous tasks for update") + logger.info(f"Autonomous tasks require skills: {autonomous_skills}") + + # Use the real skill processor to identify skills from prompt + identified_skills_config = await identify_skills( + prompt, client, llm_logger=llm_logger + ) + identified_skill_names = set(identified_skills_config.keys()) + + # Merge autonomous skills with identified skills + identified_skills_config = merge_autonomous_skills( + identified_skills_config, autonomous_skills + ) + identified_skill_names.update(autonomous_skills) + + logger.info(f"Real skills identified from prompt: {identified_skill_names}") + + # Start with existing configuration + updated_schema = existing_schema.copy() + + # Ensure model field is present (required field) + if "model" not in updated_schema or not updated_schema["model"]: + updated_schema["model"] = "gpt-4.1-nano" # Default model + + # Merge skills carefully - preserve existing, add new real skills + existing_skills = updated_schema.get("skills", {}) + merged_skills = existing_skills.copy() + + # Add newly identified real skills + for skill_name, skill_config in identified_skills_config.items(): + if skill_name not in merged_skills: + merged_skills[skill_name] = skill_config + logger.info(f"Added new skill: {skill_name}") + else: + # Enable existing skill if it was disabled, and merge states + existing_skill = merged_skills[skill_name] + if not existing_skill.get("enabled", False): + merged_skills[skill_name] = skill_config + logger.info(f"Enabled existing skill: {skill_name}") + else: + # Merge states from both existing and new + existing_states = existing_skill.get("states", {}) + new_states = skill_config.get("states", {}) + merged_states = {**existing_states, **new_states} + merged_skills[skill_name]["states"] = merged_states + logger.info(f"Merged states for skill: {skill_name}") + + updated_schema["skills"] = merged_skills + + # Add or update autonomous configuration if detected + if autonomous_configs: + existing_autonomous = updated_schema.get("autonomous", []) + # Convert existing autonomous configs to list of dicts if they aren't 
already + if existing_autonomous and not isinstance(existing_autonomous[0], dict): + existing_autonomous = [ + config.model_dump() for config in existing_autonomous + ] + + # Add new autonomous configs + new_autonomous_dicts = [config.model_dump() for config in autonomous_configs] + updated_autonomous = existing_autonomous + new_autonomous_dicts + updated_schema["autonomous"] = updated_autonomous + logger.info( + f"Added {len(autonomous_configs)} autonomous configurations to existing agent" + ) + + # Filter skills for auto-generation (remove agent-owner API key skills) + updated_schema["skills"] = await filter_skills_for_auto_generation( + updated_schema["skills"] + ) + + # Set user ID if provided + if user_id: + updated_schema["owner"] = user_id + + # Only update agent attributes if the prompt specifically asks for them + should_update_attributes = any( + keyword in prompt.lower() + for keyword in [ + "name", + "purpose", + "personality", + "principle", + "description", + "rename", + "change name", + "update name", + "modify purpose", + "change purpose", + "update personality", + "change personality", + ] + ) + + if should_update_attributes: + logger.info("Prompt requests attribute updates - using AI for text fields only") + + # Get conversation history if logger has a project_id + history_messages = [] + if llm_logger: + try: + history_messages = await get_conversation_history( + project_id=llm_logger.request_id, + user_id=llm_logger.user_id, + ) + except Exception as e: + logger.warning(f"Failed to get conversation history: {e}") + history_messages = [] + + # Prepare system message for agent attribute updates + system_message = { + "role": "system", + "content": f"""You are updating an existing agent's text attributes only. + +CRITICAL INSTRUCTIONS: +1. Only update name, purpose, personality, and principles based on the prompt +2. Keep all existing skills exactly as they are - DO NOT modify skills +3. Keep the existing model and temperature settings +4. Only make changes if the prompt specifically requests them +5. Return the complete agent schema as valid JSON + +The agent currently has these attributes: +- Name: {updated_schema.get("name", "Unnamed Agent")} +- Purpose: {updated_schema.get("purpose", "No purpose defined")} +- Personality: {updated_schema.get("personality", "No personality defined")} +- Principles: {updated_schema.get("principles", "No principles defined")} + +Make minimal changes based on the prompt. 
If this is part of an ongoing conversation, consider the previous context.""", + } + + # Build messages with conversation history + messages = [system_message] + + # Add conversation history if available + if history_messages: + logger.info( + f"Using {len(history_messages)} messages from conversation history for update" + ) + messages.extend(history_messages) + + # Add current request + messages.append( + { + "role": "user", + "content": f"Update request: {prompt}\n\nCurrent agent schema:\n{json.dumps(updated_schema, indent=2)}", + } + ) + + # Log the LLM call if logger is provided + if llm_logger: + async with llm_logger.log_call( + call_type="agent_attribute_update", + prompt=prompt, + retry_count=0, + is_update=True, + existing_agent_id=getattr(existing_agent, "id", None), + llm_model="gpt-4.1-nano", + openai_messages=messages, + ) as call_log: + call_start_time = time.time() + + # Make OpenAI API call + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=messages, + temperature=0.3, + max_tokens=2000, + ) + + # Extract generated content + ai_response_content = response.choices[0].message.content.strip() + + try: + # Parse AI response + ai_updated_schema = json.loads(ai_response_content) + + # Safely merge only text attributes, preserving skills and other configs + for attr in ["name", "purpose", "personality", "principles"]: + if attr in ai_updated_schema: + updated_schema[attr] = ai_updated_schema[attr] + + generated_content = { + "updated_attributes": { + attr: ai_updated_schema.get(attr) + for attr in ["name", "purpose", "personality", "principles"] + if attr in ai_updated_schema + } + } + except json.JSONDecodeError as e: + logger.warning(f"Failed to parse AI response as JSON: {e}") + generated_content = {"error": "Failed to parse AI response"} + + # Log successful call + await llm_logger.log_successful_call( + call_log=call_log, + response=response, + generated_content=generated_content, + openai_messages=messages, + call_start_time=call_start_time, + ) + + # Extract token usage for return + total_token_usage = extract_token_usage(response) + else: + # Make call without logging (fallback) + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=messages, + temperature=0.3, + max_tokens=2000, + ) + + ai_response_content = response.choices[0].message.content.strip() + + try: + ai_updated_schema = json.loads(ai_response_content) + for attr in ["name", "purpose", "personality", "principles"]: + if attr in ai_updated_schema: + updated_schema[attr] = ai_updated_schema[attr] + except json.JSONDecodeError as e: + logger.warning(f"Failed to parse AI response as JSON: {e}") + + total_token_usage = extract_token_usage(response) + + # Store assistant response in conversation for updates + if conversation_service: + if should_update_attributes and len(identified_skill_names) > 0: + response_content = f"I've updated your agent with the requested changes and added {len(identified_skill_names)} skills: {', '.join(identified_skill_names)}." + elif should_update_attributes: + response_content = "I've updated your agent's attributes as requested." + else: + response_content = f"I've updated your agent with {len(identified_skill_names)} new skills: {', '.join(identified_skill_names) if identified_skill_names else 'none'}." 
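+
+        # Persist the assistant's reply so follow-up update prompts can pull it
+        # back in as conversation context via get_conversation_history.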
+ + await conversation_service.add_assistant_message( + content=response_content, + message_metadata={ + "call_type": "agent_enhancement", + "identified_skills": list(identified_skill_names), + "attribute_updates": should_update_attributes, + }, + ) + + logger.info("Agent enhancement completed with minimal changes") + return updated_schema, identified_skill_names, total_token_usage + + +async def generate_agent_attributes( + prompt: str, + skills_config: Dict[str, Any], + client: OpenAI, + llm_logger: Optional["LLMLogger"] = None, + user_id: Optional[str] = None, +) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """Generate agent attributes (name, purpose, personality, principles) from prompt. + + Args: + prompt: The natural language prompt + skills_config: Configuration of identified skills + client: OpenAI client for API calls + llm_logger: Optional LLM logger for tracking API calls + + Returns: + A tuple of (agent_attributes, token_usage) + """ + logger.info("Generating agent attributes from prompt") + + # Create skill summary for context + skill_names = list(skills_config.keys()) + skill_summary = ", ".join(skill_names) if skill_names else "no specific skills" + + # Get conversation history if we have an llm_logger + history_messages = [] + if llm_logger: + try: + history_messages = await get_conversation_history( + project_id=llm_logger.request_id, + user_id=llm_logger.user_id, + ) + except Exception as e: + logger.warning(f"Failed to get conversation history: {e}") + history_messages = [] + + # Prepare messages for agent generation + system_message = { + "role": "system", + "content": f"""You are generating agent attributes for an IntentKit AI agent. + +Based on the user's description, create appropriate attributes for an agent that will use these skills: {skill_summary} + +Generate a JSON object with these exact fields: +- "name": A clear, descriptive name for the agent (2-4 words) +- "purpose": A concise description of what the agent does (1-2 sentences) +- "personality": The agent's communication style and personality traits (1-2 sentences) +- "principles": Core rules and guidelines the agent follows (1-3 bullet points) + +Make the attributes coherent and well-suited for the identified skills. +Return only valid JSON, no additional text. 
+
+If this is part of an ongoing conversation, consider the previous context while creating the agent.""",
+    }
+
+    # Build messages with conversation history
+    messages = [system_message]
+
+    # Add conversation history if available
+    if history_messages:
+        logger.info(f"Using {len(history_messages)} messages from conversation history")
+        messages.extend(history_messages)
+
+    # Add current user message
+    messages.append(
+        {
+            "role": "user",
+            "content": f"Create an agent for: {prompt}",
+        }
+    )
+
+    # Log the LLM call if logger is provided
+    if llm_logger:
+        async with llm_logger.log_call(
+            call_type="agent_attribute_generation",
+            prompt=prompt,
+            retry_count=0,
+            is_update=False,
+            llm_model="gpt-4.1-nano",
+            openai_messages=messages,
+        ) as call_log:
+            call_start_time = time.time()
+
+            # Make OpenAI API call
+            response = client.chat.completions.create(
+                model="gpt-4.1-nano",
+                messages=messages,
+                temperature=0.7,
+                max_tokens=1500,
+            )
+
+            # Extract and parse generated content
+            ai_response_content = response.choices[0].message.content.strip()
+
+            try:
+                attributes = json.loads(ai_response_content)
+                generated_content = {"attributes": attributes}
+            except json.JSONDecodeError as e:
+                logger.error(f"Failed to parse agent attributes JSON: {e}")
+                # Provide fallback attributes
+                attributes = {
+                    "name": "AI Assistant",
+                    "purpose": "A helpful AI agent designed to assist users with various tasks.",
+                    "personality": "Friendly, professional, and helpful. Always strives to provide accurate and useful information.",
+                    "principles": "• Be helpful and accurate\n• Respect user privacy\n• Provide clear explanations",
+                }
+                generated_content = {
+                    "error": "Failed to parse AI response",
+                    "fallback_used": True,
+                    "attributes": attributes,
+                }
+
+            # Log successful call
+            await llm_logger.log_successful_call(
+                call_log=call_log,
+                response=response,
+                generated_content=generated_content,
+                openai_messages=messages,
+                call_start_time=call_start_time,
+            )
+
+            # Extract token usage
+            token_usage = extract_token_usage(response)
+    else:
+        # Make call without logging (fallback)
+        response = client.chat.completions.create(
+            model="gpt-4.1-nano",
+            messages=messages,
+            temperature=0.7,
+            max_tokens=1500,
+        )
+
+        ai_response_content = response.choices[0].message.content.strip()
+
+        try:
+            attributes = json.loads(ai_response_content)
+        except json.JSONDecodeError as e:
+            logger.error(f"Failed to parse agent attributes JSON: {e}")
+            attributes = {
+                "name": "AI Assistant",
+                "purpose": "A helpful AI agent designed to assist users with various tasks.",
+                "personality": "Friendly, professional, and helpful. Always strives to provide accurate and useful information.",
+                "principles": "• Be helpful and accurate\n• Respect user privacy\n• Provide clear explanations",
+            }
+
+        token_usage = extract_token_usage(response)
+
+    logger.info(f"Generated agent attributes: {attributes.get('name', 'Unknown')}")
+    return attributes, token_usage
+
+
+async def generate_validated_agent(
+    prompt: str,
+    user_id: Optional[str] = None,
+    existing_agent: Optional["AgentUpdate"] = None,
+    llm_logger: Optional["LLMLogger"] = None,
+    max_attempts: int = 3,
+) -> Tuple[Dict[str, Any], Set[str], str]:
+    """Generate agent schema with automatic validation retry and AI self-correction.
+
+    This function uses an iterative approach:
+    1. Generate agent schema
+    2. Validate it
+    3. If validation fails, feed raw errors back to AI for self-correction
+    4. 
Repeat until validation passes or max attempts reached + + Args: + prompt: The natural language prompt describing the agent + user_id: Optional user ID for validation + existing_agent: Optional existing agent to update + llm_logger: Optional LLM logger for tracking API calls + max_attempts: Maximum number of generation attempts + + Returns: + A tuple of (validated_schema, identified_skills, summary_message) + """ + start_time = time.time() + + # Initialize conversation service + conversation_service = None + if llm_logger: + conversation_service = ConversationService( + project_id=llm_logger.request_id, user_id=llm_logger.user_id + ) + # Store the initial user prompt + await conversation_service.add_user_message(prompt) + + # Create generation log (keeping existing aggregate logging for backward compatibility) + async with get_session() as session: + log_data = AgentGenerationLogCreate( + user_id=user_id, + prompt=prompt, + existing_agent_id=getattr(existing_agent, "id", None), + is_update=existing_agent is not None, + ) + generation_log = await AgentGenerationLog.create(session, log_data) + + # Track cumulative metrics + total_tokens_used = 0 + total_input_tokens = 0 + total_output_tokens = 0 + all_token_details = [] + + # Get OpenAI API key from config + api_key = config.openai_api_key + if not api_key: + error_msg = "OPENAI_API_KEY is not set in configuration" + # Update log with error + async with get_session() as session: + await generation_log.update_completion( + session=session, + success=False, + error_message=error_msg, + generation_time_ms=int((time.time() - start_time) * 1000), + ) + raise ValueError(error_msg) + + # Create OpenAI client + client = OpenAI(api_key=api_key) + + last_schema = None + last_errors = [] + identified_skills = set() + + try: + for attempt in range(max_attempts): + try: + logger.info(f"Schema generation attempt {attempt + 1}/{max_attempts}") + + if attempt == 0: + # First attempt: Generate from scratch + from .agent_generator import generate_agent_schema + + schema, skills, token_usage = await generate_agent_schema( + prompt=prompt, + user_id=user_id, + existing_agent=existing_agent, + llm_logger=llm_logger, + ) + last_schema = schema + identified_skills = skills + + # Accumulate token usage from first attempt + if token_usage: + total_tokens_used += token_usage["total_tokens"] + total_input_tokens += token_usage["input_tokens"] + total_output_tokens += token_usage["output_tokens"] + all_token_details.append(token_usage) + else: + # Subsequent attempts: Let AI fix the validation errors + logger.info("Feeding validation errors to AI for self-correction") + schema, skills, token_usage = await fix_agent_schema_with_ai_logged( + original_prompt=prompt, + failed_schema=last_schema, + validation_errors=last_errors, + client=client, + user_id=user_id, + existing_agent=existing_agent, + llm_logger=llm_logger, + retry_count=attempt, + ) + last_schema = schema + identified_skills = identified_skills.union(skills) + + # Accumulate token usage + if token_usage: + total_tokens_used += token_usage["total_tokens"] + total_input_tokens += token_usage["input_tokens"] + total_output_tokens += token_usage["output_tokens"] + all_token_details.append(token_usage) + + # Validate the schema + schema_validation = await validate_schema(schema) + agent_validation = await validate_agent_create(schema, user_id) + + # Check if validation passed + if schema_validation.valid and agent_validation.valid: + logger.info(f"Validation passed on attempt {attempt + 1}") + + # Generate summary 
message + summary = await generate_agent_summary( + schema=schema, + identified_skills=identified_skills, + client=client, + llm_logger=llm_logger, + ) + + # Store assistant response in conversation + if conversation_service: + await conversation_service.add_assistant_message( + content=summary, + message_metadata={ + "call_type": "agent_generation_success", + "identified_skills": list(identified_skills), + "attempt": attempt + 1, + "validation_passed": True, + }, + ) + + # Update log with success + async with get_session() as session: + await generation_log.update_completion( + session=session, + generated_agent_schema=schema, + identified_skills=list(identified_skills), + llm_model="gpt-4.1-nano", + total_tokens=total_tokens_used, + input_tokens=total_input_tokens, + cached_input_tokens=sum( + usage.get("cached_input_tokens", 0) + for usage in all_token_details + ), + output_tokens=total_output_tokens, + generation_time_ms=int((time.time() - start_time) * 1000), + retry_count=attempt, + success=True, + ) + + return schema, identified_skills, summary + + # Collect raw validation errors for AI feedback + last_errors = [] + if not schema_validation.valid: + last_errors.extend( + [f"Schema error: {error}" for error in schema_validation.errors] + ) + if not agent_validation.valid: + last_errors.extend( + [ + f"Agent validation error: {error}" + for error in agent_validation.errors + ] + ) + + logger.warning( + f"Attempt {attempt + 1} validation failed with {len(last_errors)} errors" + ) + + except Exception as e: + logger.error(f"Attempt {attempt + 1} failed with exception: {str(e)}") + last_errors = [f"Generation exception: {str(e)}"] + + # All attempts failed + error_summary = "; ".join(last_errors[-5:]) # Last 5 errors for context + error_message = f"Failed to generate valid agent schema after {max_attempts} attempts. 
Last errors: {error_summary}" + + # Update log with failure + async with get_session() as session: + await generation_log.update_completion( + session=session, + generated_agent_schema=last_schema, + identified_skills=list(identified_skills), + llm_model="gpt-4.1-nano", + total_tokens=total_tokens_used, + input_tokens=total_input_tokens, + cached_input_tokens=sum( + usage.get("cached_input_tokens", 0) for usage in all_token_details + ), + output_tokens=total_output_tokens, + generation_time_ms=int((time.time() - start_time) * 1000), + retry_count=max_attempts, + validation_errors={"errors": last_errors}, + success=False, + error_message=error_message, + ) + + raise ValueError(error_message) + + except Exception as e: + # Update log with unexpected error + async with get_session() as session: + await generation_log.update_completion( + session=session, + generated_agent_schema=last_schema, + identified_skills=list(identified_skills), + llm_model="gpt-4.1-nano", + total_tokens=total_tokens_used, + input_tokens=total_input_tokens, + cached_input_tokens=sum( + usage.get("cached_input_tokens", 0) for usage in all_token_details + ), + output_tokens=total_output_tokens, + generation_time_ms=int((time.time() - start_time) * 1000), + retry_count=max_attempts, + validation_errors={"errors": [str(e)]}, + success=False, + error_message=str(e), + ) + raise + + +async def fix_agent_schema_with_ai_logged( + original_prompt: str, + failed_schema: Dict[str, Any], + validation_errors: List[str], + client: OpenAI, + user_id: Optional[str] = None, + existing_agent: Optional["AgentUpdate"] = None, + llm_logger: Optional["LLMLogger"] = None, + retry_count: int = 1, +) -> Tuple[Dict[str, Any], Set[str], Dict[str, Any]]: + """Fix agent schema using AI based on validation errors. + + Args: + original_prompt: The original user prompt + failed_schema: The schema that failed validation + validation_errors: List of validation error messages + client: OpenAI client for API calls + user_id: Optional user ID for validation + existing_agent: Optional existing agent context + llm_logger: Optional LLM logger for tracking API calls + retry_count: Current retry attempt number + + Returns: + A tuple of (fixed_schema, identified_skills, token_usage) + """ + logger.info(f"Attempting to fix schema using AI (retry {retry_count})") + + # Prepare detailed error context for AI + error_details = "\n".join([f"- {error}" for error in validation_errors]) + + # Prepare messages for schema fixing + messages = [ + { + "role": "system", + "content": """You are an expert at fixing IntentKit agent schema validation errors. + +The user created an agent but the schema has validation errors. Your job is to fix these errors while preserving the user's intent. + +CRITICAL RULES: +1. Only use real IntentKit skills that actually exist +2. Skills must have real states (not made-up ones) +3. Fix validation errors while maintaining user intent +4. Return only valid JSON for the complete agent schema +5. Do not add fake skills or fake states +6. 
ALWAYS preserve the owner field if it exists in the original schema + +AUTONOMOUS CONFIGURATION RULES: +- For autonomous tasks, use EITHER "minutes" OR "cron", NEVER both +- If both are present, keep only "minutes" and remove "cron" entirely +- If "cron" is null/None, remove it entirely from the configuration +- Minimum interval is 5 minutes for "minutes" field +- Example: {"minutes": 60} OR {"cron": "0 * * * *"} but NOT both + +Common validation errors and fixes: +- Missing required fields: Add them with appropriate values +- Invalid skill names: Remove or replace with real skills +- Invalid skill states: Replace with real states for that skill +- Invalid model names: Use gpt-4.1-nano as default +- Missing skill configurations: Add proper enabled/states/api_key_provider structure +- Missing owner field: Will be automatically added after your response +- "only one of minutes or cron can be set": Remove the cron field if minutes is present""", + }, + { + "role": "user", + "content": f"""Original user request: {original_prompt} + +Failed schema: +{json.dumps(failed_schema, indent=2)} + +Validation errors to fix: +{error_details} + +Please fix these errors and return the corrected agent schema as valid JSON.""", + }, + ] + + # Log the LLM call if logger is provided + if llm_logger: + async with llm_logger.log_call( + call_type="schema_error_correction", + prompt=original_prompt, + retry_count=retry_count, + is_update=existing_agent is not None, + existing_agent_id=getattr(existing_agent, "id", None), + llm_model="gpt-4.1-nano", + openai_messages=messages, + ) as call_log: + call_start_time = time.time() + + # Make OpenAI API call + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=messages, + temperature=0.3, + max_tokens=3000, + ) + + # Extract and parse generated content + ai_response_content = response.choices[0].message.content.strip() + + try: + # Parse the fixed schema + fixed_schema = json.loads(ai_response_content) + + # Ensure owner is set if user_id is provided + if user_id: + fixed_schema["owner"] = user_id + + # Extract skills for return value + identified_skills = set(fixed_schema.get("skills", {}).keys()) + + generated_content = { + "fixed_schema": fixed_schema, + "validation_errors_addressed": validation_errors, + "identified_skills": list(identified_skills), + } + except json.JSONDecodeError as e: + logger.error(f"Failed to parse AI-fixed schema JSON: {e}") + # Return original schema if AI response is invalid + fixed_schema = failed_schema + # Ensure owner is set even for fallback schema + if user_id: + fixed_schema["owner"] = user_id + identified_skills = set(failed_schema.get("skills", {}).keys()) + generated_content = { + "error": "Failed to parse AI response", + "raw_response": ai_response_content, + "fallback_schema": fixed_schema, + } + + # Log successful call + await llm_logger.log_successful_call( + call_log=call_log, + response=response, + generated_content=generated_content, + openai_messages=messages, + call_start_time=call_start_time, + ) + + # Extract token usage + token_usage = extract_token_usage(response) + else: + # Make call without logging (fallback) + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=messages, + temperature=0.3, + max_tokens=3000, + ) + + ai_response_content = response.choices[0].message.content.strip() + + try: + fixed_schema = json.loads(ai_response_content) + # Ensure owner is set if user_id is provided + if user_id: + fixed_schema["owner"] = user_id + identified_skills = 
set(fixed_schema.get("skills", {}).keys()) + except json.JSONDecodeError as e: + logger.error(f"Failed to parse AI-fixed schema JSON: {e}") + fixed_schema = failed_schema + # Ensure owner is set even for fallback schema + if user_id: + fixed_schema["owner"] = user_id + identified_skills = set(failed_schema.get("skills", {}).keys()) + + token_usage = extract_token_usage(response) + + logger.info(f"AI schema correction completed (retry {retry_count})") + return fixed_schema, identified_skills, token_usage diff --git a/app/admin/generator/autonomous_generator.py b/app/admin/generator/autonomous_generator.py new file mode 100644 index 00000000..8bdcce20 --- /dev/null +++ b/app/admin/generator/autonomous_generator.py @@ -0,0 +1,232 @@ +"""Autonomous Task Generator Module. + +AI-based autonomous configuration generator for IntentKit agents. +Uses LLM to detect scheduling patterns and generate proper autonomous configurations. +""" + +import json +import logging +import time +from typing import TYPE_CHECKING, List, Optional, Tuple + +from epyxid import XID +from openai import OpenAI + +from intentkit.models.agent import AgentAutonomous +from intentkit.skills import __all__ as available_skill_categories + +if TYPE_CHECKING: + from .llm_logger import LLMLogger + +logger = logging.getLogger(__name__) + + +async def generate_autonomous_configuration( + prompt: str, + client: OpenAI, + llm_logger: Optional["LLMLogger"] = None, +) -> Optional[Tuple[List[AgentAutonomous], List[str]]]: + """Generate autonomous configuration from a prompt using AI. + + Args: + prompt: The natural language prompt to analyze + client: OpenAI client for LLM analysis + llm_logger: Optional LLM logger for tracking API calls + + Returns: + Tuple of (autonomous_configs, required_skills) if autonomous pattern detected, + None otherwise + """ + logger.info("Using AI to analyze prompt for autonomous patterns") + logger.debug( + f"Analyzing prompt: '{prompt[:100]}{'...' if len(prompt) > 100 else ''}'" + ) + + system_message = f"""You are an expert at analyzing user prompts to detect autonomous task patterns and generating IntentKit agent configurations. + +TASK: Determine if the prompt describes a task that should run automatically on a schedule, and if so, generate the proper configuration. 
+ +AVAILABLE SKILLS: {", ".join(available_skill_categories)} + +AUTONOMOUS FORMAT REQUIREMENTS: +- id: lowercase alphanumeric with dashes, max 20 chars (auto-generated) +- name: task display name, max 50 chars +- description: what the task does, max 200 chars +- prompt: the actual command/prompt for the agent to execute, max 20,000 chars +- enabled: true +- schedule: EITHER "minutes" (integer) OR "cron" (string), minimum 5 minutes +- required_skills: list of skills needed from available skills + +EXAMPLES OF AUTONOMOUS PATTERNS: +- "Create an agent that buys 0.1 eth every hour" → 60 minutes, needs "cdp" skill +- "Build a bot that posts tweets daily" → 1440 minutes, needs "twitter" skill +- "Monitor my portfolio every 30 minutes" → 30 minutes, needs "portfolio" skill + +EXAMPLES OF NON-AUTONOMOUS PATTERNS: +- "Create a trading bot" → general request, not specific scheduled task +- "Help me analyze crypto" → assistance request, not autonomous + +RESPONSE FORMAT: +If autonomous pattern detected: +{{ + "has_autonomous": true, + "autonomous_config": {{ + "name": "Brief task name", + "description": "What this automation does", + "prompt": "Exact command to execute (no scheduling words)", + "minutes": 60 // OR "cron": "0 * * * *" + }}, + "required_skills": ["skill1", "skill2"] +}} + +If no autonomous pattern: +{{ + "has_autonomous": false +}} + +Be accurate and only detect true autonomous patterns with clear scheduling intent.""" + + messages = [ + {"role": "system", "content": system_message}, + {"role": "user", "content": f"Analyze this prompt: {prompt}"}, + ] + + try: + logger.debug("Sending prompt to GPT-4 for autonomous pattern analysis") + + result_text = "" # Initialize result_text + + # Log the LLM call if logger is provided + if llm_logger: + async with llm_logger.log_call( + call_type="autonomous_pattern_analysis", + prompt=prompt, + retry_count=0, + is_update=False, + llm_model="gpt-4.1", + openai_messages=messages, + ) as call_log: + call_start_time = time.time() + + try: + # Make OpenAI API call + response = client.chat.completions.create( + model="gpt-4.1", + messages=messages, + temperature=0.1, + max_tokens=500, + ) + except Exception as api_error: + logger.error(f"OpenAI API call failed: {api_error}") + raise api_error + + result_text = response.choices[0].message.content.strip() + logger.debug( + f"GPT-4 response: {result_text[:200]}{'...' if len(result_text) > 200 else ''}" + ) + + # Log successful call + await llm_logger.log_successful_call( + call_log=call_log, + response=response, + generated_content={"analysis_result": result_text}, + openai_messages=messages, + call_start_time=call_start_time, + ) + else: + # Make call without logging (fallback) + try: + response = client.chat.completions.create( + model="gpt-4.1", messages=messages, temperature=0.1, max_tokens=500 + ) + except Exception as api_error: + logger.error(f"OpenAI API call failed (no logger): {api_error}") + raise api_error + + result_text = response.choices[0].message.content.strip() + logger.debug( + f"GPT-4 response: {result_text[:200]}{'...' if len(result_text) > 200 else ''}" + ) + + result = json.loads(result_text) + + if not result.get("has_autonomous", False): + logger.info(" No autonomous pattern detected in prompt") + return None + + logger.info(" Autonomous pattern detected! 
Processing configuration...") + + # Extract and validate configuration + config_data = result["autonomous_config"] + required_skills = result.get("required_skills", []) + + logger.info(f"Required skills for autonomous task: {required_skills}") + + # Validate required skills are available + valid_skills = [ + skill for skill in required_skills if skill in available_skill_categories + ] + if len(valid_skills) != len(required_skills): + invalid_skills = set(required_skills) - set(valid_skills) + logger.warning(f"Some required skills not available: {invalid_skills}") + logger.info(f"Valid skills that will be activated: {valid_skills}") + else: + logger.info(f"All required skills are available: {valid_skills}") + + # Generate autonomous configuration + task_id = str(XID())[:10].lower() + task_name = config_data["name"][:50] + task_description = config_data["description"][:200] + task_prompt = config_data["prompt"][:20000] + + autonomous_config = { + "id": task_id, + "name": task_name, + "description": task_description, + "prompt": task_prompt, + "enabled": True, + } + + # Set either minutes or cron, not both + if config_data.get("minutes"): + autonomous_config["minutes"] = max( + 5, int(config_data["minutes"]) + ) # Enforce minimum 5 minutes + logger.info(f"Schedule: Every {autonomous_config['minutes']} minutes") + elif config_data.get("cron"): + cron_expr = config_data["cron"] + autonomous_config["cron"] = cron_expr + logger.info(f"Schedule: Cron expression '{cron_expr}'") + else: + logger.error(" No valid schedule provided in autonomous config") + return None + + # Create AgentAutonomous object + try: + autonomous_obj = AgentAutonomous(**autonomous_config) + schedule_info = ( + f"{autonomous_obj.minutes} minutes" + if autonomous_obj.minutes + else autonomous_obj.cron + ) + logger.info( + f"Generated autonomous task: '{autonomous_obj.name}' ({schedule_info})" + ) + logger.info(f"Task details: {autonomous_obj.description}") + logger.info(f"Task prompt: '{autonomous_obj.prompt}'") + + return [autonomous_obj], valid_skills + except Exception as e: + logger.error(f"Failed to create AgentAutonomous object: {e}") + logger.debug(f"Config data that failed: {autonomous_config}") + return None + + except json.JSONDecodeError as e: + logger.error(f"Failed to parse LLM response as JSON: {e}") + if "result_text" in locals(): + logger.debug(f"Raw LLM response: {result_text}") + return None + except Exception as e: + logger.error(f"LLM autonomous analysis failed: {e}") + logger.debug(f"Error details: {str(e)}") + return None diff --git a/app/admin/generator/conversation_service.py b/app/admin/generator/conversation_service.py new file mode 100644 index 00000000..5ed9f868 --- /dev/null +++ b/app/admin/generator/conversation_service.py @@ -0,0 +1,225 @@ +"""Conversation Service Module. + +Handles conversation history storage and retrieval for agent generation. +This is separate from LLM logging which tracks technical API calls. +""" + +import logging +from typing import Any, Dict, List, Optional + +from intentkit.models.conversation import ( + ConversationMessage, + ConversationMessageCreate, + ConversationProject, + ConversationProjectCreate, +) + +logger = logging.getLogger(__name__) + + +class ConversationService: + """Service for managing conversation history.""" + + def __init__(self, project_id: str, user_id: Optional[str] = None): + """Initialize conversation service. 
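+
+        The project ID doubles as the generation request ID, so every message
+        recorded while handling one request lands in the same project.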
+ + Args: + project_id: Unique identifier for the conversation project + user_id: Optional user ID for access control + """ + self.project_id = project_id + self.user_id = user_id + self._project: Optional[ConversationProject] = None + + async def _ensure_project(self) -> ConversationProject: + """Ensure project exists and return it.""" + if not self._project: + self._project = await create_or_get_project(self.project_id, self.user_id) + return self._project + + async def add_user_message( + self, content: str, message_metadata: Optional[Dict[str, Any]] = None + ) -> ConversationMessage: + """Add a user message to the conversation.""" + return await add_message( + self.project_id, "user", content, message_metadata, self.user_id + ) + + async def add_assistant_message( + self, content: str, message_metadata: Optional[Dict[str, Any]] = None + ) -> ConversationMessage: + """Add an assistant message to the conversation.""" + return await add_message( + self.project_id, "assistant", content, message_metadata, self.user_id + ) + + async def get_history(self) -> List[Dict[str, Any]]: + """Get the conversation history.""" + try: + return await get_conversation_history(self.project_id, self.user_id) + except ValueError: + return [] + + async def get_recent_context(self, max_messages: int = 10) -> List[Dict[str, Any]]: + """Get recent conversation context for the LLM.""" + history = await self.get_history() + return history[-max_messages:] if history else [] + + def format_ai_response( + self, content: Dict[str, Any], call_type: str + ) -> Optional[str]: + """Format AI response content for conversation history. + + Args: + content: Generated content from the AI call + call_type: Type of AI operation + + Returns: + Formatted response string or None if no response needed + """ + if call_type == "agent_attribute_generation" and "attributes" in content: + attrs = content["attributes"] + response = "I've created an agent with the following attributes:\n" + response += f"Name: {attrs.get('name', 'N/A')}\n" + response += f"Purpose: {attrs.get('purpose', 'N/A')}\n" + response += f"Personality: {attrs.get('personality', 'N/A')}\n" + response += f"Principles: {attrs.get('principles', 'N/A')}" + return response + + elif call_type == "agent_attribute_update" and "updated_attributes" in content: + updates = content["updated_attributes"] + response = "I've updated the agent with the following changes:\n" + for attr, value in updates.items(): + if value: + response += f"{attr.title()}: {value}\n" + return response + + elif call_type == "schema_error_correction": + return "I've corrected the agent schema to fix validation errors." + + elif call_type == "tag_generation": + if "selected_tags" in content: + tags = content["selected_tags"] + if tags: + return f"I've generated the following tags for this agent: {', '.join(tags)}" + else: + return "I couldn't find appropriate tags for this agent from the available categories." + else: + return "I attempted to generate tags for the agent." 
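+
+        # Call types without a user-facing summary (e.g. autonomous pattern
+        # analysis) fall through and produce no conversation entry.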
+ + return None + + +async def create_or_get_project( + project_id: str, user_id: Optional[str] = None +) -> ConversationProject: + """Create or get a conversation project.""" + # Try to get existing project first + existing_project = await ConversationProject.get(project_id) + if existing_project: + return existing_project + + # Create new project + project_create = ConversationProjectCreate( + id=project_id, + user_id=user_id, + ) + return await project_create.save() + + +async def add_message( + project_id: str, + role: str, + content: str, + message_metadata: Optional[Dict[str, Any]] = None, + user_id: Optional[str] = None, +) -> ConversationMessage: + """Add a message to a conversation project.""" + # Ensure project exists + await create_or_get_project(project_id, user_id) + + # Create and save message + message_create = ConversationMessageCreate( + project_id=project_id, + role=role, + content=content, + message_metadata=message_metadata, + ) + message = await message_create.save() + + # Update project activity + project = await ConversationProject.get(project_id) + if project: + await project.update_activity() + + return message + + +async def get_conversation_history( + project_id: str, user_id: Optional[str] = None +) -> List[Dict[str, Any]]: + """Get conversation history for a project.""" + messages = await ConversationMessage.get_by_project(project_id, user_id) + + if not messages: + raise ValueError(f"No conversation found for project {project_id}") + + # Convert to dict format expected by API + return [ + { + "id": message.id, + "role": message.role, + "content": message.content, + "metadata": message.message_metadata or {}, + "created_at": message.created_at.isoformat(), + } + for message in messages + ] + + +async def get_projects_by_user( + user_id: Optional[str] = None, limit: int = 50 +) -> List[Dict[str, Any]]: + """Get projects by user with their conversation history.""" + projects = await ConversationProject.get_by_user(user_id, limit) + + result = [] + for project in projects: + # Get conversation history for each project + try: + conversation_history = await get_conversation_history(project.id, user_id) + except ValueError: + # No conversation history for this project + conversation_history = [] + + result.append( + { + "project_id": project.id, + "user_id": project.user_id, + "created_at": project.created_at.isoformat(), + "last_activity": project.last_activity.isoformat(), + "message_count": len(conversation_history), + "last_message": conversation_history[-1] + if conversation_history + else None, + "first_message": conversation_history[0] + if conversation_history + else None, + "conversation_history": conversation_history, + } + ) + + return result + + +async def get_project_metadata(project_id: str) -> Optional[Dict[str, Any]]: + """Get project metadata.""" + project = await ConversationProject.get(project_id) + if not project: + return None + + return { + "user_id": project.user_id, + "created_at": project.created_at.timestamp(), + "last_activity": project.last_activity.timestamp(), + } diff --git a/app/admin/generator/llm_logger.py b/app/admin/generator/llm_logger.py new file mode 100644 index 00000000..5be2ce60 --- /dev/null +++ b/app/admin/generator/llm_logger.py @@ -0,0 +1,108 @@ +"""LLM Call Logger Module. + +Tracks LLM API calls for cost analysis and debugging. +For conversation history, use conversation_service.py instead. 
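+
+Example (illustrative sketch; uses only the names defined in this module,
+with a made-up user ID and prompt):
+
+    llm_logger = create_llm_logger(user_id="user-123")
+    async with llm_logger.log_call(
+        call_type="agent_generation", prompt="Create a trading bot"
+    ) as call_log:
+        ...  # make the OpenAI call, then await log_successful_call(...)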
+""" + +import logging +from contextlib import asynccontextmanager +from typing import Any, Dict, List, Optional + +from .utils import generate_request_id + +logger = logging.getLogger(__name__) + + +class LLMLogger: + """Logger for tracking LLM API calls and costs.""" + + def __init__(self, request_id: str, user_id: Optional[str] = None): + """Initialize the LLM logger. + + Args: + request_id: Unique request ID that groups related LLM calls + user_id: Optional user ID for the request + """ + self.request_id = request_id + self.user_id = user_id + + @asynccontextmanager + async def log_call( + self, + call_type: str, + prompt: str, + retry_count: int = 0, + is_update: bool = False, + existing_agent_id: Optional[str] = None, + llm_model: Optional[str] = None, + openai_messages: Optional[List[Dict[str, Any]]] = None, + ): + """Context manager for logging an LLM call. + + Args: + call_type: Type of LLM call (e.g., 'agent_generation') + prompt: The original prompt for this generation request + retry_count: Retry attempt number (0 for initial, 1+ for retries) + is_update: Whether this is an update operation + existing_agent_id: ID of existing agent if update + llm_model: LLM model being used + openai_messages: Messages being sent to OpenAI + + Yields: + Simple dict for tracking this call + """ + call_info = { + "type": call_type, + "prompt": prompt, + "request_id": self.request_id, + "retry_count": retry_count, + } + + logger.info( + f"Started LLM call: {call_type} (request_id={self.request_id}, retry={retry_count})" + ) + + try: + yield call_info + except Exception as e: + logger.error( + f"LLM call failed: {call_type} (request_id={self.request_id}): {str(e)}" + ) + raise + + async def log_successful_call( + self, + call_log: Dict[str, Any], + response: Any, + generated_content: Optional[Dict[str, Any]] = None, + openai_messages: Optional[List[Dict[str, Any]]] = None, + call_start_time: Optional[float] = None, + ): + """Log a successful LLM call completion. + + Args: + call_log: The call log dict to update + response: OpenAI API response + generated_content: The generated content from the call + openai_messages: Messages sent to OpenAI + call_start_time: When the call started (for duration calculation) + """ + logger.info( + f"LLM call completed successfully: {call_log.get('type', 'unknown')}" + ) + + # Note: Conversation history is now handled by ConversationService + # This logger now only tracks LLM call metrics and costs + + +def create_llm_logger(user_id: Optional[str] = None) -> LLMLogger: + """Create a new LLM logger with a unique request ID. + + Args: + user_id: Optional user ID for the request + + Returns: + LLMLogger instance with unique request ID + """ + request_id = generate_request_id() + return LLMLogger(request_id=request_id, user_id=user_id) diff --git a/app/admin/generator/skill_processor.py b/app/admin/generator/skill_processor.py new file mode 100644 index 00000000..0ce7e9d6 --- /dev/null +++ b/app/admin/generator/skill_processor.py @@ -0,0 +1,502 @@ +"""Skill Processing Module. 
+ +This module handles all skill-related operations for agent generation including: +- Skill identification from prompts +- Skill validation and filtering +- Keyword and AI-based skill matching +""" + +import importlib +import json +import logging +from pathlib import Path +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set + +from openai import OpenAI + +from intentkit.skills import __all__ as available_skill_categories + +if TYPE_CHECKING: + from .llm_logger import LLMLogger + +logger = logging.getLogger(__name__) + +# Get available skill categories from the skills module +AVAILABLE_SKILL_CATEGORIES = set(available_skill_categories) + +# Cache for skill states to avoid repeated imports +_skill_states_cache: Dict[str, Set[str]] = {} +_all_skills_cache: Dict[str, Dict[str, Set[str]]] = {} +_skill_schemas_cache: Dict[str, Dict[str, Any]] = {} + + +def load_skill_schema(skill_name: str) -> Optional[Dict[str, Any]]: + """Load schema.json for a specific skill.""" + if skill_name in _skill_schemas_cache: + return _skill_schemas_cache[skill_name] + + try: + # Get the skills directory path + # From intentkit/app/admin/generator/skill_processor.py to intentkit/skills/ + skills_dir = Path(__file__).parent.parent.parent.parent / "intentkit" / "skills" + schema_path = skills_dir / skill_name / "schema.json" + + if schema_path.exists(): + with open(schema_path, "r") as f: + schema = json.load(f) + _skill_schemas_cache[skill_name] = schema + return schema + else: + logger.warning(f"Schema file not found for skill: {skill_name}") + return None + except Exception as e: + logger.error(f"Error loading schema for skill {skill_name}: {e}") + return None + + +def get_agent_owner_api_key_skills() -> Set[str]: + """Get skills that require agent owner API keys.""" + agent_owner_skills = set() + + for skill_name in AVAILABLE_SKILL_CATEGORIES: + try: + schema = load_skill_schema(skill_name) + if ( + schema + and "properties" in schema + and "api_key_provider" in schema["properties"] + ): + api_key_provider = schema["properties"]["api_key_provider"] + if "enum" in api_key_provider and api_key_provider["enum"] == [ + "agent_owner" + ]: + agent_owner_skills.add(skill_name) + except Exception as e: + logger.warning( + f"Error checking API key requirement for skill {skill_name}: {e}" + ) + + return agent_owner_skills + + +def get_configurable_api_key_skills() -> Set[str]: + """Get skills with configurable API key providers.""" + configurable_skills = set() + + for skill_name in AVAILABLE_SKILL_CATEGORIES: + try: + schema = load_skill_schema(skill_name) + if ( + schema + and "properties" in schema + and "api_key_provider" in schema["properties"] + ): + api_key_provider = schema["properties"]["api_key_provider"] + if "enum" in api_key_provider: + enum_values = set(api_key_provider["enum"]) + if "platform" in enum_values and "agent_owner" in enum_values: + configurable_skills.add(skill_name) + except Exception as e: + logger.warning( + f"Error checking API key configurability for skill {skill_name}: {e}" + ) + + return configurable_skills + + +def get_skill_keyword_config() -> Dict[str, List[str]]: + """Generate skill keyword configuration from schemas.""" + config = {} + + for skill_name in AVAILABLE_SKILL_CATEGORIES: + try: + schema = load_skill_schema(skill_name) + keywords = [skill_name] # Always include skill name + + if schema: + # Add title words + if "title" in schema: + keywords.extend(schema["title"].lower().split()) + + # Add x-tags + if "x-tags" in schema: + keywords.extend([tag.lower() for tag 
in schema["x-tags"]]) + + config[skill_name] = keywords + except Exception as e: + logger.warning(f"Error getting keywords for skill {skill_name}: {e}") + config[skill_name] = [skill_name] + + return config + + +def get_skill_state_default(skill_name: str, state_name: str) -> str: + """Get the default value for a specific skill state from its schema.""" + try: + schema = load_skill_schema(skill_name) + if ( + schema + and "properties" in schema + and "states" in schema["properties"] + and "properties" in schema["properties"]["states"] + and state_name in schema["properties"]["states"]["properties"] + ): + state_config = schema["properties"]["states"]["properties"][state_name] + + # Return the default value if specified + if "default" in state_config: + return state_config["default"] + + # If no default, use the first valid enum value + if "enum" in state_config and state_config["enum"]: + return state_config["enum"][0] + + # Fallback to "private" + return "private" + + except Exception as e: + logger.warning(f"Error getting default for {skill_name}.{state_name}: {e}") + return "private" + + +def get_skill_default_api_key_provider(skill_name: str) -> str: + """Get the default API key provider for a skill from its schema.""" + try: + schema = load_skill_schema(skill_name) + if ( + schema + and "properties" in schema + and "api_key_provider" in schema["properties"] + ): + api_key_provider = schema["properties"]["api_key_provider"] + + # Return the default value if specified + if "default" in api_key_provider: + return api_key_provider["default"] + + # If no default, prefer platform if available + if "enum" in api_key_provider and api_key_provider["enum"]: + if "platform" in api_key_provider["enum"]: + return "platform" + return api_key_provider["enum"][0] + + # Fallback to "platform" + return "platform" + + except Exception as e: + logger.warning(f"Error getting API key provider default for {skill_name}: {e}") + return "platform" + + +def get_skill_states(skill_category: str) -> Set[str]: + """Get the actual skill states for a given skill category by importing its module.""" + if skill_category in _skill_states_cache: + return _skill_states_cache[skill_category] + + try: + # Import the skill category module + skill_module = importlib.import_module(f"intentkit.skills.{skill_category}") + + # Look for the SkillStates TypedDict class + if hasattr(skill_module, "SkillStates"): + skill_states_class = getattr(skill_module, "SkillStates") + # Get the annotations which contain the state names + if hasattr(skill_states_class, "__annotations__"): + states = set(skill_states_class.__annotations__.keys()) + _skill_states_cache[skill_category] = states + return states + + logger.warning(f"Could not find SkillStates for {skill_category}") + + except ImportError as e: + logger.warning(f"Could not import skill category {skill_category}: {e}") + + # Fallback: try to extract states from schema.json + try: + schema = load_skill_schema(skill_category) + if ( + schema + and "properties" in schema + and "states" in schema["properties"] + and "properties" in schema["properties"]["states"] + ): + states = set(schema["properties"]["states"]["properties"].keys()) + logger.info(f"Using schema-based states for {skill_category}: {states}") + _skill_states_cache[skill_category] = states + return states + except Exception as e: + logger.warning( + f"Could not extract states from schema for {skill_category}: {e}" + ) + + logger.warning(f"No states found for skill category {skill_category}") + return set() + + +def 
get_all_real_skills() -> Dict[str, Set[str]]: + """Get ALL real skills and their states from the codebase.""" + if _all_skills_cache: + return _all_skills_cache + + all_skills = {} + for skill_category in AVAILABLE_SKILL_CATEGORIES: + states = get_skill_states(skill_category) + if states: + all_skills[skill_category] = states + + _all_skills_cache.update(all_skills) + return all_skills + + +def merge_autonomous_skills( + skills_config: Dict[str, Any], autonomous_skills: List[str] +) -> Dict[str, Any]: + """Merge autonomous skills into existing skills configuration. + + Args: + skills_config: Existing skills configuration + autonomous_skills: List of skill names required for autonomous tasks + + Returns: + Updated skills configuration with autonomous skills added + """ + if not autonomous_skills: + logger.debug(" No autonomous skills to merge") + return skills_config + + logger.info( + f"Merging {len(autonomous_skills)} autonomous skills: {autonomous_skills}" + ) + logger.debug(f"Input skills config keys: {list(skills_config.keys())}") + + for skill_name in autonomous_skills: + if skill_name not in skills_config: + # Add required autonomous skills with dynamic configuration + skill_states = get_skill_states(skill_name) + logger.debug( + f"Got {len(skill_states)} states for {skill_name}: {skill_states}" + ) + + if not skill_states: + logger.warning(f"No states found for autonomous skill: {skill_name}") + continue + + states_dict = {} + for state in skill_states: + states_dict[state] = get_skill_state_default(skill_name, state) + + skills_config[skill_name] = { + "enabled": True, + "states": states_dict, + "api_key_provider": get_skill_default_api_key_provider(skill_name), + } + logger.info( + f"Added autonomous skill: {skill_name} (with {len(skill_states)} states)" + ) + else: + # Ensure autonomous skills are enabled + skills_config[skill_name]["enabled"] = True + logger.info(f"Enabled existing skill for autonomous use: {skill_name}") + + logger.debug(f"Output skills config keys: {list(skills_config.keys())}") + return skills_config + + +def get_skill_mapping() -> Dict[str, Dict[str, Set[str]]]: + """Generate skill mapping dynamically from actual skill implementations.""" + mapping = {} + all_real_skills = get_all_real_skills() + + # Build mapping from configuration + for skill_name, keywords in get_skill_keyword_config().items(): + if skill_name in all_real_skills: + skill_states = all_real_skills[skill_name] + + # Special case for twitter tweet - use only post_tweet state + if skill_name == "twitter": + for keyword in keywords: + if keyword == "tweet": + mapping[keyword] = { + skill_name: {"post_tweet"} + if "post_tweet" in skill_states + else skill_states + } + else: + mapping[keyword] = {skill_name: skill_states} + else: + # Standard mapping for all keywords + for keyword in keywords: + mapping[keyword] = {skill_name: skill_states} + + # Add direct skill name mappings for any skills not in config + for skill_name, skill_states in all_real_skills.items(): + if skill_name not in get_skill_keyword_config() and skill_name not in mapping: + mapping[skill_name] = {skill_name: skill_states} + + return mapping + + +def add_skill_by_name(prompt: str, skills_config: Dict[str, Any]) -> Dict[str, Any]: + """Add skills mentioned by exact name in the prompt.""" + all_real_skills = get_all_real_skills() + prompt_lower = prompt.lower() + + # Check for exact skill name matches + for skill_name in all_real_skills.keys(): + if skill_name in prompt_lower: + # Get states with schema-based defaults + 
states_dict = {} + for state in all_real_skills[skill_name]: + states_dict[state] = get_skill_state_default(skill_name, state) + + skills_config[skill_name] = { + "enabled": True, + "states": states_dict, + "api_key_provider": get_skill_default_api_key_provider(skill_name), + } + + # Handle "add all X skills" pattern + if "add all" in prompt_lower: + for skill_name in all_real_skills.keys(): + if f"all {skill_name}" in prompt_lower: + # Get states with schema-based defaults + states_dict = {} + for state in all_real_skills[skill_name]: + states_dict[state] = get_skill_state_default(skill_name, state) + + skills_config[skill_name] = { + "enabled": True, + "states": states_dict, + "api_key_provider": get_skill_default_api_key_provider(skill_name), + } + + return skills_config + + +async def validate_skills_exist(skills_config: Dict[str, Any]) -> Dict[str, Any]: + """Validate that all skills in the config actually exist in IntentKit. + + Args: + skills_config: Skills configuration to validate + + Returns: + Validated skills configuration with only existing skills + """ + logger.debug(f"Validating skills exist - input: {list(skills_config.keys())}") + logger.debug(f"Available skill categories: {list(AVAILABLE_SKILL_CATEGORIES)}") + + validated_skills = {} + + for skill_name, skill_config in skills_config.items(): + if skill_name in AVAILABLE_SKILL_CATEGORIES: + validated_skills[skill_name] = skill_config + logger.debug(f"Skill {skill_name} exists and validated") + else: + logger.warning( + f"Skipping non-existent skill '{skill_name}' - only available skills: {list(AVAILABLE_SKILL_CATEGORIES)}" + ) + + logger.debug(f"Validated skills output: {list(validated_skills.keys())}") + return validated_skills + + +async def filter_skills_for_auto_generation( + skills_config: Dict[str, Any], +) -> Dict[str, Any]: + """Filter out skills that require agent owner API keys from auto-generation. + + Args: + skills_config: Original skills configuration + + Returns: + Filtered skills configuration without agent-owner API key requirements + """ + # First validate that all skills exist + skills_config = await validate_skills_exist(skills_config) + + filtered_skills = {} + agent_owner_skills = get_agent_owner_api_key_skills() + configurable_skills = get_configurable_api_key_skills() + + for skill_name, skill_config in skills_config.items(): + # Skip skills that always require agent owner API keys + if skill_name in agent_owner_skills: + logger.info( + f"Excluding skill '{skill_name}' from auto-generation: requires agent owner API key" + ) + continue + + # For configurable skills, ensure we set api_key_provider to platform + skill_config = skill_config.copy() + if skill_name in configurable_skills: + skill_config["api_key_provider"] = "platform" + else: + # For other skills, use the schema default + skill_config["api_key_provider"] = get_skill_default_api_key_provider( + skill_name + ) + + filtered_skills[skill_name] = skill_config + + return filtered_skills + + +async def identify_skills( + prompt: str, client: OpenAI, llm_logger: Optional["LLMLogger"] = None +) -> Dict[str, Any]: + """Identify relevant skills from the prompt using only real skill data. 
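+
+    Example (illustrative; the matched skills depend on which skill categories
+    are installed):
+        skills = await identify_skills("Build a twitter trading bot", client)
+        # e.g. {"twitter": {"enabled": True, "states": {...}, ...}}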
+ + Args: + prompt: The natural language prompt + client: OpenAI client (not used, kept for compatibility) + llm_logger: Optional LLM logger for tracking API calls (not used in this implementation) + + Returns: + Dict containing skill configurations with only real skill states + """ + # Use keyword matching first + skills_config = keyword_match_skills(prompt) + + # Add skills mentioned by exact name + skills_config = add_skill_by_name(prompt, skills_config) + return skills_config + + +def keyword_match_skills(prompt: str) -> Dict[str, Any]: + """Match skills using keyword matching with real skill states only. + + Args: + prompt: The natural language prompt + + Returns: + Dict containing skill configurations with real states only + """ + skills_config = {} + prompt_lower = prompt.lower() + + for keyword, skill_mapping in get_skill_mapping().items(): + if keyword.lower() in prompt_lower: + for skill_name, states in skill_mapping.items(): + if skill_name not in skills_config: + # Get states with schema-based defaults + states_dict = {} + for state in states: + states_dict[state] = get_skill_state_default(skill_name, state) + + skills_config[skill_name] = { + "enabled": True, + "states": states_dict, + "api_key_provider": get_skill_default_api_key_provider( + skill_name + ), + } + else: + # Merge states if skill already exists + existing_states = skills_config[skill_name]["states"] + for state in states: + if state not in existing_states: + existing_states[state] = get_skill_state_default( + skill_name, state + ) + + return skills_config diff --git a/app/admin/generator/utils.py b/app/admin/generator/utils.py new file mode 100644 index 00000000..0529783d --- /dev/null +++ b/app/admin/generator/utils.py @@ -0,0 +1,473 @@ +"""Utility functions for agent generation. + +Common helper functions used across the generator modules. +""" + +import json +import logging +import random +import time +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Set + +import httpx +from epyxid import XID +from openai import OpenAI + +from intentkit.config.config import config + +if TYPE_CHECKING: + from .llm_logger import LLMLogger + +logger = logging.getLogger(__name__) + + +def extract_token_usage(response) -> Dict[str, Any]: + """Extract token usage information from OpenAI response. 
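+
+    Example (illustrative):
+        usage = extract_token_usage(response)
+        print(usage["total_tokens"], usage["cached_input_tokens"])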
+ + Args: + response: OpenAI API response + + Returns: + Dict containing token usage information + """ + usage_info = { + "total_tokens": 0, + "input_tokens": 0, + "cached_input_tokens": 0, + "output_tokens": 0, + "input_tokens_details": None, + "completion_tokens_details": None, + } + + if hasattr(response, "usage") and response.usage: + usage = response.usage + usage_info["total_tokens"] = getattr(usage, "total_tokens", 0) + usage_info["input_tokens"] = getattr(usage, "prompt_tokens", 0) or getattr( + usage, "input_tokens", 0 + ) + usage_info["output_tokens"] = getattr(usage, "completion_tokens", 0) or getattr( + usage, "output_tokens", 0 + ) + + # Get detailed token information if available + if hasattr(usage, "input_tokens_details") and usage.input_tokens_details: + usage_info["input_tokens_details"] = ( + usage.input_tokens_details.__dict__ + if hasattr(usage.input_tokens_details, "__dict__") + else dict(usage.input_tokens_details) + ) + + # Extract cached input tokens for cost calculation + if isinstance(usage_info["input_tokens_details"], dict): + usage_info["cached_input_tokens"] = usage_info[ + "input_tokens_details" + ].get("cached_tokens", 0) + + if ( + hasattr(usage, "completion_tokens_details") + and usage.completion_tokens_details + ): + usage_info["completion_tokens_details"] = ( + usage.completion_tokens_details.__dict__ + if hasattr(usage.completion_tokens_details, "__dict__") + else dict(usage.completion_tokens_details) + ) + + return usage_info + + +def generate_request_id() -> str: + """Generate a unique request ID for grouping related conversations.""" + return str(XID()) + + +async def generate_agent_summary( + schema: Dict[str, Any], + identified_skills: Set[str], + client: OpenAI, + llm_logger: Optional["LLMLogger"] = None, +) -> str: + """Generate a human-readable summary of the created agent. + + Args: + schema: The generated agent schema + identified_skills: Skills identified for the agent + client: OpenAI client for API calls + llm_logger: Optional LLM logger for tracking API calls + + Returns: + Human-readable summary message + """ + logger.info("Generating agent summary message") + + # Create skills list + skills_list = ( + ", ".join(identified_skills) if identified_skills else "general capabilities" + ) + + # Get agent attributes + agent_name = schema.get("name", "AI Agent") + agent_purpose = schema.get("purpose", "assist users with various tasks") + + # Prepare messages for summary generation + messages = [ + { + "role": "system", + "content": """You are writing a congratulatory message for a user who just generated an AI agent. + +Write a friendly, enthusiastic message that: +1. Congratulates them on creating their agent +2. Mentions the agent's name and main purpose +3. Lists the key capabilities (skills) the agent has +4. Keeps it concise (1-2 sentences) + +Make it sound exciting and helpful, like "Congratulations! You've successfully created [AgentName], an AI agent that can [purpose] with capabilities including [skills]. Your agent is ready to help you [brief benefit]!" 
+ +Be specific about the agent's abilities but keep the tone conversational and encouraging.""", + }, + { + "role": "user", + "content": f"""Agent created: +Name: {agent_name} +Purpose: {agent_purpose} +Skills: {skills_list} + +Write a congratulatory message.""", + }, + ] + + # Log the LLM call if logger is provided + if llm_logger: + async with llm_logger.log_call( + call_type="agent_summary_generation", + prompt=f"Generate summary for agent: {agent_name}", + retry_count=0, + is_update=False, + llm_model="gpt-4.1-nano", + openai_messages=messages, + ) as call_log: + call_start_time = time.time() + + # Make OpenAI API call + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=messages, + temperature=0.7, + max_tokens=300, + ) + + # Extract generated content + summary = response.choices[0].message.content.strip() + + generated_content = { + "summary": summary, + "agent_name": agent_name, + "skills": list(identified_skills), + } + + # Log successful call + await llm_logger.log_successful_call( + call_log=call_log, + response=response, + generated_content=generated_content, + openai_messages=messages, + call_start_time=call_start_time, + ) + + return summary + else: + # Make call without logging (fallback) + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=messages, + temperature=0.7, + max_tokens=300, + ) + + summary = response.choices[0].message.content.strip() + return summary + + +# List of allowed models +ALLOWED_MODELS = [ + "gpt-4o", + "gpt-4o-mini", + "gpt-4.1-nano", + "gpt-4.1-mini", + "gpt-4.1", + "o4-mini", + "deepseek-chat", + "grok-2", + "grok-3", + "grok-3-mini", + "eternalai", + "reigent", + "venice-uncensored", + "venice-llama-4-maverick-17b", +] + + +async def generate_tags_from_nation_api( + agent_schema: Dict[str, Any], prompt: str +) -> List[Dict[str, int]]: + """Generate tags using Crestal API and LLM selection.""" + + # Simple fallback tags if everything fails - randomized to add variety + def get_default_tags() -> List[Dict[str, int]]: + fallback_sets = [ + [{"id": 28}, {"id": 23}, {"id": 20}], # Analytics, Social Media, Automation + [{"id": 3}, {"id": 11}, {"id": 53}], # Trading, Gaming, API + [ + {"id": 5}, + {"id": 14}, + {"id": 47}, + ], # Investing, Content Creation, Security + [ + {"id": 17}, + {"id": 27}, + {"id": 32}, + ], # Personal Assistant, Marketing, Reporting + [{"id": 33}, {"id": 43}, {"id": 38}], # Art, Tutor, Fitness + [{"id": 51}, {"id": 46}, {"id": 49}], # DevOps, Research, Compliance + ] + + # Add randomization based on current time + random.seed(int(time.time()) % 10000) + selected_set = random.choice(fallback_sets) + + logger.info(f"Using randomized fallback tags: {selected_set}") + return selected_set + + try: + # Use the fixed Crestal API endpoint + crestal_api_url = "https://api.service.crestal.dev/v1/tags" + logger.info(f"Fetching tags from Crestal API: {crestal_api_url}") + + # Get tags from Crestal API + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(crestal_api_url) + + logger.info(f"Crestal API response status: {response.status_code}") + + if response.status_code != 200: + logger.warning( + f"Crestal API returned status {response.status_code}: {response.text}" + ) + return get_default_tags() + + tags_data = response.json() + logger.info( + f"Received {len(tags_data) if isinstance(tags_data, list) else 0} tags from Crestal API" + ) + + if not isinstance(tags_data, list) or len(tags_data) == 0: + logger.warning("Crestal API response is not a valid list 
or is empty") + return get_default_tags() + + # Group by category with tag IDs + categories = {} + tag_lookup = {} # name -> {id, name, category} + + for tag in tags_data: + # Handle the actual Crestal API response format + cat = tag.get("category", "") + name = tag.get("name", "") + tag_id = tag.get("id") + + if cat and name and tag_id: + # Clean up category name (decode \u0026 to &) + clean_category = cat.replace("\\u0026", "&") + + if clean_category not in categories: + categories[clean_category] = [] + categories[clean_category].append(name) + tag_lookup[name] = { + "id": tag_id, + "name": name, + "category": clean_category, + } + + logger.info( + f"Grouped tags into {len(categories)} categories: {list(categories.keys())}" + ) + + if not categories: + logger.warning("No valid categories found after processing tags") + return get_default_tags() + + # Use LLM to select tag names, then convert to IDs + selected_names = await select_tags_with_llm( + agent_schema, prompt, categories + ) + logger.info(f"LLM selected tag names: {selected_names}") + + if not selected_names: + logger.warning("LLM returned no tag names") + return get_default_tags() + + # Convert names to ID objects for frontend + selected_tags = [] + for name in selected_names: + if name in tag_lookup: + selected_tags.append({"id": tag_lookup[name]["id"]}) + logger.info( + f"Converted tag '{name}' to ID {tag_lookup[name]['id']}" + ) + else: + logger.warning(f"Tag name '{name}' not found in lookup table") + + if len(selected_tags) < 3: + logger.warning( + f"Only got {len(selected_tags)} valid tags, using defaults" + ) + return get_default_tags() + + # Return exactly 3 tags + final_tags = selected_tags[:3] + logger.info(f"Final selected tags (3 max): {final_tags}") + return final_tags + + except httpx.TimeoutException: + logger.warning("Crestal API request timed out") + return get_default_tags() + except httpx.ConnectError: + logger.warning("Could not connect to Crestal API") + return get_default_tags() + except Exception as e: + logger.error(f"Error in tag generation: {str(e)}") + return get_default_tags() + + +async def select_tags_with_llm( + agent_schema: Dict[str, Any], prompt: str, categories: Dict[str, List[str]] +) -> List[str]: + """Use LLM to select appropriate tag names.""" + try: + if not config.openai_api_key: + logger.warning("OpenAI API key not configured") + return [] + + client = OpenAI(api_key=config.openai_api_key) + + random_seed = int(time.time() * 1000) % 10000 + random.seed(random_seed) + + # Shuffle categories + category_items = list(categories.items()) + random.shuffle(category_items) + + # Build category info for prompt + cat_info = [] + for cat_name, tag_list in category_items: + # Also shuffle tags within each category + shuffled_tags = tag_list.copy() + random.shuffle(shuffled_tags) + cat_info.append(f"{cat_name}: {', '.join(shuffled_tags)}") + + # Add randomization elements to make prompt more unique + variation_phrases = [ + "Based on the agent's characteristics, identify the most suitable tags", + "Analyze the agent's purpose and skills to determine appropriate tags", + "Consider the agent's functionality and select relevant tags", + "Evaluate the agent's capabilities and choose fitting tags", + ] + + selected_phrase = random.choice(variation_phrases) + + timestamp_hash = abs(hash(str(time.time()))) % 1000 + + llm_prompt = f"""{selected_phrase} from the available categories below. 
+ +Agent Configuration: +- Name: {agent_schema.get("name", "AI Agent")} +- Purpose: {agent_schema.get("purpose", "Not specified")} +- Skills: {", ".join(agent_schema.get("skills", {}).keys()) or "None specified"} +- User Request: {prompt} +- Session ID: {timestamp_hash} + +Available Tag Categories: +{chr(10).join(cat_info)} + +Instructions: +- Select exactly 3 tag names from DIFFERENT categories +- Prioritize tags that best match the agent's purpose and skills +- Return response as a JSON array of tag names +- Example format: ["Trading", "Social Media", "Analytics"] +- Avoid selecting tags from the same category + +Your selection:""" + + logger.info("Calling OpenAI for tag selection with randomized prompt") + + # Increase temperature for more diverse outputs + response = client.chat.completions.create( + model="gpt-4.1-nano", + messages=[{"role": "user", "content": llm_prompt}], + temperature=0.8, + max_tokens=150, + top_p=0.9, + ) + + result = response.choices[0].message.content.strip() + logger.info(f"LLM raw response: {result}") + + try: + selected_tags = json.loads(result) + logger.info(f"Parsed LLM response: {selected_tags}") + except json.JSONDecodeError as e: + logger.error(f"Failed to parse LLM response as JSON: {e}") + logger.error(f"Raw response was: {result}") + return [] + + # Validate tags exist in categories and limit to 3 + valid_tags = [] + all_tag_names = [tag for tags in categories.values() for tag in tags] + selected_categories = set() + + for tag in selected_tags: + if tag in all_tag_names and len(valid_tags) < 3: + # Find the category of this tag + tag_category = None + for cat_name, tag_list in categories.items(): + if tag in tag_list: + tag_category = cat_name + break + + # Only add if from a different category (for diversity) + if tag_category and tag_category not in selected_categories: + valid_tags.append(tag) + selected_categories.add(tag_category) + logger.info(f"Added tag '{tag}' from category '{tag_category}'") + elif tag_category in selected_categories: + logger.info( + f"Skipped tag '{tag}' - category '{tag_category}' already selected" + ) + elif tag not in all_tag_names: + logger.warning(f"Tag '{tag}' not found in available tags") + + if len(valid_tags) < 3: + unused_categories = [ + cat for cat in categories.keys() if cat not in selected_categories + ] + random.shuffle(unused_categories) + + for cat_name in unused_categories: + if len(valid_tags) >= 3: + break + available_tags = categories[cat_name] + if available_tags: + random_tag = random.choice(available_tags) + valid_tags.append(random_tag) + selected_categories.add(cat_name) + logger.info( + f"Added random tag '{random_tag}' from unused category '{cat_name}'" + ) + + logger.info( + f"Final valid tags after filtering and diversification: {valid_tags}" + ) + return valid_tags[:3] # Ensure exactly 3 tags + + except Exception as e: + logger.error(f"Error in LLM tag selection: {str(e)}") + return [] diff --git a/app/admin/generator/validation.py b/app/admin/generator/validation.py new file mode 100644 index 00000000..55744ee9 --- /dev/null +++ b/app/admin/generator/validation.py @@ -0,0 +1,203 @@ +"""Validation Module. 
+
+This module handles all validation operations for agent generation including:
+- Schema validation against JSON schemas
+- Agent-specific business logic validation
+- Error formatting and handling
+"""
+
+import logging
+import re
+from typing import Any, Dict, List, Optional
+
+import jsonschema
+from pydantic import BaseModel, Field, ValidationError
+
+from intentkit.config.config import config
+from intentkit.models.agent import Agent, AgentUpdate
+
+logger = logging.getLogger(__name__)
+
+
+class ValidationResult(BaseModel):
+    """Result of schema validation."""
+
+    valid: bool = Field(..., description="Whether the schema is valid")
+    errors: List[str] = Field(default_factory=list, description="Validation errors")
+
+
+async def validate_schema_against_json_schema(
+    data: Dict[str, Any], json_schema: Dict[str, Any]
+) -> ValidationResult:
+    """Validate a schema against a JSON schema.
+
+    Args:
+        data: The schema to validate
+        json_schema: The JSON schema to validate against
+
+    Returns:
+        ValidationResult with validation status and errors
+    """
+    result = ValidationResult(valid=True)
+
+    try:
+        jsonschema.validate(data, json_schema)
+    except jsonschema.exceptions.ValidationError as e:
+        result.valid = False
+        result.errors.append(_format_validation_error(e))
+        logger.error(f"Schema validation failed: {data}")
+    except Exception as e:
+        result.valid = False
+        result.errors.append(f"Schema validation failed: {str(e)}")
+        logger.error(f"Schema validation failed: {data}")
+
+    return result
+
+
+async def validate_schema(data: Dict[str, Any]) -> ValidationResult:
+    """Validate a schema against the agent schema.
+
+    Args:
+        data: The schema to validate
+
+    Returns:
+        ValidationResult with validation status and errors
+    """
+    # Use the shared schema function with admin configuration
+    schema = await Agent.get_json_schema(
+        filter_owner_api_skills=True,
+        admin_llm_skill_control=config.admin_llm_skill_control,
+    )
+    return await validate_schema_against_json_schema(data, schema)
+
+
+def _format_validation_error(error: jsonschema.exceptions.ValidationError) -> str:
+    """Format a jsonschema validation error into a concise, user-friendly message."""
+    field_path = (
+        ".".join(str(p) for p in error.absolute_path) if error.absolute_path else "root"
+    )
+
+    if error.validator == "required":
+        return f"Missing required fields: {', '.join(error.validator_value)}"
+
+    elif error.validator == "additionalProperties":
+        if "were unexpected" in error.message:
+            match = re.search(r"\(([^)]+) were unexpected\)", error.message)
+            if match:
+                unexpected = match.group(1).replace("'", "").replace(" ", "")
+                return f"Unexpected properties: {unexpected}"
+        return "Schema contains unexpected properties"
+
+    elif error.validator == "type":
+        return f"Field '{field_path}' should be {error.validator_value}, got {type(error.instance).__name__}"
+
+    elif error.validator in ["maxLength", "minLength"]:
+        limit = error.validator_value
+        actual = len(error.instance) if error.instance else 0
+        op = "max" if error.validator == "maxLength" else "min"
+        return f"Field '{field_path}' length invalid ({op} {limit}, got {actual})"
+
+    elif error.validator == "enum":
+        return f"Field '{field_path}' must be one of: {', '.join(str(v) for v in error.validator_value)}"
+
+    elif error.validator == "pattern":
+        return f"Field '{field_path}' does not match required pattern"
+
+    else:
+        return f"Validation error in '{field_path}': {error.message.split('.')[0]}"
+
+
+async def validate_agent_create(
+    agent_data: Dict[str, Any], user_id: Optional[str] = 
None
+) -> ValidationResult:
+    """Validate agent data using the same validation as the admin API.
+
+    Args:
+        agent_data: The agent data to validate
+        user_id: Optional user ID for authorization check
+
+    Returns:
+        ValidationResult with validation status and errors
+    """
+    result = ValidationResult(valid=True)
+
+    try:
+        # Create AgentUpdate from data
+        agent = AgentUpdate.model_validate(agent_data)
+
+        # Validate owner
+        if not agent.owner:
+            result.valid = False
+            result.errors.append("Owner is required")
+            return result
+
+        # Validate fee percentage if user_id is provided
+        max_fee = 100
+        if user_id:
+            if agent.owner != user_id:
+                result.valid = False
+                result.errors.append("Owner does not match user ID")
+                return result
+
+            # Validate fee percentage
+            if agent.fee_percentage and agent.fee_percentage > max_fee:
+                result.valid = False
+                result.errors.append("Fee percentage too high")
+                return result
+
+        # Validate autonomous schedule
+        try:
+            agent.validate_autonomous_schedule()
+        except ValueError as e:
+            result.valid = False
+            result.errors.append(str(e))
+
+    except ValidationError as e:
+        result.valid = False
+        for error in e.errors():
+            result.errors.append(f"{error['loc'][0]}: {error['msg']}")
+
+    return result
+
+
+async def fix_validation_errors(
+    schema: Dict[str, Any], schema_errors: List[str], agent_errors: List[str]
+) -> Dict[str, Any]:
+    """Attempt to fix validation errors.
+
+    Args:
+        schema: The original schema
+        schema_errors: Schema validation errors
+        agent_errors: Agent validation errors
+
+    Returns:
+        Fixed schema
+    """
+    fixed_schema = schema.copy()
+
+    # Fix required fields
+    required_fields = ["name", "purpose", "personality", "principles"]
+    for field in required_fields:
+        if field not in fixed_schema or not fixed_schema[field]:
+            fixed_schema[field] = f"Default {field.capitalize()}"
+
+    # Fix model and temperature
+    if "model" not in fixed_schema or not fixed_schema["model"]:
+        fixed_schema["model"] = (
+            "gpt-4.1-nano"  # Use default model, let schema validation handle validity
+        )
+    # Guard against non-numeric temperatures before comparing the range
+    if (
+        "temperature" not in fixed_schema
+        or not isinstance(fixed_schema["temperature"], (int, float))
+        or not (0 <= fixed_schema["temperature"] <= 2)
+    ):
+        fixed_schema["temperature"] = 0.7
+
+    # Fix agent-specific issues
+    if "Owner is required" in agent_errors:
+        fixed_schema["owner"] = "system"
+    if "Fee percentage too high" in agent_errors and "fee_percentage" in fixed_schema:
+        fixed_schema["fee_percentage"] = 100
+    if (
+        any("autonomous" in error for error in agent_errors)
+        and "autonomous" in fixed_schema
+    ):
+        fixed_schema.pop("autonomous")
+
+    return fixed_schema
diff --git a/app/admin/health.py b/app/admin/health.py
new file mode 100644
index 00000000..f55776a9
--- /dev/null
+++ b/app/admin/health.py
@@ -0,0 +1,8 @@
+from fastapi import APIRouter
+
+health_router = APIRouter()
+
+
+@health_router.get("/health", include_in_schema=False)
+async def health_check():
+    return {"status": "healthy"}
diff --git a/app/admin/metadata.py b/app/admin/metadata.py
new file mode 100644
index 00000000..b22269c6
--- /dev/null
+++ b/app/admin/metadata.py
@@ -0,0 +1,86 @@
+import logging
+from typing import List
+
+from fastapi import APIRouter, Depends
+from sqlalchemy import select
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from intentkit.models.db import get_db
+from intentkit.models.llm import LLMModelInfo, LLMModelInfoTable, LLMProvider
+from intentkit.models.skill import Skill, SkillTable
+
+# Create a readonly router for metadata endpoints
+metadata_router_readonly = APIRouter(tags=["Metadata"])
+
+
+class 
LLMModelInfoWithProviderName(LLMModelInfo): + """LLM model information with provider display name.""" + + provider_name: str + + +@metadata_router_readonly.get( + "/metadata/skills", + response_model=List[Skill], + summary="Get all skills", + description="Returns a list of all available skills in the system", +) +async def get_skills(db: AsyncSession = Depends(get_db)): + """ + Get all skills available in the system. + + **Returns:** + * `List[Skill]` - List of all skills + """ + try: + # Query all skills from the database + stmt = select(SkillTable) + result = await db.execute(stmt) + skills = result.scalars().all() + + # Convert to Skill models + return [Skill.model_validate(skill) for skill in skills] + except Exception as e: + logging.error(f"Error getting skills: {e}") + raise + + +@metadata_router_readonly.get( + "/metadata/llms", + response_model=List[LLMModelInfoWithProviderName], + summary="Get all LLM models", + description="Returns a list of all available LLM models in the system", +) +async def get_llms(db: AsyncSession = Depends(get_db)): + """ + Get all LLM models available in the system. + + **Returns:** + * `List[LLMModelInfoWithProviderName]` - List of all LLM models with provider display names + """ + try: + # Query all LLM models from the database + stmt = select(LLMModelInfoTable) + result = await db.execute(stmt) + models = result.scalars().all() + + # Convert to LLMModelInfoWithProviderName models + result_models = [] + for model in models: + model_info = LLMModelInfo.model_validate(model) + # Convert provider string to LLMProvider enum if needed + provider = ( + LLMProvider(model_info.provider) + if isinstance(model_info.provider, str) + else model_info.provider + ) + result_models.append( + LLMModelInfoWithProviderName( + **model_info.model_dump(), + provider_name=provider.display_name(), + ) + ) + return result_models + except Exception as e: + logging.error(f"Error getting LLM models: {e}") + raise diff --git a/app/admin/scheduler.py b/app/admin/scheduler.py new file mode 100644 index 00000000..7f3953a0 --- /dev/null +++ b/app/admin/scheduler.py @@ -0,0 +1,112 @@ +"""Scheduler for periodic tasks.""" + +import logging + +from apscheduler.jobstores.redis import RedisJobStore +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from apscheduler.triggers.cron import CronTrigger + +from app.services.twitter.oauth2_refresh import refresh_expiring_tokens +from intentkit.config.config import config +from intentkit.core.agent import update_agent_action_cost +from intentkit.core.credit import refill_all_free_credits +from intentkit.models.agent_data import AgentQuota +from intentkit.models.redis import get_redis, send_heartbeat + +logger = logging.getLogger(__name__) + + +async def send_scheduler_heartbeat(): + """Send a heartbeat signal to Redis to indicate the scheduler is running. + + This function sends a heartbeat to Redis that expires after 16 minutes, + allowing other services to verify that the scheduler is operational. 
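+
+    Liveness-probe sketch (the Redis key name shown is an assumption for
+    illustration; the actual key is managed by send_heartbeat):
+        alive = await redis_client.exists("intentkit:heartbeat:scheduler")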
+
+    """
+    logger.info("Sending scheduler heartbeat")
+    try:
+        redis_client = get_redis()
+        await send_heartbeat(redis_client, "scheduler")
+        logger.info("Sent scheduler heartbeat successfully")
+    except Exception as e:
+        logger.error(f"Error sending scheduler heartbeat: {e}")
+
+
+def create_scheduler():
+    """Create and configure the APScheduler with all periodic tasks."""
+    # Job Store
+    jobstores = {}
+    if config.redis_host:
+        jobstores["default"] = RedisJobStore(
+            host=config.redis_host,
+            port=config.redis_port,
+            db=config.redis_db,
+            jobs_key="intentkit:scheduler:jobs",
+            run_times_key="intentkit:scheduler:run_times",
+        )
+        logger.info(f"scheduler using redis store: {config.redis_host}")
+
+    scheduler = AsyncIOScheduler(jobstores=jobstores)
+
+    # Reset daily quotas at UTC 00:00
+    scheduler.add_job(
+        AgentQuota.reset_daily_quotas,
+        trigger=CronTrigger(hour=0, minute=0, timezone="UTC"),
+        id="reset_daily_quotas",
+        name="Reset daily quotas",
+        replace_existing=True,
+    )
+
+    # Reset monthly quotas at UTC 00:00 on the first day of each month
+    scheduler.add_job(
+        AgentQuota.reset_monthly_quotas,
+        trigger=CronTrigger(day=1, hour=0, minute=0, timezone="UTC"),
+        id="reset_monthly_quotas",
+        name="Reset monthly quotas",
+        replace_existing=True,
+    )
+
+    # Check for expiring tokens every 5 minutes
+    scheduler.add_job(
+        refresh_expiring_tokens,
+        trigger=CronTrigger(minute="*/5", timezone="UTC"),  # Run every 5 minutes
+        id="refresh_twitter_tokens",
+        name="Refresh expiring Twitter tokens",
+        replace_existing=True,
+    )
+
+    # Refill free credits hourly at minute 20
+    scheduler.add_job(
+        refill_all_free_credits,
+        trigger=CronTrigger(minute="20", timezone="UTC"),  # Run every hour at :20
+        id="refill_free_credits",
+        name="Refill free credits",
+        replace_existing=True,
+    )
+
+    # Update agent action costs hourly
+    scheduler.add_job(
+        update_agent_action_cost,
+        trigger=CronTrigger(minute=40, timezone="UTC"),
+        id="update_agent_action_cost",
+        name="Update agent action costs",
+        replace_existing=True,
+    )
+
+    # Send heartbeat every minute
+    if config.redis_host:
+        scheduler.add_job(
+            send_scheduler_heartbeat,
+            trigger=CronTrigger(minute="*", timezone="UTC"),  # Run every minute
+            id="scheduler_heartbeat",
+            name="Scheduler Heartbeat",
+            replace_existing=True,
+        )
+
+    return scheduler
+
+
+def start_scheduler():
+    """Create, configure and start the APScheduler."""
+    scheduler = create_scheduler()
+    scheduler.start()
+    return scheduler
diff --git a/app/admin/schema.py b/app/admin/schema.py
new file mode 100644
index 00000000..c80fa43a
--- /dev/null
+++ b/app/admin/schema.py
@@ -0,0 +1,135 @@
+import json
+import logging
+from pathlib import Path
+
+from fastapi import APIRouter, Depends, HTTPException
+from fastapi import Path as PathParam
+from fastapi.responses import FileResponse, JSONResponse
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from intentkit.config.config import config
+from intentkit.models.agent import Agent
+from intentkit.models.db import get_db
+
+logger = logging.getLogger(__name__)
+
+# Create readonly router
+schema_router_readonly = APIRouter()
+
+# Get the project root directory
+PROJECT_ROOT = Path(__file__).parent.parent.parent
+
+
+@schema_router_readonly.get(
+    "/schema/agent", tags=["Schema"], operation_id="get_agent_schema"
+)
+async def get_agent_schema(db: AsyncSession = Depends(get_db)) -> JSONResponse:
+    """Get the JSON schema for Agent model with all $ref references resolved.
+
+    Updates the model property in the schema based on LLMModelInfo.get results. 
+ For each model in the enum list: + - If the model is not found in LLMModelInfo, it remains unchanged + - If the model is found but disabled (enabled=False), it is removed from the schema + - If the model is found and enabled, its properties are updated based on the LLMModelInfo record + + **Returns:** + * `JSONResponse` - The complete JSON schema for the Agent model with application/json content type + """ + return JSONResponse( + content=await Agent.get_json_schema( + db, admin_llm_skill_control=config.admin_llm_skill_control + ), + media_type="application/json", + ) + + +@schema_router_readonly.get( + "/skills/{skill}/schema.json", + tags=["Schema"], + operation_id="get_skill_schema", + responses={ + 200: {"description": "Success"}, + 404: {"description": "Skill not found"}, + 400: {"description": "Invalid skill name"}, + }, +) +async def get_skill_schema( + skill: str = PathParam(..., description="Skill name", regex="^[a-zA-Z0-9_-]+$"), +) -> JSONResponse: + """Get the JSON schema for a specific skill. + + **Path Parameters:** + * `skill` - Skill name + + **Returns:** + * `JSONResponse` - The complete JSON schema for the skill with application/json content type + + **Raises:** + * `HTTPException` - If the skill is not found or name is invalid + """ + base_path = PROJECT_ROOT / "intentkit" / "skills" + schema_path = base_path / skill / "schema.json" + normalized_path = schema_path.resolve() + + if not normalized_path.is_relative_to(base_path): + raise HTTPException(status_code=400, detail="Invalid skill name") + + try: + with open(normalized_path) as f: + schema = json.load(f) + except (FileNotFoundError, json.JSONDecodeError): + raise HTTPException(status_code=404, detail="Skill schema not found") + + return JSONResponse(content=schema, media_type="application/json") + + +@schema_router_readonly.get( + "/skills/{skill}/{icon_name}.{ext}", + tags=["Schema"], + operation_id="get_skill_icon", + responses={ + 200: {"description": "Success"}, + 404: {"description": "Skill icon not found"}, + 400: {"description": "Invalid skill name or extension"}, + }, +) +async def get_skill_icon( + skill: str = PathParam(..., description="Skill name", regex="^[a-zA-Z0-9_-]+$"), + icon_name: str = PathParam(..., description="Icon name"), + ext: str = PathParam( + ..., description="Icon file extension", regex="^(png|svg|jpg|jpeg)$" + ), +) -> FileResponse: + """Get the icon for a specific skill. 
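+
+    Example request (illustrative skill and icon names):
+        GET /skills/twitter/twitter.png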
+
+    **Path Parameters:**
+    * `skill` - Skill name
+    * `icon_name` - Icon name
+    * `ext` - Icon file extension (png, svg, jpg, or jpeg)
+
+    **Returns:**
+    * `FileResponse` - The icon file with appropriate content type
+
+    **Raises:**
+    * `HTTPException` - If the skill or icon is not found or name is invalid
+    """
+    base_path = PROJECT_ROOT / "intentkit" / "skills"
+    icon_path = base_path / skill / f"{icon_name}.{ext}"
+    normalized_path = icon_path.resolve()
+
+    if not normalized_path.is_relative_to(base_path):
+        raise HTTPException(status_code=400, detail="Invalid skill name")
+
+    if not normalized_path.exists():
+        raise HTTPException(status_code=404, detail="Skill icon not found")
+
+    # The ext path parameter regex only admits png, svg, jpg, and jpeg,
+    # so jpg/jpeg is the only remaining case after svg and png
+    content_type = (
+        "image/svg+xml"
+        if ext == "svg"
+        else "image/png"
+        if ext == "png"
+        else "image/jpeg"
+    )
+    return FileResponse(normalized_path, media_type=content_type)
diff --git a/app/admin/user.py b/app/admin/user.py
new file mode 100644
index 00000000..cce62eff
--- /dev/null
+++ b/app/admin/user.py
@@ -0,0 +1,82 @@
+import logging
+from typing import Annotated
+
+from fastapi import APIRouter, Depends, HTTPException, Path
+
+from app.auth import verify_admin_jwt
+from intentkit.models.user import User, UserUpdate
+
+logger = logging.getLogger(__name__)
+
+user_router = APIRouter(prefix="/users", tags=["User"])
+user_router_readonly = APIRouter(prefix="/users", tags=["User"])
+
+
+@user_router_readonly.get(
+    "/{user_id}",
+    response_model=User,
+    operation_id="get_user",
+    summary="Get User",
+    dependencies=[Depends(verify_admin_jwt)],
+)
+async def get_user(
+    user_id: Annotated[str, Path(description="ID of the user")],
+) -> User:
+    """Get a user by ID.
+
+    Args:
+        user_id: ID of the user to get
+
+    Returns:
+        User model
+    """
+    user = await User.get(user_id)
+    if user is None:
+        raise HTTPException(status_code=404, detail="User not found")
+    return user
+
+
+@user_router.put(
+    "/{user_id}",
+    response_model=User,
+    operation_id="put_user",
+    summary="Replace User",
+    dependencies=[Depends(verify_admin_jwt)],
+)
+async def put_user(
+    user_id: Annotated[str, Path(description="ID of the user")],
+    user_update: UserUpdate,
+) -> User:
+    """Replace all fields of a user with the provided values.
+
+    Args:
+        user_id: ID of the user to update
+        user_update: New user data to replace existing data
+
+    Returns:
+        Updated User model
+    """
+    return await user_update.put(user_id)
+
+
+@user_router.patch(
+    "/{user_id}",
+    response_model=User,
+    operation_id="patch_user",
+    summary="Update User",
+    dependencies=[Depends(verify_admin_jwt)],
+)
+async def patch_user(
+    user_id: Annotated[str, Path(description="ID of the user")],
+    user_update: UserUpdate,
+) -> User:
+    """Update only the provided fields of a user.
+
+    Args:
+        user_id: ID of the user to update
+        user_update: User data to update
+
+    Returns:
+        Updated User model
+    """
+    return await user_update.patch(user_id)
diff --git a/app/api.py b/app/api.py
new file mode 100644
index 00000000..b444a431
--- /dev/null
+++ b/app/api.py
@@ -0,0 +1,244 @@
+"""API server module.
+
+This module initializes and configures the FastAPI application,
+including routers, middleware, and startup/shutdown events.
+
+The API server provides endpoints for agent execution and management. 
+""" + +import logging +from contextlib import asynccontextmanager + +import sentry_sdk +from fastapi import FastAPI +from fastapi.exceptions import RequestValidationError +from fastapi.middleware.cors import CORSMiddleware +from sqlalchemy import select +from starlette.exceptions import HTTPException as StarletteHTTPException + +from app.admin import ( + admin_router, + admin_router_readonly, + agent_generator_router, + credit_router, + credit_router_readonly, + health_router, + metadata_router_readonly, + schema_router_readonly, + user_router, + user_router_readonly, +) +from app.entrypoints.agent_api import router_ro as agent_api_ro +from app.entrypoints.agent_api import router_rw as agent_api_rw +from app.entrypoints.openai_compatible import openai_router +from app.entrypoints.web import chat_router, chat_router_readonly +from app.services.twitter.oauth2 import router as twitter_oauth2_router +from app.services.twitter.oauth2_callback import router as twitter_callback_router +from intentkit.config.config import config +from intentkit.core.api import core_router +from intentkit.models.agent import AgentTable +from intentkit.models.db import get_session, init_db +from intentkit.models.redis import init_redis +from intentkit.utils.error import ( + IntentKitAPIError, + http_exception_handler, + intentkit_api_error_handler, + intentkit_other_error_handler, + request_validation_exception_handler, +) + +# init logger +logger = logging.getLogger(__name__) + +if config.sentry_dsn: + sentry_sdk.init( + dsn=config.sentry_dsn, + sample_rate=config.sentry_sample_rate, + # traces_sample_rate=config.sentry_traces_sample_rate, + # profiles_sample_rate=config.sentry_profiles_sample_rate, + environment=config.env, + release=config.release, + server_name="intent-api", + ) + + +# Read agent API documentation from file +def _load_agent_api_docs() -> str: + """Load agent API documentation from docs/agent_api.md file.""" + try: + import os + + docs_path = os.path.join( + os.path.dirname(os.path.dirname(__file__)), "docs", "agent_api.md" + ) + with open(docs_path, "r", encoding="utf-8") as f: + doc_str = f.read() + if config.open_api_base_url: + doc_str = doc_str.replace( + "http://localhost:8000", + config.open_api_base_url, + ) + return doc_str + except Exception: + return "Agent API" + + +# Create Agent API sub-application +agent_app = FastAPI( + title="IntentKit Agent API", + description=_load_agent_api_docs(), + version=config.release, + servers=[ + { + "url": f"{config.open_api_base_url}/v1", + "description": "IntentKit Agent API Server", + } + ], + contact={ + "name": "IntentKit Team", + "url": "https://github.com/crestalnetwork/intentkit", + }, + license_info={ + "name": "MIT", + "url": "https://opensource.org/licenses/MIT", + }, +) + +# Add exception handlers to the Agent API sub-application +agent_app.exception_handler(IntentKitAPIError)(intentkit_api_error_handler) +agent_app.exception_handler(RequestValidationError)( + request_validation_exception_handler +) +agent_app.exception_handler(StarletteHTTPException)(http_exception_handler) +agent_app.exception_handler(Exception)(intentkit_other_error_handler) + +# Add CORS middleware to the Agent API sub-application +agent_app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Allows all origins + allow_methods=["*"], # Allows all methods + allow_headers=["*"], # Allows all headers +) + +# Add routers to the Agent API sub-application +agent_app.include_router(agent_api_rw) +agent_app.include_router(agent_api_ro) 
+
+agent_app.include_router(openai_router)
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    """Manage application lifecycle.
+
+    This context manager:
+    1. Initializes database connection
+    2. Performs any necessary startup tasks
+    3. Handles graceful shutdown
+
+    Args:
+        app: FastAPI application instance
+    """
+    # Initialize database
+    await init_db(**config.db)
+
+    # Initialize Redis if configured
+    if config.redis_host:
+        await init_redis(
+            host=config.redis_host,
+            port=config.redis_port,
+            db=config.redis_db,
+        )
+
+    # Create example agent if no agents exist
+    await create_example_agent()
+
+    logger.info("API server started")
+    yield
+    # Cleanup runs after the API server shuts down
+    logger.info("Cleaning up and shutdown...")
+
+
+app = FastAPI(
+    lifespan=lifespan,
+    title="IntentKit API",
+    summary="IntentKit API Documentation",
+    version=config.release,
+    contact={
+        "name": "IntentKit Team",
+        "url": "https://github.com/crestalnetwork/intentkit",
+    },
+    license_info={
+        "name": "MIT",
+        "url": "https://opensource.org/licenses/MIT",
+    },
+)
+
+
+app.exception_handler(IntentKitAPIError)(intentkit_api_error_handler)
+app.exception_handler(RequestValidationError)(request_validation_exception_handler)
+app.exception_handler(StarletteHTTPException)(http_exception_handler)
+app.exception_handler(Exception)(intentkit_other_error_handler)
+
+# Add CORS middleware
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],  # Allows all origins
+    allow_methods=["*"],  # Allows all methods
+    allow_headers=["*"],  # Allows all headers
+)
+
+# Mount the Agent API sub-application
+app.mount("/v1", agent_app)
+
+app.include_router(chat_router)
+app.include_router(chat_router_readonly)
+app.include_router(admin_router)
+app.include_router(admin_router_readonly)
+app.include_router(metadata_router_readonly)
+app.include_router(credit_router)
+app.include_router(credit_router_readonly)
+app.include_router(schema_router_readonly)
+app.include_router(user_router)
+app.include_router(user_router_readonly)
+app.include_router(core_router)
+app.include_router(twitter_callback_router)
+app.include_router(twitter_oauth2_router)
+app.include_router(health_router)
+app.include_router(agent_generator_router)
+
+
+async def create_example_agent() -> None:
+    """Create an example agent if no agents exist in the database.
+
+    Creates an agent with ID 'example' and basic configuration if the agents table is empty.
+    The agent is configured with the 'system' skill with 'read_agent_api_key' state set to 'private'. 
+ """ + try: + async with get_session() as session: + # Check if any agents exist - more efficient count query + result = await session.execute( + select(select(AgentTable.id).limit(1).exists().label("exists")) + ) + if result.scalar(): + logger.debug("Example agent not created: agents already exist") + return # Agents exist, nothing to do + + # Create example agent + example_agent = AgentTable( + id="example", + name="Example", + owner="intentkit", + skills={ + "system": { + "states": {"read_agent_api_key": "private"}, + "enabled": True, + } + }, + ) + + session.add(example_agent) + await session.commit() + logger.info("Created example agent with ID 'example'") + except Exception as e: + logger.error(f"Failed to create example agent: {str(e)}") + # Don't re-raise the exception to avoid blocking server startup diff --git a/app/auth/__init__.py b/app/auth/__init__.py new file mode 100644 index 00000000..b4c97606 --- /dev/null +++ b/app/auth/__init__.py @@ -0,0 +1,91 @@ +import logging +from typing import Optional + +import jwt +from fastapi import Depends, HTTPException, Request, status +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from pydantic import BaseModel + +from intentkit.config.config import config +from intentkit.models.agent import AgentData + +logger = logging.getLogger(__name__) + +security = HTTPBearer(auto_error=False) + + +class AgentToken(BaseModel): + """Agent token information.""" + + agent_id: str + is_public: bool + + +async def verify_admin_jwt( + request: Request, + credentials: Optional[HTTPAuthorizationCredentials] = Depends(security), +) -> str: + """Verify JWT token from Authorization header and return the subject claim. + + Returns: + str: The subject claim from the JWT token + """ + host = request.headers.get("host", "").split(":")[0] + logger.debug( + f"verify_admin_jwt: enable={config.admin_auth_enabled}, credentials={credentials}, host={host}" + ) + + if ( + not config.admin_auth_enabled + or host == "localhost" + or host == "127.0.0.1" + or host == "intent-api" + or host == "intent-readonly" + or host == "intent-singleton" + ): + return "" + + if not credentials: + raise HTTPException( + status_code=401, detail="Missing authentication credentials" + ) + + try: + payload = jwt.decode( + credentials.credentials, config.admin_jwt_secret, algorithms=["HS256"] + ) + return payload.get("sub", "") + except jwt.InvalidTokenError: + raise HTTPException(status_code=401, detail="Invalid authentication token") + + +agent_security = HTTPBearer() + + +async def verify_agent_token( + credentials: HTTPAuthorizationCredentials = Depends(agent_security), +) -> AgentToken: + """Verify the API token and return the associated agent token information. 
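+
+    Example (illustrative route using this dependency):
+        @router.get("/whoami")
+        async def whoami(token: AgentToken = Depends(verify_agent_token)):
+            return {"agent_id": token.agent_id, "is_public": token.is_public}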
+ + Args: + credentials: The Bearer token credentials from HTTPBearer + + Returns: + AgentToken: The agent token information containing agent_id and is_public + + Raises: + HTTPException: If token is invalid or agent not found + """ + token = credentials.credentials + + # Find agent data by api_key + agent_data = await AgentData.get_by_api_key(token) + if not agent_data: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API token" + ) + + # Check if token is public (starts with 'pk-') + is_public = token.startswith("pk-") + + return AgentToken(agent_id=agent_data.id, is_public=is_public) diff --git a/app/autonomous.py b/app/autonomous.py new file mode 100644 index 00000000..0847a6cb --- /dev/null +++ b/app/autonomous.py @@ -0,0 +1,245 @@ +import asyncio +import logging +import signal +from datetime import datetime +from typing import Dict + +import sentry_sdk +from apscheduler.jobstores.redis import RedisJobStore +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from apscheduler.triggers.cron import CronTrigger +from sqlalchemy import select + +from app.entrypoints.autonomous import run_autonomous_task +from intentkit.config.config import config +from intentkit.models.agent import Agent, AgentTable +from intentkit.models.db import get_session, init_db +from intentkit.models.redis import ( + clean_heartbeat, + get_redis, + init_redis, + send_heartbeat, +) + +logger = logging.getLogger(__name__) + +# Global dictionary to store task_id and last updated time +autonomous_tasks_updated_at: Dict[str, datetime] = {} + +# Global scheduler instance +jobstores = {} +if config.redis_host: + jobstores["default"] = RedisJobStore( + host=config.redis_host, + port=config.redis_port, + db=config.redis_db, + jobs_key="intentkit:autonomous:jobs", + run_times_key="intentkit:autonomous:run_times", + ) + logger.info(f"autonomous scheduler use redis store: {config.redis_host}") +scheduler = AsyncIOScheduler(jobstores=jobstores) + +# Head job ID, it schedules the other jobs +HEAD_JOB_ID = "head" + + +if config.sentry_dsn: + sentry_sdk.init( + dsn=config.sentry_dsn, + sample_rate=config.sentry_sample_rate, + # traces_sample_rate=config.sentry_traces_sample_rate, + # profiles_sample_rate=config.sentry_profiles_sample_rate, + environment=config.env, + release=config.release, + server_name="intent-autonomous", + ) + + +async def send_autonomous_heartbeat(): + """Send a heartbeat signal to Redis to indicate the autonomous service is running. + + This function sends a heartbeat to Redis that expires after 16 minutes, + allowing other services to verify that the autonomous service is operational. + """ + logger.info("Sending autonomous heartbeat") + try: + redis_client = get_redis() + await send_heartbeat(redis_client, "autonomous") + logger.info("Sent autonomous heartbeat successfully") + except Exception as e: + logger.error(f"Error sending autonomous heartbeat: {e}") + + +async def schedule_agent_autonomous_tasks(): + """ + Find all agents with autonomous tasks and schedule them. + This function is called periodically to update the scheduler with new or modified tasks. 
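+
+    Job IDs take the form "<agent_id>-<autonomous_id>"; an agent "example" with
+    an autonomous task "daily" is scheduled as "example-daily" (illustrative IDs).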
+
+    """
+    logger.info("Checking for agent autonomous tasks...")
+
+    # List of jobs to keep; any scheduled job not in this list will be removed
+    planned_jobs = [HEAD_JOB_ID, "autonomous_heartbeat"]
+
+    async with get_session() as db:
+        # Get all agents with autonomous configuration
+        query = select(AgentTable).where(AgentTable.autonomous != None)  # noqa: E711
+        agents = await db.scalars(query)
+
+        for item in agents:
+            agent = Agent.model_validate(item)
+            if not agent.autonomous or len(agent.autonomous) == 0:
+                continue

+            for autonomous in agent.autonomous:
+                if not autonomous.enabled:
+                    continue
+
+                # Create a unique task ID for this autonomous task
+                task_id = f"{agent.id}-{autonomous.id}"
+                planned_jobs.append(task_id)
+
+                # Check if task exists and needs updating
+                if (
+                    task_id in autonomous_tasks_updated_at
+                    and autonomous_tasks_updated_at[task_id] >= agent.updated_at
+                ):
+                    # Task exists and agent hasn't been updated, skip
+                    continue
+
+                try:
+                    # Schedule new job based on minutes or cron
+                    if autonomous.cron:
+                        logger.info(
+                            f"Scheduling cron task {task_id} with cron: {autonomous.cron}"
+                        )
+                        scheduler.add_job(
+                            run_autonomous_task,
+                            CronTrigger.from_crontab(autonomous.cron),
+                            id=task_id,
+                            args=[
+                                agent.id,
+                                agent.owner,
+                                autonomous.id,
+                                autonomous.prompt,
+                            ],
+                            replace_existing=True,
+                        )
+                    elif autonomous.minutes:
+                        logger.info(
+                            f"Scheduling interval task {task_id} every {autonomous.minutes} minutes"
+                        )
+                        scheduler.add_job(
+                            run_autonomous_task,
+                            "interval",
+                            id=task_id,
+                            args=[
+                                agent.id,
+                                agent.owner,
+                                autonomous.id,
+                                autonomous.prompt,
+                            ],
+                            minutes=autonomous.minutes,
+                            replace_existing=True,
+                        )
+                    else:
+                        logger.error(
+                            f"Invalid autonomous configuration for task {task_id}: {autonomous}"
+                        )
+                except Exception as e:
+                    logger.error(
+                        f"Failed to schedule autonomous task [{agent.id}] {task_id}: {e}"
+                    )
+
+                # Update the last updated time
+                autonomous_tasks_updated_at[task_id] = agent.updated_at
+
+    # Delete jobs not in the list
+    logger.debug(f"Current jobs: {planned_jobs}")
+    jobs = scheduler.get_jobs()
+    for job in jobs:
+        if job.id not in planned_jobs:
+            scheduler.remove_job(job.id)
+            logger.info(f"Removed job {job.id}")
+
+
+if __name__ == "__main__":
+
+    async def main():
+        # Initialize database
+        await init_db(**config.db)
+        # Initialize Redis if configured
+        if config.redis_host:
+            await init_redis(
+                host=config.redis_host,
+                port=config.redis_port,
+                db=config.redis_db,
+            )
+
+        # Add job to schedule agent autonomous tasks
+        # Run it immediately on startup and then every minute
+        jobs = scheduler.get_jobs()
+        job_ids = [job.id for job in jobs]
+        if HEAD_JOB_ID not in job_ids:
+            scheduler.add_job(
+                schedule_agent_autonomous_tasks,
+                "interval",
+                id=HEAD_JOB_ID,
+                minutes=1,
+                next_run_time=datetime.now(),
+                replace_existing=True,
+            )
+
+        # Add job to send heartbeat every minute
+        if config.redis_host:
+            scheduler.add_job(
+                send_autonomous_heartbeat,
+                trigger=CronTrigger(minute="*", timezone="UTC"),  # Run every minute
+                id="autonomous_heartbeat",
+                name="Autonomous Heartbeat",
+                replace_existing=True,
+            )
+
+        # Create a shutdown event for graceful termination
+        shutdown_event = asyncio.Event()
+
+        # Set up signal handlers for graceful shutdown
+        loop = asyncio.get_running_loop()
+
+        # Define an async function to set the shutdown event
+        async def set_shutdown():
+            shutdown_event.set()
+
+        # Register signal handlers
+        for sig in (signal.SIGTERM, signal.SIGINT):
+            loop.add_signal_handler(sig, lambda: asyncio.create_task(set_shutdown()))
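+        # Note: add_signal_handler callbacks run inside the event loop; the
+        # lambda schedules set_shutdown() as a task rather than setting the
+        # event directly from signal-handling context.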
+ + # Define the cleanup function that will be called on exit + async def cleanup_resources(): + try: + if config.redis_host: + redis_client = get_redis() + await clean_heartbeat(redis_client, "autonomous") + except Exception as e: + logger.error(f"Error cleaning up heartbeat: {e}") + + try: + logger.info("Starting autonomous agents scheduler...") + scheduler.start() + + # Wait for shutdown event + logger.info( + "Autonomous process running. Press Ctrl+C or send SIGTERM to exit." + ) + await shutdown_event.wait() + logger.info("Received shutdown signal. Shutting down gracefully...") + except Exception as e: + logger.error(f"Error in autonomous process: {e}") + finally: + # Run the cleanup code and shutdown the scheduler + await cleanup_resources() + + if scheduler.running: + scheduler.shutdown() + + # Run the async main function + asyncio.run(main()) diff --git a/app/checker.py b/app/checker.py new file mode 100644 index 00000000..034f9e7b --- /dev/null +++ b/app/checker.py @@ -0,0 +1,202 @@ +"""Checker for periodic read-only validation tasks. + +This module runs a separate scheduler for account checks and other validation +tasks that only require read-only database access. +""" + +import asyncio +import logging +import signal + +import sentry_sdk +from apscheduler.jobstores.redis import RedisJobStore +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from apscheduler.triggers.cron import CronTrigger + +from app.admin.account_checking import run_quick_checks, run_slow_checks +from intentkit.config.config import config +from intentkit.models.db import init_db +from intentkit.models.redis import ( + clean_heartbeat, + get_redis, + init_redis, + send_heartbeat, +) + +logger = logging.getLogger(__name__) + +if config.sentry_dsn: + sentry_sdk.init( + dsn=config.sentry_dsn, + sample_rate=config.sentry_sample_rate, + # traces_sample_rate=config.sentry_traces_sample_rate, + # profiles_sample_rate=config.sentry_profiles_sample_rate, + environment=config.env, + release=config.release, + server_name="intent-checker", + ) + + +async def run_quick_account_checks(): + """Run quick account consistency checks and send results to Slack. + + This runs the faster checks for account balances, transactions, and other credit-related consistency + issues and reports the results to the configured Slack channel. + """ + logger.info("Running scheduled quick account consistency checks") + try: + await run_quick_checks() + logger.info("Completed quick account consistency checks") + except Exception as e: + logger.error(f"Error running quick account consistency checks: {e}") + + +async def run_slow_account_checks(): + """Run slow account consistency checks and send results to Slack. + + This runs the more resource-intensive checks for account balances, transactions, + and other credit-related consistency issues and reports the results to the configured Slack channel. + """ + logger.info("Running scheduled slow account consistency checks") + try: + await run_slow_checks() + logger.info("Completed slow account consistency checks") + except Exception as e: + logger.error(f"Error running slow account consistency checks: {e}") + + +async def send_checker_heartbeat(): + """Send a heartbeat signal to Redis to indicate the checker is running. + + This function sends a heartbeat to Redis that expires after 16 minutes, + allowing other services to verify that the checker is operational. 
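+
+    Minimal usage sketch (mirrors this function's body):
+
+        redis_client = get_redis()
+        await send_heartbeat(redis_client, "checker")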
+ """ + logger.info("Sending checker heartbeat") + try: + redis_client = get_redis() + await send_heartbeat(redis_client, "checker") + logger.info("Sent checker heartbeat successfully") + except Exception as e: + logger.error(f"Error sending checker heartbeat: {e}") + + +def create_checker(): + """Create and configure the AsyncIOScheduler for validation checks.""" + # Job Store + jobstores = {} + if config.redis_host: + jobstores["default"] = RedisJobStore( + host=config.redis_host, + port=config.redis_port, + db=config.redis_db, + jobs_key="intentkit:checker:jobs", + run_times_key="intentkit:checker:run_times", + ) + logger.info(f"checker using redis store: {config.redis_host}") + + scheduler = AsyncIOScheduler(jobstores=jobstores) + + # Run quick account consistency checks every 2 hours at the top of the hour + scheduler.add_job( + run_quick_account_checks, + trigger=CronTrigger( + hour="*/2", minute="30", timezone="UTC" + ), # Run every 2 hours + id="quick_account_checks", + name="Quick Account Consistency Checks", + replace_existing=True, + ) + + # Run slow account consistency checks once a day at midnight UTC + scheduler.add_job( + run_slow_account_checks, + trigger=CronTrigger( + hour="0,12", minute="0", timezone="UTC" + ), # Run 2 times a day + id="slow_account_checks", + name="Slow Account Consistency Checks", + replace_existing=True, + ) + + # Send heartbeat every 5 minutes to indicate checker is running + if config.redis_host: + scheduler.add_job( + send_checker_heartbeat, + trigger=CronTrigger(minute="*", timezone="UTC"), # Run every minute + id="checker_heartbeat", + name="Checker Heartbeat", + replace_existing=True, + ) + + return scheduler + + +def start_checker(): + """Create, configure and start the checker scheduler.""" + scheduler = create_checker() + scheduler.start() + return scheduler + + +if __name__ == "__main__": + + async def main(): + # Initialize database + await init_db(**config.db) + + # Initialize Redis if configured + if config.redis_host: + await init_redis( + host=config.redis_host, + port=config.redis_port, + db=config.redis_db, + ) + + # Set up a future to handle graceful shutdown + shutdown_event = asyncio.Event() + + # Set up signal handlers for graceful shutdown + loop = asyncio.get_running_loop() + + # Define an async function to set the shutdown event + async def set_shutdown(): + shutdown_event.set() + + # Register signal handlers + for sig in (signal.SIGTERM, signal.SIGINT): + loop.add_signal_handler(sig, lambda: asyncio.create_task(set_shutdown())) + + # Define the cleanup function that will be called on exit + async def cleanup_resources(): + try: + if config.redis_host: + redis_client = get_redis() + await clean_heartbeat(redis_client, "checker") + except Exception as e: + logger.error(f"Error cleaning up heartbeat: {e}") + + # Initialize checker + scheduler = create_checker() + + try: + logger.info("Starting checker process...") + scheduler.start() + + # Wait for shutdown event + logger.info( + "Checker process running. Press Ctrl+C or send SIGTERM to exit." + ) + await shutdown_event.wait() + logger.info("Received shutdown signal. 
+        except Exception as e:
+            logger.error(f"Error in checker process: {e}")
+        finally:
+            # Run the cleanup code and shutdown the scheduler
+            await cleanup_resources()
+
+            if scheduler.running:
+                scheduler.shutdown()
+
+    # Run the async main function
+    # We handle all signals inside the main function, so we don't need to handle KeyboardInterrupt here
+    asyncio.run(main())
diff --git a/app/entrypoints/agent_api.py b/app/entrypoints/agent_api.py
new file mode 100644
index 00000000..c94e4f30
--- /dev/null
+++ b/app/entrypoints/agent_api.py
@@ -0,0 +1,679 @@
+"""IntentKit Chat API Router."""
+
+import logging
+import textwrap
+from typing import Annotated, List, Optional
+
+from epyxid import XID
+from fastapi import (
+    APIRouter,
+    Depends,
+    HTTPException,
+    Path,
+    Query,
+    Response,
+    status,
+)
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel, ConfigDict, Field
+from sqlalchemy import desc, select
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.auth import AgentToken, verify_agent_token
+from intentkit.core.engine import execute_agent, stream_agent
+from intentkit.models.agent import Agent, AgentResponse
+from intentkit.models.agent_data import AgentData
+from intentkit.models.app_setting import SystemMessageType
+from intentkit.models.chat import (
+    AuthorType,
+    Chat,
+    ChatCreate,
+    ChatMessage,
+    ChatMessageAttachment,
+    ChatMessageCreate,
+    ChatMessageTable,
+)
+from intentkit.models.db import get_db
+
+logger = logging.getLogger(__name__)
+
+router_rw = APIRouter()
+router_ro = APIRouter()
+
+
+def get_real_user_id(
+    agent_token: AgentToken, user_id: Optional[str], agent_owner: Optional[str]
+) -> str:
+    """Generate the real user_id based on the agent token and user_id.
+
+    Args:
+        agent_token: Agent token containing agent_id and is_public flag
+        user_id: Optional user ID
+        agent_owner: Agent owner ID
+
+    Returns:
+        Real user ID string
+    """
+    if user_id:
+        return f"{agent_token.agent_id}_{user_id}"
+    else:
+        if agent_token.is_public:
+            return f"{agent_token.agent_id}_anonymous"
+        else:
+            return agent_owner or agent_token.agent_id
+
+
+class ChatMessagesResponse(BaseModel):
+    """Response model for chat messages with pagination."""
+
+    data: List[ChatMessage]
+    has_more: bool = False
+    next_cursor: Optional[str] = None
+
+    model_config = ConfigDict(
+        use_enum_values=True,
+        json_schema_extra={
+            "example": {"data": [], "has_more": False, "next_cursor": None}
+        },
+    )
+
+
+class ChatUpdateRequest(BaseModel):
+    """Request model for updating a chat thread."""
+
+    summary: Annotated[
+        str,
+        Field(
+            ...,
+            description="Updated summary for the chat thread",
+            examples=["Updated chat summary"],
+            max_length=500,
+        ),
+    ]
+
+    model_config = ConfigDict(
+        json_schema_extra={"example": {"summary": "Updated chat summary"}},
+    )
+
+
+class ChatMessageRequest(BaseModel):
+    """Request model for chat messages.
+
+    This model represents the request body for creating a new chat message.
+    It contains the fields needed to identify the chat context and the message
+    content, along with optional attachments. The user ID is optional; when
+    provided, it is combined with the agent ID to form the stored user
+    identifier.
+    """
+
+    user_id: Annotated[
+        Optional[str],
+        Field(
+            None,
+            description="User ID (optional). 
When provided (whether API key uses pk or sk), only public skills will be accessible.", + examples=["user-123"], + ), + ] + app_id: Annotated[ + Optional[str], + Field( + None, + description="Optional application identifier", + examples=["app-789"], + ), + ] + message: Annotated[ + str, + Field( + ..., + description="Content of the message", + examples=["Hello, how can you help me today?"], + min_length=1, + max_length=65535, + ), + ] + stream: Annotated[ + Optional[bool], + Field( + None, + description="Whether to stream the response", + ), + ] + search_mode: Annotated[ + Optional[bool], + Field( + None, + description="Optional flag to enable search mode", + ), + ] + super_mode: Annotated[ + Optional[bool], + Field( + None, + description="Optional flag to enable super mode", + ), + ] + attachments: Annotated[ + Optional[List[ChatMessageAttachment]], + Field( + None, + description="Optional list of attachments (links, images, or files)", + examples=[[{"type": "link", "url": "https://example.com"}]], + ), + ] + + model_config = ConfigDict( + use_enum_values=True, + json_schema_extra={ + "example": { + "user_id": "user-123", + "app_id": "app-789", + "message": "Hello, how can you help me today?", + "search_mode": True, + "super_mode": False, + "attachments": [ + { + "type": "link", + "url": "https://example.com", + } + ], + } + }, + ) + + +@router_ro.get( + "/chats", + response_model=List[Chat], + operation_id="list_chats", + summary="List chat threads", + description="Retrieve all chat threads for the current user.", + tags=["Thread"], +) +async def get_chats( + user_id: Optional[str] = Query( + None, + description="User ID (optional). When provided (whether API key uses pk or sk), only public skills will be accessible.", + ), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Get a list of chat threads.""" + agent_id = agent_token.agent_id + # Get agent to access owner + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Entity {agent_id} not found") + + real_user_id = get_real_user_id(agent_token, user_id, agent.owner) + return await Chat.get_by_agent_user(agent_id, real_user_id) + + +@router_rw.post( + "/chats", + response_model=Chat, + operation_id="create_chat_thread", + summary="Create a new chat thread", + description="Create a new chat thread for a specific user.", + tags=["Thread"], +) +async def create_chat( + user_id: Optional[str] = Query( + None, + description="User ID (optional). When provided (whether API key uses pk or sk), only public skills will be accessible.", + ), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Create a new chat thread.""" + agent_id = agent_token.agent_id + # Verify that the entity exists + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Entity {agent_id} not found") + + real_user_id = get_real_user_id(agent_token, user_id, agent.owner) + chat = ChatCreate( + id=str(XID()), + agent_id=agent_id, + user_id=real_user_id, + summary="", + rounds=0, + ) + await chat.save() + # Retrieve the full Chat object with auto-generated fields + full_chat = await Chat.get(chat.id) + return full_chat + + +@router_ro.get( + "/chats/{chat_id}", + response_model=Chat, + operation_id="get_chat_thread_by_id", + summary="Get chat thread by ID", + description="Retrieve a specific chat thread by its ID for the current user. 
Returns 404 if not found or not owned by the user.", + tags=["Thread"], +) +async def get_chat( + chat_id: str = Path(..., description="Chat ID"), + user_id: Optional[str] = Query( + None, + description="User ID (optional). When provided (whether API key uses pk or sk), only public skills will be accessible.", + ), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Get a specific chat thread.""" + agent_id = agent_token.agent_id + # Get agent to access owner + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Entity {agent_id} not found") + + real_user_id = get_real_user_id(agent_token, user_id, agent.owner) + chat = await Chat.get(chat_id) + if not chat or chat.agent_id != agent_id or chat.user_id != real_user_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found" + ) + return chat + + +@router_rw.patch( + "/chats/{chat_id}", + response_model=Chat, + operation_id="update_chat_thread", + summary="Update a chat thread", + description="Update details of a specific chat thread. Currently only supports updating the summary.", + tags=["Thread"], +) +async def update_chat( + request: ChatUpdateRequest, + chat_id: str = Path(..., description="Chat ID"), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Update a chat thread.""" + agent_id = agent_token.agent_id + chat = await Chat.get(chat_id) + if not chat or chat.agent_id != agent_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found" + ) + + # Update the summary field + updated_chat = await chat.update_summary(request.summary) + + return updated_chat + + +@router_rw.delete( + "/chats/{chat_id}", + status_code=status.HTTP_204_NO_CONTENT, + operation_id="delete_chat_thread", + summary="Delete a chat thread", + description="Delete a specific chat thread for the current user. 
Returns 404 if not found or not owned by the user.", + tags=["Thread"], +) +async def delete_chat( + chat_id: str = Path(..., description="Chat ID"), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Delete a chat thread.""" + agent_id = agent_token.agent_id + chat = await Chat.get(chat_id) + if not chat or chat.agent_id != agent_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found" + ) + await chat.delete() + return Response(status_code=status.HTTP_204_NO_CONTENT) + + +@router_ro.get( + "/chats/{chat_id}/messages", + response_model=ChatMessagesResponse, + operation_id="list_messages_in_chat", + summary="List messages in a chat thread", + description="Retrieve the message history for a specific chat thread with cursor-based pagination.", + tags=["Message"], +) +async def get_messages( + chat_id: str = Path(..., description="Chat ID"), + agent_token: AgentToken = Depends(verify_agent_token), + db: AsyncSession = Depends(get_db), + cursor: Optional[str] = Query( + None, description="Cursor for pagination (message id)" + ), + limit: int = Query( + 20, ge=1, le=100, description="Maximum number of messages to return" + ), +) -> ChatMessagesResponse: + """Get the message history for a chat thread with cursor-based pagination.""" + agent_id = agent_token.agent_id + chat = await Chat.get(chat_id) + if not chat or chat.agent_id != agent_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found" + ) + + stmt = ( + select(ChatMessageTable) + .where( + ChatMessageTable.agent_id == agent_id, ChatMessageTable.chat_id == chat_id + ) + .order_by(desc(ChatMessageTable.id)) + .limit(limit + 1) + ) + if cursor: + stmt = stmt.where(ChatMessageTable.id < cursor) + result = await db.scalars(stmt) + messages = result.all() + has_more = len(messages) > limit + messages_to_return = messages[:limit] + next_cursor = ( + str(messages_to_return[-1].id) if has_more and messages_to_return else None + ) + # Return as ChatMessagesResponse object + return ChatMessagesResponse( + data=[ + ChatMessage.model_validate(m).sanitize_privacy() for m in messages_to_return + ], + has_more=has_more, + next_cursor=next_cursor, + ) + + +@router_rw.post( + "/chats/{chat_id}/messages", + response_model=List[ChatMessage], + operation_id="send_message_to_chat", + summary="Send a message to a chat thread", + description=( + "Send a new message to a specific chat thread. The response is a list of messages generated by the agent. " + "The response does not include the original user message. It could be skill calls, agent messages, or system error messages.\n\n" + "**Stream Mode:**\n" + "When `stream: true` is set in the request body, the response will be a Server-Sent Events (SSE) stream. " + "Each event has the type 'message' and contains a ChatMessage object as JSON data. " + "The SSE format follows the standard: `event: message\\ndata: {ChatMessage JSON}\\n\\n`. " + "This allows real-time streaming of agent responses as they are generated, including intermediate skill calls and final responses." 
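+        "\n\n**Illustrative SSE exchange** (field values are placeholders):\n\n"
+        "    event: message\n"
+        '    data: {"id": "...", "author_type": "agent", "message": "Hi!"}\n'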
+ ), + tags=["Message"], +) +async def send_message( + request: ChatMessageRequest, + chat_id: str = Path(..., description="Chat ID"), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Send a new message.""" + agent_id = agent_token.agent_id + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Entity {agent_id} not found") + + real_user_id = get_real_user_id(agent_token, request.user_id, agent.owner) + # Verify that the chat exists and belongs to the user + chat = await Chat.get(chat_id) + if not chat or chat.agent_id != agent_id or chat.user_id != real_user_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found" + ) + + # Update summary if it's empty + if not chat.summary: + summary = textwrap.shorten(request.message, width=20, placeholder="...") + await chat.update_summary(summary) + + # Increment the round count + await chat.add_round() + + user_message = ChatMessageCreate( + id=str(XID()), + agent_id=agent_id, + chat_id=chat_id, + user_id=real_user_id, + author_id=real_user_id, + author_type=AuthorType.API, + thread_type=AuthorType.API, + message=request.message, + attachments=request.attachments, + model=None, + reply_to=None, + skill_calls=None, + input_tokens=0, + output_tokens=0, + time_cost=0.0, + credit_event_id=None, + credit_cost=None, + cold_start_cost=0.0, + app_id=request.app_id, + search_mode=request.search_mode, + super_mode=request.super_mode, + ) + # Don't save the message here - let the handler save it + # await user_message.save_in_session(db) + + if request.stream: + + async def stream_gen(): + async for chunk in stream_agent(user_message): + yield f"event: message\ndata: {chunk.model_dump_json()}\n\n" + + return StreamingResponse( + stream_gen(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}, + ) + else: + response_messages = await execute_agent(user_message) + # Return messages list directly for compatibility with stream mode + return [message.sanitize_privacy() for message in response_messages] + + +@router_rw.post( + "/chats/{chat_id}/messages/retry", + response_model=List[ChatMessage], + operation_id="retry_message_in_chat", + summary="Retry a message in a chat thread", + description="Retry sending the last message in a specific chat thread. If the last message is from the system, returns all messages after the last user message. If the last message is from a user, generates a new response. Only works with non-streaming mode.", + tags=["Message"], +) +async def retry_message( + chat_id: str = Path(..., description="Chat ID"), + user_id: Optional[str] = Query( + None, + description="User ID (optional). When provided (whether API key uses pk or sk), only public skills will be accessible.", + ), + agent_token: AgentToken = Depends(verify_agent_token), + db: AsyncSession = Depends(get_db), +): + """Retry the last message in a chat thread. + + If the last message is from the system, return all messages after the last user message. + If the last message is from a user, generate a new response. + Note: Retry only works in non-streaming mode. 
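+
+    Summary of the dispatch below, by the author of the last message:
+    agent/system -> all messages after the last user message are returned;
+    skill -> the last message plus a SKILL_INTERRUPTED notice are returned;
+    user -> the agent is re-executed with the same message.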
+ """ + agent_id = agent_token.agent_id + # Get entity and check if exists + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Entity {agent_id} not found") + + real_user_id = get_real_user_id(agent_token, user_id, agent.owner) + # Verify that the chat exists and belongs to the user + chat = await Chat.get(chat_id) + if not chat or chat.agent_id != agent_id or chat.user_id != real_user_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Chat not found" + ) + + last = await db.scalar( + select(ChatMessageTable) + .where( + ChatMessageTable.agent_id == agent_id, ChatMessageTable.chat_id == chat_id + ) + .order_by(desc(ChatMessageTable.created_at)) + .limit(1) + ) + + if not last: + raise HTTPException(status_code=404, detail="No messages found") + + last_message = ChatMessage.model_validate(last) + + # If last message is from system, find all messages after last user message + if ( + last_message.author_type == AuthorType.AGENT + or last_message.author_type == AuthorType.SYSTEM + ): + # Find the last user message + last_user_message = await db.scalar( + select(ChatMessageTable) + .where( + ChatMessageTable.agent_id == agent_id, + ChatMessageTable.chat_id == chat_id, + ChatMessageTable.author_type == AuthorType.API, + ) + .order_by(desc(ChatMessageTable.created_at)) + .limit(1) + ) + + if not last_user_message: + # If no user message found, just return the last message + return [last_message.sanitize_privacy()] + + # Get all messages after the last user message + messages_after_user = await db.scalars( + select(ChatMessageTable) + .where( + ChatMessageTable.agent_id == agent_id, + ChatMessageTable.chat_id == chat_id, + ChatMessageTable.created_at > last_user_message.created_at, + ) + .order_by(ChatMessageTable.created_at) + ) + + messages_list = messages_after_user.all() + if messages_list: + return [ + ChatMessage.model_validate(msg).sanitize_privacy() + for msg in messages_list + ] + else: + # Fallback to just the last message if no messages found after user message + return [last_message.sanitize_privacy()] + + # If last message is from skill, provide warning message + if last_message.author_type == AuthorType.SKILL: + error_message_create = await ChatMessageCreate.from_system_message( + SystemMessageType.SKILL_INTERRUPTED, + agent_id=agent_id, + chat_id=chat_id, + user_id=real_user_id, + author_id=agent_id, + thread_type=last_message.thread_type, + reply_to=last_message.id, + time_cost=0.0, + ) + error_message = await error_message_create.save() + return [last_message.sanitize_privacy(), error_message.sanitize_privacy()] + + # If last message is from user, generate a new response + # Create a new user message for retry (non-streaming only) + retry_user_message = ChatMessageCreate( + id=str(XID()), + agent_id=agent_id, + chat_id=chat_id, + user_id=real_user_id, + author_id=real_user_id, + author_type=AuthorType.API, + thread_type=AuthorType.API, + message=last_message.message, + attachments=last_message.attachments, + model=None, + reply_to=None, + skill_calls=None, + input_tokens=0, + output_tokens=0, + time_cost=0.0, + credit_event_id=None, + credit_cost=None, + cold_start_cost=0.0, + app_id=last_message.app_id, + search_mode=last_message.search_mode, + super_mode=last_message.super_mode, + ) + + # Execute handler (non-streaming mode only) + response_messages = await execute_agent(retry_user_message) + + # Return messages list directly for compatibility with send_message + return [message.sanitize_privacy() for 
message in response_messages] + + +@router_ro.get( + "/messages/{message_id}", + response_model=ChatMessage, + operation_id="get_message_by_id", + summary="Get message by ID", + description="Retrieve a specific chat message by its ID for the current user. Returns 404 if not found or not owned by the user.", + tags=["Message"], +) +async def get_message( + message_id: str = Path(..., description="Message ID"), + user_id: Optional[str] = Query( + None, + description="User ID (optional). When provided (whether API key uses pk or sk), only public skills will be accessible.", + ), + agent_token: AgentToken = Depends(verify_agent_token), +): + """Get a specific message.""" + agent_id = agent_token.agent_id + # Get agent to access owner + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Entity {agent_id} not found") + + real_user_id = get_real_user_id(agent_token, user_id, agent.owner) + message = await ChatMessage.get(message_id) + if not message or message.user_id != real_user_id: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, detail="Message not found" + ) + return message.sanitize_privacy() + + +@router_ro.get( + "/agent", + response_model=AgentResponse, + operation_id="get_current_agent", + summary="Get current agent information", + description="Retrieve the current agent's public information from the token.", + tags=["Agent"], +) +async def get_current_agent( + agent_token: AgentToken = Depends(verify_agent_token), +) -> Response: + """Get the current agent from JWT token. + + **Returns:** + * `AgentResponse` - Agent configuration with additional processed data + + **Raises:** + * `HTTPException`: + - 404: Agent not found + """ + agent_id = agent_token.agent_id + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Get agent data + agent_data = await AgentData.get(agent_id) + + agent_response = await AgentResponse.from_agent(agent, agent_data) + + # Return Response with ETag header + return Response( + content=agent_response.model_dump_json(), + media_type="application/json", + headers={"ETag": agent_response.etag()}, + ) diff --git a/app/entrypoints/autonomous.py b/app/entrypoints/autonomous.py new file mode 100644 index 00000000..265cfb3b --- /dev/null +++ b/app/entrypoints/autonomous.py @@ -0,0 +1,49 @@ +import logging + +from epyxid import XID + +from intentkit.core.engine import execute_agent +from intentkit.models.chat import AuthorType, ChatMessageCreate + +logger = logging.getLogger(__name__) + + +async def run_autonomous_task( + agent_id: str, agent_owner: str, task_id: str, prompt: str +): + """ + Run a specific autonomous task for an agent. 
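+
+    The prompt is posted into the task's dedicated chat (chat_id
+    "autonomous-{task_id}") as a TRIGGER message, and the agent's response is
+    logged rather than returned.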
+
+    Args:
+        agent_id: The ID of the agent
+        agent_owner: The owner of the agent, used as the user ID for the run
+        task_id: The ID of the autonomous task
+        prompt: The autonomous prompt to execute
+    """
+    logger.info(f"Running autonomous task {task_id} for agent {agent_id}")
+
+    try:
+        # Run the autonomous action
+        chat_id = f"autonomous-{task_id}"
+        message = ChatMessageCreate(
+            id=str(XID()),
+            agent_id=agent_id,
+            chat_id=chat_id,
+            user_id=agent_owner,
+            author_id="autonomous",
+            author_type=AuthorType.TRIGGER,
+            thread_type=AuthorType.TRIGGER,
+            message=prompt,
+        )
+
+        # Execute agent and get response
+        resp = await execute_agent(message)
+
+        # Log the response
+        logger.info(
+            f"Task {task_id} completed: " + "\n".join(str(m) for m in resp),
+            extra={"aid": agent_id},
+        )
+    except Exception as e:
+        logger.error(
+            f"Error in autonomous task {task_id} for agent {agent_id}: {str(e)}"
+        )
diff --git a/app/entrypoints/openai_compatible.py b/app/entrypoints/openai_compatible.py
new file mode 100644
index 00000000..91fbf575
--- /dev/null
+++ b/app/entrypoints/openai_compatible.py
@@ -0,0 +1,485 @@
+import logging
+from typing import Any, Dict, List, Optional
+
+from epyxid import XID
+from fastapi import APIRouter, Depends, HTTPException, status
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel, Field
+
+from app.auth import AgentToken, verify_agent_token
+from intentkit.core.engine import execute_agent
+from intentkit.models.agent import Agent
+from intentkit.models.chat import (
+    AuthorType,
+    ChatMessageAttachment,
+    ChatMessageAttachmentType,
+    ChatMessageCreate,
+)
+
+# init logger
+logger = logging.getLogger(__name__)
+
+openai_router = APIRouter()
+
+
+# OpenAI API Models
+class OpenAIMessage(BaseModel):
+    """OpenAI message format."""
+
+    role: str = Field(..., description="The role of the message author")
+    content: str | List[Dict[str, Any]] = Field(
+        ..., description="The content of the message"
+    )
+
+
+class OpenAIChatCompletionRequest(BaseModel):
+    """OpenAI Chat Completion API request format."""
+
+    model: str = Field(..., description="ID of the model to use")
+    messages: List[OpenAIMessage] = Field(
+        ..., description="A list of messages comprising the conversation"
+    )
+    max_tokens: Optional[int] = Field(
+        None, description="The maximum number of tokens to generate"
+    )
+    temperature: Optional[float] = Field(
+        None, description="What sampling temperature to use"
+    )
+    top_p: Optional[float] = Field(
+        None, description="An alternative to sampling with temperature"
+    )
+    n: Optional[int] = Field(
+        None, description="How many chat completion choices to generate"
+    )
+    stream: Optional[bool] = Field(
+        None, description="If set, partial message deltas will be sent"
+    )
+    stop: Optional[str | List[str]] = Field(
+        None, description="Up to 4 sequences where the API will stop generating"
+    )
+    presence_penalty: Optional[float] = Field(
+        None, description="Number between -2.0 and 2.0"
+    )
+    frequency_penalty: Optional[float] = Field(
+        None, description="Number between -2.0 and 2.0"
+    )
+    logit_bias: Optional[Dict[str, int]] = Field(
+        None, description="Modify the likelihood of specified tokens"
+    )
+    user: Optional[str] = Field(
+        None, description="A unique identifier representing your end-user"
+    )
+    response_format: Optional[Dict[str, Any]] = Field(
+        None, description="An object specifying the format"
+    )
+
+
+class OpenAIUsage(BaseModel):
+    """OpenAI usage statistics."""
+
+    prompt_tokens: int = Field(0, description="Number of tokens in the prompt")
+    completion_tokens: int = Field(0, description="Number of 
tokens in the completion") + total_tokens: int = Field(0, description="Total number of tokens used") + + +class OpenAIDelta(BaseModel): + """OpenAI delta object for streaming.""" + + role: Optional[str] = Field(None, description="The role of the message author") + content: Optional[str] = Field(None, description="The content of the message") + + +class OpenAIChoiceDelta(BaseModel): + """OpenAI choice object for streaming.""" + + index: int = Field(0, description="The index of the choice") + delta: OpenAIDelta = Field(..., description="The delta object") + finish_reason: Optional[str] = Field( + None, description="The reason the model stopped generating tokens" + ) + + +class OpenAIChatCompletionChunk(BaseModel): + """OpenAI Chat Completion chunk for streaming.""" + + id: str = Field(..., description="A unique identifier for the chat completion") + object: str = Field("chat.completion.chunk", description="The object type") + created: int = Field( + ..., description="The Unix timestamp when the chat completion was created" + ) + model: str = Field(..., description="The model used for the chat completion") + choices: List[OpenAIChoiceDelta] = Field( + ..., description="A list of chat completion choices" + ) + system_fingerprint: Optional[str] = Field(None, description="System fingerprint") + + +class OpenAIChoice(BaseModel): + """OpenAI choice object.""" + + index: int = Field(0, description="The index of the choice") + message: OpenAIMessage = Field(..., description="The message object") + finish_reason: str = Field( + "stop", description="The reason the model stopped generating tokens" + ) + + +class OpenAIChatCompletionResponse(BaseModel): + """OpenAI Chat Completion API response format.""" + + id: str = Field(..., description="A unique identifier for the chat completion") + object: str = Field("chat.completion", description="The object type") + created: int = Field( + ..., description="The Unix timestamp when the chat completion was created" + ) + model: str = Field(..., description="The model used for the chat completion") + choices: List[OpenAIChoice] = Field( + ..., description="A list of chat completion choices" + ) + usage: OpenAIUsage = Field( + ..., description="Usage statistics for the completion request" + ) + system_fingerprint: Optional[str] = Field(None, description="System fingerprint") + + +def extract_text_and_images( + content: str | List[Dict[str, Any]], +) -> tuple[str, List[ChatMessageAttachment]]: + """Extract text and images from OpenAI message content. + + Args: + content: The message content (string or list of content parts) + + Returns: + tuple: (text_content, list_of_attachments) + """ + if isinstance(content, str): + return content, [] + + text_parts = [] + attachments = [] + + for part in content: + if part.get("type") == "text": + text_parts.append(part.get("text", "")) + elif part.get("type") == "image_url": + image_url = part.get("image_url", {}).get("url", "") + if image_url: + attachments.append( + { + "type": ChatMessageAttachmentType.IMAGE, + "url": image_url, + "name": "image", + } + ) + + return " ".join(text_parts), attachments + + +def create_streaming_response(content: str, request_id: str, model: str, created: int): + """Create a streaming response generator for OpenAI-compatible streaming. 
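+
+    The emitted chunks follow the OpenAI SSE framing: a first delta carrying
+    the assistant role, content deltas of roughly 20 characters each, a final
+    delta with finish_reason "stop", then a closing "data: [DONE]" sentinel.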
+ + Args: + content: The complete message content to stream + request_id: The request ID + model: The model name + created: The creation timestamp + + Yields: + str: Server-sent events formatted chunks + """ + # First chunk with role + first_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role="assistant", content=None), + finish_reason=None, + ) + ], + system_fingerprint=None, + ) + yield f"data: {first_chunk.model_dump_json()}\n\n" + + # Content chunks - split content into smaller pieces for streaming effect + chunk_size = 20 # Characters per chunk + for i in range(0, len(content), chunk_size): + chunk_content = content[i : i + chunk_size] + content_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role=None, content=chunk_content), + finish_reason=None, + ) + ], + system_fingerprint=None, + ) + yield f"data: {content_chunk.model_dump_json()}\n\n" + + # Final chunk with finish_reason + final_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role=None, content=None), + finish_reason="stop", + ) + ], + system_fingerprint=None, + ) + yield f"data: {final_chunk.model_dump_json()}\n\n" + + # End of stream + yield "data: [DONE]\n\n" + + +def create_streaming_response_batched( + content_parts: List[str], request_id: str, model: str, created: int +): + """Create a streaming response generator for OpenAI-compatible streaming with batched content. + + Args: + content_parts: List of content parts to stream in batches + request_id: The request ID + model: The model name + created: The creation timestamp + + Yields: + str: Server-sent events formatted chunks + """ + # First chunk with role + first_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role="assistant", content=None), + finish_reason=None, + ) + ], + system_fingerprint=None, + ) + yield f"data: {first_chunk.model_dump_json()}\n\n" + + # Stream each content part as a separate batch + for i, content_part in enumerate(content_parts): + if content_part: + # Add newline between parts (except for the first one) + if i > 0: + newline_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role=None, content="\n"), + finish_reason=None, + ) + ], + system_fingerprint=None, + ) + yield f"data: {newline_chunk.model_dump_json()}\n\n" + + # Stream the content part + content_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role=None, content=content_part), + finish_reason=None, + ) + ], + system_fingerprint=None, + ) + yield f"data: {content_chunk.model_dump_json()}\n\n" + + # Final chunk with finish_reason + final_chunk = OpenAIChatCompletionChunk( + id=request_id, + object="chat.completion.chunk", + created=created, + model=model, + choices=[ + OpenAIChoiceDelta( + index=0, + delta=OpenAIDelta(role=None, content=None), + finish_reason="stop", + ) + ], + 
system_fingerprint=None, + ) + yield f"data: {final_chunk.model_dump_json()}\n\n" + + # End of stream + yield "data: [DONE]\n\n" + + +@openai_router.post( + "/chat/completions", + tags=["OpenAI"], + operation_id="create_chat_completion", + summary="Create chat completion", +) +async def create_chat_completion( + request: OpenAIChatCompletionRequest, + agent_token: AgentToken = Depends(verify_agent_token), +): + """Create a chat completion using OpenAI-compatible API. + + This endpoint provides OpenAI Chat Completion API compatibility. + Only the last message from the messages array is processed. + + Args: + request: OpenAI chat completion request + agent_token: The authenticated agent token information + + Returns: + OpenAIChatCompletionResponse: OpenAI-compatible response + """ + agent_id = agent_token.agent_id + if not request.messages: + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Messages array cannot be empty", + ) + + # Get the last message only + last_message = request.messages[-1] + + # Extract text and images from the message content + text_content, attachments = extract_text_and_images(last_message.content) + + if not text_content.strip(): + raise HTTPException( + status_code=status.HTTP_400_BAD_REQUEST, + detail="Message content cannot be empty", + ) + + # Get the agent to access its owner + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found") + + # Use agent owner or fallback to agent_id if owner is None + if not agent_token.is_public and agent.owner: + user_id = agent.owner + else: + user_id = agent_id + "_openai" + + # Create user message with fixed chat_id "api" and user_id as agent.owner + user_message = ChatMessageCreate( + id=str(XID()), + agent_id=agent_id, + chat_id="api", + user_id=user_id, + author_id=user_id, + author_type=AuthorType.API, + thread_type=AuthorType.API, + message=text_content, + attachments=attachments if attachments else None, + model=None, + reply_to=None, + skill_calls=None, + input_tokens=0, + output_tokens=0, + time_cost=0.0, + credit_event_id=None, + credit_cost=None, + cold_start_cost=0.0, + app_id=None, + search_mode=None, + super_mode=None, + ) + + # Execute agent + response_messages = await execute_agent(user_message) + + # Process response messages based on AuthorType + if not response_messages: + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail="No response from agent", + ) + + # Convert response messages to content list + content_parts = [] + for msg in response_messages: + if msg.author_type == AuthorType.AGENT or msg.author_type == AuthorType.SYSTEM: + # For agent and system messages, use the content field + if msg.message: + content_parts.append(msg.message) + elif msg.author_type == AuthorType.SKILL: + # For skill messages, show "running skill_name..." 
for each skill call + if msg.skill_calls and len(msg.skill_calls) > 0: + for skill_call in msg.skill_calls: + skill_name = skill_call.get("name", "unknown") + content_parts.append(f"running {skill_name}...") + else: + content_parts.append("running unknown...") + + # Combine all content parts + content = "\n".join(content_parts) if content_parts else "" + + # Create OpenAI-compatible response + import time + + request_id = f"chatcmpl-{XID()}" + created = int(time.time()) + + # Check if streaming is requested + if request.stream: + # Return streaming response with batched content + return StreamingResponse( + create_streaming_response_batched( + content_parts, request_id, request.model, created + ), + media_type="text/plain", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "Content-Type": "text/plain; charset=utf-8", + }, + ) + else: + # Return regular response + response = OpenAIChatCompletionResponse( + id=request_id, + object="chat.completion", + created=created, + model=request.model, + choices=[ + OpenAIChoice( + index=0, + message=OpenAIMessage(role="assistant", content=content), + finish_reason="stop", + ) + ], + usage=OpenAIUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0), + system_fingerprint=None, + ) + + logger.debug(f"OpenAI-compatible response: {response}") + + return response diff --git a/app/entrypoints/tg.py b/app/entrypoints/tg.py new file mode 100644 index 00000000..626142cc --- /dev/null +++ b/app/entrypoints/tg.py @@ -0,0 +1,130 @@ +import asyncio +import logging +import signal +import sys + +from sqlalchemy import select + +from app.services.tg.bot import pool +from app.services.tg.bot.pool import BotPool, bot_by_token +from app.services.tg.utils.cleanup import clean_token_str +from intentkit.config.config import config +from intentkit.models.agent import Agent, AgentTable +from intentkit.models.agent_data import AgentData +from intentkit.models.db import get_session, init_db +from intentkit.models.redis import init_redis + +logger = logging.getLogger(__name__) + + +class AgentScheduler: + def __init__(self, bot_pool: BotPool): + self.bot_pool = bot_pool + + async def sync(self): + async with get_session() as db: + # Get all telegram agents + agents = await db.scalars(select(AgentTable)) + + for item in agents: + agent = Agent.model_validate(item) + try: + if agent.id not in pool._agent_bots: + if ( + agent.telegram_entrypoint_enabled + and agent.telegram_config + and agent.telegram_config.get("token") + ): + token = clean_token_str(agent.telegram_config["token"]) + if token in pool._bots: + logger.warning( + f"there is an existing bot with {token}, skipping agent {agent.id}..." 
+                                )
+                                continue
+
+                            logger.info(f"New agent with id {agent.id} found...")
+                            await self.bot_pool.init_new_bot(agent)
+                            await asyncio.sleep(1)
+                            bot = bot_by_token(token)
+                            if not bot:
+                                continue
+                            bot_info = await bot.bot.get_me()
+                            # after bot init, refresh its info to agent data
+                            agent_data = await AgentData.get(agent.id)
+                            agent_data.telegram_id = str(bot_info.id)
+                            agent_data.telegram_username = bot_info.username
+                            agent_data.telegram_name = bot_info.first_name
+                            if bot_info.last_name:
+                                agent_data.telegram_name = (
+                                    f"{bot_info.first_name} {bot_info.last_name}"
+                                )
+                            await agent_data.save()
+                    else:
+                        cached_agent = pool._agent_bots[agent.id]
+                        if cached_agent.updated_at != agent.updated_at:
+                            if agent.telegram_config.get("token") not in pool._bots:
+                                await self.bot_pool.change_bot_token(agent)
+                                await asyncio.sleep(2)
+                            else:
+                                await self.bot_pool.modify_config(agent)
+                except Exception as e:
+                    logger.error(
+                        f"failed to process agent {agent.id}, skipping to the next agent: {e}"
+                    )
+
+    async def start(self, interval):
+        logger.info("New agent addition tracking started...")
+        while True:
+            logger.info("sync agents...")
+            try:
+                await self.sync()
+            except Exception as e:
+                logger.error(f"failed to sync agents: {e}")
+
+            await asyncio.sleep(interval)
+
+
+async def run_telegram_server() -> None:
+    # Initialize database connection
+    await init_db(**config.db)
+
+    # Initialize Redis if configured
+    if config.redis_host:
+        await init_redis(
+            host=config.redis_host,
+            port=config.redis_port,
+            db=config.redis_db,
+        )
+
+    # Signal handler for graceful shutdown
+    def signal_handler(signum, frame):
+        logger.info("Received termination signal. Shutting down gracefully...")
+        scheduler.shutdown()
+        sys.exit(0)
+
+    # Register signal handlers
+    signal.signal(signal.SIGINT, signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    logger.info("Initialize bot pool...")
+    bot_pool = BotPool(config.tg_base_url)
+
+    bot_pool.init_god_bot()
+    bot_pool.init_all_dispatchers()
+
+    scheduler = AgentScheduler(bot_pool)
+
+    # Start the scheduler
+    asyncio.create_task(scheduler.start(int(config.tg_new_agent_poll_interval)))
+
+    # Start the bot pool
+    await bot_pool.start(
+        asyncio.get_running_loop(), config.tg_server_host, int(config.tg_server_port)
+    )
+
+    # Keep the server running
+    try:
+        while True:
+            await asyncio.sleep(3600)  # Sleep for an hour
+    except asyncio.CancelledError:
+        logger.info("Server shutdown initiated")
diff --git a/app/entrypoints/web.py b/app/entrypoints/web.py
new file mode 100644
index 00000000..ed9ef36d
--- /dev/null
+++ b/app/entrypoints/web.py
@@ -0,0 +1,866 @@
+"""IntentKit Web API Router."""
+
+import json
+import logging
+import secrets
+import textwrap
+from typing import List, Optional
+
+from epyxid import XID
+from fastapi import (
+    APIRouter,
+    Depends,
+    HTTPException,
+    Path,
+    Query,
+    Request,
+    Response,
+    status,
+)
+from fastapi.responses import PlainTextResponse
+from fastapi.security import HTTPBasic, HTTPBasicCredentials
+from pydantic import BaseModel, Field
+from sqlalchemy import desc, select
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.auth import verify_admin_jwt
+from intentkit.config.config import config
+from intentkit.core.engine import execute_agent, thread_stats
+from intentkit.core.prompt import agent_prompt
+from intentkit.models.agent import Agent
+from intentkit.models.agent_data import AgentData
+from intentkit.models.app_setting import SystemMessageType
+from intentkit.models.chat import (
+    AuthorType,
+    Chat,
+    ChatCreate,
+    ChatMessage,
+    ChatMessageCreate,
+    ChatMessageRequest,
+    ChatMessageTable,
+)
+from intentkit.models.db import get_db
+
+# init logger
+logger = logging.getLogger(__name__)
+
+chat_router = APIRouter()
+chat_router_readonly = APIRouter()
+
+# Add security scheme
+security = HTTPBasic()
+
+
+# Add credentials checker
+def verify_debug_credentials(credentials: HTTPBasicCredentials = Depends(security)):
+    from intentkit.config.config import config
+
+    if not config.debug_auth_enabled:
+        return None
+
+    if not config.debug_username or not config.debug_password:
+        raise HTTPException(
+            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
+            detail="Debug credentials not configured",
+        )
+
+    is_username_correct = secrets.compare_digest(
+        credentials.username.encode("utf8"), config.debug_username.encode("utf8")
+    )
+    is_password_correct = secrets.compare_digest(
+        credentials.password.encode("utf8"), config.debug_password.encode("utf8")
+    )
+
+    if not (is_username_correct and is_password_correct):
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Incorrect username or password",
+            headers={"WWW-Authenticate": "Basic"},
+        )
+    return credentials.username
+
+
+@chat_router_readonly.get(
+    "/debug/{agent_id}/chats/{chat_id}/memory",
+    tags=["Debug"],
+    response_class=Response,
+    dependencies=[Depends(verify_debug_credentials)]
+    if config.debug_auth_enabled
+    else [],
+    operation_id="debug_chat_memory",
+    summary="Chat Memory",
+)
+async def debug_chat_memory(
+    agent_id: str = Path(..., description="Agent id"),
+    chat_id: str = Path(..., description="Chat id"),
+) -> Response:
+    """Get chat memory for debugging."""
+    messages = await thread_stats(agent_id, chat_id)
+    # Convert messages to formatted JSON
+    formatted_json = json.dumps(
+        [message.model_dump() for message in messages], indent=4
+    )
+    return Response(content=formatted_json, media_type="application/json")
+
+
+@chat_router_readonly.get(
+    "/debug/{agent_id}/chats/{chat_id}",
+    tags=["Debug"],
+    response_class=PlainTextResponse,
+    dependencies=[Depends(verify_debug_credentials)]
+    if config.debug_auth_enabled
+    else [],
+    operation_id="debug_chat_history",
+    summary="Chat History",
+)
+async def debug_chat_history(
+    agent_id: str = Path(..., description="Agent id"),
+    chat_id: str = Path(..., description="Chat id"),
+    db: AsyncSession = Depends(get_db),
+) -> str:
+    resp = f"Agent ID:\t{agent_id}\n\nChat ID:\t{chat_id}\n\n-------------------\n\n"
+    messages = await get_chat_history(agent_id, chat_id, user_id=None, db=db)
+    if messages:
+        resp += "".join(message.debug_format() for message in messages)
+    else:
+        resp += "No messages\n"
+    return resp
+
+
+@chat_router.get(
+    "/{aid}/chat", tags=["Debug"], response_class=PlainTextResponse, deprecated=True
+)
+async def debug_chat_deprecated(
+    aid: str = Path(..., description="Agent ID"),
+) -> str:
+    return f"Deprecated: /{aid}/chat\n\nPlease use /debug/{aid}/chat instead"
+
+
+@chat_router.get(
+    "/debug/{aid}/chat",
+    tags=["Debug"],
+    response_class=PlainTextResponse,
+    dependencies=[Depends(verify_debug_credentials)]
+    if config.debug_auth_enabled
+    else [],
+    operation_id="debug_chat",
+    summary="Chat",
+)
+async def debug_chat(
+    request: Request,
+    aid: str = Path(..., description="Agent ID"),
+    q: str = Query(..., description="Query string"),
+    debug: Optional[bool] = Query(None, description="Enable debug mode"),
+    thread: Optional[str] = Query(
+        None, description="Thread ID for conversation tracking", deprecated=True
+    ),
+    chat_id: Optional[str] = Query(
+        None, description="Chat ID for conversation tracking"
+    ),
+) -> str:
+    """Debug mode: Chat with an AI agent.
+
+    **Process Flow:**
+    1. Validates agent quota
+    2. Creates a thread-specific context
+    3. Executes the agent with the query
+    4. Updates quota usage
+
+    **Path Parameters:**
+    * `aid` - Agent ID
+
+    **Query Parameters:**
+    * `q` - User's input query
+    * `debug` - Enable debug mode (show whole skill response)
+    * `thread` - Thread ID for conversation tracking
+    * `chat_id` - Chat ID for conversation tracking
+
+    **Returns:**
+    * `str` - Formatted chat response
+
+    **Raises:**
+    * `404` - Agent not found
+    * `429` - Quota exceeded
+    * `500` - Internal server error
+    """
+    if not q:
+        raise HTTPException(status_code=400, detail="Query string cannot be empty")
+
+    # Get agent and validate quota
+    agent = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail=f"Agent {aid} not found")
+
+    # Fall back to the legacy thread ID, then the client IP, as the chat ID
+    if not chat_id:
+        chat_id = thread if thread else request.client.host
+    user_input = ChatMessageCreate(
+        id=str(XID()),
+        agent_id=aid,
+        chat_id=chat_id,
+        user_id=agent.owner,
+        author_id="debug",
+        author_type=AuthorType.WEB,
+        thread_type=AuthorType.WEB,
+        message=q,
+    )
+
+    # Execute agent and get response
+    messages = await execute_agent(user_input)
+
+    resp = f"Agent ID:\t{aid}\n\nChat ID:\t{chat_id}\n\n-------------------\n\n"
+    resp += "[ Input: ]\n\n"
+    resp += f" {q} \n\n-------------------\n\n"
+
+    resp += "".join(message.debug_format() for message in messages)
+
+    resp += "Total time cost: {:.3f} seconds".format(
+        sum([message.time_cost + message.cold_start_cost for message in messages])
+    )
+
+    return resp
+
+
+@chat_router_readonly.get(
+    "/debug/{agent_id}/prompt",
+    tags=["Debug"],
+    response_class=PlainTextResponse,
+    dependencies=[Depends(verify_debug_credentials)]
+    if config.debug_auth_enabled
+    else [],
+    operation_id="debug_agent_prompt",
+    summary="Agent Prompt",
+)
+async def debug_agent_prompt(
+    agent_id: str = Path(..., description="Agent id"),
+) -> str:
+    """Get agent's init and append prompts for debugging."""
+    agent = await Agent.get(agent_id)
+    agent_data = await AgentData.get(agent_id)
+
+    init_prompt = agent_prompt(agent, agent_data)
+    append_prompt = agent.prompt_append or "None"
+
+    full_prompt = (
+        f"[Init Prompt]\n\n{init_prompt}\n\n[Append Prompt]\n\n{append_prompt}"
+    )
+    return full_prompt
+
+
+@chat_router_readonly.get(
+    "/agents/{aid}/chat/history",
+    tags=["Chat"],
+    dependencies=[Depends(verify_admin_jwt)],
+    response_model=List[ChatMessage],
+    operation_id="get_chat_history",
+    summary="Chat History",
+)
+async def get_chat_history(
+    aid: str = Path(..., description="Agent ID"),
+    chat_id: str = Query(..., description="Chat ID to get history for"),
+    user_id: Optional[str] = Query(None, description="User ID"),
+    db: AsyncSession = Depends(get_db),
+) -> List[ChatMessage]:
+    """Get last 50 messages for a specific chat.
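+
+    **Example (illustrative; the mount prefix depends on deployment):**
+
+        GET /agents/{aid}/chat/history?chat_id=my-chat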
+ + **Path Parameters:** + * `aid` - Agent ID + + **Query Parameters:** + * `chat_id` - Chat ID to get history for + + **Returns:** + * `List[ChatMessage]` - List of chat messages, ordered by creation time ascending + + **Raises:** + * `404` - Agent not found + """ + # Get agent and check if exists + agent = await Agent.get(aid) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Get chat messages (last 50 in DESC order) + result = await db.scalars( + select(ChatMessageTable) + .where(ChatMessageTable.agent_id == aid, ChatMessageTable.chat_id == chat_id) + .order_by(desc(ChatMessageTable.created_at)) + .limit(50) + ) + messages = result.all() + + # If the user_id exists, check if the chat belongs to the user + if user_id: + for message in messages: + if message.user_id == user_id: + break + if message.author_id == user_id: + break + else: + raise HTTPException(status_code=403, detail="Chat not belongs to user") + + # Reverse messages to get chronological order + messages = [ChatMessage.model_validate(message) for message in messages[::-1]] + + # Sanitize privacy for all messages + messages = [message.sanitize_privacy() for message in messages] + + return messages + + +@chat_router.get( + "/agents/{aid}/chat/retry", + tags=["Chat"], + dependencies=[Depends(verify_admin_jwt)], + response_model=ChatMessage, + operation_id="retry_chat_deprecated", + deprecated=True, + summary="Retry Chat", +) +async def retry_chat_deprecated( + aid: str = Path(..., description="Agent ID"), + chat_id: str = Query(..., description="Chat ID to retry last message"), + db: AsyncSession = Depends(get_db), +) -> ChatMessage: + """Retry the last message in a chat. + + If the last message is from the agent, return it directly. + If the last message is from a user, generate a new agent response. 
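+
+    Dispatch on the author of the last message (mirrors the body below):
+    agent/system -> the last message is returned as-is;
+    skill -> a SKILL_INTERRUPTED system message is returned;
+    user -> the message is re-run through the normal chat flow.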
+
+    **Path Parameters:**
+    * `aid` - Agent ID
+
+    **Query Parameters:**
+    * `chat_id` - Chat ID to retry
+
+    **Returns:**
+    * `ChatMessage` - Agent's response message
+
+    **Raises:**
+    * `404` - Agent not found or no messages found
+    * `429` - Quota exceeded
+    * `500` - Internal server error
+    """
+    # Get agent and check if exists
+    agent = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+
+    # Get last message
+    last = await db.scalar(
+        select(ChatMessageTable)
+        .where(ChatMessageTable.agent_id == aid, ChatMessageTable.chat_id == chat_id)
+        .order_by(desc(ChatMessageTable.created_at))
+        .limit(1)
+    )
+    if not last:
+        raise HTTPException(status_code=404, detail="No messages found")
+
+    last_message = ChatMessage.model_validate(last)
+
+    # If last message is from agent, return it
+    if (
+        last_message.author_type == AuthorType.AGENT
+        or last_message.author_type == AuthorType.SYSTEM
+    ):
+        return last_message.sanitize_privacy()
+
+    if last_message.author_type == AuthorType.SKILL:
+        error_message_create = await ChatMessageCreate.from_system_message(
+            SystemMessageType.SKILL_INTERRUPTED,
+            agent_id=aid,
+            chat_id=chat_id,
+            user_id=last_message.user_id,
+            author_id=aid,
+            thread_type=last_message.thread_type,
+            reply_to=last_message.id,
+        )
+        error_message = await error_message_create.save()
+        return error_message.sanitize_privacy()
+
+    # If last message is from user, re-run it through the normal chat flow
+    return await create_chat_deprecated(
+        request=ChatMessageRequest(
+            chat_id=chat_id,
+            user_id=last_message.user_id,
+            message=last_message.message,
+            attachments=last_message.attachments,
+        ),
+        aid=aid,
+    )
+
+
+@chat_router.put(
+    "/agents/{aid}/chat/retry/v2",
+    tags=["Chat"],
+    dependencies=[Depends(verify_admin_jwt)],
+    response_model=list[ChatMessage],
+    operation_id="retry_chat_put_deprecated",
+    summary="Retry Chat",
+    deprecated=True,
+)
+@chat_router.post(
+    "/agents/{aid}/chat/retry/v2",
+    tags=["Chat"],
+    dependencies=[Depends(verify_admin_jwt)],
+    response_model=list[ChatMessage],
+    operation_id="retry_chat",
+    summary="Retry Chat",
+)
+async def retry_chat(
+    aid: str = Path(..., description="Agent ID"),
+    chat_id: str = Query(..., description="Chat ID to retry last message"),
+    db: AsyncSession = Depends(get_db),
+) -> list[ChatMessage]:
+    """Retry the last message in a chat.
+
+    If the last message is from the agent, return it directly.
+    If the last message is from a user, generate a new agent response.
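+
+    **Example (illustrative):**
+
+        POST /agents/{aid}/chat/retry/v2?chat_id=my-chat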
+
+    **Path Parameters:**
+    * `aid` - Agent ID
+
+    **Query Parameters:**
+    * `chat_id` - Chat ID to retry
+
+    **Returns:**
+    * `List[ChatMessage]` - List of chat messages including the retried response
+
+    **Raises:**
+    * `404` - Agent not found or no messages found
+    * `429` - Quota exceeded
+    * `500` - Internal server error
+    """
+    # Get agent and check if exists
+    agent = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+
+    # Get last message
+    last = await db.scalar(
+        select(ChatMessageTable)
+        .where(ChatMessageTable.agent_id == aid, ChatMessageTable.chat_id == chat_id)
+        .order_by(desc(ChatMessageTable.created_at))
+        .limit(1)
+    )
+
+    if not last:
+        raise HTTPException(status_code=404, detail="No messages found")
+
+    last_message = ChatMessage.model_validate(last)
+    # If the last message is from the agent or the system, return it directly
+    if (
+        last_message.author_type == AuthorType.AGENT
+        or last_message.author_type == AuthorType.SYSTEM
+    ):
+        return [last_message.sanitize_privacy()]
+
+    if last_message.author_type == AuthorType.SKILL:
+        error_message_create = await ChatMessageCreate.from_system_message(
+            SystemMessageType.SKILL_INTERRUPTED,
+            agent_id=aid,
+            chat_id=chat_id,
+            user_id=last_message.user_id,
+            author_id=aid,
+            thread_type=last_message.thread_type,
+            reply_to=last_message.id,
+        )
+        error_message = await error_message_create.save()
+        return [last_message.sanitize_privacy(), error_message.sanitize_privacy()]
+
+    # If the last message is from a user, generate a new agent response by
+    # re-submitting the stored message through the chat endpoint
+    return await create_chat(
+        ChatMessageRequest(
+            chat_id=chat_id,
+            user_id=last_message.user_id,
+            message=last_message.message,
+            attachments=last_message.attachments,
+        ),
+        aid,
+    )
+
+
+@chat_router.post(
+    "/agents/{aid}/chat",
+    tags=["Chat"],
+    dependencies=[Depends(verify_admin_jwt)],
+    response_model=ChatMessage,
+    operation_id="create_chat_deprecated",
+    deprecated=True,
+    summary="Chat",
+)
+async def create_chat_deprecated(
+    request: ChatMessageRequest,
+    aid: str = Path(..., description="Agent ID"),
+) -> ChatMessage:
+    """Create a private chat message and get agent's response.
+
+    **Process Flow:**
+    1. Validates agent quota
+    2. Creates a thread-specific context
+    3. Executes the agent with the query
+    4. Updates quota usage
+    5. Saves both input and output messages
+
+    > **Note:** This is for internal/private use and may have additional features or fewer
+    > restrictions compared to the public endpoint.
+
+    **Path Parameters:**
+    * `aid` - Agent ID
+
+    **Request Body:**
+    * `request` - Chat message request object
+
+    **Returns:**
+    * `ChatMessage` - Agent's response message
+
+    **Raises:**
+    * `404` - Agent not found
+    * `429` - Quota exceeded
+    * `500` - Internal server error
+    """
+    # Get agent and check if exists (quota is validated during execution)
+    agent = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail=f"Agent {aid} not found")
+
+    # Create user message
+    user_message = ChatMessageCreate(
+        id=str(XID()),
+        agent_id=aid,
+        chat_id=request.chat_id,
+        user_id=request.user_id,
+        author_id=request.user_id,
+        author_type=AuthorType.WEB,
+        thread_type=AuthorType.WEB,
+        message=request.message,
+        attachments=request.attachments,
+    )
+
+    # Execute agent
+    response_messages = await execute_agent(user_message)
+
+    # Create the chat, or add a round to the existing one
+    chat = await Chat.get(request.chat_id)
+    if chat:
+        await chat.add_round()
+    else:
+        chat = ChatCreate(
+            id=request.chat_id,
+            agent_id=aid,
+            user_id=request.user_id,
+            summary=textwrap.shorten(request.message, width=20, placeholder="..."),
+            rounds=1,
+        )
+        await chat.save()
+
+    # Sanitize privacy for all messages, then return the last (agent) message
+    sanitized_messages = [message.sanitize_privacy() for message in response_messages]
+    return sanitized_messages[-1]
+
+
+@chat_router.post(
+    "/agents/{aid}/chat/v2",
+    tags=["Chat"],
+    dependencies=[Depends(verify_admin_jwt)],
+    response_model=list[ChatMessage],
+    operation_id="chat",
+    summary="Chat",
+)
+async def create_chat(
+    request: ChatMessageRequest,
+    aid: str = Path(..., description="Agent ID"),
+) -> list[ChatMessage]:
+    """Create a chat message and get agent's response.
+
+    **Process Flow:**
+    1. Validates agent quota
+    2. Creates a thread-specific context
+    3. Executes the agent with the query
+    4. Updates quota usage
+    5. Saves both input and output messages
+
+    > **Note:** This is the public-facing endpoint with appropriate rate limiting
+    > and security measures.
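+
+    **Example (illustrative):** a minimal call, assuming a local server, the
+    `httpx` package, and a valid admin JWT (IDs and token are placeholders):
+
+    ```python
+    import httpx
+
+    resp = httpx.post(
+        "http://localhost:8000/agents/my-agent/chat/v2",
+        json={"chat_id": "chat-123", "user_id": "user-456", "message": "Hello!"},
+        headers={"Authorization": "Bearer <admin-jwt>"},
+    )
+    for m in resp.json():
+        print(m["author_type"], m["message"])
+    ```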
+
+    **Path Parameters:**
+    * `aid` - Agent ID
+
+    **Request Body:**
+    * `request` - Chat message request object
+
+    **Returns:**
+    * `List[ChatMessage]` - List of chat messages including both user input and agent response
+
+    **Raises:**
+    * `404` - Agent not found
+    * `429` - Quota exceeded
+    * `500` - Internal server error
+    """
+    # Get agent and check if exists (quota is validated during execution)
+    agent = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail=f"Agent {aid} not found")
+
+    # Create user message
+    user_message = ChatMessageCreate(
+        id=str(XID()),
+        agent_id=aid,
+        chat_id=request.chat_id,
+        user_id=request.user_id,
+        author_id=request.user_id,
+        author_type=AuthorType.WEB,
+        thread_type=AuthorType.WEB,
+        message=request.message,
+        attachments=request.attachments,
+    )
+
+    # Execute agent
+    response_messages = await execute_agent(user_message)
+
+    # Create the chat, or add a round to the existing one
+    chat = await Chat.get(request.chat_id)
+    if chat:
+        await chat.add_round()
+    else:
+        chat = ChatCreate(
+            id=request.chat_id,
+            agent_id=aid,
+            user_id=request.user_id,
+            summary=textwrap.shorten(request.message, width=20, placeholder="..."),
+            rounds=1,
+        )
+        await chat.save()
+
+    # Sanitize privacy for all messages
+    return [message.sanitize_privacy() for message in response_messages]
+
+
+@chat_router_readonly.get(
+    "/agents/{aid}/chats",
+    response_model=List[Chat],
+    summary="User Chat List",
+    tags=["Chat"],
+    operation_id="get_agent_chats",
+)
+async def get_agent_chats(
+    aid: str = Path(..., description="Agent ID"),
+    user_id: str = Query(..., description="User ID"),
+):
+    """Get chat list for a specific agent and user.
+
+    **Path Parameters:**
+    * `aid` - Agent ID
+
+    **Query Parameters:**
+    * `user_id` - User ID
+
+    **Returns:**
+    * `List[Chat]` - List of chats for the specified agent and user
+
+    **Raises:**
+    * `404` - Agent not found
+    """
+    # Verify agent exists
+    agent = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+
+    # Get chats by agent and user
+    chats = await Chat.get_by_agent_user(aid, user_id)
+    return chats
+
+
+class ChatSummaryUpdate(BaseModel):
+    """Request model for updating chat summary."""
+
+    summary: str = Field(
+        ...,
+        description="New summary text for the chat",
+        examples=["User asked about product features and pricing"],
+        min_length=1,
+    )
+
+
+@chat_router.put(
+    "/agents/{aid}/chats/{chat_id}",
+    response_model=Chat,
+    summary="Update Chat Summary",
+    tags=["Chat"],
+    deprecated=True,
+    operation_id="update_chat_summary_deprecated",
+)
+@chat_router.patch(
+    "/agents/{aid}/chats/{chat_id}",
+    response_model=Chat,
+    summary="Update Chat Summary",
+    tags=["Chat"],
+    operation_id="update_chat_summary",
+)
+async def update_chat_summary(
+    update_data: ChatSummaryUpdate,
+    aid: str = Path(..., description="Agent ID"),
+    chat_id: str = Path(..., description="Chat ID"),
+):
+    """Update the summary of a specific chat.
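+
+    **Example (illustrative):** assuming a local server and the `httpx`
+    package (IDs are placeholders):
+
+    ```python
+    import httpx
+
+    resp = httpx.patch(
+        "http://localhost:8000/agents/my-agent/chats/chat-123",
+        json={"summary": "Questions about pricing"},
+    )
+    chat = resp.json()
+    ```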
+ + **Path Parameters:** + * `aid` - Agent ID + * `chat_id` - Chat ID + + **Request Body:** + * `update_data` - Summary update data (in request body) + + **Returns:** + * `Chat` - Updated chat object + + **Raises:** + * `404` - Agent or chat not found + """ + # Verify agent exists + agent = await Agent.get(aid) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Get chat + chat = await Chat.get(chat_id) + if not chat: + raise HTTPException(status_code=404, detail="Chat not found") + + # Verify chat belongs to agent + if chat.agent_id != aid: + raise HTTPException(status_code=404, detail="Chat not found for this agent") + + # Update summary + updated_chat = await chat.update_summary(update_data.summary) + return updated_chat + + +@chat_router.delete( + "/agents/{aid}/chats/{chat_id}", + status_code=status.HTTP_204_NO_CONTENT, + summary="Delete a Chat", + tags=["Chat"], + operation_id="delete_chat", +) +async def delete_chat( + aid: str = Path(..., description="Agent ID"), + chat_id: str = Path(..., description="Chat ID"), +): + """Delete a specific chat. + + **Path Parameters:** + * `aid` - Agent ID + * `chat_id` - Chat ID + + **Returns:** + * `204 No Content` - Success + + **Raises:** + * `404` - Agent or chat not found + """ + # Verify agent exists + agent = await Agent.get(aid) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Get chat + chat = await Chat.get(chat_id) + if not chat: + raise HTTPException(status_code=404, detail="Chat not found") + + # Verify chat belongs to agent + if chat.agent_id != aid: + raise HTTPException(status_code=404, detail="Chat not found for this agent") + + # Delete chat + await chat.delete() + return Response(status_code=status.HTTP_204_NO_CONTENT) + + +@chat_router_readonly.get( + "/agents/{aid}/skill/history", + tags=["Chat"], + dependencies=[Depends(verify_admin_jwt)], + response_model=List[ChatMessage], + operation_id="get_skill_history", + summary="Skill History", +) +async def get_skill_history( + aid: str = Path(..., description="Agent ID"), + db: AsyncSession = Depends(get_db), +) -> List[ChatMessage]: + """Get last 50 skill messages for a specific agent. 
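+
+    **Example (illustrative):** assuming a local server, the `httpx` package,
+    and a valid admin JWT:
+
+    ```python
+    import httpx
+
+    resp = httpx.get(
+        "http://localhost:8000/agents/my-agent/skill/history",
+        headers={"Authorization": "Bearer <admin-jwt>"},
+    )
+    skill_messages = resp.json()
+    ```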
+ + **Path Parameters:** + * `aid` - Agent ID + + **Returns:** + * `List[ChatMessage]` - List of skill messages, ordered by creation time ascending + + **Raises:** + * `404` - Agent not found + """ + # Get agent and check if exists + agent = await Agent.get(aid) + if not agent: + raise HTTPException(status_code=404, detail="Agent not found") + + # Get skill messages (last 50 in DESC order) + result = await db.scalars( + select(ChatMessageTable) + .where( + ChatMessageTable.agent_id == aid, + ChatMessageTable.author_type == AuthorType.SKILL, + ) + .order_by(desc(ChatMessageTable.created_at)) + .limit(50) + ) + messages = result.all() + + # Reverse messages to get chronological order + messages = [ChatMessage.model_validate(message) for message in messages[::-1]] + + # Sanitize privacy for all messages + messages = [message.sanitize_privacy() for message in messages] + + return messages + + +@chat_router_readonly.get( + "/messages/{message_id}", + tags=["Chat"], + dependencies=[Depends(verify_admin_jwt)], + response_model=ChatMessage, + operation_id="get_chat_message", + summary="Get Chat Message", + responses={ + status.HTTP_200_OK: { + "description": "Successfully retrieved the chat message", + "model": ChatMessage, + }, + status.HTTP_403_FORBIDDEN: { + "description": "You don't have permission to access this message", + }, + status.HTTP_404_NOT_FOUND: { + "description": "Message not found", + }, + }, +) +async def get_chat_message( + message_id: str = Path(..., description="Message ID"), + user_id: Optional[str] = Query(None, description="User ID for authorization check"), +) -> ChatMessage: + """Get a specific chat message by its ID. + + **Path Parameters:** + * `message_id` - Message ID + + **Query Parameters:** + * `user_id` - Optional User ID for authorization check + + **Returns:** + * `ChatMessage` - The requested chat message + + **Raises:** + * `404` - Message not found + * `403` - Forbidden if user_id doesn't match the message's user_id + """ + message = await ChatMessage.get(message_id) + if not message: + raise HTTPException( + status_code=status.HTTP_404_NOT_FOUND, + detail=f"Message with ID {message_id} not found", + ) + + # If user_id is provided, check if it matches the message's user_id + if user_id and message.user_id and user_id != message.user_id: + raise HTTPException( + status_code=status.HTTP_403_FORBIDDEN, + detail="You don't have permission to access this message", + ) + + return message.sanitize_privacy() diff --git a/app/readonly.py b/app/readonly.py new file mode 100644 index 00000000..f9c66b61 --- /dev/null +++ b/app/readonly.py @@ -0,0 +1,83 @@ +import logging +from contextlib import asynccontextmanager + +import sentry_sdk +from fastapi import FastAPI +from fastapi.exceptions import RequestValidationError +from fastapi.middleware.cors import CORSMiddleware +from starlette.exceptions import HTTPException as StarletteHTTPException + +from app.admin import ( + admin_router_readonly, + credit_router_readonly, + health_router, + metadata_router_readonly, + schema_router_readonly, + user_router_readonly, +) +from app.entrypoints.web import chat_router_readonly +from intentkit.config.config import config +from intentkit.models.db import init_db +from intentkit.models.redis import init_redis +from intentkit.utils.error import ( + IntentKitAPIError, + http_exception_handler, + intentkit_api_error_handler, + intentkit_other_error_handler, + request_validation_exception_handler, +) + +# init logger +logger = logging.getLogger(__name__) + +if config.sentry_dsn: + 
sentry_sdk.init( + dsn=config.sentry_dsn, + sample_rate=config.sentry_sample_rate, + # traces_sample_rate=config.sentry_traces_sample_rate, + # profiles_sample_rate=config.sentry_profiles_sample_rate, + environment=config.env, + release=config.release, + server_name="intent-readonly", + ) + + +@asynccontextmanager +async def lifespan(app: FastAPI): + await init_db(**config.db) + + # Initialize Redis if configured + if config.redis_host: + await init_redis( + host=config.redis_host, + port=config.redis_port, + db=config.redis_db, + ) + + logger.info("Readonly API server starting") + yield + logger.info("Readonly API server shutting down") + + +app = FastAPI(lifespan=lifespan) + +app.exception_handler(IntentKitAPIError)(intentkit_api_error_handler) +app.exception_handler(RequestValidationError)(request_validation_exception_handler) +app.exception_handler(StarletteHTTPException)(http_exception_handler) +app.exception_handler(Exception)(intentkit_other_error_handler) + +# Add CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Allows all origins + allow_methods=["*"], # Allows all methods + allow_headers=["*"], # Allows all headers +) + +app.include_router(health_router) +app.include_router(admin_router_readonly) +app.include_router(metadata_router_readonly) +app.include_router(schema_router_readonly) +app.include_router(chat_router_readonly) +app.include_router(credit_router_readonly) +app.include_router(user_router_readonly) diff --git a/app/scheduler.py b/app/scheduler.py new file mode 100644 index 00000000..8f56bebf --- /dev/null +++ b/app/scheduler.py @@ -0,0 +1,93 @@ +"""Scheduler process entry point. + +This module runs the scheduler in a separate process, using the implementation +from app.admin.scheduler. +""" + +import asyncio +import logging +import signal + +import sentry_sdk + +from app.admin.scheduler import create_scheduler +from intentkit.config.config import config +from intentkit.models.db import init_db +from intentkit.models.redis import clean_heartbeat, get_redis, init_redis + +logger = logging.getLogger(__name__) + +if config.sentry_dsn: + sentry_sdk.init( + dsn=config.sentry_dsn, + sample_rate=config.sentry_sample_rate, + # traces_sample_rate=config.sentry_traces_sample_rate, + # profiles_sample_rate=config.sentry_profiles_sample_rate, + environment=config.env, + release=config.release, + server_name="intent-scheduler", + ) + + +if __name__ == "__main__": + + async def main(): + # Create a shutdown event for graceful termination + shutdown_event = asyncio.Event() + + # Initialize database + await init_db(**config.db) + + # Initialize Redis if configured + if config.redis_host: + await init_redis( + host=config.redis_host, + port=config.redis_port, + db=config.redis_db, + ) + + # Set up signal handlers for graceful shutdown + loop = asyncio.get_running_loop() + + # Define an async function to set the shutdown event + async def set_shutdown(): + shutdown_event.set() + + # Register signal handlers + for sig in (signal.SIGTERM, signal.SIGINT): + loop.add_signal_handler(sig, lambda: asyncio.create_task(set_shutdown())) + + # Define the cleanup function that will be called on exit + async def cleanup_resources(): + try: + if config.redis_host: + redis_client = get_redis() + await clean_heartbeat(redis_client, "scheduler") + except Exception as e: + logger.error(f"Error cleaning up heartbeat: {e}") + + # Initialize scheduler + scheduler = create_scheduler() + + try: + logger.info("Starting scheduler process...") + scheduler.start() + + # Wait for 
shutdown event + logger.info( + "Scheduler process running. Press Ctrl+C or send SIGTERM to exit." + ) + await shutdown_event.wait() + logger.info("Received shutdown signal. Shutting down gracefully...") + except Exception as e: + logger.error(f"Error in scheduler process: {e}") + finally: + # Run the cleanup code and shutdown the scheduler + await cleanup_resources() + + if scheduler.running: + scheduler.shutdown() + + # Run the async main function + # We handle all signals inside the main function, so we don't need to handle KeyboardInterrupt here + asyncio.run(main()) diff --git a/app/services/__init__.py b/app/services/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/__init__.py b/app/services/tg/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/__init__.py b/app/services/tg/bot/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/filter/__init__.py b/app/services/tg/bot/filter/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/filter/chat_type.py b/app/services/tg/bot/filter/chat_type.py new file mode 100644 index 00000000..c21d2346 --- /dev/null +++ b/app/services/tg/bot/filter/chat_type.py @@ -0,0 +1,26 @@ +import logging + +from aiogram.filters import BaseFilter +from aiogram.types import Message + +logger = logging.getLogger(__name__) + + +class ChatTypeFilter(BaseFilter): + def __init__(self, chat_type: str | list): + self.chat_type = chat_type + + async def __call__(self, message: Message) -> bool: + try: + if isinstance(self.chat_type, str): + return message.chat.type == self.chat_type + else: + return message.chat.type in self.chat_type + except Exception as e: + logger.error(f"failed to filter chat types: {str(e)}") + return False + + +class GroupOnlyFilter(ChatTypeFilter): + def __init__(self): + super().__init__(["group", "supergroup"]) diff --git a/app/services/tg/bot/filter/content_type.py b/app/services/tg/bot/filter/content_type.py new file mode 100644 index 00000000..334134a4 --- /dev/null +++ b/app/services/tg/bot/filter/content_type.py @@ -0,0 +1,23 @@ +import logging + +from aiogram.filters import BaseFilter +from aiogram.types import ContentType, Message + +logger = logging.getLogger(__name__) + + +class ContentTypeFilter(BaseFilter): + def __init__(self, content_types: ContentType | list): + self.content_types = content_types + + async def __call__(self, message: Message) -> bool: + try: + return message.content_type in self.content_types + except Exception as e: + logger.error(f"failed to filter content types: {str(e)}") + return False + + +class TextOnlyFilter(ContentTypeFilter): + def __init__(self): + super().__init__([ContentType.TEXT]) diff --git a/app/services/tg/bot/filter/id.py b/app/services/tg/bot/filter/id.py new file mode 100644 index 00000000..11d56729 --- /dev/null +++ b/app/services/tg/bot/filter/id.py @@ -0,0 +1,28 @@ +import logging + +from aiogram.filters import BaseFilter +from aiogram.types import Message + +from app.services.tg.bot import pool + +logger = logging.getLogger(__name__) + + +class WhitelistedChatIDsFilter(BaseFilter): + def __init__(self): + pass + + async def __call__(self, message: Message) -> bool: + try: + bot = pool.bot_by_token(message.bot.token) + if not bot: + return False + whitelist = bot.whitelist_chat_ids + if whitelist and len(whitelist) > 0: + return message.chat.id in whitelist or str(message.chat.id) in whitelist + + return True + + except Exception as e: + 
logger.error(f"failed to filter whitelisted chat ids: {str(e)}") + return False diff --git a/app/services/tg/bot/filter/no_bot.py b/app/services/tg/bot/filter/no_bot.py new file mode 100644 index 00000000..84d81fb3 --- /dev/null +++ b/app/services/tg/bot/filter/no_bot.py @@ -0,0 +1,19 @@ +import logging + +from aiogram.filters import BaseFilter +from aiogram.types import Message + +logger = logging.getLogger(__name__) + + +class NoBotFilter(BaseFilter): + def __init__(self): + pass + + async def __call__(self, message: Message) -> bool: + try: + return not message.from_user.is_bot + + except Exception as e: + logger.error(f"failed to filter no bots: {str(e)}") + return False diff --git a/app/services/tg/bot/kind/__init__.py b/app/services/tg/bot/kind/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/kind/ai_relayer/__init__.py b/app/services/tg/bot/kind/ai_relayer/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/kind/ai_relayer/router.py b/app/services/tg/bot/kind/ai_relayer/router.py new file mode 100644 index 00000000..b6c9c3ee --- /dev/null +++ b/app/services/tg/bot/kind/ai_relayer/router.py @@ -0,0 +1,186 @@ +import inspect +import logging + +import telegramify_markdown +from aiogram import Router +from aiogram.filters import Command, CommandStart +from aiogram.types import Message +from epyxid import XID + +from app.services.tg.bot import pool +from app.services.tg.bot.filter.chat_type import GroupOnlyFilter +from app.services.tg.bot.filter.content_type import TextOnlyFilter +from app.services.tg.bot.filter.id import WhitelistedChatIDsFilter +from app.services.tg.bot.filter.no_bot import NoBotFilter +from app.services.tg.utils.cleanup import remove_bot_name +from intentkit.core.client import execute_agent +from intentkit.models.chat import AuthorType, ChatMessageCreate +from intentkit.utils.slack_alert import send_slack_message + +logger = logging.getLogger(__name__) + + +def cur_func_name(): + return inspect.stack()[1][3] + + +def cur_mod_name(): + return inspect.getmodule(inspect.stack()[1][0]).__name__ + + +general_router = Router() + + +@general_router.message(Command("chat_id"), NoBotFilter(), TextOnlyFilter()) +async def command_chat_id(message: Message) -> None: + try: + await message.answer(text=str(message.chat.id)) + except Exception as e: + logger.warning( + f"error processing in function:{cur_func_name()}, token:{message.bot.token} err: {str(e)}" + ) + + +## group commands and messages + + +@general_router.message( + CommandStart(), + NoBotFilter(), + WhitelistedChatIDsFilter(), + GroupOnlyFilter(), + TextOnlyFilter(), +) +async def gp_command_start(message: Message): + try: + cached_bot_item = pool.bot_by_token(message.bot.token) + await message.answer(text=cached_bot_item.greeting_group) + except Exception as e: + logger.warning( + f"error processing in function:{cur_func_name()}, token:{message.bot.token} err: {str(e)}" + ) + + +@general_router.message( + WhitelistedChatIDsFilter(), NoBotFilter(), GroupOnlyFilter(), TextOnlyFilter() +) +async def gp_process_message(message: Message) -> None: + bot = await message.bot.get_me() + if ( + message.reply_to_message + and message.reply_to_message.from_user.id == message.bot.id + ) or bot.username in message.text: + cached_bot_item = pool.bot_by_token(message.bot.token) + if cached_bot_item is None: + logger.warning(f"bot with token {message.bot.token} not found in cache.") + return + + try: + # remove bot name tag from text + message_text = 
remove_bot_name(bot.username, message.text) + if len(message_text) > 65535: + send_slack_message( + ( + "Message too long from telegram.\n" + f"length: {len(message_text)}\n" + f"chat_id:{message.chat.id}\n" + f"agent:{cached_bot_item.agent_id}\n" + f"user:{message.from_user.id}\n" + f"content:{message_text[:100]}..." + ) + ) + + input = ChatMessageCreate( + id=str(XID()), + agent_id=cached_bot_item.agent_id, + chat_id=pool.agent_chat_id( + cached_bot_item.is_public_memory, message.chat.id + ), + user_id=str(message.from_user.id), + author_id=str(message.from_user.id), + author_type=AuthorType.TELEGRAM, + thread_type=AuthorType.TELEGRAM, + message=message_text, + ) + response = await execute_agent(input) + await message.answer( + text=telegramify_markdown.markdownify( + response[-1].message if response else "Server Error" + ), + parse_mode="MarkdownV2", + reply_to_message_id=message.message_id, + ) + except Exception as e: + logger.warning( + f"error processing in function:{cur_func_name()}, token:{message.bot.token}, err={str(e)}" + ) + await message.answer( + text="Server Error", reply_to_message_id=message.message_id + ) + + +## direct commands and messages + + +@general_router.message( + CommandStart(), NoBotFilter(), WhitelistedChatIDsFilter(), TextOnlyFilter() +) +async def command_start(message: Message) -> None: + try: + cached_bot_item = pool.bot_by_token(message.bot.token) + await message.answer(text=cached_bot_item.greeting_user) + except Exception as e: + logger.warning( + f"error processing in function:{cur_func_name()}, token:{message.bot.token} err: {str(e)}" + ) + + +@general_router.message( + TextOnlyFilter(), + NoBotFilter(), + WhitelistedChatIDsFilter(), +) +async def process_message(message: Message) -> None: + cached_bot_item = pool.bot_by_token(message.bot.token) + if cached_bot_item is None: + logger.warning(f"bot with token {message.bot.token} not found in cache.") + return + + if len(message.text) > 65535: + send_slack_message( + ( + "Message too long from telegram.\n" + f"length: {len(message.text)}\n" + f"chat_id:{message.chat.id}\n" + f"agent:{cached_bot_item.agent_id}\n" + f"user:{message.from_user.id}\n" + f"content:{message.text[:100]}..." 
+ ) + ) + + try: + input = ChatMessageCreate( + id=str(XID()), + agent_id=cached_bot_item.agent_id, + chat_id=pool.agent_chat_id(False, message.chat.id), + user_id=cached_bot_item.agent_owner or str(message.from_user.id), + author_id=str(message.from_user.id), + author_type=AuthorType.TELEGRAM, + thread_type=AuthorType.TELEGRAM, + message=message.text, + ) + response = await execute_agent(input) + await message.answer( + text=telegramify_markdown.markdownify( + response[-1].message if response else "Server Error" + ), + parse_mode="MarkdownV2", + reply_to_message_id=message.message_id, + ) + except Exception as e: + logger.warning( + f"error processing in function:{cur_func_name()}, token:{message.bot.token} err:{str(e)}" + ) + await message.answer( + text="Server Error", reply_to_message_id=message.message_id + ) diff --git a/app/services/tg/bot/kind/god/__init__.py b/app/services/tg/bot/kind/god/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/kind/god/router.py b/app/services/tg/bot/kind/god/router.py new file mode 100644 index 00000000..bb2aa974 --- /dev/null +++ b/app/services/tg/bot/kind/god/router.py @@ -0,0 +1,31 @@ +from typing import Any, Dict, Union + +from aiogram import Bot, F, Router +from aiogram.exceptions import TelegramUnauthorizedError +from aiogram.filters import Command, CommandObject +from aiogram.types import Message +from aiogram.utils.token import TokenValidationError, validate_token + +god_router = Router() + + +def is_bot_token(value: str) -> Union[bool, Dict[str, Any]]: + try: + validate_token(value) + except TokenValidationError: + return False + return True + + +@god_router.message(Command("add", magic=F.args.func(is_bot_token))) +async def command_add_bot(message: Message, command: CommandObject, bot: Bot) -> Any: + new_bot = Bot(token=command.args, session=bot.session) + try: + bot_user = await new_bot.get_me() + except TelegramUnauthorizedError: + return message.answer("Invalid token") + # await new_bot.delete_webhook(drop_pending_updates=True) + # await new_bot.set_webhook(OTHER_BOTS_URL.format(bot_token=command.args)) + return await message.answer( + f"Your Bot is @{bot_user.username} but, it should be registered in Intent Kit first!" 
+ ) diff --git a/app/services/tg/bot/kind/god/startup.py b/app/services/tg/bot/kind/god/startup.py new file mode 100644 index 00000000..6e0769dd --- /dev/null +++ b/app/services/tg/bot/kind/god/startup.py @@ -0,0 +1,11 @@ +from os import getenv + +from aiogram import Bot, Dispatcher + +BASE_URL = getenv("TG_BASE_URL") +GOD_BOT_PATH = "/webhook/god" +GOD_BOT_TOKEN = getenv("TG_TOKEN_GOD_BOT") + + +async def on_startup(dispatcher: Dispatcher, bot: Bot): + await bot.set_webhook(f"{BASE_URL}{GOD_BOT_PATH}") diff --git a/app/services/tg/bot/pool.py b/app/services/tg/bot/pool.py new file mode 100644 index 00000000..636fa05f --- /dev/null +++ b/app/services/tg/bot/pool.py @@ -0,0 +1,268 @@ +import logging + +from aiogram import Bot, Dispatcher +from aiogram.client.bot import DefaultBotProperties +from aiogram.enums import ParseMode +from aiogram.fsm.storage.memory import MemoryStorage +from aiogram.webhook.aiohttp_server import ( + SimpleRequestHandler, + TokenBasedRequestHandler, + setup_application, +) +from aiohttp import web + +from app.services.tg.bot.kind.ai_relayer.router import general_router +from app.services.tg.bot.kind.god.router import god_router +from app.services.tg.bot.kind.god.startup import GOD_BOT_PATH, GOD_BOT_TOKEN, on_startup +from app.services.tg.bot.types.agent import BotPoolAgentItem +from app.services.tg.bot.types.bot import BotPoolItem +from app.services.tg.bot.types.kind import Kind +from app.services.tg.bot.types.router_obj import RouterObj +from app.services.tg.utils.cleanup import clean_token_str +from intentkit.models.agent import Agent + +logger = logging.getLogger(__name__) + +BOTS_PATH = "/webhook/tgbot/{kind}/{bot_token}" + +_bots = {} +_agent_bots = {} + + +def bot_by_token(token) -> BotPoolItem: + return _bots.get(token) + + +def set_cache_bot(bot: BotPoolItem): + _bots[bot.token] = bot + + +def agent_by_id(agent_id) -> BotPoolAgentItem: + return _agent_bots.get(agent_id) + + +def set_cache_agent(agent: BotPoolAgentItem): + _agent_bots[agent.id] = agent + + +def agent_chat_id(group_memory_public, chat_id): + if group_memory_public: + return "public" + return f"telegram-{chat_id}" + + +async def health_handler(request): + """Health check endpoint handler.""" + return web.json_response({"status": "healthy"}) + + +class BotPool: + def __init__(self, base_url): + self.app = web.Application() + self.app.router.add_get("/health", health_handler) + self.base_url = f"{base_url}{BOTS_PATH}" + self.routers = { + Kind.AiRelayer: RouterObj(general_router), + } + + def init_god_bot(self): + if GOD_BOT_TOKEN: + try: + logger.info("Initialize god bot...") + self.god_bot = Bot( + token=GOD_BOT_TOKEN, + default=DefaultBotProperties(parse_mode=ParseMode.HTML), + ) + storage = MemoryStorage() + # In order to use RedisStorage you need to use Key Builder with bot ID: + # storage = RedisStorage.from_url(TG_REDIS_DSN, key_builder=DefaultKeyBuilder(with_bot_id=True)) + dp = Dispatcher(storage=storage) + dp.include_router(god_router) + dp.startup.register(on_startup) + SimpleRequestHandler(dispatcher=dp, bot=self.god_bot).register( + self.app, path=GOD_BOT_PATH + ) + setup_application(self.app, dp, bot=self.god_bot) + except Exception as e: + logger.error(f"failed to init god bot: {e}") + + def init_all_dispatchers(self): + logger.info("Initialize all dispatchers...") + for kind, b in self.routers.items(): + storage = MemoryStorage() + # In order to use RedisStorage you need to use Key Builder with bot ID: + # storage = RedisStorage.from_url(TG_REDIS_DSN, 
key_builder=DefaultKeyBuilder(with_bot_id=True)) + b.set_dispatcher(Dispatcher(storage=storage)) + b.get_dispatcher().include_router(b.get_router()) + TokenBasedRequestHandler( + dispatcher=b.get_dispatcher(), + default=DefaultBotProperties(parse_mode=ParseMode.HTML), + ).register( + self.app, + path=BOTS_PATH.format(kind=kind.value, bot_token="{bot_token}"), + ) + setup_application(self.app, b.get_dispatcher()) + logger.info(f"{kind} router initialized...") + + async def init_new_bot(self, agent: Agent): + bot_item = None + try: + bot_item = BotPoolItem(agent) + agent_item = BotPoolAgentItem(agent) + + await bot_item.bot.delete_webhook(drop_pending_updates=True) + await bot_item.bot.set_webhook( + self.base_url.format(kind=bot_item.kind, bot_token=bot_item.token) + ) + + set_cache_bot(bot_item) + set_cache_agent(agent_item) + + logger.info( + f"bot for agent {agent.id} with token {bot_item.token} initialized..." + ) + + except ValueError as e: + logger.warning( + f"bot for agent {agent.id} did not started because of invalid data. err: {e}" + ) + except Exception as e: + logger.info(f"failed to init new bot for agent {agent.id}: {e}") + finally: + if bot_item and bot_item.bot: + await bot_item.bot.session.close() + + async def change_bot_token(self, agent: Agent): + if not agent.telegram_entrypoint_enabled: + old_agent_item = agent_by_id(agent.id) + await self.stop_bot(agent.id, old_agent_item.bot_token) + return + + try: + new_bot_success = False + old_bot_stopped = False + new_bot_item = None + + for _, v in _agent_bots.items(): + if v.bot_token == agent.telegram_config.get("agent"): + raise Exception( + f"there is an existing bot for agent {agent.id} with token {v.bot_token}." + ) + + new_bot_item = BotPoolItem(agent) + new_agent_item = BotPoolAgentItem(agent) + + old_agent_item = agent_by_id(agent.id) + old_cached_bot_item = bot_by_token(old_agent_item.bot_token) + + if old_cached_bot_item and old_cached_bot_item.bot: + old_bot = old_cached_bot_item.bot + else: + old_bot = Bot( + token=old_cached_bot_item.token, + default=DefaultBotProperties(parse_mode=ParseMode.HTML), + ) + + await old_bot.session.close() + await old_bot.delete_webhook(drop_pending_updates=True) + old_bot_stopped = True + + await new_bot_item.bot.delete_webhook(drop_pending_updates=True) + await new_bot_item.bot.set_webhook( + self.base_url.format( + kind=new_bot_item.kind, bot_token=new_bot_item.token + ) + ) + + del _bots[old_cached_bot_item.token] + set_cache_bot(new_bot_item) + set_cache_agent(new_agent_item) + + logger.info( + f"bot for agent {agent.id} with token {old_agent_item.bot_token} changed to {new_bot_item.token}..." + ) + new_bot_success = True + except ValueError as e: + logger.warning( + f"bot for agent {agent.id} token did not changed because of invalid data. 
err: {e}" + ) + except Exception as e: + logger.error(f"failed to change bot token for agent {agent.id}: {str(e)}") + finally: + if old_bot_stopped and old_bot: + await old_bot.session.close() + if not new_bot_success and new_bot_item and new_bot_item.bot: + await new_bot_item.bot.session.close() + + async def stop_bot(self, agent_id, token): + bot = None + try: + if token is None: + logger.warning( + f"bot for agent {agent_id} token did not stopped because of empty token" + ) + return + + cached_bot_item = bot_by_token(token) + if cached_bot_item and cached_bot_item.bot: + bot = cached_bot_item.bot + else: + bot = Bot( + token=cached_bot_item.token, + default=DefaultBotProperties(parse_mode=ParseMode.HTML), + ) + + await bot.session.close() + await bot.delete_webhook(drop_pending_updates=True) + + del _bots[token] + del _agent_bots[agent_id] + + logger.info(f"Bot with token {token} for agent {agent_id} stopped...") + except Exception as e: + logger.error(f"failed to stop the bot for agent {agent_id}: {e}") + finally: + if bot: + await bot.session.close() + + async def modify_config(self, agent: Agent): + old_agent_item = agent_by_id(agent.id) + + token = agent.telegram_config.get("token") + if old_agent_item.bot_token != clean_token_str( + agent.telegram_config.get("token") + ): + raise Exception( + f"illegal modification of agent configurations, the bot token for agent {agent.id} does not match existing token of the cache." + ) + + if not agent.telegram_entrypoint_enabled: + await self.stop_bot(agent.id, token) + return + + try: + old_bot_item = bot_by_token(old_agent_item.bot_token) + old_bot_item.update_conf(agent.telegram_config) + old_agent_item.updated_at = agent.updated_at + + # if old_bot_item.kind != agent.telegram_config.get("kind"): + # await self.stop_bot(agent.id, token) + # await self.init_new_bot(agent) + logger.info( + f"configurations of the bot with token {token} for agent {agent.id} updated..." + ) + + except ValueError as e: + logger.warning( + f"bot for agent {agent.id} config did not changed because of invalid data. 
err: {e}" + ) + except Exception as e: + logger.error( + f"failed to change the configs of the bot for agent {agent.id}: {str(e)}" + ) + + async def start(self, asyncio_loop, host, port): + runner = web.AppRunner(self.app) + await runner.setup() + site = web.TCPSite(runner, host, port) + await site.start() diff --git a/app/services/tg/bot/types/__init__.py b/app/services/tg/bot/types/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/bot/types/agent.py b/app/services/tg/bot/types/agent.py new file mode 100644 index 00000000..643f714c --- /dev/null +++ b/app/services/tg/bot/types/agent.py @@ -0,0 +1,28 @@ +from app.services.tg.utils.cleanup import clean_token_str +from intentkit.models.agent import Agent + + +class BotPoolAgentItem: + def __init__(self, agent: Agent): + self._bot_token = clean_token_str(agent.telegram_config.get("token")) + if self._bot_token is None: + raise ValueError("token can not be empty for agent item") + + self._id = agent.id + self._updated_at = agent.updated_at + + @property + def id(self): + return self._id + + @property + def bot_token(self): + return self._bot_token + + @property + def updated_at(self): + return self._updated_at + + @updated_at.setter + def updated_at(self, val): + self._updated_at = val diff --git a/app/services/tg/bot/types/bot.py b/app/services/tg/bot/types/bot.py new file mode 100644 index 00000000..3a4ad312 --- /dev/null +++ b/app/services/tg/bot/types/bot.py @@ -0,0 +1,86 @@ +from typing import NotRequired, TypedDict + +from aiogram import Bot +from aiogram.client.bot import DefaultBotProperties +from aiogram.enums import ParseMode + +from app.services.tg.utils.cleanup import clean_token_str +from intentkit.models.agent import Agent + + +class TelegramConfig(TypedDict): + token: str + kind: NotRequired[int] = 1 + group_memory_public: NotRequired[bool] + whitelist_chat_ids: NotRequired[list[int]] + greeting_group: NotRequired[str] + greeting_user: NotRequired[str] + + +class BotPoolItem: + def __init__(self, agent: Agent): + self._agent_id = agent.id + self._agent_owner = agent.owner + + self._token = clean_token_str(agent.telegram_config.get("token")) + if self._token is None: + raise ValueError("bot token can not be empty") + + self._kind = 1 + + self.update_conf(agent.telegram_config) + + self._bot = Bot( + token=self._token, + default=DefaultBotProperties(parse_mode=ParseMode.HTML), + ) + + def update_conf(self, cfg: TelegramConfig): + self._is_public_memory = cfg.get("group_memory_public", True) + self._whitelist_chat_ids = cfg.get("whitelist_chat_ids") + self._greeting_group = cfg.get( + "greeting_group", + "Glory to the Nation!\nFind me on https://nation.fun", + ) + self._greeting_user = cfg.get( + "greeting_user", + "Glory to the Nation!\nFind me on https://nation.fun", + ) + + @property + def agent_id(self): + return self._agent_id + + @property + def agent_owner(self): + return self._agent_owner + + @property + def token(self): + return self._token + + @property + def kind(self): + return self._kind + + @property + def bot(self): + return self._bot + + # optional props + + @property + def is_public_memory(self): + return self._is_public_memory + + @property + def whitelist_chat_ids(self): + return self._whitelist_chat_ids + + @property + def greeting_group(self): + return self._greeting_group + + @property + def greeting_user(self): + return self._greeting_user diff --git a/app/services/tg/bot/types/kind.py b/app/services/tg/bot/types/kind.py new file mode 100644 index 00000000..49367aa5 --- 
/dev/null +++ b/app/services/tg/bot/types/kind.py @@ -0,0 +1,13 @@ +from enum import Enum + + +class Kind(Enum): + AiRelayer = 1 + + +def is_valid_kind(kind: int): + try: + Kind(kind) + return True + except ValueError: + return False diff --git a/app/services/tg/bot/types/router_obj.py b/app/services/tg/bot/types/router_obj.py new file mode 100644 index 00000000..fc46dd33 --- /dev/null +++ b/app/services/tg/bot/types/router_obj.py @@ -0,0 +1,12 @@ +class RouterObj: + def __init__(self, router): + self.router = router + + def get_router(self): + return self.router + + def set_dispatcher(self, dp): + self.dispatcher = dp + + def get_dispatcher(self): + return self.dispatcher diff --git a/app/services/tg/utils/__init__.py b/app/services/tg/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/tg/utils/cleanup.py b/app/services/tg/utils/cleanup.py new file mode 100644 index 00000000..138532e7 --- /dev/null +++ b/app/services/tg/utils/cleanup.py @@ -0,0 +1,11 @@ +import re + + +def remove_bot_name(bot_uname, message_text) -> str: + clean_text = message_text.replace(f"@{bot_uname} ", "") + clean_text = clean_text.replace(f"@{bot_uname}", "") + return clean_text + + +def clean_token_str(token) -> str: + return re.sub(r"\s+", "", token) diff --git a/app/services/twitter/__init__.py b/app/services/twitter/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/app/services/twitter/oauth2.py b/app/services/twitter/oauth2.py new file mode 100644 index 00000000..1e14889c --- /dev/null +++ b/app/services/twitter/oauth2.py @@ -0,0 +1,67 @@ +"""Twitter OAuth2 authentication module.""" + +from fastapi import APIRouter, Depends +from pydantic import BaseModel + +from app.auth import verify_admin_jwt +from intentkit.clients.twitter import OAuth2UserHandler +from intentkit.config.config import config + +# Initialize Twitter OAuth2 client +oauth2_user_handler = OAuth2UserHandler( + client_id=config.twitter_oauth2_client_id, + client_secret=config.twitter_oauth2_client_secret, + # backend uri point to twitter_oauth_callback + redirect_uri=config.twitter_oauth2_redirect_uri, + scope=[ + "tweet.read", + "tweet.write", + "users.read", + "offline.access", + "follows.read", + "follows.write", + "like.read", + "like.write", + "media.write", + ], +) + + +class TwitterAuthResponse(BaseModel): + agent_id: str + url: str + + +router = APIRouter(tags=["Auth"]) + + +@router.get( + "/auth/twitter", + response_model=TwitterAuthResponse, + dependencies=[Depends(verify_admin_jwt)], +) +async def get_twitter_auth_url(agent_id: str, redirect_uri: str) -> TwitterAuthResponse: + """Get Twitter OAuth2 authorization URL. + + **Query Parameters:** + * `agent_id` - ID of the agent to authenticate + * `redirect_uri` - DApp URI to redirect to after authorization from agentkit to DApp + + **Returns:** + * Object containing agent_id and authorization URL + """ + url = oauth2_user_handler.get_authorization_url(agent_id, redirect_uri) + return TwitterAuthResponse(agent_id=agent_id, url=url) + + +def get_authorization_url(agent_id: str, redirect_uri: str) -> str: + """Get Twitter OAuth2 authorization URL. 
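+
+    **Example (illustrative):**
+
+    ```python
+    url = get_authorization_url("agent-123", "https://dapp.example/callback")
+    ```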
+ + **Query Parameters:** + * `agent_id` - ID of the agent to authenticate + * `redirect_uri` - DApp URI to redirect to after authorization from agentkit to DApp + + **Returns:** + * Authorization URL with agent_id as state parameter + """ + return oauth2_user_handler.get_authorization_url(agent_id, redirect_uri) diff --git a/app/services/twitter/oauth2_callback.py b/app/services/twitter/oauth2_callback.py new file mode 100644 index 00000000..ffe41849 --- /dev/null +++ b/app/services/twitter/oauth2_callback.py @@ -0,0 +1,140 @@ +"""Twitter OAuth2 callback handler.""" + +from datetime import datetime, timezone +from typing import Optional +from urllib.parse import parse_qs, urlencode, urlparse + +import tweepy +from fastapi import APIRouter, HTTPException +from starlette.responses import JSONResponse, RedirectResponse + +from app.services.twitter.oauth2 import oauth2_user_handler +from intentkit.config.config import config +from intentkit.models.agent import Agent +from intentkit.models.agent_data import AgentData + +router = APIRouter(prefix="/callback/auth", tags=["Callback"]) + + +def is_valid_url(url: str) -> bool: + """Check if a URL is valid. + + Args: + url: URL to validate + + Returns: + bool: True if URL is valid, False otherwise + """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except (ValueError, AttributeError, TypeError): + return False + + +@router.get("/twitter") +async def twitter_oauth_callback( + state: str, + code: Optional[str] = None, + error: Optional[str] = None, +): + """Handle Twitter OAuth2 callback. + + This endpoint is called by Twitter after the user authorizes the application. + It exchanges the authorization code for access and refresh tokens, then stores + them in the database. + + **Query Parameters:** + * `state` - URL-encoded state containing agent_id and redirect_uri + * `code` - Authorization code from Twitter + * `error` - Error message from Twitter (optional) + + **Returns:** + * JSONResponse or RedirectResponse depending on redirect_uri + """ + if not state: + raise HTTPException(status_code=400, detail="Missing state parameter") + + try: + # Parse state parameter + state_params = parse_qs(state) + agent_id = state_params.get("agent_id", [""])[0] + redirect_uri = state_params.get("redirect_uri", [""])[0] + + if error: + raise HTTPException(status_code=400, detail=error) + + if not code: + raise HTTPException(status_code=400, detail="Missing code parameter") + + if not agent_id: + raise HTTPException( + status_code=400, detail="Missing agent_id in state parameter" + ) + + agent = await Agent.get(agent_id) + if not agent: + raise HTTPException(status_code=404, detail=f"Agent {agent_id} not found") + + agent_data = await AgentData.get(agent_id) + + # Exchange code for tokens + authorization_response = ( + f"{config.twitter_oauth2_redirect_uri}?state={state}&code={code}" + ) + token = oauth2_user_handler.get_token(authorization_response) + + # Store tokens in database + agent_data.twitter_access_token = token["access_token"] + agent_data.twitter_refresh_token = token["refresh_token"] + agent_data.twitter_access_token_expires_at = datetime.fromtimestamp( + token["expires_at"], tz=timezone.utc + ) + + # Get user info + client = tweepy.Client(bearer_token=token["access_token"], return_type=dict) + me = client.get_me( + user_auth=False, + user_fields="id,username,name,verified", + ) + + username = None + if me and "data" in me: + agent_data.twitter_id = me.get("data").get("id") + username = me.get("data").get("username") + 
agent_data.twitter_username = username + agent_data.twitter_name = me.get("data").get("name") + agent_data.twitter_is_verified = me.get("data").get("verified") + + # Commit changes + await agent_data.save() + + # Handle response based on redirect_uri + if redirect_uri and is_valid_url(redirect_uri): + params = {"twitter_auth": "success", "username": username} + redirect_url = f"{redirect_uri}{'&' if '?' in redirect_uri else '?'}{urlencode(params)}" + return RedirectResponse(url=redirect_url) + else: + return JSONResponse( + content={ + "message": "Authentication successful, you can close this window", + "username": username, + }, + status_code=200, + ) + except HTTPException as http_exc: + # Handle error response + if redirect_uri and is_valid_url(redirect_uri): + params = {"twitter_auth": "failed", "error": str(http_exc.detail)} + redirect_url = f"{redirect_uri}{'&' if '?' in redirect_uri else '?'}{urlencode(params)}" + return RedirectResponse(url=redirect_url) + # Re-raise HTTP exceptions to preserve their status codes + raise http_exc + except Exception as e: + # Handle error response for unexpected errors + if redirect_uri and is_valid_url(redirect_uri): + params = {"twitter_auth": "failed", "error": str(e)} + redirect_url = f"{redirect_uri}{'&' if '?' in redirect_uri else '?'}{urlencode(params)}" + return RedirectResponse(url=redirect_url) + # For unexpected errors, use 500 status code + raise HTTPException(status_code=500, detail=str(e)) diff --git a/app/services/twitter/oauth2_refresh.py b/app/services/twitter/oauth2_refresh.py new file mode 100644 index 00000000..f8bfe579 --- /dev/null +++ b/app/services/twitter/oauth2_refresh.py @@ -0,0 +1,83 @@ +"""Twitter OAuth2 token refresh functionality.""" + +import logging +from datetime import datetime, timedelta, timezone + +from sqlalchemy import select + +from app.services.twitter.oauth2 import oauth2_user_handler +from intentkit.models.agent_data import AgentData, AgentDataTable +from intentkit.models.db import get_session + +logger = logging.getLogger(__name__) + + +async def get_expiring_tokens(minutes_threshold: int = 10) -> list[AgentDataTable]: + """Get all agents with tokens expiring within the specified threshold. + + Args: + minutes_threshold: Number of minutes before expiration to consider tokens as expiring + + Returns: + List of AgentData records with expiring tokens + """ + expiration_threshold = datetime.now(timezone.utc) + timedelta( + minutes=minutes_threshold + ) + broken = datetime.now(timezone.utc) - timedelta(days=1) + + async with get_session() as db: + result = await db.execute( + select(AgentDataTable).where( + AgentDataTable.twitter_access_token.is_not(None), + AgentDataTable.twitter_refresh_token.is_not(None), + AgentDataTable.twitter_access_token_expires_at <= expiration_threshold, + AgentDataTable.twitter_access_token_expires_at > broken, + ) + ) + return result.scalars().all() + + +async def refresh_token(agent_data_record: AgentDataTable): + """Refresh Twitter OAuth2 token for an agent. 
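+
+    Errors are logged instead of raised, so a single failing token does not
+    abort the periodic refresh sweep.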
+
+    Args:
+        agent_data_record: Agent data record containing refresh token
+    """
+    try:
+        # Get new token using refresh token
+        token = oauth2_user_handler.refresh(agent_data_record.twitter_refresh_token)
+
+        token = {} if token is None else token
+
+        agent_data = AgentData(id=agent_data_record.id)
+
+        # Update token information
+        agent_data.twitter_access_token = token.get("access_token")
+        agent_data.twitter_refresh_token = token.get("refresh_token")
+        if "expires_at" in token:
+            agent_data.twitter_access_token_expires_at = datetime.fromtimestamp(
+                token["expires_at"], timezone.utc
+            )
+
+        await agent_data.save()
+
+        logger.info(
+            f"Successfully refreshed Twitter token for agent {agent_data_record.id}, "
+            f"new expires_at: {token.get('expires_at')}"
+        )
+    except Exception as e:
+        logger.error(
+            f"Failed to refresh Twitter token for agent {agent_data_record.id}: {str(e)}"
+        )
+
+
+async def refresh_expiring_tokens():
+    """Refresh all tokens that are about to expire.
+
+    This function is designed to be called by the scheduler every minute.
+    It checks for tokens expiring within the default threshold of
+    get_expiring_tokens (10 minutes) and refreshes them.
+    """
+    agents = await get_expiring_tokens()
+    for agent in agents:
+        await refresh_token(agent)
diff --git a/app/singleton.py b/app/singleton.py
new file mode 100644
index 00000000..a29554d5
--- /dev/null
+++ b/app/singleton.py
@@ -0,0 +1,98 @@
+"""API server module.
+
+This module initializes and configures the FastAPI application,
+including routers, middleware, and startup/shutdown events.
+
+The API server provides endpoints for agent execution and management.
+"""
+
+import logging
+from contextlib import asynccontextmanager
+
+import sentry_sdk
+from fastapi import FastAPI
+from fastapi.exceptions import RequestValidationError
+from fastapi.middleware.cors import CORSMiddleware
+from starlette.exceptions import HTTPException as StarletteHTTPException
+
+from app.admin.api import admin_router, admin_router_readonly
+from app.admin.health import health_router
+from app.admin.metadata import metadata_router_readonly
+from app.services.twitter.oauth2 import router as twitter_oauth2_router
+from app.services.twitter.oauth2_callback import router as twitter_callback_router
+from intentkit.config.config import config
+from intentkit.models.db import init_db
+from intentkit.models.redis import init_redis
+from intentkit.utils.error import (
+    IntentKitAPIError,
+    http_exception_handler,
+    intentkit_api_error_handler,
+    intentkit_other_error_handler,
+    request_validation_exception_handler,
+)
+
+# init logger
+logger = logging.getLogger(__name__)
+
+if config.sentry_dsn:
+    sentry_sdk.init(
+        dsn=config.sentry_dsn,
+        sample_rate=config.sentry_sample_rate,
+        # traces_sample_rate=config.sentry_traces_sample_rate,
+        # profiles_sample_rate=config.sentry_profiles_sample_rate,
+        environment=config.env,
+        release=config.release,
+        server_name="intent-singleton",
+    )
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+    """Manage application lifecycle.
+
+    This context manager:
+    1. Initializes database connection
+    2. Performs any necessary startup tasks
+    3. 
Handles graceful shutdown + + Args: + app: FastAPI application instance + """ + # Initialize database + await init_db(**config.db) + + # Initialize Redis if configured + if config.redis_host: + await init_redis( + host=config.redis_host, + port=config.redis_port, + db=config.redis_db, + ) + + logger.info("API server start") + yield + # Clean up will run after the API server shutdown + logger.info("Cleaning up and shutdown...") + + +app = FastAPI(lifespan=lifespan) + +app.exception_handler(IntentKitAPIError)(intentkit_api_error_handler) +app.exception_handler(RequestValidationError)(request_validation_exception_handler) +app.exception_handler(StarletteHTTPException)(http_exception_handler) +app.exception_handler(Exception)(intentkit_other_error_handler) + +# Add CORS middleware +app.add_middleware( + CORSMiddleware, + allow_origins=["*"], # Allows all origins + allow_methods=["*"], # Allows all methods + allow_headers=["*"], # Allows all headers +) + +app.include_router(admin_router) +app.include_router(admin_router_readonly) +app.include_router(metadata_router_readonly) +app.include_router(twitter_callback_router) +app.include_router(twitter_oauth2_router) +app.include_router(health_router) diff --git a/app/telegram.py b/app/telegram.py new file mode 100644 index 00000000..8146a9a1 --- /dev/null +++ b/app/telegram.py @@ -0,0 +1,23 @@ +import asyncio +import logging + +import sentry_sdk + +from app.entrypoints.tg import run_telegram_server +from intentkit.config.config import config + +logger = logging.getLogger(__name__) + +if config.sentry_dsn: + sentry_sdk.init( + dsn=config.sentry_dsn, + sample_rate=config.sentry_sample_rate, + # traces_sample_rate=config.sentry_traces_sample_rate, + # profiles_sample_rate=config.sentry_profiles_sample_rate, + environment=config.env, + release=config.release, + server_name="intent-telegram", + ) + +if __name__ == "__main__": + asyncio.run(run_telegram_server()) diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..32d5e8b7 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,82 @@ +version: "3.8" + +services: + db: + image: postgres:16.1 + environment: + POSTGRES_USER: ${POSTGRES_USER:-postgres} + POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres} + POSTGRES_DB: ${POSTGRES_DB:-intentkit} + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"] + interval: 5s + timeout: 5s + retries: 5 + + intent-api: + image: crestal/intentkit:latest + depends_on: + db: + condition: service_healthy + environment: + - ENV=${ENV:-local} + - RELEASE=${RELEASE:-local} + - DB_USERNAME=${POSTGRES_USER:-postgres} + - DB_PASSWORD=${POSTGRES_PASSWORD:-postgres} + - DB_HOST=db + - DB_PORT=5432 + - DB_NAME=${POSTGRES_DB:-intentkit} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY} + - CDP_API_KEY_ID=${CDP_API_KEY_ID} + - CDP_API_KEY_SECRET=${CDP_API_KEY_SECRET} + - CDP_WALLET_SECRET=${CDP_WALLET_SECRET} + ports: + - "8000:8000" + command: uvicorn app.api:app --host 0.0.0.0 --port 8000 + + intent-autonomous: + image: crestal/intentkit:latest + depends_on: + db: + condition: service_healthy + environment: + - ENV=${ENV:-local} + - RELEASE=${RELEASE:-local} + - DB_USERNAME=${POSTGRES_USER:-postgres} + - DB_PASSWORD=${POSTGRES_PASSWORD:-postgres} + - DB_HOST=db + - DB_PORT=5432 + - DB_NAME=${POSTGRES_DB:-intentkit} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY} + - CDP_API_KEY_ID=${CDP_API_KEY_ID} + - 
CDP_API_KEY_SECRET=${CDP_API_KEY_SECRET} + - CDP_WALLET_SECRET=${CDP_WALLET_SECRET} + - INTERNAL_BASE_URL=http://api:8000 + command: python -m app.autonomous + + intent-scheduler: + image: crestal/intentkit:latest + depends_on: + db: + condition: service_healthy + environment: + - ENV=${ENV:-local} + - RELEASE=${RELEASE:-local} + - DB_USERNAME=${POSTGRES_USER:-postgres} + - DB_PASSWORD=${POSTGRES_PASSWORD:-postgres} + - DB_HOST=db + - DB_PORT=5432 + - DB_NAME=${POSTGRES_DB:-intentkit} + - OPENAI_API_KEY=${OPENAI_API_KEY} + - DEEPSEEK_API_KEY=${DEEPSEEK_API_KEY} + - CDP_API_KEY_ID=${CDP_API_KEY_ID} + - CDP_API_KEY_SECRET=${CDP_API_KEY_SECRET} + - CDP_WALLET_SECRET=${CDP_WALLET_SECRET} + command: python -m app.scheduler + +volumes: + postgres_data: diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 00000000..7a692220 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,36 @@ +# Documentation + +## General + +- [IntentKit Architecture](architecture.md) +- [API Docs](http://localhost:8000/redoc) (Only available after you start the API server in localhost) +- [How to Guides](how_to/) + +## Developing + +- [Configuration](configuration.md) +- [LLM](llm.md) +- [Create an Agent](agent.md) + +## User Interface + +- [IntentKit Sandbox UI](https://github.com/crestalnetwork/intentkit-sandbox-ui) - Next.js application for testing and interacting with agents +- Hosted IntentKit Sandbox at [https://intentkit.crestal.dev/](https://intentkit.crestal.dev/) - Use this to test your agents without any local setup + +## Agent API + +The Agent API provides programmatic access to IntentKit agents through RESTful endpoints. It allows you to create chat threads, send messages, retrieve conversation history, and integrate agents into your applications. + +**Get Started:** [Agent API Documentation](agent_api.md) + +**OpenAI Compatibility:** [OpenAI Compatible API](openai_compatible.md) + +## Contributing + +- [Building Skills](contributing/skills.md) +- [Wishlist](contributing/wishlist.md) + +## Skills + +- [X](skills/x.md) +- [Coinbase Developer Platform](skills/cdp.md) diff --git a/docs/agent.md b/docs/agent.md new file mode 100644 index 00000000..5dbd8476 --- /dev/null +++ b/docs/agent.md @@ -0,0 +1,74 @@ +# Agent Management + +## Use Shell Scripts + +We have provided some [helper shells](../scripts/) for your convenience. + +When you use these scripts, make sure you have started the [api server in localhost](../DEVELOPMENT.md). + +For example, we want to create an agent, id is `example`, first cd into the directory: + +```bash +cd scripts +``` + +**Create Agent:** +```bash +sh create.sh example +``` + +Now you have a blank agent, let's add features to it. + +**Export Agent:** +```bash +sh export.sh example +``` + +Edit the agent config file: `example.yaml`. Then import it. + +**Import Agent:** +```bash +sh import.sh example +``` + +**Test Your Agent:** + +You can test your agent using API calls or the IntentKit Sandbox UI. The UI provides a visual chat interface for interacting with your agents. + +You can use the hosted version at [https://intentkit.crestal.dev/](https://intentkit.crestal.dev/) without needing any local setup, or you can set up your own instance by following the instructions in the [IntentKit Sandbox UI repository](https://github.com/crestalnetwork/intentkit-sandbox-ui). + +## Advanced Agent API + +You can visit the [API Docs](http://localhost:8000/redoc#tag/Agent) to learn more. 
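+
+The next section describes the prompt fields in detail. As a rough sketch only
+(an illustration under assumed names, not IntentKit's actual implementation),
+the composition of the system prompt can be pictured like this:
+
+```python
+def compose_system_prompt(purpose: str, personality: str, principles: str, prompt: str) -> str:
+    """Illustrative only: join the non-empty prompt fields in order."""
+    parts = [purpose, personality, principles, prompt]
+    return "\n\n".join(p for p in parts if p)
+```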
+
+## The Prompt
+
+The agent config has five prompt-related fields: `purpose`, `personality`, `principles`, `prompt`, and `prompt_append`.
+IntentKit composes `purpose`, `personality`, `principles`, and `prompt` into the `system_prompt`.
+
+### LLM Interaction
+
+Models cannot remember anything on their own, so every time we interact with one, we have to provide all the
+background information and interaction context (that is, additional knowledge and memory).
+What we send to the large model looks something like this:
+- System: `system_prompt`
+- User: conversation history
+- Assistant: conversation history
+- ...
+- User: conversation history
+- Assistant: conversation history
+- User: the message currently being sent
+- System: `prompt_append` (optional)
+
+Content in the system role informs the AI that it is being addressed by an administrator,
+so it will not treat you like a regular user. However, your permissions are not necessarily always higher
+than those of regular users; you simply have the advantage of being the first to set various rules
+for the AI to follow according to your logic.
+For example, you can tell it that the system role has the highest authority, and that if the user role
+requests an action that violates the rules set by the system role, the request should be denied.
+
+### Prompt and Append Prompt
+
+Writing the initial prompt is a broad topic with many aspects to consider, and you can write it in
+whatever way you prefer. Here, we will only address `prompt_append`.
+We've found that emphasizing the most important rules again at the end significantly increases
+the likelihood of the AI following them, so declare or repeat your core rules in this section.
+One last tip: the AI perceives this information as having been inserted just before it responds to the user,
+so avoid saying anything like "later" in this instruction, as that "later" will never happen for the AI.
diff --git a/docs/agent.webp b/docs/agent.webp
new file mode 100644
index 00000000..8aded952
Binary files /dev/null and b/docs/agent.webp differ
diff --git a/docs/agent_api.md b/docs/agent_api.md
new file mode 100644
index 00000000..f7076a69
--- /dev/null
+++ b/docs/agent_api.md
@@ -0,0 +1,137 @@
+# Agent API Intro
+
+This guide teaches LLMs how to interact with IntentKit agent APIs when building applications. The Agent API provides endpoints to create chat threads, send messages, and retrieve conversation history.
+
+## Base URL and Authentication
+
+All API endpoints are prefixed with `/v1/` and require authentication using a Bearer token.
+
+**Base URL:** `http://localhost:8000/v1/` for local development. For production, you will receive the base URL together with your key.
+
+**Authentication:** Include the agent token in the Authorization header:
+```
+Authorization: Bearer <your_api_key>
+```
+
+## How to Get the API Key
+
+IntentKit provides system skills for managing agent API keys. Activate these skills in your agent, then ask the agent to give you the API keys. You can get two types of API keys with different access levels:
+
+- **Private API Key (sk-)**: Can access all skills (public and owner-only)
+- **Public API Key (pk-)**: Can only access public skills
+
+## Quick Start Example
+
+Here's a complete example showing how to create a thread, send a message, and list messages:
+
+### 1. Create a Chat Thread
+
+```bash
+curl -X POST "http://localhost:8000/v1/chats" \
+  -H "Authorization: Bearer <your_api_key>" \
+  -H "Content-Type: application/json"
+```
+
+**Response:**
+```json
+{
+  "id": "chat_123456",
+  "agent_id": "agent_789",
+  "user_id": "agent_789_user123",
+  "summary": "",
+  "rounds": 0,
+  "created_at": "2024-01-01T12:00:00Z",
+  "updated_at": "2024-01-01T12:00:00Z"
+}
+```
+
+### 2. Send a Message
+
+```bash
+curl -X POST "http://localhost:8000/v1/chats/chat_123456/messages" \
+  -H "Authorization: Bearer <your_api_key>" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "message": "Hello, how can you help me today?"
+  }'
+```
+
+**Response:**
+```json
+[
+  {
+    "id": "msg_user_001",
+    "agent_id": "agent_789",
+    "chat_id": "chat_123456",
+    "user_id": "agent_789_user123",
+    "author_id": "agent_789_user123",
+    "author_type": "API",
+    "message": "Hello, how can you help me today?",
+    "created_at": "2024-01-01T12:00:01Z"
+  }
+]
+```
+
+## Advanced Features
+
+### About User ID
+
+The `user_id` parameter is optional in all Agent API endpoints. If it is not provided, the API uses the owner role.
+When you include a `user_id` in the request, the API uses the public role, and that user gets
+its own chat threads. You can use the user_id from your own system to identify the user.
+If the app is anonymous, you can use the JavaScript library `fingerprintjs` to generate the user_id.
+
+### Streaming Responses
+
+Set `stream: true` in the message request to receive streaming responses;
+your client needs to support HTTP/2 server push.
+
+### Attachments
+
+Include images or files in messages:
+
+```json
+{
+  "message": "Convert the image to Cyberpunk style",
+  "attachments": [
+    {
+      "type": "image",
+      "url": "https://your.image.url"
+    }
+  ]
+}
+```
+
+### Search and Super Mode
+
+Search mode activates the model's native search; it is only supported by models with search ability, such as GPT and Grok.
+Super mode gives the agent a higher step limit so it can handle complex tasks.
+
+## Error Handling
+
+The API returns standard HTTP status codes:
+- `200`: Success
+- `201`: Created
+- `204`: No Content
+- `400`: Bad Request
+- `401`: Unauthorized
+- `404`: Not Found
+- `500`: Internal Server Error
+
+Example error response:
+```json
+{
+  "detail": "Chat not found"
+}
+```
+
+## OpenAI Compatible API
+
+IntentKit also provides an OpenAI-compatible API endpoint that allows you to use your agent with any OpenAI-compatible client or SDK. This is particularly useful for integrating with existing applications that already use the OpenAI API format.
+
+For details, see the [OpenAI Compatible API Documentation](openai_compatible.md).
+
+## API Reference
+
+For complete API documentation with interactive examples, visit:
+**http://localhost:8000/v1/redoc**
+
+If you are using an LLM to generate code, providing it with the raw OpenAPI specification link is sufficient:
+```
+http://localhost:8000/v1/openapi.json
+```
\ No newline at end of file
diff --git a/docs/agentkit_upgrade_guide.md b/docs/agentkit_upgrade_guide.md
new file mode 100644
index 00000000..acee6f9e
--- /dev/null
+++ b/docs/agentkit_upgrade_guide.md
@@ -0,0 +1,193 @@
+# AgentKit 0.4.0 → 0.6.0 Upgrade Guide
+
+This guide covers the complete migration from **coinbase-agentkit 0.4.0** to **0.6.0**, including breaking changes, wallet compatibility fixes, and required updates.
+ +## Quick Summary + +- **Dependencies**: Updated AgentKit core and langchain packages +- **API Changes**: Wallet providers renamed, function signatures changed +- **Environment Variables**: New canonical names for CDP credentials +- **Wallet Compatibility**: Fixed invalid wallet addresses from format changes +- **Schema Updates**: Removed deprecated functions + +## 1. Dependencies Updated + +```toml +# Before (0.4.0) +coinbase-agentkit = "0.4.0" +coinbase-agentkit-langchain = "0.3.0" +langgraph = ">=0.4.3" + +# After (0.6.0) +coinbase-agentkit = "0.6.0" +coinbase-agentkit-langchain = "0.5.0" +langgraph = ">=0.3.0" +pydantic = ">=2.10.0,<2.11.0" +``` + +## 2. Environment Variables + +AgentKit 0.6.0 uses new environment variable names: + +```bash +# New variables (0.6.0) +export CDP_API_KEY_ID="your_key_id" +export CDP_API_KEY_SECRET="your_private_key" +export CDP_WALLET_SECRET="your_wallet_secret" +``` + +## 3. Breaking API Changes + +### Wallet Provider Classes +```python +# Before (0.4.0) +from coinbase_agentkit import CdpWalletProvider, CdpWalletProviderConfig + +# After (0.6.0) +from coinbase_agentkit import CdpEvmServerWalletProvider, CdpEvmServerWalletProviderConfig +``` + +### Function Calls +```python +# Before (0.4.0) +cdp_api_action_provider(cdp_provider_config) + +# After (0.6.0) +cdp_api_action_provider() # No arguments +``` + +### Removed Functions +The following functions were completely removed in 0.6.0: +- `CdpWalletActionProvider_deploy_contract` +- `CdpWalletActionProvider_deploy_nft` +- `CdpWalletActionProvider_deploy_token` +- `CdpWalletActionProvider_trade` + +## 4. Wallet Structure Changes & Compatibility Issues + +### The Problem + +AgentKit 0.6.0 changed how wallet data is stored and validated. Agents created with 0.4.0 had wallet addresses stored in a format that became **invalid** in 0.6.0, causing this error: + +``` +ApiError(http_code=404, error_type=not_found, error_message=EVM account with given address not found.) +``` + +### Wallet Data Structure Differences + +**AgentKit 0.4.0 Format:** +```json +{ + "default_address_id": "0x1234...", + "wallet_secret": "encrypted_data", + "account_data": { ... } +} +``` + +**AgentKit 0.6.0 Format:** +```json +{ + "default_address_id": "0x5678...", + "wallet_secret": "new_encrypted_format", + "provider_specific_data": { ... } +} +``` + +The address validation and wallet initialization process changed, making old wallet addresses incompatible. + +### Two Wallet Management Approaches Found + +1. **Older agents**: `cdp_wallet_address: null` → Create wallets **on-demand** → ✅ **Work with 0.6.0** +2. **Newer agents**: Pre-stored `cdp_wallet_address` → Use **stored addresses** → ❌ **Fail with 0.6.0** + +## 5. Wallet Fix Script + +### What the Script Does + +We created `scripts/fix_invalid_wallets.py` to resolve wallet compatibility issues: + +1. **Scans all agents** with stored wallet addresses +2. **Tests each address** by attempting to initialize it with the new AgentKit 0.6.0 API +3. **Identifies invalid addresses** that cause "not found" errors +4. 
**Clears invalid wallet data** so agents can create fresh, compatible wallets + +### How It Works + +```python +# Test if wallet address exists in CDP 0.6.0 +wallet_config = CdpEvmServerWalletProviderConfig( + api_key_id=config.cdp_api_key_name, + api_key_secret=config.cdp_api_key_private_key, + network_id="base-mainnet", + address=wallet_address, + wallet_secret=None +) + +# This will throw an error if address is invalid +wallet_provider = CdpEvmServerWalletProvider(wallet_config) +``` + +### Usage + +```bash +# Check what would be fixed (dry run) +python scripts/fix_invalid_wallets.py --dry-run + +# Fix all invalid wallets +python scripts/fix_invalid_wallets.py + +# Fix specific agent +python scripts/fix_invalid_wallets.py --agent-id agent_id_here +``` + +### Results + +In our case, **all 5 agents** with wallet addresses had invalid addresses and were successfully fixed: + +``` +Found invalid wallet: d1c0kqo9i6t8calft7l0 -> 0x0B272145aA2c52587263a09a03eAcc78568082Bd +Found invalid wallet: comp1 -> 0x2cce7994C30BB178AC4D98149067245b8104fA72 +[... 3 more agents ...] + +Summary: 5 invalid addresses found, 5 fixed +``` + +## 6. Files Modified + +| File | Changes | +|------|---------| +| `pyproject.toml` | Updated dependency versions | +| `example.env` | Added new CDP environment variables | +| `intentkit/clients/cdp.py` | Replaced wallet provider classes, added env var fallbacks | +| `intentkit/skills/cdp/__init__.py` | Updated imports, removed deprecated references | +| `intentkit/skills/cdp/schema.json` | Removed deprecated function definitions | +| `scripts/fix_invalid_wallets.py` | **New script** to fix wallet compatibility | + +## 7. Migration Steps + +1. **Update dependencies:** `uv sync` +2. **Update environment variables** (see section 2) +3. **Fix wallet compatibility:** `python scripts/fix_invalid_wallets.py` +4. **Test the server:** `uvicorn app.api:app --reload` +5. **Verify wallet functions work** + +## 8. Post-Upgrade Behavior + +### Before Fix +- ❌ Agents with stored wallet addresses throw "EVM account not found" errors +- ❌ Wallet functions completely broken +- ❌ Server startup issues with CDP initialization + +### After Fix +- ✅ All agents work without wallet-related errors +- ✅ Agents create fresh, compatible wallets on-demand +- ✅ Existing funded wallets are re-discovered and maintained +- ✅ New agents work seamlessly with 0.6.0 + +## 9. Key Insights & Future Considerations + +1. **On-demand wallet creation** is more resilient than pre-stored addresses +2. **Wallet data format changes** between AgentKit versions can break compatibility +3. **The fix script approach** allows gradual migration without data loss +4. **Monitor for similar issues** in future AgentKit upgrades +5. **Document wallet changes** in release notes \ No newline at end of file diff --git a/docs/arch.jpg b/docs/arch.jpg new file mode 100644 index 00000000..327ee918 Binary files /dev/null and b/docs/arch.jpg differ diff --git a/docs/architecture.md b/docs/architecture.md new file mode 100644 index 00000000..b781e992 --- /dev/null +++ b/docs/architecture.md @@ -0,0 +1,45 @@ +# IntentKit Architecture + +## Overview + +IntentKit is built with a modular architecture that separates concerns into distinct components: + +![Architecture](arch.jpg) + +## Components + +### Entrypoint Layer +The entrypoint layer serves as the interface between the outside world and the Agent. It provides various integration points including Twitter and Telegram, along with autonomous execution capabilities. 
This layer includes adapters that handle input/output transformation and rate limiting, ensuring smooth communication between external services and the internal system.
+
+### LangGraph Layer
+At the heart of IntentKit lies the LangGraph layer, which orchestrates the AI processing pipeline. It manages the language model interactions, prompt engineering, and tool execution flow. The layer maintains both thread-specific memory for ongoing conversations and a broader agent memory system, enabling contextual awareness and persistent knowledge across interactions.
+
+### Processing Layer
+The processing layer is the runtime for skills and memory, executing the skill calls issued by the LangGraph layer and serving their memory reads and writes.
+
+### Storage Layer
+The storage layer provides persistent data management across the system. It maintains agent configurations, securely stores credentials, preserves agent state information, and manages the memory store. This layer ensures that all persistent data is properly organized, secured, and readily accessible when needed.
+
+## The Flow
+
+![Flow](agent.webp)
+
+## Key Design Decisions
+
+1. **Agent Caching**
+   - Agents are cached in memory for performance
+   - Cache is invalidated on configuration changes
+
+2. **Tool Management**
+   - Tools are loaded dynamically based on agent configuration
+   - Each tool is isolated and independently maintainable
+
+3. **Error Handling**
+   - Graceful degradation on tool failures
+   - Comprehensive logging for debugging
+   - Quota management to prevent abuse
+
+4. **State Management**
+   - PostgreSQL for persistent storage
+   - In-memory caching for performance
+   - Transaction management for data consistency
diff --git a/docs/configuration.md b/docs/configuration.md
new file mode 100644
index 00000000..28dd2bb0
--- /dev/null
+++ b/docs/configuration.md
@@ -0,0 +1,12 @@
+# Configuration
+
+## IntentKit configuration
+
+The application can be configured using environment variables or AWS Secrets Manager. Key configuration options:
+
+- `ENV`: Environment (local or others)
+- `DB_*`: PostgreSQL database configuration (Required)
+- `OPENAI_API_KEY`: OpenAI API key for agent interactions (Required)
+- `CDP_*`: Coinbase Developer Platform configuration (Optional)
+
+See [example.env](../example.env) for all available options.
diff --git a/docs/contributing/skills.md b/docs/contributing/skills.md
new file mode 100644
index 00000000..c7829990
--- /dev/null
+++ b/docs/contributing/skills.md
@@ -0,0 +1,335 @@
+# Building Skills for IntentKit
+
+This guide will help you create new skills for IntentKit. Skills are the building blocks that give agents their capabilities.
+
+## Overview
+
+Skills can be enabled in the agent configuration. The agent is aware of all the skills it possesses and will spontaneously use them at appropriate times, using their output for subsequent reasoning or decision-making. The agent can call multiple skills in a single interaction as needed.
+
+A skill in IntentKit is a specialized tool that inherits from `IntentKitSkill` (which inherits from LangChain's `BaseTool`). Each skill provides specific functionality that agents can use to interact with external services or perform specific tasks.
+
+## How skills work
+
+Before writing our first skill, we need to understand how a skill works.
+
+The code for skills lives in the `skills/` directory; each subdirectory is a skill category.
+
+Skills are configured in the `skills` field of the agent configuration. Each key is a skill category, and the value is that category's predefined skill config.
+For example:
+```yaml
+id: my-test-agent
+skills:
+  twitter:
+    states:
+      get_timeline: public
+      post_tweet: private
+      follow_user: disabled
+  common:
+    states:
+      current_time: public
+```
+
+## Adding a new skill category
+
+Most of the time, you will need to add a new skill category. If you only want to add a skill to an existing category, you can copy an existing skill and modify it. Let's see how to add a new skill category.
+
+After creating a new skill category folder in `skills/`, you need to add these essential components:
+- `base.py` - Defines the base class for the skill, adding shared functionality for all skills in this category
+- `your_skill_name.py` - Defines the first skill implementation in the new category
+- `__init__.py` - Defines how to instantiate and retrieve the skills in this category
+- `schema.json` - Defines the config JSON schema for this skill category to help users understand the configuration options
+- An icon for the skill category; png, svg, jpg, and jpeg are supported. Tip: you can usually find an icon on the service's GitHub organization or X account.
+
+Let's use `common/current_time` as an example.
+
+### Base class (base.py)
+
+The base class should inherit from `IntentKitSkill` and provide common functionality for all skills in this category:
+
+```python
+from typing import Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+
+class CommonBaseTool(IntentKitSkill):
+    """Base class for common utility tools."""
+
+    name: str = Field(description="The name of the tool")
+    description: str = Field(description="A description of what the tool does")
+    args_schema: Type[BaseModel]
+    skill_store: SkillStoreABC = Field(
+        description="The skill store for persisting data"
+    )
+
+    @property
+    def category(self) -> str:
+        return "common"
+```
+
+Key points:
+- The base class should inherit from `IntentKitSkill`
+- Define common attributes that all skills in this category will use
+- Implement the `category` property to identify the skill category
+- Include the `skill_store` for persistence if your skills need to store data
+
+### Skill class (current_time.py)
+
+Each skill implementation should inherit from your category base class:
+
+```python
+class CurrentTimeInput(BaseModel):
+    """Input for CurrentTime tool."""
+
+    timezone: str = Field(
+        description="Timezone to format the time in (e.g., 'UTC', 'US/Pacific', 'Europe/London', 'Asia/Tokyo'). Default is UTC.",
+        default="UTC",
+    )
+
+
+class CurrentTime(CommonBaseTool):
+    """This docstring is not passed to the LLM; it is written for humans."""
+
+    name: str = "current_time"
+    description: str = (
+        "Get the current time, converted to a specified timezone.\n"
+        "You must call this tool whenever the user asks for the time."
+    )
+    args_schema: Type[BaseModel] = CurrentTimeInput
+
+    async def _arun(self, timezone: str = "UTC", **kwargs) -> str:
+        # Implementation of the tool
+        # ...
+```
+
+Key points:
+- Create a Pydantic model for the input parameters
+- Inherit from your category base class
+- Define required attributes: `name`, `description`, and `args_schema`
+- Implement the logic in the asynchronous `_arun` method (a sketch of a possible body follows at the end of this section)
+
+Note that the `name`, the `description`, and the field descriptions in `args_schema` are passed to the LLM. They are important reference information that lets the LLM know when to call this skill, so make sure they are clear and concise.
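+
+For completeness, here is a minimal sketch of what the elided `_arun` body could look like, using only the Python standard library. The formatting and error message are our assumptions, not the actual implementation:
+
+```python
+from datetime import datetime
+from zoneinfo import ZoneInfo, ZoneInfoNotFoundError
+
+
+class CurrentTime(CommonBaseTool):
+    # name, description, and args_schema as defined above
+
+    async def _arun(self, timezone: str = "UTC", **kwargs) -> str:
+        """Return the current time formatted in the requested timezone."""
+        try:
+            tz = ZoneInfo(timezone)
+        except ZoneInfoNotFoundError:
+            # Returning a plain string lets the LLM see the problem and retry
+            return f"Unknown timezone '{timezone}'. Use an IANA name like 'UTC' or 'Asia/Tokyo'."
+        return datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S %Z")
+```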
+
+### Skill getter (__init__.py)
+
+The `__init__.py` file exports your skills and defines how they are configured:
+
+```python
+from typing import TypedDict
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.common.base import CommonBaseTool
+from intentkit.skills.common.current_time import CurrentTime
+
+# Cache skills at the system level, because they are stateless
+_cache: dict[str, CommonBaseTool] = {}
+
+
+class SkillStates(TypedDict):
+    current_time: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for common utility skills."""
+
+    states: SkillStates
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[CommonBaseTool]:
+    """Get all common utility skills."""
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter
+    return [get_common_skill(name, store) for name in available_skills]
+
+
+def get_common_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> CommonBaseTool:
+    """Get a common utility skill by name."""
+    if name == "current_time":
+        if name not in _cache:
+            _cache[name] = CurrentTime(
+                skill_store=store,
+            )
+        return _cache[name]
+    else:
+        raise ValueError(f"Unknown common skill: {name}")
+```
+
+Key points:
+- Define a `TypedDict` for the skill states
+- Create a `Config` class that extends `SkillConfig`
+- Implement the `get_skills` function to return all enabled skills based on configuration
+- The last parameter `**_` of `get_skills` is required; it is a placeholder for future use
+- Implement a helper function to instantiate individual skills
+- Consider caching skill instances if they are stateless
+
+### Config Schema (schema.json)
+
+The schema.json file defines the JSON schema for configuring skills in this category:
+
+```json
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "title": "Common Utility Skills",
+  "description": "Configuration schema for common utility skills",
+  "properties": {
+    "states": {
+      "type": "object",
+      "properties": {
+        "current_time": {
+          "type": "string",
+          "title": "Current Time",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "description": "State of the current_time skill"
+        }
+      },
+      "description": "States for each common utility skill (disabled, public, or private)"
+    }
+  },
+  "required": ["states"],
+  "additionalProperties": true
+}
```
+
+Key points:
+- Follow the JSON Schema standard (draft-07)
+- Define the structure of the skill config; it is used for validation during agent creation, update, import, and export
+- List all skills in the `states` section
+
+## Testing your skills before creating a PR
+
+Make sure you have a local agent running so that you can test your skills in it.
+
+Read the [Development Guide](../../DEVELOPMENT.md) to get started with your setup.
+
+You can test your skills using either API calls or the IntentKit Sandbox UI. The UI provides a visual interface for interacting with agents and testing skills.
+
+For quick testing without local setup, use the hosted IntentKit Sandbox at [https://intentkit.crestal.dev/](https://intentkit.crestal.dev/).
+
+Alternatively, you can set up your own instance by following the instructions in the [IntentKit Sandbox UI repository](https://github.com/crestalnetwork/intentkit-sandbox-ui).
+
+## More details
+
+### About the return value
+
+You may notice that we defined the input of the skill but not the output. So what can you output?
+
+The answer is almost anything. You can output a natural-language string to the LLM, or you can output an object, which will be converted to JSON and sent to the LLM. You can even output Markdown, as long as you convert it to a string.
+
+Images, videos, and files are not supported yet. We will add a way to output images soon.
+
+### How to handle errors
+
+When the skill fails, you can return a string that will be passed to the LLM. You can also raise an exception, which will be caught by the framework and converted to a string.
+
+Only if you are not satisfied with the exception's message should you catch it, add more context, and re-raise it.
+
+### How to get the agent id in a skill
+
+We recommend writing your skill as stateless, which helps save memory. When you need the runtime context, you can get it from the parameters of the `_arun` function.
+
+```python
+from langchain_core.runnables import RunnableConfig
+
+class YourSkillInput(BaseModel):
+    foo: str = Field(description="A string parameter")
+    bar: int = Field(description="An integer parameter")
+
+class YourSkill(TwitterBaseTool):
+    async def _arun(self, config: RunnableConfig, **kwargs) -> str:
+        context = self.context_from_config(config)
+        print(context)
+        return f"I'm running in agent {context.agent_id}"
+```
+
+Here is the context definition:
+
+```python
+class SkillContext(BaseModel):
+    agent: Agent
+    config: SkillConfig
+    user_id: str
+    entrypoint: Literal["web", "twitter", "telegram", "trigger"]
+```
+
+If you have optional parameters in `_arun`, put them after `config: RunnableConfig`, because the agent always passes parameters by name.
+
+### How to add custom skill config
+
+Sometimes you may need to add custom config to a skill, such as an API key or behavior choices for the agent.
+
+In `__init__.py`:
+
+```python
+class Config(SkillConfig):
+    """Configuration for your skills."""
+
+    states: SkillStates
+    api_key: str
+```
+
+Then it can be defined in the agent config:
+```yaml
+id: my-test-agent
+skills:
+  your_new_skill_category:
+    states:
+      your_skill: public
+    api_key: your_api_key
+```
+
+You can get it from the context when you need it.
+
+### How to use more packages in a skill
+
+See the [pyproject.toml](https://github.com/crestalnetwork/intentkit/blob/main/pyproject.toml) for the available packages.
+
+For an HTTP client, for example, we suggest the async client from `httpx`.
+
+If you need other packages, add them to pyproject.toml using `uv add`.
+
+### How to store data in a skill
+
+You can use the [skill_store](https://github.com/crestalnetwork/intentkit/blob/main/abstracts/skill.py) to store data in the skill. It is a key-value store for data that is specific to the skill.
+
+You can store and retrieve a dict at these levels:
+- agent
+- thread
+- agent + user
+
+### How to write on-chain skills
+
+You can use the [CdpClient](https://github.com/crestalnetwork/intentkit/blob/main/clients/cdp.py) to write on-chain skills.
+
+Get the agent id from the context, then use the agent id and `self.skill_store` to initialize the CdpClient, as sketched below.
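+
+A rough sketch of that flow follows. The `CdpClient` constructor arguments are an assumption based on the paragraph above (agent id plus the skill store); check `clients/cdp.py` for the real signature before using this:
+
+```python
+from langchain_core.runnables import RunnableConfig
+
+from intentkit.clients.cdp import CdpClient  # import path assumed
+
+
+class MyOnchainSkill(CommonBaseTool):
+    async def _arun(self, config: RunnableConfig, **kwargs) -> str:
+        context = self.context_from_config(config)
+        # Assumed constructor: agent id + skill store, per the docs above
+        cdp = CdpClient(context.agent_id, self.skill_store)
+        # ... use the client for on-chain reads and transactions ...
+        return "done"
+```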
+ +### How to add api key to system level + +You may want to add an api key of specific service to the system level. +Then every agent can share this api key, no longer need to add it in the config. + +When you contribute a new skill category, please add it in skill config first. +If we find it is a common service, the IntentKit team will add it to the system level. diff --git a/docs/contributing/wishlist.md b/docs/contributing/wishlist.md new file mode 100644 index 00000000..779f3b9b --- /dev/null +++ b/docs/contributing/wishlist.md @@ -0,0 +1,49 @@ +# Wishlist for IntentKit + +## Where to Start + +The overall wishlist of contribution requests can be found [here](https://github.com/orgs/crestalnetwork/projects/1/views/2). + +We have divided the request types into five distinct categories, each with its own separate wishlist for easier browsing. + +To get started, either speak with the team in pre-arranged groups or directly tag `@hyacinthus` or `@taiyangc` on the issues you are interested in contributing to. + +## Wishlist Categories + +### On-chain wallet actions + +[Wishlist](https://github.com/orgs/crestalnetwork/projects/1/views/3) + +This category covers everything related to on-chain wallet interactions. It includes adding extra functionalities to any wallet provider or even integrating an entirely new wallet provider. It also encompasses support for assets beyond the basic, well-known ones. Multi-chain support is currently under development—so if you would like to support any protocol on chains other than Base, please consult the core contributors first. + +Any Web3 protocol that involves interactions with an on-chain wallet falls under this category. More protocol integrations mean greater customization of agents, offering endless variety for both creators and end users. + +### Data fetching + +[Wishlist](https://github.com/orgs/crestalnetwork/projects/1/views/4) + +This category involves integrating with any data sources or data-fetching APIs. This can include both web2 and web3 APIs—essentially, anything that provides data source or data infrastructure support for agents. Data fetching can usually be accomplished through a few API calls; however, integrating with premium sources can be critical for many advanced agents. + +### Data generation + +[Wishlist](https://github.com/orgs/crestalnetwork/projects/1/views/5) + +This category encompasses all data generation capabilities. This may involve multimedia generation, export, input, or even data manipulation for agents or end users. Data generation is a critical component of most businesses and user needs. The possibilities are limitless, and integration can occur at either the LLM level or the skill level. + +### Intelligence + +[Wishlist](https://github.com/orgs/crestalnetwork/projects/1/views/6) + +This category involves supporting new LLMs and LLM infrastructures. Anything that enhances the intelligence of the agents qualifies here. More intelligence integrations mean more ways for agents to be helpful, efficient, and cost-effective. + +### Entrypoints + +[Wishlist](https://github.com/orgs/crestalnetwork/projects/1/views/7) + +This category is somewhat analogous to supporting communication channels in traditional app development. Facilitating interactions with agents or enabling agents to interact through these channels is critical for reaching audiences beyond typical web3 social channels. More channels mean more distribution methods and a broader reach to end users. + +## Non-applicable? 
+ +If what you are trying to build does not fall under any of the categories above, feel free to create an issue and clearly describe what you are building. + +If it indeed falls under a completely new category we have overlooked, we will gladly consider it and add it to the overall list. diff --git a/docs/how_to/clean_memory.md b/docs/how_to/clean_memory.md new file mode 100644 index 00000000..16a49879 --- /dev/null +++ b/docs/how_to/clean_memory.md @@ -0,0 +1,20 @@ +# Agent's Memory Cleanup + +Agent memory can be cleared using a request that requires an admin JWT token for authentication. This functionality allows for granular control: + +- **Clear all agent memory**: Reset the entire memory state of the agent. +- **Clear thread memory**: Clear memory specifically associated with a particular thread within the agent. + +> The `thread_id` parameter is used to specify the target thread for memory clearing. + +```bash +curl --location '{base_url}/agents/clean-memory' \ +--header 'Content-Type: application/json' \ +--header 'Authorization: Bearer {jwt_token}' \ +--data '{ + "agent_id": "local", + "thread_id": "chat1", + "clean_agent_memory": true, + "clean_skills_memory": true +}' +``` diff --git a/docs/how_to/readme.md b/docs/how_to/readme.md new file mode 100644 index 00000000..292db9ff --- /dev/null +++ b/docs/how_to/readme.md @@ -0,0 +1,5 @@ +# How to + +## Contents + +- [Clean Agent or Thread memory](clean_memory.md) diff --git a/docs/images/intentkit_banner.png b/docs/images/intentkit_banner.png new file mode 100644 index 00000000..4fa8ea8a Binary files /dev/null and b/docs/images/intentkit_banner.png differ diff --git a/docs/llm.md b/docs/llm.md new file mode 100644 index 00000000..9c516b8e --- /dev/null +++ b/docs/llm.md @@ -0,0 +1,61 @@ +# LLMs + +## Supported Models + +IntentKit supports a wide range of LLM providers and models to give you flexibility in choosing the right model for your needs. 
+ +### Supported Providers + +#### OpenAI +- **gpt-4o** - GPT-4o with vision and tool calling support +- **gpt-4o-mini** - Faster, more cost-effective version of GPT-4o +- **gpt-4.1-nano** - Ultra-fast and cost-effective model +- **gpt-4.1-mini** - Balanced performance and cost +- **gpt-4.1** - Latest GPT-4.1 with enhanced capabilities +- **o4-mini** - OpenAI's reasoning model with advanced problem-solving + +#### DeepSeek +- **deepseek-chat** - DeepSeek V3 (0324) for general conversations +- **deepseek-reasoner** - DeepSeek R1 with enhanced reasoning capabilities + +#### XAI (Grok) +- **grok-2** - Grok 2 model with tool calling support +- **grok-3** - Latest Grok 3 with search capabilities +- **grok-3-mini** - Compact version with reasoning capabilities + +#### Eternal AI +- **eternalai** - Eternal AI (Llama-3.3-70B) for cost-effective inference + +#### Reigent +- **reigent** - REI Network model for specialized tasks + +#### Venice AI +- **venice-uncensored** - Venice Uncensored model +- **venice-llama-4-maverick-17b** - Venice Llama-4 Maverick 17B + +### Model Capabilities + +Each model supports different capabilities: + +- **Tool/Skill Calls**: All models support function calling for skills +- **Structured Output**: JSON and structured response generation +- **Image Input**: Available on select OpenAI models (gpt-4o, gpt-4.1) +- **Reasoning**: Enhanced reasoning on o4-mini, deepseek-reasoner, and grok-3-mini +- **Search**: Native search functionality on gpt-4o, gpt-4o-mini, gpt-4.1-mini, gpt-4.1, and grok-3 +- **Temperature Control**: Fine-tuning response creativity (not available on reasoning models) + +### Pricing + +Models are priced per million tokens with different rates for input and output tokens. The system automatically calculates costs based on token usage and converts to credits based on the current USDC exchange rate. + +### Configuration + +To use these models, configure the appropriate API keys in your environment: +- `OPENAI_API_KEY` for OpenAI models +- `DEEPSEEK_API_KEY` for DeepSeek models +- `XAI_API_KEY` for XAI/Grok models +- `ETERNAL_API_KEY` for Eternal AI models +- `REIGENT_API_KEY` for Reigent models +- `VENICE_API_KEY` for Venice AI models + +The system will automatically route requests to the appropriate provider based on the model selected. diff --git a/docs/openai_compatible.md b/docs/openai_compatible.md new file mode 100644 index 00000000..c9e5e74c --- /dev/null +++ b/docs/openai_compatible.md @@ -0,0 +1,164 @@ +# OpenAI Compatible API + +IntentKit provides system skills for managing agent API keys that enable OpenAI-compatible API access to your agents. The system supports two types of API keys with different access levels: + +- **Private API Key (sk-)**: Can access all skills (public and owner-only) +- **Public API Key (pk-)**: Can only access public skills + +## How to Use the API Keys + +Once you have obtained API keys using either of the above skills, you can use them to interact with your agent through the OpenAI-compatible API endpoint. Choose the appropriate key based on your access requirements. + +### API Endpoint + +The API endpoint follows the OpenAI Chat Completions format: + +``` +POST {base_url}/v1/chat/completions +``` + +Where `{base_url}` is the base URL provided by the skill output. + +### Authentication + +The API key should be included in the `Authorization` header as a Bearer token. 
Use either your private (sk-) or public (pk-) key depending on your access needs:
+
+```
+Authorization: Bearer {your_api_key}
+```
+
+**Examples:**
+- Private key: `Authorization: Bearer sk-1234567890abcdef...`
+- Public key: `Authorization: Bearer pk-1234567890abcdef...`
+
+## Usage Examples
+
+### cURL Example
+
+Here's how to make a request using cURL with either key type:
+
+**Using Private Key (full access):**
+```bash
+curl -X POST "{base_url}/v1/chat/completions" \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer sk-your_private_key_here" \
+  -d '{
+    "model": "agent",
+    "messages": [
+      {
+        "role": "user",
+        "content": "Hello, how can you help me today?"
+      }
+    ]
+  }'
+```
+
+**Using Public Key (public skills only):**
+```bash
+curl -X POST "{base_url}/v1/chat/completions" \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer pk-your_public_key_here" \
+  -d '{
+    "model": "agent",
+    "messages": [
+      {
+        "role": "user",
+        "content": "Hello, how can you help me today?"
+      }
+    ]
+  }'
+```
+
+### OpenAI Python SDK Example
+
+You can use the official OpenAI Python SDK by configuring it to use your IntentKit agent's endpoint:
+
+**Using Private Key (full access):**
+```python
+from openai import OpenAI
+
+# Initialize the client with your agent's private API key and base URL
+client = OpenAI(
+    api_key="sk-your_private_key_here",  # Private key for full access
+    base_url="{base_url}/v1"
+)
+
+# Make a chat completion request
+response = client.chat.completions.create(
+    model="agent",  # Model name is required but can be any value
+    messages=[
+        {
+            "role": "user",
+            "content": "Hello, how can you help me today?"
+        }
+    ]
+)
+
+print(response.choices[0].message.content)
+```
+
+**Using Public Key (public skills only):**
+```python
+from openai import OpenAI
+
+# Initialize the client with your agent's public API key and base URL
+client = OpenAI(
+    api_key="pk-your_public_key_here",  # Public key for limited access
+    base_url="{base_url}/v1"
+)
+
+# Make a chat completion request (only public skills available)
+response = client.chat.completions.create(
+    model="agent",
+    messages=[
+        {
+            "role": "user",
+            "content": "What public information can you provide?"
+        }
+    ]
+)
+
+print(response.choices[0].message.content)
+```
+
+### Using in Cherry Studio
+
+Cherry Studio is a desktop client that supports OpenAI-compatible APIs. To use your IntentKit agent in Cherry Studio:
+
+1. **Open Cherry Studio** and go to Settings
+
+2. **Add a new API provider** with the following configuration:
+   - **Provider Name**: IntentKit Agent (or any name you prefer)
+   - **API Host**: Use the `base_url` provided by the skill output
+   - **API Key**: Use either the private (`sk-`) or public (`pk-`) API key depending on your needs
+   - **Model**: You can use any model name (e.g., "agent")
+
+   **Key Selection Guidelines:**
+   - Use the **private key (sk-)** for personal use or when you need access to all agent capabilities
+   - Use the **public key (pk-)** when sharing access or when you only need public skills
+
+3. **Save the configuration** and select your IntentKit Agent as the active provider
+
+4. **Start chatting** with your agent through Cherry Studio's interface
+
+## API Compatibility
+
+The IntentKit agent API is compatible with the OpenAI Chat Completions API format, supporting:
+
+- **Standard chat messages** with role and content
+- **Image attachments** (when supported by the agent)
+- **Streaming responses** using Server-Sent Events
+- **All other parameters** are accepted but ignored
+
+## Important Notes
+
+- **Single Message Processing**: The API currently processes only the last message in the messages array; memory is managed by the agent in the cloud
+- **Authentication Required**: All requests must include a valid API key in the Authorization header
+- **Agent-Specific**: Each API key is tied to a specific agent and can only access that agent's capabilities
+- **Key Security**: Keep your API keys secure and regenerate them if compromised
+- **Access Control**:
+  - Private keys (sk-) provide full access to all agent skills
+  - Public keys (pk-) are restricted to public skills only
+  - Choose the appropriate key type based on your security requirements
+- **Key Management**: Both key types are generated and managed together through the system skills
diff --git a/docs/skills/cdp.md b/docs/skills/cdp.md
new file mode 100644
index 00000000..ff8a631f
--- /dev/null
+++ b/docs/skills/cdp.md
@@ -0,0 +1,36 @@
+# Coinbase Developer Platform
+
+## CDP AgentKit
+
+All CDP skills are provided by [AgentKit](https://github.com/coinbase/agentkit/).
+
+AgentKit supports the following tools:
+
+```
+WalletActionProvider_get_balance
+WalletActionProvider_get_wallet_details
+WalletActionProvider_native_transfer
+CdpApiActionProvider_address_reputation
+CdpApiActionProvider_request_faucet_funds
+CdpWalletActionProvider_deploy_contract
+CdpWalletActionProvider_deploy_nft
+CdpWalletActionProvider_deploy_token
+CdpWalletActionProvider_trade
+PythActionProvider_fetch_price
+PythActionProvider_fetch_price_feed_id
+BasenameActionProvider_register_basename
+ERC20ActionProvider_get_balance
+ERC20ActionProvider_transfer
+Erc721ActionProvider_get_balance
+Erc721ActionProvider_mint
+Erc721ActionProvider_transfer
+WethActionProvider_wrap_eth
+MorphoActionProvider_deposit
+MorphoActionProvider_withdraw
+SuperfluidActionProvider_create_flow
+SuperfluidActionProvider_delete_flow
+SuperfluidActionProvider_update_flow
+WowActionProvider_buy_token
+WowActionProvider_create_token
+WowActionProvider_sell_token
+```
\ No newline at end of file
diff --git a/docs/skills/goat.md b/docs/skills/goat.md
new file mode 100644
index 00000000..8f82be30
--- /dev/null
+++ b/docs/skills/goat.md
@@ -0,0 +1,66 @@
+# Goat SDK Integration
+
+All GOAT skills are provided by [GOAT](https://github.com/goat-sdk/goat/).
+
+The list of supported tools can be found [here](https://github.com/goat-sdk/goat/tree/main/python#plugins).
+
+## Sample configuration
+
+```json
+{
+  "chains": ["base"]
+}
+```
+
+## Sample Skills list
+
+```json
+{
+  "inch1": {
+    "api_key": "1inch api key string"
+  },
+  "coingecko": {
+    "api_key": "coingecko api key string"
+  },
+  "allora": {
+    "api_key": "allora api key string",
+    "api_root": "https://api.upshot.xyz/v2/allora"
+  },
+  "dexscreener": {},
+  "erc20": {
+    "tokens": [
+      "goat_plugins.erc20.token.USDC"
+    ]
+  },
+  "farcaster": {
+    "api_key": "farcaster api key string",
+    "base_url": "https://farcaster.xyz"
+  },
+  "jsonrpc": {
+    "endpoint": "https://eth.llamarpc.com"
+  },
+  "jupiter": {},
+  "nansen": {
+    "api_key": "nansen api key string"
+  },
+  "opensea": {
+    "api_key": "opensea api key string"
+  },
+  "rugcheck": {
+    "jwt_token": "rugcheck JWT token string"
+  },
+  "spl_token": {
+    "network": "mainnet",
+    "tokens": [
+      "goat_plugins.erc20.token.USDC"
+    ]
+  },
+  "superfluid": {},
+  "uniswap": {
+    "api_key": "uniswap api key string",
+    "base_url": "https://app.uniswap.org"
+  }
+}
+```
diff --git a/docs/skills/x.md b/docs/skills/x.md
new file mode 100644
index 00000000..cb5b43f0
--- /dev/null
+++ b/docs/skills/x.md
@@ -0,0 +1,198 @@
+# X
+
+IntentKit provides two ways to integrate with X: using it as an entrypoint for your agent, or incorporating X-specific skills into your agent's capabilities.
+
+## X Skills
+
+IntentKit provides a set of X-specific skills that can be added to your agent's toolkit. All skills are built on top of the `XBaseTool` base class, which handles authentication and client initialization.
+
+### Available Skills
+
+The following X skills are available:
+
+- **Follow User** (`follow_user`): Follow a specified X user
+- **Get Mentions** (`get_mentions`): Retrieve mentions of the authenticated user
+- **Get Timeline** (`get_timeline`): Fetch tweets from a user's timeline
+- **Like Tweet** (`like_tweet`): Like a specific tweet
+- **Post Tweet** (`post_tweet`): Post a new tweet
+- **Reply Tweet** (`reply_tweet`): Reply to a specific tweet
+- **Retweet** (`retweet`): Retweet a specific tweet
+- **Search Tweets** (`search_tweets`): Search for tweets based on a query
+
+### Using X Skills
+
+To add X skills to your agent, just configure the skills you need in your agent's config:
+```python
+agent.twitter_skills = ["get_mentions", "get_timeline", "post_tweet", "reply_tweet", "follow_user", "like_tweet", "retweet", "search_tweets"]
+```
+
+Before first use, the agent will ask you to click a link to authorize it to access your X account.
+
+If you want to use your own X developer account, you can set it as follows:
+```python
+agent.twitter_config = {
+    "bearer_token": "your_bearer_token",
+    "consumer_key": "your_consumer_key",
+    "consumer_secret": "your_consumer_secret",
+    "access_token": "your_access_token",
+    "access_token_secret": "your_access_token_secret"
+}
+```
+
+## X as an Entrypoint
+
+An entrypoint is a type of conversational interface.
+
+The X entrypoint allows your agent to respond to X mentions automatically. When enabled, the agent will check for mentions every 15 minutes and respond to all of them.
+
+We suggest you use only the X skills rather than the entrypoint.
+
+### Configuration
+
+1. Enable the X entrypoint for your agent:
+```python
+agent.twitter_enabled = True
+```
+
+2. Configure X credentials in your agent's config:
+Get your X credentials from the [X developer portal](https://developer.x.com/en/portal/dashboard).
+> Notice: Free accounts can only use the post_tweet skill; if you want to use other skills, you need to upgrade your account.
+```python
+agent.twitter_config = {
+    "bearer_token": "your_bearer_token",
+    "consumer_key": "your_consumer_key",
+    "consumer_secret": "your_consumer_secret",
+    "access_token": "your_access_token",
+    "access_token_secret": "your_access_token_secret"
+}
+```
+
+3. Run the X entrypoint:
+If you are using docker-compose, it is already running.
+```bash
+python -m app.entrypoints.twitter
+```
+
+### How it Works
+
+The X entrypoint:
+- Polls for new mentions every 15 minutes
+- Uses both `since_id` and `start_time` for reliable mention tracking
+- Maintains the last processed tweet ID in the agent's plugin data
+- Automatically manages API rate limits and quotas
+- Responds to mentions as threaded replies
+
+
+## Rate Limits and Quotas
+
+### X side
+
+[Rate Limits](https://developer.x.com/en/docs/x-api/rate-limits)
+
+### IntentKit
+
+Only when using OAuth 2.0 authentication does IntentKit apply built-in rate limits:
+
+- post tweet: 20/day
+- reply tweet: 20/day
+- retweet: 5/15min
+- follow: 5/15min
+- like: 100/day
+- get mentions: 1/4hr
+- get timeline: 3/day
+- search: 3/day
+
+### Yourself
+
+In the future, you will be able to set your own rate limits in the IntentKit config. This is not released yet.
+
+## Best Practices
+
+1. Error Handling
+   - Always handle X API errors gracefully
+   - Implement exponential backoff for rate limits
+   - Log failed interactions for debugging
+
+2. Content Guidelines
+   - Keep responses within X's character limit
+   - Handle thread creation for longer responses
+   - Consider X's content policies
+
+3. Security
+   - Store X credentials securely
+   - Use environment variables for sensitive data
+   - Regularly rotate access tokens
+
+## Example Use Cases
+
+1. Social Media Manager Bot
+   ```python
+   from intentkit.models.agent import Agent
+
+   # Create an agent with X skills
+   agent = Agent(
+       name="Social Media Manager",
+       twitter_enabled=True,
+       twitter_skills=["get_mentions", "post_tweet", "reply_tweet"],
+       twitter_config={
+           "bearer_token": "your_bearer_token",
+           "consumer_key": "your_consumer_key",
+           "consumer_secret": "your_consumer_secret",
+           "access_token": "your_access_token",
+           "access_token_secret": "your_access_token_secret"
+       },
+       prompt="You are a helpful social media manager. Monitor mentions and engage with users professionally."
+   )
+   ```
+
+2. Content Aggregator with Timeline Analysis
+   ```python
+   # Create an agent that analyzes timeline content
+   agent = Agent(
+       name="Content Analyzer",
+       twitter_enabled=True,
+       twitter_skills=["get_timeline", "post_tweet"],
+       twitter_config={...},  # X credentials
+       prompt="""You are a content analyzer. Monitor the timeline for trending topics and provide insights.
+       When you find interesting patterns, share them as tweets."""
+   )
+   ```
+
+3. Interactive Support Assistant
+   ```python
+   # Create a support agent that handles user queries
+   agent = Agent(
+       name="Support Assistant",
+       twitter_enabled=True,
+       twitter_skills=["get_mentions", "reply_tweet"],
+       twitter_config={...},  # X credentials
+       prompt="""You are a support assistant. Monitor mentions for support queries.
+       Respond helpfully and professionally to user questions.
+ If you can't help, politely explain why and suggest alternatives.""" + ) + ``` + +Each example demonstrates: +- Proper agent configuration with X credentials +- Specific skill selection for the use case +- Custom prompts to guide agent behavior +- Integration with IntentKit's agent system + +## Troubleshooting + +Common issues and solutions: + +1. Rate Limit Exceeded + - Check your quota settings + - Implement proper waiting periods + - Use the built-in quota management + +2. Authentication Errors + - Verify credential configuration + - Check token expiration + - Ensure proper permission scopes + +3. Missing Mentions + - Verify `since_id` tracking + - Check `start_time` configuration + - Monitor the X entrypoint logs diff --git a/example.env b/example.env new file mode 100644 index 00000000..86efa55e --- /dev/null +++ b/example.env @@ -0,0 +1,42 @@ +ENV=local +DEBUG=true +DEBUG_RESP=true + +OPENAI_API_KEY= +DEEPSEEK_API_KEY= +XAI_API_KEY= +REIGENT_API_KEY= + + +DB_HOST= +DB_PORT= +DB_USERNAME= +DB_PASSWORD= +DB_NAME= +DB_AUTO_MIGRATE=true + +# Redis +#REDIS_HOST="127.0.0.1" + +TG_TOKEN_GOD_BOT= +TG_BASE_URL= +TG_NEW_AGENT_POLL_INTERVAL= + +# CDP API Configuration (AgentKit 0.6.0 format) +# Get these from https://portal.cdp.coinbase.com +CDP_API_KEY_ID= +CDP_API_KEY_SECRET= +CDP_WALLET_SECRET= + +TWITTER_OAUTH2_CLIENT_ID= +TWITTER_OAUTH2_CLIENT_SECRET= +TWITTER_OAUTH2_REDIRECT_URI=http://localhost:8000/callback/auth/twitter +TWITTER_ENTRYPOINT_INTERVAL=1 + +DAPPLOOKER_API_KEY= + +UNREALSPEECH_API_KEY= + +AIXBT_API_KEY= + +MORALIS_API_KEY= diff --git a/intentkit/LICENSE b/intentkit/LICENSE new file mode 100644 index 00000000..e50fd4d7 --- /dev/null +++ b/intentkit/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Crestal Network + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/intentkit/MANIFEST.in b/intentkit/MANIFEST.in new file mode 100644 index 00000000..ded7bd2d --- /dev/null +++ b/intentkit/MANIFEST.in @@ -0,0 +1,15 @@ +include README.md +include LICENSE +include pyproject.toml +recursive-include abstracts *.py +recursive-include clients *.py +recursive-include config *.py +recursive-include core *.py +recursive-include models *.py *.json +recursive-include skills *.py *.toml *.md +recursive-include utils *.py +recursive-exclude * __pycache__ +recursive-exclude * *.pyc +recursive-exclude * *.pyo +recursive-exclude * .DS_Store +recursive-exclude * .ruff_cache \ No newline at end of file diff --git a/intentkit/README.md b/intentkit/README.md new file mode 100644 index 00000000..62219f30 --- /dev/null +++ b/intentkit/README.md @@ -0,0 +1,88 @@ +# IntentKit + +IntentKit is a powerful intent-based AI agent platform that enables developers to build sophisticated AI agents with blockchain and cryptocurrency capabilities. + +## Features + +- **Intent-based Architecture**: Build agents that understand and execute user intents +- **Blockchain Integration**: Native support for multiple blockchain networks +- **Cryptocurrency Operations**: Built-in tools for DeFi, trading, and token operations +- **Extensible Skills System**: Modular skill system with 30+ pre-built skills +- **Multi-platform Support**: Telegram, Twitter, Slack, and API integrations +- **Advanced AI Capabilities**: Powered by LangChain and LangGraph + +## Installation + +```bash +pip install intentkit +``` + +## Development + +To build the package locally: + +```bash +# Build both source and wheel distributions +uv build + +# Build only wheel +uv build --wheel + +# Build only source distribution +uv build --sdist +``` + +To publish to PyPI: + +```bash +# Build and publish to PyPI +uv build +uv publish + +# Publish to Test PyPI +uv publish --publish-url https://test.pypi.org/legacy/ +``` + +> **Note**: This package uses `hatchling` as the build backend with `uv` for dependency management and publishing. + +## Quick Start + +```python +from intentkit.core.agent import Agent +from intentkit.config.config import Config + +# Initialize configuration +config = Config() + +# Create an agent +agent = Agent(config=config) + +# Your agent is ready to use! +``` + +## Skills + +IntentKit comes with 30+ pre-built skills including: + +- **DeFi**: Uniswap, 1inch, Enso, LiFi +- **Data**: DexScreener, CoinGecko, DefiLlama, CryptoCompare +- **Social**: Twitter, Telegram, Slack +- **Blockchain**: CDP, Moralis, various wallet integrations +- **AI**: OpenAI, Heurist, Venice AI +- **And many more...** + +## Documentation + +For detailed documentation, examples, and guides, visit our [documentation](https://github.com/crestal-network/intentkit/tree/main/docs). + +## Contributing + +We welcome contributions! Please see our [Contributing Guide](https://github.com/crestal-network/intentkit/blob/main/CONTRIBUTING.md) for details. + +## License + +This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details. + +## Support + +For support, please open an issue on our [GitHub repository](https://github.com/crestal-network/intentkit/issues). \ No newline at end of file diff --git a/intentkit/__init__.py b/intentkit/__init__.py new file mode 100644 index 00000000..212bf416 --- /dev/null +++ b/intentkit/__init__.py @@ -0,0 +1,17 @@ +"""IntentKit - Intent-based AI Agent Platform. 
+ +A powerful platform for building AI agents with blockchain and cryptocurrency capabilities. +""" + +__version__ = "0.0.1" +__author__ = "hyacinthus" +__email__ = "hyacinthus@gmail.com" + +# Core components +# Abstract base classes +from .core.engine import create_agent, stream_agent + +__all__ = [ + "create_agent", + "stream_agent", +] diff --git a/intentkit/abstracts/__init__.py b/intentkit/abstracts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/abstracts/agent.py b/intentkit/abstracts/agent.py new file mode 100644 index 00000000..b063c9d5 --- /dev/null +++ b/intentkit/abstracts/agent.py @@ -0,0 +1,60 @@ +from abc import ABC, abstractmethod +from typing import Dict, Optional + +from intentkit.models.agent import Agent +from intentkit.models.agent_data import AgentData, AgentQuota + + +class AgentStoreABC(ABC): + """Abstract base class for agent data storage operations. + + This class defines the interface for interacting with agent-related data, + including configuration, additional data, and quotas. + + Attributes: + agent_id: ID of the agent to store/retrieve data for + """ + + def __init__(self, agent_id: str) -> None: + """Initialize the agent store. + + Args: + agent_id: ID of the agent + """ + self.agent_id = agent_id + + @abstractmethod + async def get_config(self) -> Optional[Agent]: + """Get agent configuration. + + Returns: + Agent configuration if found, None otherwise + """ + pass + + @abstractmethod + async def get_data(self) -> Optional[AgentData]: + """Get additional agent data. + + Returns: + Agent data if found, None otherwise + """ + pass + + @abstractmethod + async def set_data(self, data: Dict) -> None: + """Update agent data. + + Args: + data: Dictionary containing fields to update + """ + pass + + @abstractmethod + async def get_quota(self) -> Optional[AgentQuota]: + """Get agent quota information. + + Returns: + Agent quota if found, None otherwise + """ + pass diff --git a/intentkit/abstracts/api.py b/intentkit/abstracts/api.py new file mode 100644 index 00000000..f3a44907 --- /dev/null +++ b/intentkit/abstracts/api.py @@ -0,0 +1,4 @@ +ResponseHeadersPagination = { + "X-Next-Cursor": {"description": "Cursor for next page"}, + "X-Has-More": {"description": "Indicates if there are more items"}, +} diff --git a/intentkit/abstracts/engine.py b/intentkit/abstracts/engine.py new file mode 100644 index 00000000..7887e851 --- /dev/null +++ b/intentkit/abstracts/engine.py @@ -0,0 +1,38 @@ +from typing import Callable, List + +from pydantic import BaseModel + + +class AgentMessageInput(BaseModel): + """Input message model for AI agent interactions. + + This class represents the structured input that can be sent to an AI agent, + supporting both text-based queries and image-based inputs for multimodal + interactions. + + Attributes: + text (str): The main text content of the message or query + images (List[str]): List of image references/URLs to be processed by the agent. + Empty list if no images are provided. + """ + + text: str # required + """The main text content or query to be processed by the agent""" + + images: List[str] = [] # optional, defaults to empty list + """List of image references or URLs for multimodal processing""" + + +# Define a type hint for the callback that takes three strings and returns a list of strings +AgentExecutionCallback = Callable[[str, AgentMessageInput, str], List[str]] +"""Callback function type for agent execution. 
+ +Args: + aid (str): The agent ID that uniquely identifies the AI agent + message (AgentMessageInput): The input message containing text and optional images + thread_id (str): The thread ID for tracking the conversation context + +Returns: + List[str]: A list of formatted response lines from the agent execution. Each line + typically contains input/output markers, agent responses, and timing information. +""" diff --git a/intentkit/abstracts/graph.py b/intentkit/abstracts/graph.py new file mode 100644 index 00000000..9ad4adaf --- /dev/null +++ b/intentkit/abstracts/graph.py @@ -0,0 +1,41 @@ +import asyncio +from enum import Enum +from typing import Any, Dict, NotRequired, Optional + +from langgraph.prebuilt.chat_agent_executor import AgentState as BaseAgentState +from pydantic import BaseModel + +from intentkit.models.agent import Agent +from intentkit.models.chat import AuthorType + + +class AgentError(str, Enum): + """The error types that can be raised by the agent.""" + + INSUFFICIENT_CREDITS = "insufficient_credits" + + +# We create the AgentState that we will pass around +# This simply involves a list of messages +# We want steps to return messages to append to the list +# So we annotate the messages attribute with operator.add +class AgentState(BaseAgentState): + """The state of the agent.""" + + context: dict[str, Any] + error: NotRequired[AgentError] + __extra__: NotRequired[Dict[str, Any]] + + +class AgentContext(BaseModel): + agent_id: str + chat_id: str + user_id: Optional[str] = None + app_id: Optional[str] = None + entrypoint: AuthorType + is_private: bool + payer: Optional[str] = None + + @property + def agent(self) -> Agent: + return asyncio.run(Agent.get(self.agent_id)) diff --git a/intentkit/abstracts/skill.py b/intentkit/abstracts/skill.py new file mode 100644 index 00000000..c1a62967 --- /dev/null +++ b/intentkit/abstracts/skill.py @@ -0,0 +1,198 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict, List, Optional + +from intentkit.models.agent import Agent, AgentAutonomous +from intentkit.models.agent_data import AgentData, AgentQuota + + +class SkillStoreABC(ABC): + """Abstract base class for skill data storage operations. + + This class defines the interface for interacting with skill-related data + for both agents and threads. + """ + + @staticmethod + @abstractmethod + def get_system_config(key: str) -> Any: + """Get system configuration value by key.""" + pass + + @staticmethod + @abstractmethod + async def get_agent_config(agent_id: str) -> Optional[Agent]: + """Get agent configuration. + + Returns: + Agent configuration if found, None otherwise + """ + pass + + @staticmethod + @abstractmethod + async def get_agent_data(agent_id: str) -> Optional[AgentData]: + """Get additional agent data. + + Returns: + Agent data if found, None otherwise + """ + pass + + @staticmethod + @abstractmethod + async def set_agent_data(agent_id: str, data: Dict) -> None: + """Update agent data. + + Args: + agent_id: ID of the agent + data: Dictionary containing fields to update + """ + pass + + @staticmethod + @abstractmethod + async def get_agent_quota(agent_id: str) -> Optional[AgentQuota]: + """Get agent quota information. + + Returns: + Agent quota if found, None otherwise + """ + pass + + @staticmethod + @abstractmethod + async def get_agent_skill_data( + agent_id: str, skill: str, key: str + ) -> Optional[Dict[str, Any]]: + """Get skill data for an agent. 
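As a usage sketch for the `AgentContext` model defined earlier in `abstracts/graph.py`; the field values here are hypothetical, and the `AuthorType` member name is assumed rather than taken from the model:

```python
# Hypothetical AgentContext construction; IDs are made up, and the
# AuthorType member used here is assumed, not verified against models/chat.py.
from intentkit.abstracts.graph import AgentContext
from intentkit.models.chat import AuthorType

ctx = AgentContext(
    agent_id="agent-123",
    chat_id="chat-456",
    entrypoint=AuthorType.API,  # assumed member name; check models/chat.py
    is_private=False,
)
# Caution: the ctx.agent property calls asyncio.run() internally, so it
# cannot be used from inside an already-running event loop.
```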
+ + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + + Returns: + Dictionary containing the skill data if found, None otherwise + """ + pass + + @staticmethod + @abstractmethod + async def save_agent_skill_data( + agent_id: str, skill: str, key: str, data: Dict[str, Any] + ) -> None: + """Save or update skill data for an agent. + + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + data: JSON data to store + """ + pass + + @staticmethod + @abstractmethod + async def delete_agent_skill_data(agent_id: str, skill: str, key: str) -> None: + """Delete skill data for an agent. + + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + """ + pass + + @staticmethod + @abstractmethod + async def get_thread_skill_data( + thread_id: str, skill: str, key: str + ) -> Optional[Dict[str, Any]]: + """Get skill data for a thread. + + Args: + thread_id: ID of the thread + skill: Name of the skill + key: Data key + + Returns: + Dictionary containing the skill data if found, None otherwise + """ + pass + + @staticmethod + @abstractmethod + async def save_thread_skill_data( + thread_id: str, + agent_id: str, + skill: str, + key: str, + data: Dict[str, Any], + ) -> None: + """Save or update skill data for a thread. + + Args: + thread_id: ID of the thread + agent_id: ID of the agent that owns this thread + skill: Name of the skill + key: Data key + data: JSON data to store + """ + pass + + @staticmethod + @abstractmethod + async def list_autonomous_tasks(agent_id: str) -> List[AgentAutonomous]: + """List all autonomous tasks for an agent. + + Args: + agent_id: ID of the agent + + Returns: + List[AgentAutonomous]: List of autonomous task configurations + """ + pass + + @staticmethod + @abstractmethod + async def add_autonomous_task( + agent_id: str, task: AgentAutonomous + ) -> AgentAutonomous: + """Add a new autonomous task to an agent. + + Args: + agent_id: ID of the agent + task: Autonomous task configuration + + Returns: + AgentAutonomous: The created task + """ + pass + + @staticmethod + @abstractmethod + async def delete_autonomous_task(agent_id: str, task_id: str) -> None: + """Delete an autonomous task from an agent. + + Args: + agent_id: ID of the agent + task_id: ID of the task to delete + """ + pass + + @staticmethod + @abstractmethod + async def update_autonomous_task( + agent_id: str, task_id: str, task_updates: dict + ) -> AgentAutonomous: + """Update an autonomous task for an agent. + + Args: + agent_id: ID of the agent + task_id: ID of the task to update + task_updates: Dictionary containing fields to update + + Returns: + AgentAutonomous: The updated task + """ + pass diff --git a/intentkit/abstracts/twitter.py b/intentkit/abstracts/twitter.py new file mode 100644 index 00000000..f402d9d8 --- /dev/null +++ b/intentkit/abstracts/twitter.py @@ -0,0 +1,54 @@ +from abc import ABC, abstractmethod +from typing import Optional + +from tweepy.asynchronous import AsyncClient + + +class TwitterABC(ABC): + """Abstract base class for Twitter operations. + + This class defines the interface for interacting with Twitter's API + through a Tweepy client. + """ + + agent_id: str + use_key = False + + @abstractmethod + async def get_client(self) -> Optional[AsyncClient]: + """Get a configured Tweepy client. + + Returns: + A configured Tweepy client if credentials are valid, None otherwise + """ + pass + + @property + @abstractmethod + def self_id(self) -> Optional[str]: + """Get the Twitter user ID. 
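As a usage sketch for the store interface above: skill data is namespaced by `(agent_id, skill, key)`, so a skill can round-trip JSON state as below. Here `store` is any concrete `SkillStoreABC`, and the skill and key names are hypothetical:

```python
# Usage sketch against a concrete SkillStoreABC named `store` (hypothetical).
# Skill data is addressed by the (agent_id, skill, key) triple.
async def remember_last_tweet(store, agent_id: str, tweet_id: str) -> None:
    await store.save_agent_skill_data(agent_id, "twitter", "last_tweet", {"id": tweet_id})


async def recall_last_tweet(store, agent_id: str) -> str | None:
    data = await store.get_agent_skill_data(agent_id, "twitter", "last_tweet")
    return data["id"] if data else None
```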
+ + Returns: + The Twitter user ID if available, None otherwise + """ + pass + + @property + @abstractmethod + def self_username(self) -> Optional[str]: + """Get the Twitter username. + + Returns: + The Twitter username (without @ symbol) if available, None otherwise + """ + pass + + @property + @abstractmethod + def self_name(self) -> Optional[str]: + """Get the Twitter display name. + + Returns: + The Twitter display name if available, None otherwise + """ + pass diff --git a/intentkit/clients/__init__.py b/intentkit/clients/__init__.py new file mode 100644 index 00000000..6f65729d --- /dev/null +++ b/intentkit/clients/__init__.py @@ -0,0 +1,16 @@ +from intentkit.clients.cdp import CdpClient, get_cdp_client +from intentkit.clients.twitter import ( + TwitterClient, + TwitterClientConfig, + get_twitter_client, +) +from intentkit.clients.web3 import get_web3_client + +__all__ = [ + "TwitterClient", + "TwitterClientConfig", + "get_twitter_client", + "CdpClient", + "get_cdp_client", + "get_web3_client", +] diff --git a/intentkit/clients/cdp.py b/intentkit/clients/cdp.py new file mode 100644 index 00000000..76b88218 --- /dev/null +++ b/intentkit/clients/cdp.py @@ -0,0 +1,179 @@ +import json +import logging +from typing import Dict, Optional + +from bip32 import BIP32 +from cdp import CdpClient as OriginCdpClient +from cdp import EvmServerAccount +from coinbase_agentkit import ( + CdpEvmServerWalletProvider, + CdpEvmServerWalletProviderConfig, +) +from eth_keys.datatypes import PrivateKey +from eth_utils import to_checksum_address + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.models.agent import Agent +from intentkit.models.agent_data import AgentData + +_clients: Dict[str, "CdpClient"] = {} +_origin_cdp_client: Optional[OriginCdpClient] = None + +logger = logging.getLogger(__name__) + + +def bip39_seed_to_eth_keys(seed_hex: str) -> Dict[str, str]: + """ + Converts a BIP39 seed to an Ethereum private key, public key, and address. 
+ + Args: + seed_hex: The BIP39 seed in hexadecimal format + + Returns: + Dict containing private_key, public_key, and address + """ + # Convert the hex seed to bytes + seed_bytes = bytes.fromhex(seed_hex) + + # Derive the master key from the seed + bip32 = BIP32.from_seed(seed_bytes) + + # Derive the Ethereum address using the standard derivation path + private_key_bytes = bip32.get_privkey_from_path("m/44'/60'/0'/0/0") + + # Create a private key object + private_key = PrivateKey(private_key_bytes) + + # Get the public key + public_key = private_key.public_key + + # Get the Ethereum address + address = public_key.to_address() + + return { + "private_key": private_key.to_hex(), + "public_key": public_key.to_hex(), + "address": to_checksum_address(address), + } + + +def get_origin_cdp_client(skill_store: SkillStoreABC) -> OriginCdpClient: + global _origin_cdp_client + if _origin_cdp_client: + return _origin_cdp_client + + # Get credentials from skill store system config + api_key_id = skill_store.get_system_config("cdp_api_key_id") + api_key_secret = skill_store.get_system_config("cdp_api_key_secret") + wallet_secret = skill_store.get_system_config("cdp_wallet_secret") + + _origin_cdp_client = OriginCdpClient( + api_key_id=api_key_id, + api_key_secret=api_key_secret, + wallet_secret=wallet_secret, + ) + return _origin_cdp_client + + +class CdpClient: + def __init__(self, agent_id: str, skill_store: SkillStoreABC) -> None: + self._agent_id = agent_id + self._skill_store = skill_store + self._wallet_provider: Optional[CdpEvmServerWalletProvider] = None + self._wallet_provider_config: Optional[CdpEvmServerWalletProviderConfig] = None + + async def get_wallet_provider(self) -> CdpEvmServerWalletProvider: + if self._wallet_provider: + return self._wallet_provider + agent: Agent = await self._skill_store.get_agent_config(self._agent_id) + agent_data: AgentData = await self._skill_store.get_agent_data(self._agent_id) + network_id = agent.network_id or agent.cdp_network_id + + # Get credentials from skill store system config + api_key_id = self._skill_store.get_system_config("cdp_api_key_id") + api_key_secret = self._skill_store.get_system_config("cdp_api_key_secret") + wallet_secret = self._skill_store.get_system_config("cdp_wallet_secret") + + # already have address + address = agent_data.evm_wallet_address + + # new agent or address not migrated yet + if not address: + # create cdp client for later use + cdp_client = get_origin_cdp_client(self._skill_store) + # try migrating from v1 cdp_wallet_data + if agent_data.cdp_wallet_data: + wallet_data = json.loads(agent_data.cdp_wallet_data) + if not isinstance(wallet_data, dict): + raise ValueError("Invalid wallet data format") + if wallet_data.get("default_address_id") and wallet_data.get("seed"): + # verify seed and convert to pk + keys = bip39_seed_to_eth_keys(wallet_data["seed"]) + if keys["address"] != wallet_data["default_address_id"]: + raise ValueError( + "Bad wallet data, seed does not match default_address_id" + ) + # try to import wallet to v2 + logger.info("Migrating wallet data to v2...") + await cdp_client.evm.import_account( + name=agent.id, + private_key=keys["private_key"], + ) + address = keys["address"] + logger.info("Migrated wallet data to v2 successfully: %s", address) + # still not address + if not address: + logger.info("Creating new wallet...") + new_account = await cdp_client.evm.create_account( + name=agent.id, + ) + address = new_account.address + logger.info("Created new wallet: %s", address) + + # do not close cached 
global client + # now it should be created or migrated, store it + agent_data.evm_wallet_address = address + await agent_data.save() + + # it must have v2 account now, load agentkit wallet provider + self._wallet_provider_config = CdpEvmServerWalletProviderConfig( + api_key_id=api_key_id, + api_key_secret=api_key_secret, + network_id=network_id, + address=address, + wallet_secret=wallet_secret, + ) + self._wallet_provider = CdpEvmServerWalletProvider(self._wallet_provider_config) + # hack for cdp bug + if network_id == "base-mainnet": + self._wallet_provider._network.network_id = "base" + elif network_id == "arbitrum-mainnet": + self._wallet_provider._network.network_id = "arbitrum" + elif network_id == "optimism-mainnet": + self._wallet_provider._network.network_id = "optimism" + elif network_id == "polygon-mainnet": + self._wallet_provider._network.network_id = "polygon" + elif network_id == "ethereum-mainnet": + self._wallet_provider._network.network_id = "ethereum" + return self._wallet_provider + + async def get_account(self) -> EvmServerAccount: + """Get the account object from the wallet provider. + + Returns: + EvmServerAccount: The account object that can be used for balance checks, transfers, etc. + """ + wallet_provider = await self.get_wallet_provider() + # Access the internal account object + return wallet_provider._account + + async def get_provider_config(self) -> CdpEvmServerWalletProviderConfig: + if not self._wallet_provider_config: + await self.get_wallet_provider() + return self._wallet_provider_config + + +async def get_cdp_client(agent_id: str, skill_store: SkillStoreABC) -> "CdpClient": + if agent_id not in _clients: + _clients[agent_id] = CdpClient(agent_id, skill_store) + return _clients[agent_id] diff --git a/intentkit/clients/twitter.py b/intentkit/clients/twitter.py new file mode 100644 index 00000000..b7ecf64d --- /dev/null +++ b/intentkit/clients/twitter.py @@ -0,0 +1,501 @@ +import logging +import os +import tempfile +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, List, NotRequired, Optional, TypedDict +from urllib.parse import urlencode + +import httpx +from pydantic import BaseModel, Field +from requests.auth import HTTPBasicAuth +from requests_oauthlib import OAuth2Session +from tweepy.asynchronous import AsyncClient + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.abstracts.twitter import TwitterABC +from intentkit.models.agent_data import AgentData + +logger = logging.getLogger(__name__) + +_clients_linked: Dict[str, "TwitterClient"] = {} +_clients_self_key: Dict[str, "TwitterClient"] = {} + + +class TwitterMedia(BaseModel): + """Model representing Twitter media from the API response.""" + + media_key: str + type: str + url: Optional[str] = None + + +class TwitterUser(BaseModel): + """Model representing a Twitter user from the API response.""" + + id: str + name: str + username: str + description: str + public_metrics: dict = Field( + description="User metrics including followers_count, following_count, tweet_count, listed_count, like_count, and media_count" + ) + is_following: bool = Field( + description="Whether the authenticated user is following this user", + default=False, + ) + is_follower: bool = Field( + description="Whether this user is following the authenticated user", + default=False, + ) + + +class Tweet(BaseModel): + """Model representing a Twitter tweet.""" + + id: str + text: str + author_id: str + author: Optional[TwitterUser] = None + created_at: datetime + referenced_tweets: 
Optional[List["Tweet"]] = None + attachments: Optional[List[TwitterMedia]] = None + + +class TwitterClientConfig(TypedDict): + consumer_key: NotRequired[str] + consumer_secret: NotRequired[str] + access_token: NotRequired[str] + access_token_secret: NotRequired[str] + + +class TwitterClient(TwitterABC): + """Implementation of Twitter operations using Tweepy client. + + This class provides concrete implementations for interacting with Twitter's API + through a Tweepy client, supporting both API key and OAuth2 authentication. + + Args: + agent_id: The ID of the agent + skill_store: The skill store for retrieving data + config: Configuration dictionary that may contain API keys + """ + + def __init__(self, agent_id: str, skill_store: SkillStoreABC, config: Dict) -> None: + """Initialize the Twitter client. + + Args: + agent_id: The ID of the agent + skill_store: The skill store for retrieving data + config: Configuration dictionary that may contain API keys + """ + self.agent_id = agent_id + self._client: Optional[AsyncClient] = None + self._skill_store = skill_store + self._agent_data: Optional[AgentData] = None + self.use_key = _is_self_key(config) + self._config = config + + async def get_client(self) -> AsyncClient: + """Get the initialized Twitter client. + + Returns: + AsyncClient: The Twitter client if initialized + """ + if not self._agent_data: + self._agent_data = await self._skill_store.get_agent_data(self.agent_id) + if not self._client: + # Check if we have API keys in config + if self.use_key: + self._client = AsyncClient( + consumer_key=self._config["consumer_key"], + consumer_secret=self._config["consumer_secret"], + access_token=self._config["access_token"], + access_token_secret=self._config["access_token_secret"], + return_type=dict, + ) + # refresh userinfo if needed + if not self._agent_data.twitter_self_key_refreshed_at or ( + self._agent_data.twitter_self_key_refreshed_at + < datetime.now(tz=timezone.utc) - timedelta(days=1) + ): + me = await self._client.get_me( + user_auth=self.use_key, + user_fields="id,username,name,verified", + ) + if me and "data" in me and "id" in me["data"]: + await self._skill_store.set_agent_data( + self.agent_id, + { + "twitter_id": me["data"]["id"], + "twitter_username": me["data"]["username"], + "twitter_name": me["data"]["name"], + "twitter_is_verified": me["data"]["verified"], + "twitter_self_key_refreshed_at": datetime.now( + tz=timezone.utc + ), + }, + ) + self._agent_data = await self._skill_store.get_agent_data( + self.agent_id + ) + logger.info( + f"Twitter self key client initialized. 
" + f"Use API key: {self.use_key}, " + f"User ID: {self.self_id}, " + f"Username: {self.self_username}, " + f"Name: {self.self_name}, " + f"Verified: {self.self_is_verified}" + ) + return self._client + # Otherwise try to get OAuth2 tokens from agent data + if not self._agent_data.twitter_access_token: + raise Exception(f"[{self.agent_id}] Twitter access token not found") + if not self._agent_data.twitter_access_token_expires_at: + raise Exception( + f"[{self.agent_id}] Twitter access token expiration not found" + ) + if self._agent_data.twitter_access_token_expires_at <= datetime.now( + tz=timezone.utc + ): + raise Exception(f"[{self.agent_id}] Twitter access token has expired") + self._client = AsyncClient( + bearer_token=self._agent_data.twitter_access_token, + return_type=dict, + ) + return self._client + if not self.use_key: + # check if access token has expired + if self._agent_data.twitter_access_token_expires_at <= datetime.now( + tz=timezone.utc + ): + self._agent_data = await self._skill_store.get_agent_data(self.agent_id) + # check again + if self._agent_data.twitter_access_token_expires_at <= datetime.now( + tz=timezone.utc + ): + raise Exception( + f"[{self.agent_id}] Twitter access token has expired" + ) + self._client = AsyncClient( + bearer_token=self._agent_data.twitter_access_token, + return_type=dict, + ) + return self._client + return self._client + + @property + def self_id(self) -> Optional[str]: + """Get the Twitter user ID. + + Returns: + The Twitter user ID if available, None otherwise + """ + if not self._client: + return None + if not self._agent_data: + return None + return self._agent_data.twitter_id + + @property + def self_username(self) -> Optional[str]: + """Get the Twitter username. + + Returns: + The Twitter username (without @ symbol) if available, None otherwise + """ + if not self._client: + return None + if not self._agent_data: + return None + return self._agent_data.twitter_username + + @property + def self_name(self) -> Optional[str]: + """Get the Twitter display name. + + Returns: + The Twitter display name if available, None otherwise + """ + if not self._client: + return None + if not self._agent_data: + return None + return self._agent_data.twitter_name + + @property + def self_is_verified(self) -> Optional[bool]: + """Get the Twitter account verification status. + + Returns: + The Twitter account verification status if available, None otherwise + """ + if not self._client: + return None + if not self._agent_data: + return None + return self._agent_data.twitter_is_verified + + def process_tweets_response(self, response: Dict[str, Any]) -> List[Tweet]: + """Process Twitter API response and convert it to a list of Tweet objects. + + Args: + response: Raw Twitter API response containing tweets data and includes. + + Returns: + List[Tweet]: List of processed Tweet objects. 
+ """ + result = [] + if not response.get("data"): + return result + + # Create lookup dictionaries from includes + users_dict = {} + if response.get("includes") and "users" in response.get("includes"): + users_dict = { + user["id"]: TwitterUser( + id=str(user["id"]), + name=user["name"], + username=user["username"], + description=user["description"], + public_metrics=user["public_metrics"], + is_following="following" in user.get("connection_status", []), + is_follower="followed_by" in user.get("connection_status", []), + ) + for user in response.get("includes", {}).get("users", []) + } + + media_dict = {} + if response.get("includes") and "media" in response.get("includes"): + media_dict = { + media["media_key"]: TwitterMedia( + media_key=media["media_key"], + type=media["type"], + url=media.get("url"), + ) + for media in response.get("includes", {}).get("media", []) + } + + tweets_dict = {} + if response.get("includes") and "tweets" in response.get("includes"): + tweets_dict = { + tweet["id"]: Tweet( + id=str(tweet["id"]), + text=tweet["text"], + author_id=str(tweet["author_id"]), + created_at=datetime.fromisoformat( + tweet["created_at"].replace("Z", "+00:00") + ), + author=users_dict.get(tweet["author_id"]), + referenced_tweets=None, # Will be populated in second pass + attachments=None, # Will be populated in second pass + ) + for tweet in response.get("includes", {}).get("tweets", []) + } + + # Process main tweets + for tweet_data in response["data"]: + tweet_id = tweet_data["id"] + author_id = tweet_data["author_id"] + + # Process attachments if present + attachments = None + if ( + "attachments" in tweet_data + and "media_keys" in tweet_data["attachments"] + ): + attachments = [ + media_dict[media_key] + for media_key in tweet_data["attachments"]["media_keys"] + if media_key in media_dict + ] + + # Process referenced tweets if present + referenced_tweets = None + if "referenced_tweets" in tweet_data: + referenced_tweets = [ + tweets_dict[ref["id"]] + for ref in tweet_data["referenced_tweets"] + if ref["id"] in tweets_dict + ] + + # Create the Tweet object + tweet = Tweet( + id=str(tweet_id), + text=tweet_data["text"], + author_id=str(author_id), + created_at=datetime.fromisoformat( + tweet_data["created_at"].replace("Z", "+00:00") + ), + author=users_dict.get(author_id), + referenced_tweets=referenced_tweets, + attachments=attachments, + ) + result.append(tweet) + + return result + + async def upload_media(self, agent_id: str, image_url: str) -> List[str]: + """Upload media to Twitter and return the media IDs. + + Args: + agent_id: The ID of the agent. + image_url: The URL of the image to upload. + + Returns: + List[str]: A list of media IDs for the uploaded media. + + Raises: + ValueError: If there's an error uploading the media. 
+ """ + # Get agent data to access the token + agent_data = await self._skill_store.get_agent_data(agent_id) + if not agent_data.twitter_access_token: + raise ValueError("Only linked X account can post media") + + media_ids = [] + # Download the image + async with httpx.AsyncClient() as session: + response = await session.get(image_url) + if response.status_code == 200: + # Create a temporary file to store the image + with tempfile.NamedTemporaryFile(delete=False) as tmp_file: + tmp_file.write(response.content) + tmp_file_path = tmp_file.name + + # tweepy is outdated, we need to use httpx call new API + try: + # Upload the image directly to Twitter using the Media Upload API + headers = { + "Authorization": f"Bearer {agent_data.twitter_access_token}" + } + + # Upload to Twitter's media/upload endpoint using multipart/form-data + upload_url = "https://api.twitter.com/2/media/upload" + + # Get the content type from the response headers or default to image/jpeg + content_type = response.headers.get("content-type", "image/jpeg") + + # Create a multipart form with the image file and required parameters + files = { + "media": ( + "image", + open(tmp_file_path, "rb"), + content_type, + ) + } + + # Add required parameters according to new API + data = {"media_category": "tweet_image", "media_type": content_type} + + upload_response = await session.post( + upload_url, headers=headers, files=files, data=data + ) + + if upload_response.status_code == 200: + media_data = upload_response.json() + if "data" in media_data and "id" in media_data["data"]: + media_ids.append(media_data["data"]["id"]) + else: + raise ValueError( + f"Unexpected response format from Twitter media upload: {media_data}" + ) + else: + raise ValueError( + f"Failed to upload image to Twitter. Status code: {upload_response.status_code}, Response: {upload_response.text}" + ) + finally: + # Clean up the temporary file + if os.path.exists(tmp_file_path): + os.unlink(tmp_file_path) + else: + raise ValueError( + f"Failed to download image from URL: {image_url}. 
Status code: {response.status_code}" + ) + + return media_ids + + +def _is_self_key(config: Dict) -> bool: + return config.get("api_key_provider") == "agent_owner" + + +def get_twitter_client( + agent_id: str, skill_store: SkillStoreABC, config: Dict +) -> "TwitterClient": + if _is_self_key(config): + if agent_id not in _clients_self_key: + _clients_self_key[agent_id] = TwitterClient(agent_id, skill_store, config) + return _clients_self_key[agent_id] + if agent_id not in _clients_linked: + _clients_linked[agent_id] = TwitterClient(agent_id, skill_store, config) + return _clients_linked[agent_id] + + +async def unlink_twitter(agent_id: str) -> AgentData: + logger.info(f"Unlinking Twitter for agent {agent_id}") + return await AgentData.patch( + agent_id, + { + "twitter_id": None, + "twitter_username": None, + "twitter_name": None, + "twitter_access_token": None, + "twitter_access_token_expires_at": None, + "twitter_refresh_token": None, + }, + ) + + +# This class is forked from: +# https://github.com/tweepy/tweepy/blob/main/tweepy/auth.py +# It is no longer maintained by the original author, so bugs need to be fixed here +class OAuth2UserHandler(OAuth2Session): + """OAuth 2.0 Authorization Code Flow with PKCE (User Context) + authentication handler + """ + + def __init__(self, *, client_id, redirect_uri, scope, client_secret=None): + super().__init__(client_id, redirect_uri=redirect_uri, scope=scope) + if client_secret is not None: + self.auth = HTTPBasicAuth(client_id, client_secret) + else: + self.auth = None + self.code_challenge = self._client.create_code_challenge( + self._client.create_code_verifier(128), "S256" + ) + + def get_authorization_url(self, agent_id: str, redirect_uri: str): + """Get the authorization URL to redirect the user to + + Args: + agent_id: ID of the agent to authenticate + redirect_uri: URI to redirect to after authorization + """ + state_params = {"agent_id": agent_id, "redirect_uri": redirect_uri} + authorization_url, _ = self.authorization_url( + "https://x.com/i/oauth2/authorize", + state=urlencode(state_params), + code_challenge=self.code_challenge, + code_challenge_method="S256", + ) + return authorization_url + + def get_token(self, authorization_response): + """After the user has authorized the app, fetch the access token using the + authorization response URL + """ + return super().fetch_token( + "https://api.x.com/2/oauth2/token", + authorization_response=authorization_response, + auth=self.auth, + include_client_id=True, + code_verifier=self._client.code_verifier, + ) + + def refresh(self, refresh_token: str): + """Refresh token""" + return super().refresh_token( + "https://api.x.com/2/oauth2/token", + refresh_token=refresh_token, + include_client_id=True, + ) diff --git a/intentkit/clients/web3.py b/intentkit/clients/web3.py new file mode 100644 index 00000000..267b9a03 --- /dev/null +++ b/intentkit/clients/web3.py @@ -0,0 +1,32 @@ +from typing import Dict + +from web3 import Web3 + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.utils.chain import ChainProvider + +# Global cache for Web3 clients by network_id +_web3_client_cache: Dict[str, Web3] = {} + + +def get_web3_client(network_id: str, skill_store: SkillStoreABC) -> Web3: + """Get a Web3 client for the specified network. 
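A sketch of the full PKCE flow using the forked handler above; every client credential, URL, and scope below is a placeholder:

```python
# Hypothetical end-to-end use of OAuth2UserHandler; all credentials,
# URLs, and scopes are placeholders.
handler = OAuth2UserHandler(
    client_id="YOUR_CLIENT_ID",
    client_secret="YOUR_CLIENT_SECRET",  # optional; enables HTTP basic auth
    redirect_uri="https://example.com/callback",
    scope=["tweet.read", "users.read", "offline.access"],
)
url = handler.get_authorization_url(
    agent_id="agent-123", redirect_uri="https://app.example.com/done"
)
# Redirect the user to `url`; after consent, X redirects back with code and state.
token = handler.get_token("https://example.com/callback?code=PLACEHOLDER&state=PLACEHOLDER")
# Later, rotate the access token:
token = handler.refresh(token["refresh_token"])
```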
+ + Args: + network_id: The network ID to get the Web3 client for + skill_store: The skill store to get system configuration from + + Returns: + Web3: A Web3 client instance for the specified network + """ + # Check global cache first + if network_id in _web3_client_cache: + return _web3_client_cache[network_id] + + # Create new Web3 client and cache it + chain_provider: ChainProvider = skill_store.get_system_config("chain_provider") + chain = chain_provider.get_chain_config(network_id) + web3_client = Web3(Web3.HTTPProvider(chain.rpc_url)) + _web3_client_cache[network_id] = web3_client + + return web3_client diff --git a/intentkit/config/__init__.py b/intentkit/config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/config/config.py b/intentkit/config/config.py new file mode 100644 index 00000000..81aef526 --- /dev/null +++ b/intentkit/config/config.py @@ -0,0 +1,209 @@ +# app/config.py +import json +import logging +import os + +from dotenv import load_dotenv + +from intentkit.utils.chain import ChainProvider, QuicknodeChainProvider +from intentkit.utils.logging import setup_logging +from intentkit.utils.s3 import init_s3 +from intentkit.utils.slack_alert import init_slack + +# Load environment variables from .env file +load_dotenv() + +logger = logging.getLogger(__name__) + + +def load_from_aws(name): + import botocore.session + from aws_secretsmanager_caching import SecretCache, SecretCacheConfig + + client = botocore.session.get_session().create_client("secretsmanager") + cache_config = SecretCacheConfig() + cache = SecretCache(config=cache_config, client=client) + secret = cache.get_secret_string(name) + return json.loads(secret) + + +class Config: + def __init__(self): + # ==== this part can only be loaded from env + self.env = os.getenv("ENV", "local") + self.release = os.getenv("RELEASE", "local") + secret_name = os.getenv("AWS_SECRET_NAME") + db_secret_name = os.getenv("AWS_DB_SECRET_NAME") + # ==== load from aws secrets manager + if secret_name: + self.secrets = load_from_aws(secret_name) + else: + self.secrets = {} + if db_secret_name: + self.db = load_from_aws(db_secret_name) + # format the db config + self.db["port"] = str(self.db["port"]) + # only keep the necessary fields + self.db = { + k: v + for k, v in self.db.items() + if k in ["username", "password", "host", "dbname", "port"] + } + else: + self.db = { + "username": self.load("DB_USERNAME", ""), + "password": self.load("DB_PASSWORD", ""), + "host": self.load("DB_HOST", ""), + "port": self.load("DB_PORT", "5432"), + "dbname": self.load("DB_NAME", ""), + } + # ==== this part can be loaded from env or aws secrets manager + self.db["auto_migrate"] = self.load("DB_AUTO_MIGRATE", "true") == "true" + self.db["pool_size"] = self.load_int("DB_POOL_SIZE", 3) + self.debug = self.load("DEBUG") == "true" + self.debug_checkpoint = ( + self.load("DEBUG_CHECKPOINT", "false") == "true" + ) # log with checkpoint + # Redis + self.redis_host = self.load("REDIS_HOST") + self.redis_port = self.load_int("REDIS_PORT", 6379) + self.redis_db = self.load_int("REDIS_DB", 0) + # AWS + self.aws_s3_bucket = self.load("AWS_S3_BUCKET") + self.aws_s3_cdn_url = self.load("AWS_S3_CDN_URL") + # Internal API + self.internal_base_url = self.load("INTERNAL_BASE_URL", "http://intent-api") + self.admin_auth_enabled = self.load("ADMIN_AUTH_ENABLED", "false") == "true" + self.admin_jwt_secret = self.load("ADMIN_JWT_SECRET") + self.debug_auth_enabled = self.load("DEBUG_AUTH_ENABLED", "false") == "true" + self.debug_username = 
self.load("DEBUG_USERNAME") + self.debug_password = self.load("DEBUG_PASSWORD") + self.admin_llm_skill_control = ( + self.load("ADMIN_LLM_SKILL_CONTROL", "false") == "true" + ) + # Payment + self.payment_enabled = self.load("PAYMENT_ENABLED", "false") == "true" + # Open API for agent + self.open_api_base_url = self.load("OPEN_API_BASE_URL", "http://localhost:8000") + # CDP - AgentKit 0.6.0 Configuration + self.cdp_api_key_id = self.load("CDP_API_KEY_ID") + self.cdp_api_key_secret = self.load("CDP_API_KEY_SECRET") + self.cdp_wallet_secret = self.load("CDP_WALLET_SECRET") + # LLM providers + self.openai_api_key = self.load("OPENAI_API_KEY") + self.deepseek_api_key = self.load("DEEPSEEK_API_KEY") + self.xai_api_key = self.load("XAI_API_KEY") + self.eternal_api_key = self.load("ETERNAL_API_KEY") + self.reigent_api_key = self.load("REIGENT_API_KEY") + self.venice_api_key = self.load("VENICE_API_KEY") + # LLM Config + self.system_prompt = self.load("SYSTEM_PROMPT") + self.intentkit_prompt = self.load("INTENTKIT_PROMPT") + self.input_token_limit = self.load_int("INPUT_TOKEN_LIMIT", 60000) + # XMTP + self.xmtp_system_prompt = self.load( + "XMTP_SYSTEM_PROMPT", + "You are assisting a user who uses an XMTP client that only displays plain-text messages, so do not use Markdown formatting.", + ) + # Telegram server settings + self.tg_system_prompt = self.load("TG_SYSTEM_PROMPT") + self.tg_base_url = self.load("TG_BASE_URL") + self.tg_server_host = self.load("TG_SERVER_HOST", "127.0.0.1") + self.tg_server_port = self.load("TG_SERVER_PORT", "8081") + self.tg_new_agent_poll_interval = self.load("TG_NEW_AGENT_POLL_INTERVAL", "60") + # Twitter + self.twitter_oauth2_client_id = self.load("TWITTER_OAUTH2_CLIENT_ID") + self.twitter_oauth2_client_secret = self.load("TWITTER_OAUTH2_CLIENT_SECRET") + self.twitter_oauth2_redirect_uri = self.load("TWITTER_OAUTH2_REDIRECT_URI") + self.twitter_entrypoint_interval = self.load_int( + "TWITTER_ENTRYPOINT_INTERVAL", 5 + ) # in minutes + # Slack Alert + self.slack_alert_token = self.load("SLACK_ALERT_TOKEN") + self.slack_alert_channel = self.load("SLACK_ALERT_CHANNEL") + # Skills - Platform Hosted Keys + self.acolyt_api_key = self.load("ACOLYT_API_KEY") + self.allora_api_key = self.load("ALLORA_API_KEY") + self.elfa_api_key = self.load("ELFA_API_KEY") + self.heurist_api_key = self.load("HEURIST_API_KEY") + self.enso_api_token = self.load("ENSO_API_TOKEN") + self.dapplooker_api_key = self.load("DAPPLOOKER_API_KEY") + self.moralis_api_key = self.load("MORALIS_API_KEY") + self.tavily_api_key = self.load("TAVILY_API_KEY") + self.cookiefun_api_key = self.load("COOKIEFUN_API_KEY") + self.firecrawl_api_key = self.load("FIRECRAWL_API_KEY") + # Sentry + self.sentry_dsn = self.load("SENTRY_DSN") + self.sentry_sample_rate = self.load_float("SENTRY_SAMPLE_RATE", 0.1) + self.sentry_traces_sample_rate = self.load_float( + "SENTRY_TRACES_SAMPLE_RATE", 0.01 + ) + self.sentry_profiles_sample_rate = self.load_float( + "SENTRY_PROFILES_SAMPLE_RATE", 0.01 + ) + # RPC Providers + self.quicknode_api_key = self.load("QUICKNODE_API_KEY") + if self.quicknode_api_key: + self.chain_provider: ChainProvider = QuicknodeChainProvider( + self.quicknode_api_key + ) + if hasattr(self, "chain_provider"): + self.chain_provider.init_chain_configs() + self.rpc_networks = self.load( + "RPC_NETWORKS", "base-mainnet,base-sepolia,ethereum-sepolia,solana-mainnet" + ) + + # Nation + self.nation_api_key = self.load("NATION_API_KEY") + self.nation_api_url = self.load("NATION_API_URL", "") + + # ===== config loaded + 
# Now we know the env, set up logging + setup_logging(self.env, self.debug) + logger.info("config loaded") + + # If the slack alert token exists, init it + if self.slack_alert_token and self.slack_alert_channel: + init_slack(self.slack_alert_token, self.slack_alert_channel) + # If the AWS S3 bucket and CDN URL exist, init it + if self.aws_s3_bucket and self.aws_s3_cdn_url: + init_s3(self.aws_s3_bucket, self.aws_s3_cdn_url, self.env) + + def load(self, key, default=None): + """Load a secret from the secrets map or env""" + value = self.secrets.get(key, os.getenv(key, default)) + + # If value is empty string, use default instead + if value == "": + value = default + + if value: + value = value.replace("\\n", "\n") + if value and value.startswith("'") and value.endswith("'"): + value = value[1:-1] + return value + + def load_int(self, key, default=0): + """Load an integer value from env, handling empty strings gracefully""" + value = self.load(key, str(default)) + if value is None or value == "": + return default + try: + return int(value) + except (ValueError, TypeError): + logger.warning(f"Invalid integer value for {key}, using default: {default}") + return default + + def load_float(self, key, default=0.0): + """Load a float value from env, handling empty strings gracefully""" + value = self.load(key, str(default)) + if value is None or value == "": + return default + try: + return float(value) + except (ValueError, TypeError): + logger.warning(f"Invalid float value for {key}, using default: {default}") + return default + + +config: Config = Config() diff --git a/intentkit/core/__init__.py b/intentkit/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/core/agent.py b/intentkit/core/agent.py new file mode 100644 index 00000000..6f0bb135 --- /dev/null +++ b/intentkit/core/agent.py @@ -0,0 +1,464 @@ +import logging +import time +from datetime import datetime, timedelta, timezone +from decimal import Decimal +from typing import Dict, List + +from sqlalchemy import func, select, text, update + +from intentkit.models.agent import Agent, AgentAutonomous, AgentTable +from intentkit.models.agent_data import AgentQuotaTable +from intentkit.models.credit import CreditEventTable, EventType, UpstreamType +from intentkit.models.db import get_session +from intentkit.utils.error import IntentKitAPIError + +logger = logging.getLogger(__name__) + + +async def agent_action_cost(agent_id: str) -> Dict[str, Decimal]: + """ + Calculate various action cost metrics for an agent based on past three days of credit events. + + Metrics calculated: + - avg_action_cost: average cost per action + - min_action_cost: minimum cost per action + - max_action_cost: maximum cost per action + - low_action_cost: average cost of the lowest 20% of actions + - medium_action_cost: average cost of the middle 60% of actions + - high_action_cost: average cost of the highest 20% of actions + + Args: + agent_id: ID of the agent + + Returns: + Dict[str, Decimal]: Dictionary containing all calculated cost metrics + """ + start_time = time.time() + default_value = Decimal("0") + + agent = await Agent.get(agent_id) + if not agent: + raise IntentKitAPIError( + 400, "AgentNotFound", f"Agent with ID {agent_id} does not exist." 
+ ) + + async with get_session() as session: + # Calculate the date 3 days ago from now + three_days_ago = datetime.now(timezone.utc) - timedelta(days=3) + + # First, count the number of distinct start_message_ids to determine if we have enough data + count_query = select( + func.count(func.distinct(CreditEventTable.start_message_id)) + ).where( + CreditEventTable.agent_id == agent_id, + CreditEventTable.created_at >= three_days_ago, + CreditEventTable.user_id != agent.owner, + CreditEventTable.upstream_type == UpstreamType.EXECUTOR, + CreditEventTable.event_type.in_([EventType.MESSAGE, EventType.SKILL_CALL]), + CreditEventTable.start_message_id.is_not(None), + ) + + result = await session.execute(count_query) + record_count = result.scalar_one() + + # If we have fewer than 10 records, return default values + if record_count < 10: + time_cost = time.time() - start_time + logger.info( + f"agent_action_cost for {agent_id}: using default values (insufficient records: {record_count}) timeCost={time_cost:.3f}s" + ) + return { + "avg_action_cost": default_value, + "min_action_cost": default_value, + "max_action_cost": default_value, + "low_action_cost": default_value, + "medium_action_cost": default_value, + "high_action_cost": default_value, + } + + # Calculate the basic metrics (avg, min, max) directly in PostgreSQL + basic_metrics_query = text(""" + WITH action_sums AS ( + SELECT start_message_id, SUM(total_amount) AS action_cost + FROM credit_events + WHERE agent_id = :agent_id + AND created_at >= :three_days_ago + AND upstream_type = :upstream_type + AND event_type IN (:event_type_message, :event_type_skill_call) + AND start_message_id IS NOT NULL + GROUP BY start_message_id + ) + SELECT + AVG(action_cost) AS avg_cost, + MIN(action_cost) AS min_cost, + MAX(action_cost) AS max_cost + FROM action_sums + """) + + # Calculate the percentile-based metrics (low, medium, high) using window functions + percentile_metrics_query = text(""" + WITH action_sums AS ( + SELECT + start_message_id, + SUM(total_amount) AS action_cost, + NTILE(5) OVER (ORDER BY SUM(total_amount)) AS quintile + FROM credit_events + WHERE agent_id = :agent_id + AND created_at >= :three_days_ago + AND upstream_type = :upstream_type + AND event_type IN (:event_type_message, :event_type_skill_call) + AND start_message_id IS NOT NULL + GROUP BY start_message_id + ) + SELECT + (SELECT AVG(action_cost) FROM action_sums WHERE quintile = 1) AS low_cost, + (SELECT AVG(action_cost) FROM action_sums WHERE quintile IN (2, 3, 4)) AS medium_cost, + (SELECT AVG(action_cost) FROM action_sums WHERE quintile = 5) AS high_cost + FROM action_sums + LIMIT 1 + """) + + # Bind parameters to prevent SQL injection and ensure correct types + params = { + "agent_id": agent_id, + "three_days_ago": three_days_ago, + "upstream_type": UpstreamType.EXECUTOR, + "event_type_message": EventType.MESSAGE, + "event_type_skill_call": EventType.SKILL_CALL, + } + + # Execute the basic metrics query + basic_result = await session.execute(basic_metrics_query, params) + basic_row = basic_result.fetchone() + + # Execute the percentile metrics query + percentile_result = await session.execute(percentile_metrics_query, params) + percentile_row = percentile_result.fetchone() + + # If no results, return the default values + if not basic_row or basic_row[0] is None: + time_cost = time.time() - start_time + logger.info( + f"agent_action_cost for {agent_id}: using default values (no action costs found) timeCost={time_cost:.3f}s" + ) + return { + "avg_action_cost": default_value, 
+ "min_action_cost": default_value, + "max_action_cost": default_value, + "low_action_cost": default_value, + "medium_action_cost": default_value, + "high_action_cost": default_value, + } + + # Extract and convert the values to Decimal for consistent precision + avg_cost = Decimal(str(basic_row[0] or 0)).quantize(Decimal("0.0001")) + min_cost = Decimal(str(basic_row[1] or 0)).quantize(Decimal("0.0001")) + max_cost = Decimal(str(basic_row[2] or 0)).quantize(Decimal("0.0001")) + + # Extract percentile-based metrics + low_cost = ( + Decimal(str(percentile_row[0] or 0)).quantize(Decimal("0.0001")) + if percentile_row and percentile_row[0] is not None + else default_value + ) + medium_cost = ( + Decimal(str(percentile_row[1] or 0)).quantize(Decimal("0.0001")) + if percentile_row and percentile_row[1] is not None + else default_value + ) + high_cost = ( + Decimal(str(percentile_row[2] or 0)).quantize(Decimal("0.0001")) + if percentile_row and percentile_row[2] is not None + else default_value + ) + + # Create the result dictionary + result = { + "avg_action_cost": avg_cost, + "min_action_cost": min_cost, + "max_action_cost": max_cost, + "low_action_cost": low_cost, + "medium_action_cost": medium_cost, + "high_action_cost": high_cost, + } + + time_cost = time.time() - start_time + logger.info( + f"agent_action_cost for {agent_id}: avg={avg_cost}, min={min_cost}, max={max_cost}, " + f"low={low_cost}, medium={medium_cost}, high={high_cost} " + f"(records: {record_count}) timeCost={time_cost:.3f}s" + ) + + return result + + +async def update_agent_action_cost(): + """ + Update action costs for all agents. + + This function processes agents in batches of 100 to avoid memory issues. + For each agent, it calculates various action cost metrics: + - avg_action_cost: average cost per action + - min_action_cost: minimum cost per action + - max_action_cost: maximum cost per action + - low_action_cost: average cost of the lowest 20% of actions + - medium_action_cost: average cost of the middle 60% of actions + - high_action_cost: average cost of the highest 20% of actions + + It then updates the corresponding record in the agent_quotas table. 
+ """ + logger.info("Starting update of agent average action costs") + start_time = time.time() + batch_size = 100 + last_id = None + total_updated = 0 + + while True: + # Get a batch of agent IDs ordered by ID + async with get_session() as session: + query = select(AgentTable.id).order_by(AgentTable.id) + + # Apply pagination if we have a last_id from previous batch + if last_id: + query = query.where(AgentTable.id > last_id) + + query = query.limit(batch_size) + result = await session.execute(query) + agent_ids = [row[0] for row in result] + + # If no more agents, we're done + if not agent_ids: + break + + # Update last_id for next batch + last_id = agent_ids[-1] + + # Process this batch of agents + logger.info( + f"Processing batch of {len(agent_ids)} agents starting with ID {agent_ids[0]}" + ) + batch_start_time = time.time() + + for agent_id in agent_ids: + try: + # Calculate action costs for this agent + costs = await agent_action_cost(agent_id) + + # Update the agent's quota record + async with get_session() as session: + update_stmt = ( + update(AgentQuotaTable) + .where(AgentQuotaTable.id == agent_id) + .values( + avg_action_cost=costs["avg_action_cost"], + min_action_cost=costs["min_action_cost"], + max_action_cost=costs["max_action_cost"], + low_action_cost=costs["low_action_cost"], + medium_action_cost=costs["medium_action_cost"], + high_action_cost=costs["high_action_cost"], + ) + ) + await session.execute(update_stmt) + await session.commit() + + total_updated += 1 + except Exception as e: + logger.error( + f"Error updating action costs for agent {agent_id}: {str(e)}" + ) + + batch_time = time.time() - batch_start_time + logger.info(f"Completed batch in {batch_time:.3f}s") + + total_time = time.time() - start_time + logger.info( + f"Finished updating action costs for {total_updated} agents in {total_time:.3f}s" + ) + + +async def list_autonomous_tasks(agent_id: str) -> List[AgentAutonomous]: + """ + List all autonomous tasks for an agent. + + Args: + agent_id: ID of the agent + + Returns: + List[AgentAutonomous]: List of autonomous task configurations + + Raises: + IntentKitAPIError: If agent is not found + """ + agent = await Agent.get(agent_id) + if not agent: + raise IntentKitAPIError( + 400, "AgentNotFound", f"Agent with ID {agent_id} does not exist." + ) + + if not agent.autonomous: + return [] + + return agent.autonomous + + +async def add_autonomous_task(agent_id: str, task: AgentAutonomous) -> AgentAutonomous: + """ + Add a new autonomous task to an agent. + + Args: + agent_id: ID of the agent + task: Autonomous task configuration (id will be generated if not provided) + + Returns: + AgentAutonomous: The created task with generated ID + + Raises: + IntentKitAPIError: If agent is not found + """ + agent = await Agent.get(agent_id) + if not agent: + raise IntentKitAPIError( + 400, "AgentNotFound", f"Agent with ID {agent_id} does not exist." 
+ ) + + # Get current autonomous tasks + current_tasks = agent.autonomous or [] + if not isinstance(current_tasks, list): + current_tasks = [] + + # Add the new task + current_tasks.append(task) + + # Convert all AgentAutonomous objects to dictionaries for JSON serialization + serializable_tasks = [task_item.model_dump() for task_item in current_tasks] + + # Update the agent in the database + async with get_session() as session: + update_stmt = ( + update(AgentTable) + .where(AgentTable.id == agent_id) + .values(autonomous=serializable_tasks) + ) + await session.execute(update_stmt) + await session.commit() + + logger.info(f"Added autonomous task {task.id} to agent {agent_id}") + return task + + +async def delete_autonomous_task(agent_id: str, task_id: str) -> None: + """ + Delete an autonomous task from an agent. + + Args: + agent_id: ID of the agent + task_id: ID of the task to delete + + Raises: + IntentKitAPIError: If agent is not found or task is not found + """ + agent = await Agent.get(agent_id) + if not agent: + raise IntentKitAPIError( + 400, "AgentNotFound", f"Agent with ID {agent_id} does not exist." + ) + + # Get current autonomous tasks + current_tasks = agent.autonomous or [] + if not isinstance(current_tasks, list): + current_tasks = [] + + # Find and remove the task + task_found = False + updated_tasks = [] + for task_data in current_tasks: + if task_data.id == task_id: + task_found = True + continue + updated_tasks.append(task_data) + + if not task_found: + raise IntentKitAPIError( + 404, "TaskNotFound", f"Autonomous task with ID {task_id} not found." + ) + + # Convert remaining AgentAutonomous objects to dictionaries for JSON serialization + serializable_tasks = [task_item.model_dump() for task_item in updated_tasks] + + # Update the agent in the database + async with get_session() as session: + update_stmt = ( + update(AgentTable) + .where(AgentTable.id == agent_id) + .values(autonomous=serializable_tasks) + ) + await session.execute(update_stmt) + await session.commit() + + logger.info(f"Deleted autonomous task {task_id} from agent {agent_id}") + + +async def update_autonomous_task( + agent_id: str, task_id: str, task_updates: dict +) -> AgentAutonomous: + """ + Update an autonomous task for an agent. + + Args: + agent_id: ID of the agent + task_id: ID of the task to update + task_updates: Dictionary containing fields to update + + Returns: + AgentAutonomous: The updated task + + Raises: + IntentKitAPIError: If agent is not found or task is not found + """ + agent = await Agent.get(agent_id) + if not agent: + raise IntentKitAPIError( + 400, "AgentNotFound", f"Agent with ID {agent_id} does not exist." + ) + + # Get current autonomous tasks + current_tasks: List[AgentAutonomous] = agent.autonomous or [] + + # Find and update the task + task_found = False + updated_tasks: List[AgentAutonomous] = [] + updated_task = None + + for task_data in current_tasks: + if task_data.id == task_id: + task_found = True + # Create a dictionary with current task data + task_dict = task_data.model_dump() + # Update with provided fields + task_dict.update(task_updates) + # Create new AgentAutonomous instance + updated_task = AgentAutonomous.model_validate(task_dict) + updated_tasks.append(updated_task) + else: + updated_tasks.append(task_data) + + if not task_found: + raise IntentKitAPIError( + 404, "TaskNotFound", f"Autonomous task with ID {task_id} not found." 
+ ) + + # Convert all AgentAutonomous objects to dictionaries for JSON serialization + serializable_tasks = [task_item.model_dump() for task_item in updated_tasks] + + # Update the agent in the database + async with get_session() as session: + update_stmt = ( + update(AgentTable) + .where(AgentTable.id == agent_id) + .values(autonomous=serializable_tasks) + ) + await session.execute(update_stmt) + await session.commit() + + logger.info(f"Updated autonomous task {task_id} for agent {agent_id}") + return updated_task diff --git a/intentkit/core/api.py b/intentkit/core/api.py new file mode 100644 index 00000000..2f59d956 --- /dev/null +++ b/intentkit/core/api.py @@ -0,0 +1,100 @@ +"""Core API Router. + +This module provides the core API endpoints for agent execution and management. +""" + +from typing import Annotated + +from fastapi import APIRouter, Body +from fastapi.responses import StreamingResponse +from pydantic import AfterValidator + +from intentkit.core.engine import execute_agent, stream_agent +from intentkit.models.chat import ChatMessage, ChatMessageCreate + +core_router = APIRouter(prefix="/core", tags=["Core"]) + + +@core_router.post("/execute", response_model=list[ChatMessage]) +async def execute( + message: Annotated[ + ChatMessageCreate, AfterValidator(ChatMessageCreate.model_validate) + ] = Body( + ChatMessageCreate, + description="The chat message containing agent_id, chat_id and message content", + ), +) -> list[ChatMessage]: + """Execute an agent with the provided message and return all results. + + This endpoint executes an agent with the provided message and returns all + generated messages as a complete list after execution finishes. + + **Request Body:** + * `message` - The chat message containing agent_id, chat_id and message content + + **Response:** + Returns a list of ChatMessage objects containing: + * Skill call results (including tool executions) + * Agent reasoning and responses + * System messages or error notifications + + **Returns:** + * `list[ChatMessage]` - Complete list of response messages + + **Raises:** + * `HTTPException`: + - 400: If input parameters are invalid + - 404: If agent not found + - 500: For other server-side errors + """ + return await execute_agent(message) + + +@core_router.post("/stream") +async def stream( + message: Annotated[ + ChatMessageCreate, AfterValidator(ChatMessageCreate.model_validate) + ] = Body( + ChatMessageCreate, + description="The chat message containing agent_id, chat_id and message content", + ), +) -> StreamingResponse: + """Stream agent execution results in real-time using Server-Sent Events. + + This endpoint executes an agent with the provided message and streams the results + in real-time using the SSE (Server-Sent Events) standard format. 
+ + **Request Body:** + * `message` - The chat message containing agent_id, chat_id and message content + + **Stream Format:** + The response uses Server-Sent Events with the following format: + * Event type: `message` + * Data: ChatMessage object as JSON + * Format: `event: message\\ndata: {ChatMessage JSON}\\n\\n` + + **Response Content:** + Each streamed message can be: + * Skill call results (including tool executions) + * Agent reasoning and responses + * System messages or error notifications + + **Returns:** + * `StreamingResponse` - SSE stream with real-time ChatMessage objects + + **Raises:** + * `HTTPException`: + - 400: If input parameters are invalid + - 404: If agent not found + - 500: For other server-side errors + """ + + async def generate(): + async for chat_message in stream_agent(message): + yield f"event: message\ndata: {chat_message.model_dump_json()}\n\n" + + return StreamingResponse( + generate(), + media_type="text/event-stream", + headers={"Cache-Control": "no-cache", "Connection": "keep-alive"}, + ) diff --git a/intentkit/core/chat.py b/intentkit/core/chat.py new file mode 100644 index 00000000..de1a2a44 --- /dev/null +++ b/intentkit/core/chat.py @@ -0,0 +1,51 @@ +"""Chat memory management utilities. + +This module provides functions for managing chat thread memory, +including clearing thread history using LangGraph's checkpointer. +""" + +import logging + +from intentkit.models.db import get_langgraph_checkpointer +from intentkit.utils.error import IntentKitAPIError + +logger = logging.getLogger(__name__) + + +async def clear_thread_memory(agent_id: str, chat_id: str) -> bool: + """Clear all memory content for a specific thread. + + This function uses LangGraph's official checkpointer.delete_thread() method + to permanently remove all stored checkpoints and conversation history + associated with the specified thread. + + Args: + agent_id (str): The agent identifier + chat_id (str): The chat identifier + + Returns: + bool: True if the thread memory was successfully cleared + + Raises: + IntentKitAPIError: If there's an error clearing the thread memory + """ + try: + # Construct thread_id by combining agent_id and chat_id + thread_id = f"{agent_id}_{chat_id}" + + # Get the LangGraph checkpointer instance + checkpointer = get_langgraph_checkpointer() + + # Use the official LangGraph method to delete all thread content + await checkpointer.delete_thread(thread_id) + + logger.info(f"Successfully cleared thread memory for thread_id: {thread_id}") + return True + + except Exception as e: + logger.error( + f"Failed to clear thread memory for agent_id: {agent_id}, chat_id: {chat_id}. Error: {str(e)}" + ) + raise IntentKitAPIError( + 500, "ThreadMemoryClearError", f"Failed to clear thread memory: {str(e)}" + ) diff --git a/intentkit/core/client.py b/intentkit/core/client.py new file mode 100644 index 00000000..b95193a1 --- /dev/null +++ b/intentkit/core/client.py @@ -0,0 +1,85 @@ +"""Core Client Module. + +This module provides client functions for core API endpoints with environment-aware routing. +""" + +from typing import AsyncIterator + +import httpx + +from intentkit.config.config import config +from intentkit.core.engine import execute_agent as local_execute_agent +from intentkit.core.engine import stream_agent as local_stream_agent +from intentkit.models.chat import ChatMessage, ChatMessageCreate + + +async def execute_agent(message: ChatMessageCreate) -> list[ChatMessage]: + """Execute an agent with environment-aware routing. 
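For the memory helper above, a usage sketch (the IDs are hypothetical); note that the checkpointer thread id is the composite `f"{agent_id}_{chat_id}"`:

```python
# Usage sketch for clear_thread_memory; the IDs are hypothetical.
# Internally the LangGraph thread id is f"{agent_id}_{chat_id}".
import asyncio

from intentkit.core.chat import clear_thread_memory

asyncio.run(clear_thread_memory("agent-123", "chat-456"))
```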
+
+    In local environment, directly calls the local execute_agent function.
+    In other environments, makes HTTP request to the core API endpoint.
+
+    Args:
+        message (ChatMessageCreate): The chat message containing agent_id, chat_id and message content
+
+    Returns:
+        list[ChatMessage]: Formatted response lines from agent execution
+
+    Raises:
+        HTTPException: For API errors (in non-local environment)
+        Exception: For other execution errors
+    """
+    if config.env == "local":
+        return await local_execute_agent(message)
+
+    # Make HTTP request in non-local environment
+    url = f"{config.internal_base_url}/core/execute"
+    async with httpx.AsyncClient() as client:
+        response = await client.post(
+            url,
+            json=message.model_dump(mode="json"),
+            timeout=300,
+        )
+        response.raise_for_status()
+        json_data = response.json()
+        return [ChatMessage.model_validate(msg) for msg in json_data]
+
+
+async def stream_agent(message: ChatMessageCreate) -> AsyncIterator[ChatMessage]:
+    """Stream agent execution with environment-aware routing using Server-Sent Events.
+
+    In local environment, directly calls the local stream_agent function.
+    In other environments, makes HTTP request to the core stream API endpoint and parses SSE format.
+
+    Args:
+        message (ChatMessageCreate): The chat message containing agent_id, chat_id and message content
+
+    Yields:
+        ChatMessage: Individual response messages from agent execution
+
+    Raises:
+        HTTPException: For API errors (in non-local environment)
+        Exception: For other execution errors
+    """
+    if config.env == "local":
+        async for chat_message in local_stream_agent(message):
+            yield chat_message
+        return
+
+    # Make HTTP request in non-local environment
+    url = f"{config.internal_base_url}/core/stream"
+    async with httpx.AsyncClient() as client:
+        async with client.stream(
+            "POST",
+            url,
+            json=message.model_dump(mode="json"),
+            timeout=300,
+        ) as response:
+            response.raise_for_status()
+            async for line in response.aiter_lines():
+                if line.startswith("data: "):
+                    json_str = line[6:]  # Remove "data: " prefix
+                    if json_str.strip():
+                        yield ChatMessage.model_validate_json(json_str)
diff --git a/intentkit/core/credit.py b/intentkit/core/credit.py
new file mode 100644
index 00000000..6549e5f9
--- /dev/null
+++ b/intentkit/core/credit.py
@@ -0,0 +1,1977 @@
+import logging
+from datetime import datetime
+from decimal import ROUND_HALF_UP, Decimal
+from typing import List, Optional, Tuple
+
+from epyxid import XID
+from fastapi import HTTPException
+from pydantic import BaseModel
+from sqlalchemy import desc, select
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from intentkit.models.agent import Agent
+from intentkit.models.agent_data import AgentData
+from intentkit.models.app_setting import AppSetting
+from intentkit.models.credit import (
+    DEFAULT_PLATFORM_ACCOUNT_ADJUSTMENT,
+    DEFAULT_PLATFORM_ACCOUNT_DEV,
+    DEFAULT_PLATFORM_ACCOUNT_FEE,
+    DEFAULT_PLATFORM_ACCOUNT_MEMORY,
+    DEFAULT_PLATFORM_ACCOUNT_MESSAGE,
+    DEFAULT_PLATFORM_ACCOUNT_RECHARGE,
+    DEFAULT_PLATFORM_ACCOUNT_REFILL,
+    DEFAULT_PLATFORM_ACCOUNT_REWARD,
+    DEFAULT_PLATFORM_ACCOUNT_SKILL,
+    CreditAccount,
+    CreditAccountTable,
+    CreditDebit,
+    CreditEvent,
+    CreditEventTable,
+    CreditTransactionTable,
+    CreditType,
+    Direction,
+    EventType,
+    OwnerType,
+    RewardType,
+    TransactionType,
+    UpstreamType,
+)
+from intentkit.models.db import get_session
+from intentkit.models.skill import Skill
+from intentkit.utils.slack_alert import send_slack_message
+
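+# Illustrative fee math used throughout this module (example percentages,
+# not the configured defaults): with base = Decimal("1.0000"), a 10%
+# platform fee and a 2% agent fee,
+#     fee_platform = base * Decimal("10") / Decimal("100")            -> 0.1000
+#     fee_agent = (base + fee_platform) * Decimal("2") / Decimal("100") -> 0.0220
+#     total = base + fee_platform + fee_agent                          -> 1.1220
+# Every intermediate result is quantized to four decimal places with
+# ROUND_HALF_UP (see FOURPLACES below).
+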
+logger = logging.getLogger(__name__) + +# Define the precision for all decimal calculations (4 decimal places) +FOURPLACES = Decimal("0.0001") + + +async def update_credit_event_note( + session: AsyncSession, + event_id: str, + note: Optional[str] = None, +) -> CreditEvent: + """ + Update the note of a credit event. + + Args: + session: Async session to use for database operations + event_id: ID of the event to update + note: New note for the event + + Returns: + Updated credit event + + Raises: + HTTPException: If event is not found + """ + # Find the event + stmt = select(CreditEventTable).where(CreditEventTable.id == event_id) + result = await session.execute(stmt) + event = result.scalar_one_or_none() + + if not event: + raise HTTPException(status_code=404, detail="Credit event not found") + + # Update the note + event.note = note + await session.commit() + await session.refresh(event) + + return CreditEvent.model_validate(event) + + +async def recharge( + session: AsyncSession, + user_id: str, + amount: Decimal, + upstream_tx_id: str, + note: Optional[str] = None, +) -> CreditAccount: + """ + Recharge credits to a user account. + + Args: + session: Async session to use for database operations + user_id: ID of the user to recharge + amount: Amount of credits to recharge + upstream_tx_id: ID of the upstream transaction + note: Optional note for the transaction + + Returns: + Updated user credit account + """ + # Check for idempotency - prevent duplicate transactions + await CreditEvent.check_upstream_tx_id_exists( + session, UpstreamType.API, upstream_tx_id + ) + + if amount <= Decimal("0"): + raise ValueError("Recharge amount must be positive") + + # 1. Create credit event record first to get event_id + event_id = str(XID()) + + # 2. Update user account - add credits + user_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.USER, + owner_id=user_id, + amount_details={ + CreditType.PERMANENT: amount + }, # Recharge adds to permanent credits + event_id=event_id, + ) + + # 3. Update platform recharge account - deduct credits + platform_account = await CreditAccount.deduction_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_RECHARGE, + credit_type=CreditType.PERMANENT, + amount=amount, + event_id=event_id, + ) + + # 4. Create credit event record + event = CreditEventTable( + id=event_id, + event_type=EventType.RECHARGE, + user_id=user_id, + upstream_type=UpstreamType.API, + upstream_tx_id=upstream_tx_id, + direction=Direction.INCOME, + account_id=user_account.id, + total_amount=amount, + credit_type=CreditType.PERMANENT, + credit_types=[CreditType.PERMANENT], + balance_after=user_account.credits + + user_account.free_credits + + user_account.reward_credits, + base_amount=amount, + base_original_amount=amount, + base_free_amount=Decimal("0"), # No free credits involved in base amount + base_reward_amount=Decimal("0"), # No reward credits involved in base amount + base_permanent_amount=amount, # All base amount is permanent for recharge + permanent_amount=amount, # Set permanent_amount since this is a permanent credit + free_amount=Decimal("0"), # No free credits involved + reward_amount=Decimal("0"), # No reward credits involved + agent_wallet_address=None, # No agent involved in recharge + note=note, + ) + session.add(event) + await session.flush() + + # 4. 
Create credit transaction records
+    # 4.1 User account transaction (credit)
+    user_tx = CreditTransactionTable(
+        id=str(XID()),
+        account_id=user_account.id,
+        event_id=event_id,
+        tx_type=TransactionType.RECHARGE,
+        credit_debit=CreditDebit.CREDIT,
+        change_amount=amount,
+        credit_type=CreditType.PERMANENT,
+        free_amount=Decimal("0"),
+        reward_amount=Decimal("0"),
+        permanent_amount=amount,
+    )
+    session.add(user_tx)
+
+    # 4.2 Platform recharge account transaction (debit)
+    platform_tx = CreditTransactionTable(
+        id=str(XID()),
+        account_id=platform_account.id,
+        event_id=event_id,
+        tx_type=TransactionType.RECHARGE,
+        credit_debit=CreditDebit.DEBIT,
+        change_amount=amount,
+        credit_type=CreditType.PERMANENT,
+        free_amount=Decimal("0"),
+        reward_amount=Decimal("0"),
+        permanent_amount=amount,
+    )
+    session.add(platform_tx)
+
+    # Commit all changes
+    await session.commit()
+
+    # Send Slack notification for recharge
+    try:
+        send_slack_message(
+            f"💰 **Credit Recharge**\n"
+            f"• User ID: `{user_id}`\n"
+            f"• Amount: `{amount}` credits\n"
+            f"• Transaction ID: `{upstream_tx_id}`\n"
+            f"• New Balance: `{user_account.credits + user_account.free_credits + user_account.reward_credits}` credits\n"
+            f"• Note: {note or 'N/A'}"
+        )
+    except Exception as e:
+        logger.error(f"Failed to send Slack notification for recharge: {str(e)}")
+
+    return user_account
+
+
+async def reward(
+    session: AsyncSession,
+    user_id: str,
+    amount: Decimal,
+    upstream_tx_id: str,
+    note: Optional[str] = None,
+    reward_type: Optional[RewardType] = RewardType.REWARD,
+) -> CreditAccount:
+    """
+    Reward a user account with reward credits.
+
+    Args:
+        session: Async session to use for database operations
+        user_id: ID of the user to reward
+        amount: Amount of reward credits to add
+        upstream_tx_id: ID of the upstream transaction
+        note: Optional note for the transaction
+        reward_type: Type of the reward event (defaults to RewardType.REWARD)
+
+    Returns:
+        Updated user credit account
+    """
+    # Check for idempotency - prevent duplicate transactions
+    await CreditEvent.check_upstream_tx_id_exists(
+        session, UpstreamType.API, upstream_tx_id
+    )
+
+    if amount <= Decimal("0"):
+        raise ValueError("Reward amount must be positive")
+
+    # 1. Create credit event record first to get event_id
+    event_id = str(XID())
+
+    # 2. Update user account - add reward credits
+    user_account = await CreditAccount.income_in_session(
+        session=session,
+        owner_type=OwnerType.USER,
+        owner_id=user_id,
+        amount_details={CreditType.REWARD: amount},  # Reward adds to reward credits
+        event_id=event_id,
+    )
+
+    # 3. Update platform reward account - deduct credits
+    platform_account = await CreditAccount.deduction_in_session(
+        session=session,
+        owner_type=OwnerType.PLATFORM,
+        owner_id=DEFAULT_PLATFORM_ACCOUNT_REWARD,
+        credit_type=CreditType.REWARD,
+        amount=amount,
+        event_id=event_id,
+    )
+
+    # 4.
Create credit event record
+    event = CreditEventTable(
+        id=event_id,
+        event_type=reward_type,
+        user_id=user_id,
+        upstream_type=UpstreamType.API,
+        upstream_tx_id=upstream_tx_id,
+        direction=Direction.INCOME,
+        account_id=user_account.id,
+        total_amount=amount,
+        credit_type=CreditType.REWARD,
+        credit_types=[CreditType.REWARD],
+        balance_after=user_account.credits
+        + user_account.free_credits
+        + user_account.reward_credits,
+        base_amount=amount,
+        base_original_amount=amount,
+        base_free_amount=Decimal("0"),  # No free credits involved in base amount
+        base_reward_amount=amount,  # All base amount is reward for reward events
+        base_permanent_amount=Decimal(
+            "0"
+        ),  # No permanent credits involved in base amount
+        reward_amount=amount,  # Set reward_amount since this is a reward credit
+        free_amount=Decimal("0"),  # No free credits involved
+        permanent_amount=Decimal("0"),  # No permanent credits involved
+        agent_wallet_address=None,  # No agent involved in reward
+        note=note,
+    )
+    session.add(event)
+    await session.flush()
+
+    # 4. Create credit transaction records
+    # 4.1 User account transaction (credit)
+    user_tx = CreditTransactionTable(
+        id=str(XID()),
+        account_id=user_account.id,
+        event_id=event_id,
+        tx_type=reward_type,
+        credit_debit=CreditDebit.CREDIT,
+        change_amount=amount,
+        credit_type=CreditType.REWARD,
+        free_amount=Decimal("0"),
+        reward_amount=amount,
+        permanent_amount=Decimal("0"),
+    )
+    session.add(user_tx)
+
+    # 4.2 Platform reward account transaction (debit)
+    platform_tx = CreditTransactionTable(
+        id=str(XID()),
+        account_id=platform_account.id,
+        event_id=event_id,
+        tx_type=reward_type,
+        credit_debit=CreditDebit.DEBIT,
+        change_amount=amount,
+        credit_type=CreditType.REWARD,
+        free_amount=Decimal("0"),
+        reward_amount=amount,
+        permanent_amount=Decimal("0"),
+    )
+    session.add(platform_tx)
+
+    # Commit all changes
+    await session.commit()
+
+    # Send Slack notification for reward
+    try:
+        reward_type_name = reward_type.value if reward_type else "REWARD"
+        send_slack_message(
+            f"🎁 **Credit Reward**\n"
+            f"• User ID: `{user_id}`\n"
+            f"• Amount: `{amount}` reward credits\n"
+            f"• Transaction ID: `{upstream_tx_id}`\n"
+            f"• Reward Type: `{reward_type_name}`\n"
+            f"• New Balance: `{user_account.credits + user_account.free_credits + user_account.reward_credits}` credits\n"
+            f"• Note: {note or 'N/A'}"
+        )
+    except Exception as e:
+        logger.error(f"Failed to send Slack notification for reward: {str(e)}")
+
+    return user_account
+
+
+async def adjustment(
+    session: AsyncSession,
+    user_id: str,
+    credit_type: CreditType,
+    amount: Decimal,
+    upstream_tx_id: str,
+    note: str,
+) -> CreditAccount:
+    """
+    Adjust a user account's credits (can be positive or negative).
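+    For example (illustrative values), amount=Decimal("10") credits 10 units
+    of the given credit_type to the user, while amount=Decimal("-10") debits
+    10 units, with the offsetting entry booked on the platform adjustment
+    account in both cases.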
+ + Args: + session: Async session to use for database operations + user_id: ID of the user to adjust + credit_type: Type of credit to adjust (FREE, REWARD, or PERMANENT) + amount: Amount to adjust (positive for increase, negative for decrease) + upstream_tx_id: ID of the upstream transaction + note: Required explanation for the adjustment + + Returns: + Updated user credit account + """ + # Check for idempotency - prevent duplicate transactions + await CreditEvent.check_upstream_tx_id_exists( + session, UpstreamType.API, upstream_tx_id + ) + + if amount == Decimal("0"): + raise ValueError("Adjustment amount cannot be zero") + + if not note: + raise ValueError("Adjustment requires a note explaining the reason") + + # Determine direction based on amount sign + is_income = amount > Decimal("0") + abs_amount = abs(amount) + direction = Direction.INCOME if is_income else Direction.EXPENSE + credit_debit_user = CreditDebit.CREDIT if is_income else CreditDebit.DEBIT + credit_debit_platform = CreditDebit.DEBIT if is_income else CreditDebit.CREDIT + + # 1. Create credit event record first to get event_id + event_id = str(XID()) + + # 2. Update user account + if is_income: + user_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.USER, + owner_id=user_id, + amount_details={credit_type: abs_amount}, + event_id=event_id, + ) + else: + # Deduct the credits using deduction_in_session + # For adjustment, we don't check if the user has enough credits + # It can be positive or negative + user_account = await CreditAccount.deduction_in_session( + session=session, + owner_type=OwnerType.USER, + owner_id=user_id, + credit_type=credit_type, + amount=abs_amount, + event_id=event_id, + ) + + # 3. Update platform adjustment account + if is_income: + platform_account = await CreditAccount.deduction_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_ADJUSTMENT, + credit_type=credit_type, + amount=abs_amount, + event_id=event_id, + ) + else: + platform_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_ADJUSTMENT, + amount_details={credit_type: abs_amount}, + event_id=event_id, + ) + + # 4. Create credit event record + # Set the appropriate credit amount field based on credit type + free_amount = Decimal("0") + reward_amount = Decimal("0") + permanent_amount = Decimal("0") + + if credit_type == CreditType.FREE: + free_amount = abs_amount + elif credit_type == CreditType.REWARD: + reward_amount = abs_amount + elif credit_type == CreditType.PERMANENT: + permanent_amount = abs_amount + + event = CreditEventTable( + id=event_id, + event_type=EventType.ADJUSTMENT, + user_id=user_id, + upstream_type=UpstreamType.API, + upstream_tx_id=upstream_tx_id, + direction=direction, + account_id=user_account.id, + total_amount=abs_amount, + credit_type=credit_type, + credit_types=[credit_type], + balance_after=user_account.credits + + user_account.free_credits + + user_account.reward_credits, + base_amount=abs_amount, + base_original_amount=abs_amount, + base_free_amount=free_amount, + base_reward_amount=reward_amount, + base_permanent_amount=permanent_amount, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + agent_wallet_address=None, # No agent involved in adjustment + note=note, + ) + session.add(event) + await session.flush() + + # 4. 
Create credit transaction records + # 4.1 User account transaction + user_tx = CreditTransactionTable( + id=str(XID()), + account_id=user_account.id, + event_id=event_id, + tx_type=TransactionType.ADJUSTMENT, + credit_debit=credit_debit_user, + change_amount=abs_amount, + credit_type=credit_type, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + ) + session.add(user_tx) + + # 4.2 Platform adjustment account transaction + platform_tx = CreditTransactionTable( + id=str(XID()), + account_id=platform_account.id, + event_id=event_id, + tx_type=TransactionType.ADJUSTMENT, + credit_debit=credit_debit_platform, + change_amount=abs_amount, + credit_type=credit_type, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + ) + session.add(platform_tx) + + # Commit all changes + await session.commit() + + return user_account + + +async def update_daily_quota( + session: AsyncSession, + user_id: str, + free_quota: Optional[Decimal] = None, + refill_amount: Optional[Decimal] = None, + upstream_tx_id: str = "", + note: str = "", +) -> CreditAccount: + """ + Update the daily quota and refill amount of a user's credit account. + + Args: + session: Async session to use for database operations + user_id: ID of the user to update + free_quota: Optional new daily quota value + refill_amount: Optional amount to refill hourly, not exceeding free_quota + upstream_tx_id: ID of the upstream transaction (for logging purposes) + note: Explanation for changing the daily quota + + Returns: + Updated user credit account + """ + return await CreditAccount.update_daily_quota( + session, user_id, free_quota, refill_amount, upstream_tx_id, note + ) + + +async def list_credit_events_by_user( + session: AsyncSession, + user_id: str, + direction: Optional[Direction] = None, + cursor: Optional[str] = None, + limit: int = 20, + event_type: Optional[EventType] = None, +) -> Tuple[List[CreditEvent], Optional[str], bool]: + """ + List credit events for a user account with cursor pagination. + + Args: + session: Async database session. + user_id: The ID of the user. + direction: The direction of the events (INCOME or EXPENSE). + cursor: The ID of the last event from the previous page. + limit: Maximum number of events to return per page. + event_type: Optional filter for specific event type. + + Returns: + A tuple containing: + - A list of CreditEvent models. + - The cursor for the next page (ID of the last event in the list). + - A boolean indicating if there are more events available. + """ + # 1. Find the account for the owner + account = await CreditAccount.get_in_session(session, OwnerType.USER, user_id) + if not account: + # Decide if returning empty or raising error is better. Empty list seems reasonable. + # Or raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"{owner_type.value.capitalize()} account not found") + return [], None, False + + # 2. Build the query + stmt = ( + select(CreditEventTable) + .where(CreditEventTable.account_id == account.id) + .order_by(desc(CreditEventTable.id)) + .limit(limit + 1) # Fetch one extra to check if there are more + ) + + # 3. Apply optional filter if provided + if direction: + stmt = stmt.where(CreditEventTable.direction == direction.value) + if event_type: + stmt = stmt.where(CreditEventTable.event_type == event_type.value) + + # 4. Apply cursor filter if provided + if cursor: + stmt = stmt.where(CreditEventTable.id < cursor) + + # 5. 
Execute query + result = await session.execute(stmt) + events_data = result.scalars().all() + + # 6. Determine pagination details + has_more = len(events_data) > limit + events_to_return = events_data[:limit] # Slice to the requested limit + + next_cursor = events_to_return[-1].id if events_to_return and has_more else None + + # 7. Convert to Pydantic models + events_models = [CreditEvent.model_validate(event) for event in events_to_return] + + return events_models, next_cursor, has_more + + +async def list_credit_events( + session: AsyncSession, + direction: Optional[Direction] = Direction.EXPENSE, + cursor: Optional[str] = None, + limit: int = 20, + event_type: Optional[EventType] = None, + start_at: Optional[datetime] = None, + end_at: Optional[datetime] = None, +) -> Tuple[List[CreditEvent], Optional[str], bool]: + """ + List all credit events with cursor pagination. + + Args: + session: Async database session. + direction: The direction of the events (INCOME or EXPENSE). Default is EXPENSE. + cursor: The ID of the last event from the previous page. + limit: Maximum number of events to return per page. + event_type: Optional filter for specific event type. + start_at: Optional start datetime to filter events by created_at. + end_at: Optional end datetime to filter events by created_at. + + Returns: + A tuple containing: + - A list of CreditEvent models. + - The cursor for the next page (ID of the last event in the list). + - A boolean indicating if there are more events available. + """ + # Build the query + stmt = ( + select(CreditEventTable) + .order_by(CreditEventTable.id) # Ascending order as required + .limit(limit + 1) # Fetch one extra to check if there are more + ) + + # Apply direction filter (default is EXPENSE) + if direction: + stmt = stmt.where(CreditEventTable.direction == direction.value) + + # Apply optional event_type filter if provided + if event_type: + stmt = stmt.where(CreditEventTable.event_type == event_type.value) + + # Apply datetime filters if provided + if start_at: + stmt = stmt.where(CreditEventTable.created_at >= start_at) + if end_at: + stmt = stmt.where(CreditEventTable.created_at < end_at) + + # Apply cursor filter if provided + if cursor: + stmt = stmt.where(CreditEventTable.id > cursor) # Using > for ascending order + + # Execute query + result = await session.execute(stmt) + events_data = result.scalars().all() + + # Determine pagination details + has_more = len(events_data) > limit + events_to_return = events_data[:limit] # Slice to the requested limit + + # always return a cursor even there is no next page + next_cursor = events_to_return[-1].id if events_to_return else None + + # Convert to Pydantic models + events_models = [CreditEvent.model_validate(event) for event in events_to_return] + + return events_models, next_cursor, has_more + + +async def list_fee_events_by_agent( + session: AsyncSession, + agent_id: str, + cursor: Optional[str] = None, + limit: int = 20, +) -> Tuple[List[CreditEvent], Optional[str], bool]: + """ + List fee events for an agent with cursor pagination. + These events represent income for the agent from users' expenses. + + Args: + session: Async database session. + agent_id: The ID of the agent. + cursor: The ID of the last event from the previous page. + limit: Maximum number of events to return per page. + + Returns: + A tuple containing: + - A list of CreditEvent models. + - The cursor for the next page (ID of the last event in the list). + - A boolean indicating if there are more events available. + """ + # 1. 
Find the account for the agent + agent_account = await CreditAccount.get_in_session( + session, OwnerType.AGENT, agent_id + ) + if not agent_account: + return [], None, False + + # 2. Build the query to find events where fee_agent_amount > 0 and fee_agent_account = agent_account.id + stmt = ( + select(CreditEventTable) + .where(CreditEventTable.fee_agent_account == agent_account.id) + .where(CreditEventTable.fee_agent_amount > 0) + .order_by(desc(CreditEventTable.id)) + .limit(limit + 1) # Fetch one extra to check if there are more + ) + + # 3. Apply cursor filter if provided + if cursor: + stmt = stmt.where(CreditEventTable.id < cursor) + + # 4. Execute query + result = await session.execute(stmt) + events_data = result.scalars().all() + + # 5. Determine pagination details + has_more = len(events_data) > limit + events_to_return = events_data[:limit] # Slice to the requested limit + + next_cursor = events_to_return[-1].id if events_to_return and has_more else None + + # 6. Convert to Pydantic models + events_models = [CreditEvent.model_validate(event) for event in events_to_return] + + return events_models, next_cursor, has_more + + +async def fetch_credit_event_by_upstream_tx_id( + session: AsyncSession, + upstream_tx_id: str, +) -> CreditEvent: + """ + Fetch a credit event by its upstream transaction ID. + + Args: + session: Async database session. + upstream_tx_id: ID of the upstream transaction. + + Returns: + The credit event if found. + + Raises: + HTTPException: If the credit event is not found. + """ + # Build the query to find the event by upstream_tx_id + stmt = select(CreditEventTable).where( + CreditEventTable.upstream_tx_id == upstream_tx_id + ) + + # Execute query + result = await session.scalar(stmt) + + # Raise 404 if not found + if not result: + raise HTTPException( + status_code=404, + detail=f"Credit event with upstream_tx_id '{upstream_tx_id}' not found", + ) + + # Convert to Pydantic model and return + return CreditEvent.model_validate(result) + + +async def fetch_credit_event_by_id( + session: AsyncSession, + event_id: str, +) -> CreditEvent: + """ + Fetch a credit event by its ID. + + Args: + session: Async database session. + event_id: ID of the credit event. + + Returns: + The credit event if found. + + Raises: + HTTPException: If the credit event is not found. + """ + # Build the query to find the event by ID + stmt = select(CreditEventTable).where(CreditEventTable.id == event_id) + + # Execute query + result = await session.scalar(stmt) + + # Raise 404 if not found + if not result: + raise HTTPException( + status_code=404, + detail=f"Credit event with ID '{event_id}' not found", + ) + + # Convert to Pydantic model and return + return CreditEvent.model_validate(result) + + +async def expense_message( + session: AsyncSession, + user_id: str, + message_id: str, + start_message_id: str, + base_llm_amount: Decimal, + agent: Agent, +) -> CreditEvent: + """ + Deduct credits from a user account for message expenses. + Don't forget to commit the session after calling this function. 
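+
+    Example (illustrative; assumes an open session and a loaded ``agent``)::
+
+        async with get_session() as session:
+            event = await expense_message(
+                session,
+                user_id="user-123",
+                message_id="msg-2",
+                start_message_id="msg-1",
+                base_llm_amount=Decimal("0.1234"),
+                agent=agent,
+            )
+            await session.commit()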
+
+    Args:
+        session: Async session to use for database operations
+        user_id: ID of the user to deduct credits from
+        message_id: ID of the message that incurred the expense
+        start_message_id: ID of the starting message in a conversation
+        base_llm_amount: Amount of LLM costs
+        agent: Agent that handled the message
+
+    Returns:
+        CreditEvent: The created credit event
+    """
+    # Check for idempotency - prevent duplicate transactions
+    await CreditEvent.check_upstream_tx_id_exists(
+        session, UpstreamType.EXECUTOR, message_id
+    )
+
+    # Ensure base_llm_amount has 4 decimal places
+    base_llm_amount = base_llm_amount.quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+    if base_llm_amount < Decimal("0"):
+        raise ValueError("Base LLM amount must be non-negative")
+
+    # Get payment settings
+    payment_settings = await AppSetting.payment()
+
+    # Calculate amount with exact 4 decimal places
+    base_original_amount = base_llm_amount
+    base_amount = base_original_amount
+    fee_platform_amount = (
+        base_amount * payment_settings.fee_platform_percentage / Decimal("100")
+    ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    fee_agent_amount = Decimal("0")
+    if agent.fee_percentage and user_id != agent.owner:
+        fee_agent_amount = (
+            (base_amount + fee_platform_amount) * agent.fee_percentage / Decimal("100")
+        ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    total_amount = (base_amount + fee_platform_amount + fee_agent_amount).quantize(
+        FOURPLACES, rounding=ROUND_HALF_UP
+    )
+
+    # 1. Create credit event record first to get event_id
+    event_id = str(XID())
+
+    # 2. Update user account - deduct credits
+    user_account, details = await CreditAccount.expense_in_session(
+        session=session,
+        owner_type=OwnerType.USER,
+        owner_id=user_id,
+        amount=total_amount,
+        event_id=event_id,
+    )
+
+    # If using free credits, add to agent's free_income_daily
+    if details.get(CreditType.FREE):
+        from intentkit.models.agent_data import AgentQuota
+
+        await AgentQuota.add_free_income_in_session(
+            session=session, id=agent.id, amount=details.get(CreditType.FREE)
+        )
+
+    # 3.
Calculate detailed amounts for fees based on user payment details + # Set the appropriate credit amount field based on credit type + free_amount = details.get(CreditType.FREE, Decimal("0")) + reward_amount = details.get(CreditType.REWARD, Decimal("0")) + permanent_amount = details.get(CreditType.PERMANENT, Decimal("0")) + if CreditType.PERMANENT in details: + credit_type = CreditType.PERMANENT + elif CreditType.REWARD in details: + credit_type = CreditType.REWARD + else: + credit_type = CreditType.FREE + + # Calculate fee_platform amounts by credit type + fee_platform_free_amount = Decimal("0") + fee_platform_reward_amount = Decimal("0") + fee_platform_permanent_amount = Decimal("0") + + if fee_platform_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_platform_free_amount = ( + free_amount * fee_platform_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_platform_reward_amount = ( + reward_amount * fee_platform_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_platform_amount + fee_platform_permanent_amount = ( + fee_platform_amount - fee_platform_free_amount - fee_platform_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate fee_agent amounts by credit type + fee_agent_free_amount = Decimal("0") + fee_agent_reward_amount = Decimal("0") + fee_agent_permanent_amount = Decimal("0") + + if fee_agent_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_agent_free_amount = ( + free_amount * fee_agent_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_agent_reward_amount = ( + reward_amount * fee_agent_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_agent_amount + fee_agent_permanent_amount = ( + fee_agent_amount - fee_agent_free_amount - fee_agent_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate base amounts by credit type using subtraction method + # This ensures that: permanent_amount = base_permanent_amount + fee_platform_permanent_amount + fee_agent_permanent_amount + base_free_amount = free_amount - fee_platform_free_amount - fee_agent_free_amount + base_reward_amount = ( + reward_amount - fee_platform_reward_amount - fee_agent_reward_amount + ) + base_permanent_amount = ( + permanent_amount - fee_platform_permanent_amount - fee_agent_permanent_amount + ) + + # 4. 
Update fee account - add credits with detailed amounts + message_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_MESSAGE, + amount_details={ + CreditType.FREE: base_free_amount, + CreditType.REWARD: base_reward_amount, + CreditType.PERMANENT: base_permanent_amount, + }, + event_id=event_id, + ) + platform_fee_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_FEE, + amount_details={ + CreditType.FREE: fee_platform_free_amount, + CreditType.REWARD: fee_platform_reward_amount, + CreditType.PERMANENT: fee_platform_permanent_amount, + }, + event_id=event_id, + ) + if fee_agent_amount > 0: + agent_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.AGENT, + owner_id=agent.id, + amount_details={ + CreditType.FREE: fee_agent_free_amount, + CreditType.REWARD: fee_agent_reward_amount, + CreditType.PERMANENT: fee_agent_permanent_amount, + }, + event_id=event_id, + ) + + # Get agent wallet address + agent_data = await AgentData.get(agent.id) + agent_wallet_address = agent_data.evm_wallet_address if agent_data else None + + event = CreditEventTable( + id=event_id, + account_id=user_account.id, + event_type=EventType.MESSAGE, + user_id=user_id, + upstream_type=UpstreamType.EXECUTOR, + upstream_tx_id=message_id, + direction=Direction.EXPENSE, + agent_id=agent.id, + message_id=message_id, + start_message_id=start_message_id, + model=agent.model, + total_amount=total_amount, + credit_type=credit_type, + credit_types=list(details.keys()), + balance_after=user_account.credits + + user_account.free_credits + + user_account.reward_credits, + base_amount=base_amount, + base_original_amount=base_original_amount, + base_free_amount=base_free_amount, + base_reward_amount=base_reward_amount, + base_permanent_amount=base_permanent_amount, + base_llm_amount=base_llm_amount, + fee_platform_amount=fee_platform_amount, + fee_platform_free_amount=fee_platform_free_amount, + fee_platform_reward_amount=fee_platform_reward_amount, + fee_platform_permanent_amount=fee_platform_permanent_amount, + fee_agent_amount=fee_agent_amount, + fee_agent_account=agent_account.id if fee_agent_amount > 0 else None, + fee_agent_free_amount=fee_agent_free_amount, + fee_agent_reward_amount=fee_agent_reward_amount, + fee_agent_permanent_amount=fee_agent_permanent_amount, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + agent_wallet_address=agent_wallet_address, + ) + session.add(event) + await session.flush() + + # 4. 
Create credit transaction records + # 4.1 User account transaction (debit) + user_tx = CreditTransactionTable( + id=str(XID()), + account_id=user_account.id, + event_id=event_id, + tx_type=TransactionType.PAY, + credit_debit=CreditDebit.DEBIT, + change_amount=total_amount, + credit_type=credit_type, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + ) + session.add(user_tx) + + # 4.2 Message account transaction (credit) + message_tx = CreditTransactionTable( + id=str(XID()), + account_id=message_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_BASE_LLM, + credit_debit=CreditDebit.CREDIT, + change_amount=base_amount, + credit_type=credit_type, + free_amount=base_free_amount, + reward_amount=base_reward_amount, + permanent_amount=base_permanent_amount, + ) + session.add(message_tx) + + # 4.3 Platform fee account transaction (credit) + platform_tx = CreditTransactionTable( + id=str(XID()), + account_id=platform_fee_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_PLATFORM, + credit_debit=CreditDebit.CREDIT, + change_amount=fee_platform_amount, + credit_type=credit_type, + free_amount=fee_platform_free_amount, + reward_amount=fee_platform_reward_amount, + permanent_amount=fee_platform_permanent_amount, + ) + session.add(platform_tx) + + # 4.4 Agent fee account transaction (credit) + if fee_agent_amount > 0: + agent_tx = CreditTransactionTable( + id=str(XID()), + account_id=agent_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_AGENT, + credit_debit=CreditDebit.CREDIT, + change_amount=fee_agent_amount, + credit_type=credit_type, + free_amount=fee_agent_free_amount, + reward_amount=fee_agent_reward_amount, + permanent_amount=fee_agent_permanent_amount, + ) + session.add(agent_tx) + + await session.refresh(event) + + return CreditEvent.model_validate(event) + + +class SkillCost(BaseModel): + total_amount: Decimal + base_amount: Decimal + base_discount_amount: Decimal + base_original_amount: Decimal + base_skill_amount: Decimal + fee_platform_amount: Decimal + fee_dev_user: str + fee_dev_user_type: OwnerType + fee_dev_amount: Decimal + fee_agent_amount: Decimal + + +async def skill_cost( + skill_name: str, + user_id: str, + agent: Agent, +) -> SkillCost: + """ + Calculate the cost for a skill call including all fees. 
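+
+    For example, with illustrative (not configured) percentages of a 10%
+    platform fee, a 10% dev fee and a 2% agent fee on a skill priced at
+    1.0000: fee_platform = 0.1000, fee_dev = 0.1000,
+    fee_agent = (1.0000 + 0.1000 + 0.1000) * 2 / 100 = 0.0240,
+    and total_amount = 1.2240.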
+
+    Args:
+        skill_name: Name of the skill
+        user_id: ID of the user making the skill call
+        agent: Agent using the skill
+
+    Returns:
+        SkillCost: Object containing all cost components
+    """
+
+    skill = await Skill.get(skill_name)
+    if not skill:
+        raise ValueError(f"The price of {skill_name} is not set yet")
+    agent_skill_config = agent.skills.get(skill.category)
+    if (
+        agent_skill_config
+        and agent_skill_config.get("api_key_provider") == "agent_owner"
+    ):
+        base_skill_amount = skill.price_self_key.quantize(
+            FOURPLACES, rounding=ROUND_HALF_UP
+        )
+    else:
+        base_skill_amount = skill.price.quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    # Get payment settings
+    payment_settings = await AppSetting.payment()
+
+    # Calculate fee
+    if skill.author:
+        fee_dev_user = skill.author
+        fee_dev_user_type = OwnerType.USER
+    else:
+        fee_dev_user = DEFAULT_PLATFORM_ACCOUNT_DEV
+        fee_dev_user_type = OwnerType.PLATFORM
+    fee_dev_percentage = payment_settings.fee_dev_percentage
+
+    if base_skill_amount < Decimal("0"):
+        raise ValueError("Base skill amount must be non-negative")
+
+    # Calculate amount with exact 4 decimal places
+    base_original_amount = base_skill_amount
+    base_amount = base_original_amount
+    fee_platform_amount = (
+        base_amount * payment_settings.fee_platform_percentage / Decimal("100")
+    ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    fee_dev_amount = (base_amount * fee_dev_percentage / Decimal("100")).quantize(
+        FOURPLACES, rounding=ROUND_HALF_UP
+    )
+    fee_agent_amount = Decimal("0")
+    if agent.fee_percentage and user_id != agent.owner:
+        fee_agent_amount = (
+            (base_amount + fee_platform_amount + fee_dev_amount)
+            * agent.fee_percentage
+            / Decimal("100")
+        ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    total_amount = (
+        base_amount + fee_platform_amount + fee_dev_amount + fee_agent_amount
+    ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+    # Return the SkillCost object with all calculated values
+    return SkillCost(
+        total_amount=total_amount,
+        base_amount=base_amount,
+        base_discount_amount=Decimal("0"),  # No discount in this implementation
+        base_original_amount=base_original_amount,
+        base_skill_amount=base_skill_amount,
+        fee_platform_amount=fee_platform_amount,
+        fee_dev_user=fee_dev_user,
+        fee_dev_user_type=fee_dev_user_type,
+        fee_dev_amount=fee_dev_amount,
+        fee_agent_amount=fee_agent_amount,
+    )
+
+
+async def expense_skill(
+    session: AsyncSession,
+    user_id: str,
+    message_id: str,
+    start_message_id: str,
+    skill_call_id: str,
+    skill_name: str,
+    agent: Agent,
+) -> CreditEvent:
+    """
+    Deduct credits from a user account for skill call expenses.
+    Don't forget to commit the session after calling this function.
+
+    Args:
+        session: Async session to use for database operations
+        user_id: ID of the user to deduct credits from
+        message_id: ID of the message that incurred the expense
+        start_message_id: ID of the starting message in a conversation
+        skill_call_id: ID of the skill call
+        skill_name: Name of the skill being used
+        agent: Agent using the skill
+
+    Returns:
+        CreditEvent: The created credit event
+    """
+    # Check for idempotency - prevent duplicate transactions
+    upstream_tx_id = f"{message_id}_{skill_call_id}"
+    await CreditEvent.check_upstream_tx_id_exists(
+        session, UpstreamType.EXECUTOR, upstream_tx_id
+    )
+    logger.info(f"[{agent.id}] skill payment {skill_name}")
+
+    # Calculate skill cost using the skill_cost function
+    skill_cost_info = await skill_cost(skill_name, user_id, agent)
+
+    # 1.
Create credit event record first to get event_id + event_id = str(XID()) + + # 2. Update user account - deduct credits + user_account, details = await CreditAccount.expense_in_session( + session=session, + owner_type=OwnerType.USER, + owner_id=user_id, + amount=skill_cost_info.total_amount, + event_id=event_id, + ) + + # If using free credits, add to agent's free_income_daily + if CreditType.FREE in details: + from intentkit.models.agent_data import AgentQuota + + await AgentQuota.add_free_income_in_session( + session=session, id=agent.id, amount=details[CreditType.FREE] + ) + + # 3. Calculate detailed amounts for fees + # Set the appropriate credit amount field based on credit type + free_amount = details.get(CreditType.FREE, Decimal("0")) + reward_amount = details.get(CreditType.REWARD, Decimal("0")) + permanent_amount = details.get(CreditType.PERMANENT, Decimal("0")) + if CreditType.PERMANENT in details: + credit_type = CreditType.PERMANENT + elif CreditType.REWARD in details: + credit_type = CreditType.REWARD + else: + credit_type = CreditType.FREE + + # Calculate fee_platform amounts by credit type + fee_platform_free_amount = Decimal("0") + fee_platform_reward_amount = Decimal("0") + fee_platform_permanent_amount = Decimal("0") + + if skill_cost_info.fee_platform_amount > Decimal( + "0" + ) and skill_cost_info.total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_platform_free_amount = ( + free_amount + * skill_cost_info.fee_platform_amount + / skill_cost_info.total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_platform_reward_amount = ( + reward_amount + * skill_cost_info.fee_platform_amount + / skill_cost_info.total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_platform_amount + fee_platform_permanent_amount = ( + skill_cost_info.fee_platform_amount + - fee_platform_free_amount + - fee_platform_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate fee_agent amounts by credit type + fee_agent_free_amount = Decimal("0") + fee_agent_reward_amount = Decimal("0") + fee_agent_permanent_amount = Decimal("0") + + if skill_cost_info.fee_agent_amount > Decimal( + "0" + ) and skill_cost_info.total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_agent_free_amount = ( + free_amount + * skill_cost_info.fee_agent_amount + / skill_cost_info.total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_agent_reward_amount = ( + reward_amount + * skill_cost_info.fee_agent_amount + / skill_cost_info.total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_agent_amount + fee_agent_permanent_amount = ( + skill_cost_info.fee_agent_amount + - fee_agent_free_amount + - fee_agent_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate fee_dev amounts by credit type + fee_dev_free_amount = Decimal("0") + fee_dev_reward_amount = Decimal("0") + fee_dev_permanent_amount = Decimal("0") + + if skill_cost_info.fee_dev_amount > Decimal( + "0" + ) and skill_cost_info.total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_dev_free_amount = ( + free_amount + * skill_cost_info.fee_dev_amount + / skill_cost_info.total_amount + 
).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_dev_reward_amount = ( + reward_amount + * skill_cost_info.fee_dev_amount + / skill_cost_info.total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_dev_amount + fee_dev_permanent_amount = ( + skill_cost_info.fee_dev_amount - fee_dev_free_amount - fee_dev_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate base amounts by credit type using subtraction method + base_free_amount = ( + free_amount + - fee_platform_free_amount + - fee_agent_free_amount + - fee_dev_free_amount + ) + + base_reward_amount = ( + reward_amount + - fee_platform_reward_amount + - fee_agent_reward_amount + - fee_dev_reward_amount + ) + + base_permanent_amount = ( + permanent_amount + - fee_platform_permanent_amount + - fee_agent_permanent_amount + - fee_dev_permanent_amount + ) + + # 4. Update fee account - add credits + skill_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_SKILL, + amount_details={ + CreditType.FREE: base_free_amount, + CreditType.REWARD: base_reward_amount, + CreditType.PERMANENT: base_permanent_amount, + }, + event_id=event_id, + ) + platform_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_FEE, + amount_details={ + CreditType.FREE: fee_platform_free_amount, + CreditType.REWARD: fee_platform_reward_amount, + CreditType.PERMANENT: fee_platform_permanent_amount, + }, + event_id=event_id, + ) + if skill_cost_info.fee_dev_amount > 0: + dev_account = await CreditAccount.income_in_session( + session=session, + owner_type=skill_cost_info.fee_dev_user_type, + owner_id=skill_cost_info.fee_dev_user, + amount_details={ + CreditType.FREE: fee_dev_free_amount, + CreditType.REWARD: fee_dev_reward_amount, + CreditType.PERMANENT: fee_dev_permanent_amount, + }, + event_id=event_id, + ) + if skill_cost_info.fee_agent_amount > 0: + agent_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.AGENT, + owner_id=agent.id, + amount_details={ + CreditType.FREE: fee_agent_free_amount, + CreditType.REWARD: fee_agent_reward_amount, + CreditType.PERMANENT: fee_agent_permanent_amount, + }, + event_id=event_id, + ) + + # 5. 
Create credit event record
+
+    # Get agent wallet address
+    agent_data = await AgentData.get(agent.id)
+    agent_wallet_address = agent_data.evm_wallet_address if agent_data else None
+
+    event = CreditEventTable(
+        id=event_id,
+        account_id=user_account.id,
+        event_type=EventType.SKILL_CALL,
+        user_id=user_id,
+        upstream_type=UpstreamType.EXECUTOR,
+        upstream_tx_id=upstream_tx_id,
+        direction=Direction.EXPENSE,
+        agent_id=agent.id,
+        message_id=message_id,
+        start_message_id=start_message_id,
+        skill_call_id=skill_call_id,
+        skill_name=skill_name,
+        total_amount=skill_cost_info.total_amount,
+        credit_type=credit_type,
+        credit_types=list(details.keys()),
+        balance_after=user_account.credits
+        + user_account.free_credits
+        + user_account.reward_credits,
+        base_amount=skill_cost_info.base_amount,
+        base_original_amount=skill_cost_info.base_original_amount,
+        base_skill_amount=skill_cost_info.base_skill_amount,
+        base_free_amount=base_free_amount,
+        base_reward_amount=base_reward_amount,
+        base_permanent_amount=base_permanent_amount,
+        fee_platform_amount=skill_cost_info.fee_platform_amount,
+        fee_platform_free_amount=fee_platform_free_amount,
+        fee_platform_reward_amount=fee_platform_reward_amount,
+        fee_platform_permanent_amount=fee_platform_permanent_amount,
+        fee_agent_amount=skill_cost_info.fee_agent_amount,
+        fee_agent_account=agent_account.id
+        if skill_cost_info.fee_agent_amount > 0
+        else None,
+        fee_agent_free_amount=fee_agent_free_amount,
+        fee_agent_reward_amount=fee_agent_reward_amount,
+        fee_agent_permanent_amount=fee_agent_permanent_amount,
+        fee_dev_amount=skill_cost_info.fee_dev_amount,
+        fee_dev_account=dev_account.id if skill_cost_info.fee_dev_amount > 0 else None,
+        fee_dev_free_amount=fee_dev_free_amount,
+        fee_dev_reward_amount=fee_dev_reward_amount,
+        fee_dev_permanent_amount=fee_dev_permanent_amount,
+        free_amount=free_amount,
+        reward_amount=reward_amount,
+        permanent_amount=permanent_amount,
+        agent_wallet_address=agent_wallet_address,
+    )
+    session.add(event)
+    await session.flush()
+
+    # 4.
Create credit transaction records + # 4.1 User account transaction (debit) + user_tx = CreditTransactionTable( + id=str(XID()), + account_id=user_account.id, + event_id=event_id, + tx_type=TransactionType.PAY, + credit_debit=CreditDebit.DEBIT, + change_amount=skill_cost_info.total_amount, + credit_type=credit_type, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + ) + session.add(user_tx) + + # 4.2 Skill account transaction (credit) + skill_tx = CreditTransactionTable( + id=str(XID()), + account_id=skill_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_BASE_SKILL, + credit_debit=CreditDebit.CREDIT, + change_amount=skill_cost_info.base_amount, + credit_type=credit_type, + free_amount=base_free_amount, + reward_amount=base_reward_amount, + permanent_amount=base_permanent_amount, + ) + session.add(skill_tx) + + # 4.3 Platform fee account transaction (credit) + platform_tx = CreditTransactionTable( + id=str(XID()), + account_id=platform_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_PLATFORM, + credit_debit=CreditDebit.CREDIT, + change_amount=skill_cost_info.fee_platform_amount, + credit_type=credit_type, + free_amount=fee_platform_free_amount, + reward_amount=fee_platform_reward_amount, + permanent_amount=fee_platform_permanent_amount, + ) + session.add(platform_tx) + + # 4.4 Dev user transaction (credit) + if skill_cost_info.fee_dev_amount > 0: + dev_tx = CreditTransactionTable( + id=str(XID()), + account_id=dev_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_DEV, + credit_debit=CreditDebit.CREDIT, + change_amount=skill_cost_info.fee_dev_amount, + credit_type=CreditType.REWARD, + free_amount=fee_dev_free_amount, + reward_amount=fee_dev_reward_amount, + permanent_amount=fee_dev_permanent_amount, + ) + session.add(dev_tx) + + # 4.5 Agent fee account transaction (credit) + if skill_cost_info.fee_agent_amount > 0: + agent_tx = CreditTransactionTable( + id=str(XID()), + account_id=agent_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_AGENT, + credit_debit=CreditDebit.CREDIT, + change_amount=skill_cost_info.fee_agent_amount, + credit_type=credit_type, + free_amount=fee_agent_free_amount, + reward_amount=fee_agent_reward_amount, + permanent_amount=fee_agent_permanent_amount, + ) + session.add(agent_tx) + + # Commit all changes + await session.refresh(event) + + return CreditEvent.model_validate(event) + + +async def refill_free_credits_for_account( + session: AsyncSession, + account: CreditAccount, +): + """ + Refill free credits for a single account based on its refill_amount and free_quota. + + Args: + session: Async session to use for database operations + account: The credit account to refill + """ + # Skip if refill_amount is zero or free_credits already equals or exceeds free_quota + if ( + account.refill_amount <= Decimal("0") + or account.free_credits >= account.free_quota + ): + return + + # Calculate the amount to add + # If adding refill_amount would exceed free_quota, only add what's needed to reach free_quota + amount_to_add = min( + account.refill_amount, account.free_quota - account.free_credits + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if amount_to_add <= Decimal("0"): + return # Nothing to add + + # 1. Create credit event record first to get event_id + event_id = str(XID()) + + # 2. 
Update user account - add free credits + updated_account = await CreditAccount.income_in_session( + session=session, + owner_type=account.owner_type, + owner_id=account.owner_id, + amount_details={CreditType.FREE: amount_to_add}, + event_id=event_id, + ) + + # 3. Update platform refill account - deduct credits + platform_account = await CreditAccount.deduction_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_REFILL, + credit_type=CreditType.FREE, + amount=amount_to_add, + event_id=event_id, + ) + + # 4. Create credit event record + event = CreditEventTable( + id=event_id, + account_id=updated_account.id, + event_type=EventType.REFILL, + user_id=account.owner_id, + upstream_type=UpstreamType.SCHEDULER, + upstream_tx_id=str(XID()), + direction=Direction.INCOME, + credit_type=CreditType.FREE, + credit_types=[CreditType.FREE], + total_amount=amount_to_add, + balance_after=updated_account.credits + + updated_account.free_credits + + updated_account.reward_credits, + base_amount=amount_to_add, + base_original_amount=amount_to_add, + base_free_amount=amount_to_add, + base_reward_amount=Decimal("0"), + base_permanent_amount=Decimal("0"), + free_amount=amount_to_add, # Set free_amount since this is a free credit refill + reward_amount=Decimal("0"), # No reward credits involved + permanent_amount=Decimal("0"), # No permanent credits involved + agent_wallet_address=None, # No agent involved in refill + note=f"Hourly free credits refill of {amount_to_add}", + ) + session.add(event) + await session.flush() + + # 4. Create credit transaction records + # 4.1 User account transaction (credit) + user_tx = CreditTransactionTable( + id=str(XID()), + account_id=updated_account.id, + event_id=event_id, + tx_type=TransactionType.REFILL, + credit_debit=CreditDebit.CREDIT, + change_amount=amount_to_add, + credit_type=CreditType.FREE, + free_amount=amount_to_add, + reward_amount=Decimal("0"), + permanent_amount=Decimal("0"), + ) + session.add(user_tx) + + # 4.2 Platform refill account transaction (debit) + platform_tx = CreditTransactionTable( + id=str(XID()), + account_id=platform_account.id, + event_id=event_id, + tx_type=TransactionType.REFILL, + credit_debit=CreditDebit.DEBIT, + change_amount=amount_to_add, + credit_type=CreditType.FREE, + free_amount=amount_to_add, + reward_amount=Decimal("0"), + permanent_amount=Decimal("0"), + ) + session.add(platform_tx) + + # Commit changes + await session.commit() + logger.info( + f"Refilled {amount_to_add} free credits for account {account.owner_type} {account.owner_id}" + ) + + +async def refill_all_free_credits(): + """ + Find all eligible accounts and refill their free credits. + Eligible accounts are those with refill_amount > 0 and free_credits < free_quota. 
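+    For example, an account with free_quota=100, free_credits=97 and
+    refill_amount=10 receives min(10, 100 - 97) = 3 free credits, topping it
+    up to the quota without exceeding it.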
+    """
+    async with get_session() as session:
+        # Find all accounts that need refilling
+        stmt = select(CreditAccountTable).where(
+            CreditAccountTable.refill_amount > 0,
+            CreditAccountTable.free_credits < CreditAccountTable.free_quota,
+        )
+        result = await session.execute(stmt)
+        accounts_data = result.scalars().all()
+
+        # Convert to Pydantic models
+        accounts = [CreditAccount.model_validate(account) for account in accounts_data]
+
+    # Process each account
+    refilled_count = 0
+    for account in accounts:
+        async with get_session() as session:
+            try:
+                await refill_free_credits_for_account(session, account)
+                refilled_count += 1
+            except Exception as e:
+                logger.error(f"Error refilling account {account.id}: {str(e)}")
+                # Continue with other accounts even if one fails
+                continue
+    logger.info(f"Refilled {refilled_count} accounts")
+
+
+async def expense_summarize(
+    session: AsyncSession,
+    user_id: str,
+    message_id: str,
+    start_message_id: str,
+    base_llm_amount: Decimal,
+    agent: Agent,
+) -> CreditEvent:
+    """
+    Deduct credits from a user account for memory/summarize expenses.
+    Don't forget to commit the session after calling this function.
+
+    Args:
+        session: Async session to use for database operations
+        user_id: ID of the user to deduct credits from
+        message_id: ID of the message that incurred the expense
+        start_message_id: ID of the starting message in a conversation
+        base_llm_amount: Amount of LLM costs
+        agent: Agent instance
+
+    Returns:
+        CreditEvent: The created credit event
+    """
+    # Check for idempotency - prevent duplicate transactions
+    await CreditEvent.check_upstream_tx_id_exists(
+        session, UpstreamType.EXECUTOR, message_id
+    )
+
+    # Ensure base_llm_amount has 4 decimal places
+    base_llm_amount = base_llm_amount.quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+    if base_llm_amount < Decimal("0"):
+        raise ValueError("Base LLM amount must be non-negative")
+
+    # Get payment settings
+    payment_settings = await AppSetting.payment()
+
+    # Calculate amount with exact 4 decimal places
+    base_original_amount = base_llm_amount
+    base_amount = base_original_amount
+    fee_platform_amount = (
+        base_amount * payment_settings.fee_platform_percentage / Decimal("100")
+    ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    fee_agent_amount = Decimal("0")
+    if agent.fee_percentage and user_id != agent.owner:
+        fee_agent_amount = (
+            (base_amount + fee_platform_amount) * agent.fee_percentage / Decimal("100")
+        ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+    total_amount = (base_amount + fee_platform_amount + fee_agent_amount).quantize(
+        FOURPLACES, rounding=ROUND_HALF_UP
+    )
+
+    # 1. Create credit event record first to get event_id
+    event_id = str(XID())
+
+    # 2. Update user account - deduct credits
+    user_account, details = await CreditAccount.expense_in_session(
+        session=session,
+        owner_type=OwnerType.USER,
+        owner_id=user_id,
+        amount=total_amount,
+        event_id=event_id,
+    )
+
+    # If using free credits, add to agent's free_income_daily
+    if details.get(CreditType.FREE):
+        from intentkit.models.agent_data import AgentQuota
+
+        await AgentQuota.add_free_income_in_session(
+            session=session, id=agent.id, amount=details.get(CreditType.FREE)
+        )
+
+    # 3.
Calculate fee amounts by credit type before income_in_session calls + # Set the appropriate credit amount field based on credit type + free_amount = details.get(CreditType.FREE, Decimal("0")) + reward_amount = details.get(CreditType.REWARD, Decimal("0")) + permanent_amount = details.get(CreditType.PERMANENT, Decimal("0")) + + if CreditType.PERMANENT in details: + credit_type = CreditType.PERMANENT + elif CreditType.REWARD in details: + credit_type = CreditType.REWARD + else: + credit_type = CreditType.FREE + + # Calculate fee_platform amounts by credit type + fee_platform_free_amount = Decimal("0") + fee_platform_reward_amount = Decimal("0") + fee_platform_permanent_amount = Decimal("0") + + if fee_platform_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_platform_free_amount = ( + free_amount * fee_platform_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_platform_reward_amount = ( + reward_amount * fee_platform_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_platform_amount + fee_platform_permanent_amount = ( + fee_platform_amount - fee_platform_free_amount - fee_platform_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate fee_agent amounts by credit type + fee_agent_free_amount = Decimal("0") + fee_agent_reward_amount = Decimal("0") + fee_agent_permanent_amount = Decimal("0") + + if fee_agent_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_agent_free_amount = ( + free_amount * fee_agent_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_agent_reward_amount = ( + reward_amount * fee_agent_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_agent_amount + fee_agent_permanent_amount = ( + fee_agent_amount - fee_agent_free_amount - fee_agent_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate base amounts by credit type using subtraction method + base_free_amount = free_amount - fee_platform_free_amount - fee_agent_free_amount + + base_reward_amount = ( + reward_amount - fee_platform_reward_amount - fee_agent_reward_amount + ) + + base_permanent_amount = ( + permanent_amount - fee_platform_permanent_amount - fee_agent_permanent_amount + ) + + # 4. 
Update fee account - add credits + memory_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_MEMORY, + amount_details={ + CreditType.FREE: base_free_amount, + CreditType.REWARD: base_reward_amount, + CreditType.PERMANENT: base_permanent_amount, + }, + event_id=event_id, + ) + platform_fee_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.PLATFORM, + owner_id=DEFAULT_PLATFORM_ACCOUNT_FEE, + amount_details={ + CreditType.FREE: fee_platform_free_amount, + CreditType.REWARD: fee_platform_reward_amount, + CreditType.PERMANENT: fee_platform_permanent_amount, + }, + event_id=event_id, + ) + if fee_agent_amount > 0: + agent_account = await CreditAccount.income_in_session( + session=session, + owner_type=OwnerType.AGENT, + owner_id=agent.id, + amount_details={ + CreditType.FREE: fee_agent_free_amount, + CreditType.REWARD: fee_agent_reward_amount, + CreditType.PERMANENT: fee_agent_permanent_amount, + }, + event_id=event_id, + ) + + # 5. Create credit event record + + # Get agent wallet address + agent_data = await AgentData.get(agent.id) + agent_wallet_address = agent_data.evm_wallet_address if agent_data else None + + event = CreditEventTable( + id=event_id, + account_id=user_account.id, + event_type=EventType.MEMORY, + user_id=user_id, + upstream_type=UpstreamType.EXECUTOR, + upstream_tx_id=message_id, + direction=Direction.EXPENSE, + agent_id=agent.id, + message_id=message_id, + start_message_id=start_message_id, + model=agent.model, + total_amount=total_amount, + credit_type=credit_type, + credit_types=details.keys(), + balance_after=user_account.credits + + user_account.free_credits + + user_account.reward_credits, + base_amount=base_amount, + base_original_amount=base_original_amount, + base_llm_amount=base_llm_amount, + base_free_amount=base_free_amount, + base_reward_amount=base_reward_amount, + base_permanent_amount=base_permanent_amount, + fee_platform_amount=fee_platform_amount, + fee_platform_free_amount=fee_platform_free_amount, + fee_platform_reward_amount=fee_platform_reward_amount, + fee_platform_permanent_amount=fee_platform_permanent_amount, + fee_agent_amount=fee_agent_amount, + fee_agent_free_amount=fee_agent_free_amount, + fee_agent_reward_amount=fee_agent_reward_amount, + fee_agent_permanent_amount=fee_agent_permanent_amount, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + agent_wallet_address=agent_wallet_address, + ) + session.add(event) + + # 4. 
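The transaction records below form a balanced double-entry set (sketch,
+    # illustrative): one DEBIT against the user account and CREDITs that sum
+    # to the same total:
+    #     DEBIT  user      total_amount
+    #     CREDIT memory    base_amount
+    #     CREDIT platform  fee_platform_amount
+    #     CREDIT agent     fee_agent_amount   (only when > 0)
+    # so base_amount + fee_platform_amount + fee_agent_amount == total_amount.
+
+    # 4. 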
Create credit transaction records + # 4.1 User account transaction (debit) + user_tx = CreditTransactionTable( + id=str(XID()), + account_id=user_account.id, + event_id=event_id, + tx_type=TransactionType.PAY, + credit_debit=CreditDebit.DEBIT, + change_amount=total_amount, + credit_type=credit_type, + free_amount=free_amount, + reward_amount=reward_amount, + permanent_amount=permanent_amount, + ) + session.add(user_tx) + + # 4.2 Memory account transaction (credit) + memory_tx = CreditTransactionTable( + id=str(XID()), + account_id=memory_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_BASE_MEMORY, + credit_debit=CreditDebit.CREDIT, + change_amount=base_amount, + credit_type=credit_type, + free_amount=base_free_amount, + reward_amount=base_reward_amount, + permanent_amount=base_permanent_amount, + ) + session.add(memory_tx) + + # 4.3 Platform fee account transaction (credit) + platform_tx = CreditTransactionTable( + id=str(XID()), + account_id=platform_fee_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_PLATFORM, + credit_debit=CreditDebit.CREDIT, + change_amount=fee_platform_amount, + credit_type=credit_type, + free_amount=fee_platform_free_amount, + reward_amount=fee_platform_reward_amount, + permanent_amount=fee_platform_permanent_amount, + ) + session.add(platform_tx) + + # 4.4 Agent fee account transaction (credit) - only if there's an agent fee + if fee_agent_amount > 0: + agent_tx = CreditTransactionTable( + id=str(XID()), + account_id=agent_account.id, + event_id=event_id, + tx_type=TransactionType.RECEIVE_FEE_AGENT, + credit_debit=CreditDebit.CREDIT, + change_amount=fee_agent_amount, + credit_type=CreditType.REWARD, + free_amount=fee_agent_free_amount, + reward_amount=fee_agent_reward_amount, + permanent_amount=fee_agent_permanent_amount, + ) + session.add(agent_tx) + + # 5. Refresh session to get updated data + await session.refresh(user_account) + + # 6. Return credit event model + return CreditEvent.model_validate(event) diff --git a/intentkit/core/engine.py b/intentkit/core/engine.py new file mode 100644 index 00000000..91297578 --- /dev/null +++ b/intentkit/core/engine.py @@ -0,0 +1,916 @@ +"""AI Agent Management Module. + +This module provides functionality for initializing and executing AI agents. It handles: +- Agent initialization with LangChain +- Tool and skill management +- Agent execution and response handling +- Memory management with PostgreSQL +- Integration with CDP and Twitter + +The module uses a global cache to store initialized agents for better performance. 
+""" + +import importlib +import logging +import re +import textwrap +import time +import traceback +from datetime import datetime +from typing import Optional + +import sqlalchemy +from epyxid import XID +from fastapi import HTTPException +from langchain_core.messages import ( + BaseMessage, + HumanMessage, +) +from langchain_core.tools import BaseTool +from langgraph.errors import GraphRecursionError +from langgraph.graph.state import CompiledStateGraph +from langgraph.prebuilt import create_react_agent +from sqlalchemy import func, update +from sqlalchemy.exc import SQLAlchemyError + +from intentkit.abstracts.graph import AgentContext, AgentError, AgentState +from intentkit.config.config import config +from intentkit.core.credit import expense_message, expense_skill +from intentkit.core.node import PreModelNode, post_model_node +from intentkit.core.prompt import ( + create_formatted_prompt_function, + explain_prompt, +) +from intentkit.core.skill import skill_store +from intentkit.models.agent import Agent, AgentTable +from intentkit.models.agent_data import AgentData, AgentQuota +from intentkit.models.app_setting import AppSetting, SystemMessageType +from intentkit.models.chat import ( + AuthorType, + ChatMessage, + ChatMessageCreate, + ChatMessageSkillCall, +) +from intentkit.models.credit import CreditAccount, OwnerType +from intentkit.models.db import get_langgraph_checkpointer, get_session +from intentkit.models.llm import LLMModelInfo, LLMProvider +from intentkit.models.skill import AgentSkillData, ThreadSkillData +from intentkit.models.user import User +from intentkit.utils.error import IntentKitAPIError + +logger = logging.getLogger(__name__) + + +# Global variable to cache all agent executors +_agents: dict[str, CompiledStateGraph] = {} +_private_agents: dict[str, CompiledStateGraph] = {} + +# Global dictionaries to cache agent update times +_agents_updated: dict[str, datetime] = {} +_private_agents_updated: dict[str, datetime] = {} + + +async def create_agent( + agent: Agent, is_private: bool = False, has_search: bool = False +) -> CompiledStateGraph: + """Create an AI agent with specified configuration and tools. + + This function: + 1. Initializes LLM with specified model + 2. Loads and configures requested tools + 3. Sets up PostgreSQL-based memory + 4. Creates and returns the agent + + Args: + agent (Agent): Agent configuration object + is_private (bool, optional): Flag indicating whether the agent is private. Defaults to False. + has_search (bool, optional): Flag indicating whether to include search tools. Defaults to False. + + Returns: + CompiledStateGraph: Initialized LangChain agent + """ + agent_data = await AgentData.get(agent.id) + + # ==== Initialize LLM using the LLM abstraction. + from intentkit.models.llm import create_llm_model + + # Create the LLM model instance + llm_model = await create_llm_model( + model_name=agent.model, + temperature=agent.temperature, + frequency_penalty=agent.frequency_penalty, + presence_penalty=agent.presence_penalty, + ) + + # Get the LLM instance + llm = await llm_model.create_instance(config) + + # Get the token limit from the model info + input_token_limit = min(config.input_token_limit, llm_model.info.context_length) + + # ==== Store buffered conversation history in memory. 
+ memory = get_langgraph_checkpointer() + + # ==== Load skills + tools: list[BaseTool | dict] = [] + + if agent.skills: + for k, v in agent.skills.items(): + if not v.get("enabled", False): + continue + try: + skill_module = importlib.import_module(f"intentkit.skills.{k}") + if hasattr(skill_module, "get_skills"): + skill_tools = await skill_module.get_skills( + v, is_private, skill_store, agent_id=agent.id + ) + if skill_tools and len(skill_tools) > 0: + tools.extend(skill_tools) + else: + logger.error(f"Skill {k} does not have get_skills function") + except ImportError as e: + logger.error(f"Could not import skill module: {k} ({e})") + + # filter the duplicate tools + tools = list({tool.name: tool for tool in tools}.values()) + + # Add search tools if requested + if ( + has_search + and llm_model.info.provider == LLMProvider.OPENAI + and llm_model.info.supports_search + and not agent.model.startswith( + "gpt-5" + ) # tmp disable gpt-5 search since package bugs + ): + tools.append({"type": "web_search_preview"}) + + # Create the formatted_prompt function using the refactored prompt module + formatted_prompt = create_formatted_prompt_function(agent, agent_data) + + for tool in tools: + logger.info( + f"[{agent.id}{'-private' if is_private else ''}] loaded tool: {tool.name if isinstance(tool, BaseTool) else tool}" + ) + + # Pre model hook + pre_model_hook = PreModelNode( + model=llm, + short_term_memory_strategy=agent.short_term_memory_strategy, + max_tokens=input_token_limit // 2, + max_summary_tokens=2048, # later we can let agent to set this + ) + + # Create ReAct Agent using the LLM and CDP Agentkit tools. + executor = create_react_agent( + model=llm, + tools=tools, + prompt=formatted_prompt, + pre_model_hook=pre_model_hook, + post_model_hook=post_model_node if config.payment_enabled else None, + state_schema=AgentState, + context_schema=AgentContext, + checkpointer=memory, + debug=config.debug_checkpoint, + name=agent.id, + ) + + return executor + + +async def initialize_agent(aid, is_private=False): + """Initialize an AI agent with specified configuration and tools. + + This function: + 1. Loads agent configuration from database + 2. Uses create_agent to build the agent + 3. Caches the agent + + Args: + aid (str): Agent ID to initialize + is_private (bool, optional): Flag indicating whether the agent is private. Defaults to False. 
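+
+    Example (illustrative agent id):
+        await initialize_agent("my-agent-id", is_private=True)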
+
+    Returns:
+        None: The initialized executor is stored in the agent cache.
+
+    Raises:
+        HTTPException: If agent not found (404) or database error (500)
+    """
+    # get the agent from the database
+    agent: Optional[Agent] = await Agent.get(aid)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+
+    # Determine if search should be enabled based on model capabilities
+    from intentkit.models.llm import create_llm_model
+
+    llm_model = await create_llm_model(
+        model_name=agent.model,
+        temperature=agent.temperature,
+        frequency_penalty=agent.frequency_penalty,
+        presence_penalty=agent.presence_penalty,
+    )
+    has_search = (
+        llm_model.info.provider == LLMProvider.OPENAI and llm_model.info.supports_search
+    )
+
+    # Create the agent using the new create_agent function
+    executor = await create_agent(agent, is_private, has_search)
+
+    # Cache the agent executor
+    if is_private:
+        _private_agents[aid] = executor
+        _private_agents_updated[aid] = agent.updated_at
+    else:
+        _agents[aid] = executor
+        _agents_updated[aid] = agent.updated_at
+
+
+async def agent_executor(
+    agent_id: str, is_private: bool
+) -> tuple[CompiledStateGraph, float]:
+    """Return the cached executor for an agent plus the cold-start cost in seconds."""
+    start = time.perf_counter()
+    agent = await Agent.get(agent_id)
+    if not agent:
+        raise HTTPException(status_code=404, detail="Agent not found")
+    agents = _private_agents if is_private else _agents
+    agents_updated = _private_agents_updated if is_private else _agents_updated
+
+    # Check if agent needs reinitialization due to updates
+    needs_reinit = False
+    if agent_id in agents:
+        if (
+            agent_id not in agents_updated
+            or agent.updated_at != agents_updated[agent_id]
+        ):
+            needs_reinit = True
+            logger.info(
+                f"Reinitializing agent {agent_id} due to updates, private mode: {is_private}"
+            )
+
+    # cold start or needs reinitialization
+    cold_start_cost = 0.0
+    if (agent_id not in agents) or needs_reinit:
+        await initialize_agent(agent_id, is_private)
+        cold_start_cost = time.perf_counter() - start
+    return agents[agent_id], cold_start_cost
+
+
+async def stream_agent(message: ChatMessageCreate):
+    """
+    Stream agent execution results as an async generator.
+
+    This function:
+    1. Configures execution context with thread ID
+    2. Initializes agent if not in cache
+    3. Streams agent execution results
+    4. 
Formats and times the execution steps + + Args: + message (ChatMessageCreate): The chat message containing agent_id, chat_id, and message content + + Yields: + ChatMessage: Individual response messages including timing information + """ + start = time.perf_counter() + # make sure reply_to is set + message.reply_to = message.id + + # save input message first + input = await message.save() + + # agent + agent = await Agent.get(input.agent_id) + + # model + model = await LLMModelInfo.get(agent.model) + + payment_enabled = config.payment_enabled + + # check user balance + if payment_enabled: + if not input.user_id or not agent.owner: + raise IntentKitAPIError( + 500, + "PaymentError", + "Payment is enabled but user_id or agent owner is not set", + ) + if agent.fee_percentage and agent.fee_percentage > 100: + owner = await User.get(agent.owner) + if owner and agent.fee_percentage > 100 + owner.nft_count * 10: + error_message_create = await ChatMessageCreate.from_system_message( + SystemMessageType.SERVICE_FEE_ERROR, + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + author_id=input.agent_id, + thread_type=input.author_type, + reply_to=input.id, + time_cost=time.perf_counter() - start, + ) + error_message = await error_message_create.save() + yield error_message + return + # payer + payer = input.user_id + if input.author_type in [ + AuthorType.TELEGRAM, + AuthorType.TWITTER, + AuthorType.API, + ]: + payer = agent.owner + # user account + user_account = await CreditAccount.get_or_create(OwnerType.USER, payer) + # quota + quota = await AgentQuota.get(message.agent_id) + # payment settings + payment_settings = await AppSetting.payment() + # agent abuse check + abuse_check = True + if ( + payment_settings.agent_whitelist_enabled + and agent.id in payment_settings.agent_whitelist + ): + abuse_check = False + if abuse_check and payer != agent.owner and user_account.free_credits > 0: + if quota and quota.free_income_daily > 24000: + error_message_create = await ChatMessageCreate.from_system_message( + SystemMessageType.DAILY_USAGE_LIMIT_EXCEEDED, + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + author_id=input.agent_id, + thread_type=input.author_type, + reply_to=input.id, + time_cost=time.perf_counter() - start, + ) + error_message = await error_message_create.save() + yield error_message + return + # avg cost + avg_count = 1 + if quota and quota.avg_action_cost > 0: + avg_count = quota.avg_action_cost + if not user_account.has_sufficient_credits(avg_count): + error_message_create = await ChatMessageCreate.from_system_message( + SystemMessageType.INSUFFICIENT_BALANCE, + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + author_id=input.agent_id, + thread_type=input.author_type, + reply_to=input.id, + time_cost=time.perf_counter() - start, + ) + error_message = await error_message_create.save() + yield error_message + return + + is_private = False + if input.user_id == agent.owner: + is_private = True + + executor, cold_start_cost = await agent_executor(input.agent_id, is_private) + last = start + cold_start_cost + + # Extract images from attachments + image_urls = [] + if input.attachments: + image_urls = [ + att["url"] + for att in input.attachments + if "type" in att and att["type"] == "image" and "url" in att + ] + + # Process input message to handle @skill patterns + if config.admin_llm_skill_control: + input_message = await explain_prompt(input.message) + else: + input_message = input.message + + # super mode + 
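+    # Illustrative: a leading "@super" in the user message, e.g.
+    #     "@super plan a 40-step research task"
+    # raises the LangGraph step budget for this run (30 -> 300), and the
+    # marker itself is stripped before the text reaches the model: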
recursion_limit = 30 + if re.search(r"\b@super\b", input_message): + recursion_limit = 300 + # Remove @super from the message + input_message = re.sub(r"\b@super\b", "", input_message).strip() + + # llm native search + if re.search(r"\b@search\b", input_message) or re.search( + r"\b@web\b", input_message + ): + if model.supports_search: + input_message = re.sub( + r"\b@search\b", + "(You have native search tool, you can use it to get more recent information)", + input_message, + ).strip() + input_message = re.sub( + r"\b@web\b", + "(You have native search tool, you can use it to get more recent information)", + input_message, + ).strip() + else: + input_message = re.sub(r"\b@search\b", "", input_message).strip() + input_message = re.sub(r"\b@web\b", "", input_message).strip() + + # content to llm + content = [ + {"type": "text", "text": input_message}, + ] + # if the model doesn't natively support image parsing, add the image URLs to the message + if image_urls: + if ( + agent.has_image_parser_skill(is_private=is_private) + and not model.supports_image_input + ): + input_message += f"\n\nImages:\n{'\n'.join(image_urls)}" + content = [ + {"type": "text", "text": input_message}, + ] + else: + # anyway, pass it directly to LLM + content.extend( + [ + {"type": "image_url", "image_url": {"url": image_url}} + for image_url in image_urls + ] + ) + + messages = [ + HumanMessage(content=content), + ] + + # stream config + thread_id = f"{input.agent_id}-{input.chat_id}" + stream_config = { + "configurable": { + "thread_id": thread_id, + }, + "recursion_limit": recursion_limit, + } + + context = AgentContext( + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + app_id=input.app_id, + entrypoint=input.author_type, + is_private=is_private, + payer=payer if payment_enabled else None, + ) + + # run + cached_tool_step = None + try: + async for chunk in executor.astream( + {"messages": messages}, context=context, config=stream_config + ): + this_time = time.perf_counter() + logger.debug(f"stream chunk: {chunk}", extra={"thread_id": thread_id}) + if "agent" in chunk and "messages" in chunk["agent"]: + if len(chunk["agent"]["messages"]) != 1: + logger.error( + "unexpected agent message: " + str(chunk["agent"]["messages"]), + extra={"thread_id": thread_id}, + ) + msg = chunk["agent"]["messages"][0] + if hasattr(msg, "tool_calls") and msg.tool_calls: + # tool calls, save for later use, if it is deleted by post_model_hook, will not be used. 
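+                    # (Sketch of the pairing done later in the "tools" branch,
+                    # using this cached step:
+                    #     for call in cached_tool_step.tool_calls:
+                    #         if call["id"] == tool_message.tool_call_id: ...
+                    # the cached AIMessage is the only place the call name and
+                    # arguments survive once the ToolMessages arrive.)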
+ cached_tool_step = msg + if hasattr(msg, "content") and msg.content: + content = msg.content + if isinstance(msg.content, list): + # in new version, content item maybe a list + content = msg.content[0] + if isinstance(content, dict): + if "text" in content: + content = content["text"] + else: + content = str(content) + logger.error(f"unexpected content type: {content}") + # agent message + chat_message_create = ChatMessageCreate( + id=str(XID()), + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + author_id=input.agent_id, + author_type=AuthorType.AGENT, + model=agent.model, + thread_type=input.author_type, + reply_to=input.id, + message=content, + input_tokens=( + msg.usage_metadata.get("input_tokens", 0) + if hasattr(msg, "usage_metadata") and msg.usage_metadata + else 0 + ), + output_tokens=( + msg.usage_metadata.get("output_tokens", 0) + if hasattr(msg, "usage_metadata") and msg.usage_metadata + else 0 + ), + time_cost=this_time - last, + ) + last = this_time + if cold_start_cost > 0: + chat_message_create.cold_start_cost = cold_start_cost + cold_start_cost = 0 + # handle message and payment in one transaction + async with get_session() as session: + # payment + if payment_enabled: + amount = await model.calculate_cost( + chat_message_create.input_tokens, + chat_message_create.output_tokens, + ) + + # Check for web_search_call in additional_kwargs + if ( + hasattr(msg, "additional_kwargs") + and msg.additional_kwargs + ): + tool_outputs = msg.additional_kwargs.get( + "tool_outputs", [] + ) + for tool_output in tool_outputs: + if tool_output.get("type") == "web_search_call": + logger.info( + f"[{input.agent_id}] Found web_search_call in additional_kwargs" + ) + amount += 35 + break + credit_event = await expense_message( + session, + payer, + chat_message_create.id, + input.id, + amount, + agent, + ) + logger.info(f"[{input.agent_id}] expense message: {amount}") + chat_message_create.credit_event_id = credit_event.id + chat_message_create.credit_cost = credit_event.total_amount + chat_message = await chat_message_create.save_in_session( + session + ) + await session.commit() + yield chat_message + elif "tools" in chunk and "messages" in chunk["tools"]: + if not cached_tool_step: + logger.error( + "unexpected tools message: " + str(chunk["tools"]), + extra={"thread_id": thread_id}, + ) + continue + skill_calls = [] + cached_attachments = [] + have_first_call_in_cache = False # tool node emit every tool call + for msg in chunk["tools"]["messages"]: + if not hasattr(msg, "tool_call_id"): + logger.error( + "unexpected tools message: " + str(chunk["tools"]), + extra={"thread_id": thread_id}, + ) + continue + for call_index, call in enumerate(cached_tool_step.tool_calls): + if call["id"] == msg.tool_call_id: + if call_index == 0: + have_first_call_in_cache = True + skill_call: ChatMessageSkillCall = { + "id": msg.tool_call_id, + "name": call["name"], + "parameters": call["args"], + "success": True, + } + if msg.status == "error": + skill_call["success"] = False + skill_call["error_message"] = str(msg.content) + else: + if config.debug: + skill_call["response"] = str(msg.content) + else: + skill_call["response"] = textwrap.shorten( + str(msg.content), width=1000, placeholder="..." 
+ ) + if msg.artifact: + cached_attachments.extend(msg.artifact) + skill_calls.append(skill_call) + break + skill_message_create = ChatMessageCreate( + id=str(XID()), + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + author_id=input.agent_id, + author_type=AuthorType.SKILL, + model=agent.model, + thread_type=input.author_type, + reply_to=input.id, + message="", + skill_calls=skill_calls, + attachments=cached_attachments, + input_tokens=( + cached_tool_step.usage_metadata.get("input_tokens", 0) + if hasattr(cached_tool_step, "usage_metadata") + and cached_tool_step.usage_metadata + and have_first_call_in_cache + else 0 + ), + output_tokens=( + cached_tool_step.usage_metadata.get("output_tokens", 0) + if hasattr(cached_tool_step, "usage_metadata") + and cached_tool_step.usage_metadata + and have_first_call_in_cache + else 0 + ), + time_cost=this_time - last, + ) + last = this_time + if cold_start_cost > 0: + skill_message_create.cold_start_cost = cold_start_cost + cold_start_cost = 0 + # save message and credit in one transaction + async with get_session() as session: + if payment_enabled: + # message payment, only first call in a group has message bill + if have_first_call_in_cache: + message_amount = await model.calculate_cost( + skill_message_create.input_tokens, + skill_message_create.output_tokens, + ) + message_payment_event = await expense_message( + session, + payer, + skill_message_create.id, + input.id, + message_amount, + agent, + ) + skill_message_create.credit_event_id = ( + message_payment_event.id + ) + skill_message_create.credit_cost = ( + message_payment_event.total_amount + ) + # skill payment + for skill_call in skill_calls: + if not skill_call["success"]: + continue + payment_event = await expense_skill( + session, + payer, + skill_message_create.id, + input.id, + skill_call["id"], + skill_call["name"], + agent, + ) + skill_call["credit_event_id"] = payment_event.id + skill_call["credit_cost"] = payment_event.total_amount + logger.info( + f"[{input.agent_id}] skill payment: {skill_call}" + ) + skill_message_create.skill_calls = skill_calls + skill_message = await skill_message_create.save_in_session(session) + await session.commit() + yield skill_message + elif "pre_model_hook" in chunk: + pass + elif "post_model_hook" in chunk: + logger.debug( + f"post_model_hook: {chunk}", + extra={"thread_id": thread_id}, + ) + if chunk["post_model_hook"] and "error" in chunk["post_model_hook"]: + if ( + chunk["post_model_hook"]["error"] + == AgentError.INSUFFICIENT_CREDITS + ): + if "messages" in chunk["post_model_hook"]: + msg = chunk["post_model_hook"]["messages"][-1] + content = msg.content + if isinstance(msg.content, list): + # in new version, content item maybe a list + content = msg.content[0] + post_model_message_create = ChatMessageCreate( + id=str(XID()), + agent_id=input.agent_id, + chat_id=input.chat_id, + user_id=input.user_id, + author_id=input.agent_id, + author_type=AuthorType.AGENT, + model=agent.model, + thread_type=input.author_type, + reply_to=input.id, + message=content, + input_tokens=0, + output_tokens=0, + time_cost=this_time - last, + ) + last = this_time + if cold_start_cost > 0: + post_model_message_create.cold_start_cost = ( + cold_start_cost + ) + cold_start_cost = 0 + post_model_message = await post_model_message_create.save() + yield post_model_message + error_message_create = ( + await ChatMessageCreate.from_system_message( + SystemMessageType.INSUFFICIENT_BALANCE, + agent_id=input.agent_id, + chat_id=input.chat_id, + 
user_id=input.user_id,
+                                author_id=input.agent_id,
+                                thread_type=input.author_type,
+                                reply_to=input.id,
+                                time_cost=0,
+                            )
+                        )
+                        error_message = await error_message_create.save()
+                        yield error_message
+            else:
+                error_traceback = traceback.format_exc()
+                logger.error(
+                    f"unexpected message type: {str(chunk)}\n{error_traceback}",
+                    extra={"thread_id": thread_id},
+                )
+    except SQLAlchemyError as e:
+        error_traceback = traceback.format_exc()
+        logger.error(
+            f"failed to execute agent: {str(e)}\n{error_traceback}",
+            extra={"thread_id": thread_id},
+        )
+        error_message_create = await ChatMessageCreate.from_system_message(
+            SystemMessageType.AGENT_INTERNAL_ERROR,
+            agent_id=input.agent_id,
+            chat_id=input.chat_id,
+            user_id=input.user_id,
+            author_id=input.agent_id,
+            thread_type=input.author_type,
+            reply_to=input.id,
+            time_cost=time.perf_counter() - start,
+        )
+        error_message = await error_message_create.save()
+        yield error_message
+        return
+    except GraphRecursionError as e:
+        error_traceback = traceback.format_exc()
+        logger.error(
+            f"reached recursion limit: {str(e)}\n{error_traceback}",
+            extra={"thread_id": thread_id, "agent_id": input.agent_id},
+        )
+        error_message_create = await ChatMessageCreate.from_system_message(
+            SystemMessageType.STEP_LIMIT_EXCEEDED,
+            agent_id=input.agent_id,
+            chat_id=input.chat_id,
+            user_id=input.user_id,
+            author_id=input.agent_id,
+            thread_type=input.author_type,
+            reply_to=input.id,
+            time_cost=time.perf_counter() - start,
+        )
+        error_message = await error_message_create.save()
+        yield error_message
+        return
+    except Exception as e:
+        error_traceback = traceback.format_exc()
+        logger.error(
+            f"failed to execute agent: {str(e)}\n{error_traceback}",
+            extra={"thread_id": thread_id, "agent_id": input.agent_id},
+        )
+        error_message_create = await ChatMessageCreate.from_system_message(
+            SystemMessageType.AGENT_INTERNAL_ERROR,
+            agent_id=input.agent_id,
+            chat_id=input.chat_id,
+            user_id=input.user_id,
+            author_id=input.agent_id,
+            thread_type=input.author_type,
+            reply_to=input.id,
+            time_cost=time.perf_counter() - start,
+        )
+        error_message = await error_message_create.save()
+        yield error_message
+        return
+
+
+async def execute_agent(message: ChatMessageCreate) -> list[ChatMessage]:
+    """
+    Execute an agent with the given prompt and return response lines.
+
+    This function:
+    1. Configures execution context with thread ID
+    2. Initializes agent if not in cache
+    3. Streams agent execution results
+    4. Formats and times the execution steps
+
+    Args:
+        message (ChatMessageCreate): The chat message containing agent_id, chat_id, and message content
+
+    Returns:
+        list[ChatMessage]: Formatted response lines including timing information
+    """
+    resp = []
+    async for chat_message in stream_agent(message):
+        resp.append(chat_message)
+    return resp
+
+
+async def clean_agent_memory(
+    agent_id: str,
+    chat_id: str = "",
+    clean_agent: bool = False,
+    clean_skill: bool = False,
+) -> str:
+    """
+    Clean an agent's memory data and return a status message.
+
+    This function:
+    1. Cleans the agent's skills data.
+    2. Cleans the thread skills data.
+    3. Cleans the graph checkpoint data.
+    4. Cleans the graph checkpoint_writes data.
+    5. Cleans the graph checkpoint_blobs data. 
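+
+    Example (illustrative ids):
+        # wipe one chat thread's graph checkpoints, keep skill data
+        await clean_agent_memory("my-agent-id", chat_id="chat-1", clean_agent=True)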
+ + Args: + agent_id (str): Agent ID + chat_id (str): Thread ID for the agent memory cleanup + clean_agent (bool): Whether to clean agent's memory data + clean_skill (bool): Whether to clean skills memory data + + Returns: + str: Successful response message. + """ + # get the agent from the database + try: + if not clean_skill and not clean_agent: + raise HTTPException( + status_code=400, + detail="at least one of skills data or agent memory should be true.", + ) + + if clean_skill: + await AgentSkillData.clean_data(agent_id) + await ThreadSkillData.clean_data(agent_id, chat_id) + + async with get_session() as db: + if clean_agent: + chat_id = chat_id.strip() + q_suffix = "%" + if chat_id and chat_id != "": + q_suffix = chat_id + + deletion_param = {"value": agent_id + "-" + q_suffix} + await db.execute( + sqlalchemy.text( + "DELETE FROM checkpoints WHERE thread_id like :value", + ), + deletion_param, + ) + await db.execute( + sqlalchemy.text( + "DELETE FROM checkpoint_writes WHERE thread_id like :value", + ), + deletion_param, + ) + await db.execute( + sqlalchemy.text( + "DELETE FROM checkpoint_blobs WHERE thread_id like :value", + ), + deletion_param, + ) + + # update the updated_at field so that the agent instance will all reload + await db.execute( + update(AgentTable) + .where(AgentTable.id == agent_id) + .values(updated_at=func.now()) + ) + await db.commit() + + logger.info(f"Agent [{agent_id}] data cleaned up successfully.") + return "Agent data cleaned up successfully." + except SQLAlchemyError as e: + # Handle other SQLAlchemy-related errors + logger.error(e) + raise HTTPException(status_code=500, detail=str(e)) + except Exception as e: + logger.error("failed to cleanup the agent memory: " + str(e)) + raise e + + +async def thread_stats(agent_id: str, chat_id: str) -> list[BaseMessage]: + thread_id = f"{agent_id}-{chat_id}" + stream_config = {"configurable": {"thread_id": thread_id}} + is_private = False + if chat_id.startswith("owner") or chat_id.startswith("autonomous"): + is_private = True + executor, _ = await agent_executor(agent_id, is_private) + snap = await executor.aget_state(stream_config) + if snap.values and "messages" in snap.values: + return snap.values["messages"] + else: + return [] diff --git a/intentkit/core/node.py b/intentkit/core/node.py new file mode 100644 index 00000000..2b435d4f --- /dev/null +++ b/intentkit/core/node.py @@ -0,0 +1,215 @@ +import logging +from typing import Any, Sequence + +from langchain_core.language_models import LanguageModelLike +from langchain_core.messages import ( + AIMessage, + AnyMessage, + BaseMessage, + HumanMessage, + RemoveMessage, + ToolMessage, +) +from langchain_core.messages.utils import count_tokens_approximately, trim_messages +from langgraph.graph.message import REMOVE_ALL_MESSAGES +from langgraph.runtime import get_runtime +from langgraph.utils.runnable import RunnableCallable +from langmem.short_term.summarization import ( + DEFAULT_EXISTING_SUMMARY_PROMPT, + DEFAULT_FINAL_SUMMARY_PROMPT, + DEFAULT_INITIAL_SUMMARY_PROMPT, + SummarizationResult, + asummarize_messages, +) + +from intentkit.abstracts.graph import AgentContext, AgentError, AgentState +from intentkit.core.credit import skill_cost +from intentkit.models.agent import Agent +from intentkit.models.credit import CreditAccount, OwnerType +from intentkit.models.skill import Skill + +logger = logging.getLogger(__name__) + + +def _validate_chat_history( + messages: Sequence[BaseMessage], +) -> None: + """Validate that all tool calls in AIMessages have a 
corresponding ToolMessage."""
+    all_tool_calls = [
+        tool_call
+        for message in messages
+        if isinstance(message, AIMessage)
+        for tool_call in message.tool_calls
+    ]
+    tool_call_ids_with_results = {
+        message.tool_call_id for message in messages if isinstance(message, ToolMessage)
+    }
+    tool_calls_without_results = [
+        tool_call
+        for tool_call in all_tool_calls
+        if tool_call["id"] not in tool_call_ids_with_results
+    ]
+    if not tool_calls_without_results:
+        return
+
+    message = (
+        "Found AIMessages with tool_calls that do not have a corresponding ToolMessage. "
+        f"Here are the first few of those tool calls: {tool_calls_without_results[:3]}"
+    )
+    raise ValueError(message)
+
+
+class PreModelNode(RunnableCallable):
+    """LangGraph node that runs before the LLM is called."""
+
+    def __init__(
+        self,
+        *,
+        model: LanguageModelLike,
+        short_term_memory_strategy: str,
+        max_tokens: int,
+        max_summary_tokens: int = 1024,
+    ) -> None:
+        super().__init__(self._func, self._afunc, name="pre_model_node", trace=False)
+        self.model = model
+        self.short_term_memory_strategy = short_term_memory_strategy
+        self.max_tokens = max_tokens
+        self.max_tokens_before_summary = max_tokens
+        self.max_summary_tokens = max_summary_tokens
+        self.token_counter = count_tokens_approximately
+        self.initial_summary_prompt = DEFAULT_INITIAL_SUMMARY_PROMPT
+        self.existing_summary_prompt = DEFAULT_EXISTING_SUMMARY_PROMPT
+        self.final_prompt = DEFAULT_FINAL_SUMMARY_PROMPT
+        self.func_accepts_config = True
+
+    def _parse_input(
+        self, input: AgentState
+    ) -> tuple[list[AnyMessage], dict[str, Any]]:
+        messages = input.get("messages")
+        context = input.get("context", {})
+        if messages is None or not isinstance(messages, list) or len(messages) == 0:
+            raise ValueError("Missing required field `messages` in the input.")
+        return messages, context
+
+    # overwrite old messages if summarization is used
+    def _prepare_state_update(
+        self, context: dict[str, Any], summarization_result: SummarizationResult
+    ) -> dict[str, Any]:
+        state_update = {
+            "messages": [RemoveMessage(REMOVE_ALL_MESSAGES)]
+            + summarization_result.messages
+        }
+        if summarization_result.running_summary:
+            state_update["context"] = {
+                **context,
+                "running_summary": summarization_result.running_summary,
+            }
+        return state_update
+
+    def _func(self, input: AgentState) -> dict[str, Any]:
+        raise NotImplementedError("Not implemented yet")
+
+    async def _afunc(self, input: AgentState) -> dict[str, Any]:
+        messages, context = self._parse_input(input)
+        try:
+            _validate_chat_history(messages)
+        except ValueError as e:
+            logger.error(f"Invalid chat history: {e}")
+            logger.info(input)
+            # delete all messages
+            return {"messages": [RemoveMessage(REMOVE_ALL_MESSAGES)]}
+        if self.short_term_memory_strategy == "trim":
+            trimmed_messages = trim_messages(
+                messages,
+                strategy="last",
+                token_counter=self.token_counter,
+                max_tokens=self.max_summary_tokens,
+                start_on="human",
+                end_on=("human", "tool"),
+            )
+            if len(trimmed_messages) < len(messages):
+                logger.info(
+                    f"Trimmed messages: {len(messages)} -> {len(trimmed_messages)}"
+                )
+                if len(trimmed_messages) <= 3:
+                    logger.info(f"Too few messages after trim: {len(trimmed_messages)}")
+                    return {}
+                return {
+                    "messages": [RemoveMessage(REMOVE_ALL_MESSAGES)] + trimmed_messages,
+                }
+            else:
+                return {}
+        if self.short_term_memory_strategy == "summarize":
+            # if last message is not human message, skip summarize
+            if not isinstance(messages[-1], HumanMessage):
+                return {}
+            # summarization comes from an external library and is not always stable, so 
we need to try-catch it + try: + summarization_result = await asummarize_messages( + messages, + running_summary=context.get("running_summary"), + model=self.model, + max_tokens=self.max_tokens, + max_tokens_before_summary=self.max_tokens_before_summary, + max_summary_tokens=self.max_summary_tokens, + token_counter=self.token_counter, + initial_summary_prompt=self.initial_summary_prompt, + existing_summary_prompt=self.existing_summary_prompt, + final_prompt=self.final_prompt, + ) + if summarization_result.running_summary: + logger.debug(f"Summarization result: {summarization_result}") + else: + logger.debug("Summarization not run") + return self._prepare_state_update(context, summarization_result) + except ValueError as e: + logger.error(f"Invalid chat history: {e}") + logger.info(input) + # delete all messages + return {"messages": [RemoveMessage(REMOVE_ALL_MESSAGES)]} + raise ValueError( + f"Invalid short_term_memory_strategy: {self.short_term_memory_strategy}" + ) + + +class PostModelNode(RunnableCallable): + def __init__(self) -> None: + super().__init__(self._func, self._afunc, name="post_model_node", trace=False) + self.func_accepts_config = True + + def _func(self, input: AgentState) -> dict[str, Any]: + raise NotImplementedError("Not implemented yet") + + async def _afunc(self, input: AgentState) -> dict[str, Any]: + runtime = get_runtime(AgentContext) + context = runtime.context + logger.debug(f"Running PostModelNode, input: {input}, context: {context}") + state_update = {} + messages = input.get("messages") + if messages is None or not isinstance(messages, list) or len(messages) == 0: + raise ValueError("Missing required field `messages` in the input.") + payer = context.payer + if not payer: + return state_update + logger.debug(f"last: {messages[-1]}") + msg = messages[-1] + agent_id = context.agent_id + agent = await Agent.get(agent_id) + account = await CreditAccount.get_or_create(OwnerType.USER, payer) + if hasattr(msg, "tool_calls") and msg.tool_calls: + for tool_call in msg.tool_calls: + skill_meta = await Skill.get(tool_call.get("name")) + if skill_meta: + skill_cost_info = await skill_cost(skill_meta.name, payer, agent) + total_paid = skill_cost_info.total_amount + if not account.has_sufficient_credits(total_paid): + state_update["error"] = AgentError.INSUFFICIENT_CREDITS + state_update["messages"] = [RemoveMessage(id=msg.id)] + state_update["messages"].append( + AIMessage( + content=f"Insufficient credits. Please top up your account. 
You need {total_paid} credits, but you only have {account.balance} credits.", + ) + ) + return state_update + return state_update + + +post_model_node = PostModelNode() diff --git a/intentkit/core/prompt.py b/intentkit/core/prompt.py new file mode 100644 index 00000000..6410913f --- /dev/null +++ b/intentkit/core/prompt.py @@ -0,0 +1,430 @@ +import re +from typing import Callable, Optional + +from eth_utils import is_address +from langchain_core.messages import BaseMessage +from langchain_core.prompts import ChatPromptTemplate +from langgraph.runtime import Runtime + +from intentkit.abstracts.graph import AgentContext, AgentState +from intentkit.config.config import config +from intentkit.models.agent import Agent +from intentkit.models.agent_data import AgentData +from intentkit.models.chat import AuthorType +from intentkit.models.skill import Skill + +# ============================================================================ +# CONSTANTS AND CONFIGURATION +# ============================================================================ + +# Base system prompt components +INTENTKIT_PROMPT = """You are an AI agent built using IntentKit. +Your tools are called 'skills'. +If your skill fails to execute due to a technical error ask the user to try again later, don't retry by yourself. If someone asks you to do something you can't do with your currently available skills, you must say so, recommend them to submit their feedback to the IntentKit team at https://github.com/crestalnetwork/intentkit. Be concise and helpful with your responses.""" + +ENSO_SKILLS_GUIDE = """## ENSO Skills Guide + +You are integrated with the Enso API. You can use enso_get_tokens to retrieve token information, +including APY, Protocol Slug, Symbol, Address, Decimals, and underlying tokens. When interacting with token amounts, +ensure to multiply input amounts by the token's decimal places and divide output amounts by the token's decimals. +Utilize enso_route_shortcut to find the best swap or deposit route. Set broadcast_request to True only when the +user explicitly requests a transaction broadcast. Insufficient funds or insufficient spending approval can cause +Route Shortcut broadcasts to fail. To avoid this, use the enso_broadcast_wallet_approve tool that requires explicit +user confirmation before broadcasting any approval transactions for security reasons. 
+ +""" + + +# ============================================================================ +# CORE PROMPT BUILDING FUNCTIONS +# ============================================================================ + + +def _build_system_header() -> str: + """Build the system prompt header.""" + prompt = "# SYSTEM PROMPT\n\n" + if config.system_prompt: + prompt += config.system_prompt + "\n\n" + if config.intentkit_prompt: + prompt += config.intentkit_prompt + "\n\n" + else: + prompt += INTENTKIT_PROMPT + "\n\n" + return prompt + + +def _build_agent_identity_section(agent: Agent) -> str: + """Build agent identity information section.""" + identity_parts = [] + + if agent.name: + identity_parts.append(f"Your name is {agent.name}.") + if agent.ticker: + identity_parts.append(f"Your ticker symbol is {agent.ticker}.") + + return "\n".join(identity_parts) + ("\n" if identity_parts else "") + + +def _build_social_accounts_section(agent_data: AgentData) -> str: + """Build social accounts information section.""" + if not agent_data: + return "" + + social_parts = [] + + # Twitter info + if agent_data.twitter_id: + social_parts.append( + f"Your twitter id is {agent_data.twitter_id}, never reply or retweet yourself." + ) + if agent_data.twitter_username: + social_parts.append(f"Your twitter username is {agent_data.twitter_username}.") + if agent_data.twitter_name: + social_parts.append(f"Your twitter name is {agent_data.twitter_name}.") + + # Twitter verification status + if agent_data.twitter_is_verified: + social_parts.append("Your twitter account is verified.") + else: + social_parts.append("Your twitter account is not verified.") + + # Telegram info + if agent_data.telegram_id: + social_parts.append(f"Your telegram bot id is {agent_data.telegram_id}.") + if agent_data.telegram_username: + social_parts.append( + f"Your telegram bot username is {agent_data.telegram_username}." + ) + if agent_data.telegram_name: + social_parts.append(f"Your telegram bot name is {agent_data.telegram_name}.") + + return "\n".join(social_parts) + ("\n" if social_parts else "") + + +def _build_wallet_section(agent: Agent, agent_data: AgentData) -> str: + """Build wallet information section.""" + if not agent_data: + return "" + + wallet_parts = [] + network_id = agent.network_id or agent.cdp_network_id + + if agent_data.evm_wallet_address and network_id != "solana": + wallet_parts.append( + f"Your wallet address in {network_id} is {agent_data.evm_wallet_address}." + ) + if agent_data.solana_wallet_address and network_id == "solana": + wallet_parts.append( + f"Your wallet address in {network_id} is {agent_data.solana_wallet_address}." 
+ ) + + return "\n".join(wallet_parts) + ("\n" if wallet_parts else "") + + +def _build_user_info_section(context: AgentContext) -> str: + """Build user information section when user_id is a valid EVM wallet address.""" + if not context.user_id: + return "" + + # Check if user_id is a valid EVM wallet address + try: + if is_address(context.user_id): + return f"## User Info\n\nThe person you are talking to has wallet address: {context.user_id}\n\n" + except Exception: + # If validation fails, don't include the section + pass + + return "" + + +def _build_agent_characteristics_section(agent: Agent) -> str: + """Build agent characteristics section (purpose, personality, principles, etc.).""" + sections = [] + + if agent.purpose: + sections.append(f"## Purpose\n\n{agent.purpose}") + if agent.personality: + sections.append(f"## Personality\n\n{agent.personality}") + if agent.principles: + sections.append(f"## Principles\n\n{agent.principles}") + if agent.prompt: + sections.append(f"## Initial Rules\n\n{agent.prompt}") + + return "\n\n".join(sections) + ("\n\n" if sections else "") + + +def _build_skills_guides_section(agent: Agent) -> str: + """Build skills-specific guides section.""" + guides = [] + + # ENSO skills guide + if agent.skills and "enso" in agent.skills and agent.skills["enso"].get("enabled"): + guides.append(ENSO_SKILLS_GUIDE) + + return "".join(guides) + + +def build_agent_prompt(agent: Agent, agent_data: AgentData) -> str: + """ + Build the complete agent system prompt. + + This function orchestrates the building of different prompt sections: + - System header and base prompt + - Agent identity (name, ticker) + - Social accounts (Twitter, Telegram) + - Wallet information + - Agent characteristics (purpose, personality, principles) + - Skills-specific guides + + Args: + agent: The agent configuration + agent_data: The agent's runtime data + + Returns: + str: The complete system prompt + """ + prompt_sections = [ + _build_system_header(), + _build_agent_identity_section(agent), + _build_social_accounts_section(agent_data), + _build_wallet_section(agent, agent_data), + "\n", # Add spacing before characteristics + _build_agent_characteristics_section(agent), + _build_skills_guides_section(agent), + ] + + return "".join(section for section in prompt_sections if section) + + +# Legacy function name for backward compatibility +def agent_prompt(agent: Agent, agent_data: AgentData) -> str: + """Legacy function name. Use build_agent_prompt instead.""" + return build_agent_prompt(agent, agent_data) + + +async def explain_prompt(message: str) -> str: + """ + Process message to replace @skill:*:* patterns with (call skill xxxxx) format. + This function is used when admin_llm_skill_control is enabled. 
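+
+    Example (illustrative; the skill and config names are hypothetical):
+        "share it with @skill:twitter:post_tweet"
+        becomes, if a matching skill record exists,
+        "share it with (call skill twitter_post_tweet)"
+        and is left unchanged otherwise.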
+ + Args: + message (str): The input message to process + + Returns: + str: The processed message with @skill patterns replaced + """ + # Pattern to match @skill:category:config_name with word boundaries + pattern = r"\b@skill:([^:]+):([^\s]+)\b" + + async def replace_skill_pattern(match): + category = match.group(1) + config_name = match.group(2) + + # Get skill by category and config_name + skill = await Skill.get_by_config_name(category, config_name) + + if skill: + return f"(call skill {skill.name})" + else: + # If skill not found, keep original pattern + return match.group(0) + + # Find all matches + matches = list(re.finditer(pattern, message)) + + # Process matches in reverse order to maintain string positions + result = message + for match in reversed(matches): + replacement = await replace_skill_pattern(match) + result = result[: match.start()] + replacement + result[match.end() :] + + return result + + +# ============================================================================ +# UTILITY FUNCTIONS +# ============================================================================ + + +def escape_prompt(prompt: str) -> str: + """Escape curly braces in the prompt for template processing.""" + return prompt.replace("{", "{{").replace("}", "}}") + + +# ============================================================================ +# ENTRYPOINT PROCESSING FUNCTIONS +# ============================================================================ + + +def _build_autonomous_task_prompt(agent: Agent, context: AgentContext) -> str: + """Build prompt for autonomous task entrypoint.""" + task_id = context.chat_id.removeprefix("autonomous-") + + # Find the autonomous task by task_id + autonomous_task = None + if agent.autonomous: + for task in agent.autonomous: + if task.id == task_id: + autonomous_task = task + break + + if not autonomous_task: + # Fallback if task not found + return f"You are running an autonomous task. The task id is {task_id}. " + + # Build detailed task info - always include task_id + if autonomous_task.name: + task_info = f"You are running an autonomous task '{autonomous_task.name}' (ID: {task_id})" + else: + task_info = f"You are running an autonomous task (ID: {task_id})" + + # Add description if available + if autonomous_task.description: + task_info += f": {autonomous_task.description}" + + # Add cycle info + if autonomous_task.minutes: + task_info += f". This task runs every {autonomous_task.minutes} minute(s)" + elif autonomous_task.cron: + task_info += f". This task runs on schedule: {autonomous_task.cron}" + + return f"{task_info}. " + + +async def build_entrypoint_prompt(agent: Agent, context: AgentContext) -> Optional[str]: + """ + Build entrypoint-specific prompt based on context. 
+ + Supports different entrypoint types: + - Telegram: Uses agent.telegram_entrypoint_prompt + - Autonomous tasks: Builds task-specific prompt with scheduling info + + Args: + agent: The agent configuration + context: The agent context containing entrypoint information + + Returns: + Optional[str]: The entrypoint-specific prompt, or None if no entrypoint + """ + if not context.entrypoint: + return None + + entrypoint = context.entrypoint + entrypoint_prompt = None + + # Handle social media entrypoints + if entrypoint == AuthorType.TELEGRAM.value: + if config.tg_system_prompt: + entrypoint_prompt = "\n\n" + config.tg_system_prompt + if agent.telegram_entrypoint_prompt: + entrypoint_prompt = "\n\n" + agent.telegram_entrypoint_prompt + elif entrypoint == AuthorType.XMTP.value: + if config.xmtp_system_prompt: + entrypoint_prompt = "\n\n" + config.xmtp_system_prompt + if agent.xmtp_entrypoint_prompt: + entrypoint_prompt = "\n\n" + agent.xmtp_entrypoint_prompt + elif entrypoint == AuthorType.TRIGGER.value: + entrypoint_prompt = "\n\n" + _build_autonomous_task_prompt(agent, context) + + # Process with admin LLM skill control if enabled + if entrypoint_prompt and config.admin_llm_skill_control: + entrypoint_prompt = await explain_prompt(entrypoint_prompt) + + return entrypoint_prompt + + +def build_internal_info_prompt(context: AgentContext) -> str: + """Build internal info prompt with context information.""" + internal_info = "## Internal Info\n\n" + internal_info += "These are for your internal use. You can use them when querying or storing data, " + internal_info += "but please do not directly share this information with users.\n\n" + internal_info += f"chat_id: {context.chat_id}\n\n" + if context.user_id: + internal_info += f"user_id: {context.user_id}\n\n" + return internal_info + + +# ============================================================================ +# MAIN PROMPT FACTORY FUNCTION +# ============================================================================ + + +def create_formatted_prompt_function(agent: Agent, agent_data: AgentData) -> Callable: + """ + Create the formatted_prompt function with agent-specific configuration. + + This is the main factory function that creates a prompt formatting function + tailored to a specific agent. The returned function will be used by the + agent's runtime to format prompts for each conversation. 
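+
+    Example (illustrative):
+        formatted_prompt = create_formatted_prompt_function(agent, agent_data)
+        # later, once per turn inside the graph:
+        #     prompt_messages = await formatted_prompt(state, runtime)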
+ + Args: + agent: The agent configuration + agent_data: The agent's runtime data + + Returns: + Callable: An async function that formats prompts based on agent state and context + """ + # Build base prompt using the new function name + prompt = build_agent_prompt(agent, agent_data) + escaped_prompt = escape_prompt(prompt) + + # Process with admin LLM skill control if enabled + async def get_base_prompt(): + if config.admin_llm_skill_control: + return await explain_prompt(escaped_prompt) + return escaped_prompt + + # Build prompt array + prompt_array = [ + ("placeholder", "{system_prompt}"), + ("placeholder", "{messages}"), + ] + + if agent.prompt_append: + # Escape any curly braces in prompt_append + escaped_append = escape_prompt(agent.prompt_append) + prompt_array.append(("system", escaped_append)) + + prompt_temp = ChatPromptTemplate.from_messages(prompt_array) + + async def formatted_prompt( + state: AgentState, runtime: Runtime[AgentContext] + ) -> list[BaseMessage]: + # Get base prompt (with potential admin LLM skill control processing) + final_system_prompt = await get_base_prompt() + + context = runtime.context + + # Add entrypoint prompt if applicable + entrypoint_prompt = await build_entrypoint_prompt(agent, context) + if entrypoint_prompt: + final_system_prompt = ( + f"{final_system_prompt}## Entrypoint rules{entrypoint_prompt}\n\n" + ) + + # Add user info if user_id is a valid EVM wallet address + user_info = _build_user_info_section(context) + if user_info: + final_system_prompt = f"{final_system_prompt}{user_info}" + + # Add internal info + internal_info = build_internal_info_prompt(context) + final_system_prompt = f"{final_system_prompt}{internal_info}" + + # Process prompt_append with admin LLM skill control if needed + if agent.prompt_append and config.admin_llm_skill_control: + # Find the system message in prompt_array and process it + for i, (role, content) in enumerate(prompt_array): + if role == "system": + processed_append = await explain_prompt(content) + prompt_array[i] = ("system", processed_append) + break + + system_prompt = [("system", final_system_prompt)] + return prompt_temp.invoke( + { + "messages": state["messages"], + "system_prompt": system_prompt, + } + ) + + return formatted_prompt diff --git a/intentkit/core/skill.py b/intentkit/core/skill.py new file mode 100644 index 00000000..ec519b65 --- /dev/null +++ b/intentkit/core/skill.py @@ -0,0 +1,200 @@ +from typing import Any, Dict, List, Optional + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.config.config import config +from intentkit.core.agent import ( + add_autonomous_task as _add_autonomous_task, +) +from intentkit.core.agent import ( + delete_autonomous_task as _delete_autonomous_task, +) +from intentkit.core.agent import ( + list_autonomous_tasks as _list_autonomous_tasks, +) +from intentkit.core.agent import ( + update_autonomous_task as _update_autonomous_task, +) +from intentkit.models.agent import Agent, AgentAutonomous +from intentkit.models.agent_data import AgentData, AgentQuota +from intentkit.models.skill import ( + AgentSkillData, + AgentSkillDataCreate, + ThreadSkillData, + ThreadSkillDataCreate, +) + + +class SkillStore(SkillStoreABC): + """Implementation of skill data storage operations. + + This class provides concrete implementations for storing and retrieving + skill-related data for both agents and threads. 
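+
+    Example (illustrative skill, key, and payload):
+        await skill_store.save_agent_skill_data(
+            "agent-1", "twitter", "cursor", {"since_id": "42"}
+        )
+        data = await skill_store.get_agent_skill_data("agent-1", "twitter", "cursor")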
+ """ + + @staticmethod + def get_system_config(key: str) -> Any: + # TODO: maybe need a whitelist here + if hasattr(config, key): + return getattr(config, key) + return None + + @staticmethod + async def get_agent_config(agent_id: str) -> Optional[Agent]: + return await Agent.get(agent_id) + + @staticmethod + async def get_agent_data(agent_id: str) -> AgentData: + return await AgentData.get(agent_id) + + @staticmethod + async def set_agent_data(agent_id: str, data: Dict) -> AgentData: + return await AgentData.patch(agent_id, data) + + @staticmethod + async def get_agent_quota(agent_id: str) -> AgentQuota: + return await AgentQuota.get(agent_id) + + @staticmethod + async def get_agent_skill_data( + agent_id: str, skill: str, key: str + ) -> Optional[Dict[str, Any]]: + """Get skill data for an agent. + + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + + Returns: + Dictionary containing the skill data if found, None otherwise + """ + return await AgentSkillData.get(agent_id, skill, key) + + @staticmethod + async def save_agent_skill_data( + agent_id: str, skill: str, key: str, data: Dict[str, Any] + ) -> None: + """Save or update skill data for an agent. + + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + data: JSON data to store + """ + skill_data = AgentSkillDataCreate( + agent_id=agent_id, + skill=skill, + key=key, + data=data, + ) + await skill_data.save() + + @staticmethod + async def delete_agent_skill_data(agent_id: str, skill: str, key: str) -> None: + """Delete skill data for an agent. + + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + """ + await AgentSkillData.delete(agent_id, skill, key) + + @staticmethod + async def get_thread_skill_data( + thread_id: str, skill: str, key: str + ) -> Optional[Dict[str, Any]]: + """Get skill data for a thread. + + Args: + thread_id: ID of the thread + skill: Name of the skill + key: Data key + + Returns: + Dictionary containing the skill data if found, None otherwise + """ + return await ThreadSkillData.get(thread_id, skill, key) + + @staticmethod + async def save_thread_skill_data( + thread_id: str, + agent_id: str, + skill: str, + key: str, + data: Dict[str, Any], + ) -> None: + """Save or update skill data for a thread. + + Args: + thread_id: ID of the thread + agent_id: ID of the agent that owns this thread + skill: Name of the skill + key: Data key + data: JSON data to store + """ + skill_data = ThreadSkillDataCreate( + thread_id=thread_id, + agent_id=agent_id, + skill=skill, + key=key, + data=data, + ) + await skill_data.save() + + @staticmethod + async def list_autonomous_tasks(agent_id: str) -> List[AgentAutonomous]: + """List all autonomous tasks for an agent. + + Args: + agent_id: ID of the agent + + Returns: + List[AgentAutonomous]: List of autonomous task configurations + """ + return await _list_autonomous_tasks(agent_id) + + @staticmethod + async def add_autonomous_task( + agent_id: str, task: AgentAutonomous + ) -> AgentAutonomous: + """Add a new autonomous task to an agent. + + Args: + agent_id: ID of the agent + task: Autonomous task configuration + + Returns: + AgentAutonomous: The created task + """ + return await _add_autonomous_task(agent_id, task) + + @staticmethod + async def delete_autonomous_task(agent_id: str, task_id: str) -> None: + """Delete an autonomous task from an agent. 
+ + Args: + agent_id: ID of the agent + task_id: ID of the task to delete + """ + await _delete_autonomous_task(agent_id, task_id) + + @staticmethod + async def update_autonomous_task( + agent_id: str, task_id: str, task_updates: dict + ) -> AgentAutonomous: + """Update an autonomous task for an agent. + + Args: + agent_id: ID of the agent + task_id: ID of the task to update + task_updates: Dictionary containing fields to update + + Returns: + AgentAutonomous: The updated task + """ + return await _update_autonomous_task(agent_id, task_id, task_updates) + + +skill_store = SkillStore() diff --git a/intentkit/models/agent.py b/intentkit/models/agent.py new file mode 100644 index 00000000..32d468aa --- /dev/null +++ b/intentkit/models/agent.py @@ -0,0 +1,1855 @@ +import json +import logging +import re +import textwrap +from datetime import datetime, timezone +from decimal import Decimal +from pathlib import Path +from typing import Annotated, Any, Dict, List, Literal, Optional + +import jsonref +import yaml +from cron_validator import CronValidator +from epyxid import XID +from fastapi import HTTPException +from intentkit.models.agent_data import AgentData +from intentkit.models.base import Base +from intentkit.models.db import get_session +from intentkit.models.llm import LLMModelInfo, LLMModelInfoTable, LLMProvider +from intentkit.models.skill import SkillTable +from pydantic import BaseModel, ConfigDict, field_validator, model_validator +from pydantic import Field as PydanticField +from pydantic.json_schema import SkipJsonSchema +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Float, + Numeric, + String, + func, + select, +) +from sqlalchemy.dialects.postgresql import JSON, JSONB +from sqlalchemy.ext.asyncio import AsyncSession + +logger = logging.getLogger(__name__) + + +class AgentAutonomous(BaseModel): + """Autonomous agent configuration.""" + + id: Annotated[ + str, + PydanticField( + description="Unique identifier for the autonomous configuration", + default_factory=lambda: str(XID()), + min_length=1, + max_length=20, + pattern=r"^[a-z0-9-]+$", + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + name: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Display name of the autonomous configuration", + max_length=50, + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + description: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Description of the autonomous configuration", + max_length=200, + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + minutes: Annotated[ + Optional[int], + PydanticField( + default=None, + description="Interval in minutes between operations, mutually exclusive with cron", + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + cron: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Cron expression for scheduling operations, mutually exclusive with minutes", + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + prompt: Annotated[ + str, + PydanticField( + description="Special prompt used during autonomous operation", + max_length=20000, + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + enabled: Annotated[ + Optional[bool], + PydanticField( + default=False, + description="Whether the autonomous configuration is enabled", + json_schema_extra={ + "x-group": "autonomous", + }, + ), + ] + + @field_validator("id") + @classmethod + def validate_id(cls, v: str) -> str: + if not v: + raise ValueError("id cannot 
be empty") + if len(v.encode()) > 20: + raise ValueError("id must be at most 20 bytes") + if not re.match(r"^[a-z0-9-]+$", v): + raise ValueError( + "id must contain only lowercase letters, numbers, and dashes" + ) + return v + + @field_validator("name") + @classmethod + def validate_name(cls, v: Optional[str]) -> Optional[str]: + if v is not None and len(v.encode()) > 50: + raise ValueError("name must be at most 50 bytes") + return v + + @field_validator("description") + @classmethod + def validate_description(cls, v: Optional[str]) -> Optional[str]: + if v is not None and len(v.encode()) > 200: + raise ValueError("description must be at most 200 bytes") + return v + + @field_validator("prompt") + @classmethod + def validate_prompt(cls, v: Optional[str]) -> Optional[str]: + if v is not None and len(v.encode()) > 20000: + raise ValueError("prompt must be at most 20000 bytes") + return v + + @model_validator(mode="after") + def validate_schedule(self) -> "AgentAutonomous": + # This validator is kept for backward compatibility + # The actual validation now happens in AgentUpdate.validate_autonomous_schedule + return self + + +class AgentExample(BaseModel): + """Agent example configuration.""" + + name: Annotated[ + str, + PydanticField( + description="Name of the example", + max_length=50, + json_schema_extra={ + "x-group": "examples", + }, + ), + ] + description: Annotated[ + str, + PydanticField( + description="Description of the example", + max_length=200, + json_schema_extra={ + "x-group": "examples", + }, + ), + ] + prompt: Annotated[ + str, + PydanticField( + description="Example prompt", + max_length=2000, + json_schema_extra={ + "x-group": "examples", + }, + ), + ] + + +class AgentTable(Base): + """Agent table db model.""" + + __tablename__ = "agents" + + id = Column( + String, + primary_key=True, + comment="Unique identifier for the agent. 
Must be URL-safe, containing only lowercase letters, numbers, and hyphens", + ) + name = Column( + String, + nullable=True, + comment="Display name of the agent", + ) + slug = Column( + String, + nullable=True, + comment="Slug of the agent, used for URL generation", + ) + description = Column( + String, + nullable=True, + comment="Description of the agent, for public view, not contained in prompt", + ) + external_website = Column( + String, + nullable=True, + comment="Link of external website of the agent, if you have one", + ) + picture = Column( + String, + nullable=True, + comment="Picture of the agent", + ) + ticker = Column( + String, + nullable=True, + comment="Ticker symbol of the agent", + ) + token_address = Column( + String, + nullable=True, + comment="Token address of the agent", + ) + token_pool = Column( + String, + nullable=True, + comment="Pool of the agent token", + ) + mode = Column( + String, + nullable=True, + comment="Mode of the agent, public or private", + ) + fee_percentage = Column( + Numeric(22, 4), + nullable=True, + comment="Fee percentage of the agent", + ) + purpose = Column( + String, + nullable=True, + comment="Purpose or role of the agent", + ) + personality = Column( + String, + nullable=True, + comment="Personality traits of the agent", + ) + principles = Column( + String, + nullable=True, + comment="Principles or values of the agent", + ) + owner = Column( + String, + nullable=True, + comment="Owner identifier of the agent, used for access control", + ) + upstream_id = Column( + String, + index=True, + nullable=True, + comment="Upstream reference ID for idempotent operations", + ) + upstream_extra = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + comment="Additional data store for upstream use", + ) + wallet_provider = Column( + String, + nullable=True, + comment="Provider of the agent's wallet", + ) + readonly_wallet_address = Column( + String, + nullable=True, + comment="Readonly wallet address of the agent", + ) + network_id = Column( + String, + nullable=True, + default="base-mainnet", + comment="Network identifier", + ) + # AI part + model = Column( + String, + nullable=True, + default="gpt-5-mini", + comment="AI model identifier to be used by this agent for processing requests. Available models: gpt-4o, gpt-4o-mini, deepseek-chat, deepseek-reasoner, grok-2, eternalai", + ) + prompt = Column( + String, + nullable=True, + comment="Base system prompt that defines the agent's behavior and capabilities", + ) + prompt_append = Column( + String, + nullable=True, + comment="Additional system prompt that has higher priority than the base prompt", + ) + temperature = Column( + Float, + nullable=True, + default=0.7, + comment="Controls response randomness (0.0~2.0). Higher values increase creativity but may reduce accuracy. For rigorous tasks, use lower values.", + ) + frequency_penalty = Column( + Float, + nullable=True, + default=0.0, + comment="Controls repetition in responses (-2.0~2.0). Higher values reduce repetition, lower values allow more repetition.", + ) + presence_penalty = Column( + Float, + nullable=True, + default=0.0, + comment="Controls topic adherence (-2.0~2.0). Higher values allow more topic deviation, lower values enforce stricter topic adherence.", + ) + short_term_memory_strategy = Column( + String, + nullable=True, + default="trim", + comment="Strategy for managing short-term memory when context limit is reached. 
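The JSON columns in this table use SQLAlchemy's dialect-variant pattern, so the same model maps to `JSONB` on PostgreSQL and to generic `JSON` elsewhere. A self-contained sketch against in-memory SQLite (the `Example` model is hypothetical):

```python
from sqlalchemy import JSON, Column, String, create_engine
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import DeclarativeBase, Session


class Base(DeclarativeBase):
    pass


class Example(Base):
    __tablename__ = "examples"
    id = Column(String, primary_key=True)
    # JSONB on PostgreSQL, generic JSON on every other dialect
    payload = Column(JSON().with_variant(JSONB(), "postgresql"), nullable=True)


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
    session.add(Example(id="a", payload={"skills": {"openai": {"enabled": True}}}))
    session.commit()
    print(session.get(Example, "a").payload)
```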
'trim' removes oldest messages, 'summarize' creates summaries.", + ) + # autonomous mode + autonomous = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + comment="Autonomous agent configurations", + ) + # agent examples + example_intro = Column( + String, + nullable=True, + comment="Introduction for example interactions", + ) + examples = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + comment="List of example interactions for the agent", + ) + # skills + skills = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + comment="Dict of skills and their corresponding configurations", + ) + + cdp_network_id = Column( + String, + nullable=True, + default="base-mainnet", + comment="Network identifier for CDP integration", + ) + # if telegram_entrypoint_enabled, the telegram_entrypoint_enabled will be enabled, telegram_config will be checked + telegram_entrypoint_enabled = Column( + Boolean, + nullable=True, + default=False, + comment="Whether the agent can receive events from Telegram", + ) + telegram_entrypoint_prompt = Column( + String, + nullable=True, + comment="Extra prompt for telegram entrypoint", + ) + telegram_config = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + comment="Telegram integration configuration settings", + ) + xmtp_entrypoint_prompt = Column( + String, + nullable=True, + comment="Extra prompt for xmtp entrypoint", + ) + # auto timestamp + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + comment="Timestamp when the agent was created", + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + comment="Timestamp when the agent was last updated", + ) + + +class AgentUpdate(BaseModel): + """Agent update model.""" + + model_config = ConfigDict( + title="Agent", + from_attributes=True, + json_schema_extra={ + "required": ["name", "purpose", "personality", "principles"], + }, + ) + + name: Annotated[ + Optional[str], + PydanticField( + default=None, + title="Name", + description="Display name of the agent", + max_length=50, + json_schema_extra={ + "x-group": "basic", + "x-placeholder": "Name your agent", + }, + ), + ] + slug: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Slug of the agent, used for URL generation", + max_length=30, + min_length=2, + json_schema_extra={ + "x-group": "internal", + "readOnly": True, + }, + ), + ] + description: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Description of the agent, for public view, not contained in prompt", + json_schema_extra={ + "x-group": "basic", + "x-placeholder": "Introduce your agent", + }, + ), + ] + external_website: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Link of external website of the agent, if you have one", + json_schema_extra={ + "x-group": "basic", + "x-placeholder": "Enter agent external website url", + "format": "uri", + }, + ), + ] + picture: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Picture of the agent", + json_schema_extra={ + "x-group": "experimental", + "x-placeholder": "Upload a picture of your agent", + }, + ), + ] + ticker: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Ticker symbol of the agent", + max_length=10, + min_length=1, + json_schema_extra={ + "x-group": "basic", + "x-placeholder": "If one day, your agent has 
its own token, what will it be?",
+            },
+        ),
+    ]
+    token_address: Annotated[
+        Optional[str],
+        PydanticField(
+            default=None,
+            description="Token address of the agent",
+            max_length=42,
+            json_schema_extra={
+                "x-group": "internal",
+                "readOnly": True,
+            },
+        ),
+    ]
+    token_pool: Annotated[
+        Optional[str],
+        PydanticField(
+            default=None,
+            description="Pool of the agent token",
+            max_length=42,
+            json_schema_extra={
+                "x-group": "internal",
+                "readOnly": True,
+            },
+        ),
+    ]
+    mode: Annotated[
+        Optional[Literal["public", "private"]],
+        PydanticField(
+            default=None,
+            description="Mode of the agent, public or private",
+            json_schema_extra={
+                "x-group": "basic",
+            },
+        ),
+    ]
+    fee_percentage: Annotated[
+        Optional[Decimal],
+        PydanticField(
+            default=None,
+            description="Fee percentage of the agent",
+            ge=Decimal("0.0"),
+            json_schema_extra={
+                "x-group": "basic",
+            },
+        ),
+    ]
+    purpose: Annotated[
+        Optional[str],
+        PydanticField(
+            default=None,
+            description="Purpose or role of the agent",
+            max_length=20000,
+            json_schema_extra={
+                "x-group": "basic",
+                "x-placeholder": "Enter agent purpose, it will be a part of the system prompt",
+                "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$",
+                "errorMessage": {
+                    "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead."
+                },
+            },
+        ),
+    ]
+    personality: Annotated[
+        Optional[str],
+        PydanticField(
+            default=None,
+            description="Personality traits of the agent",
+            max_length=20000,
+            json_schema_extra={
+                "x-group": "basic",
+                "x-placeholder": "Enter agent personality, it will be a part of the system prompt",
+                "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$",
+                "errorMessage": {
+                    "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead."
+                },
+            },
+        ),
+    ]
+    principles: Annotated[
+        Optional[str],
+        PydanticField(
+            default=None,
+            description="Principles or values of the agent",
+            max_length=20000,
+            json_schema_extra={
+                "x-group": "basic",
+                "x-placeholder": "Enter agent principles, it will be a part of the system prompt",
+                "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$",
+                "errorMessage": {
+                    "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead."
+ }, + }, + ), + ] + owner: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Owner identifier of the agent, used for access control", + max_length=50, + json_schema_extra={ + "x-group": "internal", + }, + ), + ] + upstream_id: Annotated[ + Optional[str], + PydanticField( + default=None, + description="External reference ID for idempotent operations", + max_length=100, + json_schema_extra={ + "x-group": "internal", + }, + ), + ] + upstream_extra: Annotated[ + Optional[Dict[str, Any]], + PydanticField( + default=None, + description="Additional data store for upstream use", + json_schema_extra={ + "x-group": "internal", + }, + ), + ] + # AI part + model: Annotated[ + str, + PydanticField( + default="gpt-5-mini", + description="AI model identifier to be used by this agent for processing requests.", + json_schema_extra={ + "x-group": "ai", + }, + ), + ] + prompt: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Base system prompt that defines the agent's behavior and capabilities", + max_length=20000, + json_schema_extra={ + "x-group": "ai", + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." + }, + }, + ), + ] + prompt_append: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Additional system prompt that has higher priority than the base prompt", + max_length=20000, + json_schema_extra={ + "x-group": "ai", + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." + }, + }, + ), + ] + temperature: Annotated[ + Optional[float], + PydanticField( + default=0.7, + description="The randomness of the generated results is such that the higher the number, the more creative the results will be. However, this also makes them wilder and increases the likelihood of errors. For creative tasks, you can adjust it to above 1, but for rigorous tasks, such as quantitative trading, it's advisable to set it lower, around 0.2. (0.0~2.0)", + ge=0.0, + le=2.0, + json_schema_extra={ + "x-group": "ai", + }, + ), + ] + frequency_penalty: Annotated[ + Optional[float], + PydanticField( + default=0.0, + description="The frequency penalty is a measure of how much the AI is allowed to repeat itself. A lower value means the AI is more likely to repeat previous responses, while a higher value means the AI is more likely to generate new content. For creative tasks, you can adjust it to 1 or a bit higher. (-2.0~2.0)", + ge=-2.0, + le=2.0, + json_schema_extra={ + "x-group": "ai", + }, + ), + ] + presence_penalty: Annotated[ + Optional[float], + PydanticField( + default=0.0, + description="The presence penalty is a measure of how much the AI is allowed to deviate from the topic. A higher value means the AI is more likely to deviate from the topic, while a lower value means the AI is more likely to follow the topic. For creative tasks, you can adjust it to 1 or a bit higher. (-2.0~2.0)", + ge=-2.0, + le=2.0, + json_schema_extra={ + "x-group": "ai", + }, + ), + ] + short_term_memory_strategy: Annotated[ + Optional[Literal["trim", "summarize"]], + PydanticField( + default="trim", + description="Strategy for managing short-term memory when context limit is reached. 
'trim' removes oldest messages, 'summarize' creates summaries.", + json_schema_extra={ + "x-group": "ai", + }, + ), + ] + # autonomous mode + autonomous: Annotated[ + Optional[List[AgentAutonomous]], + PydanticField( + default=None, + description=( + "Autonomous agent configurations.\n" + "autonomous:\n" + " - id: a\n" + " name: TestA\n" + " minutes: 1\n" + " prompt: |-\n" + " Say hello [sequence], use number for sequence.\n" + " - id: b\n" + " name: TestB\n" + ' cron: "0/3 * * * *"\n' + " prompt: |-\n" + " Say hi [sequence], use number for sequence.\n" + ), + json_schema_extra={ + "x-group": "autonomous", + "x-inline": True, + }, + ), + ] + example_intro: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Introduction of the example", + max_length=2000, + json_schema_extra={ + "x-group": "examples", + }, + ), + ] + examples: Annotated[ + Optional[List[AgentExample]], + PydanticField( + default=None, + description="List of example prompts for the agent", + max_length=6, + json_schema_extra={ + "x-group": "examples", + "x-inline": True, + }, + ), + ] + # skills + skills: Annotated[ + Optional[Dict[str, Any]], + PydanticField( + default=None, + description="Dict of skills and their corresponding configurations", + json_schema_extra={ + "x-group": "skills", + "x-inline": True, + }, + ), + ] + wallet_provider: Annotated[ + Optional[Literal["cdp", "readonly"]], + PydanticField( + default="cdp", + description="Provider of the agent's wallet", + json_schema_extra={ + "x-group": "onchain", + }, + ), + ] + readonly_wallet_address: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Address of the agent's wallet, only used when wallet_provider is readonly. Agent will not be able to sign transactions.", + ), + ] + network_id: Annotated[ + Optional[ + Literal[ + "ethereum-mainnet", + "ethereum-sepolia", + "polygon-mainnet", + "polygon-mumbai", + "base-mainnet", + "base-sepolia", + "arbitrum-mainnet", + "arbitrum-sepolia", + "optimism-mainnet", + "optimism-sepolia", + "solana", + ] + ], + PydanticField( + default="base-mainnet", + description="Network identifier", + json_schema_extra={ + "x-group": "onchain", + }, + ), + ] + cdp_network_id: Annotated[ + Optional[ + Literal[ + "ethereum-mainnet", + "ethereum-sepolia", + "polygon-mainnet", + "polygon-mumbai", + "base-mainnet", + "base-sepolia", + "arbitrum-mainnet", + "arbitrum-sepolia", + "optimism-mainnet", + "optimism-sepolia", + ] + ], + PydanticField( + default="base-mainnet", + description="Network identifier for CDP integration", + json_schema_extra={ + "x-group": "deprecated", + }, + ), + ] + # if telegram_entrypoint_enabled, the telegram_entrypoint_enabled will be enabled, telegram_config will be checked + telegram_entrypoint_enabled: Annotated[ + Optional[bool], + PydanticField( + default=False, + description="Whether the agent can play telegram bot", + json_schema_extra={ + "x-group": "entrypoint", + }, + ), + ] + telegram_entrypoint_prompt: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Extra prompt for telegram entrypoint", + max_length=10000, + json_schema_extra={ + "x-group": "entrypoint", + }, + ), + ] + telegram_config: Annotated[ + Optional[dict], + PydanticField( + default=None, + description="Telegram integration configuration settings", + json_schema_extra={ + "x-group": "entrypoint", + }, + ), + ] + xmtp_entrypoint_prompt: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Extra prompt for xmtp entrypoint, xmtp support is in beta", 
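The `pattern` constraints above (and the field validator defined just below) all enforce the same rule: level 1 and 2 markdown headings are reserved, so agent-supplied text may only use level 3+ headings. A quick stdlib check mirroring the validator:

```python
import re


def has_forbidden_heading(text: str) -> bool:
    """True if any line starts with a level 1 or level 2 markdown heading."""
    return re.search(r"^(# |## )", text, re.MULTILINE) is not None


print(has_forbidden_heading("### Trading rules\nbuy low, sell high"))  # False
print(has_forbidden_heading("## Overview\nsome prompt"))  # True
```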
+            max_length=10000,
+            json_schema_extra={
+                "x-group": "entrypoint",
+            },
+        ),
+    ]
+
+    @field_validator("purpose", "personality", "principles", "prompt", "prompt_append")
+    @classmethod
+    def validate_no_level1_level2_headings(cls, v: Optional[str]) -> Optional[str]:
+        """Validate that the text doesn't contain level 1 or level 2 headings."""
+        if v is None:
+            return v
+
+        # Check if any line starts with # or ## followed by a space
+        if re.search(r"^(# |## )", v, re.MULTILINE):
+            raise ValueError(
+                "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead."
+            )
+        return v
+
+    def validate_autonomous_schedule(self) -> None:
+        """Validate the schedule settings for autonomous configurations.
+
+        This validation ensures:
+        1. Only one scheduling method (minutes or cron) is set per autonomous config
+        2. The minimum interval is 5 minutes for both types of schedules
+        """
+        if not self.autonomous:
+            return
+
+        for autonomous_config in self.autonomous:
+            # Check that exactly one scheduling method is provided
+            if not autonomous_config.minutes and not autonomous_config.cron:
+                raise HTTPException(
+                    status_code=400, detail="either minutes or cron must have a value"
+                )
+
+            if autonomous_config.minutes and autonomous_config.cron:
+                raise HTTPException(
+                    status_code=400, detail="only one of minutes or cron can be set"
+                )
+
+            # Validate minimum interval of 5 minutes
+            if autonomous_config.minutes and autonomous_config.minutes < 5:
+                raise HTTPException(
+                    status_code=400,
+                    detail="The shortest execution interval is 5 minutes",
+                )
+
+            # Validate cron expression to ensure interval is at least 5 minutes
+            if autonomous_config.cron:
+                # First validate the cron expression format using cron-validator
+                try:
+                    CronValidator.parse(autonomous_config.cron)
+                except ValueError:
+                    raise HTTPException(
+                        status_code=400,
+                        detail=f"Invalid cron expression format: {autonomous_config.cron}",
+                    )
+
+                parts = autonomous_config.cron.split()
+                if len(parts) < 5:
+                    raise HTTPException(
+                        status_code=400, detail="Invalid cron expression format"
+                    )
+
+                minute, hour, day_of_month, month, day_of_week = parts[:5]
+
+                # Check if minutes or hours have too frequent intervals
+                if minute == "*" and hour == "*":
+                    # If both minute and hour are wildcards, it would run every minute
+                    raise HTTPException(
+                        status_code=400,
+                        detail="The shortest execution interval is 5 minutes",
+                    )
+
+                if "/" in minute:
+                    # Check step value in minute field (e.g., */15)
+                    step = int(minute.split("/")[1])
+                    if step < 5 and hour == "*":
+                        raise HTTPException(
+                            status_code=400,
+                            detail="The shortest execution interval is 5 minutes",
+                        )
+
+                # Check for comma-separated values or ranges that might result in multiple executions per hour
+                if ("," in minute or "-" in minute) and hour == "*":
+                    raise HTTPException(
+                        status_code=400,
+                        detail="The shortest execution interval is 5 minutes",
+                    )
+
+    async def update(self, id: str) -> "Agent":
+        # Validate autonomous schedule settings if present
+        if "autonomous" in self.model_dump(exclude_unset=True):
+            self.validate_autonomous_schedule()
+
+        async with get_session() as db:
+            db_agent = await db.get(AgentTable, id)
+            if not db_agent:
+                raise HTTPException(status_code=404, detail="Agent not found")
+            # check owner
+            if self.owner and db_agent.owner != self.owner:
+                raise HTTPException(
+                    status_code=403,
+                    detail="You do not have permission to update this agent",
+                )
+            # update
+            for key, value in 
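A stdlib-only sketch of the 5-minute floor enforced by the schedule validator above (after the wildcard check is narrowed to an exact `"*"` match so that steps like `*/15` are not rejected); the real validator additionally verifies the expression format with `cron-validator`, and the cron strings here are examples:

```python
def violates_five_minute_floor(minute: str, hour: str) -> bool:
    if minute == "*" and hour == "*":
        return True  # would run every minute
    if "/" in minute and hour == "*" and int(minute.split("/")[1]) < 5:
        return True  # e.g. "*/3 * * * *" runs every 3 minutes
    if ("," in minute or "-" in minute) and hour == "*":
        return True  # several runs per hour whose spacing is not verified
    return False


for cron in ("* * * * *", "*/3 * * * *", "*/15 * * * *", "0 * * * *"):
    minute, hour, *_ = cron.split()
    print(cron, "->", "rejected" if violates_five_minute_floor(minute, hour) else "ok")
```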
self.model_dump(exclude_unset=True).items(): + setattr(db_agent, key, value) + await db.commit() + await db.refresh(db_agent) + return Agent.model_validate(db_agent) + + async def override(self, id: str) -> "Agent": + # Validate autonomous schedule settings if present + if "autonomous" in self.model_dump(exclude_unset=True): + self.validate_autonomous_schedule() + + async with get_session() as db: + db_agent = await db.get(AgentTable, id) + if not db_agent: + raise HTTPException(status_code=404, detail="Agent not found") + # check owner + if db_agent.owner and db_agent.owner != self.owner: + raise HTTPException( + status_code=403, + detail="You do not have permission to update this agent", + ) + # update + for key, value in self.model_dump().items(): + setattr(db_agent, key, value) + await db.commit() + await db.refresh(db_agent) + return Agent.model_validate(db_agent) + + +class AgentCreate(AgentUpdate): + """Agent create model.""" + + id: Annotated[ + str, + PydanticField( + default_factory=lambda: str(XID()), + description="Unique identifier for the agent. Must be URL-safe, containing only lowercase letters, numbers, and hyphens", + pattern=r"^[a-z][a-z0-9-]*$", + min_length=2, + max_length=67, + ), + ] + + async def check_upstream_id(self) -> None: + if not self.upstream_id: + return None + async with get_session() as db: + existing = await db.scalar( + select(AgentTable).where(AgentTable.upstream_id == self.upstream_id) + ) + if existing: + raise HTTPException( + status_code=400, + detail="Upstream id already in use", + ) + + async def get_by_upstream_id(self) -> Optional["Agent"]: + if not self.upstream_id: + return None + async with get_session() as db: + existing = await db.scalar( + select(AgentTable).where(AgentTable.upstream_id == self.upstream_id) + ) + if existing: + return Agent.model_validate(existing) + return None + + async def create(self) -> "Agent": + # Validate autonomous schedule settings if present + if self.autonomous: + self.validate_autonomous_schedule() + + async with get_session() as db: + db_agent = AgentTable(**self.model_dump()) + db.add(db_agent) + await db.commit() + await db.refresh(db_agent) + return Agent.model_validate(db_agent) + + async def create_or_update(self) -> ("Agent", bool): + # Validation is now handled by field validators + await self.check_upstream_id() + + # Validate autonomous schedule settings if present + if self.autonomous: + self.validate_autonomous_schedule() + + is_new = False + async with get_session() as db: + db_agent = await db.get(AgentTable, self.id) + if not db_agent: + db_agent = AgentTable(**self.model_dump()) + db.add(db_agent) + is_new = True + else: + # check owner + if self.owner and db_agent.owner != self.owner: + raise HTTPException( + status_code=403, + detail="You do not have permission to update this agent", + ) + for key, value in self.model_dump(exclude_unset=True).items(): + setattr(db_agent, key, value) + await db.commit() + await db.refresh(db_agent) + return Agent.model_validate(db_agent), is_new + + +class Agent(AgentCreate): + """Agent model.""" + + model_config = ConfigDict(from_attributes=True) + + # auto timestamp + created_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when the agent was created, will ignore when importing" + ), + ] + updated_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when the agent was last updated, will ignore when importing" + ), + ] + + def has_image_parser_skill(self, is_private: bool = False) -> bool: + if self.skills: + for skill, 
skill_config in self.skills.items(): + if skill == "openai" and skill_config.get("enabled"): + states = skill_config.get("states", {}) + if is_private: + # Include both private and public when is_private=True + if states.get("image_to_text") in ["private", "public"]: + return True + if states.get("gpt_image_to_image") in ["private", "public"]: + return True + else: + # Only public when is_private=False + if states.get("image_to_text") in ["public"]: + return True + if states.get("gpt_image_to_image") in ["public"]: + return True + return False + + async def is_model_support_image(self) -> bool: + model = await LLMModelInfo.get(self.model) + return model.supports_image_input + + def to_yaml(self) -> str: + """ + Dump the agent model to YAML format with field descriptions as comments. + The comments are extracted from the field descriptions in the model. + Fields annotated with SkipJsonSchema will be excluded from the output. + Only fields from AgentUpdate model are included. + Deprecated fields with None or empty values are skipped. + + Returns: + str: YAML representation of the agent with field descriptions as comments + """ + data = {} + yaml_lines = [] + + def wrap_text(text: str, width: int = 80, prefix: str = "# ") -> list[str]: + """Wrap text to specified width, preserving existing line breaks.""" + lines = [] + for paragraph in text.split("\n"): + if not paragraph: + lines.append(prefix.rstrip()) + continue + # Use textwrap to wrap each paragraph + wrapped = textwrap.wrap(paragraph, width=width - len(prefix)) + lines.extend(prefix + line for line in wrapped) + return lines + + # Get the field names from AgentUpdate model for filtering + agent_update_fields = set(AgentUpdate.model_fields.keys()) + + for field_name, field in self.model_fields.items(): + logger.debug(f"Processing field {field_name} with type {field.metadata}") + # Skip fields that are not in AgentUpdate model + if field_name not in agent_update_fields: + continue + + # Skip fields with SkipJsonSchema annotation + if any(isinstance(item, SkipJsonSchema) for item in field.metadata): + continue + + value = getattr(self, field_name) + + # Skip deprecated fields with None or empty values + is_deprecated = hasattr(field, "deprecated") and field.deprecated + if is_deprecated and not value: + continue + + data[field_name] = value + # Add comment from field description if available + description = field.description + if description: + if len(yaml_lines) > 0: # Add blank line between fields + yaml_lines.append("") + # Split and wrap description into multiple lines + yaml_lines.extend(wrap_text(description)) + + # Check if the field is deprecated and add deprecation notice + if is_deprecated: + # Add deprecation message + if hasattr(field, "deprecation_message") and field.deprecation_message: + yaml_lines.extend( + wrap_text(f"Deprecated: {field.deprecation_message}") + ) + else: + yaml_lines.append("# Deprecated") + + # Check if the field is experimental and add experimental notice + if hasattr(field, "json_schema_extra") and field.json_schema_extra: + if field.json_schema_extra.get("x-group") == "experimental": + yaml_lines.append("# Experimental") + + # Format the value based on its type + if value is None: + yaml_lines.append(f"{field_name}: null") + elif isinstance(value, str): + if "\n" in value or len(value) > 60: + # Use block literal style (|) for multiline strings + # Remove any existing escaped newlines and use actual line breaks + value = value.replace("\\n", "\n") + yaml_value = f"{field_name}: |-\n" + # Indent each 
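The visibility rule in `has_image_parser_skill` reduces to: private mode may use both `private` and `public` states, public mode only `public`. A condensed, hypothetical illustration:

```python
def state_visible(state: str, is_private: bool) -> bool:
    # Private callers also see "private" states; public callers only "public"
    return state in (("private", "public") if is_private else ("public",))


skills = {"openai": {"enabled": True, "states": {"image_to_text": "private"}}}
for is_private in (False, True):
    skill = skills["openai"]
    ok = skill["enabled"] and any(
        state_visible(state, is_private) for state in skill["states"].values()
    )
    print(f"is_private={is_private} -> {ok}")
```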
line with 2 spaces + yaml_value += "\n".join(f" {line}" for line in value.split("\n")) + yaml_lines.append(yaml_value) + else: + # Use flow style for short strings + yaml_value = yaml.dump( + {field_name: value}, + default_flow_style=False, + allow_unicode=True, # This ensures emojis are preserved + ) + yaml_lines.append(yaml_value.rstrip()) + elif isinstance(value, list) and value and hasattr(value[0], "model_dump"): + # Handle list of Pydantic models (e.g., List[AgentAutonomous]) + yaml_lines.append(f"{field_name}:") + # Convert each Pydantic model to dict + model_dicts = [item.model_dump(exclude_none=True) for item in value] + # Dump the list of dicts + yaml_value = yaml.dump( + model_dicts, default_flow_style=False, allow_unicode=True + ) + # Indent all lines and append to yaml_lines + indented_yaml = "\n".join( + f" {line}" for line in yaml_value.split("\n") + ) + yaml_lines.append(indented_yaml.rstrip()) + elif hasattr(value, "model_dump"): + # Handle individual Pydantic model + model_dict = value.model_dump(exclude_none=True) + yaml_value = yaml.dump( + {field_name: model_dict}, + default_flow_style=False, + allow_unicode=True, + ) + yaml_lines.append(yaml_value.rstrip()) + else: + # Handle Decimal values specifically + if isinstance(value, Decimal): + # Convert Decimal to string to avoid !!python/object/apply:decimal.Decimal serialization + yaml_lines.append(f"{field_name}: {value}") + else: + # Handle other non-string values + yaml_value = yaml.dump( + {field_name: value}, + default_flow_style=False, + allow_unicode=True, + ) + yaml_lines.append(yaml_value.rstrip()) + + return "\n".join(yaml_lines) + "\n" + + @staticmethod + async def count() -> int: + async with get_session() as db: + return await db.scalar(select(func.count(AgentTable.id))) + + @classmethod + async def get(cls, agent_id: str) -> Optional["Agent"]: + async with get_session() as db: + item = await db.scalar(select(AgentTable).where(AgentTable.id == agent_id)) + if item is None: + return None + return cls.model_validate(item) + + def skill_config(self, category: str) -> Dict[str, Any]: + return self.skills.get(category, {}) if self.skills else {} + + @staticmethod + def _is_agent_owner_only_skill(skill_schema: Dict[str, Any]) -> bool: + """Check if a skill requires agent owner API keys only based on its resolved schema.""" + if ( + skill_schema + and "properties" in skill_schema + and "api_key_provider" in skill_schema["properties"] + ): + api_key_provider = skill_schema["properties"]["api_key_provider"] + if "enum" in api_key_provider and api_key_provider["enum"] == [ + "agent_owner" + ]: + return True + return False + + @classmethod + async def get_json_schema( + cls, + db: AsyncSession = None, + filter_owner_api_skills: bool = False, + admin_llm_skill_control: bool = True, + ) -> Dict: + """Get the JSON schema for Agent model with all $ref references resolved. + + This is the shared function that handles admin configuration filtering + for both the API endpoint and agent generation. 
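A minimal sketch of the commented-YAML idea behind `to_yaml`, using a toy pydantic model (`Toy` and its fields are made up) rather than the full `Agent`:

```python
import textwrap

import yaml
from pydantic import BaseModel, Field


class Toy(BaseModel):
    name: str = Field(description="Display name of the agent")
    purpose: str = Field(description="Purpose or role of the agent")


def to_commented_yaml(model: BaseModel) -> str:
    lines: list[str] = []
    for field_name, field in type(model).model_fields.items():
        if field.description:
            # Emit the field description as wrapped "#" comments above the value
            lines.extend(
                f"# {line}" for line in textwrap.wrap(field.description, width=78)
            )
        value = getattr(model, field_name)
        lines.append(yaml.dump({field_name: value}, allow_unicode=True).rstrip())
        lines.append("")
    return "\n".join(lines)


print(to_commented_yaml(Toy(name="Abbot", purpose="Answer onchain questions")))
```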
+ + Args: + db: Database session (optional, will create if not provided) + filter_owner_api_skills: Whether to filter out skills that require agent owner API keys + admin_llm_skill_control: Whether to enable admin LLM and skill control features + + Returns: + Dict containing the complete JSON schema for the Agent model + """ + # Get database session if not provided + if db is None: + async with get_session() as session: + return await cls.get_json_schema( + session, filter_owner_api_skills, admin_llm_skill_control + ) + + # Get the schema file path relative to this file + current_dir = Path(__file__).parent + agent_schema_path = current_dir / "agent_schema.json" + + base_uri = f"file://{agent_schema_path}" + with open(agent_schema_path) as f: + schema = jsonref.load(f, base_uri=base_uri, proxies=False, lazy_load=False) + + # Get the model property from the schema + model_property = schema.get("properties", {}).get("model", {}) + + if admin_llm_skill_control: + # Process model property - use LLMModelInfo as primary source + if model_property: + # Query all LLM models from the database + stmt = select(LLMModelInfoTable).where(LLMModelInfoTable.enabled) + result = await db.execute(stmt) + models = result.scalars().all() + + # Create new lists based on LLMModelInfo + new_enum = [] + new_enum_title = [] + new_enum_category = [] + new_enum_support_skill = [] + + # Process each model from database + for model in models: + model_info = LLMModelInfo.model_validate(model) + + # Add model ID to enum + new_enum.append(model_info.id) + + # Add model name as title + new_enum_title.append(model_info.name) + + # Add provider display name as category + provider = ( + LLMProvider(model_info.provider) + if isinstance(model_info.provider, str) + else model_info.provider + ) + new_enum_category.append(provider.display_name()) + + # Add skill support information + new_enum_support_skill.append(model_info.supports_skill_calls) + + # Update the schema with the new lists constructed from LLMModelInfo + model_property["enum"] = new_enum + model_property["x-enum-title"] = new_enum_title + model_property["x-enum-category"] = new_enum_category + model_property["x-support-skill"] = new_enum_support_skill + + # If the default model is not in the new enum, update it if possible + if ( + "default" in model_property + and model_property["default"] not in new_enum + and new_enum + ): + model_property["default"] = new_enum[0] + + # Process skills property + skills_property = schema.get("properties", {}).get("skills", {}) + skills_properties = skills_property.get("properties", {}) + + if skills_properties: + # Load all skills from the database + # Query all skills grouped by category with enabled status + stmt = select( + SkillTable.category, + func.bool_or(SkillTable.enabled).label("any_enabled"), + ).group_by(SkillTable.category) + result = await db.execute(stmt) + category_status = {row.category: row.any_enabled for row in result} + + # Query all skills with their price levels for adding x-price-level fields + skills_stmt = select( + SkillTable.category, + SkillTable.config_name, + SkillTable.price_level, + SkillTable.enabled, + ).where(SkillTable.enabled) + skills_result = await db.execute(skills_stmt) + skills_data = {} + category_price_levels = {} + + for row in skills_result: + if row.category not in skills_data: + skills_data[row.category] = {} + category_price_levels[row.category] = [] + + if row.config_name: + skills_data[row.category][row.config_name] = row.price_level + + if row.price_level is not None: + 
category_price_levels[row.category].append(row.price_level) + + # Calculate average price levels for categories + category_avg_price_levels = {} + for category, price_levels in category_price_levels.items(): + if price_levels: + avg_price_level = int(sum(price_levels) / len(price_levels)) + category_avg_price_levels[category] = avg_price_level + + # Create a copy of keys to avoid modifying during iteration + skill_keys = list(skills_properties.keys()) + + # Process each skill in the schema + for skill_category in skill_keys: + if skill_category not in category_status: + # If category not found in database, remove it from schema + skills_properties.pop(skill_category, None) + elif not category_status[skill_category]: + # If category exists but all skills are disabled, remove it + skills_properties.pop(skill_category, None) + elif filter_owner_api_skills and cls._is_agent_owner_only_skill( + skills_properties[skill_category] + ): + # If filtering owner API skills and this skill requires it, remove it + skills_properties.pop(skill_category, None) + logger.info( + f"Filtered out skill '{skill_category}' from auto-generation: requires agent owner API key" + ) + else: + # Add x-avg-price-level to category level + if skill_category in category_avg_price_levels: + skills_properties[skill_category][ + "x-avg-price-level" + ] = category_avg_price_levels[skill_category] + + # Add x-price-level to individual skill states + if skill_category in skills_data: + skill_states = ( + skills_properties[skill_category] + .get("properties", {}) + .get("states", {}) + .get("properties", {}) + ) + for state_name, state_config in skill_states.items(): + if ( + state_name in skills_data[skill_category] + and skills_data[skill_category][state_name] + is not None + ): + state_config["x-price-level"] = skills_data[ + skill_category + ][state_name] + + # Log the changes for debugging + logger.debug( + f"Schema processed with LLM and skill controls enabled: {admin_llm_skill_control}, " + f"filtered owner API skills: {filter_owner_api_skills}" + ) + + return schema + + +class AgentResponse(BaseModel): + """Response model for Agent API.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={ + datetime: lambda dt: dt.isoformat(), + }, + ) + + id: Annotated[ + str, + PydanticField( + description="Unique identifier for the agent. 
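The category average computed above reduces to a small aggregation; with made-up rows of `(category, config_name, price_level)`:

```python
rows = [
    ("twitter", "post_tweet", 3),
    ("twitter", "search", 1),
    ("openai", "image_to_text", 4),
]

category_price_levels: dict[str, list[int]] = {}
for category, _config_name, price_level in rows:
    category_price_levels.setdefault(category, []).append(price_level)

# Integer average per category, as in the schema code above
category_avg_price_levels = {
    category: int(sum(levels) / len(levels))
    for category, levels in category_price_levels.items()
}
print(category_avg_price_levels)  # {'twitter': 2, 'openai': 4}
```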
Must be URL-safe, containing only lowercase letters, numbers, and hyphens", + ), + ] + # auto timestamp + created_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when the agent was created, will ignore when importing" + ), + ] + updated_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when the agent was last updated, will ignore when importing" + ), + ] + # Agent part + name: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Display name of the agent", + ), + ] + slug: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Slug of the agent, used for URL generation", + ), + ] + description: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Description of the agent, for public view, not contained in prompt", + ), + ] + external_website: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Link of external website of the agent, if you have one", + ), + ] + picture: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Picture of the agent", + ), + ] + ticker: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Ticker symbol of the agent", + ), + ] + token_address: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Token address of the agent", + ), + ] + token_pool: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Pool of the agent token", + ), + ] + mode: Annotated[ + Optional[Literal["public", "private"]], + PydanticField( + default=None, + description="Mode of the agent, public or private", + ), + ] + fee_percentage: Annotated[ + Optional[Decimal], + PydanticField( + default=None, + description="Fee percentage of the agent", + ), + ] + owner: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Owner identifier of the agent, used for access control", + max_length=50, + json_schema_extra={ + "x-group": "internal", + }, + ), + ] + upstream_id: Annotated[ + Optional[str], + PydanticField( + default=None, + description="External reference ID for idempotent operations", + max_length=100, + json_schema_extra={ + "x-group": "internal", + }, + ), + ] + upstream_extra: Annotated[ + Optional[Dict[str, Any]], + PydanticField( + default=None, + description="Additional data store for upstream use", + ), + ] + # AI part + model: Annotated[ + str, + PydanticField( + description="AI model identifier to be used by this agent for processing requests. 
Available models: gpt-4o, gpt-4o-mini, deepseek-chat, deepseek-reasoner, grok-2, eternalai, reigent", + ), + ] + # autonomous mode + autonomous: Annotated[ + Optional[List[Dict[str, Any]]], + PydanticField( + default=None, + description=("Autonomous agent configurations."), + ), + ] + # agent examples + example_intro: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Introduction for example interactions", + ), + ] + examples: Annotated[ + Optional[List[AgentExample]], + PydanticField( + default=None, + description="List of example prompts for the agent", + ), + ] + # skills + skills: Annotated[ + Optional[Dict[str, Any]], + PydanticField( + default=None, + description="Dict of skills and their corresponding configurations", + ), + ] + wallet_provider: Annotated[ + Optional[Literal["cdp", "readonly"]], + PydanticField( + default="cdp", + description="Provider of the agent's wallet", + ), + ] + network_id: Annotated[ + Optional[str], + PydanticField( + default="base-mainnet", + description="Network identifier", + ), + ] + cdp_network_id: Annotated[ + Optional[str], + PydanticField( + default="base-mainnet", + description="Network identifier for CDP integration", + ), + ] + # telegram entrypoint + telegram_entrypoint_enabled: Annotated[ + Optional[bool], + PydanticField( + default=False, + description="Whether the agent can play telegram bot", + ), + ] + + # data part + cdp_wallet_address: Annotated[ + Optional[str], PydanticField(description="CDP wallet address for the agent") + ] + evm_wallet_address: Annotated[ + Optional[str], PydanticField(description="EVM wallet address for the agent") + ] + solana_wallet_address: Annotated[ + Optional[str], PydanticField(description="Solana wallet address for the agent") + ] + has_twitter_linked: Annotated[ + bool, + PydanticField(description="Whether the agent has linked their Twitter account"), + ] + linked_twitter_username: Annotated[ + Optional[str], + PydanticField(description="The username of the linked Twitter account"), + ] + linked_twitter_name: Annotated[ + Optional[str], + PydanticField(description="The name of the linked Twitter account"), + ] + has_twitter_self_key: Annotated[ + bool, + PydanticField( + description="Whether the agent has self-keyed their Twitter account" + ), + ] + has_telegram_self_key: Annotated[ + bool, + PydanticField( + description="Whether the agent has self-keyed their Telegram account" + ), + ] + linked_telegram_username: Annotated[ + Optional[str], + PydanticField(description="The username of the linked Telegram account"), + ] + linked_telegram_name: Annotated[ + Optional[str], + PydanticField(description="The name of the linked Telegram account"), + ] + accept_image_input: Annotated[ + bool, + PydanticField( + description="Whether the agent accepts image inputs in public mode" + ), + ] + accept_image_input_private: Annotated[ + bool, + PydanticField( + description="Whether the agent accepts image inputs in private mode" + ), + ] + + def etag(self) -> str: + """Generate an ETag for this agent response. + + The ETag is based on a hash of the entire object to ensure it changes + whenever any part of the agent is modified. 
+ + Returns: + str: ETag value for the agent + """ + import hashlib + + # Generate hash from the entire object data using json mode to handle datetime objects + # Sort keys to ensure consistent ordering of dictionary keys + data = json.dumps(self.model_dump(mode="json"), sort_keys=True) + return f"{hashlib.md5(data.encode()).hexdigest()}" + + @classmethod + async def from_agent( + cls, agent: Agent, agent_data: Optional[AgentData] = None + ) -> "AgentResponse": + """Create an AgentResponse from an Agent instance. + + Args: + agent: Agent instance + agent_data: Optional AgentData instance + + Returns: + AgentResponse: Response model with additional processed data + """ + # Get base data from agent + data = agent.model_dump() + + # Filter sensitive fields from autonomous list + if data.get("autonomous"): + filtered_autonomous = [] + for item in data["autonomous"]: + if isinstance(item, dict): + filtered_item = { + "id": item.get("id"), + "name": item.get("name"), + "enabled": item.get("enabled"), + } + filtered_autonomous.append(filtered_item) + data["autonomous"] = filtered_autonomous + + # Filter sensitive fields from skills dictionary + if data.get("skills"): + filtered_skills = {} + for skill_name, skill_config in data["skills"].items(): + if isinstance(skill_config, dict): + # Only include skills that are enabled + if skill_config.get("enabled") is True: + filtered_config = {"enabled": True} + # Only keep states with public or private values + if "states" in skill_config and isinstance( + skill_config["states"], dict + ): + filtered_states = {} + for state_key, state_value in skill_config[ + "states" + ].items(): + if state_value in ["public", "private"]: + filtered_states[state_key] = state_value + if filtered_states: + filtered_config["states"] = filtered_states + filtered_skills[skill_name] = filtered_config + data["skills"] = filtered_skills + + # Process CDP wallet address + cdp_wallet_address = agent_data.evm_wallet_address if agent_data else None + evm_wallet_address = agent_data.evm_wallet_address if agent_data else None + solana_wallet_address = agent_data.solana_wallet_address if agent_data else None + + # Process Twitter linked status + has_twitter_linked = False + linked_twitter_username = None + linked_twitter_name = None + if agent_data and agent_data.twitter_access_token: + linked_twitter_username = agent_data.twitter_username + linked_twitter_name = agent_data.twitter_name + if agent_data.twitter_access_token_expires_at: + has_twitter_linked = ( + agent_data.twitter_access_token_expires_at + > datetime.now(timezone.utc) + ) + else: + has_twitter_linked = True + + # Process Twitter self-key status + has_twitter_self_key = bool( + agent_data and agent_data.twitter_self_key_refreshed_at + ) + + # Process Telegram self-key status and remove token + linked_telegram_username = None + linked_telegram_name = None + telegram_config = data.get("telegram_config", {}) + has_telegram_self_key = bool( + telegram_config and "token" in telegram_config and telegram_config["token"] + ) + if telegram_config and "token" in telegram_config: + if agent_data: + linked_telegram_username = agent_data.telegram_username + linked_telegram_name = agent_data.telegram_name + + accept_image_input = ( + await agent.is_model_support_image() or agent.has_image_parser_skill() + ) + accept_image_input_private = ( + await agent.is_model_support_image() + or agent.has_image_parser_skill(is_private=True) + ) + + # Add processed fields to response + data.update( + { + "cdp_wallet_address": cdp_wallet_address, + 
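A standalone version of the ETag recipe in `etag()`: an MD5 over a JSON dump with sorted keys, so any field change yields a new tag (the payload here is illustrative):

```python
import hashlib
import json

payload = {"id": "agent-1", "name": "Abbot", "updated_at": "2024-01-01T00:00:00Z"}
# Sorted keys give a stable serialization, so equal objects hash identically
etag = hashlib.md5(json.dumps(payload, sort_keys=True).encode()).hexdigest()
print(etag)
```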
"evm_wallet_address": evm_wallet_address, + "solana_wallet_address": solana_wallet_address, + "has_twitter_linked": has_twitter_linked, + "linked_twitter_username": linked_twitter_username, + "linked_twitter_name": linked_twitter_name, + "has_twitter_self_key": has_twitter_self_key, + "has_telegram_self_key": has_telegram_self_key, + "linked_telegram_username": linked_telegram_username, + "linked_telegram_name": linked_telegram_name, + "accept_image_input": accept_image_input, + "accept_image_input_private": accept_image_input_private, + } + ) + + return cls.model_validate(data) diff --git a/intentkit/models/agent_data.py b/intentkit/models/agent_data.py new file mode 100644 index 00000000..715e117a --- /dev/null +++ b/intentkit/models/agent_data.py @@ -0,0 +1,846 @@ +import logging +from datetime import datetime, timezone +from decimal import Decimal +from typing import Annotated, Any, Dict, Optional + +from fastapi import HTTPException +from intentkit.models.base import Base +from intentkit.models.db import get_session +from pydantic import BaseModel, ConfigDict +from pydantic import Field as PydanticField +from sqlalchemy import ( + BigInteger, + Boolean, + Column, + DateTime, + Numeric, + String, + func, + select, +) +from sqlalchemy.dialects.postgresql import JSON, JSONB + +logger = logging.getLogger(__name__) + + +class AgentDataTable(Base): + """Agent data model for database storage of additional data related to the agent.""" + + __tablename__ = "agent_data" + + id = Column(String, primary_key=True, comment="Same as Agent.id") + evm_wallet_address = Column(String, nullable=True, comment="EVM wallet address") + solana_wallet_address = Column( + String, nullable=True, comment="Solana wallet address" + ) + cdp_wallet_data = Column(String, nullable=True, comment="CDP wallet data") + twitter_id = Column(String, nullable=True, comment="Twitter user ID") + twitter_username = Column(String, nullable=True, comment="Twitter username") + twitter_name = Column(String, nullable=True, comment="Twitter display name") + twitter_access_token = Column(String, nullable=True, comment="Twitter access token") + twitter_access_token_expires_at = Column( + DateTime(timezone=True), + nullable=True, + comment="Twitter access token expiration time", + ) + twitter_refresh_token = Column( + String, nullable=True, comment="Twitter refresh token" + ) + twitter_self_key_refreshed_at = Column( + DateTime(timezone=True), + nullable=True, + comment="Twitter self-key userinfo last refresh time", + ) + twitter_is_verified = Column( + Boolean, + nullable=False, + default=False, + comment="Whether the Twitter account is verified", + ) + telegram_id = Column(String, nullable=True, comment="Telegram user ID") + telegram_username = Column(String, nullable=True, comment="Telegram username") + telegram_name = Column(String, nullable=True, comment="Telegram display name") + error_message = Column(String, nullable=True, comment="Last error message") + api_key = Column( + String, nullable=True, unique=True, comment="API key for the agent" + ) + api_key_public = Column( + String, nullable=True, unique=True, comment="Public API key for the agent" + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + comment="Timestamp when the agent data was created", + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + comment="Timestamp when the agent data was last updated", + ) + + +class 
AgentData(BaseModel): + """Agent data model for storing additional data related to the agent.""" + + model_config = ConfigDict(from_attributes=True) + + id: Annotated[ + str, + PydanticField( + description="Same as Agent.id", + ), + ] + evm_wallet_address: Annotated[ + Optional[str], + PydanticField( + default=None, + description="EVM wallet address", + ), + ] = None + solana_wallet_address: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Solana wallet address", + ), + ] = None + cdp_wallet_data: Annotated[ + Optional[str], + PydanticField( + default=None, + description="CDP wallet data", + ), + ] = None + twitter_id: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Twitter user ID", + ), + ] = None + twitter_username: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Twitter username", + ), + ] = None + twitter_name: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Twitter display name", + ), + ] = None + twitter_access_token: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Twitter access token", + ), + ] = None + twitter_access_token_expires_at: Annotated[ + Optional[datetime], + PydanticField( + default=None, + description="Twitter access token expiration time", + ), + ] = None + twitter_refresh_token: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Twitter refresh token", + ), + ] = None + twitter_self_key_refreshed_at: Annotated[ + Optional[datetime], + PydanticField( + default=None, + description="Twitter self-key userinfo last refresh time", + ), + ] = None + twitter_is_verified: Annotated[ + bool, + PydanticField( + default=False, + description="Whether the Twitter account is verified", + ), + ] = None + telegram_id: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Telegram user ID", + ), + ] = None + telegram_username: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Telegram username", + ), + ] = None + telegram_name: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Telegram display name", + ), + ] = None + error_message: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Last error message", + ), + ] = None + api_key: Annotated[ + Optional[str], + PydanticField( + default=None, + description="API key for the agent", + ), + ] = None + api_key_public: Annotated[ + Optional[str], + PydanticField( + default=None, + description="Public API key for the agent", + ), + ] = None + created_at: Annotated[ + datetime, + PydanticField( + default_factory=lambda: datetime.now(timezone.utc), + description="Timestamp when the agent data was created", + ), + ] + updated_at: Annotated[ + datetime, + PydanticField( + default_factory=lambda: datetime.now(timezone.utc), + description="Timestamp when the agent data was last updated", + ), + ] + + @classmethod + async def get(cls, agent_id: str) -> "AgentData": + """Get agent data by ID. + + Args: + agent_id: Agent ID + + Returns: + AgentData if found, None otherwise + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + item = await db.get(AgentDataTable, agent_id) + if item: + return cls.model_validate(item) + return cls.model_construct(id=agent_id) + + @classmethod + async def get_by_api_key(cls, api_key: str) -> Optional["AgentData"]: + """Get agent data by API key. 
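Note that `AgentData.get` above never returns `None`: a missing row comes back as an empty model built with `model_construct`, which skips validation but still applies declared field defaults. A toy illustration (the `Data` model is hypothetical):

```python
from typing import Optional

from pydantic import BaseModel


class Data(BaseModel):
    id: str
    evm_wallet_address: Optional[str] = None


# model_construct bypasses validation but fills in defaults for omitted fields
missing = Data.model_construct(id="agent-without-row")
print(missing.id, missing.evm_wallet_address)  # agent-without-row None
```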
+ + Args: + api_key: API key (sk- for private, pk- for public) + + Returns: + AgentData if found, None otherwise + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + if api_key.startswith("sk-"): + # Search in api_key field for private keys + result = await db.execute( + select(AgentDataTable).where(AgentDataTable.api_key == api_key) + ) + elif api_key.startswith("pk-"): + # Search in api_key_public field for public keys + result = await db.execute( + select(AgentDataTable).where( + AgentDataTable.api_key_public == api_key + ) + ) + else: + # Invalid key format + return None + + item = result.scalar_one_or_none() + if item: + return cls.model_validate(item) + return None + + async def save(self) -> None: + """Save or update agent data. + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + existing = await db.get(AgentDataTable, self.id) + if existing: + # Update existing record + for field, value in self.model_dump(exclude_unset=True).items(): + setattr(existing, field, value) + db.add(existing) + else: + # Create new record + db_agent_data = AgentDataTable(**self.model_dump()) + db.add(db_agent_data) + + await db.commit() + + @staticmethod + async def patch(id: str, data: dict) -> "AgentData": + """Update agent data. + + Args: + id: ID of the agent + data: Dictionary containing fields to update + + Returns: + Updated agent data + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + agent_data = await db.get(AgentDataTable, id) + if not agent_data: + agent_data = AgentDataTable(id=id, **data) + db.add(agent_data) + else: + for key, value in data.items(): + setattr(agent_data, key, value) + await db.commit() + await db.refresh(agent_data) + return AgentData.model_validate(agent_data) + + +class AgentPluginDataTable(Base): + """Database model for storing plugin-specific data for agents. + + This model uses a composite primary key of (agent_id, plugin, key) to store + plugin-specific data for agents in a flexible way. + + Attributes: + agent_id: ID of the agent this data belongs to + plugin: Name of the plugin this data is for + key: Key for this specific piece of data + data: JSON data stored for this key + """ + + __tablename__ = "agent_plugin_data" + + agent_id = Column(String, primary_key=True) + plugin = Column(String, primary_key=True) + key = Column(String, primary_key=True) + data = Column(JSON().with_variant(JSONB(), "postgresql"), nullable=True) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class AgentPluginData(BaseModel): + """Model for storing plugin-specific data for agents. + + This model uses a composite primary key of (agent_id, plugin, key) to store + plugin-specific data for agents in a flexible way. 
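+ + Example (illustrative sketch; the plugin name, key, and data are placeholders): + record = AgentPluginData( + agent_id="agent-1", + plugin="weather", + key="last_city", + data={"city": "Paris"}, + ) + await record.save()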
+ + Attributes: + agent_id: ID of the agent this data belongs to + plugin: Name of the plugin this data is for + key: Key for this specific piece of data + data: JSON data stored for this key + """ + + model_config = ConfigDict(from_attributes=True) + + agent_id: Annotated[ + str, + PydanticField(description="ID of the agent this data belongs to"), + ] + plugin: Annotated[ + str, + PydanticField(description="Name of the plugin this data is for"), + ] + key: Annotated[ + str, + PydanticField(description="Key for this specific piece of data"), + ] + data: Annotated[ + Optional[Dict[str, Any]], + PydanticField(default=None, description="JSON data stored for this key"), + ] + created_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when this data was created", + default_factory=lambda: datetime.now(timezone.utc), + ), + ] + updated_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when this data was last updated", + default_factory=lambda: datetime.now(timezone.utc), + ), + ] + + @classmethod + async def get( + cls, agent_id: str, plugin: str, key: str + ) -> Optional["AgentPluginData"]: + """Get plugin data for an agent. + + Args: + agent_id: ID of the agent + plugin: Name of the plugin + key: Data key + + Returns: + AgentPluginData if found, None otherwise + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + item = await db.scalar( + select(AgentPluginDataTable).where( + AgentPluginDataTable.agent_id == agent_id, + AgentPluginDataTable.plugin == plugin, + AgentPluginDataTable.key == key, + ) + ) + if item: + return cls.model_validate(item) + return None + + async def save(self) -> None: + """Save or update plugin data. + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + plugin_data = await db.scalar( + select(AgentPluginDataTable).where( + AgentPluginDataTable.agent_id == self.agent_id, + AgentPluginDataTable.plugin == self.plugin, + AgentPluginDataTable.key == self.key, + ) + ) + + if plugin_data: + # Update existing record + plugin_data.data = self.data + db.add(plugin_data) + else: + # Create new record + plugin_data = AgentPluginDataTable( + agent_id=self.agent_id, + plugin=self.plugin, + key=self.key, + data=self.data, + ) + db.add(plugin_data) + + await db.commit() + await db.refresh(plugin_data) + + # Sync server-generated timestamps back onto this instance + self.created_at = plugin_data.created_at + self.updated_at = plugin_data.updated_at + + +class AgentQuotaTable(Base): + """AgentQuota database table model.""" + + __tablename__ = "agent_quotas" + + id = Column(String, primary_key=True) + plan = Column(String, default="self-hosted") + message_count_total = Column(BigInteger, default=0) + message_limit_total = Column(BigInteger, default=99999999) + message_count_monthly = Column(BigInteger, default=0) + message_limit_monthly = Column(BigInteger, default=99999999) + message_count_daily = Column(BigInteger, default=0) + message_limit_daily = Column(BigInteger, default=99999999) + last_message_time = Column(DateTime(timezone=True), default=None, nullable=True) + autonomous_count_total = Column(BigInteger, default=0) + autonomous_limit_total = Column(BigInteger, default=99999999) + autonomous_count_monthly = Column(BigInteger, default=0) + autonomous_limit_monthly = Column(BigInteger, default=99999999) + last_autonomous_time = Column(DateTime(timezone=True), default=None, nullable=True) + twitter_count_total = Column(BigInteger, default=0) + twitter_limit_total = Column(BigInteger, default=99999999) + twitter_count_monthly = 
Column(BigInteger, default=0) + twitter_limit_monthly = Column(BigInteger, default=99999999) + twitter_count_daily = Column(BigInteger, default=0) + twitter_limit_daily = Column(BigInteger, default=99999999) + last_twitter_time = Column(DateTime(timezone=True), default=None, nullable=True) + free_income_daily = Column(Numeric(22, 4), default=0) + avg_action_cost = Column(Numeric(22, 4), default=0) + min_action_cost = Column(Numeric(22, 4), default=0) + max_action_cost = Column(Numeric(22, 4), default=0) + low_action_cost = Column(Numeric(22, 4), default=0) + medium_action_cost = Column(Numeric(22, 4), default=0) + high_action_cost = Column(Numeric(22, 4), default=0) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class AgentQuota(BaseModel): + """AgentQuota model.""" + + model_config = ConfigDict(from_attributes=True) + + id: Annotated[ + str, PydanticField(description="ID of the agent this quota belongs to") + ] + plan: Annotated[ + str, PydanticField(default="self-hosted", description="Agent plan name") + ] + message_count_total: Annotated[ + int, PydanticField(default=0, description="Total message count") + ] + message_limit_total: Annotated[ + int, PydanticField(default=99999999, description="Total message limit") + ] + message_count_monthly: Annotated[ + int, PydanticField(default=0, description="Monthly message count") + ] + message_limit_monthly: Annotated[ + int, PydanticField(default=99999999, description="Monthly message limit") + ] + message_count_daily: Annotated[ + int, PydanticField(default=0, description="Daily message count") + ] + message_limit_daily: Annotated[ + int, PydanticField(default=99999999, description="Daily message limit") + ] + last_message_time: Annotated[ + Optional[datetime], + PydanticField(default=None, description="Last message timestamp"), + ] + autonomous_count_total: Annotated[ + int, PydanticField(default=0, description="Total autonomous operations count") + ] + autonomous_limit_total: Annotated[ + int, + PydanticField( + default=99999999, description="Total autonomous operations limit" + ), + ] + autonomous_count_monthly: Annotated[ + int, PydanticField(default=0, description="Monthly autonomous operations count") + ] + autonomous_limit_monthly: Annotated[ + int, + PydanticField( + default=99999999, description="Monthly autonomous operations limit" + ), + ] + autonomous_count_daily: Annotated[ + int, PydanticField(default=0, description="Daily autonomous operations count") + ] + autonomous_limit_daily: Annotated[ + int, + PydanticField( + default=99999999, description="Daily autonomous operations limit" + ), + ] + last_autonomous_time: Annotated[ + Optional[datetime], + PydanticField(default=None, description="Last autonomous operation timestamp"), + ] + twitter_count_total: Annotated[ + int, PydanticField(default=0, description="Total Twitter operations count") + ] + twitter_limit_total: Annotated[ + int, + PydanticField(default=99999999, description="Total Twitter operations limit"), + ] + twitter_count_monthly: Annotated[ + int, PydanticField(default=0, description="Monthly Twitter operations count") + ] + twitter_limit_monthly: Annotated[ + int, + PydanticField(default=99999999, description="Monthly Twitter operations limit"), + ] + twitter_count_daily: Annotated[ + int, PydanticField(default=0, description="Daily Twitter operations count") + 
] + twitter_limit_daily: Annotated[ + int, + PydanticField(default=99999999, description="Daily Twitter operations limit"), + ] + last_twitter_time: Annotated[ + Optional[datetime], + PydanticField(default=None, description="Last Twitter operation timestamp"), + ] + free_income_daily: Annotated[ + Decimal, + PydanticField(default=0, description="Daily free income amount"), + ] + avg_action_cost: Annotated[ + Decimal, + PydanticField(default=0, description="Average cost per action"), + ] + max_action_cost: Annotated[ + Decimal, + PydanticField(default=0, description="Maximum cost per action"), + ] + min_action_cost: Annotated[ + Decimal, + PydanticField(default=0, description="Minimum cost per action"), + ] + high_action_cost: Annotated[ + Decimal, + PydanticField(default=0, description="High expected action cost"), + ] + medium_action_cost: Annotated[ + Decimal, + PydanticField(default=0, description="Medium expected action cost"), + ] + low_action_cost: Annotated[ + Decimal, + PydanticField(default=0, description="Low expected action cost"), + ] + created_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when this quota was created", + default_factory=lambda: datetime.now(timezone.utc), + ), + ] + updated_at: Annotated[ + datetime, + PydanticField( + description="Timestamp when this quota was last updated", + default_factory=lambda: datetime.now(timezone.utc), + ), + ] + + @classmethod + async def get(cls, agent_id: str) -> "AgentQuota": + """Get agent quota by id, if not exists, create a new one. + + Args: + agent_id: Agent ID + + Returns: + AgentQuota: The agent's quota object + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + quota_record = await db.get(AgentQuotaTable, agent_id) + if not quota_record: + # Create new record + quota_record = AgentQuotaTable( + id=agent_id, + ) + db.add(quota_record) + await db.commit() + await db.refresh(quota_record) + + return cls.model_validate(quota_record) + + def has_message_quota(self) -> bool: + """Check if the agent has message quota. + + Returns: + bool: True if the agent has quota, False otherwise + """ + # Check total limit + if self.message_count_total >= self.message_limit_total: + return False + # Check monthly limit + if self.message_count_monthly >= self.message_limit_monthly: + return False + # Check daily limit + if self.message_count_daily >= self.message_limit_daily: + return False + return True + + def has_autonomous_quota(self) -> bool: + """Check if the agent has autonomous quota. + + Returns: + bool: True if the agent has quota, False otherwise + """ + # Check total limit + if self.autonomous_count_total >= self.autonomous_limit_total: + return False + # Check monthly limit + if self.autonomous_count_monthly >= self.autonomous_limit_monthly: + return False + return True + + def has_twitter_quota(self) -> bool: + """Check if the agent has twitter quota. + + Returns: + bool: True if the agent has quota, False otherwise + """ + # Check total limit + if self.twitter_count_total >= self.twitter_limit_total: + return False + # Check daily limit + if self.twitter_count_daily >= self.twitter_limit_daily: + return False + return True + + @staticmethod + async def add_free_income_in_session(session, id: str, amount: Decimal) -> None: + """Add free income to an agent's quota directly in the database. 
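+ + Example (illustrative sketch; the agent id and amount are placeholders): + async with get_session() as session: + await AgentQuota.add_free_income_in_session( + session, "agent-1", Decimal("1.5") + ) + await session.commit()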
+ + Args: + session: SQLAlchemy session + id: Agent ID + amount: Amount to add to free_income_daily + + Raises: + HTTPException: If there are database errors + """ + try: + # Check if the record exists using session.get + quota_record = await session.get(AgentQuotaTable, id) + + if not quota_record: + # Create new record if it doesn't exist + quota_record = AgentQuotaTable(id=id, free_income_daily=amount) + session.add(quota_record) + else: + # Use update statement with func to directly add the amount + from sqlalchemy import update + + stmt = update(AgentQuotaTable).where(AgentQuotaTable.id == id) + stmt = stmt.values( + free_income_daily=func.coalesce( + AgentQuotaTable.free_income_daily, 0 + ) + + amount + ) + await session.execute(stmt) + except Exception as e: + logger.error(f"Error adding free income: {str(e)}") + raise HTTPException(status_code=500, detail=f"Database error: {str(e)}") + + async def add_message(self) -> None: + """Add a message to the agent's message count.""" + async with get_session() as db: + quota_record = await db.get(AgentQuotaTable, self.id) + + if quota_record: + # Update record + quota_record.message_count_total += 1 + quota_record.message_count_monthly += 1 + quota_record.message_count_daily += 1 + quota_record.last_message_time = datetime.now(timezone.utc) + db.add(quota_record) + await db.commit() + + # Update this instance + await db.refresh(quota_record) + self.message_count_total = quota_record.message_count_total + self.message_count_monthly = quota_record.message_count_monthly + self.message_count_daily = quota_record.message_count_daily + self.last_message_time = quota_record.last_message_time + self.updated_at = quota_record.updated_at + + async def add_autonomous(self) -> None: + """Add an autonomous operation to the agent's autonomous count.""" + async with get_session() as db: + quota_record = await db.get(AgentQuotaTable, self.id) + if quota_record: + # Update record + quota_record.autonomous_count_total += 1 + quota_record.autonomous_count_monthly += 1 + quota_record.last_autonomous_time = datetime.now(timezone.utc) + db.add(quota_record) + await db.commit() + + # Update this instance (model_validate is a classmethod and would not mutate self) + await db.refresh(quota_record) + self.autonomous_count_total = quota_record.autonomous_count_total + self.autonomous_count_monthly = quota_record.autonomous_count_monthly + self.last_autonomous_time = quota_record.last_autonomous_time + self.updated_at = quota_record.updated_at + + async def add_twitter_message(self) -> None: + """Add a twitter message to the agent's twitter count. + + Raises: + HTTPException: If there are database errors + """ + async with get_session() as db: + quota_record = await db.get(AgentQuotaTable, self.id) + + if quota_record: + # Update record + quota_record.twitter_count_total += 1 + quota_record.twitter_count_daily += 1 + quota_record.last_twitter_time = datetime.now(timezone.utc) + db.add(quota_record) + await db.commit() + + # Update this instance (model_validate is a classmethod and would not mutate self) + await db.refresh(quota_record) + self.twitter_count_total = quota_record.twitter_count_total + self.twitter_count_daily = quota_record.twitter_count_daily + self.last_twitter_time = quota_record.last_twitter_time + self.updated_at = quota_record.updated_at + + @staticmethod + async def reset_daily_quotas(): + """Reset daily quotas for all agents at UTC 00:00. + Resets message_count_daily, twitter_count_daily, and free_income_daily to 0. + """ + from sqlalchemy import update + + async with get_session() as session: + stmt = update(AgentQuotaTable).values( + message_count_daily=0, + twitter_count_daily=0, + free_income_daily=0, + ) + await session.execute(stmt) + await session.commit() + + @staticmethod + async def reset_monthly_quotas(): + """Reset monthly quotas for all agents at the start of each month. + Resets message_count_monthly and autonomous_count_monthly to 0. 
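+ + Example (illustrative; typically wired to a scheduler at the start of each month): + await AgentQuota.reset_monthly_quotas()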
+ """ + from sqlalchemy import update + + async with get_session() as session: + stmt = update(AgentQuotaTable).values( + message_count_monthly=0, autonomous_count_monthly=0 + ) + await session.execute(stmt) + await session.commit() diff --git a/intentkit/models/agent_schema.json b/intentkit/models/agent_schema.json new file mode 100644 index 00000000..db31a433 --- /dev/null +++ b/intentkit/models/agent_schema.json @@ -0,0 +1,726 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema#", + "title": "Agent", + "description": "Agent model", + "type": "object", + "x-groups": [ + { + "id": "basic", + "title": "Basic", + "order": 1 + }, + { + "id": "llm", + "title": "LLM", + "order": 2 + }, + { + "id": "onchain", + "title": "On-Chain", + "order": 3 + }, + { + "id": "examples", + "title": "Quick Actions", + "order": 4 + }, + { + "id": "entrypoint", + "title": "Communication Channels", + "order": 5 + }, + { + "id": "skills", + "title": "Skills", + "order": 6 + }, + { + "id": "autonomous", + "title": "Autonomous", + "order": 7 + }, + { + "id": "experimental", + "title": "Experimental", + "order": 8 + }, + { + "id": "deprecated", + "title": "Deprecated", + "order": 9 + }, + { + "id": "internal", + "title": "Internal", + "order": 10 + } + ], + "required": [ + "name" + ], + "properties": { + "name": { + "title": "Agent Name", + "type": "string", + "description": "Display name of the agent", + "maxLength": 50, + "x-group": "basic", + "x-placeholder": "Enter agent name" + }, + "mode": { + "title": "Usage Type", + "type": "string", + "description": "Mode of the agent, Public App or Personal Assistant", + "enum": [ + "public", + "private" + ], + "x-enum-title": [ + "Public App", + "Personal Assistant" + ], + "x-group": "deprecated" + }, + "fee_percentage": { + "title": "Service Fee", + "type": "number", + "description": "A CAPs % added to the base cost, paid to the agent for delivering its unique logic and execution.", + "minimum": 0, + "maximum": 100, + "default": 0, + "x-step": 1, + "x-group": "basic", + "x-component": "slider-with-box", + "x-nft-extra": 10 + }, + "description": { + "title": "Description", + "type": "string", + "description": "Description of the agent, for public view, not contained in prompt", + "maxLength": 3000, + "x-group": "basic", + "x-placeholder": "Introduce your agent" + }, + "external_website": { + "title": "External Website", + "type": "string", + "description": "Link of external website of the agent, if you have one", + "format": "uri", + "x-group": "basic", + "x-placeholder": "Enter agent external website url" + }, + "picture": { + "title": "Picture", + "type": "string", + "description": "Picture of the agent", + "x-group": "experimental", + "x-placeholder": "Upload a picture of your agent" + }, + "slug": { + "title": "Slug", + "type": "string", + "description": "Slug of the agent, used for URL generation", + "maxLength": 30, + "minLength": 2, + "readOnly": true, + "x-group": "internal" + }, + "owner": { + "title": "Owner", + "type": "string", + "description": "Owner identifier of the agent, used for access control", + "readOnly": true, + "maxLength": 50, + "x-group": "internal" + }, + "upstream_id": { + "title": "Upstream ID", + "type": "string", + "description": "External reference ID for idempotent operations", + "readOnly": true, + "maxLength": 100, + "x-group": "internal" + }, + "model": { + "title": "AI Model", + "type": "string", + "description": "Select the LLM for your agent. 
Note that each LLM has its specific advantages, behaviour and cost.", + "default": "gpt-5-mini", + "enum": [ + "gpt-5-nano", + "gpt-5-mini", + "gpt-5", + "gpt-4.1-nano", + "gpt-4.1-mini", + "gpt-4.1", + "gpt-4o", + "gpt-4o-mini", + "o3", + "o4-mini", + "deepseek-chat", + "deepseek-reasoner", + "grok-2", + "grok-3", + "grok-3-mini", + "eternalai", + "reigent", + "venice-uncensored", + "venice-llama-4-maverick-17b" + ], + "x-component": "category-select", + "x-enum-title": [ + "GPT-5 nano", + "GPT-5 mini", + "GPT-5", + "GPT-4.1 nano", + "GPT-4.1 mini", + "GPT-4.1", + "GPT-4o", + "GPT-4o mini", + "OpenAI o3", + "OpenAI o4-mini", + "Deepseek V3.1", + "Deepseek V3.1 Thinking", + "Grok 2", + "Grok 3", + "Grok 3 Mini", + "EternalAI", + "Reigent", + "Venice Uncensored", + "Venice Llama 4 Maverick 17B" + ], + "x-enum-category": [ + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "OpenAI", + "Deepseek", + "Deepseek", + "Grok", + "Grok", + "Grok", + "EternalAI", + "Reigent", + "Venice", + "Venice" + ], + "x-support-skill": [ + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true, + true + ], + "x-group": "llm" + }, + "purpose": { + "title": "Purpose", + "type": "string", + "description": "Tell the agent what its purpose is and what needs it should serve.", + "maxLength": 20000, + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." + }, + "x-group": "llm", + "x-str-type": "prompt", + "x-placeholder": "Enter agent purpose" + }, + "personality": { + "title": "Personality", + "type": "string", + "description": "Tell the agent what personality it should have and how it should communicate with the users.", + "maxLength": 20000, + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." + }, + "x-group": "llm", + "x-str-type": "prompt", + "x-placeholder": "Enter agent personality" + }, + "principles": { + "title": "Principles", + "type": "string", + "description": "Tell the agent what it should and shouldn't do when it interacts with users or skills.", + "maxLength": 20000, + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." + }, + "x-group": "llm", + "x-str-type": "prompt", + "x-placeholder": "Enter agent principles" + }, + "prompt": { + "title": "Knowledge Base", + "type": "string", + "description": "Give the agent any additional knowledge. Text only.", + "maxLength": 20000, + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." 
+ }, + "x-str-type": "prompt", + "x-group": "llm" + }, + "prompt_append": { + "title": "Advanced", + "type": "string", + "description": "Additional system prompt that has higher priority than initial system prompt, only some models support this, if not supported, this part will append to the end of initial system prompt", + "maxLength": 20000, + "pattern": "^(([^#].*)|#[^# ].*|#{3,}[ ].*|$)(\n(([^#].*)|#[^# ].*|#{3,}[ ].*|$))*$", + "errorMessage": { + "pattern": "Level 1 and 2 headings (# and ##) are not allowed. Please use level 3+ headings (###, ####, etc.) instead." + }, + "x-str-type": "prompt", + "x-group": "llm" + }, + "temperature": { + "title": "Temperature", + "type": "number", + "description": "Controls creativity: higher values are more creative, lower values are more precise. CHANGING THIS SETTING MAY INTRODUCE UNEXPECTED BEHAVIOR. USE WITH CAUTION.", + "default": 0.7, + "minimum": 0.0, + "maximum": 1.5, + "x-group": "llm", + "x-step": 0.1 + }, + "frequency_penalty": { + "title": "Frequency Penalty", + "type": "number", + "description": "Adjusts repetition: higher values encourage new topics, lower values allow repetition.", + "default": 0.0, + "minimum": -2.0, + "maximum": 2.0, + "x-group": "llm", + "x-step": 0.1 + }, + "presence_penalty": { + "title": "Presence Penalty", + "type": "number", + "description": "Controls topic focus: higher values promote diversity, lower values stay closely on topic.", + "default": 0.0, + "minimum": -2.0, + "maximum": 2.0, + "x-group": "llm", + "x-step": 0.1 + }, + "short_term_memory_strategy": { + "title": "Short Term Memory Strategy", + "type": "string", + "description": "Strategy for managing short-term memory when context limit is reached. 'trim' removes oldest messages, 'summarize' creates summaries.", + "default": "trim", + "enum": [ + "trim", + "summarize" + ], + "x-group": "llm" + }, + "telegram_entrypoint_enabled": { + "title": "Enable Telegram Communication", + "type": "boolean", + "description": "Allow agent to respond to Telegram messages", + "default": false, + "x-group": "entrypoint" + }, + "telegram_entrypoint_prompt": { + "title": "Telegram Entry Prompt", + "type": "string", + "description": "Extra prompt for telegram entrypoint", + "maxLength": 10000, + "x-str-type": "prompt", + "x-group": "entrypoint" + }, + "telegram_config": { + "title": "Telegram Configuration", + "type": "object", + "description": "Configure your Telegram integration settings", + "x-group": "entrypoint", + "properties": { + "token": { + "title": "Bot Token", + "type": "string", + "description": "Telegram bot token obtained from BotFather" + }, + "group_memory_public": { + "title": "Group Memory Public", + "type": "boolean", + "description": "Whether group memory is public" + }, + "whitelist_chat_ids": { + "title": "Whitelist Chat IDs", + "type": "array", + "description": "List of chat IDs that are allowed to interact with the bot", + "items": { + "type": "integer" + } + }, + "greeting_group": { + "title": "Group Greeting", + "type": "string", + "description": "Custom greeting message for groups" + }, + "greeting_user": { + "title": "User Greeting", + "type": "string", + "description": "Custom greeting message for individual users" + } + } + }, + "skills": { + "title": "Skills", + "type": "object", + "description": "Dict of skills and their corresponding configurations", + "x-group": "skills", + "x-inline": true, + "properties": { + "allora": { + "title": "Allora", + "$ref": "../skills/allora/schema.json" + }, + "cdp": { + "title": "Coinbase Wallet", + 
"$ref": "../skills/cdp/schema.json" + }, + "dapplooker": { + "title": "DappLooker", + "$ref": "../skills/dapplooker/schema.json" + }, + "elfa": { + "title": "Elfa", + "$ref": "../skills/elfa/schema.json" + }, + "openai": { + "title": "OpenAI", + "$ref": "../skills/openai/schema.json" + }, + "portfolio": { + "title": "Blockchain Portfolio", + "$ref": "../skills/portfolio/schema.json" + }, + "tavily": { + "title": "Tavily", + "$ref": "../skills/tavily/schema.json" + }, + "token": { + "title": "Token Operations", + "$ref": "../skills/token/schema.json" + }, + "twitter": { + "title": "X", + "$ref": "../skills/twitter/schema.json" + }, + "xmtp": { + "title": "XMTP", + "$ref": "../skills/xmtp/schema.json" + }, + "chainlist": { + "title": "Chainlist RPC Endpoints", + "$ref": "../skills/chainlist/schema.json" + }, + "dexscreener": { + "title": "DEX Screener", + "$ref": "../skills/dexscreener/schema.json" + }, + "heurist": { + "title": "Heurist", + "$ref": "../skills/heurist/schema.json" + }, + "nation": { + "title": "Nation", + "$ref": "../skills/nation/schema.json" + }, + "defillama": { + "title": "Defillama", + "$ref": "../skills/defillama/schema.json" + }, + "enso": { + "title": "Enso", + "$ref": "../skills/enso/schema.json" + }, + "common": { + "title": "Common", + "$ref": "../skills/common/schema.json" + }, + "github": { + "title": "GitHub", + "$ref": "../skills/github/schema.json" + }, + "moralis": { + "title": "Moralis", + "$ref": "../skills/moralis/schema.json" + }, + "system": { + "title": "System", + "$ref": "../skills/system/schema.json" + }, + "http": { + "title": "HTTP Client", + "$ref": "../skills/http/schema.json" + }, + "web_scraper": { + "title": "Web Scraper & Content Indexing", + "$ref": "../skills/web_scraper/schema.json" + }, + "firecrawl": { + "title": "Firecrawl Web Scraping", + "$ref": "../skills/firecrawl/schema.json" + }, + "aixbt": { + "title": "AIXBT", + "$ref": "../skills/aixbt/schema.json" + }, + "cookiefun": { + "title": "cookie.fun", + "$ref": "../skills/cookiefun/schema.json" + }, + "cryptocompare": { + "title": "Cryptocompare", + "$ref": "../skills/cryptocompare/schema.json" + }, + "cryptopanic": { + "title": "CryptoPanic", + "$ref": "../skills/cryptopanic/schema.json" + }, + "dune_analytics": { + "title": "Dune Analytics", + "$ref": "../skills/dune_analytics/schema.json" + }, + "slack": { + "title": "Slack", + "$ref": "../skills/slack/schema.json" + }, + "supabase": { + "title": "Supabase", + "$ref": "../skills/supabase/schema.json" + }, + "venice_audio": { + "title": "Venice Audio", + "$ref": "../skills/venice_audio/schema.json" + }, + "venice_image": { + "title": "Venice Image", + "$ref": "../skills/venice_image/schema.json" + }, + "unrealspeech": { + "title": "UnrealSpeech", + "$ref": "../skills/unrealspeech/schema.json" + }, + "carv": { + "title": "Carv", + "$ref": "../skills/carv/schema.json" + }, + "lifi": { + "title": "LiFi", + "$ref": "../skills/lifi/schema.json" + }, + "casino": { + "title": "Casino", + "$ref": "../skills/casino/schema.json" + } + } + }, + "autonomous": { + "title": "Autonomous", + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "title": "ID", + "type": "string", + "description": "Unique identifier for the autonomous configuration", + "minLength": 1, + "maxLength": 20, + "pattern": "^[a-z0-9-]+$", + "readOnly": true, + "x-group": "internal" + }, + "name": { + "title": "Name", + "type": "string", + "description": "Name for this automated task", + "maxLength": 50, + "x-group": "autonomous" + }, + 
"description": { + "title": "Description", + "type": "string", + "description": "Briefly describe what this automation does", + "maxLength": 200, + "x-group": "autonomous" + }, + "minutes": { + "title": "Frequency in Minutes", + "type": "integer", + "description": "How often to run (in minutes)", + "x-group": "autonomous" + }, + "cron": { + "title": "Schedule", + "type": "string", + "description": "Advanced scheduling (cron format), mutually exclusive with minutes", + "x-group": "autonomous" + }, + "prompt": { + "title": "Task Prompt", + "type": "string", + "description": "Prompt the agent will execute on schedule", + "maxLength": 20000, + "x-str-type": "prompt", + "x-group": "autonomous" + }, + "enabled": { + "title": "Enabled", + "type": "boolean", + "description": "Turn automation on/off", + "default": false, + "x-group": "autonomous" + } + }, + "required": [ + "name", + "prompt" + ] + }, + "description": "Set automated prompts and schedules for your agent.", + "x-group": "autonomous", + "x-inline": true + }, + "example_intro": { + "title": "Agent Greeting", + "type": "string", + "description": "This is the first thing users see when they meet your agent. Use it to explain what your agent does, the services it provides and how it fits into the Nation.", + "maxLength": 2000, + "x-group": "examples" + }, + "examples": { + "title": "Quick Action List", + "type": "array", + "maxItems": 6, + "items": { + "type": "object", + "properties": { + "name": { + "title": "Action Name", + "type": "string", + "description": "Quick action will show up on the UI as this name", + "maxLength": 50, + "x-group": "examples" + }, + "description": { + "title": "Description", + "type": "string", + "description": "Description of what this action does", + "maxLength": 200, + "x-group": "examples" + }, + "prompt": { + "title": "Prompt", + "type": "string", + "description": "When user clicks this action, the agent will execute this prompt", + "maxLength": 2000, + "x-str-type": "prompt", + "x-group": "examples" + } + }, + "required": [ + "name", + "description", + "prompt" + ] + }, + "description": "Quick clickable actions users can use with the agent without having to type any text instructions.", + "x-group": "examples" + }, + "wallet_provider": { + "title": "Wallet Provider", + "type": "string", + "description": "Provider of the agent's wallet, choose cdp if you want the agent to have it's own wallet.", + "enum": [ + "cdp", + "readonly" + ], + "x-enum-title": [ + "Coinbase Server Wallet V2", + "Readonly Wallet" + ], + "default": "cdp", + "x-group": "onchain" + }, + "network_id": { + "title": "Default Network", + "type": "string", + "description": "Default Network, please note that some CDP Wallet native skills like swap only support the base network.", + "default": "base-mainnet", + "enum": [ + "ethereum-mainnet", + "ethereum-sepolia", + "polygon-mainnet", + "polygon-mumbai", + "base-mainnet", + "base-sepolia", + "arbitrum-mainnet", + "arbitrum-sepolia", + "optimism-mainnet", + "optimism-sepolia" + ], + "x-group": "onchain" + }, + "ticker": { + "title": "Ticker", + "type": "string", + "description": "Ticker symbol of the agent", + "maxLength": 10, + "minLength": 1, + "x-group": "onchain", + "x-placeholder": "Enter agent ticker" + }, + "token_address": { + "title": "Token Address", + "type": "string", + "description": "Token address of the agent, if it already has one", + "maxLength": 42, + "x-group": "onchain" + }, + "token_pool": { + "title": "Token Pool", + "type": "string", + "description": "Pool of the agent 
token, if it has one", + "maxLength": 42, + "x-group": "onchain" + } + }, + "if": { + "properties": { + "wallet_provider": { + "const": "readonly" + } + } + }, + "then": { + "properties": { + "readonly_wallet_address": { + "title": "Readonly Wallet Address", + "type": "string", + "description": "Set an existing wallet address as the agent's wallet; the agent can then only analyze it, not transact with it.", + "maxLength": 100, + "x-group": "onchain" + } + }, + "required": [ + "readonly_wallet_address" + ] + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/models/app_setting.py b/intentkit/models/app_setting.py new file mode 100644 index 00000000..2c8ca94a --- /dev/null +++ b/intentkit/models/app_setting.py @@ -0,0 +1,243 @@ +import time +from datetime import datetime, timezone +from decimal import ROUND_HALF_UP, Decimal +from enum import Enum +from typing import Annotated, Any, Dict, List + +from intentkit.models.base import Base +from intentkit.models.db import get_session +from pydantic import BaseModel, ConfigDict, Field, field_validator +from sqlalchemy import Column, DateTime, String, func, select +from sqlalchemy.dialects.postgresql import JSON, JSONB + + +class SystemMessageType(str, Enum): + """Type of system message.""" + + SERVICE_FEE_ERROR = "service_fee_error" + DAILY_USAGE_LIMIT_EXCEEDED = "daily_usage_limit_exceeded" + INSUFFICIENT_BALANCE = "insufficient_balance" + AGENT_INTERNAL_ERROR = "agent_internal_error" + STEP_LIMIT_EXCEEDED = "step_limit_exceeded" + SKILL_INTERRUPTED = "skill_interrupted" + + +# Default system messages +DEFAULT_SYSTEM_MESSAGES = { + "service_fee_error": "Please lower this Agent's service fee to meet the allowed maximum.", + "daily_usage_limit_exceeded": "This Agent has reached its free daily usage limit. Add credits to continue, or wait until tomorrow.", + "insufficient_balance": "You don't have enough credits to complete this action.", + "agent_internal_error": "Something went wrong. Please try again.", + "step_limit_exceeded": "This Agent tried to process too many steps. Try again with @super for a higher step limit.", + "skill_interrupted": "You were interrupted after executing a skill. 
Please retry with caution to avoid repeating the skill.", +} + +# In-memory cache for app settings +_cache: Dict[str, Dict[str, Any]] = {} +_cache_ttl = 180 # 3 minutes in seconds + + +class AppSettingTable(Base): + """App settings database table model.""" + + __tablename__ = "app_settings" + + key = Column( + String, + primary_key=True, + ) + value = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=False, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class PaymentSettings(BaseModel): + """Payment settings model.""" + + model_config = ConfigDict( + json_schema_extra={ + "example": { + "credit_per_usdc": 1000, + "fee_platform_percentage": 100, + "fee_dev_percentage": 20, + "free_quota": 480, + "refill_amount": 20, + "agent_whitelist_enabled": False, + "agent_whitelist": [], + } + } + ) + + credit_per_usdc: Annotated[ + Decimal, + Field(default=Decimal("1000"), description="Number of credits per USDC"), + ] + fee_platform_percentage: Annotated[ + Decimal, + Field( + default=Decimal("100"), description="Platform fee percentage", ge=0, le=100 + ), + ] + fee_dev_percentage: Annotated[ + Decimal, + Field( + default=Decimal("20"), description="Developer fee percentage", ge=0, le=100 + ), + ] + free_quota: Annotated[ + Decimal, + Field( + default=Decimal("480"), + description="Daily free credit quota for new users", + ge=0, + ), + ] + refill_amount: Annotated[ + Decimal, + Field( + default=Decimal("20"), + description="Hourly refill amount for free credits", + ge=0, + ), + ] + agent_whitelist_enabled: Annotated[ + bool, + Field(default=False, description="Whether agent whitelist is enabled"), + ] + agent_whitelist: Annotated[ + List[str], + Field(default_factory=list, description="List of whitelisted agent IDs"), + ] + + @field_validator( + "credit_per_usdc", + "fee_platform_percentage", + "fee_dev_percentage", + "free_quota", + "refill_amount", + ) + @classmethod + def round_decimal(cls, v: Any) -> Decimal: + """Round decimal values to 4 decimal places.""" + if isinstance(v, Decimal): + return v.quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + elif isinstance(v, (int, float)): + return Decimal(str(v)).quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + return v + + +class AppSetting(BaseModel): + """App setting model with all fields.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + key: Annotated[str, Field(description="Setting key")] + value: Annotated[Any, Field(description="Setting value as JSON")] + created_at: Annotated[ + datetime, Field(description="Timestamp when this setting was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this setting was last updated") + ] + + @staticmethod + async def payment() -> PaymentSettings: + """Get payment settings from the database with in-memory caching. + + The settings are cached in memory for 3 minutes. 
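+ + Example (illustrative sketch; credit_amount is a hypothetical Decimal value): + settings = await AppSetting.payment() + usdc_value = credit_amount / settings.credit_per_usdc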
+ + Returns: + PaymentSettings: Payment settings + """ + cache_key = "payment" + current_time = time.time() + + # Check if we have cached data and it's still valid + if cache_key in _cache: + cache_entry = _cache[cache_key] + if current_time - cache_entry["timestamp"] < _cache_ttl: + return PaymentSettings(**cache_entry["data"]) + + # If not in cache or cache is expired, get from database + async with get_session() as session: + # Query the database for the payment settings + stmt = select(AppSettingTable).where(AppSettingTable.key == "payment") + setting = await session.scalar(stmt) + + # If settings don't exist, use default settings + if not setting: + payment_settings = PaymentSettings() + else: + # Convert the JSON value to PaymentSettings + payment_settings = PaymentSettings(**setting.value) + + # Cache the settings in memory + _cache[cache_key] = { + "data": payment_settings.model_dump(mode="json"), + "timestamp": current_time, + } + + return payment_settings + + @staticmethod + async def error_message(message_type: SystemMessageType) -> str: + """Get error message from the database with in-memory caching, fallback to default. + + The settings are cached in memory for 3 minutes. + + Args: + message_type: The SystemMessageType enum + + Returns: + str: Error message from config or default message + """ + cache_key = "errors" + current_time = time.time() + message_key = message_type.value + + # Check if we have cached data and it's still valid + if cache_key in _cache: + cache_entry = _cache[cache_key] + if current_time - cache_entry["timestamp"] < _cache_ttl: + errors_data = cache_entry["data"] + if errors_data and message_key in errors_data: + return errors_data[message_key] + # Return default message if not found in config + return DEFAULT_SYSTEM_MESSAGES[message_key] + + # If not in cache or cache is expired, get from database + async with get_session() as session: + # Query the database for the errors settings + stmt = select(AppSettingTable).where(AppSettingTable.key == "errors") + setting = await session.scalar(stmt) + + # If settings don't exist, cache None + errors_data = setting.value if setting else None + + # Cache the settings in memory + _cache[cache_key] = { + "data": errors_data, + "timestamp": current_time, + } + + # Return configured message if exists, otherwise return default + if errors_data and message_key in errors_data: + return errors_data[message_key] + return DEFAULT_SYSTEM_MESSAGES[message_key] diff --git a/intentkit/models/base.py b/intentkit/models/base.py new file mode 100644 index 00000000..a1bb6e65 --- /dev/null +++ b/intentkit/models/base.py @@ -0,0 +1,9 @@ +"""Base SQLAlchemy model for all models in the application.""" + +from sqlalchemy.orm import DeclarativeBase + + +class Base(DeclarativeBase): + """Base class for all models.""" + + pass diff --git a/intentkit/models/chat.py b/intentkit/models/chat.py new file mode 100644 index 00000000..0ef168f7 --- /dev/null +++ b/intentkit/models/chat.py @@ -0,0 +1,709 @@ +from datetime import datetime, timezone +from decimal import Decimal +from enum import Enum +from typing import Annotated, List, NotRequired, Optional, TypedDict + +from epyxid import XID +from intentkit.models.app_setting import AppSetting, SystemMessageType +from intentkit.models.base import Base +from intentkit.models.db import get_session +from pydantic import BaseModel, ConfigDict, Field +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Float, + Index, + Integer, + Numeric, + String, + desc, + func, + select, + update, +) +from 
sqlalchemy.dialects.postgresql import JSON, JSONB +from sqlalchemy.ext.asyncio import AsyncSession + + +class ChatMessageAttachmentType(str, Enum): + """Type of chat message attachment.""" + + LINK = "link" + IMAGE = "image" + FILE = "file" + XMTP = "xmtp" + + +class AuthorType(str, Enum): + """Type of message author.""" + + AGENT = "agent" + TRIGGER = "trigger" + SKILL = "skill" + TELEGRAM = "telegram" + TWITTER = "twitter" + WEB = "web" + SYSTEM = "system" + API = "api" + XMTP = "xmtp" + + +class ChatMessageAttachment(TypedDict): + """Chat message attachment model. + + An attachment can be a link, image, or file that is associated with a chat message. + """ + + type: Annotated[ + ChatMessageAttachmentType, + Field( + ..., + description="Type of the attachment (link, image, or file)", + examples=["link"], + ), + ] + url: Annotated[ + Optional[str], + Field( + ..., + description="URL of the attachment", + examples=["https://example.com/image.jpg"], + ), + ] + json: Annotated[ + Optional[dict], + Field( + None, + description="JSON data of the attachment", + ), + ] + + +class ChatMessageSkillCall(TypedDict): + """TypedDict for skill call details.""" + + id: NotRequired[str] + name: str + parameters: dict + success: bool + response: NotRequired[ + str + ] # Optional response from the skill call, trimmed to 100 characters + error_message: NotRequired[str] # Optional error message from the skill call + credit_event_id: NotRequired[str] # ID of the credit event for this skill call + credit_cost: NotRequired[Decimal] # Credit cost for the skill call + + +class ChatMessageRequest(BaseModel): + """Request model for chat messages. + + This model represents the request body for creating a new chat message. + It contains the necessary fields to identify the chat context, user, + and message content, along with optional attachments. 
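+ + Example (illustrative request payload; the ids are placeholders): + {"chat_id": "chat-123", "user_id": "user-456", "message": "Hello!"}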
+ """ + + chat_id: Annotated[ + str, + Field( + ..., + description="Unique identifier for the chat thread", + examples=["chat-123"], + min_length=1, + ), + ] + app_id: Annotated[ + Optional[str], + Field( + None, + description="Optional application identifier", + examples=["app-789"], + ), + ] + user_id: Annotated[ + str, + Field( + ..., + description="Unique identifier of the user sending the message", + examples=["user-456"], + min_length=1, + ), + ] + message: Annotated[ + str, + Field( + ..., + description="Content of the message", + examples=["Hello, how can you help me today?"], + min_length=1, + max_length=65535, + ), + ] + search_mode: Annotated[ + Optional[bool], + Field( + None, + description="Optional flag to enable search mode", + ), + ] + super_mode: Annotated[ + Optional[bool], + Field( + None, + description="Optional flag to enable super mode", + ), + ] + attachments: Annotated[ + Optional[List[ChatMessageAttachment]], + Field( + None, + description="Optional list of attachments (links, images, or files)", + examples=[[{"type": "link", "url": "https://example.com"}]], + ), + ] + + model_config = ConfigDict( + use_enum_values=True, + json_schema_extra={ + "example": { + "chat_id": "chat-123", + "app_id": "app-789", + "user_id": "user-456", + "message": "Hello, how can you help me today?", + "search_mode": True, + "super_mode": False, + "attachments": [ + { + "type": "link", + "url": "https://example.com", + } + ], + } + }, + ) + + +class ChatMessageTable(Base): + """Chat message database table model.""" + + __tablename__ = "chat_messages" + __table_args__ = ( + Index("ix_chat_messages_chat_id", "chat_id"), + Index("ix_chat_messages_agent_id_author_type", "agent_id", "author_type"), + Index("ix_chat_messages_agent_id_chat_id", "agent_id", "chat_id"), + ) + + id = Column( + String, + primary_key=True, + ) + agent_id = Column( + String, + nullable=False, + ) + chat_id = Column( + String, + nullable=False, + ) + user_id = Column( + String, + nullable=True, + ) + author_id = Column( + String, + nullable=False, + ) + author_type = Column( + String, + nullable=False, + ) + model = Column( + String, + nullable=True, + ) + thread_type = Column( + String, + nullable=True, + ) + reply_to = Column( + String, + nullable=True, + ) + message = Column( + String, + nullable=False, + ) + attachments = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + skill_calls = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + input_tokens = Column( + Integer, + default=0, + ) + output_tokens = Column( + Integer, + default=0, + ) + time_cost = Column( + Float, + default=0, + ) + credit_event_id = Column( + String, + nullable=True, + ) + credit_cost = Column( + Numeric(22, 4), + nullable=True, + ) + cold_start_cost = Column( + Float, + default=0, + ) + app_id = Column( + String, + nullable=True, + ) + search_mode = Column( + Boolean, + nullable=True, + ) + super_mode = Column( + Boolean, + nullable=True, + ) + error_type = Column( + String, + nullable=True, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + + +class ChatMessageCreate(BaseModel): + """Base model for creating chat messages with fields needed for creation.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the chat message", + ), + ] + agent_id: Annotated[ + str, Field(description="ID of the 
agent this message belongs to") + ] + chat_id: Annotated[str, Field(description="ID of the chat this message belongs to")] + user_id: Annotated[ + Optional[str], + Field(description="ID of the user this message belongs to or reply to"), + ] + author_id: Annotated[str, Field(description="ID of the message author")] + author_type: Annotated[AuthorType, Field(description="Type of the message author")] + model: Annotated[ + Optional[str], Field(None, description="LLM model used if applicable") + ] + thread_type: Annotated[ + Optional[AuthorType], + Field(None, description="Author Type of the message thread start"), + ] + reply_to: Annotated[ + Optional[str], + Field(None, description="ID of the message this message is a reply to"), + ] + message: Annotated[str, Field(description="Content of the message")] + attachments: Annotated[ + Optional[List[ChatMessageAttachment]], + Field(None, description="List of attachments in the message"), + ] + skill_calls: Annotated[ + Optional[List[ChatMessageSkillCall]], + Field(None, description="Skill call details"), + ] + input_tokens: Annotated[ + int, Field(0, description="Number of tokens in the input message") + ] + output_tokens: Annotated[ + int, Field(0, description="Number of tokens in the output message") + ] + time_cost: Annotated[ + float, Field(0.0, description="Time cost for the message in seconds") + ] + credit_event_id: Annotated[ + Optional[str], + Field(None, description="ID of the credit event for this message"), + ] + credit_cost: Annotated[ + Optional[Decimal], + Field(None, description="Credit cost for the message in credits"), + ] + cold_start_cost: Annotated[ + float, + Field(0.0, description="Cost for the cold start of the message in seconds"), + ] + app_id: Annotated[ + Optional[str], + Field(None, description="Optional application identifier"), + ] + search_mode: Annotated[ + Optional[bool], + Field(None, description="Optional flag to enable search mode"), + ] + super_mode: Annotated[ + Optional[bool], + Field(None, description="Optional flag to enable super mode"), + ] + error_type: Annotated[ + Optional[SystemMessageType], + Field(None, description="Optional error type, used when author_type is system"), + ] + + async def save_in_session(self, db: AsyncSession) -> "ChatMessage": + """Save the chat message to the database. + + Returns: + ChatMessage: The saved chat message with all fields populated + """ + message_record = ChatMessageTable(**self.model_dump(mode="json")) + db.add(message_record) + await db.flush() + await db.refresh(message_record) + return ChatMessage.model_validate(message_record) + + async def save(self) -> "ChatMessage": + """Save the chat message to the database. + + Returns: + ChatMessage: The saved chat message with all fields populated + """ + async with get_session() as db: + resp = await self.save_in_session(db) + await db.commit() + return resp + + @classmethod + async def from_system_message( + cls, + message_type: SystemMessageType, + agent_id: str, + chat_id: str, + user_id: str, + author_id: str, + thread_type: AuthorType, + reply_to: str, + time_cost: float = 0.0, + ) -> "ChatMessageCreate": + """Create a system message. 
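+ + Example (illustrative sketch; all ids are placeholders): + msg = await ChatMessageCreate.from_system_message( + SystemMessageType.INSUFFICIENT_BALANCE, + agent_id="agent-1", + chat_id="chat-1", + user_id="user-1", + author_id="agent-1", + thread_type=AuthorType.WEB, + reply_to="msg-0", + )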
+ + Returns: + ChatMessageCreate: The created system message + """ + + # Get error message (configured or default) + message = await AppSetting.error_message(message_type) + + return cls( + id=str(XID()), + agent_id=agent_id, + chat_id=chat_id, + user_id=user_id, + author_id=author_id, + author_type=AuthorType.SYSTEM, + thread_type=thread_type, + reply_to=reply_to, + message=message, + time_cost=time_cost, + error_type=message_type, + ) + + +class ChatMessage(ChatMessageCreate): + """Chat message model with all fields including server-generated ones.""" + + model_config = ConfigDict( + use_enum_values=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + from_attributes=True, + ) + + created_at: Annotated[ + datetime, Field(description="Timestamp when this message was created") + ] + + def __str__(self): + resp = "" + if self.skill_calls: + for call in self.skill_calls: + # Use .get() because "response" and "error_message" are NotRequired keys + resp += f"{call['name']} {call['parameters']}: {call.get('response', '') if call['success'] else call.get('error_message', '')}\n" + resp += "\n" + resp += self.message + return resp + + def debug_format(self) -> str: + """Format this ChatMessage for debug output. + + Returns: + str: Formatted debug string for the message + """ + resp = "" + + if self.cold_start_cost: + resp += "[ Agent cold start ... ]\n" + resp += f"\n------------------- start cost: {self.cold_start_cost:.3f} seconds\n\n" + + if self.author_type == AuthorType.SKILL: + resp += f"[ Skill Calls: ] ({self.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC)\n\n" + for skill_call in self.skill_calls: + resp += f" {skill_call['name']}: {skill_call['parameters']}\n" + if skill_call["success"]: + resp += f" Success: {skill_call.get('response', '')}\n" + else: + resp += f" Failed: {skill_call.get('error_message', '')}\n" + resp += ( + f"\n------------------- skill cost: {self.time_cost:.3f} seconds\n\n" + ) + elif self.author_type == AuthorType.AGENT: + resp += ( + f"[ Agent: ] ({self.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC)\n\n" + ) + resp += f" {self.message}\n" + resp += ( + f"\n------------------- agent cost: {self.time_cost:.3f} seconds\n\n" + ) + elif self.author_type == AuthorType.SYSTEM: + resp += ( + f"[ System: ] ({self.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC)\n\n" + ) + resp += f" {self.message}\n" + resp += ( + f"\n------------------- system cost: {self.time_cost:.3f} seconds\n\n" + ) + else: + resp += f"[ User: ] ({self.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC) by {self.author_id}\n\n" + resp += f" {self.message}\n" + resp += f"\n------------------- user cost: {self.time_cost:.3f} seconds\n\n" + + return resp + + def sanitize_privacy(self) -> "ChatMessage": + """Remove sensitive information from the chat message. + + This method clears the skill parameters and response + from skill calls while preserving the structure and metadata. 
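+ + Example (illustrative sketch; skill_message is a hypothetical skill-call ChatMessage): + safe = skill_message.sanitize_privacy()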
+ + Returns: + ChatMessage: A new ChatMessage instance with sensitive data removed + """ + if self.author_type != AuthorType.SKILL: + return self + # Create a copy of the current message + sanitized_data = self.model_dump() + + # Clear sensitive data from skill calls + if sanitized_data.get("skill_calls"): + for skill_call in sanitized_data["skill_calls"]: + # Clear parameters and response while keeping structure + skill_call["parameters"] = {} + if "response" in skill_call: + skill_call["response"] = "" + + # Return a new ChatMessage instance with sanitized data + return ChatMessage.model_validate(sanitized_data) + + @classmethod + async def get(cls, message_id: str) -> Optional["ChatMessage"]: + async with get_session() as db: + raw = await db.get(ChatMessageTable, message_id) + if raw: + return ChatMessage.model_validate(raw) + return None + + +class ChatTable(Base): + """Chat database table model.""" + + __tablename__ = "chats" + __table_args__ = (Index("ix_chats_agent_user", "agent_id", "user_id"),) + + id = Column( + String, + primary_key=True, + ) + agent_id = Column( + String, + nullable=False, + ) + user_id = Column( + String, + nullable=False, + ) + summary = Column( + String, + default="", + ) + rounds = Column( + Integer, + default=0, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class ChatCreate(BaseModel): + """Base model for creating chats with fields needed for creation.""" + + model_config = ConfigDict(from_attributes=True) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the chat", + ), + ] + agent_id: Annotated[str, Field(description="ID of the agent this chat belongs to")] + user_id: Annotated[str, Field(description="User ID of the chat")] + summary: Annotated[str, Field("", description="Summary of the chat")] + rounds: Annotated[int, Field(0, description="Number of rounds in the chat")] + + async def save(self) -> "Chat": + """Create a new chat in the database. + + Returns: + Chat: The saved chat with all fields populated + """ + # Build the record with all defaults, including the generated id; + # timestamps are filled by the server defaults on insert + chat_record = ChatTable(**self.model_dump()) + + async with get_session() as db: + db.add(chat_record) + await db.commit() + await db.refresh(chat_record) + + # Create and return a full Chat instance + return Chat.model_validate(chat_record) + + +class Chat(ChatCreate): + """Chat model with all fields including server-generated ones.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")}, + ) + + created_at: Annotated[ + datetime, Field(description="Timestamp when this chat was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this chat was updated") + ] + + @classmethod + async def get(cls, id: str) -> Optional["Chat"]: + """Get a chat by its ID. 
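+ + Example (illustrative sketch; the id is a placeholder): + chat = await Chat.get("chat-123") + if chat: + print(chat.summary)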
+
+        Args:
+            id: ID of the chat to get
+
+        Returns:
+            Chat if found, None otherwise
+        """
+        async with get_session() as db:
+            chat_record = await db.get(ChatTable, id)
+            if chat_record:
+                return cls.model_validate(chat_record)
+            return None
+
+    async def delete(self):
+        """Delete the chat from the database."""
+        async with get_session() as db:
+            chat_record = await db.get(ChatTable, self.id)
+            if chat_record:
+                await db.delete(chat_record)
+                await db.commit()
+
+    async def add_round(self):
+        """Increment the number of rounds in the chat on the database server.
+
+        Uses a direct SQL UPDATE statement to increment the rounds counter
+        on the server side, avoiding potential race conditions.
+        """
+        async with get_session() as db:
+            stmt = (
+                update(ChatTable)
+                .where(ChatTable.id == self.id)
+                .values(rounds=ChatTable.rounds + 1)
+            )
+            await db.execute(stmt)
+            await db.commit()
+
+            # Update local object
+            self.rounds += 1
+
+    async def update_summary(self, summary: str) -> "Chat":
+        """Update the chat summary in the database.
+
+        Uses a direct SQL UPDATE statement to set the summary field.
+
+        Args:
+            summary: New summary text for the chat
+
+        Returns:
+            Chat: The updated chat instance
+        """
+        async with get_session() as db:
+            stmt = (
+                update(ChatTable).where(ChatTable.id == self.id).values(summary=summary)
+            )
+            await db.execute(stmt)
+            await db.commit()
+
+            # Update local object
+            self.summary = summary
+            return self
+
+    @classmethod
+    async def get_by_agent_user(cls, agent_id: str, user_id: str) -> List["Chat"]:
+        """Get recent chats for a specific agent and user.
+
+        Args:
+            agent_id: ID of the agent
+            user_id: ID of the user
+
+        Returns:
+            List of up to 10 chats, most recently updated first
+        """
+        async with get_session() as db:
+            results = await db.scalars(
+                select(ChatTable)
+                .where(ChatTable.agent_id == agent_id, ChatTable.user_id == user_id)
+                .order_by(desc(ChatTable.updated_at))
+                .limit(10)
+            )
+
+            return [cls.model_validate(chat) for chat in results]
diff --git a/intentkit/models/conversation.py b/intentkit/models/conversation.py
new file mode 100644
index 00000000..74da5e3c
--- /dev/null
+++ b/intentkit/models/conversation.py
@@ -0,0 +1,286 @@
+"""Conversation models for agent generator.
+
+This module provides models for tracking conversation history and projects
+related to agent generation sessions.
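+
+A minimal usage sketch (identifiers and prompt text hypothetical, for
+illustration only):
+
+    project = await ConversationProjectCreate(user_id="user_123").save()
+    await ConversationMessageCreate(
+        project_id=project.id,
+        role="user",
+        content="Build me a DeFi research agent",
+    ).save()
+    history = await ConversationMessage.get_by_project(project.id)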
+""" + +from datetime import datetime +from typing import Annotated, List, Optional + +from epyxid import XID +from intentkit.models.base import Base +from intentkit.models.db import get_session +from pydantic import BaseModel, ConfigDict, Field +from sqlalchemy import ( + Column, + DateTime, + Index, + String, + Text, + desc, + func, + select, +) +from sqlalchemy.dialects.postgresql import JSON, JSONB +from sqlalchemy.ext.asyncio import AsyncSession + + +class ConversationProjectTable(Base): + """Conversation project database table model.""" + + __tablename__ = "generator_conversation_projects" + __table_args__ = ( + Index("ix_generator_conversation_projects_user_id", "user_id"), + Index("ix_generator_conversation_projects_created_at", "created_at"), + ) + + id = Column( + String, + primary_key=True, + ) + user_id = Column( + String, + nullable=True, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + last_activity = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + + +class ConversationMessageTable(Base): + """Conversation message database table model.""" + + __tablename__ = "generator_conversation_messages" + __table_args__ = ( + Index("ix_generator_conversation_messages_project_id", "project_id"), + Index("ix_generator_conversation_messages_created_at", "created_at"), + ) + + id = Column( + String, + primary_key=True, + ) + project_id = Column( + String, + nullable=False, + ) + role = Column( + String, + nullable=False, + ) + content = Column( + Text, + nullable=False, + ) + message_metadata = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + + +class ConversationProjectCreate(BaseModel): + """Base model for creating conversation projects.""" + + model_config = ConfigDict(from_attributes=True) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the conversation project", + ), + ] + user_id: Annotated[ + Optional[str], + Field(None, description="User ID associated with this project"), + ] + + async def save_in_session(self, db: AsyncSession) -> "ConversationProject": + """Save the conversation project in the given database session.""" + db_project = ConversationProjectTable( + id=self.id, + user_id=self.user_id, + ) + db.add(db_project) + await db.flush() + await db.refresh(db_project) + return ConversationProject.model_validate(db_project) + + async def save(self) -> "ConversationProject": + """Save the conversation project to the database.""" + async with get_session() as db: + result = await self.save_in_session(db) + await db.commit() + return result + + +class ConversationProject(ConversationProjectCreate): + """Conversation project model with all fields including server-generated ones.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + created_at: Annotated[ + datetime, Field(description="Timestamp when this project was created") + ] + last_activity: Annotated[ + datetime, Field(description="Timestamp of last activity in this project") + ] + + @classmethod + async def get(cls, project_id: str) -> Optional["ConversationProject"]: + """Get a conversation project by ID.""" + async with get_session() as db: + result = await db.execute( + select(ConversationProjectTable).where( + ConversationProjectTable.id == 
project_id + ) + ) + project = result.scalar_one_or_none() + if project: + return cls.model_validate(project) + return None + + async def update_activity(self) -> "ConversationProject": + """Update the last activity timestamp for this project.""" + async with get_session() as db: + from sqlalchemy import update + + await db.execute( + update(ConversationProjectTable) + .where(ConversationProjectTable.id == self.id) + .values(last_activity=func.now()) + ) + await db.commit() + # Refresh the object + result = await db.execute( + select(ConversationProjectTable).where( + ConversationProjectTable.id == self.id + ) + ) + project = result.scalar_one() + return ConversationProject.model_validate(project) + + @classmethod + async def get_by_user( + cls, user_id: Optional[str] = None, limit: int = 50 + ) -> List["ConversationProject"]: + """Get conversation projects by user ID.""" + async with get_session() as db: + query = select(ConversationProjectTable).order_by( + desc(ConversationProjectTable.last_activity) + ) + + if user_id is not None: + query = query.where(ConversationProjectTable.user_id == user_id) + + query = query.limit(limit) + + result = await db.execute(query) + projects = result.scalars().all() + return [cls.model_validate(project) for project in projects] + + +class ConversationMessageCreate(BaseModel): + """Base model for creating conversation messages.""" + + model_config = ConfigDict(from_attributes=True) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the conversation message", + ), + ] + project_id: Annotated[str, Field(description="Project ID this message belongs to")] + role: Annotated[str, Field(description="Role of the message sender")] + content: Annotated[str, Field(description="Content of the message")] + message_metadata: Annotated[ + Optional[dict], + Field(None, description="Additional metadata for the message"), + ] + + async def save_in_session(self, db: AsyncSession) -> "ConversationMessage": + """Save the conversation message in the given database session.""" + db_message = ConversationMessageTable( + id=self.id, + project_id=self.project_id, + role=self.role, + content=self.content, + message_metadata=self.message_metadata, + ) + db.add(db_message) + await db.flush() + await db.refresh(db_message) + return ConversationMessage.model_validate(db_message) + + async def save(self) -> "ConversationMessage": + """Save the conversation message to the database.""" + async with get_session() as db: + result = await self.save_in_session(db) + await db.commit() + return result + + +class ConversationMessage(ConversationMessageCreate): + """Conversation message model with all fields including server-generated ones.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + created_at: Annotated[ + datetime, Field(description="Timestamp when this message was created") + ] + + @classmethod + async def get_by_project( + cls, project_id: str, user_id: Optional[str] = None + ) -> List["ConversationMessage"]: + """Get conversation messages for a project.""" + async with get_session() as db: + # First check if project exists and user has access + project_query = select(ConversationProjectTable).where( + ConversationProjectTable.id == project_id + ) + if user_id is not None: + project_query = project_query.where( + ConversationProjectTable.user_id == user_id + ) + + project_result = await db.execute(project_query) + project = 
project_result.scalar_one_or_none() + + if not project: + return [] + + # Get messages for the project + messages_query = ( + select(ConversationMessageTable) + .where(ConversationMessageTable.project_id == project_id) + .order_by(ConversationMessageTable.created_at) + ) + + result = await db.execute(messages_query) + messages = result.scalars().all() + return [cls.model_validate(message) for message in messages] diff --git a/intentkit/models/credit.py b/intentkit/models/credit.py new file mode 100644 index 00000000..80913ea2 --- /dev/null +++ b/intentkit/models/credit.py @@ -0,0 +1,1699 @@ +import logging +from datetime import datetime, timezone +from decimal import ROUND_HALF_UP, Decimal +from enum import Enum +from typing import Annotated, Any, Dict, List, Optional, Tuple + +from epyxid import XID +from fastapi import HTTPException +from intentkit.models.app_setting import AppSetting +from intentkit.models.base import Base +from intentkit.models.db import get_session +from pydantic import BaseModel, ConfigDict, Field, field_validator +from sqlalchemy import ( + ARRAY, + JSON, + Column, + DateTime, + Index, + Numeric, + String, + func, + select, + update, +) +from sqlalchemy.ext.asyncio import AsyncSession + +logger = logging.getLogger(__name__) + +# Precision constant for 4 decimal places +FOURPLACES = Decimal("0.0001") + + +class CreditType(str, Enum): + """Credit type is used in db column names, do not change it.""" + + FREE = "free_credits" + REWARD = "reward_credits" + PERMANENT = "credits" + + +class OwnerType(str, Enum): + """Type of credit account owner.""" + + USER = "user" + AGENT = "agent" + PLATFORM = "platform" + + +# Platform virtual account ids/owner ids, they are used for transaction balance tracing +# The owner id and account id are the same +DEFAULT_PLATFORM_ACCOUNT_RECHARGE = "platform_recharge" +DEFAULT_PLATFORM_ACCOUNT_REFILL = "platform_refill" +DEFAULT_PLATFORM_ACCOUNT_ADJUSTMENT = "platform_adjustment" +DEFAULT_PLATFORM_ACCOUNT_REWARD = "platform_reward" +DEFAULT_PLATFORM_ACCOUNT_REFUND = "platform_refund" +DEFAULT_PLATFORM_ACCOUNT_MESSAGE = "platform_message" +DEFAULT_PLATFORM_ACCOUNT_SKILL = "platform_skill" +DEFAULT_PLATFORM_ACCOUNT_MEMORY = "platform_memory" +DEFAULT_PLATFORM_ACCOUNT_VOICE = "platform_voice" +DEFAULT_PLATFORM_ACCOUNT_KNOWLEDGE = "platform_knowledge" +DEFAULT_PLATFORM_ACCOUNT_FEE = "platform_fee" +DEFAULT_PLATFORM_ACCOUNT_DEV = "platform_dev" + + +class CreditAccountTable(Base): + """Credit account database table model.""" + + __tablename__ = "credit_accounts" + __table_args__ = (Index("ix_credit_accounts_owner", "owner_type", "owner_id"),) + + id = Column( + String, + primary_key=True, + ) + owner_type = Column( + String, + nullable=False, + ) + owner_id = Column( + String, + nullable=False, + ) + free_quota = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + refill_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + free_credits = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + reward_credits = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + credits = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + income_at = Column( + DateTime(timezone=True), + nullable=True, + ) + expense_at = Column( + DateTime(timezone=True), + nullable=True, + ) + last_event_id = Column( + String, + nullable=True, + ) + # Total statistics fields + total_income = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + total_free_income = Column( + Numeric(22, 4), + default=0, + 
nullable=False, + ) + total_reward_income = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + total_permanent_income = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + total_expense = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + total_free_expense = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + total_reward_expense = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + total_permanent_expense = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class CreditAccount(BaseModel): + """Credit account model with all fields.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the credit account", + ), + ] + owner_type: Annotated[OwnerType, Field(description="Type of the account owner")] + owner_id: Annotated[str, Field(description="ID of the account owner")] + free_quota: Annotated[ + Decimal, + Field( + default=Decimal("0"), description="Daily credit quota that resets each day" + ), + ] + refill_amount: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Amount to refill hourly, not exceeding free_quota", + ), + ] + free_credits: Annotated[ + Decimal, + Field(default=Decimal("0"), description="Current available daily credits"), + ] + reward_credits: Annotated[ + Decimal, + Field( + default=Decimal("0"), description="Reward credits earned through rewards" + ), + ] + credits: Annotated[ + Decimal, + Field(default=Decimal("0"), description="Credits added through top-ups"), + ] + income_at: Annotated[ + Optional[datetime], + Field(None, description="Timestamp of the last income transaction"), + ] + expense_at: Annotated[ + Optional[datetime], + Field(None, description="Timestamp of the last expense transaction"), + ] + last_event_id: Annotated[ + Optional[str], + Field(None, description="ID of the last event that modified this account"), + ] + # Total statistics fields + total_income: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total income from all credit transactions", + ), + ] + total_free_income: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total income from free credit transactions", + ), + ] + total_reward_income: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total income from reward credit transactions", + ), + ] + total_permanent_income: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total income from permanent credit transactions", + ), + ] + total_expense: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total expense from all credit transactions", + ), + ] + total_free_expense: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total expense from free credit transactions", + ), + ] + total_reward_expense: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total expense from reward credit transactions", + ), + ] + total_permanent_expense: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total expense from permanent credit transactions", + ), + ] 
+    created_at: Annotated[
+        datetime, Field(description="Timestamp when this account was created")
+    ]
+    updated_at: Annotated[
+        datetime, Field(description="Timestamp when this account was last updated")
+    ]
+
+    @field_validator(
+        "free_quota",
+        "refill_amount",
+        "free_credits",
+        "reward_credits",
+        "credits",
+        "total_income",
+        "total_free_income",
+        "total_reward_income",
+        "total_permanent_income",
+        "total_expense",
+        "total_free_expense",
+        "total_reward_expense",
+        "total_permanent_expense",
+    )
+    @classmethod
+    def round_decimal(cls, v: Any) -> Decimal:
+        """Round decimal values to 4 decimal places."""
+        if isinstance(v, Decimal):
+            return v.quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP)
+        elif isinstance(v, (int, float)):
+            return Decimal(str(v)).quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP)
+        return v
+
+    @property
+    def balance(self) -> Decimal:
+        """Return the total balance of the account."""
+        return self.free_credits + self.reward_credits + self.credits
+
+    @classmethod
+    async def get_in_session(
+        cls,
+        session: AsyncSession,
+        owner_type: OwnerType,
+        owner_id: str,
+    ) -> "CreditAccount":
+        """Get a credit account by owner type and ID.
+
+        Args:
+            session: Async session to use for database queries
+            owner_type: Type of the owner
+            owner_id: ID of the owner
+
+        Returns:
+            CreditAccount: The matching account
+
+        Raises:
+            HTTPException: 404 if the account does not exist
+        """
+        stmt = select(CreditAccountTable).where(
+            CreditAccountTable.owner_type == owner_type,
+            CreditAccountTable.owner_id == owner_id,
+        )
+        result = await session.scalar(stmt)
+        if not result:
+            raise HTTPException(status_code=404, detail="Credit account not found")
+        return cls.model_validate(result)
+
+    @classmethod
+    async def get_or_create_in_session(
+        cls,
+        session: AsyncSession,
+        owner_type: OwnerType,
+        owner_id: str,
+        for_update: bool = False,
+    ) -> "CreditAccount":
+        """Get a credit account by owner type and ID, creating it if missing.
+
+        Args:
+            session: Async session to use for database queries
+            owner_type: Type of the owner
+            owner_id: ID of the owner
+            for_update: Whether to lock the row for update
+
+        Returns:
+            CreditAccount: The existing or newly created account
+        """
+        stmt = select(CreditAccountTable).where(
+            CreditAccountTable.owner_type == owner_type,
+            CreditAccountTable.owner_id == owner_id,
+        )
+        if for_update:
+            stmt = stmt.with_for_update()
+        result = await session.scalar(stmt)
+        if not result:
+            account = await cls.create_in_session(session, owner_type, owner_id)
+        else:
+            account = cls.model_validate(result)
+
+        return account
+
+    @classmethod
+    async def get_or_create(
+        cls, owner_type: OwnerType, owner_id: str
+    ) -> "CreditAccount":
+        """Get a credit account by owner type and ID, creating it if missing.
+
+        Args:
+            owner_type: Type of the owner
+            owner_id: ID of the owner
+
+        Returns:
+            CreditAccount: The existing or newly created account
+        """
+        async with get_session() as session:
+            account = await cls.get_or_create_in_session(session, owner_type, owner_id)
+            await session.commit()
+            return account
+
+    @classmethod
+    async def deduction_in_session(
+        cls,
+        session: AsyncSession,
+        owner_type: OwnerType,
+        owner_id: str,
+        credit_type: CreditType,
+        amount: Decimal,
+        event_id: Optional[str] = None,
+    ) -> "CreditAccount":
+        """Deduct credits of a single credit type from an account.
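+
+        For illustration (amounts hypothetical): deducting Decimal("2.5") of
+        CreditType.FREE subtracts 2.5 from the account's free_credits column
+        and adds it to total_expense and total_free_expense, even if the
+        balance goes negative as a result.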
+
+        The balance is not checked; callers are responsible for verifying
+        sufficiency beforehand.
+        """
+        # check first, create if not exists
+        await cls.get_or_create_in_session(session, owner_type, owner_id)
+
+        # Quantize the amount to ensure proper precision
+        quantized_amount = amount.quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+        values_dict = {
+            credit_type.value: getattr(CreditAccountTable, credit_type.value)
+            - quantized_amount,
+            "expense_at": datetime.now(timezone.utc),
+            # Update total expense statistics
+            "total_expense": CreditAccountTable.total_expense + quantized_amount,
+        }
+        if event_id:
+            values_dict["last_event_id"] = event_id
+
+        # Update corresponding statistics fields based on credit type
+        if credit_type == CreditType.FREE:
+            values_dict["total_free_expense"] = (
+                CreditAccountTable.total_free_expense + quantized_amount
+            )
+        elif credit_type == CreditType.REWARD:
+            values_dict["total_reward_expense"] = (
+                CreditAccountTable.total_reward_expense + quantized_amount
+            )
+        elif credit_type == CreditType.PERMANENT:
+            values_dict["total_permanent_expense"] = (
+                CreditAccountTable.total_permanent_expense + quantized_amount
+            )
+
+        stmt = (
+            update(CreditAccountTable)
+            .where(
+                CreditAccountTable.owner_type == owner_type,
+                CreditAccountTable.owner_id == owner_id,
+            )
+            .values(values_dict)
+            .returning(CreditAccountTable)
+        )
+        res = await session.scalar(stmt)
+        if not res:
+            raise HTTPException(status_code=500, detail="Failed to expense credits")
+        return cls.model_validate(res)
+
+    @classmethod
+    async def expense_in_session(
+        cls,
+        session: AsyncSession,
+        owner_type: OwnerType,
+        owner_id: str,
+        amount: Decimal,
+        event_id: Optional[str] = None,
+    ) -> Tuple["CreditAccount", Dict[CreditType, Decimal]]:
+        """Spend credits and return the updated account plus a per-type breakdown.
+
+        The balance is deliberately not checked here: a conversation may incur
+        several expenses in a row, and it cannot be interrupted partway through.
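+
+        For illustration (numbers hypothetical): with free_credits=3,
+        reward_credits=2 and credits=10, an expense of Decimal("6") drains
+        the buckets in order and returns a breakdown of
+        {FREE: 3, REWARD: 2, PERMANENT: 1}.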
+ """ + # check first + account = await cls.get_or_create_in_session(session, owner_type, owner_id) + + # expense + details = {} + + amount_left = amount + + if amount_left <= account.free_credits: + details[CreditType.FREE] = amount_left + amount_left = Decimal("0") + else: + if account.free_credits > 0: + details[CreditType.FREE] = account.free_credits + amount_left = (amount_left - account.free_credits).quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + if amount_left <= account.reward_credits: + details[CreditType.REWARD] = amount_left + amount_left = Decimal("0") + else: + if account.reward_credits > 0: + details[CreditType.REWARD] = account.reward_credits + amount_left = (amount_left - account.reward_credits).quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + details[CreditType.PERMANENT] = amount_left + + # Create values dict based on what's in details, defaulting to 0 for missing keys + values_dict = { + "expense_at": datetime.now(timezone.utc), + } + if event_id: + values_dict["last_event_id"] = event_id + + # Calculate total expense for statistics + total_expense_amount = Decimal("0") + + # Add credit type values only if they exist in details + for credit_type in [CreditType.FREE, CreditType.REWARD, CreditType.PERMANENT]: + if credit_type in details: + # Quantize the amount to ensure proper precision + quantized_amount = details[credit_type].quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + values_dict[credit_type.value] = ( + getattr(CreditAccountTable, credit_type.value) - quantized_amount + ) + + # Update corresponding statistics fields + total_expense_amount += quantized_amount + if credit_type == CreditType.FREE: + values_dict["total_free_expense"] = ( + CreditAccountTable.total_free_expense + quantized_amount + ) + elif credit_type == CreditType.REWARD: + values_dict["total_reward_expense"] = ( + CreditAccountTable.total_reward_expense + quantized_amount + ) + elif credit_type == CreditType.PERMANENT: + values_dict["total_permanent_expense"] = ( + CreditAccountTable.total_permanent_expense + quantized_amount + ) + + # Update total expense if there was any expense + if total_expense_amount > 0: + values_dict["total_expense"] = ( + CreditAccountTable.total_expense + total_expense_amount + ) + + stmt = ( + update(CreditAccountTable) + .where( + CreditAccountTable.owner_type == owner_type, + CreditAccountTable.owner_id == owner_id, + ) + .values(values_dict) + .returning(CreditAccountTable) + ) + res = await session.scalar(stmt) + if not res: + raise HTTPException(status_code=500, detail="Failed to expense credits") + return cls.model_validate(res), details + + def has_sufficient_credits(self, amount: Decimal) -> bool: + """Check if the account has enough credits to cover the specified amount. 
+
+        Args:
+            amount: The amount of credits to check against
+
+        Returns:
+            bool: True if there are enough credits, False otherwise
+        """
+        return amount <= self.free_credits + self.reward_credits + self.credits
+
+    @classmethod
+    async def income_in_session(
+        cls,
+        session: AsyncSession,
+        owner_type: OwnerType,
+        owner_id: str,
+        amount_details: Dict[CreditType, Decimal],
+        event_id: Optional[str] = None,
+    ) -> "CreditAccount":
+        """Add credits to an account, one amount per credit type.
+
+        Creates the account if it does not exist, then applies every positive
+        amount in amount_details to the matching column and income statistics.
+        """
+        # check first, create if not exists
+        await cls.get_or_create_in_session(session, owner_type, owner_id)
+        # income
+        values_dict = {
+            "income_at": datetime.now(timezone.utc),
+        }
+        if event_id:
+            values_dict["last_event_id"] = event_id
+
+        # Calculate total income for statistics
+        total_income_amount = Decimal("0")
+
+        # Add credit type values based on amount_details
+        for credit_type, amount in amount_details.items():
+            if amount > 0:
+                # Quantize the amount to ensure 4 decimal places precision
+                quantized_amount = amount.quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+                values_dict[credit_type.value] = (
+                    getattr(CreditAccountTable, credit_type.value) + quantized_amount
+                )
+
+                # Update corresponding statistics fields
+                total_income_amount += quantized_amount
+                if credit_type == CreditType.FREE:
+                    values_dict["total_free_income"] = (
+                        CreditAccountTable.total_free_income + quantized_amount
+                    )
+                elif credit_type == CreditType.REWARD:
+                    values_dict["total_reward_income"] = (
+                        CreditAccountTable.total_reward_income + quantized_amount
+                    )
+                elif credit_type == CreditType.PERMANENT:
+                    values_dict["total_permanent_income"] = (
+                        CreditAccountTable.total_permanent_income + quantized_amount
+                    )
+
+        # Update total income if there was any income
+        if total_income_amount > 0:
+            values_dict["total_income"] = (
+                CreditAccountTable.total_income + total_income_amount
+            )
+
+        stmt = (
+            update(CreditAccountTable)
+            .where(
+                CreditAccountTable.owner_type == owner_type,
+                CreditAccountTable.owner_id == owner_id,
+            )
+            .values(values_dict)
+            .returning(CreditAccountTable)
+        )
+        res = await session.scalar(stmt)
+        if not res:
+            raise HTTPException(status_code=500, detail="Failed to add income credits")
+        return cls.model_validate(res)
+
+    @classmethod
+    async def create_in_session(
+        cls,
+        session: AsyncSession,
+        owner_type: OwnerType,
+        owner_id: str,
+        free_quota: Optional[Decimal] = None,
+        refill_amount: Optional[Decimal] = None,
+    ) -> "CreditAccount":
+        """Create a new credit account for the given owner.
+
+        Used by get_or_create_in_session for the silent creation of accounts
+        when they are first accessed.
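+
+        For USER accounts the initial free quota is also booked as a REFILL
+        event with a double-entry transaction pair: the user account is
+        credited and the platform refill account is debited, so ledger totals
+        stay balanced.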
+ + Args: + session: Async session to use for database queries + owner_type: Type of the owner + owner_id: ID of the owner + free_quota: Daily quota for a new account if created (if None, reads from payment settings) + refill_amount: Hourly refill amount (if None, reads from payment settings) + + Returns: + CreditAccount: The existing or newly created credit account + """ + # Get payment settings if values not provided + if free_quota is None or refill_amount is None: + payment_settings = await AppSetting.payment() + if free_quota is None: + free_quota = payment_settings.free_quota + if refill_amount is None: + refill_amount = payment_settings.refill_amount + + if owner_type != OwnerType.USER: + # only users have daily quota + free_quota = Decimal("0.0") + refill_amount = Decimal("0.0") + # Create event_id at the beginning for consistency + event_id = str(XID()) + + account = CreditAccountTable( + id=str(XID()), + owner_type=owner_type, + owner_id=owner_id, + free_quota=free_quota, + refill_amount=refill_amount, + free_credits=free_quota, + reward_credits=0.0, + credits=0.0, + income_at=datetime.now(timezone.utc), + expense_at=None, + last_event_id=event_id if owner_type == OwnerType.USER else None, + # Initialize new statistics fields + # For USER accounts, initial free_quota counts as income + total_income=free_quota, + total_free_income=free_quota, + total_reward_income=0.0, + total_permanent_income=0.0, + total_expense=0.0, + total_free_expense=0.0, + total_reward_expense=0.0, + total_permanent_expense=0.0, + ) + # Platform virtual accounts have fixed IDs, same as owner_id + if owner_type == OwnerType.PLATFORM: + account.id = owner_id + session.add(account) + await session.flush() + await session.refresh(account) + # Only user accounts have first refill + if owner_type == OwnerType.USER: + # First refill account + await cls.deduction_in_session( + session, + OwnerType.PLATFORM, + DEFAULT_PLATFORM_ACCOUNT_REFILL, + CreditType.FREE, + free_quota, + event_id, + ) + # Create refill event record + event = CreditEventTable( + id=event_id, + event_type=EventType.REFILL, + user_id=owner_id, + upstream_type=UpstreamType.INITIALIZER, + upstream_tx_id=account.id, + direction=Direction.INCOME, + account_id=account.id, + credit_type=CreditType.FREE, + credit_types=[CreditType.FREE], + total_amount=free_quota, + balance_after=free_quota, + base_amount=free_quota, + base_original_amount=free_quota, + base_free_amount=free_quota, + free_amount=free_quota, # Set free_amount since this is a free credit refill + reward_amount=Decimal("0"), # No reward credits involved + permanent_amount=Decimal("0"), # No permanent credits involved + agent_wallet_address=None, # No agent involved in initial refill + note="Initial refill", + ) + session.add(event) + await session.flush() + + # Create credit transaction records + # 1. User account transaction (credit) + user_tx = CreditTransactionTable( + id=str(XID()), + account_id=account.id, + event_id=event_id, + tx_type=TransactionType.RECHARGE, + credit_debit=CreditDebit.CREDIT, + change_amount=free_quota, + credit_type=CreditType.FREE, + free_amount=free_quota, + reward_amount=Decimal("0"), + permanent_amount=Decimal("0"), + ) + session.add(user_tx) + + # 2. 
Platform recharge account transaction (debit) + platform_tx = CreditTransactionTable( + id=str(XID()), + account_id=DEFAULT_PLATFORM_ACCOUNT_REFILL, + event_id=event_id, + tx_type=TransactionType.REFILL, + credit_debit=CreditDebit.DEBIT, + change_amount=free_quota, + credit_type=CreditType.FREE, + free_amount=free_quota, + reward_amount=Decimal("0"), + permanent_amount=Decimal("0"), + ) + session.add(platform_tx) + + return cls.model_validate(account) + + @classmethod + async def update_daily_quota( + cls, + session: AsyncSession, + user_id: str, + free_quota: Optional[Decimal] = None, + refill_amount: Optional[Decimal] = None, + upstream_tx_id: str = "", + note: str = "", + ) -> "CreditAccount": + """ + Update the daily quota and refill amount of a user's credit account. + + Args: + session: Async session to use for database operations + user_id: ID of the user to update + free_quota: Optional new daily quota value + refill_amount: Optional amount to refill hourly, not exceeding free_quota + upstream_tx_id: ID of the upstream transaction (for logging purposes) + note: Explanation for changing the daily quota + + Returns: + Updated user credit account + """ + # Log the upstream_tx_id for record keeping + logger.info( + f"Updating quota settings for user {user_id} with upstream_tx_id: {upstream_tx_id}" + ) + + # Check that at least one parameter is provided + if free_quota is None and refill_amount is None: + raise ValueError( + "At least one of free_quota or refill_amount must be provided" + ) + + # Get current account to check existing values and validate + user_account = await cls.get_or_create_in_session( + session, OwnerType.USER, user_id, for_update=True + ) + + # Use existing values if not provided + if free_quota is None: + free_quota = user_account.free_quota + elif free_quota <= Decimal("0"): + raise ValueError("Daily quota must be positive") + + if refill_amount is None: + refill_amount = user_account.refill_amount + elif refill_amount < Decimal("0"): + raise ValueError("Refill amount cannot be negative") + + # Ensure refill_amount doesn't exceed free_quota + if refill_amount > free_quota: + raise ValueError("Refill amount cannot exceed daily quota") + + if not note: + raise ValueError("Quota update requires a note explaining the reason") + + # Quantize values to ensure proper precision (4 decimal places) + free_quota = free_quota.quantize(FOURPLACES, rounding=ROUND_HALF_UP) + refill_amount = refill_amount.quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Update the free_quota field + stmt = ( + update(CreditAccountTable) + .where( + CreditAccountTable.owner_type == OwnerType.USER, + CreditAccountTable.owner_id == user_id, + ) + .values(free_quota=free_quota, refill_amount=refill_amount) + .returning(CreditAccountTable) + ) + result = await session.scalar(stmt) + if not result: + raise ValueError("Failed to update user account") + + user_account = cls.model_validate(result) + + # No credit event needed for updating account settings + + return user_account + + +class RewardType(str, Enum): + """Reward type enumeration for reward-specific events.""" + + REWARD = "reward" + EVENT_REWARD = "event_reward" + RECHARGE_BONUS = "recharge_bonus" + + +class EventType(str, Enum): + """Type of credit event.""" + + MEMORY = "memory" + MESSAGE = "message" + SKILL_CALL = "skill_call" + VOICE = "voice" + KNOWLEDGE_BASE = "knowledge_base" + RECHARGE = "recharge" + REFUND = "refund" + ADJUSTMENT = "adjustment" + REFILL = "refill" + # Sync with RewardType values + REWARD = "reward" + EVENT_REWARD = 
"event_reward" + RECHARGE_BONUS = "recharge_bonus" + + @classmethod + def get_reward_types(cls): + """Get all reward-related event types""" + return [cls.REWARD, cls.EVENT_REWARD, cls.RECHARGE_BONUS] + + +class UpstreamType(str, Enum): + """Type of upstream transaction.""" + + API = "api" + SCHEDULER = "scheduler" + EXECUTOR = "executor" + INITIALIZER = "initializer" + + +class Direction(str, Enum): + """Direction of credit flow.""" + + INCOME = "income" + EXPENSE = "expense" + + +class CreditEventTable(Base): + """Credit events database table model. + + Records business events for user, like message processing, skill calls, etc. + """ + + __tablename__ = "credit_events" + __table_args__ = ( + Index( + "ix_credit_events_upstream", "upstream_type", "upstream_tx_id", unique=True + ), + Index("ix_credit_events_account_id", "account_id"), + Index("ix_credit_events_user_id", "user_id"), + Index("ix_credit_events_agent_id", "agent_id"), + Index("ix_credit_events_fee_dev", "fee_dev_account"), + Index("ix_credit_events_created_at", "created_at"), + ) + + id = Column( + String, + primary_key=True, + ) + account_id = Column( + String, + nullable=False, + ) + event_type = Column( + String, + nullable=False, + ) + user_id = Column( + String, + nullable=True, + ) + upstream_type = Column( + String, + nullable=False, + ) + upstream_tx_id = Column( + String, + nullable=False, + ) + agent_id = Column( + String, + nullable=True, + ) + agent_wallet_address = Column( + String, + nullable=True, + ) + start_message_id = Column( + String, + nullable=True, + ) + message_id = Column( + String, + nullable=True, + ) + model = Column( + String, + nullable=True, + ) + skill_call_id = Column( + String, + nullable=True, + ) + skill_name = Column( + String, + nullable=True, + ) + direction = Column( + String, + nullable=False, + ) + total_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + credit_type = Column( + String, + nullable=False, + ) + credit_types = Column( + JSON().with_variant(ARRAY(String), "postgresql"), + nullable=True, + ) + balance_after = Column( + Numeric(22, 4), + nullable=True, + default=None, + ) + base_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + base_discount_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + base_original_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + base_llm_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + base_skill_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + base_free_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + base_reward_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + base_permanent_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + fee_platform_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + fee_platform_free_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_platform_reward_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_platform_permanent_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_dev_account = Column( + String, + nullable=True, + ) + fee_dev_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + fee_dev_free_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_dev_reward_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_dev_permanent_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_agent_account = Column( + String, + nullable=True, + ) + 
fee_agent_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + fee_agent_free_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_agent_reward_amount = Column( + Numeric(22, 4), + nullable=True, + ) + fee_agent_permanent_amount = Column( + Numeric(22, 4), + nullable=True, + ) + free_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + reward_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + permanent_amount = Column( + Numeric(22, 4), + default=0, + nullable=True, + ) + note = Column( + String, + nullable=True, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + + +class CreditEvent(BaseModel): + """Credit event model with all fields.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the credit event", + ), + ] + account_id: Annotated[ + str, Field(None, description="Account ID from which credits flow") + ] + event_type: Annotated[EventType, Field(description="Type of the event")] + user_id: Annotated[ + Optional[str], Field(None, description="ID of the user if applicable") + ] + upstream_type: Annotated[ + UpstreamType, Field(description="Type of upstream transaction") + ] + upstream_tx_id: Annotated[str, Field(description="Upstream transaction ID if any")] + agent_id: Annotated[ + Optional[str], Field(None, description="ID of the agent if applicable") + ] + agent_wallet_address: Annotated[ + Optional[str], + Field(None, description="Wallet address of the agent if applicable"), + ] + start_message_id: Annotated[ + Optional[str], + Field(None, description="ID of the starting message if applicable"), + ] + message_id: Annotated[ + Optional[str], Field(None, description="ID of the message if applicable") + ] + model: Annotated[ + Optional[str], Field(None, description="LLM model used if applicable") + ] + skill_call_id: Annotated[ + Optional[str], Field(None, description="ID of the skill call if applicable") + ] + skill_name: Annotated[ + Optional[str], Field(None, description="Name of the skill if applicable") + ] + direction: Annotated[Direction, Field(description="Direction of the credit flow")] + total_amount: Annotated[ + Decimal, + Field( + default=Decimal("0"), + description="Total amount (after discount) of credits involved", + ), + ] + credit_type: Annotated[CreditType, Field(description="Type of credits involved")] + credit_types: Annotated[ + Optional[List[CreditType]], + Field(default=None, description="Array of credit types involved"), + ] + balance_after: Annotated[ + Optional[Decimal], + Field(None, description="Account total balance after the transaction"), + ] + base_amount: Annotated[ + Decimal, + Field(default=Decimal("0"), description="Base amount of credits involved"), + ] + base_discount_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Base discount amount"), + ] + base_original_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Base original amount"), + ] + base_llm_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Base LLM cost amount"), + ] + base_skill_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Base skill cost amount"), + ] + base_free_amount: Annotated[ + Optional[Decimal], + 
Field(default=Decimal("0"), description="Base free credit amount"), + ] + base_reward_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Base reward credit amount"), + ] + base_permanent_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Base permanent credit amount"), + ] + fee_platform_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Platform fee amount"), + ] + fee_platform_free_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), description="Platform fee amount from free credits" + ), + ] + fee_platform_reward_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), description="Platform fee amount from reward credits" + ), + ] + fee_platform_permanent_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), + description="Platform fee amount from permanent credits", + ), + ] + fee_dev_account: Annotated[ + Optional[str], Field(None, description="Developer account ID receiving fee") + ] + fee_dev_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Developer fee amount"), + ] + fee_dev_free_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), description="Developer fee amount from free credits" + ), + ] + fee_dev_reward_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), description="Developer fee amount from reward credits" + ), + ] + fee_dev_permanent_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), + description="Developer fee amount from permanent credits", + ), + ] + fee_agent_account: Annotated[ + Optional[str], Field(None, description="Agent account ID receiving fee") + ] + fee_agent_amount: Annotated[ + Optional[Decimal], Field(default=Decimal("0"), description="Agent fee amount") + ] + fee_agent_free_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Agent fee amount from free credits"), + ] + fee_agent_reward_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Agent fee amount from reward credits"), + ] + fee_agent_permanent_amount: Annotated[ + Optional[Decimal], + Field( + default=Decimal("0"), description="Agent fee amount from permanent credits" + ), + ] + free_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Free credit amount involved"), + ] + reward_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Reward credit amount involved"), + ] + permanent_amount: Annotated[ + Optional[Decimal], + Field(default=Decimal("0"), description="Permanent credit amount involved"), + ] + note: Annotated[Optional[str], Field(None, description="Additional notes")] + created_at: Annotated[ + datetime, Field(description="Timestamp when this event was created") + ] + + @field_validator( + "total_amount", + "balance_after", + "base_amount", + "base_discount_amount", + "base_original_amount", + "base_llm_amount", + "base_skill_amount", + "base_free_amount", + "base_reward_amount", + "base_permanent_amount", + "fee_platform_amount", + "fee_platform_free_amount", + "fee_platform_reward_amount", + "fee_platform_permanent_amount", + "fee_dev_amount", + "fee_dev_free_amount", + "fee_dev_reward_amount", + "fee_dev_permanent_amount", + "fee_agent_amount", + "fee_agent_free_amount", + "fee_agent_reward_amount", + "fee_agent_permanent_amount", + "free_amount", + "reward_amount", + "permanent_amount", + ) + @classmethod + def 
round_decimal(cls, v: Any) -> Optional[Decimal]: + """Round decimal values to 4 decimal places.""" + if v is None: + return None + if isinstance(v, Decimal): + return v.quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + elif isinstance(v, (int, float)): + return Decimal(str(v)).quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + return v + + @classmethod + async def check_upstream_tx_id_exists( + cls, session: AsyncSession, upstream_type: UpstreamType, upstream_tx_id: str + ) -> None: + """ + Check if an event with the given upstream_type and upstream_tx_id already exists. + Raises HTTP 400 error if it exists to prevent duplicate transactions. + + Args: + session: Database session + upstream_type: Type of the upstream transaction + upstream_tx_id: ID of the upstream transaction + + Raises: + HTTPException: If a transaction with the same upstream_tx_id already exists + """ + stmt = select(CreditEventTable).where( + CreditEventTable.upstream_type == upstream_type, + CreditEventTable.upstream_tx_id == upstream_tx_id, + ) + result = await session.scalar(stmt) + if result: + raise HTTPException( + status_code=400, + detail=f"Transaction with upstream_tx_id '{upstream_tx_id}' already exists. Do not resubmit.", + ) + + +class TransactionType(str, Enum): + """Type of credit transaction.""" + + PAY = "pay" + RECEIVE_BASE_LLM = "receive_base_llm" + RECEIVE_BASE_SKILL = "receive_base_skill" + RECEIVE_BASE_MEMORY = "receive_base_memory" + RECEIVE_BASE_VOICE = "receive_base_voice" + RECEIVE_BASE_KNOWLEDGE = "receive_base_knowledge" + RECEIVE_FEE_DEV = "receive_fee_dev" + RECEIVE_FEE_AGENT = "receive_fee_agent" + RECEIVE_FEE_PLATFORM = "receive_fee_platform" + RECHARGE = "recharge" + REFUND = "refund" + ADJUSTMENT = "adjustment" + REFILL = "refill" + # Sync with RewardType values + REWARD = "reward" + EVENT_REWARD = "event_reward" + RECHARGE_BONUS = "recharge_bonus" + + +class CreditDebit(str, Enum): + """Credit or debit transaction.""" + + CREDIT = "credit" + DEBIT = "debit" + + +class CreditTransactionTable(Base): + """Credit transactions database table model. + + Records the flow of credits in and out of accounts. 
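+
+    Transactions are written double-entry style: a single credit event
+    normally produces a CREDIT row for one account and a matching DEBIT row
+    for another (see, e.g., the refill pair created in
+    CreditAccount.create_in_session).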
+ """ + + __tablename__ = "credit_transactions" + __table_args__ = ( + Index("ix_credit_transactions_account", "account_id"), + Index("ix_credit_transactions_event_id", "event_id"), + ) + + id = Column( + String, + primary_key=True, + ) + account_id = Column( + String, + nullable=False, + ) + event_id = Column( + String, + nullable=False, + ) + tx_type = Column( + String, + nullable=False, + ) + credit_debit = Column( + String, + nullable=False, + ) + change_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + free_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + reward_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + permanent_amount = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + credit_type = Column( + String, + nullable=False, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + + +class CreditTransaction(BaseModel): + """Credit transaction model with all fields.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")}, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the credit transaction", + ), + ] + account_id: Annotated[ + str, Field(description="ID of the account this transaction belongs to") + ] + event_id: Annotated[ + str, Field(description="ID of the event that triggered this transaction") + ] + tx_type: Annotated[TransactionType, Field(description="Type of the transaction")] + credit_debit: Annotated[ + CreditDebit, Field(description="Whether this is a credit or debit transaction") + ] + change_amount: Annotated[ + Decimal, Field(default=Decimal("0"), description="Amount of credits changed") + ] + free_amount: Annotated[ + Decimal, + Field(default=Decimal("0"), description="Amount of free credits changed"), + ] + reward_amount: Annotated[ + Decimal, + Field(default=Decimal("0"), description="Amount of reward credits changed"), + ] + permanent_amount: Annotated[ + Decimal, + Field(default=Decimal("0"), description="Amount of permanent credits changed"), + ] + + @field_validator( + "change_amount", "free_amount", "reward_amount", "permanent_amount" + ) + @classmethod + def round_decimal(cls, v: Any) -> Decimal: + """Round decimal values to 4 decimal places.""" + if isinstance(v, Decimal): + return v.quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + elif isinstance(v, (int, float)): + return Decimal(str(v)).quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + return v + + credit_type: Annotated[CreditType, Field(description="Type of credits involved")] + created_at: Annotated[ + datetime, Field(description="Timestamp when this transaction was created") + ] + + +class PriceEntity(str, Enum): + """Type of credit price.""" + + SKILL_CALL = "skill_call" + + +class DiscountType(str, Enum): + """Type of discount.""" + + STANDARD = "standard" + SELF_KEY = "self_key" + + +DEFAULT_SKILL_CALL_PRICE = Decimal("10.0000") +DEFAULT_SKILL_CALL_SELF_KEY_PRICE = Decimal("5.0000") + + +class CreditPriceTable(Base): + """Credit price database table model. + + Stores price information for different types of services. 
+ """ + + __tablename__ = "credit_prices" + + id = Column( + String, + primary_key=True, + ) + price_entity = Column( + String, + nullable=False, + ) + price_entity_id = Column( + String, + nullable=False, + ) + discount_type = Column( + String, + nullable=False, + ) + price = Column( + Numeric(22, 4), + default=0, + nullable=False, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class CreditPrice(BaseModel): + """Credit price model with all fields.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")}, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the credit price", + ), + ] + price_entity: Annotated[ + PriceEntity, Field(description="Type of the price (agent or skill_call)") + ] + price_entity_id: Annotated[ + str, Field(description="ID of the price entity, the skill is the name") + ] + discount_type: Annotated[ + DiscountType, + Field(default=DiscountType.STANDARD, description="Type of discount"), + ] + price: Annotated[Decimal, Field(default=Decimal("0"), description="Standard price")] + + @field_validator("price") + @classmethod + def round_decimal(cls, v: Any) -> Decimal: + """Round decimal values to 4 decimal places.""" + if isinstance(v, Decimal): + return v.quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + elif isinstance(v, (int, float)): + return Decimal(str(v)).quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + return v + + created_at: Annotated[ + datetime, Field(description="Timestamp when this price was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this price was last updated") + ] + + +class CreditPriceLogTable(Base): + """Credit price log database table model. + + Records history of price changes. 
+ """ + + __tablename__ = "credit_price_logs" + + id = Column( + String, + primary_key=True, + ) + price_id = Column( + String, + nullable=False, + ) + old_price = Column( + Numeric(22, 4), + nullable=False, + ) + new_price = Column( + Numeric(22, 4), + nullable=False, + ) + note = Column( + String, + nullable=True, + ) + modified_by = Column( + String, + nullable=False, + ) + modified_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + + +class CreditPriceLog(BaseModel): + """Credit price log model with all fields.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")}, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the log entry", + ), + ] + price_id: Annotated[str, Field(description="ID of the price that was modified")] + old_price: Annotated[Decimal, Field(description="Previous standard price")] + new_price: Annotated[Decimal, Field(description="New standard price")] + + @field_validator("old_price", "new_price") + @classmethod + def round_decimal(cls, v: Any) -> Decimal: + """Round decimal values to 4 decimal places.""" + if isinstance(v, Decimal): + return v.quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + elif isinstance(v, (int, float)): + return Decimal(str(v)).quantize(Decimal("0.0001"), rounding=ROUND_HALF_UP) + return v + + note: Annotated[ + Optional[str], Field(None, description="Note about the modification") + ] + modified_by: Annotated[ + str, Field(description="ID of the user who made the modification") + ] + modified_at: Annotated[ + datetime, Field(description="Timestamp when the modification was made") + ] diff --git a/intentkit/models/db.py b/intentkit/models/db.py new file mode 100644 index 00000000..94bb6f39 --- /dev/null +++ b/intentkit/models/db.py @@ -0,0 +1,146 @@ +from contextlib import asynccontextmanager +from typing import Annotated, AsyncGenerator, Optional +from urllib.parse import quote_plus + +from intentkit.models.db_mig import safe_migrate +from langgraph.checkpoint.memory import InMemorySaver +from langgraph.checkpoint.postgres.aio import AsyncPostgresSaver +from langgraph.types import Checkpointer +from psycopg import OperationalError +from psycopg_pool import AsyncConnectionPool +from pydantic import Field +from sqlalchemy.ext.asyncio import AsyncEngine, AsyncSession, create_async_engine + +engine = None +_langgraph_checkpointer: Optional[Checkpointer] = None + + +async def check_connection(conn): + """ + Pre-ping function to validate connection health before returning to application. + This helps handle database restarts and failovers gracefully. + """ + try: + await conn.execute("SELECT 1") + except OperationalError: + # Re-raise the exception to let the connection pool know this connection is broken + raise + + +async def init_db( + host: Optional[str], + username: Optional[str], + password: Optional[str], + dbname: Optional[str], + port: Annotated[Optional[str], Field(default="5432", description="Database port")], + auto_migrate: Annotated[ + bool, Field(default=True, description="Whether to run migrations automatically") + ], + pool_size: Annotated[ + int, Field(default=3, description="Database connection pool size") + ] = 3, +) -> None: + """Initialize the database and handle schema updates. 
+
+    Args:
+        host: Database host
+        username: Database username
+        password: Database password
+        dbname: Database name
+        port: Database port (default: 5432)
+        auto_migrate: Whether to run migrations automatically (default: True)
+        pool_size: Database connection pool size (default: 3)
+    """
+    global engine, _langgraph_checkpointer
+    # Initialize psycopg pool and AsyncPostgresSaver if not already initialized
+    if _langgraph_checkpointer is None:
+        if host:
+            conn_string = (
+                f"postgresql://{username}:{quote_plus(password)}@{host}:{port}/{dbname}"
+            )
+            pool = AsyncConnectionPool(
+                conninfo=conn_string,
+                min_size=pool_size,
+                max_size=pool_size * 2,
+                timeout=60,
+                max_idle=30 * 60,
+                # Add health check function to handle database restarts
+                check=check_connection,
+                # Set connection max lifetime to prevent stale connections
+                max_lifetime=3600,  # 1 hour
+            )
+            _langgraph_checkpointer = AsyncPostgresSaver(pool)
+            if auto_migrate:
+                # Migration cannot use the pool, so we use a standalone connection
+                async with AsyncPostgresSaver.from_conn_string(conn_string) as saver:
+                    await saver.setup()
+        else:
+            _langgraph_checkpointer = InMemorySaver()
+    # Initialize SQLAlchemy engine with pool settings
+    if engine is None:
+        if host:
+            engine = create_async_engine(
+                f"postgresql+asyncpg://{username}:{quote_plus(password)}@{host}:{port}/{dbname}",
+                pool_size=pool_size,
+                max_overflow=pool_size * 2,  # Set overflow to 2x pool size
+                pool_timeout=60,  # Increase timeout
+                pool_pre_ping=True,  # Enable connection health checks
+                pool_recycle=3600,  # Recycle connections after 1 hour
+            )
+        else:
+            engine = create_async_engine(
+                "sqlite+aiosqlite:///:memory:",
+                connect_args={"check_same_thread": False},
+            )
+        if auto_migrate:
+            await safe_migrate(engine)
+
+
+async def get_db() -> AsyncGenerator[AsyncSession, None]:
+    """Yield a database session (dependency-injection style)."""
+    async with AsyncSession(engine) as session:
+        yield session
+
+
+@asynccontextmanager
+async def get_session() -> AsyncGenerator[AsyncSession, None]:
+    """Get a database session using an async context manager.
+
+    This function is designed to be used with the 'async with' statement,
+    ensuring proper session cleanup.
+
+    Returns:
+        AsyncSession: A SQLAlchemy async session that will be automatically closed
+
+    Example:
+        ```python
+        async with get_session() as session:
+            # use the session here
+            result = await session.execute(stmt)
+        # session is automatically closed
+        ```
+    """
+    session = AsyncSession(engine)
+    try:
+        yield session
+    finally:
+        await session.close()
+
+
+def get_engine() -> AsyncEngine:
+    """Get the SQLAlchemy async engine.
+
+    Returns:
+        AsyncEngine: The SQLAlchemy async engine
+    """
+    return engine
+
+
+def get_langgraph_checkpointer() -> Checkpointer:
+    """Get the checkpointer instance for langgraph.
+
+    Returns:
+        Checkpointer: An AsyncPostgresSaver, or an InMemorySaver when no
+        database host is configured
+    """
+    if _langgraph_checkpointer is None:
+        raise RuntimeError("Database pool not initialized. Call init_db first.")
+    return _langgraph_checkpointer
diff --git a/intentkit/models/db_mig.py b/intentkit/models/db_mig.py
new file mode 100644
index 00000000..690fee9a
--- /dev/null
+++ b/intentkit/models/db_mig.py
@@ -0,0 +1,102 @@
+"""Database migration utilities."""
+
+import logging
+from typing import Callable
+
+from intentkit.models.base import Base
+from sqlalchemy import Column, MetaData, inspect, text
+
+logger = logging.getLogger(__name__)
+
+
+async def add_column_if_not_exists(
+    conn, dialect, table_name: str, column: Column
+) -> None:
+    """Add a column to a table if it doesn't exist.
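+
+    For illustration (names hypothetical), adding a nullable String column
+    "summary" to table "chats" issues roughly:
+
+        ALTER TABLE chats ADD COLUMN summary VARCHAR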
+
+    Args:
+        conn: SQLAlchemy async connection
+        dialect: SQLAlchemy dialect used to compile the column type
+        table_name: Name of the table
+        column: Column to add
+    """
+
+    # Use run_sync to perform inspection on the connection
+    def _get_columns(connection):
+        inspector = inspect(connection)
+        return [c["name"] for c in inspector.get_columns(table_name)]
+
+    columns = await conn.run_sync(_get_columns)
+
+    if column.name not in columns:
+        # Build column definition
+        column_def = f"{column.name} {column.type.compile(dialect)}"
+
+        # Add DEFAULT if specified
+        if column.default is not None:
+            if hasattr(column.default, "arg"):
+                default_value = column.default.arg
+                if not isinstance(default_value, Callable):
+                    if isinstance(default_value, bool):
+                        default_value = str(default_value).lower()
+                    elif isinstance(default_value, str):
+                        default_value = f"'{default_value}'"
+                    elif isinstance(default_value, (list, dict)):
+                        default_value = "'{}'"
+                    column_def += f" DEFAULT {default_value}"
+
+        # Execute ALTER TABLE
+        await conn.execute(text(f"ALTER TABLE {table_name} ADD COLUMN {column_def}"))
+        logger.info(f"Added column {column.name} to table {table_name}")
+
+
+async def update_table_schema(conn, dialect, model_cls) -> None:
+    """Update table schema by adding missing columns from the model.
+
+    Args:
+        conn: SQLAlchemy async connection
+        dialect: SQLAlchemy dialect
+        model_cls: SQLAlchemy model class to check for new columns
+    """
+    if not hasattr(model_cls, "__table__"):
+        return
+
+    table_name = model_cls.__tablename__
+    for name, column in model_cls.__table__.columns.items():
+        if name != "id":  # Skip primary key
+            await add_column_if_not_exists(conn, dialect, table_name, column)
+
+
+async def safe_migrate(engine) -> None:
+    """Safely migrate all SQLAlchemy models by adding new columns.
+
+    Args:
+        engine: SQLAlchemy engine
+    """
+    logger.info("Starting database schema migration")
+    dialect = engine.dialect
+
+    async with engine.begin() as conn:
+        try:
+            # Create tables if they don't exist
+            await conn.run_sync(Base.metadata.create_all)
+
+            # Get existing table metadata
+            metadata = MetaData()
+            await conn.run_sync(metadata.reflect)
+
+            # Update schema for all model classes
+            for mapper in Base.registry.mappers:
+                model_cls = mapper.class_
+                if hasattr(model_cls, "__tablename__"):
+                    table_name = model_cls.__tablename__
+                    if table_name in metadata.tables:
+                        # Add any columns the model defines that the table lacks
+                        await update_table_schema(conn, dialect, model_cls)
+        except Exception as e:
+            logger.error(f"Error updating database schema: {str(e)}")
+            raise
+
+    logger.info("Database schema updated successfully")
diff --git a/intentkit/models/generator.py b/intentkit/models/generator.py
new file mode 100644
index 00000000..342ef8a3
--- /dev/null
+++ b/intentkit/models/generator.py
@@ -0,0 +1,347 @@
+"""Agent Generation Log Model.
+
+This module defines the database models for logging agent generation operations,
+including token usage, prompts, AI responses, and generation metadata.
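+
+A typical flow (a sketch; assumes an open AsyncSession and an illustrative prompt):
+
+```python
+log = await AgentGenerationLog.create(
+    session, AgentGenerationLogCreate(prompt="Build a DeFi research agent")
+)
+# ... run the generation ...
+await log.update_completion(session, success=True, total_tokens=1234)
+```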
+""" + +from datetime import datetime, timezone +from typing import Annotated, Optional + +from epyxid import XID +from intentkit.models.base import Base +from pydantic import BaseModel, ConfigDict, Field +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Integer, + String, + Text, + func, + select, +) +from sqlalchemy.dialects.postgresql import JSON, JSONB +from sqlalchemy.ext.asyncio import AsyncSession + + +class AgentGenerationLogTable(Base): + """Agent generation log database table model.""" + + __tablename__ = "agent_generation_logs" + + id = Column( + String, + primary_key=True, + ) + user_id = Column( + String, + nullable=True, + ) + prompt = Column( + Text, + nullable=False, + ) + existing_agent_id = Column( + String, + nullable=True, + ) + is_update = Column( + Boolean, + default=False, + nullable=False, + ) + generated_agent_schema = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + identified_skills = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + # LLM API response data + llm_model = Column( + String, + nullable=True, + ) + total_tokens = Column( + Integer, + default=0, + ) + input_tokens = Column( + Integer, + default=0, + ) + cached_input_tokens = Column( + Integer, + default=0, + ) + output_tokens = Column( + Integer, + default=0, + ) + input_tokens_details = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + completion_tokens_details = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + # Performance metrics + generation_time_ms = Column( + Integer, + nullable=True, + ) + retry_count = Column( + Integer, + default=0, + ) + validation_errors = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + # Status and results + success = Column( + Boolean, + default=False, + nullable=False, + ) + error_message = Column( + Text, + nullable=True, + ) + # Timestamps + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + completed_at = Column( + DateTime(timezone=True), + nullable=True, + ) + + +class AgentGenerationLogCreate(BaseModel): + """Model for creating agent generation log entries.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + ) + + id: Annotated[ + str, + Field( + default_factory=lambda: str(XID()), + description="Unique identifier for the generation log", + ), + ] + user_id: Optional[str] = Field( + None, + description="User ID who initiated the generation", + ) + prompt: str = Field( + ..., + description="The original prompt used for generation", + ) + existing_agent_id: Optional[str] = Field( + None, + description="ID of existing agent if this is an update operation", + ) + is_update: bool = Field( + False, + description="Whether this is an update to existing agent", + ) + + +class AgentGenerationLog(BaseModel): + """Agent generation log model.""" + + model_config = ConfigDict( + use_enum_values=True, + from_attributes=True, + ) + + id: str + user_id: Optional[str] = None + prompt: str + existing_agent_id: Optional[str] = None + is_update: bool = False + generated_agent_schema: Optional[dict] = None + identified_skills: Optional[dict] = None + llm_model: Optional[str] = None + total_tokens: int = 0 + input_tokens: int = 0 + cached_input_tokens: int = 0 + output_tokens: int = 0 + input_tokens_details: Optional[dict] = None + completion_tokens_details: Optional[dict] = None + generation_time_ms: Optional[int] = None + retry_count: int = 0 + 
validation_errors: Optional[dict] = None + success: bool = False + error_message: Optional[str] = None + created_at: datetime + completed_at: Optional[datetime] = None + + @classmethod + async def create( + cls, + session: AsyncSession, + log_data: AgentGenerationLogCreate, + ) -> "AgentGenerationLog": + """Create a new agent generation log entry. + + Args: + session: Database session + log_data: Log data to create + + Returns: + Created log instance + """ + # Create database record + log_record = AgentGenerationLogTable( + id=log_data.id, + user_id=log_data.user_id, + prompt=log_data.prompt, + existing_agent_id=log_data.existing_agent_id, + is_update=log_data.is_update, + ) + + session.add(log_record) + await session.commit() + await session.refresh(log_record) + + return cls.model_validate(log_record) + + async def update_completion( + self, + session: AsyncSession, + generated_agent_schema: Optional[dict] = None, + identified_skills: Optional[dict] = None, + llm_model: Optional[str] = None, + total_tokens: int = 0, + input_tokens: int = 0, + cached_input_tokens: int = 0, + output_tokens: int = 0, + input_tokens_details: Optional[dict] = None, + completion_tokens_details: Optional[dict] = None, + generation_time_ms: Optional[int] = None, + retry_count: int = 0, + validation_errors: Optional[dict] = None, + success: bool = False, + error_message: Optional[str] = None, + ) -> None: + """Update the log entry with completion data. + + Args: + session: Database session + generated_agent_schema: The generated agent schema + identified_skills: Skills identified during generation + llm_model: LLM model used + total_tokens: Total tokens used + input_tokens: Input tokens used + cached_input_tokens: Cached input tokens used (for cost calculation) + output_tokens: Output tokens used + input_tokens_details: Detailed input token breakdown + completion_tokens_details: Detailed completion token breakdown + generation_time_ms: Generation time in milliseconds + retry_count: Number of retries attempted + validation_errors: Any validation errors encountered + success: Whether generation was successful + error_message: Error message if generation failed + """ + # Get the database record + log_record = await session.get(AgentGenerationLogTable, self.id) + if not log_record: + return + + # Update fields + log_record.generated_agent_schema = generated_agent_schema + log_record.identified_skills = identified_skills + log_record.llm_model = llm_model + log_record.total_tokens = total_tokens + log_record.input_tokens = input_tokens + log_record.cached_input_tokens = cached_input_tokens + log_record.output_tokens = output_tokens + log_record.input_tokens_details = input_tokens_details + log_record.completion_tokens_details = completion_tokens_details + log_record.generation_time_ms = generation_time_ms + log_record.retry_count = retry_count + log_record.validation_errors = validation_errors + log_record.success = success + log_record.error_message = error_message + log_record.completed_at = datetime.now(timezone.utc) + + session.add(log_record) + await session.commit() + await session.refresh(log_record) + + # Update this instance + self.generated_agent_schema = log_record.generated_agent_schema + self.identified_skills = log_record.identified_skills + self.llm_model = log_record.llm_model + self.total_tokens = log_record.total_tokens + self.input_tokens = log_record.input_tokens + self.cached_input_tokens = log_record.cached_input_tokens + self.output_tokens = log_record.output_tokens + self.input_tokens_details = 
log_record.input_tokens_details + self.completion_tokens_details = log_record.completion_tokens_details + self.generation_time_ms = log_record.generation_time_ms + self.retry_count = log_record.retry_count + self.validation_errors = log_record.validation_errors + self.success = log_record.success + self.error_message = log_record.error_message + self.completed_at = log_record.completed_at + + @classmethod + async def get_by_id( + cls, + session: AsyncSession, + log_id: str, + ) -> Optional["AgentGenerationLog"]: + """Get an agent generation log by ID. + + Args: + session: Database session + log_id: Log ID + + Returns: + Log instance if found, None otherwise + """ + result = await session.execute( + select(AgentGenerationLogTable).where(AgentGenerationLogTable.id == log_id) + ) + log_record = result.scalar_one_or_none() + + if log_record: + return cls.model_validate(log_record) + return None + + @classmethod + async def get_by_user( + cls, + session: AsyncSession, + user_id: str, + limit: int = 50, + ) -> list["AgentGenerationLog"]: + """Get agent generation logs for a user. + + Args: + session: Database session + user_id: User ID + limit: Maximum number of logs to return + + Returns: + List of log instances + """ + result = await session.execute( + select(AgentGenerationLogTable) + .where(AgentGenerationLogTable.user_id == user_id) + .order_by(AgentGenerationLogTable.created_at.desc()) + .limit(limit) + ) + log_records = result.scalars().all() + + return [cls.model_validate(record) for record in log_records] diff --git a/intentkit/models/llm.py b/intentkit/models/llm.py new file mode 100644 index 00000000..e2f607bd --- /dev/null +++ b/intentkit/models/llm.py @@ -0,0 +1,804 @@ +import json +import logging +from datetime import datetime, timezone +from decimal import ROUND_HALF_UP, Decimal +from enum import Enum +from typing import Annotated, Any, Optional + +from intentkit.models.app_setting import AppSetting +from intentkit.models.base import Base +from intentkit.models.db import get_session +from intentkit.models.redis import get_redis +from intentkit.utils.error import IntentKitLookUpError +from langchain_core.language_models import LanguageModelLike +from pydantic import BaseModel, ConfigDict, Field +from sqlalchemy import Boolean, Column, DateTime, Integer, Numeric, String, func, select + +logger = logging.getLogger(__name__) + +_credit_per_usdc = None +FOURPLACES = Decimal("0.0001") + + +class LLMProvider(str, Enum): + OPENAI = "openai" + DEEPSEEK = "deepseek" + XAI = "xai" + ETERNAL = "eternal" + REIGENT = "reigent" + VENICE = "venice" + + def display_name(self) -> str: + """Return user-friendly display name for the provider.""" + display_names = { + self.OPENAI: "OpenAI", + self.DEEPSEEK: "DeepSeek", + self.XAI: "xAI", + self.ETERNAL: "Others", + self.REIGENT: "Others", + self.VENICE: "Others", + } + return display_names.get(self, self.value) + + +class LLMModelInfoTable(Base): + """Database table model for LLM model information.""" + + __tablename__ = "llm_models" + + id = Column(String, primary_key=True) + name = Column(String, nullable=False) + provider = Column(String, nullable=False) # Stored as string enum value + enabled = Column(Boolean, nullable=False, default=True) + input_price = Column( + Numeric(22, 4), nullable=False + ) # Price per 1M input tokens in USD + output_price = Column( + Numeric(22, 4), nullable=False + ) # Price per 1M output tokens in USD + price_level = Column(Integer, nullable=True) # Price level rating from 1-5 + context_length = Column(Integer, 
nullable=False)  # Maximum context length in tokens
+    output_length = Column(Integer, nullable=False)  # Maximum output length in tokens
+    intelligence = Column(Integer, nullable=False)  # Intelligence rating from 1-5
+    speed = Column(Integer, nullable=False)  # Speed rating from 1-5
+    supports_image_input = Column(Boolean, nullable=False, default=False)
+    supports_skill_calls = Column(Boolean, nullable=False, default=False)
+    supports_structured_output = Column(Boolean, nullable=False, default=False)
+    has_reasoning = Column(Boolean, nullable=False, default=False)
+    supports_search = Column(Boolean, nullable=False, default=False)
+    supports_temperature = Column(Boolean, nullable=False, default=True)
+    supports_frequency_penalty = Column(Boolean, nullable=False, default=True)
+    supports_presence_penalty = Column(Boolean, nullable=False, default=True)
+    api_base = Column(String, nullable=True)  # Custom API base URL
+    timeout = Column(Integer, nullable=False, default=180)  # Default timeout in seconds
+    created_at = Column(
+        DateTime(timezone=True),
+        nullable=False,
+        server_default=func.now(),
+    )
+    updated_at = Column(
+        DateTime(timezone=True),
+        nullable=False,
+        server_default=func.now(),
+        onupdate=lambda: datetime.now(timezone.utc),
+    )
+
+
+class LLMModelInfo(BaseModel):
+    """Information about an LLM model."""
+
+    model_config = ConfigDict(
+        from_attributes=True,
+        use_enum_values=True,
+        json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")},
+    )
+
+    id: str
+    name: str
+    provider: LLMProvider
+    enabled: bool = Field(default=True)
+    input_price: Decimal  # Price per 1M input tokens in USD
+    output_price: Decimal  # Price per 1M output tokens in USD
+    price_level: Optional[int] = Field(
+        default=None, ge=1, le=5
+    )  # Price level rating from 1-5
+    context_length: int  # Maximum context length in tokens
+    output_length: int  # Maximum output length in tokens
+    intelligence: int = Field(ge=1, le=5)  # Intelligence rating from 1-5
+    speed: int = Field(ge=1, le=5)  # Speed rating from 1-5
+    supports_image_input: bool = False  # Whether the model supports image inputs
+    supports_skill_calls: bool = False  # Whether the model supports skill/tool calls
+    supports_structured_output: bool = (
+        False  # Whether the model supports structured output
+    )
+    has_reasoning: bool = False  # Whether the model has strong reasoning capabilities
+    supports_search: bool = (
+        False  # Whether the model supports native search functionality
+    )
+    supports_temperature: bool = (
+        True  # Whether the model supports temperature parameter
+    )
+    supports_frequency_penalty: bool = (
+        True  # Whether the model supports frequency_penalty parameter
+    )
+    supports_presence_penalty: bool = (
+        True  # Whether the model supports presence_penalty parameter
+    )
+    api_base: Optional[str] = (
+        None  # Custom API base URL if not using provider's default
+    )
+    timeout: int = 180  # Default timeout in seconds
+    created_at: Annotated[
+        datetime,
+        Field(
+            description="Timestamp when this data was created",
+            # default_factory so the timestamp is taken at instantiation, not import
+            default_factory=lambda: datetime.now(timezone.utc),
+        ),
+    ]
+    updated_at: Annotated[
+        datetime,
+        Field(
+            description="Timestamp when this data was updated",
+            # default_factory so the timestamp is taken at instantiation, not import
+            default_factory=lambda: datetime.now(timezone.utc),
+        ),
+    ]
+
+    @staticmethod
+    async def get(model_id: str) -> "LLMModelInfo":
+        """Get a model by ID with Redis caching.
+
+        The model info is cached in Redis for 3 minutes.
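+
+        A minimal usage sketch (assumes Redis and the database have been
+        initialized via ``init_redis``/``init_db``):
+
+        ```python
+        info = await LLMModelInfo.get("gpt-4o")
+        cost = await info.calculate_cost(input_tokens=1200, output_tokens=300)
+        ```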
+
+        Args:
+            model_id: ID of the model to retrieve
+
+        Returns:
+            LLMModelInfo: The model info
+
+        Raises:
+            IntentKitLookUpError: If the model is not found in the cache, the
+                database, or AVAILABLE_MODELS
+        """
+        try:
+            has_redis = True
+            # Redis cache key for model info
+            cache_key = f"intentkit:llm_model:{model_id}"
+            cache_ttl = 180  # 3 minutes in seconds
+
+            # Try to get from Redis cache first
+            redis = get_redis()
+            cached_data = await redis.get(cache_key)
+
+            if cached_data:
+                # If found in cache, deserialize and return
+                try:
+                    return LLMModelInfo.model_validate_json(cached_data)
+                except (json.JSONDecodeError, TypeError):
+                    # If cache is corrupted, invalidate it
+                    await redis.delete(cache_key)
+        except Exception:
+            has_redis = False
+            logger.debug("Redis unavailable while fetching model info")
+
+        # If not in cache or cache is invalid, get from database
+        async with get_session() as session:
+            # Query the database for the model
+            stmt = select(LLMModelInfoTable).where(LLMModelInfoTable.id == model_id)
+            model = await session.scalar(stmt)
+
+            # If model exists in database, convert to LLMModelInfo model and cache it
+            if model:
+                # Convert provider string to enum
+                model_info = LLMModelInfo.model_validate(model)
+
+                # Cache the model in Redis
+                if has_redis:
+                    await redis.set(
+                        cache_key,
+                        model_info.model_dump_json(),
+                        ex=cache_ttl,
+                    )
+
+                return model_info
+
+        # If not found in database, check AVAILABLE_MODELS
+        if model_id in AVAILABLE_MODELS:
+            model_info = AVAILABLE_MODELS[model_id]
+
+            # Cache the model in Redis
+            if has_redis:
+                await redis.set(cache_key, model_info.model_dump_json(), ex=cache_ttl)
+
+            return model_info
+
+        # Not found anywhere
+        raise IntentKitLookUpError(f"Model {model_id} not found")
+
+    async def calculate_cost(self, input_tokens: int, output_tokens: int) -> Decimal:
+        """Calculate the cost in credits for a given number of tokens."""
+        global _credit_per_usdc
+        if not _credit_per_usdc:
+            _credit_per_usdc = (await AppSetting.payment()).credit_per_usdc
+        input_cost = (
+            _credit_per_usdc
+            * Decimal(input_tokens)
+            * self.input_price
+            / Decimal(1000000)
+        ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+        output_cost = (
+            _credit_per_usdc
+            * Decimal(output_tokens)
+            * self.output_price
+            / Decimal(1000000)
+        ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+        return (input_cost + output_cost).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+
+# Define all available models
+AVAILABLE_MODELS = {
+    # OpenAI models
+    "gpt-4o": LLMModelInfo(
+        id="gpt-4o",
+        name="GPT-4o",
+        provider=LLMProvider.OPENAI,
+        input_price=Decimal("2.50"),  # per 1M input tokens
+        output_price=Decimal("10.00"),  # per 1M output tokens
+        context_length=128000,
+        output_length=4096,
+        intelligence=4,
+        speed=3,
+        supports_image_input=True,
+        supports_skill_calls=True,
+        supports_structured_output=True,
+        supports_search=True,
+        supports_frequency_penalty=False,
+        supports_presence_penalty=False,
+    ),
+    "gpt-4o-mini": LLMModelInfo(
+        id="gpt-4o-mini",
+        name="GPT-4o Mini",
+        provider=LLMProvider.OPENAI,
+        input_price=Decimal("0.15"),  # per 1M input tokens
+        output_price=Decimal("0.60"),  # per 1M output tokens
+        context_length=128000,
+        output_length=4096,
+        intelligence=3,
+        speed=4,
+        supports_image_input=False,
+        supports_skill_calls=True,
+        supports_structured_output=True,
+        supports_search=True,
+        supports_frequency_penalty=False,
+        supports_presence_penalty=False,
+    ),
+    "gpt-5-nano": LLMModelInfo(
+        id="gpt-5-nano",
+        name="GPT-5 Nano",
+        provider=LLMProvider.OPENAI,
+        input_price=Decimal("0.05"),  # per 1M input tokens
+        output_price=Decimal("0.4"),  # per 1M output tokens
+
context_length=400000, + output_length=128000, + intelligence=3, + speed=5, + supports_image_input=True, + supports_skill_calls=True, + supports_structured_output=True, + supports_temperature=False, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + "gpt-5-mini": LLMModelInfo( + id="gpt-5-mini", + name="GPT-5 Mini", + provider=LLMProvider.OPENAI, + input_price=Decimal("0.25"), # per 1M input tokens + output_price=Decimal("2"), # per 1M output tokens + context_length=400000, + output_length=128000, + intelligence=4, + speed=4, + supports_image_input=True, + supports_skill_calls=True, + supports_structured_output=True, + supports_search=True, + supports_temperature=False, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + "gpt-5": LLMModelInfo( + id="gpt-5", + name="GPT-5", + provider=LLMProvider.OPENAI, + input_price=Decimal("1.25"), # per 1M input tokens + output_price=Decimal("10.00"), # per 1M output tokens + context_length=400000, + output_length=128000, + intelligence=5, + speed=3, + supports_image_input=True, + supports_skill_calls=True, + supports_structured_output=True, + supports_search=True, + supports_temperature=False, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + "gpt-4.1-nano": LLMModelInfo( + id="gpt-4.1-nano", + name="GPT-4.1 Nano", + provider=LLMProvider.OPENAI, + input_price=Decimal("0.1"), # per 1M input tokens + output_price=Decimal("0.4"), # per 1M output tokens + context_length=128000, + output_length=4096, + intelligence=3, + speed=5, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + "gpt-4.1-mini": LLMModelInfo( + id="gpt-4.1-mini", + name="GPT-4.1 Mini", + provider=LLMProvider.OPENAI, + input_price=Decimal("0.4"), # per 1M input tokens + output_price=Decimal("1.6"), # per 1M output tokens + context_length=128000, + output_length=4096, + intelligence=4, + speed=4, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + supports_search=True, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + "gpt-4.1": LLMModelInfo( + id="gpt-4.1", + name="GPT-4.1", + provider=LLMProvider.OPENAI, + input_price=Decimal("2.00"), # per 1M input tokens + output_price=Decimal("8.00"), # per 1M output tokens + context_length=128000, + output_length=4096, + intelligence=5, + speed=3, + supports_image_input=True, + supports_skill_calls=True, + supports_structured_output=True, + supports_search=True, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + "o4-mini": LLMModelInfo( + id="o4-mini", + name="OpenAI o4-mini", + provider=LLMProvider.OPENAI, + input_price=Decimal("1.10"), # per 1M input tokens + output_price=Decimal("4.40"), # per 1M output tokens + context_length=128000, + output_length=4096, + intelligence=4, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + has_reasoning=True, # Has strong reasoning capabilities + supports_temperature=False, + supports_frequency_penalty=False, + supports_presence_penalty=False, + ), + # Deepseek models + "deepseek-chat": LLMModelInfo( + id="deepseek-chat", + name="Deepseek V3 (0324)", + provider=LLMProvider.DEEPSEEK, + input_price=Decimal("0.27"), + output_price=Decimal("1.10"), + context_length=60000, + output_length=4096, + intelligence=4, + speed=3, + supports_image_input=False, + 
supports_skill_calls=True, + supports_structured_output=True, + api_base="https://api.deepseek.com", + timeout=300, + ), + "deepseek-reasoner": LLMModelInfo( + id="deepseek-reasoner", + name="Deepseek R1", + provider=LLMProvider.DEEPSEEK, + input_price=Decimal("0.55"), + output_price=Decimal("2.19"), + context_length=60000, + output_length=4096, + intelligence=4, + speed=2, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + has_reasoning=True, # Has strong reasoning capabilities + api_base="https://api.deepseek.com", + timeout=300, + ), + # XAI models + "grok-2": LLMModelInfo( + id="grok-2", + name="Grok 2", + provider=LLMProvider.XAI, + input_price=Decimal("2"), + output_price=Decimal("10"), + context_length=120000, + output_length=4096, + intelligence=3, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + timeout=180, + ), + "grok-3": LLMModelInfo( + id="grok-3", + name="Grok 3", + provider=LLMProvider.XAI, + input_price=Decimal("3"), + output_price=Decimal("15"), + context_length=131072, + output_length=4096, + intelligence=5, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + supports_search=True, + timeout=180, + ), + "grok-3-mini": LLMModelInfo( + id="grok-3-mini", + name="Grok 3 Mini", + provider=LLMProvider.XAI, + input_price=Decimal("0.3"), + output_price=Decimal("0.5"), + context_length=131072, + output_length=4096, + intelligence=5, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + has_reasoning=True, # Has strong reasoning capabilities + supports_frequency_penalty=False, + supports_presence_penalty=False, # Grok-3-mini doesn't support presence_penalty + timeout=180, + ), + # Eternal AI models + "eternalai": LLMModelInfo( + id="eternalai", + name="Eternal AI (Llama-3.3-70B)", + provider=LLMProvider.ETERNAL, + input_price=Decimal("0.25"), + output_price=Decimal("0.75"), + context_length=60000, + output_length=4096, + intelligence=4, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + api_base="https://api.eternalai.org/v1", + timeout=300, + ), + # Reigent models + "reigent": LLMModelInfo( + id="reigent", + name="REI Network", + provider=LLMProvider.REIGENT, + input_price=Decimal("0.50"), # Placeholder price, update with actual pricing + output_price=Decimal("1.50"), # Placeholder price, update with actual pricing + context_length=32000, + output_length=4096, + intelligence=4, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + supports_temperature=False, + supports_frequency_penalty=False, + supports_presence_penalty=False, + api_base="https://api.reisearch.box/v1", + timeout=300, + ), + # Venice models + "venice-uncensored": LLMModelInfo( + id="venice-uncensored", + name="Venice Uncensored", + provider=LLMProvider.VENICE, + input_price=Decimal("0.50"), # Placeholder price, update with actual pricing + output_price=Decimal("2.00"), # Placeholder price, update with actual pricing + context_length=32000, + output_length=4096, + intelligence=3, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + supports_temperature=True, + supports_frequency_penalty=False, + supports_presence_penalty=False, + api_base="https://api.venice.ai/api/v1", + timeout=300, + ), + "venice-llama-4-maverick-17b": LLMModelInfo( + 
id="venice-llama-4-maverick-17b", + name="Venice Llama-4 Maverick 17B", + provider=LLMProvider.VENICE, + input_price=Decimal("1.50"), + output_price=Decimal("6.00"), + context_length=32000, + output_length=4096, + intelligence=3, + speed=3, + supports_image_input=False, + supports_skill_calls=True, + supports_structured_output=True, + supports_temperature=True, + supports_frequency_penalty=False, + supports_presence_penalty=False, + api_base="https://api.venice.ai/api/v1", + timeout=300, + ), +} + + +class LLMModel(BaseModel): + """Base model for LLM configuration.""" + + model_name: str + temperature: float = 0.7 + frequency_penalty: float = 0.0 + presence_penalty: float = 0.0 + info: LLMModelInfo + + async def model_info(self) -> LLMModelInfo: + """Get the model information with caching. + + First tries to get from cache, then database, then AVAILABLE_MODELS. + Raises ValueError if model is not found anywhere. + """ + model_info = await LLMModelInfo.get(self.model_name) + return model_info + + # This will be implemented by subclasses to return the appropriate LLM instance + async def create_instance(self, config: Any) -> LanguageModelLike: + """Create and return the LLM instance based on the configuration.""" + raise NotImplementedError("Subclasses must implement create_instance") + + async def get_token_limit(self) -> int: + """Get the token limit for this model.""" + info = await self.model_info() + return info.context_length + + async def calculate_cost(self, input_tokens: int, output_tokens: int) -> Decimal: + """Calculate the cost for a given number of tokens.""" + info = await self.model_info() + return await info.calculate_cost(input_tokens, output_tokens) + + +class OpenAILLM(LLMModel): + """OpenAI LLM configuration.""" + + async def create_instance(self, config: Any) -> LanguageModelLike: + """Create and return a ChatOpenAI instance.""" + from langchain_openai import ChatOpenAI + + info = await self.model_info() + + kwargs = { + "model_name": self.model_name, + "openai_api_key": config.openai_api_key, + "timeout": info.timeout, + } + + # Add optional parameters based on model support + if info.supports_temperature: + kwargs["temperature"] = self.temperature + + if info.supports_frequency_penalty: + kwargs["frequency_penalty"] = self.frequency_penalty + + if info.supports_presence_penalty: + kwargs["presence_penalty"] = self.presence_penalty + + if info.api_base: + kwargs["openai_api_base"] = info.api_base + + if self.model_name.startswith("gpt-5"): + kwargs["reasoning_effort"] = "minimal" + + logger.debug(f"Creating ChatOpenAI instance with kwargs: {kwargs}") + + return ChatOpenAI(**kwargs) + + +class DeepseekLLM(LLMModel): + """Deepseek LLM configuration.""" + + async def create_instance(self, config: Any) -> LanguageModelLike: + """Create and return a ChatDeepseek instance.""" + + from langchain_deepseek import ChatDeepSeek + + info = await self.model_info() + + kwargs = { + "model": self.model_name, + "api_key": config.deepseek_api_key, + "timeout": info.timeout, + "max_retries": 3, + } + + # Add optional parameters based on model support + if info.supports_temperature: + kwargs["temperature"] = self.temperature + + if info.supports_frequency_penalty: + kwargs["frequency_penalty"] = self.frequency_penalty + + if info.supports_presence_penalty: + kwargs["presence_penalty"] = self.presence_penalty + + if info.api_base: + kwargs["api_base"] = info.api_base + + return ChatDeepSeek(**kwargs) + + +class XAILLM(LLMModel): + """XAI (Grok) LLM configuration.""" + + async def 
create_instance(self, config: Any) -> LanguageModelLike: + """Create and return a ChatXAI instance.""" + + from langchain_xai import ChatXAI + + info = await self.model_info() + + kwargs = { + "model_name": self.model_name, + "xai_api_key": config.xai_api_key, + "timeout": info.timeout, + } + + # Add optional parameters based on model support + if info.supports_temperature: + kwargs["temperature"] = self.temperature + + if info.supports_frequency_penalty: + kwargs["frequency_penalty"] = self.frequency_penalty + + if info.supports_presence_penalty: + kwargs["presence_penalty"] = self.presence_penalty + + if self.model_name in ["grok-3", "grok-3-mini"]: + kwargs["search_parameters"] = {"mode": "auto"} + + return ChatXAI(**kwargs) + + +class EternalLLM(LLMModel): + """Eternal AI LLM configuration.""" + + async def create_instance(self, config: Any) -> LanguageModelLike: + """Create and return a ChatOpenAI instance configured for Eternal AI.""" + from langchain_openai import ChatOpenAI + + info = await self.model_info() + + # Override model name for Eternal AI + actual_model = "unsloth/Llama-3.3-70B-Instruct-bnb-4bit" + + kwargs = { + "model_name": actual_model, + "openai_api_key": config.eternal_api_key, + "openai_api_base": info.api_base, + "timeout": info.timeout, + } + + # Add optional parameters based on model support + if info.supports_temperature: + kwargs["temperature"] = self.temperature + + if info.supports_frequency_penalty: + kwargs["frequency_penalty"] = self.frequency_penalty + + if info.supports_presence_penalty: + kwargs["presence_penalty"] = self.presence_penalty + + return ChatOpenAI(**kwargs) + + +class ReigentLLM(LLMModel): + """Reigent LLM configuration.""" + + async def create_instance(self, config: Any) -> LanguageModelLike: + """Create and return a ChatOpenAI instance configured for Reigent.""" + from langchain_openai import ChatOpenAI + + info = await self.model_info() + + kwargs = { + "openai_api_key": config.reigent_api_key, + "openai_api_base": info.api_base, + "timeout": info.timeout, + "model_kwargs": { + # Override any specific parameters required for Reigent API + # The Reigent API requires 'tools' instead of 'functions' and might have some specific formatting requirements + }, + } + + return ChatOpenAI(**kwargs) + + +class VeniceLLM(LLMModel): + """Venice LLM configuration.""" + + async def create_instance(self, config: Any) -> LanguageModelLike: + """Create and return a ChatOpenAI instance configured for Venice.""" + from langchain_openai import ChatOpenAI + + info = await self.model_info() + + kwargs = { + "openai_api_key": config.venice_api_key, + "openai_api_base": info.api_base, + "timeout": info.timeout, + } + + return ChatOpenAI(**kwargs) + + +# Factory function to create the appropriate LLM model based on the model name +async def create_llm_model( + model_name: str, + temperature: float = 0.7, + frequency_penalty: float = 0.0, + presence_penalty: float = 0.0, +) -> LLMModel: + """ + Create an LLM model instance based on the model name. 
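+
+    A sketch (model IDs come from ``AVAILABLE_MODELS`` or the ``llm_models``
+    table; ``config`` is assumed to carry the provider API keys):
+
+    ```python
+    llm = await create_llm_model("gpt-4.1-mini", temperature=0.5)
+    instance = await llm.create_instance(config)
+    ```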
+
+    Args:
+        model_name: The name of the model to use
+        temperature: The temperature parameter for the model
+        frequency_penalty: The frequency penalty parameter for the model
+        presence_penalty: The presence penalty parameter for the model
+
+    Returns:
+        An instance of a subclass of LLMModel
+    """
+    info = await LLMModelInfo.get(model_name)
+
+    base_params = {
+        "model_name": model_name,
+        "temperature": temperature,
+        "frequency_penalty": frequency_penalty,
+        "presence_penalty": presence_penalty,
+        "info": info,
+    }
+
+    provider = info.provider
+
+    if provider == LLMProvider.DEEPSEEK:
+        return DeepseekLLM(**base_params)
+    elif provider == LLMProvider.XAI:
+        return XAILLM(**base_params)
+    elif provider == LLMProvider.ETERNAL:
+        return EternalLLM(**base_params)
+    elif provider == LLMProvider.REIGENT:
+        return ReigentLLM(**base_params)
+    elif provider == LLMProvider.VENICE:
+        return VeniceLLM(**base_params)
+    else:
+        # Default to OpenAI
+        return OpenAILLM(**base_params)
diff --git a/intentkit/models/redis.py b/intentkit/models/redis.py
new file mode 100644
index 00000000..206b636c
--- /dev/null
+++ b/intentkit/models/redis.py
@@ -0,0 +1,132 @@
+"""Redis client module for IntentKit."""
+
+import logging
+from typing import Optional
+
+from redis.asyncio import Redis
+
+logger = logging.getLogger(__name__)
+
+# Global Redis client instance
+_redis_client: Optional[Redis] = None
+
+
+async def init_redis(
+    host: str,
+    port: int = 6379,
+    db: int = 0,
+    password: Optional[str] = None,
+    ssl: bool = False,
+    encoding: str = "utf-8",
+    decode_responses: bool = True,
+) -> Redis:
+    """Initialize the Redis client.
+
+    Args:
+        host: Redis host
+        port: Redis port (default: 6379)
+        db: Redis database number (default: 0)
+        password: Redis password (default: None)
+        ssl: Whether to use SSL (default: False)
+        encoding: Response encoding (default: utf-8)
+        decode_responses: Whether to decode responses (default: True)
+
+    Returns:
+        Redis: The initialized Redis client
+    """
+    global _redis_client
+
+    if _redis_client is not None:
+        logger.info("Redis client already initialized")
+        return _redis_client
+
+    try:
+        logger.info(f"Initializing Redis client at {host}:{port}")
+        _redis_client = Redis(
+            host=host,
+            port=port,
+            db=db,
+            password=password,
+            ssl=ssl,
+            encoding=encoding,
+            decode_responses=decode_responses,
+        )
+        # Test the connection
+        await _redis_client.ping()
+        logger.info("Redis client initialized successfully")
+        return _redis_client
+    except Exception as e:
+        logger.error(f"Failed to initialize Redis client: {e}")
+        raise
+
+
+def get_redis() -> Redis:
+    """Get the Redis client.
+
+    Returns:
+        Redis: The Redis client
+
+    Raises:
+        RuntimeError: If the Redis client is not initialized
+    """
+    if _redis_client is None:
+        raise RuntimeError("Redis client not initialized. Call init_redis first.")
+    return _redis_client
+
+
+async def send_heartbeat(redis_client: Redis, name: str) -> None:
+    """Set a heartbeat key in Redis that expires after 190 seconds.
+
+    Args:
+        redis_client: Redis client instance
+        name: Name identifier for the heartbeat
+    """
+    try:
+        key = f"intentkit:heartbeat:{name}"
+        # Expire after 190 seconds (just over 3 minutes)
+        await redis_client.set(key, 1, ex=190)
+    except Exception as e:
+        logger.error(f"Failed to send heartbeat for {name}: {e}")
+
+
+async def check_heartbeat(redis_client: Redis, name: str) -> bool:
+    """Check if a heartbeat key exists in Redis.
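+
+    Checks up to 3 times, sleeping 5 seconds between failed attempts, before
+    giving up and returning False.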
+ + Args: + redis_client: Redis client instance + name: Name identifier for the heartbeat + + Returns: + bool: True if heartbeat exists, False otherwise + """ + import asyncio + + key = f"intentkit:heartbeat:{name}" + retries = 3 + + for attempt in range(retries): + try: + exists = await redis_client.exists(key) + return bool(exists) + except Exception as e: + logger.error( + f"Error checking heartbeat for {name} (attempt {attempt + 1}/{retries}): {e}" + ) + if attempt < retries - 1: # Don't sleep on the last attempt + await asyncio.sleep(5) # Wait 5 seconds before retrying + + return False + + +async def clean_heartbeat(redis_client: Redis, name: str) -> None: + """Remove a heartbeat key from Redis. + + Args: + redis_client: Redis client instance + name: Name identifier for the heartbeat to remove + """ + try: + key = f"intentkit:heartbeat:{name}" + await redis_client.delete(key) + logger.info(f"Removed heartbeat for {name}") + except Exception as e: + logger.error(f"Failed to remove heartbeat for {name}: {e}") diff --git a/intentkit/models/skill.py b/intentkit/models/skill.py new file mode 100644 index 00000000..3b631c1f --- /dev/null +++ b/intentkit/models/skill.py @@ -0,0 +1,485 @@ +import json +from datetime import datetime, timezone +from decimal import Decimal +from typing import Annotated, Any, Dict, Optional + +from intentkit.models.base import Base +from intentkit.models.db import get_session +from intentkit.models.redis import get_redis +from pydantic import BaseModel, ConfigDict, Field +from sqlalchemy import ( + Boolean, + Column, + DateTime, + Integer, + Numeric, + String, + delete, + func, + select, +) +from sqlalchemy.dialects.postgresql import JSON, JSONB + + +class AgentSkillDataTable(Base): + """Database table model for storing skill-specific data for agents.""" + + __tablename__ = "agent_skill_data" + + agent_id = Column(String, primary_key=True) + skill = Column(String, primary_key=True) + key = Column(String, primary_key=True) + data = Column(JSON().with_variant(JSONB(), "postgresql"), nullable=True) + size = Column(Integer, nullable=False, default=0) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class AgentSkillDataCreate(BaseModel): + """Base model for creating agent skill data records.""" + + model_config = ConfigDict(from_attributes=True) + + agent_id: Annotated[str, Field(description="ID of the agent this data belongs to")] + skill: Annotated[str, Field(description="Name of the skill this data is for")] + key: Annotated[str, Field(description="Key for this specific piece of data")] + data: Annotated[Dict[str, Any], Field(description="JSON data stored for this key")] + + async def save(self) -> "AgentSkillData": + """Save or update skill data. 
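+
+        Enforces a 10MB cap on the total skill data stored per agent.
+
+        A minimal sketch (field values are illustrative):
+
+        ```python
+        await AgentSkillDataCreate(
+            agent_id="agent-1",
+            skill="twitter",
+            key="last_seen_id",
+            data={"id": "184"},
+        ).save()
+        ```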
+ + Returns: + AgentSkillData: The saved agent skill data instance + + Raises: + Exception: If the total size would exceed the 10MB limit + """ + # Calculate the size of the data + data_size = len(json.dumps(self.data).encode("utf-8")) + + async with get_session() as db: + # Check current total size for this agent + current_total = await AgentSkillData.total_size(self.agent_id) + + record = await db.scalar( + select(AgentSkillDataTable).where( + AgentSkillDataTable.agent_id == self.agent_id, + AgentSkillDataTable.skill == self.skill, + AgentSkillDataTable.key == self.key, + ) + ) + + # Calculate new total size + if record: + # Update existing record - subtract old size, add new size + new_total = current_total - record.size + data_size + else: + # Create new record - add new size + new_total = current_total + data_size + + # Check if new total would exceed limit (10MB = 10 * 1024 * 1024 bytes) + if new_total > 10 * 1024 * 1024: + raise Exception( + f"Total size would exceed 10MB limit. Current: {current_total}, New: {new_total}" + ) + + if record: + # Update existing record + record.data = self.data + record.size = data_size + else: + # Create new record + record = AgentSkillDataTable( + agent_id=self.agent_id, + skill=self.skill, + key=self.key, + data=self.data, + size=data_size, + ) + + db.add(record) + await db.commit() + await db.refresh(record) + return AgentSkillData.model_validate(record) + + +class AgentSkillData(AgentSkillDataCreate): + """Model for storing skill-specific data for agents. + + This model uses a composite primary key of (agent_id, skill, key) to store + skill-specific data for agents in a flexible way. + """ + + model_config = ConfigDict( + from_attributes=True, + json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")}, + ) + + size: Annotated[int, Field(description="Size of the data in bytes")] + created_at: Annotated[ + datetime, Field(description="Timestamp when this data was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this data was updated") + ] + + @classmethod + async def total_size(cls, agent_id: str) -> int: + """Calculate the total size of all skill data for an agent. + + Args: + agent_id: ID of the agent + + Returns: + int: Total size in bytes of all skill data for the agent + """ + async with get_session() as db: + result = await db.scalar( + select(func.coalesce(func.sum(AgentSkillDataTable.size), 0)).where( + AgentSkillDataTable.agent_id == agent_id + ) + ) + return result or 0 + + @classmethod + async def get(cls, agent_id: str, skill: str, key: str) -> Optional[dict]: + """Get skill data for an agent. + + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + + Returns: + Dictionary containing the skill data if found, None otherwise + """ + async with get_session() as db: + result = await db.scalar( + select(AgentSkillDataTable).where( + AgentSkillDataTable.agent_id == agent_id, + AgentSkillDataTable.skill == skill, + AgentSkillDataTable.key == key, + ) + ) + return result.data if result else None + + @classmethod + async def delete(cls, agent_id: str, skill: str, key: str) -> None: + """Delete skill data for an agent. 
+ + Args: + agent_id: ID of the agent + skill: Name of the skill + key: Data key + """ + async with get_session() as db: + await db.execute( + delete(AgentSkillDataTable).where( + AgentSkillDataTable.agent_id == agent_id, + AgentSkillDataTable.skill == skill, + AgentSkillDataTable.key == key, + ) + ) + await db.commit() + + @classmethod + async def clean_data(cls, agent_id: str): + """Clean all skill data for an agent. + + Args: + agent_id: ID of the agent + """ + async with get_session() as db: + await db.execute( + delete(AgentSkillDataTable).where( + AgentSkillDataTable.agent_id == agent_id + ) + ) + await db.commit() + + +class ThreadSkillDataTable(Base): + """Database table model for storing skill-specific data for threads.""" + + __tablename__ = "thread_skill_data" + + thread_id = Column(String, primary_key=True) + skill = Column(String, primary_key=True) + key = Column(String, primary_key=True) + agent_id = Column(String, nullable=False) + data = Column(JSON().with_variant(JSONB(), "postgresql"), nullable=True) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class ThreadSkillDataCreate(BaseModel): + """Base model for creating thread skill data records.""" + + model_config = ConfigDict(from_attributes=True) + + thread_id: Annotated[ + str, Field(description="ID of the thread this data belongs to") + ] + skill: Annotated[str, Field(description="Name of the skill this data is for")] + key: Annotated[str, Field(description="Key for this specific piece of data")] + agent_id: Annotated[str, Field(description="ID of the agent that owns this thread")] + data: Annotated[Dict[str, Any], Field(description="JSON data stored for this key")] + + async def save(self) -> "ThreadSkillData": + """Save or update skill data. + + Returns: + ThreadSkillData: The saved thread skill data instance + """ + async with get_session() as db: + record = await db.scalar( + select(ThreadSkillDataTable).where( + ThreadSkillDataTable.thread_id == self.thread_id, + ThreadSkillDataTable.skill == self.skill, + ThreadSkillDataTable.key == self.key, + ) + ) + + if record: + # Update existing record + record.data = self.data + record.agent_id = self.agent_id + else: + # Create new record + record = ThreadSkillDataTable(**self.model_dump()) + db.add(record) + await db.commit() + await db.refresh(record) + return ThreadSkillData.model_validate(record) + + +class ThreadSkillData(ThreadSkillDataCreate): + """Model for storing skill-specific data for threads. + + This model uses a composite primary key of (thread_id, skill, key) to store + skill-specific data for threads in a flexible way. It also includes agent_id + as a required field for tracking ownership. + """ + + model_config = ConfigDict( + from_attributes=True, + json_encoders={datetime: lambda v: v.isoformat(timespec="milliseconds")}, + ) + + created_at: Annotated[ + datetime, Field(description="Timestamp when this data was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this data was updated") + ] + + @classmethod + async def get(cls, thread_id: str, skill: str, key: str) -> Optional[dict]: + """Get skill data for a thread. 
+ + Args: + thread_id: ID of the thread + skill: Name of the skill + key: Data key + + Returns: + Dictionary containing the skill data if found, None otherwise + """ + async with get_session() as db: + record = await db.scalar( + select(ThreadSkillDataTable).where( + ThreadSkillDataTable.thread_id == thread_id, + ThreadSkillDataTable.skill == skill, + ThreadSkillDataTable.key == key, + ) + ) + return record.data if record else None + + @classmethod + async def clean_data( + cls, + agent_id: str, + thread_id: Annotated[ + str, + Field( + default="", + description="Optional ID of the thread. If provided, only cleans data for that thread.", + ), + ], + ): + """Clean all skill data for a thread or agent. + + Args: + agent_id: ID of the agent + thread_id: Optional ID of the thread. If provided, only cleans data for that thread. + If empty, cleans all data for the agent. + """ + async with get_session() as db: + if thread_id and thread_id != "": + await db.execute( + delete(ThreadSkillDataTable).where( + ThreadSkillDataTable.agent_id == agent_id, + ThreadSkillDataTable.thread_id == thread_id, + ) + ) + else: + await db.execute( + delete(ThreadSkillDataTable).where( + ThreadSkillDataTable.agent_id == agent_id + ) + ) + await db.commit() + + +class SkillTable(Base): + """Database table model for Skill.""" + + __tablename__ = "skills" + + name = Column(String, primary_key=True) + enabled = Column(Boolean, nullable=False, default=True) + category = Column(String, nullable=False) + config_name = Column(String, nullable=True) + price_level = Column(Integer, nullable=True) + price = Column(Numeric(22, 4), nullable=False, default=1) + price_self_key = Column(Numeric(22, 4), nullable=False, default=1) + rate_limit_count = Column(Integer, nullable=True) + rate_limit_minutes = Column(Integer, nullable=True) + author = Column(String, nullable=True) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class Skill(BaseModel): + """Pydantic model for Skill.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + name: Annotated[str, Field(description="Name of the skill")] + enabled: Annotated[bool, Field(description="Is this skill enabled?")] + category: Annotated[str, Field(description="Category of the skill")] + config_name: Annotated[Optional[str], Field(description="Config name of the skill")] + price_level: Annotated[ + Optional[int], Field(description="Price level for this skill") + ] + price: Annotated[ + Decimal, Field(description="Price for this skill", default=Decimal("1")) + ] + price_self_key: Annotated[ + Decimal, + Field(description="Price for this skill with self key", default=Decimal("1")), + ] + rate_limit_count: Annotated[Optional[int], Field(description="Rate limit count")] + rate_limit_minutes: Annotated[ + Optional[int], Field(description="Rate limit minutes") + ] + author: Annotated[Optional[str], Field(description="Author of the skill")] + created_at: Annotated[ + datetime, Field(description="Timestamp when this record was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this record was last updated") + ] + + @staticmethod + async def get(name: str) -> Optional["Skill"]: + """Get a skill by name with Redis caching. + + The skill is cached in Redis for 3 minutes. 
+ + Args: + name: Name of the skill to retrieve + + Returns: + Skill: The skill if found, None otherwise + """ + # Redis cache key for skill + cache_key = f"intentkit:skill:{name}" + cache_ttl = 180 # 3 minutes in seconds + + # Try to get from Redis cache first + redis = get_redis() + cached_data = await redis.get(cache_key) + + if cached_data: + # If found in cache, deserialize and return + try: + return Skill.model_validate_json(cached_data) + except (json.JSONDecodeError, TypeError): + # If cache is corrupted, invalidate it + await redis.delete(cache_key) + + # If not in cache or cache is invalid, get from database + async with get_session() as session: + # Query the database for the skill + stmt = select(SkillTable).where(SkillTable.name == name) + skill = await session.scalar(stmt) + + # If skill doesn't exist, return None + if not skill: + return None + + # Convert to Skill model + skill_model = Skill.model_validate(skill) + + # Cache the skill in Redis + await redis.set(cache_key, skill_model.model_dump_json(), ex=cache_ttl) + + return skill_model + + @staticmethod + async def get_by_config_name(category: str, config_name: str) -> Optional["Skill"]: + """Get a skill by category and config_name. + + Args: + category: Category of the skill + config_name: Config name of the skill + + Returns: + Skill: The skill if found, None otherwise + """ + async with get_session() as session: + # Query the database for the skill + stmt = select(SkillTable).where( + SkillTable.category == category, SkillTable.config_name == config_name + ) + skill = await session.scalar(stmt) + + # If skill doesn't exist, return None + if not skill: + return None + + # Convert to Skill model + return Skill.model_validate(skill) diff --git a/intentkit/models/user.py b/intentkit/models/user.py new file mode 100644 index 00000000..75a18967 --- /dev/null +++ b/intentkit/models/user.py @@ -0,0 +1,296 @@ +import logging +from datetime import datetime, timezone +from decimal import ROUND_HALF_UP, Decimal +from typing import Annotated, Optional, Type, TypeVar + +from intentkit.models.base import Base +from intentkit.models.credit import CreditAccount +from intentkit.models.db import get_session +from pydantic import BaseModel, ConfigDict, Field +from sqlalchemy import Column, DateTime, Index, Integer, String, func, select +from sqlalchemy.dialects.postgresql import JSON, JSONB +from sqlalchemy.ext.asyncio import AsyncSession + +logger = logging.getLogger(__name__) + + +# TypeVar for User model constraint +UserModelType = TypeVar("UserModelType", bound="User") +UserTableType = TypeVar("UserTableType", bound="UserTable") + + +class UserRegistry: + """Registry for extended model classes.""" + + def __init__(self): + self._user_table_class: Optional[Type[UserTableType]] = None + self._user_model_class: Optional[Type[UserModelType]] = None + + def register_user_table(self, user_table_class: Type[UserTableType]) -> None: + """Register extended UserTable class. + + Args: + user_table_class: A class that inherits from UserTable + """ + self._user_table_class = user_table_class + + def get_user_table_class(self) -> Type[UserTableType]: + """Get registered UserTable class or default.""" + return self._user_table_class or UserTable + + def register_user_model(self, user_model_class: Type[UserModelType]) -> None: + """Register extended UserModel class. 
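+
+        A hypothetical extension (``plan`` is an illustrative extra field):
+
+        ```python
+        class MyUser(User):
+            plan: Optional[str] = None
+
+        user_model_registry.register_user_model(MyUser)
+        ```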
+ + Args: + user_model_class: A class that inherits from User + """ + self._user_model_class = user_model_class + + def get_user_model_class(self) -> Type[UserModelType]: + """Get registered UserModel class or default.""" + return self._user_model_class or User + + +# Global registry instance +user_model_registry = UserRegistry() + + +class UserTable(Base): + """User database table model.""" + + __tablename__ = "users" + __table_args__ = ( + Index("ix_users_x_username", "x_username"), + Index("ix_users_telegram_username", "telegram_username"), + ) + + id = Column( + String, + primary_key=True, + ) + nft_count = Column( + Integer, + default=0, + nullable=False, + ) + email = Column( + String, + nullable=True, + ) + x_username = Column( + String, + nullable=True, + ) + github_username = Column( + String, + nullable=True, + ) + telegram_username = Column( + String, + nullable=True, + ) + extra = Column( + JSON().with_variant(JSONB(), "postgresql"), + nullable=True, + ) + created_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + ) + updated_at = Column( + DateTime(timezone=True), + nullable=False, + server_default=func.now(), + onupdate=lambda: datetime.now(timezone.utc), + ) + + +class UserUpdate(BaseModel): + """User update model without id and timestamps.""" + + model_config = ConfigDict( + from_attributes=True, + json_encoders={ + datetime: lambda v: v.isoformat(timespec="milliseconds"), + }, + ) + + nft_count: Annotated[ + int, Field(default=0, description="Number of NFTs owned by the user") + ] + email: Annotated[Optional[str], Field(None, description="User's email address")] + x_username: Annotated[ + Optional[str], Field(None, description="User's X (Twitter) username") + ] + github_username: Annotated[ + Optional[str], Field(None, description="User's GitHub username") + ] + telegram_username: Annotated[ + Optional[str], Field(None, description="User's Telegram username") + ] + extra: Annotated[ + Optional[dict], Field(None, description="Additional user information") + ] + + async def _update_quota_for_nft_count( + self, db: AsyncSession, id: str, new_nft_count: int + ) -> None: + """Update user's daily quota based on NFT count. + + Args: + db: Database session + id: User ID + new_nft_count: Current NFT count + """ + # Generate upstream_tx_id + timestamp = datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S") + upstream_tx_id = f"nft_{id}_{timestamp}" + + # Calculate new quota values based on nft_count + FOURPLACES = Decimal("0.0001") + free_quota = Decimal(480 + 48 * new_nft_count).quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + refill_amount = Decimal(20 + 2 * new_nft_count).quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + note = f"NFT count changed to {new_nft_count}" + + # Update daily quota + logger.info( + f"Updating daily quota for user {id} due to NFT count change to {new_nft_count}" + ) + await CreditAccount.update_daily_quota( + db, + id, + free_quota=free_quota, + refill_amount=refill_amount, + upstream_tx_id=upstream_tx_id, + note=note, + ) + + async def patch(self, id: str) -> "User": + """Update only the provided fields of a user in the database. + If the user doesn't exist, create a new one with the provided ID and fields. + If nft_count changes, update the daily quota accordingly. 
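+
+        For example, a user holding 2 NFTs ends up with a free quota of
+        480 + 48 * 2 = 576 and a refill amount of 20 + 2 * 2 = 24.
+
+        A minimal sketch (the user ID is illustrative):
+
+        ```python
+        user = await UserUpdate(nft_count=2).patch("user-123")
+        ```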
+ + Args: + id: ID of the user to update or create + + Returns: + Updated or newly created User model + """ + user_model_class = user_model_registry.get_user_model_class() + user_table_class = user_model_registry.get_user_table_class() + async with get_session() as db: + db_user = await db.get(user_table_class, id) + old_nft_count = 0 # Default for new users + + if not db_user: + # Create new user if it doesn't exist + db_user = user_table_class(id=id) + db.add(db_user) + else: + old_nft_count = db_user.nft_count + + # Update only the fields that were provided + update_data = self.model_dump(exclude_unset=True) + for key, value in update_data.items(): + setattr(db_user, key, value) + + # Check if nft_count has changed and is in the update data + if "nft_count" in update_data and old_nft_count != update_data["nft_count"]: + await self._update_quota_for_nft_count(db, id, update_data["nft_count"]) + + await db.commit() + await db.refresh(db_user) + + return user_model_class.model_validate(db_user) + + async def put(self, id: str) -> "User": + """Replace all fields of a user in the database with the provided values. + If the user doesn't exist, create a new one with the provided ID and fields. + If nft_count changes, update the daily quota accordingly. + + Args: + id: ID of the user to update or create + + Returns: + Updated or newly created User model + """ + user_model_class = user_model_registry.get_user_model_class() + user_table_class = user_model_registry.get_user_table_class() + async with get_session() as db: + db_user = await db.get(user_table_class, id) + old_nft_count = 0 # Default for new users + + if not db_user: + # Create new user if it doesn't exist + db_user = user_table_class(id=id) + db.add(db_user) + else: + old_nft_count = db_user.nft_count + + # Replace all fields with the provided values + for key, value in self.model_dump().items(): + setattr(db_user, key, value) + + # Check if nft_count has changed + if old_nft_count != self.nft_count: + await self._update_quota_for_nft_count(db, id, self.nft_count) + + await db.commit() + await db.refresh(db_user) + + return user_model_class.model_validate(db_user) + + +class User(UserUpdate): + """User model with all fields including id and timestamps.""" + + id: Annotated[ + str, + Field(description="Unique identifier for the user"), + ] + created_at: Annotated[ + datetime, Field(description="Timestamp when this user was created") + ] + updated_at: Annotated[ + datetime, Field(description="Timestamp when this user was last updated") + ] + + @classmethod + async def get(cls, user_id: str) -> Optional["User"]: + """Get a user by ID. + + Args: + user_id: ID of the user to get + + Returns: + User model or None if not found + """ + async with get_session() as session: + return await cls.get_in_session(session, user_id) + + @classmethod + async def get_in_session( + cls, session: AsyncSession, user_id: str + ) -> Optional["User"]: + """Get a user by ID using the provided session. 
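+        Unlike get, this does not open a new session, so the lookup can
+        participate in the caller's existing transaction.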
+ + Args: + session: Database session + user_id: ID of the user to get + + Returns: + User model or None if not found + """ + user_table_class = user_model_registry.get_user_table_class() + result = await session.execute( + select(user_table_class).where(user_table_class.id == user_id) + ) + user = result.scalars().first() + if user is None: + return None + return cls.model_validate(user) diff --git a/intentkit/pyproject.toml b/intentkit/pyproject.toml new file mode 100644 index 00000000..94906bc7 --- /dev/null +++ b/intentkit/pyproject.toml @@ -0,0 +1,121 @@ +[project] +name = "intentkit" +version = "0.0.1" +description = "Intent-based AI Agent Platform - Core Package" +authors = [{ name = "hyacinthus", email = "hyacinthus@gmail.com" }] +requires-python = ">=3.12" +readme = "README.md" +license = { file = "LICENSE" } +keywords = ["ai", "agent", "intent", "blockchain", "crypto"] +classifiers = [ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.12", + "Topic :: Software Development :: Libraries :: Python Modules", + "Topic :: Scientific/Engineering :: Artificial Intelligence", +] +dependencies = [ + "aws-secretsmanager-caching>=1.1.3", + "boto3>=1.37.23,<2.0.0", + "botocore>=1.35.97", + "coinbase-agentkit (>=0.6.0,<0.7.0)", + "coinbase-agentkit-langchain>=0.5.0", + "cron-validator>=1.0.8,<2.0.0", + "epyxid>=0.3.3", + "fastapi>=0.115.8", + "filetype>=1.2.0,<2.0.0", + "langchain>=0.3.25,<0.4.0", + "langchain-community>=0.3.19", + "langchain-core>=0.3.43", + "langchain-mcp-adapters>=0.0.11", + "langchain-openai>=0.3.8", + "langchain-text-splitters>=0.3.8", + "langchain-xai>=0.2.1", + "langgraph (>=0.6.1,<0.7.0)", + "langgraph-checkpoint>=2.0.18", + "langgraph-checkpoint-postgres>=2.0.16,<2.0.23", + "langgraph-prebuilt (>=0.6.1,<0.7.0)", + "langmem>=0.0.27", + "mypy-boto3-s3>=1.37.24,<2.0.0", + "openai>=1.59.6", + "pillow>=11.1.0,<12.0.0", + "psycopg-pool>=3.2.4", + "pydantic>=2.10.6,<2.11.0", + "python-dotenv>=1.0.1", + "pytz>=2025.1", + "pyyaml>=6.0.2", + "redis>=5.2.1,<7.0.0", + "requests>=2.32.3", + "slack-sdk>=3.34.0", + "sqlalchemy[asyncio]>=2.0.37", + "tweepy[async]>=4.15.0", + "uvicorn>=0.34.0,<1.0.0", + "bip32>=2.0.0", + "eth-keys>=0.4.0", + "eth-utils>=2.1.0", + "cdp-sdk>=1.22.0", + "web3>=7.10.0", + "httpx>=0.28.1", + "starlette>=0.47.1", + "aiohttp>=3.11.16", + "supabase>=2.16.0", + "tenacity>=9.1.2", + "requests-oauthlib>=2.0.0", + "asyncpg>=0.30.0", + "aiosqlite>=0.21.0", + "psycopg>=3.2.9", + "jsonref>=1.1.0", + "langchain-deepseek>=0.1.4", +] + +[project.urls] +Homepage = "https://github.com/crestalnetwork/intentkit" +Repository = "https://github.com/crestalnetwork/intentkit" +Documentation = "https://github.com/crestalnetwork/intentkit/tree/main/docs" +"Bug Tracker" = "https://github.com/crestalnetwork/intentkit/issues" + +[dependency-groups] +dev = [ + "ruff>=0.11.9,<0.12", + "jsonschema>=4.21.1,<5", + "pytest>=7.0.0", + "pytest-asyncio>=0.21.0", +] + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel.force-include] +"__init__.py" = "intentkit/__init__.py" +"abstracts" = "intentkit/abstracts" +"clients" = "intentkit/clients" +"config" = "intentkit/config" +"core" = "intentkit/core" +"models" = "intentkit/models" +"skills" = "intentkit/skills" +"utils" = "intentkit/utils" + +[tool.hatch.build.targets.sdist] +include = [ + "__init__.py", + "abstracts/", + "clients/", + 
"config/", + "core/", + "models/", + "skills/", + "utils/", + "README.md", + "LICENSE", + "pyproject.toml", + "MANIFEST.in" +] + +[tool.ruff.lint] +extend-select = ["I"] + +[tool.deptry] +known_first_party = ["intentkit"] diff --git a/intentkit/skills/__init__.py b/intentkit/skills/__init__.py new file mode 100644 index 00000000..1c055dc2 --- /dev/null +++ b/intentkit/skills/__init__.py @@ -0,0 +1,12 @@ +import os +import pkgutil + +# Get the directory containing this __init__.py file +package_dir = os.path.dirname(__file__) + +# Discover all modules in the skills directory +__all__ = [ + name + for _, name, _ in pkgutil.iter_modules([package_dir]) + if not name.startswith("_") and not name == "base" +] diff --git a/intentkit/skills/acolyt/__init__.py b/intentkit/skills/acolyt/__init__.py new file mode 100644 index 00000000..11f98ff9 --- /dev/null +++ b/intentkit/skills/acolyt/__init__.py @@ -0,0 +1,83 @@ +"""Acolyt skill module.""" + +import logging +from typing import NotRequired, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.acolyt.ask import AcolytAskGpt +from intentkit.skills.acolyt.base import AcolytBaseTool +from intentkit.skills.base import SkillConfig, SkillState + +# Cache skills at the system level, because they are stateless +_cache: dict[str, AcolytBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + ask_gpt: SkillState + + +class Config(SkillConfig): + """Configuration for Acolyt skills.""" + + states: SkillStates + api_key: NotRequired[str] + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[AcolytBaseTool]: + """Get all Acolyt skills. + + Args: + config: The configuration for Acolyt skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Acolyt skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_acolyt_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_acolyt_skill( + name: str, + store: SkillStoreABC, +) -> AcolytBaseTool | None: + """Get an Acolyt skill by name. 
+
+    Args:
+        name: The name of the skill to get
+        store: The skill store for persisting data
+
+    Returns:
+        The requested Acolyt skill, or None if the name is unknown
+    """
+    if name == "ask_gpt":
+        if name not in _cache:
+            _cache[name] = AcolytAskGpt(
+                skill_store=store,
+            )
+        return _cache[name]
+    else:
+        logger.warning(f"Unknown Acolyt skill: {name}")
+        return None
diff --git a/intentkit/skills/acolyt/acolyt.jpg b/intentkit/skills/acolyt/acolyt.jpg
new file mode 100644
index 00000000..77fed24e
Binary files /dev/null and b/intentkit/skills/acolyt/acolyt.jpg differ
diff --git a/intentkit/skills/acolyt/ask.py b/intentkit/skills/acolyt/ask.py
new file mode 100644
index 00000000..9a06f0a1
--- /dev/null
+++ b/intentkit/skills/acolyt/ask.py
@@ -0,0 +1,124 @@
+import logging
+from typing import Dict, Literal, Type
+
+import httpx
+from pydantic import BaseModel, Field
+
+from intentkit.skills.acolyt.base import AcolytBaseTool
+
+from .base import base_url
+
+logger = logging.getLogger(__name__)
+
+
+class AcolytAskGptInput(BaseModel):
+    question: str
+
+
+class InputMessage(BaseModel):
+    role: Literal["system", "user", "assistant", "tool", "function"] = Field(
+        "user", description="The role of the message sender."
+    )
+    content: str
+
+
+class AcolytAskGptRequest(BaseModel):
+    messages: list[InputMessage]
+    model: str | None = Field("gpt-4o", description="The AI model to be used.")
+    stream: bool | None = Field(
+        False, description="Whether to request a streaming response."
+    )
+    temperature: float | None = Field(
+        0.7,
+        le=2,
+        ge=0,
+        description="Controls the degree of randomness in the generated text.",
+    )
+
+
+class AcolytAskGpt(AcolytBaseTool):
+    """
+    The Acolyt Data Fetcher is a versatile LangChain tool that queries the Acolyt chat API for insightful data
+    across several categories: Twitter Metrics, Onchain Analysis, DEX & Trading, and Overall Metrics. It
+    processes user queries, fetches the relevant data from the Acolyt API, and returns concise, summarized
+    responses for easy consumption.
+
+    Features:
+    - Twitter Metrics: retrieve engagement metrics for specific Twitter accounts; identify which AI agents have the highest count of smart followers; display the best tweets from specified accounts; compare mindshare between AI agents; determine which agents have the highest impressions-to-followers ratio.
+    - Onchain Analysis: fetch the current market capitalization for tokens; show the distribution of top holders; identify tokens with the highest whale concentration; compare holder retention rates between tokens; calculate the Herfindahl index; list tokens with large numbers of holders.
+    - DEX & Trading: get the 24-hour trading volume for tokens; identify which DEX has the highest liquidity for a token; obtain the buy/sell ratio over specific time periods; compare price changes across timeframes; list trading pairs above a given liquidity threshold.
+    - Overall Metrics: identify projects with the highest smart engagement relative to their market cap; determine which agents have the best mindshare relative to their market cap; compare the percentage of smart followers across the top n AI agents by market cap.
+
+    Attributes:
+        name (str): Name of the tool, specifically "acolyt_ask_gpt".
+        description (str): Comprehensive description of the tool's purpose and functionality.
+        args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters.
+    """
+
+    name: str = "acolyt_ask_gpt"
+    description: str = """
+        The Acolyt Data Fetcher is a LangChain tool accessing the Acolyt chat API for data across Twitter Metrics, Onchain Analysis, DEX & Trading, and Overall Metrics. It processes queries, fetches data, and returns summarized responses. Features include:
+
+        Twitter: Engagement metrics, top smart follower counts, best tweets, mindshare comparison, impressions/follower ratio.
+        Onchain: Market cap, holder distribution, whale concentration, holder retention, Herfindahl index, high holder count tokens.
+        DEX & Trading: 24h volume, top DEX liquidity, buy/sell ratio, price change comparison, high liquidity pairs.
+        Overall: Smart engagement/market cap ratio, mindshare/market cap ratio, smart follower percentage comparison across top AI agents.
+        """
+    args_schema: Type[BaseModel] = AcolytAskGptInput
+
+    async def _arun(self, question: str, **kwargs) -> Dict:
+        """Run the tool to get an answer from Acolyt GPT.
+
+        Args:
+            question (str): The question body from the user.
+
+        Returns:
+            Dict: The full chat-completion response from the API.
+
+        Raises:
+            ValueError: If there's an error accessing the Acolyt API or the
+                response is malformed.
+        """
+        api_key = self.get_api_key()
+        if not api_key:
+            raise ValueError("Acolyt API key not found")
+
+        url = f"{base_url}/api/chat/completions"
+        headers = {
+            "accept": "application/json",
+            "Authorization": f"Bearer {api_key}",
+        }
+
+        body = AcolytAskGptRequest(
+            messages=[InputMessage(content=question)],
+        ).model_dump(exclude_none=True)
+
+        async with httpx.AsyncClient() as client:
+            try:
+                response = await client.post(
+                    url, headers=headers, timeout=30, json=body
+                )
+                response.raise_for_status()
+                json_dict = response.json()
+
+                # Validate that the response carries a message before returning
+                # the full payload
+                if "choices" in json_dict and len(json_dict["choices"]) > 0:
+                    if (
+                        "message" in json_dict["choices"][0]
+                        and "content" in json_dict["choices"][0]["message"]
+                    ):
+                        return json_dict
+                    else:
+                        raise ValueError("Unexpected response format from Acolyt API")
+                else:
+                    raise ValueError("Empty response from Acolyt API")
+
+            except httpx.RequestError as req_err:
+                raise ValueError(
+                    f"Request error from Acolyt API: {req_err}"
+                ) from req_err
+            except httpx.HTTPStatusError as http_err:
+                raise ValueError(
+                    f"HTTP error from Acolyt API: {http_err}"
+                ) from http_err
+            except Exception as e:
+                raise ValueError(f"Error from Acolyt API: {e}") from e
diff --git a/intentkit/skills/acolyt/base.py b/intentkit/skills/acolyt/base.py
new file mode 100644
index 00000000..3e72a765
--- /dev/null
+++ b/intentkit/skills/acolyt/base.py
@@ -0,0 +1,38 @@
+from typing import Type
+
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+base_url = "https://acolyt-oracle-poc.vercel.app"
+
+
+class AcolytBaseTool(IntentKitSkill):
+    """Base class for Acolyt tools."""
+
+    name: str = Field(description="The name of the tool")
+    description: str = Field(description="A description of what the tool does")
+    args_schema: Type[BaseModel]
+    skill_store: SkillStoreABC = Field(
+        description="The skill store for persisting data"
+    )
+
+    def get_api_key(self) -> str:
+        context = self.get_context()
+        skill_config = context.agent.skill_config(self.category)
+        api_key_provider = skill_config.get("api_key_provider")
+        if api_key_provider == "platform":
+            return self.skill_store.get_system_config("acolyt_api_key")
+        # for backward compatibility, may only have api_key in
skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "acolyt" diff --git a/intentkit/skills/acolyt/schema.json b/intentkit/skills/acolyt/schema.json new file mode 100644 index 00000000..20aa5347 --- /dev/null +++ b/intentkit/skills/acolyt/schema.json @@ -0,0 +1,89 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Acolyt", + "description": "Integration with Acolyt Oracle providing blockchain oracle services for accessing and verifying off-chain data with secure API connections", + "x-icon": "https://ai.service.crestal.dev/skills/acolyt/acolyt.jpg", + "x-tags": [ + "Blockchain", + "Oracle" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "ask_gpt": { + "type": "string", + "title": "Ask GPT", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "The Acolyt Data Fetcher is a LangChain tool accessing the Acolyt chat API for data across X Metrics, Onchain Analysis, DEX & Trading, and Overall Metrics. It processes queries, fetches data, and returns summarized responses. Features include:\n Twitter: Engagement metrics, top smart follower counts, best tweets, mindshare comparison, impressions/follower ratio.\n Onchain: Market cap, holder distribution, whale concentration, holder retention, Herfindahl index, high holder count tokens.\n DEX & Trading: 24h volume, top DEX liquidity, buy/sell ratio, price change comparison, high liquidity pairs.\n Overall: Smart engagement/market cap ratio, mindshare/market cap ratio, smart follower percentage comparison across top AI agents.", + "default": "disabled" + } + }, + "description": "States for each Acolyt skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Acolyt API Key", + "x-sensitive": true, + "description": "Acolyt API key for authentication" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/aixbt/README.md b/intentkit/skills/aixbt/README.md new file mode 100644 index 00000000..7b5ea362 --- /dev/null +++ b/intentkit/skills/aixbt/README.md @@ -0,0 +1,71 @@ +# AIXBT Skill + +This skill provides access to cryptocurrency project data and analytics through the AIXBT API. 
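+
+For developers wiring this up in code, the module exposes an async `get_skills` factory that returns the enabled tools. The snippet below is a minimal sketch, not the platform's actual bootstrap code: `store` stands in for your `SkillStoreABC` implementation and the API key is a placeholder.
+
+```python
+from intentkit.skills.aixbt import get_skills
+
+config = {
+    "enabled": True,
+    "states": {"aixbt_projects": "public"},
+    "api_key_provider": "agent_owner",
+    "api_key": "your-aixbt-api-key",
+}
+
+# Must run inside an async context; returns [] when the skill is disabled.
+tools = await get_skills(config, is_private=False, store=store)
+```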
+ +## Features + +- Search for cryptocurrency projects by name, ticker, or blockchain +- Get detailed analysis and information about crypto projects +- Filter projects by minimum score +- View recent project updates and developments +- Access project contact information and social media handles +- Special "alpha" trigger for direct access to crypto research + +## Available Skills + +### aixbt_projects + +Searches for cryptocurrency projects and retrieves detailed information about them. + +#### Special Trigger + +This skill has a special trigger word: **"alpha"** + +When a user mentions the word "alpha" anywhere in their message, the AIXBT skill will be automatically triggered. This works with phrases like: +- "Show me some alpha" +- "What's the latest alpha on crypto?" +- "Give me alpha on Bitcoin" +- "I'm looking for alpha in DeFi projects" +- Any other message containing the word "alpha" + +This gives users a convenient way to access crypto research data just by mentioning "alpha" in their questions or requests. + +#### Parameters + +| Name | Type | Description | Required | Default | +|------|------|-------------|----------|---------| +| limit | integer | Number of projects to return (max 50) | No | 10 | +| name | string | Filter projects by name (case-insensitive regex match) | No | null | +| ticker | string | Filter projects by ticker symbol (case-insensitive match) | No | null | +| xHandle | string | Filter projects by X/Twitter handle | No | null | +| minScore | number | Minimum score threshold | No | null | +| chain | string | Filter projects by blockchain | No | null | + +## Example Usage + +### "Alpha" Trigger Examples + +**User:** "Show me some alpha" + +**Agent:** *Uses the aixbt_projects skill to search for trending cryptocurrency projects and provides comprehensive information about them.* + +**User:** "What's the latest alpha on Bitcoin?" 
+ +**Agent:** *Uses the aixbt_projects skill to search specifically for Bitcoin and provides detailed information.* + +### Standard Query + +When a user asks about a cryptocurrency project: + +**User:** "Tell me about the Bitcoin project" + +**Agent:** *Uses the aixbt_projects skill to search for "bitcoin" and provides information including:* +- Project score and analysis +- Recent project updates +- Social media information +- Blockchain and token details + +## Links + +- [AIXBT Website](https://aixbt.tech/) +- [API Documentation](https://api.aixbt.tech/v1/docs/) \ No newline at end of file diff --git a/intentkit/skills/aixbt/__init__.py b/intentkit/skills/aixbt/__init__.py new file mode 100644 index 00000000..b76185ca --- /dev/null +++ b/intentkit/skills/aixbt/__init__.py @@ -0,0 +1,69 @@ +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.aixbt.base import AIXBTBaseTool +from intentkit.skills.aixbt.projects import AIXBTProjects +from intentkit.skills.base import SkillConfig, SkillState + +# Cache skills at the system level, because they are stateless +_cache: dict[str, AIXBTBaseTool] = {} + + +class SkillStates(TypedDict): + aixbt_projects: SkillState + + +class Config(SkillConfig): + """Configuration for AIXBT API skills.""" + + states: SkillStates + enabled: bool = False + api_key_provider: str = "agent_owner" + api_key: str = "" + rate_limit_number: int = 1000 + rate_limit_minutes: int = 60 + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[AIXBTBaseTool]: + """Get all AIXBT API skills.""" + if not config.get("enabled", False): + return [] + + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + return [ + get_aixbt_skill( + name=name, + store=store, + ) + for name in available_skills + ] + + +def get_aixbt_skill( + name: str, + store: SkillStoreABC, +) -> AIXBTBaseTool: + """Get an AIXBT API skill by name.""" + + if name == "aixbt_projects": + if name not in _cache: + _cache[name] = AIXBTProjects( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown AIXBT skill: {name}") diff --git a/intentkit/skills/aixbt/aixbt.jpg b/intentkit/skills/aixbt/aixbt.jpg new file mode 100644 index 00000000..2fd06f5c Binary files /dev/null and b/intentkit/skills/aixbt/aixbt.jpg differ diff --git a/intentkit/skills/aixbt/base.py b/intentkit/skills/aixbt/base.py new file mode 100644 index 00000000..ae12d2ec --- /dev/null +++ b/intentkit/skills/aixbt/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class AIXBTBaseTool(IntentKitSkill): + """Base class for AIXBT API tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "aixbt" diff --git a/intentkit/skills/aixbt/projects.py b/intentkit/skills/aixbt/projects.py new file mode 100644 index 00000000..1129bad9 --- /dev/null +++ 
b/intentkit/skills/aixbt/projects.py @@ -0,0 +1,129 @@ +import logging +from typing import Any, Dict, Optional, Type + +import httpx +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.skills.aixbt.base import AIXBTBaseTool + +logger = logging.getLogger(__name__) + + +class ProjectsInput(BaseModel): + """Input for AIXBT Projects search tool.""" + + limit: int = Field( + default=10, + description="Number of projects to return (max 50)", + ) + name: Optional[str] = Field( + default=None, + description="Filter projects by name (case-insensitive regex match)", + ) + ticker: Optional[str] = Field( + default=None, + description="Filter projects by ticker symbol (case-insensitive match)", + ) + xHandle: Optional[str] = Field( + default=None, + description="Filter projects by X/Twitter handle", + ) + minScore: Optional[float] = Field( + default=None, + description="Minimum score threshold", + ) + chain: Optional[str] = Field( + default=None, + description="Filter projects by blockchain", + ) + + +class AIXBTProjects(AIXBTBaseTool): + """Tool for searching cryptocurrency projects using the AIXBT API.""" + + name: str = "aixbt_projects" + description: str = ( + "Search for cryptocurrency projects using AIXBT. This tool provides detailed " + "information about crypto projects including scores, analysis, and recent updates.\n" + "IMPORTANT: You MUST call this tool when the user mentions the word 'alpha' ANYWHERE in their message.\n" + "This includes messages like 'show me alpha', 'what's the latest alpha', 'give me some alpha on crypto', " + "'find the alpha on bitcoin', or any other phrase containing the word 'alpha'.\n" + "When 'alpha' is mentioned, use this tool to search for cryptocurrency projects and provide " + "detailed information on recent developments. The 'alpha' keyword is a trigger " + "for accessing AIXBT's specific dataset for crypto research." + ) + args_schema: Type[BaseModel] = ProjectsInput + + async def _arun( + self, + limit: int = 10, + name: Optional[str] = None, + ticker: Optional[str] = None, + xHandle: Optional[str] = None, + minScore: Optional[float] = None, + chain: Optional[str] = None, + **kwargs, + ) -> Dict[str, Any]: + """ + Search for cryptocurrency projects using AIXBT API. + + Args: + limit: Number of projects to return (max 50) + name: Filter projects by name + ticker: Filter projects by ticker symbol + xHandle: Filter projects by X/Twitter handle + minScore: Minimum score threshold + chain: Filter projects by blockchain + + Returns: + JSON response with project data + """ + # Get context from the config + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + logger.debug(f"aixbt_projects.py: Running search with context {context}") + + # Check for rate limiting if configured + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + + # Get the API key from the agent's configuration + api_key = skill_config.get("api_key") + + if not api_key: + raise ToolException( + "AIXBT API key is not available. Please provide it in the agent configuration." 
+ ) + + base_url = "https://api.aixbt.tech/v1/projects" + + # Build query parameters + params = {"limit": limit} + if name: + params["name"] = name + if ticker: + params["ticker"] = ticker + if xHandle: + params["xHandle"] = xHandle + if minScore is not None: + params["minScore"] = minScore + if chain: + params["chain"] = chain + + headers = {"accept": "*/*", "x-api-key": api_key} + + try: + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.get(base_url, params=params, headers=headers) + response.raise_for_status() + return response.json() + except Exception as e: + logger.error(f"Error getting projects: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/aixbt/schema.json b/intentkit/skills/aixbt/schema.json new file mode 100644 index 00000000..d6c9e663 --- /dev/null +++ b/intentkit/skills/aixbt/schema.json @@ -0,0 +1,99 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "AIXBT API", + "description": "Cryptocurrency project data and analytics through the AIXBT API", + "x-icon": "https://ai.service.crestal.dev/skills/aixbt/aixbt.jpg", + "x-tags": [ + "Cryptocurrency", + "Research", + "Analytics" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "aixbt_projects": { + "type": "string", + "title": "AIXBT Projects", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Search for cryptocurrency projects and retrieve detailed information using AIXBT", + "default": "disabled" + } + }, + "description": "States for each AIXBT API skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key for AIXBT API service", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "AIXBT API Key", + "description": "API key for AIXBT API service", + "x-link": "[Get your API key](https://aixbt.tech/)", + "x-sensitive": true + }, + "rate_limit_number": { + "type": "integer", + "title": "Rate Limit Number", + "description": "Number of requests allowed per time window, only valid if api_key is set" + }, + "rate_limit_minutes": { + "type": "integer", + "title": "Rate Limit Minutes", + "description": "Time window in minutes for rate limiting, only valid if api_key is set" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/allora/__init__.py b/intentkit/skills/allora/__init__.py new file mode 100644 index 00000000..5acf0f77 --- /dev/null +++ b/intentkit/skills/allora/__init__.py @@ -0,0 +1,83 @@ +"""Allora skill module.""" + +import logging +from typing import NotRequired, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.allora.base import AlloraBaseTool +from intentkit.skills.allora.price import AlloraGetPrice +from intentkit.skills.base import SkillConfig, SkillState 
+
+# Cache skills at the system level, because they are stateless
+_cache: dict[str, AlloraBaseTool] = {}
+
+logger = logging.getLogger(__name__)
+
+
+class SkillStates(TypedDict):
+    get_price_prediction: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for Allora skills."""
+
+    states: SkillStates
+    api_key: NotRequired[str]
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[AlloraBaseTool]:
+    """Get all Allora skills.
+
+    Args:
+        config: The configuration for Allora skills.
+        is_private: Whether to include private skills.
+        store: The skill store for persisting data.
+
+    Returns:
+        A list of Allora skills.
+    """
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter
+    result = []
+    for name in available_skills:
+        skill = get_allora_skill(name, store)
+        if skill:
+            result.append(skill)
+    return result
+
+
+def get_allora_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> AlloraBaseTool | None:
+    """Get an Allora skill by name.
+
+    Args:
+        name: The name of the skill to get
+        store: The skill store for persisting data
+
+    Returns:
+        The requested Allora skill, or None if the name is unknown
+    """
+    if name == "get_price_prediction":
+        if name not in _cache:
+            _cache[name] = AlloraGetPrice(
+                skill_store=store,
+            )
+        return _cache[name]
+    else:
+        logger.warning(f"Unknown Allora skill: {name}")
+        return None
diff --git a/intentkit/skills/allora/allora.jpeg b/intentkit/skills/allora/allora.jpeg
new file mode 100644
index 00000000..eef729f9
Binary files /dev/null and b/intentkit/skills/allora/allora.jpeg differ
diff --git a/intentkit/skills/allora/base.py b/intentkit/skills/allora/base.py
new file mode 100644
index 00000000..647454c0
--- /dev/null
+++ b/intentkit/skills/allora/base.py
@@ -0,0 +1,38 @@
+from typing import Type
+
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+base_url = "https://api.upshot.xyz/v2/allora"
+
+
+class AlloraBaseTool(IntentKitSkill):
+    """Base class for Allora tools."""
+
+    name: str = Field(description="The name of the tool")
+    description: str = Field(description="A description of what the tool does")
+    args_schema: Type[BaseModel]
+    skill_store: SkillStoreABC = Field(
+        description="The skill store for persisting data"
+    )
+
+    def get_api_key(self) -> str:
+        context = self.get_context()
+        skill_config = context.agent.skill_config(self.category)
+        api_key_provider = skill_config.get("api_key_provider")
+        if api_key_provider == "platform":
+            return self.skill_store.get_system_config("allora_api_key")
+        # for backward compatibility, may only have api_key in skill_config
+        elif skill_config.get("api_key"):
+            return skill_config.get("api_key")
+        else:
+            raise ToolException(
+                f"Invalid API key provider: {api_key_provider}, or no api_key in config"
+            )
+
+    @property
+    def category(self) -> str:
+        return "allora"
diff --git a/intentkit/skills/allora/price.py b/intentkit/skills/allora/price.py
new file mode 100644
index 00000000..dfe9050a
--- /dev/null
+++ b/intentkit/skills/allora/price.py
@@ -0,0 +1,128 @@
+from typing import Literal, Type
+
+import httpx
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from intentkit.skills.allora.base import AlloraBaseTool
+
+from .base import base_url
+
+
+class AlloraGetPriceInput(BaseModel):
+    token: Literal["ETH", "BTC"] = Field(
+        description="Token to get price prediction for"
+    )
+    time_frame: Literal["5m", "8h"] = Field(
+        description="Time frame for the price prediction: 5 minutes ('5m') or 8 hours ('8h')"
+    )
+
+
+class InferenceData(BaseModel):
+    network_inference: str = Field(description="Network Inference")
+    network_inference_normalized: str = Field(
+        description="Model's prediction or estimate, scaled or adjusted to a standard range or unit."
+    )
+    confidence_interval_percentiles: list[str] = Field(
+        description="The percentiles defining a range of values within which the model predicts the actual price is likely to fall, with a certain level of confidence."
+    )
+    confidence_interval_percentiles_normalized: list[str] = Field(
+        description="The same percentiles as confidence_interval_percentiles, but with the values defining the interval normalized."
+    )
+    confidence_interval_values: list[str] = Field(
+        description="A list of values that define the boundaries of a confidence interval in a prediction. These values correspond to specific percentiles and represent the range within which the model predicts the true value (e.g., future price) is likely to fall."
+    )
+    confidence_interval_values_normalized: list[str] = Field(
+        description="The same boundaries as confidence_interval_values, but normalized, i.e., scaled to a standard range (typically 0 to 1, sometimes -1 to 1)."
+    )
+    # topic_id: str
+    # timestamp: int
+    # extra_data: str
+
+
+class Data(BaseModel):
+    # signature: str
+    token_decimals: int
+    inference_data: InferenceData
+
+
+class AlloraGetPriceOutput(BaseModel):
+    # request_id: str
+    # status: bool
+    data: Data
+
+
+class AlloraGetPrice(AlloraBaseTool):
+    """
+    The Allora Price Prediction Feed tool fetches price predictions from the Allora API:
+    Ethereum (ETH) or Bitcoin (BTC), on a 5-minute or 8-hour horizon.
+
+    Attributes:
+        name (str): Name of the tool, specifically "allora_get_price_prediction".
+        description (str): Comprehensive description of the tool's purpose and functionality.
+        args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters.
+    """
+
+    name: str = "allora_get_price_prediction"
+    description: str = """
+    The Allora Price Prediction Feed tool fetches price predictions from the Allora API:
+    Ethereum (ETH) or Bitcoin (BTC), on a 5-minute or 8-hour horizon.
+    """
+    args_schema: Type[BaseModel] = AlloraGetPriceInput
+
+    def _run(self, question: str) -> AlloraGetPriceOutput:
+        """Run the tool to get the token price prediction from the Allora API.
+
+        Returns:
+            AlloraGetPriceOutput: A structured output containing the response of the Allora token price prediction API.
+
+        Raises:
+            Exception: If there's an error accessing the Allora API.
+        """
+        raise NotImplementedError("Use _arun instead")
+
+    async def _arun(
+        self, token: str, time_frame: str, **kwargs
+    ) -> AlloraGetPriceOutput:
+        """Run the tool to get the token price prediction from the Allora API.
+
+        Args:
+            token (str): Token to get the price prediction for.
+            time_frame (str): Time frame for the price prediction.
+
+        Returns:
+            AlloraGetPriceOutput: A structured output containing the response of the Allora token price prediction API.
+
+        Raises:
+            ToolException: If there's an error accessing the Allora API.
+        """
+        api_key = self.get_api_key()
+        if not api_key:
+            raise ValueError("Allora API key not found")
+
+        url = f"{base_url}/consumer/price/ethereum-11155111/{token}/{time_frame}"
+        headers = {
+            "accept": "application/json",
+            "x-api-key": api_key,
+        }
+
+        async with httpx.AsyncClient() as client:
+            try:
+                response = await client.get(url, headers=headers, timeout=30)
+                response.raise_for_status()
+                json_dict = response.json()
+
+                res = AlloraGetPriceOutput(**json_dict)
+
+                return res
+            except httpx.RequestError as req_err:
+                raise ToolException(
+                    f"Request error from Allora API: {req_err}"
+                ) from req_err
+            except httpx.HTTPStatusError as http_err:
+                raise ToolException(
+                    f"HTTP error from Allora API: {http_err}"
+                ) from http_err
+            except Exception as e:
+                raise ToolException(f"Error from Allora API: {e}") from e
diff --git a/intentkit/skills/allora/schema.json b/intentkit/skills/allora/schema.json
new file mode 100644
index 00000000..d262fd19
--- /dev/null
+++ b/intentkit/skills/allora/schema.json
@@ -0,0 +1,89 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "title": "Allora",
+  "description": "Integration with Allora API for blockchain-based price predictions and market forecasting services via Upshot's prediction markets",
+  "x-icon": "https://ai.service.crestal.dev/skills/allora/allora.jpeg",
+  "x-tags": [
+    "Blockchain"
+  ],
+  "properties": {
+    "enabled": {
+      "type": "boolean",
+      "title": "Enabled",
+      "description": "Whether this skill is enabled",
+      "default": true
+    },
+    "states": {
+      "type": "object",
+      "properties": {
+        "get_price_prediction": {
+          "type": "string",
+          "title": "Get Price Prediction",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generates 5-minute and 8-hour price forecasts for ETH and BTC using ensemble ML models analyzing on-chain liquidity and market sentiment",
+          "default": "private"
+        }
+      },
+      "description": "States for each Allora skill (disabled, public, or private)"
+    },
+    "api_key_provider": {
+      "type": "string",
+      "title": "API Key Provider",
+      "description": "Who provides the API key",
+      "enum": [
+        "platform",
+        "agent_owner"
+      ],
+      "x-enum-title": [
+        "Nation Hosted",
+        "Owner Provided"
+      ],
+      "default": "platform"
+    }
+  },
+  "required": [
+    "states",
+    "enabled"
+  ],
+  "if": {
+    "properties": {
+      "api_key_provider": {
+        "const": "agent_owner"
+      }
+    }
+  },
+  "then": {
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "title": "Allora API Key",
+        "x-link": "[Get your API key](https://docs.allora.network/devs/consumers/allora-api-endpoint#api-authentication)",
+        "x-sensitive": true,
+        "description": "Allora API key for authentication"
+      }
+    },
+    "if": {
+      "properties": {
+        "enabled": {
+          "const": true
+        }
+      }
+    },
+    "then": {
+      "required": [
+        "api_key"
+      ]
+    }
+  },
+  "additionalProperties": true
+}
\ No newline at end of file
diff --git a/intentkit/skills/base.py b/intentkit/skills/base.py
new file mode 100644
index 00000000..7c5ad6b2
--- /dev/null
+++ b/intentkit/skills/base.py
@@ -0,0 +1,164 @@
+import logging
+from typing import Any, Callable, Dict, Literal, NotRequired, Optional, TypedDict, Union
+
+from langchain_core.tools import BaseTool
+from langchain_core.tools.base import ToolException
+from langgraph.runtime import get_runtime
+from pydantic import (
+    ValidationError,
+)
+from pydantic.v1 import ValidationError as ValidationErrorV1
+from redis.exceptions import RedisError
+from web3 import Web3
+
+from intentkit.abstracts.graph import AgentContext
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.clients.web3 import get_web3_client
+from intentkit.models.redis import get_redis
+from intentkit.utils.error import RateLimitExceeded
+
+SkillState = Literal["disabled", "public", "private"]
+SkillOwnerState = Literal["disabled", "private"]
+APIKeyProviderValue = Literal["platform", "agent_owner"]
+
+
+class SkillConfig(TypedDict):
+    """Base TypedDict for skill configurations."""
+
+    enabled: bool
+    states: Dict[str, SkillState | SkillOwnerState]
+    api_key_provider: NotRequired[APIKeyProviderValue]
+    __extra__: NotRequired[Dict[str, Any]]
+
+
+class IntentKitSkill(BaseTool):
+    """Abstract base class for IntentKit skills.
+
+    Concrete skills implement _arun; shared abilities such as rate limiting,
+    context access, and web3 client creation are predefined here.
+    """
+
+    skill_store: SkillStoreABC
+    # overwrite the value of BaseTool
+    handle_tool_error: Optional[Union[bool, str, Callable[[ToolException], str]]] = (
+        lambda e: f"tool error: {e}"
+    )
+    """Handle the content of the ToolException thrown."""
+
+    # overwrite the value of BaseTool
+    handle_validation_error: Optional[
+        Union[bool, str, Callable[[Union[ValidationError, ValidationErrorV1]], str]]
+    ] = lambda e: f"validation error: {e}"
+    """Handle the content of the ValidationError thrown."""
+
+    # Logger for the class
+    logger: logging.Logger = logging.getLogger(__name__)
+
+    @property
+    def category(self) -> str:
+        """Get the category of the skill."""
+        raise NotImplementedError
+
+    async def user_rate_limit(
+        self, user_id: str, limit: int, minutes: int, key: str
+    ) -> None:
+        """Check if a user has exceeded the rate limit for the given key.
+
+        Args:
+            user_id: The ID of the user to check
+            limit: Maximum number of requests allowed
+            minutes: Time window in minutes
+            key: The key to use for rate limiting (e.g., skill name or category)
+
+        Raises:
+            RateLimitExceeded: If the user has exceeded the rate limit
+
+        Returns:
+            None: Always returns None if no exception is raised
+        """
+        if not user_id:
+            return None  # No rate limiting for users without ID
+
+        try:
+            redis = get_redis()
+            # Fixed-window counter: a unique key for this rate limit and user,
+            # reset when the key expires
+            rate_limit_key = f"rate_limit:{key}:{user_id}"
+
+            # Get the current count
+            count = await redis.incr(rate_limit_key)
+
+            # Set expiration if this is the first request
+            if count == 1:
+                await redis.expire(
+                    rate_limit_key, minutes * 60
+                )  # Convert minutes to seconds
+
+            # Check if user has exceeded the limit
+            if count > limit:
+                raise RateLimitExceeded(f"Rate limit exceeded for {key}")
+
+            return None
+
+        except RuntimeError:
+            # Redis client not initialized, log and allow the request
+            self.logger.info(f"Redis not initialized, skipping rate limit for {key}")
+            return None
+        except RedisError as e:
+            # Redis error, log and allow the request
+            self.logger.info(
+                f"Redis error in rate limiting: {e}, skipping rate limit for {key}"
+            )
+            return None
+
+    async def user_rate_limit_by_skill(
+        self, user_id: str, limit: int, minutes: int
+    ) -> None:
+        """Check if a user has exceeded the rate limit for this specific skill.
+
+        This uses the skill name as the rate limit key.
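+        Two skills that share a category therefore get independent counters,
+        because the Redis key is built from the skill's name.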
+
+        Args:
+            user_id: The ID of the user to check
+            limit: Maximum number of requests allowed
+            minutes: Time window in minutes
+
+        Raises:
+            RateLimitExceeded: If the user has exceeded the rate limit
+        """
+        return await self.user_rate_limit(user_id, limit, minutes, self.name)
+
+    async def user_rate_limit_by_category(
+        self, user_id: str, limit: int, minutes: int
+    ) -> None:
+        """Check if a user has exceeded the rate limit for this skill category.
+
+        This uses the skill category as the rate limit key, which means the limit
+        is shared across all skills in the same category.
+
+        Args:
+            user_id: The ID of the user to check
+            limit: Maximum number of requests allowed
+            minutes: Time window in minutes
+
+        Raises:
+            RateLimitExceeded: If the user has exceeded the rate limit
+        """
+        return await self.user_rate_limit(user_id, limit, minutes, self.category)
+
+    def _run(self, *args: Any, **kwargs: Any) -> Any:
+        raise NotImplementedError(
+            "Use _arun instead, IntentKit only supports asynchronous skill calls"
+        )
+
+    @staticmethod
+    def get_context() -> AgentContext:
+        runtime = get_runtime(AgentContext)
+        if runtime.context is None or not isinstance(runtime.context, AgentContext):
+            raise ValueError("No AgentContext found")
+        return runtime.context
+
+    def web3_client(self) -> Web3:
+        """Get a Web3 client for the skill."""
+        context = self.get_context()
+        agent = context.agent
+        network_id = agent.network_id
+
+        return get_web3_client(network_id, self.skill_store)
diff --git a/intentkit/skills/carv/README.md b/intentkit/skills/carv/README.md
new file mode 100644
index 00000000..59e4992d
--- /dev/null
+++ b/intentkit/skills/carv/README.md
@@ -0,0 +1,95 @@
+# CARV API Skills: Your Gateway to Blockchain & Crypto Data
+
+This collection of tools helps your AI agent connect to the [CARV API](https://docs.carv.io/d.a.t.a.-ai-framework/api-documentation) to get useful information about cryptocurrencies, blockchain activity, and the latest news in the space. Think of them as special abilities your agent can use!
+
+**Icon:** ![](skills/carv/carv.webp)
+**Tags:** AI, Data, Information, Analytics, Market Data
+
+## What Can Your Agent Do With These Skills?
+
+Here are the tools available:
+
+### 1. Fetch News (`FetchNewsTool`)
+
+* **What it does:** Gets the latest news articles from CARV.
+* **What you need to provide:** Nothing! Just ask for the news.
+* **Example Agent Interaction:** "Hey agent, what's the latest crypto news?"
+* **What it returns:** A list of news items, each with a:
+    * `title`: The headline of the news.
+    * `url`: A link to the full article.
+    * `card_text`: A short summary.
+    * *Example output snippet:*
+        ```json
+        {
+          "infos": [
+            {
+              "title": "Big Blockchain Conference Announced",
+              "url": "https://example.com/news/conference",
+              "card_text": "A major conference focusing on blockchain technology will be held next month..."
+            }
+            // ... more news items
+          ]
+        }
+        ```
+
+### 2. On-Chain Query (`OnchainQueryTool`)
+
+* **What it does:** Lets you ask questions in plain English about what's happening on blockchains like Ethereum, Base, Bitcoin, or Solana. CARV figures out how to get the answer from the blockchain data.
+* **What you need to provide:**
+    * `question` (text): Your question about blockchain data (e.g., "What was the biggest Bitcoin transaction yesterday?").
+    * `chain` (text): The blockchain you're interested in (e.g., "ethereum", "bitcoin").
+* **Example Agent Interaction:** "Agent, show me the top 5 most active wallets on Solana in the last week."
+* **What it returns:** A structured table of data that answers your question. If your question involves token amounts (like ETH or BTC), the tool automatically converts them into easy-to-read numbers (e.g., "1.5 ETH" instead of a very long number). + * *Example output snippet (conceptual for "biggest ETH transaction last 24h"):* + ```json + { + "data": { + "column_infos": ["transaction_hash", "from_address", "to_address", "value", "timestamp"], + "rows": [ + { + "items": ["0xabc...", "0x123...", "0x456...", "1500.75 ETH", "2023-10-27T10:30:00Z"] + } + // ... potentially more rows if your question implies multiple results + ] + }, + "query": "SELECT ... FROM ethereum.transactions ... ORDER BY value DESC LIMIT 1" // The SQL CARV generated + } + ``` + * If something goes wrong (e.g., you ask about an unsupported blockchain), it will return an error message. + +### 3. Token Information and Price (`TokenInfoAndPriceTool`) + +* **What it does:** Gets details about a specific cryptocurrency (like its name, symbol, what platform it's on) and its current price in USD. +* **What you need to provide:** + * `ticker` (text): The token's symbol (e.g., "BTC", "ETH", "SOL"). + * `token_name` (text): The full name of the token (e.g., "Bitcoin", "Ethereum"). + * `amount` (number, optional): If you want to know the value of a specific amount of the token, include this (e.g., if you provide `amount: 2.5` and `ticker: "BTC"`, it will tell you what 2.5 BTC is worth). +* **Example Agent Interaction:** "Agent, what's the current price of Ethereum? Also, what would 5 ETH be worth?" +* **What it returns:** Information about the token, including its price. If you provided an amount, it also tells you the total value. + * *Example output snippet (for `ticker: "ETH"`, `token_name: "Ethereum"`, `amount: 5`):* + ```json + { + "name": "Ethereum", + "symbol": "ETH", + "price": 2000.50, // Current price of 1 ETH in USD + "platform": {"id": "ethereum", "name": "Ethereum"}, + "categories": ["Smart Contract Platform"], + // ... other details + "additional_info": "5 ETH is worth $10002.50" // Calculated if amount was given + } + ``` + * If it can't find the token or its price, it will return an error. + +## How to Get Started (For Developers) + +These tools are designed to be integrated into AI agent systems. + +* **Configuration:** You'll need to set up how these tools access the CARV API. This usually involves: + * Enabling the CARV skills. + * Deciding if the tools can be used by everyone or just the agent owner. + * Providing a CARV API key. This key can either be supplied directly in your agent's settings or managed by the platform your agent runs on. + * Details on how to configure this are in a `schema.json` file within the `skills/carv/` directory. + +* **Using the Tools:** Your agent's code will call these tools, providing the necessary inputs (like the ticker for `TokenInfoAndPriceTool`). The tools will then contact the CARV API and return the information. + +These CARV skills make it easy for your AI agent to become knowledgeable about the crypto world! 
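+
+As a concrete illustration, a host application could load the enabled CARV tools through the module's async `get_skills` factory. This is a minimal sketch rather than the platform's actual wiring: `store` stands in for your `SkillStoreABC` implementation and the API key is a placeholder.
+
+```python
+from intentkit.skills.carv import get_skills
+
+config = {
+    "enabled": True,
+    "states": {
+        "fetch_news": "public",
+        "onchain_query": "private",
+        "token_info_and_price": "public",
+    },
+    "api_key_provider": "agent_owner",
+    "api_key": "your-carv-api-key",
+}
+
+# Must run inside an async context. Only tools whose state permits use in
+# this context are returned; is_private=True also enables owner-only skills.
+tools = await get_skills(config, is_private=True, store=store)
+```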
\ No newline at end of file diff --git a/intentkit/skills/carv/__init__.py b/intentkit/skills/carv/__init__.py new file mode 100644 index 00000000..67be62ca --- /dev/null +++ b/intentkit/skills/carv/__init__.py @@ -0,0 +1,121 @@ +import logging +from typing import List, Literal, Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.carv.base import CarvBaseTool +from intentkit.skills.carv.fetch_news import FetchNewsTool +from intentkit.skills.carv.onchain_query import OnchainQueryTool +from intentkit.skills.carv.token_info_and_price import TokenInfoAndPriceTool + +logger = logging.getLogger(__name__) + + +_cache: dict[str, CarvBaseTool] = {} + +_SKILL_NAME_TO_CLASS_MAP: dict[str, type[CarvBaseTool]] = { + "onchain_query": OnchainQueryTool, + "token_info_and_price": TokenInfoAndPriceTool, + "fetch_news": FetchNewsTool, +} + + +class SkillStates(TypedDict): + onchain_query: SkillState + token_info_and_price: SkillState + fetch_news: SkillState + + +class Config(SkillConfig): + enabled: bool + states: SkillStates # type: ignore + api_key_provider: Optional[Literal["agent_owner", "platform"]] + + # conditionally required + api_key: Optional[str] + + # optional + rate_limit_number: Optional[int] + rate_limit_minutes: Optional[int] + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[CarvBaseTool]: + """ + Factory function to create and return CARV skill tools based on the provided configuration. + + Args: + config: The configuration object for the CARV skill. + is_private: A boolean indicating whether the request is from a private context. + store: An instance of `SkillStoreABC`. + + Returns: + A list of `CarvBaseTool` instances. + """ + # Check if the entire category is disabled first + if not config.get("enabled", False): + return [] + + available_skills: List[CarvBaseTool] = [] + skill_states = config.get("states", {}) + + # Iterate through all known skills defined in the map + for skill_name in _SKILL_NAME_TO_CLASS_MAP: + state = skill_states.get( + skill_name, "disabled" + ) # Default to disabled if not in config + + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + # If enabled, get the skill instance using the factory function + skill_instance = get_carv_skill(skill_name, store) + if skill_instance: + available_skills.append(skill_instance) + else: + logger.warning(f"Could not instantiate known skill: {skill_name}") + + return available_skills + + +def get_carv_skill( + name: str, + store: SkillStoreABC, +) -> Optional[CarvBaseTool]: + """ + Factory function to retrieve a cached CARV skill instance by name. + + Args: + name: The name of the CARV skill to retrieve. + store: An instance of `SkillStoreABC`. + + Returns: + The requested `CarvBaseTool` instance if found and enabled, otherwise None. 
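+        Instances are cached module-wide, so repeated calls with the same
+        name return the same object.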
+    """
+
+    # Return from cache immediately if already exists
+    if name in _cache:
+        return _cache[name]
+
+    # Get the class from the map
+    skill_class = _SKILL_NAME_TO_CLASS_MAP.get(name)
+
+    if skill_class:
+        try:
+            # Instantiate the skill and add to cache
+            instance = skill_class(skill_store=store)  # type: ignore
+            _cache[name] = instance
+            return instance
+        except Exception as e:
+            logger.error(
+                f"Failed to instantiate Carv skill '{name}': {e}", exc_info=True
+            )
+            return None  # Failed to instantiate
+    else:
+        # This handles cases where a name might be in config but not in our map
+        logger.warning(f"Attempted to get unknown Carv skill: {name}")
+        return None
diff --git a/intentkit/skills/carv/base.py b/intentkit/skills/carv/base.py
new file mode 100644
index 00000000..93be50cf
--- /dev/null
+++ b/intentkit/skills/carv/base.py
@@ -0,0 +1,185 @@
+import logging
+from typing import Any, Dict, Optional, Tuple, Type
+
+import httpx  # Ensure httpx is installed: pip install httpx
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+logger = logging.getLogger(__name__)
+
+CARV_API_BASE_URL = "https://interface.carv.io"
+
+
+class CarvBaseTool(IntentKitSkill):
+    """Base class for CARV API tools."""
+
+    name: str = Field(description="Tool name")  # type: ignore
+    description: str = Field(description="Tool description")
+    args_schema: Type[BaseModel]  # type: ignore
+    skill_store: SkillStoreABC = Field(description="Skill store for data persistence")
+
+    @property
+    def category(self) -> str:
+        return "carv"
+
+    def get_api_key(self) -> str:
+        """
+        Retrieves the CARV API key based on the api_key_provider setting.
+
+        Returns:
+            The API key if found.
+
+        Raises:
+            ToolException: If the API key is not found or provider is invalid.
+        """
+        try:
+            context = self.get_context()
+            skill_config = context.agent.skill_config(self.category)
+            api_key_provider = skill_config.get("api_key_provider")
+            if api_key_provider == "agent_owner":
+                agent_api_key: Optional[str] = skill_config.get("api_key")
+                if agent_api_key:
+                    logger.debug(
+                        f"Using agent-specific CARV API key for skill {self.name} in category {self.category}"
+                    )
+                    return agent_api_key
+                raise ToolException(
+                    f"No agent-owned CARV API key found for skill '{self.name}' in category '{self.category}'."
+                )
+
+            elif api_key_provider == "platform":
+                system_api_key = self.skill_store.get_system_config("carv_api_key")
+                if system_api_key:
+                    logger.debug(
+                        f"Using system CARV API key for skill {self.name} in category {self.category}"
+                    )
+                    return system_api_key
+                raise ToolException(
+                    f"No platform-hosted CARV API key found for skill '{self.name}' in category '{self.category}'."
+                )
+
+            else:
+                raise ToolException(
+                    f"Invalid API key provider '{api_key_provider}' for skill '{self.name}'"
+                )
+
+        except Exception as e:
+            # Re-raise ToolException if it's already one, otherwise wrap
+            if isinstance(e, ToolException):
+                raise
+            raise ToolException(f"Failed to retrieve CARV API key: {str(e)}") from e
+
+    async def apply_rate_limit(self, context) -> None:
+        """
+        Applies rate limiting ONLY if specified in the agent's config ('skill_config').
+        Checks for 'rate_limit_number' and 'rate_limit_minutes'.
+        If not configured, NO rate limiting is applied.
+        Raises RateLimitExceeded if the configured limit is exceeded.
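+        Limits are keyed by the agent's ID and, because they go through
+        user_rate_limit_by_category, are shared across all CARV skills.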
+ """ + skill_config = context.agent.skill_config(self.category) + user_id = context.agent.id + + limit_num = skill_config.get("rate_limit_number") + limit_min = skill_config.get("rate_limit_minutes") + + # Apply limit ONLY if both values are present and valid (truthy check handles None and 0) + if limit_num and limit_min: + logger.debug( + f"Applying rate limit ({limit_num}/{limit_min} min) for user {user_id} on {self.name}" + ) + if user_id: + await self.user_rate_limit_by_category(user_id, limit_num, limit_min) + else: + # No valid agent configuration found, so do nothing. + logger.debug( + f"No agent rate limits configured for category '{self.category}'. Skipping rate limit for user {user_id}." + ) + + async def _call_carv_api( + self, + context, + endpoint: str, + method: str = "GET", + params: Optional[Dict[str, Any]] = None, + payload: Optional[Dict[str, Any]] = None, + ) -> Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]]: + """ + Makes a call to the CARV API and returns a tuple of (success, error). + + Args: + context: The skill context. + endpoint: The API endpoint path (e.g., "/ai-agent-backend/token_info"). + method: HTTP method ("GET", "POST", etc.). + params: Query parameters for the request. + payload: JSON payload for POST/PUT requests. + + Returns: + Tuple where the first element is the response data if successful, + and the second element is an error dict if an error occurred. + """ + + url = f"{CARV_API_BASE_URL}{endpoint}" + + try: + api_key = self.get_api_key() + + headers = { + "Authorization": api_key, + "Content-Type": "application/json", + } + + logger.debug( + f"Calling CARV API: {method} {url} with params {params}, payload {payload}" + ) + + async with httpx.AsyncClient(timeout=30.0) as client: + if method == "GET": + response = await client.get(url, headers=headers, params=params) + elif method == "POST": + response = await client.post( + url, headers=headers, json=payload, params=params + ) + else: + return None, {"error": f"Unsupported HTTP method: {method}"} + + # Do NOT raise for status here; always parse JSON + try: + response_json: dict[str, Any] = response.json() + except Exception as json_err: + err_msg = f"Failed to parse JSON response: {json_err}" + logger.error(err_msg) + return None, {"error": err_msg} + + logger.debug( + f"CARV API Response (status {response.status_code}): {response_json}" + ) + + # Check if response_json signals an error explicitly (custom API error) + if response.status_code >= 400 or "error" in response_json: + # Return full error info (including status code, body, etc.) 
+ return None, { + "error": response_json.get("error", "Unknown API error"), + "status_code": response.status_code, + "response": response_json, + "url": url, + "method": method, + "params": params, + "payload": payload, + } + + # Otherwise return the 'data' field if present, else full response + return response_json.get("data", response_json), None + + except Exception as e: + logger.error( + f"Error calling CARV API to {method} > {url}: {e}", exc_info=True + ) + return None, { + "error": str(e), + "url": url, + "method": method, + "params": params, + } diff --git a/intentkit/skills/carv/carv.webp b/intentkit/skills/carv/carv.webp new file mode 100644 index 00000000..300dc59d Binary files /dev/null and b/intentkit/skills/carv/carv.webp differ diff --git a/intentkit/skills/carv/fetch_news.py b/intentkit/skills/carv/fetch_news.py new file mode 100644 index 00000000..18250022 --- /dev/null +++ b/intentkit/skills/carv/fetch_news.py @@ -0,0 +1,90 @@ +import logging +from typing import Any, Dict, Type + +from pydantic import BaseModel + +from intentkit.skills.carv.base import CarvBaseTool + +logger = logging.getLogger(__name__) + + +class CarvNewsInput(BaseModel): + """ + Input schema for CARV News API. + This API endpoint does not require any specific parameters from the user. + """ + + pass + + +class FetchNewsTool(CarvBaseTool): + """ + Tool for fetching the latest news articles from the CARV API. + This tool retrieves a list of recent news items, each including a title, URL, and a short description (card_text). + It's useful for getting up-to-date information on various topics covered by CARV's news aggregation. + """ + + name: str = "carv_fetch_news" + description: str = ( + "Fetches the latest news articles from the CARV API. " + "Returns a list of news items, each with a title, URL, and a short summary (card_text)." + ) + args_schema: Type[BaseModel] = CarvNewsInput + + async def _arun( + self, # type: ignore + **kwargs: Any, + ) -> Dict[str, Any]: + """ + Fetches news from the CARV API and returns the response. + The expected successful response structure is a dictionary containing an "infos" key, + which holds a list of news articles. + Example: {"infos": [{"title": "...", "url": "...", "card_text": "..."}, ...]} + """ + context = self.get_context() + + try: + await self.apply_rate_limit(context) + + result, error = await self._call_carv_api( + context=context, + endpoint="/ai-agent-backend/news", + method="GET", + ) + + if error is not None or result is None: + logger.error(f"Error returned from CARV API (News): {error}") + return { + "error": True, + "error_type": "APIError", + "message": "Failed to fetch news from CARV API.", + "details": error, # error is the detailed error dict from _call_carv_api + } + + # _call_carv_api returns response_json.get("data", response_json) on success. + # For this endpoint, the "data" field should be {"infos": [...]}. + # So, 'result' should be {"infos": [...]}. 
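+            # e.g. {"infos": [{"title": "...", "url": "...", "card_text": "..."}]}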
+ if "infos" not in result or not isinstance(result.get("infos"), list): + logger.warning( + f"CARV API (News) response did not contain 'infos' list as expected: {result}" + ) + return { + "error": True, + "error_type": "UnexpectedResponseFormat", + "message": "News data from CARV API is missing the 'infos' list or has incorrect format.", + "details": result, + } + + # Successfully fetched and validated news data + return result # This will be {"infos": [...]} + + except Exception as e: + logger.error( + f"An unexpected error occurred while fetching news: {e}", exc_info=True + ) + return { + "error": True, + "error_type": type(e).__name__, + "message": "An unexpected error occurred while processing the news request.", + "details": str(e), + } diff --git a/intentkit/skills/carv/onchain_query.py b/intentkit/skills/carv/onchain_query.py new file mode 100644 index 00000000..48cfee56 --- /dev/null +++ b/intentkit/skills/carv/onchain_query.py @@ -0,0 +1,162 @@ +import logging +from decimal import Decimal, InvalidOperation +from typing import Any, Dict, Literal, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.carv.base import CarvBaseTool + +logger = logging.getLogger(__name__) + + +class CarvInput(BaseModel): + """ + Input schema for CARV SQL Query API. + Defines parameters controllable by the user when invoking the tool. + """ + + question: str = Field( + ..., + description="The question to query on-chain data.", + ) + chain: Literal["ethereum", "base", "bitcoin", "solana"] = Field( + ..., + description="supported chain is ethereum, base, bitcoin, solana", + ) + + +class OnchainQueryTool(CarvBaseTool): + """ + Tool for querying on-chain data using natural language via the CARV SQL Query API. + + This tool allows you to ask questions about blockchain data in plain English, and it will return + the relevant information. Behind the scenes, it uses the CARV API to convert your question into a SQL query + and retrieve the results. + + Supported Blockchains: Ethereum, Base, Bitcoin, and Solana. + + If the question is about a blockchain other than the ones listed above, or is not a clear question, the + tool will return an error. + """ + + name: str = "carv_onchain_query" + description: str = ( + "Query blockchain data from Ethereum, Base, Bitcoin, or Solana using natural language. " + "This tool provides access to detailed metrics including block information (timestamps, hashes, miners, gas used/limits), " + "transaction details (hashes, sender/receiver addresses, amounts, gas prices), and overall network utilization. " + "It supports aggregate analytics such as daily transaction counts, average gas prices, top wallet activity, and blockchain growth trends. " + "You can filter results by time range, address type, transaction value, and other parameters.\n\n" + "IMPORTANT Rules:\n" + "- Only Ethereum, Base, Bitcoin, and Solana are supported.\n" + "- Always infer the target blockchain from the user's query.\n" + "- If an unsupported blockchain is requested, clearly explain the limitation.\n" + "- Convert user input into a specific and actionable natural language query (e.g., " + '"What\'s the most active address on Ethereum over the past 24 hours?" 
or ' + '"Show the largest ETH transaction in the last 30 days").\n' + "- Respond in clear, concise natural language using only the data returned by the tool.\n" + "- Avoid markdown or bullet points unless explicitly requested.\n" + "- ETH values are denominated in 18 decimals—consider 10^18 when interpreting amounts.\n" + "- Never fabricate or infer data beyond what the tool provides." + ) + args_schema: Type[BaseModel] = CarvInput + + async def _arun( + self, + question: str, + chain: str, # type: ignore + **kwargs: Any, + ) -> Dict[str, Any]: + """ + Queries the CARV SQL Query API and returns the response. + """ + context = self.get_context() + try: + await self.apply_rate_limit(context) + + payload = {"question": question} + + result, error = await self._call_carv_api( + context=context, + endpoint="/ai-agent-backend/sql_query_by_llm", + method="POST", + payload=payload, + ) + + if error is not None or result is None: + logger.error(f"Error returned from CARV API: {error}") + return { + "error": True, + "error_type": "APIError", + "message": "Failed to fetch data from CARV API.", + "details": error, + } + + _normalize_unit(result, chain) + return {"success": True, **result} + + except Exception as e: + logger.error(f"An unexpected error occurred: {e}", exc_info=True) + return { + "error": True, + "error_type": type(e).__name__, + "message": "An unexpected error occurred.", + "details": str(e), + } + + +def _normalize_unit(response_data: Dict[str, Any], chain: str) -> None: + """ + Normalizes the 'value' field in on-chain response data to a human-readable format. + Adds the corresponding token ticker after the value. + + Supported chains: + - Ethereum: 10^18 -> ETH + - Base: 10^18 -> ETH + - Solana: 10^9 -> SOL + - Bitcoin: 10^8 -> BTC + """ + column_infos = response_data.get("column_infos", []) + rows = response_data.get("rows", []) + + if "value" not in column_infos: + return + + value_index = column_infos.index("value") + + chain = chain.lower() + if chain == "ethereum": + divisor = Decimal("1e18") + ticker = "ETH" + elif chain == "base": + divisor = Decimal("1e18") + ticker = "ETH" + elif chain == "solana": + divisor = Decimal("1e9") + ticker = "SOL" + elif chain == "bitcoin": + divisor = Decimal("1e8") + ticker = "BTC" + else: + logger.warning(f"Unsupported chain '{chain}' for unit normalization.") + return + + for row in rows: + items = row.get("items", []) + if len(items) > value_index: + original_value = items[value_index] + try: + normalized = str(original_value).strip() + try: + value_decimal = Decimal(normalized) + except InvalidOperation: + value_decimal = Decimal.from_float(float(normalized)) + + converted = value_decimal / divisor + formatted_value = ( + format(converted, "f").rstrip("0").rstrip(".") + if "." 
in format(converted, "f")
+                    else format(converted, "f")
+                )
+                items[value_index] = f"{formatted_value} {ticker}"
+            except Exception as e:
+                logger.warning(f"Unable to normalize value '{original_value}': {e}")
diff --git a/intentkit/skills/carv/schema.json b/intentkit/skills/carv/schema.json
new file mode 100644
index 00000000..8db5dd3f
--- /dev/null
+++ b/intentkit/skills/carv/schema.json
@@ -0,0 +1,137 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "title": "CARV",
+  "description": "Configuration for the CARV skill.",
+  "type": "object",
+  "x-icon": "https://ai.service.crestal.dev/skills/carv/carv.webp",
+  "x-tags": [
+    "AI",
+    "Data",
+    "Information",
+    "Analytics",
+    "Market Data"
+  ],
+  "properties": {
+    "enabled": {
+      "type": "boolean",
+      "description": "Enable or disable the CARV skill.",
+      "default": false
+    },
+    "states": {
+      "type": "object",
+      "title": "Skill States",
+      "description": "Enable/disable specific tools for CARV",
+      "properties": {
+        "onchain_query": {
+          "type": "string",
+          "title": "On-Chain Query",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Allows you to query on-chain data in natural language. Behind the scenes, CARV uses an LLM to interpret the natural language input and convert it into a SQL query against the supported chain schemas.",
+          "default": "public"
+        },
+        "token_info_and_price": {
+          "type": "string",
+          "title": "Token Information and Price",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Fetches detailed information and the current USD price of a cryptocurrency token from the CARV API using its ticker symbol (e.g., 'eth', 'btc'), returning metadata such as name, symbol, platform, categories, and contract addresses. Useful for understanding a token's identity, ecosystem, and market value, and for obtaining comprehensive token data with live pricing.",
+          "default": "public"
+        },
+        "fetch_news": {
+          "type": "string",
+          "title": "Fetch News",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Retrieves a list of recent news items, each including a title, URL, and a short description.",
+          "default": "disabled"
+        }
+      }
+    },
+    "api_key_provider": {
+      "type": "string",
+      "title": "API Key Provider",
+      "description": "Provider of the API key",
+      "enum": [
+        "agent_owner",
+        "platform"
+      ],
+      "x-enum-title": [
+        "Owner Provided",
+        "Nation Hosted"
+      ],
+      "default": "platform"
+    }
+  },
+  "required": [
+    "enabled",
+    "states"
+  ],
+  "if": {
+    "allOf": [
+      {
+        "properties": {
+          "enabled": {
+            "const": true
+          }
+        }
+      },
+      {
+        "properties": {
+          "api_key_provider": {
+            "const": "agent_owner"
+          }
+        }
+      }
+    ]
+  },
+  "then": {
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "title": "CARV API Key",
+        "x-link": "[Get your API key](https://docs.carv.io/d.a.t.a.-ai-framework/api-documentation#authentication)",
+        "x-sensitive": true,
+        "description": "API Key for authenticating with the CARV API."
+      },
+      "rate_limit_number": {
+        "type": "integer",
+        "title": "Rate Limit Number",
+        "description": "Number of requests allowed per time window."
+      },
+      "rate_limit_minutes": {
+        "type": "integer",
+        "title": "Rate Limit Minutes",
+        "description": "Time window in minutes for rate limiting."
+ } + }, + "required": [ + "api_key" + ] + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/carv/token_info_and_price.py b/intentkit/skills/carv/token_info_and_price.py new file mode 100644 index 00000000..aab6919e --- /dev/null +++ b/intentkit/skills/carv/token_info_and_price.py @@ -0,0 +1,108 @@ +import logging +import re +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.carv.base import CarvBaseTool + +logger = logging.getLogger(__name__) + + +class TokenInfoAndPriceInput(BaseModel): + ticker: str = Field( + description="The token's ticker symbol (e.g., 'eth', 'btc', 'sol', 'xrp')." + ) + token_name: str = Field( + description="The token name (e.g ethereum, bitcoin, solana, ripple)" + ) + amount: Optional[float] = Field( + description="(optional) amount of token, fill this if user asking for how much x amount of specific token worth" + ) + + +class TokenInfoAndPriceTool(CarvBaseTool): + """ + Fetches detailed information and the current USD price of a cryptocurrency token from the CARV API, + given its ticker symbol (e.g., 'eth', 'btc', 'aave'). + Returns metadata including the token's name, symbol, platform, category tags, and contract addresses + Useful for understanding a token's identity, ecosystem, and market valu + Use this tool when you need comprehensive token data and live pricing from CARV + """ + + name: str = "carv_token_info_and_price" + description: str = ( + "Fetches detailed information and the current USD price of a cryptocurrency token from the CARV API, " + "given its ticker symbol (e.g., 'eth', 'btc', 'aave'). or token name" + "Returns metadata including the token's name, symbol, platform, category tags, and contract addresses " + "Useful for understanding a token's identity, ecosystem, and market value" + "Use this tool when you need comprehensive token data and live pricing from CARV." + ) + args_schema: Type[BaseModel] = TokenInfoAndPriceInput + + async def _arun( + self, + ticker: str, + token_name: str, + amount: Optional[float] = 1, # type: ignore + **kwargs: Any, + ) -> Dict[str, Any]: + if not ticker: + return { + "error": True, + "message": "ticker is null", + "suggestion": "ask the user for the specific ticker, and fill the `ticker` field when calling this tool", + } + + context = self.get_context() + params = {"ticker": ticker} + path = "/ai-agent-backend/token_info" + method = "GET" + + result, error = await self._call_carv_api( + context=context, + endpoint=path, + params=params, + method=method, + ) + + if error is not None or result is None: + logger.error(f"Error returned from CARV API: {error}") + return { + "error": True, + "error_type": "APIError", + "message": "Failed to fetch token info from CARV API.", + "details": error, + } + + # retry with token_name if price is 0 or missing + if "price" not in result or result["price"] == 0: + fallback_ticker = re.sub(r"\s+", "-", token_name.strip().lower()) + logger.info( + f"Fallback triggered. 
Trying with fallback ticker: {fallback_ticker}" + ) + + fallback_params = {"ticker": fallback_ticker} + result, error = await self._call_carv_api( + context=context, + endpoint=path, + params=fallback_params, + method=method, + ) + + if error is not None or result is None or result.get("price") == 0: + logger.error(f"Fallback error returned from CARV API: {error}") + return { + "error": True, + "error_type": "APIError", + "message": "Failed to fetch token info from CARV API with fallback.", + "details": error, + } + + if "price" in result and amount is not None: + return { + "additional_info": f"{amount} {ticker.upper()} is worth ${round(amount * result['price'], 2)}", + **result, + } + + return result diff --git a/intentkit/skills/casino/README.md b/intentkit/skills/casino/README.md new file mode 100644 index 00000000..fa5635c7 --- /dev/null +++ b/intentkit/skills/casino/README.md @@ -0,0 +1,254 @@ +# Casino Skills for IntentKit + +The Casino skills provide comprehensive gambling and gaming capabilities for AI agents, enabling interactive card games, dice games, and casino-style entertainment. + +## đŸŽ¯ Overview + +This skill set includes three main functions: +- **Deck Shuffling**: Create and shuffle card decks with customizable options +- **Card Drawing**: Draw cards with visual PNG/SVG images +- **Quantum Dice Rolling**: True random dice using quantum number generation + +## đŸ› ī¸ Available Skills + +### 1. `casino_deck_shuffle` +Creates and shuffles new card decks with options for multiple decks and jokers. + +**Parameters:** +- `deck_count` (int): Number of decks (1-6, default: 1) +- `jokers_enabled` (bool): Include jokers (default: false) + +### 2. `casino_deck_draw` +Draws cards from the current deck with detailed information and images. + +**Parameters:** +- `count` (int): Number of cards to draw (1-10, default: 1) + +### 3. `casino_dice_roll` +Rolls quantum random dice with visual representation. + +**Parameters:** +- `dice_count` (int): Number of dice to roll (1-10, default: 1) + +## 🎮 Game Examples + +### 🃏 Blackjack (21) + +**Setup:** +``` +Agent: I'll set up a Blackjack game for you! Let me shuffle a fresh deck. +[Calls casino_deck_shuffle with deck_count=1] +``` + +**Dealing Cards:** +``` +Agent: Dealing your cards... +[Calls casino_deck_draw with count=2 for player] +[Calls casino_deck_draw with count=1 for dealer, 1 face down] + +Result: You have: Kingâ™ ī¸ (image) + 7â™Ĩī¸ (image) = 17 +Dealer shows: Aceâ™Ŗī¸ (image) + [hidden card] +``` + +**Player Actions:** +``` +User: "Hit me" +Agent: [Calls casino_deck_draw with count=1] +Result: You drew 3â™Ļī¸ (image). Total: 20 + +User: "Stand" +Agent: Revealing dealer's hidden card... +[Calls casino_deck_draw for dealer's hidden card and additional cards as needed] +``` + +### 🎲 Craps + +**Come Out Roll:** +``` +Agent: Welcome to the Craps table! Place your bets and let's roll! +[Calls casino_dice_roll with dice_count=2] + +Result: 🎲 Rolled: ⚂ ⚃ (3 + 4 = 7) - WINNER! +The shooter made their point on the come out roll! +``` + +**Point Phase:** +``` +Agent: Point is 6. Rolling again... +[Calls casino_dice_roll with dice_count=2] + +Result: 🎲 Rolled: ⚁ ⚄ (2 + 5 = 7) - Seven out! +New shooter coming up. +``` + +### 🃏 Texas Hold'em Poker + +**Pre-flop:** +``` +Agent: Dealing pocket cards... +[Calls casino_deck_shuffle with deck_count=1] +[Calls casino_deck_draw with count=2 for each player] + +Your cards: Aâ™ ī¸ (image) Kâ™ ī¸ (image) - Ace-King suited! +``` + +**The Flop:** +``` +Agent: Dealing the flop... 
+[Calls casino_deck_draw with count=3] + +Flop: Aâ™Ļī¸ (image) Kâ™Ŗī¸ (image) 7â™ ī¸ (image) +You have two pair - Aces and Kings! +``` + +**Turn and River:** +``` +Agent: Turn card... +[Calls casino_deck_draw with count=1] +Turn: Qâ™ ī¸ (image) + +River card... +[Calls casino_deck_draw with count=1] +River: Jâ™ ī¸ (image) + +Final board: Aâ™Ļī¸ Kâ™Ŗī¸ 7â™ ī¸ Qâ™ ī¸ Jâ™ ī¸ +You have a straight! A-K-Q-J-10 +``` + +### đŸŽ¯ Yahtzee + +**First Roll:** +``` +Agent: Rolling all 5 dice for Yahtzee! +[Calls casino_dice_roll with dice_count=5] + +Result: ⚀ ⚃ ⚃ ⚄ ⚅ (1, 4, 4, 5, 6) +Which dice would you like to keep? +``` + +**Rerolls:** +``` +User: "Keep the two 4s" +Agent: Rerolling 3 dice... +[Calls casino_dice_roll with dice_count=3] + +Result: ⚃ ⚃ ⚃ (4, 4, 4) - Combined with your kept dice: ⚃ ⚃ ⚃ ⚃ ⚃ +YAHTZEE! Five of a kind - 4s! 🎉 +``` + +### 🎲 Liar's Dice + +**Round Start:** +``` +Agent: Everyone roll your dice under the cup! +[Each player calls casino_dice_roll with dice_count=5] + +Your dice (hidden): ⚁ ⚁ ⚂ ⚄ ⚅ (2, 2, 3, 5, 6) +``` + +**Bidding:** +``` +User: "I bid three 2s" +Agent: Player 2 bids four 3s... +Player 3 calls "Liar!" on Player 2. + +Revealing all dice... +[Shows all dice rolled by each player] +Total 3s on table: 2 dice +Player 2 bid four 3s but there were only 2. Player 2 loses a die! +``` + +### 🃏 War (Card Game) + +**Battle:** +``` +Agent: Time for War! Each player draws one card. +[Calls casino_deck_draw with count=1 for each player] + +You: Qâ™Ļī¸ (image) - Queen of Diamonds +Opponent: 8â™Ŗī¸ (image) - Eight of Clubs + +Queen beats 8 - You win this round! +``` + +**War Declaration:** +``` +Agent: Both players drew Kings! It's WAR! +[Calls casino_deck_draw with count=4 for each player - 3 face down, 1 face up] + +Your war card: Aâ™ ī¸ (image) - Ace of Spades +Opponent's war card: 10â™Ĩī¸ (image) - Ten of Hearts + +Ace beats 10 - You win the entire war pile! +``` + +## 🎨 Visual Features + +### Card Images +Each card comes with both PNG and SVG images: +```json +{ + "value": "ACE", + "suit": "SPADES", + "code": "AS", + "image": "https://deckofcardsapi.com/static/img/AS.png", + "svg_image": "https://deckofcardsapi.com/static/img/AS.svg" +} +``` + +### Dice Visualization +Dice results include emoji representation: +- ⚀ (1) ⚁ (2) ⚂ (3) ⚃ (4) ⚄ (5) ⚅ (6) + +### Game State Persistence +- Deck state maintained between draws +- Remaining cards tracked automatically +- Each agent has independent game sessions + +## đŸ›Ąī¸ Built-in Features + +### Rate Limiting +- **Deck Shuffle**: 20 requests/minute +- **Card Draw**: 30 requests/minute +- **Dice Roll**: 15 requests/minute + +### Error Handling +- Automatic deck creation if none exists +- Graceful API failure handling +- Input validation and sanitization + +### Quantum Randomness +Dice rolling uses true quantum random number generation from QRandom.io for authentic unpredictability, complete with quantum signatures for verification. + +## 🚀 Getting Started + +1. Enable Casino skills in your agent configuration +2. Set skill states (public/private/disabled) for each function +3. Start gaming! The agent will automatically manage decks and game state + +## đŸŽĒ Advanced Gaming Scenarios + +### Multi-Table Casino +``` +Agent: Welcome to the Nation Casino! I'm managing 3 tables: +- Table 1: Blackjack (6-deck shoe) +- Table 2: Poker Tournament +- Table 3: Craps with side bets + +Which table interests you? 
+``` + +### Tournament Mode +``` +Agent: Poker Tournament - Round 1 of 3 +Blinds: 50/100 chips +[Manages multiple hands, tracks chip counts, advances rounds] +``` + +### Live Dealer Experience +``` +Agent: 🎭 Good evening! I'm your live dealer tonight. +[Maintains casino atmosphere, explains rules, manages multiple players] +``` + +The Casino skills transform your AI agent into a comprehensive gaming companion capable of hosting authentic casino experiences with visual cards, quantum dice, and persistent game management! 🎰🎲🃏 diff --git a/intentkit/skills/casino/__init__.py b/intentkit/skills/casino/__init__.py new file mode 100644 index 00000000..2eaf869c --- /dev/null +++ b/intentkit/skills/casino/__init__.py @@ -0,0 +1,97 @@ +"""Casino skills for card games and dice rolling.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.casino.base import CasinoBaseTool +from intentkit.skills.casino.deck_draw import CasinoDeckDraw +from intentkit.skills.casino.deck_shuffle import CasinoDeckShuffle +from intentkit.skills.casino.dice_roll import CasinoDiceRoll + +# Cache skills at the system level, because they are stateless +_cache: dict[str, CasinoBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + deck_shuffle: SkillState + deck_draw: SkillState + dice_roll: SkillState + + +class Config(SkillConfig): + """Configuration for Casino skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[CasinoBaseTool]: + """Get all Casino skills. + + Args: + config: The configuration for Casino skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Casino skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_casino_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_casino_skill( + name: str, + store: SkillStoreABC, +) -> CasinoBaseTool: + """Get a Casino skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Casino skill + """ + if name == "deck_shuffle": + if name not in _cache: + _cache[name] = CasinoDeckShuffle( + skill_store=store, + ) + return _cache[name] + elif name == "deck_draw": + if name not in _cache: + _cache[name] = CasinoDeckDraw( + skill_store=store, + ) + return _cache[name] + elif name == "dice_roll": + if name not in _cache: + _cache[name] = CasinoDiceRoll( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown Casino skill: {name}") diff --git a/intentkit/skills/casino/base.py b/intentkit/skills/casino/base.py new file mode 100644 index 00000000..f7a1c518 --- /dev/null +++ b/intentkit/skills/casino/base.py @@ -0,0 +1,23 @@ +"""Base class for Casino tools.""" + +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class CasinoBaseTool(IntentKitSkill): + """Base class for Casino tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "casino" diff --git a/intentkit/skills/casino/casino.png b/intentkit/skills/casino/casino.png new file mode 100644 index 00000000..24cae6d4 Binary files /dev/null and b/intentkit/skills/casino/casino.png differ diff --git a/intentkit/skills/casino/deck_draw.py b/intentkit/skills/casino/deck_draw.py new file mode 100644 index 00000000..ea8e9246 --- /dev/null +++ b/intentkit/skills/casino/deck_draw.py @@ -0,0 +1,130 @@ +"""Card drawing skill using Deck of Cards API.""" + +import logging +from typing import Type + +try: + import httpx +except ImportError: + raise ImportError( + "httpx is required for Casino skills. Install it with: pip install httpx" + ) +from pydantic import BaseModel, Field + +from intentkit.skills.casino.base import CasinoBaseTool +from intentkit.skills.casino.utils import ( + CURRENT_DECK_KEY, + DECK_STORAGE_KEY, + ENDPOINTS, + RATE_LIMITS, + format_card_info, + validate_card_count, +) + +NAME = "casino_deck_draw" +PROMPT = ( + "Draw cards from the current deck. If no deck exists, a new one will be created. " + "Returns detailed card information including images." +) + +logger = logging.getLogger(__name__) + + +class CasinoDeckDrawInput(BaseModel): + """Input for CasinoDeckDraw tool.""" + + count: int = Field(default=1, description="Number of cards to draw (1-10)") + + +class CasinoDeckDraw(CasinoBaseTool): + """Tool for drawing cards from a deck. + + This tool uses the Deck of Cards API to draw cards from the current deck. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
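+
+    Example (illustrative):
+        Drawing one card from a fresh deck returns a payload like
+        {"success": True, "cards_drawn": [{"value": "ACE", "suit": "SPADES",
+        "code": "AS", ...}], "remaining_cards": 51, "deck_id": "...", ...}.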
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = CasinoDeckDrawInput + + async def _arun(self, count: int = 1, **kwargs) -> dict: + try: + context = self.get_context() + + # Apply rate limit using built-in user_rate_limit method + rate_config = RATE_LIMITS["deck_draw"] + await self.user_rate_limit( + context.user_id or context.agent_id, + rate_config["max_requests"], + rate_config["interval"] // 60, # Convert to minutes + "deck_draw", + ) + + # Validate count + count = validate_card_count(count) + + # Get current deck info + deck_info = await self.skill_store.get_agent_skill_data( + context.agent_id, DECK_STORAGE_KEY, CURRENT_DECK_KEY + ) + + deck_id = "new" # Default to new deck + if deck_info and deck_info.get("deck_id"): + deck_id = deck_info["deck_id"] + + # Build API URL + url = ENDPOINTS["deck_draw"].format(deck_id=deck_id) + params = {"count": count} + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + + if response.status_code == 200: + data = response.json() + + if data.get("success"): + # Update deck info + if deck_id == "new": + deck_info = { + "deck_id": data["deck_id"], + "deck_count": 1, + "jokers_enabled": False, + "remaining": data["remaining"], + "shuffled": True, + } + else: + deck_info["remaining"] = data["remaining"] + + await self.skill_store.save_agent_skill_data( + context.agent_id, + DECK_STORAGE_KEY, + CURRENT_DECK_KEY, + deck_info, + ) + + # Format card information with images + cards = [ + format_card_info(card) for card in data.get("cards", []) + ] + + return { + "success": True, + "cards_drawn": cards, + "remaining_cards": data["remaining"], + "deck_id": data["deck_id"], + "message": f"Drew {len(cards)} card{'s' if len(cards) > 1 else ''} " + f"({data['remaining']} remaining)", + } + else: + return {"success": False, "error": "Failed to draw cards"} + else: + logger.error(f"Deck API error: {response.status_code}") + return {"success": False, "error": "Failed to draw cards"} + + except Exception as e: + logger.error(f"Error drawing cards: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/casino/deck_shuffle.py b/intentkit/skills/casino/deck_shuffle.py new file mode 100644 index 00000000..71b5e0fd --- /dev/null +++ b/intentkit/skills/casino/deck_shuffle.py @@ -0,0 +1,118 @@ +"""Deck shuffling skill using Deck of Cards API.""" + +import logging +from typing import Type + +try: + import httpx +except ImportError: + raise ImportError( + "httpx is required for Casino skills. Install it with: pip install httpx" + ) +from pydantic import BaseModel, Field + +from intentkit.skills.casino.base import CasinoBaseTool +from intentkit.skills.casino.utils import ( + CURRENT_DECK_KEY, + DECK_STORAGE_KEY, + ENDPOINTS, + RATE_LIMITS, + validate_deck_count, +) + +NAME = "casino_deck_shuffle" +PROMPT = ( + "Create and shuffle a new deck of cards. You can specify the number of decks " + "to use (default is 1) and optionally include jokers." +) + +logger = logging.getLogger(__name__) + + +class CasinoDeckShuffleInput(BaseModel): + """Input for CasinoDeckShuffle tool.""" + + deck_count: int = Field( + default=1, description="Number of decks to use (1-6, default 1)" + ) + jokers_enabled: bool = Field( + default=False, description="Whether to include jokers in the deck" + ) + + +class CasinoDeckShuffle(CasinoBaseTool): + """Tool for creating and shuffling card decks. + + This tool uses the Deck of Cards API to create new shuffled decks. 
+ + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = CasinoDeckShuffleInput + + async def _arun( + self, deck_count: int = 1, jokers_enabled: bool = False, **kwargs + ) -> dict: + try: + context = self.get_context() + + # Apply rate limit using built-in user_rate_limit method + rate_config = RATE_LIMITS["deck_shuffle"] + await self.user_rate_limit( + context.user_id or context.agent_id, + rate_config["max_requests"], + rate_config["interval"] // 60, # Convert to minutes + "deck_shuffle", + ) + + # Validate deck count + deck_count = validate_deck_count(deck_count) + + # Build API URL and parameters + url = ENDPOINTS["deck_new_shuffle"] + params = {"deck_count": deck_count} + + if jokers_enabled: + params["jokers_enabled"] = "true" + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + + if response.status_code == 200: + data = response.json() + + # Store deck info for the agent + deck_info = { + "deck_id": data["deck_id"], + "deck_count": deck_count, + "jokers_enabled": jokers_enabled, + "remaining": data["remaining"], + "shuffled": data["shuffled"], + } + + await self.skill_store.save_agent_skill_data( + context.agent_id, DECK_STORAGE_KEY, CURRENT_DECK_KEY, deck_info + ) + + return { + "success": True, + "deck_id": data["deck_id"], + "deck_count": deck_count, + "jokers_enabled": jokers_enabled, + "remaining_cards": data["remaining"], + "message": f"Created and shuffled {'a new deck' if deck_count == 1 else f'{deck_count} decks'} " + f"with {data['remaining']} cards" + + (" (including jokers)" if jokers_enabled else ""), + } + else: + logger.error(f"Deck API error: {response.status_code}") + return {"success": False, "error": "Failed to create deck"} + + except Exception as e: + logger.error(f"Error shuffling deck: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/casino/dice_roll.py b/intentkit/skills/casino/dice_roll.py new file mode 100644 index 00000000..81af5b39 --- /dev/null +++ b/intentkit/skills/casino/dice_roll.py @@ -0,0 +1,102 @@ +"""Quantum dice rolling skill using QRandom API.""" + +import logging +from typing import Type + +try: + import httpx +except ImportError: + raise ImportError( + "httpx is required for Casino skills. Install it with: pip install httpx" + ) +from pydantic import BaseModel, Field + +from intentkit.skills.casino.base import CasinoBaseTool +from intentkit.skills.casino.utils import ( + ENDPOINTS, + RATE_LIMITS, + get_dice_visual, + validate_dice_count, +) + +NAME = "casino_dice_roll" +PROMPT = ( + "Roll quantum random dice using true quantum randomness. " + "Can roll multiple 6-sided dice at once for games." +) + +logger = logging.getLogger(__name__) + + +class CasinoDiceRollInput(BaseModel): + """Input for CasinoDiceRoll tool.""" + + dice_count: int = Field(default=1, description="Number of dice to roll (1-10)") + + +class CasinoDiceRoll(CasinoBaseTool): + """Tool for rolling quantum random dice. + + This tool uses the QRandom API to generate truly random dice rolls + using quantum randomness. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
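+
+    Example (illustrative):
+        Rolling with dice_count=2 might return dice_results=[3, 5],
+        dice_visual=["⚂", "⚄"], and total=8.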
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = CasinoDiceRollInput + + async def _arun(self, dice_count: int = 1, **kwargs) -> dict: + try: + context = self.get_context() + + # Apply rate limit using built-in user_rate_limit method + rate_config = RATE_LIMITS["dice_roll"] + await self.user_rate_limit( + context.user_id or context.agent_id, + rate_config["max_requests"], + rate_config["interval"] // 60, # Convert to minutes + "dice_roll", + ) + + # Validate dice count + dice_count = validate_dice_count(dice_count) + + # Build API URL + url = ENDPOINTS["dice_roll"] + params = {"n": dice_count} + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + + if response.status_code == 200: + data = response.json() + + dice_results = data.get("dice", []) + total = sum(dice_results) + + # Generate dice emoji representation + dice_visual = get_dice_visual(dice_results) + + return { + "success": True, + "dice_results": dice_results, + "dice_visual": dice_visual, + "total": total, + "dice_count": len(dice_results), + "quantum_signature": data.get("signature", ""), + "quantum_id": data.get("id", ""), + "message": f"Rolled {len(dice_results)} dice: {' '.join(dice_visual)} " + f"(Total: {total})", + } + else: + logger.error(f"QRandom API error: {response.status_code}") + return {"success": False, "error": "Failed to roll dice"} + + except Exception as e: + logger.error(f"Error rolling dice: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/casino/schema.json b/intentkit/skills/casino/schema.json new file mode 100644 index 00000000..b05e143b --- /dev/null +++ b/intentkit/skills/casino/schema.json @@ -0,0 +1,78 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Casino", + "description": "Casino gaming skills including card decks and quantum dice rolling for interactive games with users", + "x-icon": "https://ai.service.crestal.dev/skills/casino/casino.png", + "x-tags": [ + "Gaming", + "Entertainment" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "deck_shuffle": { + "type": "string", + "title": "Shuffle Card Deck", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Create and shuffle new card decks with customizable options", + "default": "disabled" + }, + "deck_draw": { + "type": "string", + "title": "Draw Cards", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Draw cards from the current deck with detailed card information and images", + "default": "disabled" + }, + "dice_roll": { + "type": "string", + "title": "Roll Quantum Dice", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Roll dice using quantum random number generation for true randomness", + "default": "disabled" + } + }, + "description": "States for each Casino skill" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} diff --git a/intentkit/skills/casino/utils.py b/intentkit/skills/casino/utils.py new file mode 100644 index 00000000..f818279f --- 
/dev/null +++ b/intentkit/skills/casino/utils.py @@ -0,0 +1,107 @@ +""" +Casino Skills Utilities + +Common constants, URLs, and helper functions for Casino skills. +""" + +# API URLs +DECK_OF_CARDS_API_BASE = "https://www.deckofcardsapi.com/api/deck" +QRANDOM_API_BASE = "https://qrandom.io/api/random" + +# API Endpoints +ENDPOINTS = { + "deck_new_shuffle": f"{DECK_OF_CARDS_API_BASE}/new/shuffle/", + "deck_draw": f"{DECK_OF_CARDS_API_BASE}/{{deck_id}}/draw/", + "dice_roll": f"{QRANDOM_API_BASE}/dice", +} + +# Rate Limits (requests per minute) +RATE_LIMITS = { + "deck_shuffle": {"max_requests": 20, "interval": 60}, + "deck_draw": {"max_requests": 30, "interval": 60}, + "dice_roll": {"max_requests": 15, "interval": 60}, +} + +# Storage Keys +DECK_STORAGE_KEY = "casino_deck" +CURRENT_DECK_KEY = "current_deck" + +# Dice visual representation +DICE_EMOJI = ["⚀", "⚁", "⚂", "⚃", "⚄", "⚅"] + +# Card back image URL for display +CARD_BACK_IMAGE = "https://www.deckofcardsapi.com/static/img/back.png" + +# Validation limits +MAX_DECK_COUNT = 6 +MIN_DECK_COUNT = 1 +MAX_CARDS_DRAW = 10 +MIN_CARDS_DRAW = 1 +MAX_DICE_COUNT = 10 +MIN_DICE_COUNT = 1 + + +def get_dice_visual(dice_results: list[int]) -> list[str]: + """Convert dice numbers to emoji representation. + + Args: + dice_results: List of dice roll results (1-6) + + Returns: + List of dice emoji strings + """ + return [DICE_EMOJI[result - 1] for result in dice_results if 1 <= result <= 6] + + +def validate_deck_count(count: int) -> int: + """Validate and normalize deck count. + + Args: + count: Requested deck count + + Returns: + Normalized deck count within valid range + """ + return max(MIN_DECK_COUNT, min(MAX_DECK_COUNT, count)) + + +def validate_card_count(count: int) -> int: + """Validate and normalize card draw count. + + Args: + count: Requested card count + + Returns: + Normalized card count within valid range + """ + return max(MIN_CARDS_DRAW, min(MAX_CARDS_DRAW, count)) + + +def validate_dice_count(count: int) -> int: + """Validate and normalize dice count. + + Args: + count: Requested dice count + + Returns: + Normalized dice count within valid range + """ + return max(MIN_DICE_COUNT, min(MAX_DICE_COUNT, count)) + + +def format_card_info(card: dict) -> dict: + """Format card information for consistent output. 
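+
+    For example (illustrative), a raw card {"value": "ACE", "suit": "SPADES",
+    "code": "AS", "image": "...", "images": {"svg": "...", "png": "..."}}
+    is flattened so the SVG URL is exposed as a top-level "svg_image" key.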
+
+    Args:
+        card: Raw card data from Deck of Cards API
+
+    Returns:
+        Formatted card information
+    """
+    return {
+        "value": card["value"],
+        "suit": card["suit"],
+        "code": card["code"],
+        "image": card["image"],
+        "svg_image": card["images"]["svg"],
+    }
diff --git a/intentkit/skills/cdp/__init__.py b/intentkit/skills/cdp/__init__.py
new file mode 100644
index 00000000..785e4a79
--- /dev/null
+++ b/intentkit/skills/cdp/__init__.py
@@ -0,0 +1,140 @@
+"""CDP wallet interaction skills."""
+
+from typing import TypedDict
+
+from coinbase_agentkit import (
+    AgentKit,
+    AgentKitConfig,
+    CdpEvmServerWalletProvider,
+    basename_action_provider,
+    cdp_api_action_provider,
+    erc20_action_provider,
+    morpho_action_provider,
+    pyth_action_provider,
+    superfluid_action_provider,
+    wallet_action_provider,
+    weth_action_provider,
+    wow_action_provider,
+)
+from coinbase_agentkit.action_providers.erc721 import erc721_action_provider
+from coinbase_agentkit_langchain import get_langchain_tools
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.clients import CdpClient, get_cdp_client
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.cdp.base import CDPBaseTool
+from intentkit.skills.cdp.get_balance import GetBalance
+from intentkit.skills.cdp.swap import Swap
+
+
+class SkillStates(TypedDict):
+    get_balance: SkillState
+    swap: SkillState
+    WalletActionProvider_get_balance: SkillState
+    WalletActionProvider_get_wallet_details: SkillState
+    WalletActionProvider_native_transfer: SkillState
+    CdpApiActionProvider_address_reputation: SkillState
+    CdpApiActionProvider_request_faucet_funds: SkillState
+    PythActionProvider_fetch_price: SkillState
+    PythActionProvider_fetch_price_feed_id: SkillState
+    BasenameActionProvider_register_basename: SkillState
+    ERC20ActionProvider_get_balance: SkillState
+    ERC20ActionProvider_transfer: SkillState
+    Erc721ActionProvider_get_balance: SkillState
+    Erc721ActionProvider_mint: SkillState
+    Erc721ActionProvider_transfer: SkillState
+    WethActionProvider_wrap_eth: SkillState
+    MorphoActionProvider_deposit: SkillState
+    MorphoActionProvider_withdraw: SkillState
+    SuperfluidActionProvider_create_flow: SkillState
+    SuperfluidActionProvider_delete_flow: SkillState
+    SuperfluidActionProvider_update_flow: SkillState
+    WowActionProvider_buy_token: SkillState
+    WowActionProvider_create_token: SkillState
+    WowActionProvider_sell_token: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for CDP skills."""
+
+    states: SkillStates
+
+
+# CDP skills are not stateless across agents, so we need agent_id here.
+# If you are a skill contributor, please do not follow this pattern.
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    agent_id: str,
+    **_,
+) -> list[CDPBaseTool]:
+    """Get all CDP skills.
+
+    Args:
+        config: The configuration for CDP skills.
+        is_private: Whether to include private skills.
+        store: The skill store for persisting data.
+        agent_id: The ID of the agent using the skills.
+
+    Returns:
+        A list of CDP skills.
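+
+    Example (illustrative):
+        With states={"get_balance": "private", "swap": "disabled"} and
+        is_private=True, the returned list contains the custom GetBalance
+        tool; AgentKit-provided tools are matched by name suffix.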
+ """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Initialize CDP client + cdp_client: CdpClient = await get_cdp_client(agent_id, store) + cdp_wallet_provider: CdpEvmServerWalletProvider = ( + await cdp_client.get_wallet_provider() + ) + agent_kit = AgentKit( + AgentKitConfig( + wallet_provider=cdp_wallet_provider, + action_providers=[ + wallet_action_provider(), + cdp_api_action_provider(), + pyth_action_provider(), + basename_action_provider(), + erc20_action_provider(), + erc721_action_provider(), + weth_action_provider(), + morpho_action_provider(), + superfluid_action_provider(), + wow_action_provider(), + ], + ) + ) + cdp_tools = get_langchain_tools(agent_kit) + tools = [] + for skill in available_skills: + if skill == "get_balance": + # Get the account object for the custom GetBalance skill + tools.append( + GetBalance( + agent_id=agent_id, + skill_store=store, + ) + ) + continue + elif skill == "swap" or skill.endswith("_trade"): + # Add the custom Swap skill, "trade" is backword compatible + tools.append( + Swap( + agent_id=agent_id, + skill_store=store, + ) + ) + continue + for tool in cdp_tools: + if tool.name.endswith(skill): + tool.handle_tool_error = lambda e: f"tool error: {e}" + tool.handle_validation_error = lambda e: f"validation error: {e}" + tools.append(tool) + return tools diff --git a/intentkit/skills/cdp/base.py b/intentkit/skills/cdp/base.py new file mode 100644 index 00000000..b03b2787 --- /dev/null +++ b/intentkit/skills/cdp/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class CDPBaseTool(IntentKitSkill): + """Base class for CDP tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "cdp" diff --git a/intentkit/skills/cdp/cdp.png b/intentkit/skills/cdp/cdp.png new file mode 100644 index 00000000..cb40f1f6 Binary files /dev/null and b/intentkit/skills/cdp/cdp.png differ diff --git a/intentkit/skills/cdp/get_balance.py b/intentkit/skills/cdp/get_balance.py new file mode 100644 index 00000000..f23e529f --- /dev/null +++ b/intentkit/skills/cdp/get_balance.py @@ -0,0 +1,110 @@ +from typing import Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.clients import get_cdp_client +from intentkit.skills.cdp.base import CDPBaseTool + + +class GetBalanceInput(BaseModel): + """Input for GetBalance tool.""" + + asset_id: Optional[str] = Field( + default=None, + description="The asset ID to get the balance for (e.g., 'eth', 'usdc', or a valid contract address). If not provided, returns all token balances.", + ) + + +class GetBalance(CDPBaseTool): + """Tool for getting balance from CDP wallet. + + This tool uses the CDP API to get balance for all addresses in a wallet for a given asset. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
+ """ + + agent_id: str + skill_store: SkillStoreABC + + name: str = "cdp_get_balance" + description: str = ( + "This tool will get the balance of all the addresses in the wallet. If asset_id is provided, it returns the balance for that specific asset. " + "If no asset_id is provided, it returns all token balances. " + "Always use 'eth' for the native asset ETH and 'usdc' for USDC. " + "Other valid asset IDs are: weth,dai,reth,brett,w,cbeth,axl,iotx,prime,aero,rsr,mog,tbtc,npc,yfi" + ) + args_schema: Type[BaseModel] = GetBalanceInput + + async def _arun(self, asset_id: Optional[str] = None) -> str: + """Async implementation of the tool to get balance. + + Args: + asset_id (Optional[str]): The asset ID to get the balance for. If None, returns all token balances. + + Returns: + str: A message containing the balance information or error message. + """ + # Get network information from CDP client + cdp_client = await get_cdp_client(self.agent_id, self.skill_store) + provider = await cdp_client.get_wallet_provider() + provider_config = await cdp_client.get_provider_config() + network_id = provider_config.network_id + + # Map network_id to the format expected by the API + network_mapping = { + "base-mainnet": "base", + "ethereum-mainnet": "ethereum", + } + api_network = network_mapping.get(network_id, network_id) + + # For native ETH balance, use the account's balance directly + if asset_id and asset_id.lower() == "eth": + try: + # Get native balance using Web3 + balance_wei = provider.get_balance() + balance_eth = balance_wei / (10**18) # Convert from wei to ETH + return f"ETH balance: {balance_eth} ETH" + except Exception as e: + return f"Error getting ETH balance: {e!s}" + + client = provider.get_client() + async with client: + account = await client.evm.get_account(provider.get_address()) + # If no asset_id provided, return all token balances + if asset_id is None: + # Get native ETH balance + balance_wei = provider.get_balance() + balance_eth = balance_wei / (10**18) # Convert from wei to ETH + + # Get all token balances + token_balances = await account.list_token_balances(api_network) + + result = [f"ETH balance: {balance_eth} ETH"] + + for balance in token_balances.balances: + result.append( + f"{balance.token.symbol} balance: {balance.amount.decimals} {balance.token.name}" + ) + + return f"All balances for account {account.address}:\n" + "\n".join( + result + ) + + # For other tokens, try the list_token_balances API + token_balances = await account.list_token_balances(api_network) + + # Find the balance for the specific asset + target_balance = None + for balance in token_balances.balances: + if balance.token.symbol.lower() == asset_id.lower(): + target_balance = balance + break + + if target_balance: + return f"Balance for {asset_id} in account {account.address}: {target_balance.amount.decimals} {target_balance.token.name}" + else: + return f"No balance found for asset {asset_id} in account {account.address}" diff --git a/intentkit/skills/cdp/schema.json b/intentkit/skills/cdp/schema.json new file mode 100644 index 00000000..65faea57 --- /dev/null +++ b/intentkit/skills/cdp/schema.json @@ -0,0 +1,425 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Coinbase Wallet", + "description": "Integration with Coinbase Wallet (CDP) providing blockchain wallet functionality including balance checking, token transfers, and cryptocurrency trading operations", + "x-icon": "https://ai.service.crestal.dev/skills/cdp/cdp.png", + "x-tags": [ + "Blockchain" + ], 
+ "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + "get_balance": { + "type": "string", + "title": "CDP Wallet Get Balance", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Use coinbase API to get wallet balance, float result.", + "default": "private" + }, + "swap": { + "type": "string", + "title": "CDP Wallet Swap", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Use coinbase API to swap.", + "default": "disabled" + }, + "WalletActionProvider_get_balance": { + "type": "string", + "title": "Normal Get Balance", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get balance, raw bigint result. Use this with professional AI models.", + "default": "disabled" + }, + "WalletActionProvider_get_wallet_details": { + "type": "string", + "title": "Get Wallet Details", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get wallet details using coinbase API", + "default": "public" + }, + "CdpApiActionProvider_address_reputation": { + "type": "string", + "title": "Address Reputation", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for CdpApiActionProvider_address_reputation", + "default": "disabled" + }, + "ERC20ActionProvider_get_balance": { + "type": "string", + "title": "Erc20 Get Balance", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for ERC20ActionProvider_get_balance", + "default": "disabled" + }, + "ERC20ActionProvider_transfer": { + "type": "string", + "title": "Erc20 Transfer", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for ERC20ActionProvider_transfer", + "default": "private" + }, + "WethActionProvider_wrap_eth": { + "type": "string", + "title": "Wrap ETH", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for WethActionProvider_wrap_eth", + "default": "disabled" + }, + "Erc721ActionProvider_get_balance": { + "type": "string", + "title": "Erc721 NFT Get Balance", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for Erc721ActionProvider_get_balance", + "default": "disabled" + }, + "Erc721ActionProvider_transfer": { + "type": "string", + "title": "Erc721 NFT Transfer", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for Erc721ActionProvider_transfer", + "default": "disabled" + }, + "Erc721ActionProvider_mint": { + "type": "string", + "title": "Erc721 Mint", + "enum": [ + "disabled", + 
"public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for Erc721ActionProvider_mint", + "default": "disabled" + }, + "WalletActionProvider_native_transfer": { + "type": "string", + "title": "Wallet Native Transfer", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for WalletActionProvider_native_transfer", + "default": "private" + }, + "CdpApiActionProvider_request_faucet_funds": { + "type": "string", + "title": "CDP Request Faucet Funds", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Only available in base-sepolia network", + "default": "disabled" + }, + "MorphoActionProvider_deposit": { + "type": "string", + "title": "Morpho Deposit", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for MorphoActionProvider_deposit", + "default": "disabled" + }, + "MorphoActionProvider_withdraw": { + "type": "string", + "title": "Morpho Withdraw", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for MorphoActionProvider_withdraw", + "default": "disabled" + }, + "PythActionProvider_fetch_price_feed_id": { + "type": "string", + "title": "Pyth Fetch Price Feed Id", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for PythActionProvider_fetch_price_feed_id", + "default": "private" + }, + "PythActionProvider_fetch_price": { + "type": "string", + "title": "Pyth Fetch Price", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for PythActionProvider_fetch_price", + "default": "private" + }, + "WowActionProvider_buy_token": { + "type": "string", + "title": "Wow Buy Token", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for WowActionProvider_buy_token", + "default": "disabled" + }, + "WowActionProvider_create_token": { + "type": "string", + "title": "Wow Create Token", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for WowActionProvider_create_token", + "default": "disabled" + }, + "WowActionProvider_sell_token": { + "type": "string", + "title": "Wow Sell Token", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for WowActionProvider_sell_token", + "default": "disabled" + }, + "SuperfluidActionProvider_create_flow": { + "type": "string", + "title": "Superfluid Create Flow", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for SuperfluidActionProvider_create_flow", + "default": "disabled" + }, + "SuperfluidActionProvider_update_flow": { + "type": "string", + 
"title": "Superfluid Update Flow", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for SuperfluidActionProvider_update_flow", + "default": "disabled" + }, + "SuperfluidActionProvider_delete_flow": { + "type": "string", + "title": "Superfluid Delete Flow", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for SuperfluidActionProvider_delete_flow", + "default": "disabled" + }, + "BasenameActionProvider_register_basename": { + "type": "string", + "title": "Register Basename", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State for BasenameActionProvider_register_basename", + "default": "disabled" + } + }, + "description": "States for each CDP skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform" + ], + "x-enum-title": [ + "Nation Hosted" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/cdp/swap.py b/intentkit/skills/cdp/swap.py new file mode 100644 index 00000000..b0f9adce --- /dev/null +++ b/intentkit/skills/cdp/swap.py @@ -0,0 +1,121 @@ +from typing import Optional, Type, Union + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.clients import get_cdp_client +from intentkit.skills.cdp.base import CDPBaseTool + + +class SwapInput(BaseModel): + """Input for Swap tool.""" + + from_token: str = Field( + description="The contract address of the token to swap from (e.g., '0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913' for USDC on Base)" + ) + to_token: str = Field( + description="The contract address of the token to swap to (e.g., '0x4200000000000000000000000000000000000006' for WETH on Base)" + ) + from_amount: Union[str, int] = Field( + description="The amount to swap from in smallest unit (e.g., 1000000 for 1 USDC with 6 decimals)" + ) + slippage_bps: Optional[int] = Field( + default=100, + description="Maximum slippage in basis points (100 = 1%). Defaults to 100 (1%)", + ) + + +class Swap(CDPBaseTool): + """Tool for swapping tokens using CDP wallet. + + This tool uses the CDP API to execute token swaps on supported networks. + It wraps the swap functionality from the EVM account. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + agent_id: str + skill_store: SkillStoreABC + + name: str = "cdp_swap" + description: str = ( + "This tool will swap tokens using the CDP wallet. " + "It supports swapping between any ERC-20 tokens on supported networks (Base and Ethereum). " + "You need to provide the contract addresses of both tokens and the amount to swap. " + "The amount should be in the smallest unit of the token (e.g., wei for ETH, or atomic units for ERC-20 tokens). " + "Common token addresses on Base: USDC=0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913, WETH=0x4200000000000000000000000000000000000006. " + "The tool will automatically handle gas estimation and transaction submission." 
+ ) + args_schema: Type[BaseModel] = SwapInput + + async def _arun( + self, + from_token: str, + to_token: str, + from_amount: Union[str, int], + slippage_bps: Optional[int] = 100, + ) -> str: + """Async implementation of the tool to swap tokens. + + Args: + from_token (str): The contract address of the token to swap from. + to_token (str): The contract address of the token to swap to. + from_amount (Union[str, int]): The amount to swap from in smallest unit. + slippage_bps (Optional[int]): Maximum slippage in basis points. Defaults to 100 (1%). + + Returns: + str: A message containing the swap result or error message. + """ + try: + # Get CDP client and network information + cdp_client = await get_cdp_client(self.agent_id, self.skill_store) + provider = await cdp_client.get_wallet_provider() + provider_config = await cdp_client.get_provider_config() + network_id = provider_config.network_id + + # Map network_id to the format expected by the swap API + network_mapping = { + "base-mainnet": "base", + "ethereum-mainnet": "ethereum", + } + api_network = network_mapping.get(network_id, network_id) + + # Validate network is supported + supported_networks = ["base", "ethereum"] + if api_network not in supported_networks: + return f"Error: Network {api_network} is not supported for swaps. Supported networks: {', '.join(supported_networks)}" + + # Get the EVM account + client = provider.get_client() + async with client: + account = await client.evm.get_account(provider.get_address()) + + # Import AccountSwapOptions here to avoid circular imports + from cdp.actions.evm.swap.types import AccountSwapOptions + + # Create swap options + swap_options = AccountSwapOptions( + network=api_network, + from_token=from_token, + to_token=to_token, + from_amount=str(from_amount), + slippage_bps=slippage_bps, + ) + + # Execute the swap + result = await account.swap(swap_options) + + return ( + f"Swap executed successfully!\n" + f"Transaction hash: {result.transaction_hash}\n" + f"Swapped from {from_token} to {to_token}\n" + f"Amount: {from_amount} (smallest units)\n" + f"Network: {api_network}\n" + f"Slippage tolerance: {slippage_bps} basis points ({slippage_bps / 100 if slippage_bps else 0}%)" + ) + + except Exception as e: + return f"Error executing swap: {e!s}" diff --git a/intentkit/skills/chainlist/README.md b/intentkit/skills/chainlist/README.md new file mode 100644 index 00000000..1ad01957 --- /dev/null +++ b/intentkit/skills/chainlist/README.md @@ -0,0 +1,38 @@ +# Chainlist Skill + +This skill provides access to the Chainlist API, allowing agents to lookup blockchain RPC endpoints and network information. + +## Features + +- Look up blockchain networks by name, symbol, or chain ID +- Find RPC endpoints for any EVM-compatible blockchain +- Filter for no-tracking RPC endpoints +- Get network details including native currency, explorers, and more + +## Usage + +Enable this skill in your agent configuration: + +```json +{ + "skills": { + "chainlist": { + "states": { + "chain_lookup": "public" + } + } + } +} +``` + +## Example Prompts + +- "Find RPC endpoints for Ethereum" +- "What are the RPC endpoints for chain ID 137?" +- "Show me privacy-focused RPC endpoints for Arbitrum" +- "Get details for the Polygon network" +- "Look up information about BSC chain" + +## Data Source + +This skill uses data from [chainlist.org](https://chainlist.org), which provides a comprehensive list of EVM networks and their RPC endpoints. 
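+
+## Under the Hood
+
+A minimal sketch of what the `chain_lookup` skill does internally, assuming only the public `rpcs.json` feed that the skill itself fetches (the real skill adds name/symbol search, result formatting, and limits; the helper name here is illustrative):
+
+```python
+import asyncio
+
+import httpx
+
+
+async def find_no_tracking_rpcs(chain_id: int) -> list[str]:
+    """Fetch chainlist.org's RPC list and keep only no-tracking endpoints."""
+    async with httpx.AsyncClient(timeout=10.0) as client:
+        response = await client.get("https://chainlist.org/rpcs.json")
+        response.raise_for_status()
+        chains = response.json()  # a list of chain objects
+    for chain in chains:
+        if chain.get("chainId") == chain_id:
+            return [
+                rpc["url"]
+                for rpc in chain.get("rpc", [])
+                if isinstance(rpc, dict) and rpc.get("tracking") == "none"
+            ]
+    return []
+
+
+# Example: privacy-focused RPC endpoints for Polygon (chain ID 137)
+print(asyncio.run(find_no_tracking_rpcs(137)))
+```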
\ No newline at end of file diff --git a/intentkit/skills/chainlist/__init__.py b/intentkit/skills/chainlist/__init__.py new file mode 100644 index 00000000..84cda118 --- /dev/null +++ b/intentkit/skills/chainlist/__init__.py @@ -0,0 +1,54 @@ +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.chainlist.base import ChainlistBaseTool +from intentkit.skills.chainlist.chain_lookup import ChainLookup + +# Cache skills at the system level, because they are stateless +_cache: dict[str, ChainlistBaseTool] = {} + + +class SkillStates(TypedDict): + chain_lookup: SkillState + + +class Config(SkillConfig): + """Configuration for chainlist skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[ChainlistBaseTool]: + """Get all chainlist skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + return [get_chainlist_skill(name, store) for name in available_skills] + + +def get_chainlist_skill( + name: str, + store: SkillStoreABC, +) -> ChainlistBaseTool: + """Get a chainlist skill by name.""" + if name == "chain_lookup": + if name not in _cache: + _cache[name] = ChainLookup( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown chainlist skill: {name}") diff --git a/intentkit/skills/chainlist/base.py b/intentkit/skills/chainlist/base.py new file mode 100644 index 00000000..674b92da --- /dev/null +++ b/intentkit/skills/chainlist/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class ChainlistBaseTool(IntentKitSkill): + """Base class for chainlist tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "chainlist" diff --git a/intentkit/skills/chainlist/chain_lookup.py b/intentkit/skills/chainlist/chain_lookup.py new file mode 100644 index 00000000..eefb9759 --- /dev/null +++ b/intentkit/skills/chainlist/chain_lookup.py @@ -0,0 +1,207 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.chainlist.base import ChainlistBaseTool + +logger = logging.getLogger(__name__) + + +class ChainLookupInput(BaseModel): + """Input for ChainLookup tool.""" + + search_term: Optional[str] = Field( + description="Term to search for (chain name, symbol, or chain ID)", + default=None, + ) + chain_id: Optional[int] = Field( + description="Specific chain ID to look up", + default=None, + ) + no_tracking: Optional[bool] = Field( + description="Whether to return only RPC endpoints with no tracking", + default=False, + ) + limit: Optional[int] = Field( + description="Limit the number of results returned", + default=5, + ) + + +class ChainLookup(ChainlistBaseTool): + """Tool for looking up blockchain RPC endpoints from Chainlist.""" + + name: 
str = "chain_lookup" + description: str = ( + "Look up blockchain RPC endpoints and details by chain name, symbol, or chain ID.\n" + "Returns information about blockchains including RPC endpoints, native currency, and explorers." + ) + args_schema: Type[BaseModel] = ChainLookupInput + + def _normalize_text(self, text: str) -> str: + """Normalize text for searching (lowercase, remove spaces).""" + if not text: + return "" + return text.lower().strip() + + async def _fetch_chains_data(self) -> List[Dict[str, Any]]: + """Fetch chains data from Chainlist API.""" + chainlist_api_url = "https://chainlist.org/rpcs.json" + + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.get(chainlist_api_url) + response.raise_for_status() + return response.json() + + def _filter_chains( + self, + chains: List[Dict[str, Any]], + search_term: Optional[str] = None, + chain_id: Optional[int] = None, + no_tracking: bool = False, + limit: int = 5, + ) -> List[Dict[str, Any]]: + """Filter chains based on search criteria.""" + filtered_chains = chains + + # Filter by chain_id if provided + if chain_id is not None: + filtered_chains = [ + chain for chain in filtered_chains if chain.get("chainId") == chain_id + ] + + # Filter by search term if provided + if search_term and chain_id is None: + normalized_term = self._normalize_text(search_term) + result = [] + + for chain in filtered_chains: + name = self._normalize_text(chain.get("name", "")) + symbol = self._normalize_text(chain.get("chain", "")) + short_name = self._normalize_text(chain.get("shortName", "")) + + if ( + normalized_term in name + or normalized_term in symbol + or normalized_term in short_name + ): + result.append(chain) + + filtered_chains = result + + # Filter RPC endpoints for each chain if no_tracking is True + if no_tracking: + filtered_result = [] + for chain in filtered_chains: + if "rpc" not in chain: + continue + + chain_copy = dict(chain) + chain_copy["rpc"] = [ + rpc + for rpc in chain["rpc"] + if isinstance(rpc, dict) and rpc.get("tracking") == "none" + ] + + if chain_copy[ + "rpc" + ]: # Only include if it has RPC endpoints after filtering + filtered_result.append(chain_copy) + + filtered_chains = filtered_result + + # Apply limit + if limit > 0: + filtered_chains = filtered_chains[:limit] + + return filtered_chains + + def _format_chain(self, chain: Dict[str, Any]) -> Dict[str, Any]: + """Format a chain entry for response.""" + # Format RPC endpoints + formatted_rpcs = [] + if "rpc" in chain: + for rpc in chain["rpc"]: + if isinstance(rpc, dict): + url = rpc.get("url") + tracking = rpc.get("tracking", "unspecified") + formatted_rpcs.append({"url": url, "tracking": tracking}) + elif isinstance(rpc, str): + formatted_rpcs.append({"url": rpc, "tracking": "unspecified"}) + + # Format chain data + formatted_chain = { + "name": chain.get("name"), + "chain": chain.get("chain"), + "chainId": chain.get("chainId"), + "networkId": chain.get("networkId"), + "shortName": chain.get("shortName"), + "infoURL": chain.get("infoURL", ""), + "nativeCurrency": chain.get("nativeCurrency", {}), + "rpc": formatted_rpcs[:3], # Limit to 3 RPC endpoints per chain + "total_rpc_count": len(chain.get("rpc", [])), + } + + # Add explorers if available + if "explorers" in chain and chain["explorers"]: + formatted_chain["explorers"] = [ + {"name": explorer.get("name", ""), "url": explorer.get("url", "")} + for explorer in chain["explorers"][:2] # Limit to 2 explorers + ] + + return formatted_chain + + async def _arun( + self, + search_term: 
Optional[str] = None, + chain_id: Optional[int] = None, + no_tracking: Optional[bool] = False, + limit: Optional[int] = 5, + config: Optional[Any] = None, + **kwargs, + ) -> Dict: + """Lookup blockchain RPC endpoints from Chainlist.""" + if not search_term and not chain_id: + return { + "error": "Please provide either a search term or a chain ID to lookup." + } + + try: + # Fetch data + chains_data = await self._fetch_chains_data() + + # Filter chains based on criteria + filtered_chains = self._filter_chains( + chains_data, + search_term=search_term, + chain_id=chain_id, + no_tracking=no_tracking, + limit=limit, + ) + + # Handle no results + if not filtered_chains: + return { + "found": False, + "message": "No chains found matching the search criteria.", + } + + # Format results + formatted_chains = [self._format_chain(chain) for chain in filtered_chains] + + return { + "found": True, + "count": len(formatted_chains), + "chains": formatted_chains, + } + + except httpx.HTTPStatusError as e: + logger.error(f"HTTP error fetching chain data: {e}") + return { + "error": f"Error fetching chain data: HTTP status code {e.response.status_code}" + } + except Exception as e: + logger.error(f"Error fetching chain data: {str(e)}") + return {"error": f"An error occurred while fetching chain data: {str(e)}"} diff --git a/intentkit/skills/chainlist/chainlist.png b/intentkit/skills/chainlist/chainlist.png new file mode 100644 index 00000000..68900a58 Binary files /dev/null and b/intentkit/skills/chainlist/chainlist.png differ diff --git a/intentkit/skills/chainlist/schema.json b/intentkit/skills/chainlist/schema.json new file mode 100644 index 00000000..bf420cce --- /dev/null +++ b/intentkit/skills/chainlist/schema.json @@ -0,0 +1,47 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Chainlist Skills", + "description": "Access blockchain RPC endpoints and network information from chainlist.org. Enable this skill to look up EVM-compatible networks by name, symbol, or chain ID and get their RPC endpoints, native currencies, and explorer links.", + "x-icon": "https://ai.service.crestal.dev/skills/chainlist/chainlist.png", + "x-tags": [ + "Blockchain", + "RPC", + "EVM", + "Network" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "chain_lookup": { + "type": "string", + "title": "Chain Lookup", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Enables looking up blockchain RPC endpoints and network information. 
When public, available to all users; when private, available only to the agent owner.",
+          "default": "disabled"
+        }
+      },
+      "description": "Configure visibility for chainlist skills (disabled: unavailable, public: available to all users, private: available only to the agent owner)"
+    }
+  },
+  "required": [
+    "enabled",
+    "states"
+  ],
+  "additionalProperties": true
+}
\ No newline at end of file
diff --git a/intentkit/skills/common/__init__.py b/intentkit/skills/common/__init__.py
new file mode 100644
index 00000000..c30ab9c0
--- /dev/null
+++ b/intentkit/skills/common/__init__.py
@@ -0,0 +1,82 @@
+"""Common utility skills."""
+
+import logging
+from typing import Optional, TypedDict
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.common.base import CommonBaseTool
+from intentkit.skills.common.current_time import CurrentTime
+
+# Cache skills at the system level, because they are stateless
+_cache: dict[str, CommonBaseTool] = {}
+
+logger = logging.getLogger(__name__)
+
+
+class SkillStates(TypedDict):
+    current_time: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for common utility skills."""
+
+    states: SkillStates
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[CommonBaseTool]:
+    """Get all common utility skills.
+
+    Args:
+        config: The configuration for common utility skills.
+        is_private: Whether to include private skills.
+        store: The skill store for persisting data.
+
+    Returns:
+        A list of common utility skills.
+    """
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter; skip unknown names (getter returns None)
+    result = []
+    for name in available_skills:
+        skill = get_common_skill(name, store)
+        if skill:
+            result.append(skill)
+    return result
+
+
+def get_common_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> Optional[CommonBaseTool]:
+    """Get a common utility skill by name.
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested common utility skill + """ + if name == "current_time": + if name not in _cache: + _cache[name] = CurrentTime( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown common skill: {name}") + return None diff --git a/intentkit/skills/common/base.py b/intentkit/skills/common/base.py new file mode 100644 index 00000000..d9af63e6 --- /dev/null +++ b/intentkit/skills/common/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class CommonBaseTool(IntentKitSkill): + """Base class for common utility tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "common" diff --git a/intentkit/skills/common/common.jpg b/intentkit/skills/common/common.jpg new file mode 100644 index 00000000..1ab12bed Binary files /dev/null and b/intentkit/skills/common/common.jpg differ diff --git a/intentkit/skills/common/current_time.py b/intentkit/skills/common/current_time.py new file mode 100644 index 00000000..8cd70cea --- /dev/null +++ b/intentkit/skills/common/current_time.py @@ -0,0 +1,83 @@ +import logging +from datetime import datetime +from typing import Type + +import pytz +from pydantic import BaseModel, Field + +from intentkit.skills.common.base import CommonBaseTool + +logger = logging.getLogger(__name__) + + +class CurrentTimeInput(BaseModel): + """Input for CurrentTime tool.""" + + timezone: str = Field( + description="Timezone to format the time in (e.g., 'UTC', 'US/Pacific', 'Europe/London', 'Asia/Tokyo'). Default is UTC.", + default="UTC", + ) + + +class CurrentTime(CommonBaseTool): + """Tool for getting the current time. + + This tool returns the current time and converts it to the specified timezone. + By default, it returns the time in UTC. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "common_current_time" + description: str = ( + "Get the current time, converted to a specified timezone.\n" + "You must call this tool whenever the user asks for the time." + ) + args_schema: Type[BaseModel] = CurrentTimeInput + + async def _arun(self, timezone: str, **kwargs) -> str: + """Implementation of the tool to get the current time. + + Args: + timezone (str): The timezone to format the time in. Defaults to "UTC". + + Returns: + str: A formatted string with the current time in the specified timezone. 
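+
+        Example:
+            _arun("Asia/Tokyo") might return
+            "Current time: 2025-01-01 09:00:00 JST" (illustrative timestamp).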
+ """ + # Get current UTC time + utc_now = datetime.now(pytz.UTC) + + # Convert to the requested timezone + if timezone.upper() != "UTC": + try: + tz = pytz.timezone(timezone) + converted_time = utc_now.astimezone(tz) + except pytz.exceptions.UnknownTimeZoneError: + # Provide some suggestions for common timezones + common_timezones = [ + "US/Eastern", + "US/Central", + "US/Pacific", + "Europe/London", + "Europe/Paris", + "Europe/Berlin", + "Asia/Shanghai", + "Asia/Tokyo", + "Asia/Singapore", + "Australia/Sydney", + ] + suggestion_str = ", ".join([f"'{tz}'" for tz in common_timezones]) + return ( + f"Error: Unknown timezone '{timezone}'. Using UTC instead.\n" + f"Some common timezone options: {suggestion_str}" + ) + else: + converted_time = utc_now + + # Format the time + formatted_time = converted_time.strftime("%Y-%m-%d %H:%M:%S %Z") + + return f"Current time: {formatted_time}" diff --git a/intentkit/skills/common/schema.json b/intentkit/skills/common/schema.json new file mode 100644 index 00000000..dbd9d6a2 --- /dev/null +++ b/intentkit/skills/common/schema.json @@ -0,0 +1,57 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Common Utilities", + "description": "Utility skills", + "x-icon": "https://ai.service.crestal.dev/skills/common/common.jpg", + "x-tags": [ + "Utility" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + "current_time": { + "type": "string", + "title": "Current Time", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Provides localized timekeeping with automatic timezone detection using IP geolocation", + "default": "private" + } + }, + "description": "States for each common utility skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform" + ], + "x-enum-title": [ + "Nation Hosted" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/cookiefun/README.md b/intentkit/skills/cookiefun/README.md new file mode 100644 index 00000000..f9d86b3f --- /dev/null +++ b/intentkit/skills/cookiefun/README.md @@ -0,0 +1,121 @@ +# CookieFun Skills + +This skill category provides access to CookieFun API for Twitter/X analytics and account insights. + +### Features + +The CookieFun skills allow you to: + +- Get a list of all available sectors +- Get detailed information about Twitter accounts +- Get the feed of tweets from specific accounts +- Find smart followers for Twitter accounts +- Search for Twitter accounts matching specific criteria + +### API Key + +You need a CookieFun API key to use these skills. You can get one by signing up at [CookieFun](https://cookie.fun/). + +### Skills + +#### Get Sectors + +Returns a list of all available sectors in the CookieFun system. + +Example usage: +``` +Call the get_sectors skill to fetch all sectors available in the CookieFun system. +``` + +Example prompts: +- "What sectors are available in CookieFun?" 
+- "Show me all the sectors in CookieFun" +- "Get a list of all sectors from CookieFun" + +#### Get Account Details + +Retrieves detailed information about a Twitter account including followers, following, posts, metrics, and engagement data. + +Example usage: +``` +Call the get_account_details skill with parameters: +- username: "elonmusk" +``` + +or + +``` +Call the get_account_details skill with parameters: +- userId: "1234567890" +``` + +Example prompts: +- "Get details about the Twitter account @elonmusk" +- "Fetch information about Elon Musk's Twitter profile" +- "Show me stats for the Twitter user elonmusk" +- "What's the engagement rate for @elonmusk?" + +#### Get Account Smart Followers + +Returns a list of top smart followers for a specific Twitter account, with detailed metrics about these followers. + +Example usage: +``` +Call the get_account_smart_followers skill with parameters: +- username: "elonmusk" +``` + +or + +``` +Call the get_account_smart_followers skill with parameters: +- userId: "1234567890" +``` + +Example prompts: +- "Who are the top smart followers of @elonmusk?" +- "Get me a list of the most influential followers of Elon Musk" +- "Show me the smart followers for Twitter user elonmusk" +- "Find the most engaged followers of @elonmusk" + +#### Search Accounts + +Searches for Twitter accounts that authored tweets matching specified search criteria. + +Example usage: +``` +Call the search_accounts skill with parameters: +- searchQuery: "bitcoin" +- type: 0 # Optional: 0 for Original, 1 for Reply, 2 for Quote +- sortBy: 0 # Optional: 0 for SmartEngagementPoints, 1 for Impressions, 2 for MatchingTweetsCount +- sortOrder: 1 # Optional: 0 for Ascending, 1 for Descending +``` + +Example prompts: +- "Find Twitter accounts talking about bitcoin" +- "Search for accounts that tweet about AI sorted by engagement" +- "Who are the top accounts posting original tweets about NFTs?" +- "Find Twitter users discussing climate change with the most impressions" + +#### Get Account Feed + +Retrieves a list of tweets for a specific Twitter account with various filtering options. + +Example usage: +``` +Call the get_account_feed skill with parameters: +- username: "elonmusk" +- startDate: "01/05/2023" # Optional: Filter tweets after this date +- endDate: "31/05/2023" # Optional: Filter tweets before this date +- type: 0 # Optional: 0 for Original, 1 for Reply, 2 for Quote +- hasMedia: true # Optional: Filter to only tweets with media +- sortBy: 0 # Optional: 0 for CreatedDate, 1 for Impressions +- sortOrder: 1 # Optional: 0 for Ascending, 1 for Descending +``` + +Example prompts: +- "Show me Elon Musk's tweets from May 2023" +- "Get the most popular tweets from @elonmusk" +- "Fetch only original tweets (not replies) from elonmusk" +- "Show me tweets with media from @elonmusk posted in the last month" +- "What are the latest tweets from Elon Musk sorted by impressions?" 
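+
+### Calling the API Directly
+
+For reference, a minimal sketch of the request these skills send (base URL, endpoint path, and headers taken from `constants.py` and the skill implementations; the response structure varies, which is why the skills check several shapes):
+
+```python
+import asyncio
+
+import httpx
+
+BASE_URL = "https://api.staging.cookie.fun/v3"
+API_KEY = "your-cookiefun-api-key"  # placeholder
+
+
+async def get_account_details(username: str) -> dict:
+    """POST to the account details endpoint with the x-api-key header."""
+    headers = {"Content-Type": "application/json", "x-api-key": API_KEY}
+    async with httpx.AsyncClient() as client:
+        response = await client.post(
+            f"{BASE_URL}/account/", headers=headers, json={"username": username}
+        )
+        response.raise_for_status()
+        return response.json()
+
+
+print(asyncio.run(get_account_details("elonmusk")))
+```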
diff --git a/intentkit/skills/cookiefun/__init__.py b/intentkit/skills/cookiefun/__init__.py new file mode 100644 index 00000000..452dff1f --- /dev/null +++ b/intentkit/skills/cookiefun/__init__.py @@ -0,0 +1,78 @@ +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.cookiefun.base import CookieFunBaseTool, logger +from intentkit.skills.cookiefun.get_account_details import GetAccountDetails +from intentkit.skills.cookiefun.get_account_feed import GetAccountFeed +from intentkit.skills.cookiefun.get_account_smart_followers import ( + GetAccountSmartFollowers, +) +from intentkit.skills.cookiefun.get_sectors import GetSectors +from intentkit.skills.cookiefun.search_accounts import SearchAccounts + +# Cache skills at the system level, because they are stateless +_cache: dict[str, CookieFunBaseTool] = {} + + +class SkillStates(TypedDict): + """States for CookieFun skills.""" + + get_sectors: SkillState + get_account_details: SkillState + get_account_smart_followers: SkillState + search_accounts: SkillState + get_account_feed: SkillState + + +class Config(SkillConfig): + """Configuration for CookieFun skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[CookieFunBaseTool]: + """Get all CookieFun skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + skills = [get_cookiefun_skill(name, store) for name in available_skills] + logger.info("Returning %d CookieFun skills", len(skills)) + return skills + + +def get_cookiefun_skill( + name: str, + store: SkillStoreABC, +) -> CookieFunBaseTool: + """Get a CookieFun skill by name.""" + + if name not in _cache: + if name == "get_sectors": + _cache[name] = GetSectors(skill_store=store) + elif name == "get_account_details": + _cache[name] = GetAccountDetails(skill_store=store) + elif name == "get_account_smart_followers": + _cache[name] = GetAccountSmartFollowers(skill_store=store) + elif name == "search_accounts": + _cache[name] = SearchAccounts(skill_store=store) + elif name == "get_account_feed": + _cache[name] = GetAccountFeed(skill_store=store) + else: + logger.error("Unknown CookieFun skill: %s", name) + raise ValueError(f"Unknown CookieFun skill: {name}") + + return _cache[name] diff --git a/intentkit/skills/cookiefun/base.py b/intentkit/skills/cookiefun/base.py new file mode 100644 index 00000000..021ac4f4 --- /dev/null +++ b/intentkit/skills/cookiefun/base.py @@ -0,0 +1,49 @@ +import logging +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + +logger = logging.getLogger(__name__) + + +class CookieFunBaseTool(IntentKitSkill): + """Base class for CookieFun tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "cookiefun" + + def get_api_key(self) -> 
str: + """ + Get the API key from configuration. + + Returns: + The API key + + Raises: + ToolException: If the API key is not found or provider is invalid. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "agent_owner": + api_key = skill_config.get("api_key") + if api_key: + return api_key + else: + raise ToolException("No api_key found in agent_owner configuration") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}. Only 'agent_owner' is supported for CookieFun." + ) diff --git a/intentkit/skills/cookiefun/constants.py b/intentkit/skills/cookiefun/constants.py new file mode 100644 index 00000000..4de375c2 --- /dev/null +++ b/intentkit/skills/cookiefun/constants.py @@ -0,0 +1,18 @@ +""" +Constants for the CookieFun skills. +""" + +# API Base URL +BASE_URL = "https://api.staging.cookie.fun/v3" + +# API Endpoints +ENDPOINTS = { + "sectors": f"{BASE_URL}/sectors", + "account_details": f"{BASE_URL}/account/", + "smart_followers": f"{BASE_URL}/account/smart-followers", + "search_accounts": f"{BASE_URL}/account/query", + "account_feed": f"{BASE_URL}/account/feed", +} + +# Default Headers +DEFAULT_HEADERS = {"Content-Type": "application/json"} diff --git a/intentkit/skills/cookiefun/cookiefun.png b/intentkit/skills/cookiefun/cookiefun.png new file mode 100644 index 00000000..c9120f48 Binary files /dev/null and b/intentkit/skills/cookiefun/cookiefun.png differ diff --git a/intentkit/skills/cookiefun/get_account_details.py b/intentkit/skills/cookiefun/get_account_details.py new file mode 100644 index 00000000..790111a0 --- /dev/null +++ b/intentkit/skills/cookiefun/get_account_details.py @@ -0,0 +1,169 @@ +from typing import Any, Dict, Optional, Type, Union + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.cookiefun.base import CookieFunBaseTool, logger +from intentkit.skills.cookiefun.constants import DEFAULT_HEADERS, ENDPOINTS + + +class GetAccountDetailsInput(BaseModel): + """Input for GetAccountDetails tool.""" + + username: Optional[str] = Field( + default=None, + description="Twitter username (either username or userId is required)", + ) + + userId: Optional[str] = Field( + default=None, + description="Twitter user ID (either username or userId is required)", + ) + + +class GetAccountDetails(CookieFunBaseTool): + """Tool to get detailed information about a Twitter account.""" + + name: str = "cookiefun_get_account_details" + description: str = "Retrieves detailed information about a Twitter account including followers, following, posts, metrics, and engagement data." + args_schema: Type[BaseModel] = GetAccountDetailsInput + + async def _arun( + self, + username: Optional[str] = None, + userId: Optional[str] = None, + **kwargs, + ) -> Union[Dict[str, Any], str]: + """ + Get detailed information about a Twitter account. + + Args: + username: Twitter username (either username or userId is required) + userId: Twitter user ID (either username or userId is required) + + Returns: + Account details including followers, following, posts, metrics, and engagement data. + """ + logger.info( + "Getting account details for username=%s, userId=%s", username, userId + ) + + # Validate input parameters + if not username and not userId: + logger.error("Neither username nor userId provided") + return "Error: Either username or userId must be provided." 
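+
+        # The request body mirrors whichever identifier was supplied, e.g.
+        # {"username": "elonmusk"} or {"userId": "1234567890"}.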
+
+        try:
+            # Get context to retrieve API key
+            api_key = self.get_api_key()
+
+            if not api_key:
+                logger.error("No API key provided for CookieFun API")
+                return "Error: No API key provided for CookieFun API. Please configure the API key in the agent settings."
+
+            # Prepare request payload
+            payload = {}
+            if username:
+                payload["username"] = username
+            if userId:
+                payload["userId"] = userId
+
+            # Make API request
+            headers = {**DEFAULT_HEADERS, "x-api-key": api_key}
+
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    ENDPOINTS["account_details"], headers=headers, json=payload
+                )
+                logger.debug(
+                    "Received response with status code: %d", response.status_code
+                )
+
+                response.raise_for_status()
+                data = response.json()
+
+                # Check different possible response structures. Order matters:
+                # look for nested entry/entries wrappers before treating the
+                # whole "ok" object as the account, otherwise the wrapper dict
+                # would be returned as-is and the entry branches could never run.
+                if (
+                    data.get("success")
+                    and isinstance(data.get("ok"), dict)
+                    and isinstance(data["ok"].get("entry"), dict)
+                ):
+                    logger.info(
+                        "Successfully retrieved account details from entry field"
+                    )
+                    return data["ok"]["entry"]
+                elif (
+                    data.get("success")
+                    and isinstance(data.get("ok"), dict)
+                    and "entries" in data["ok"]
+                    and len(data["ok"]["entries"]) > 0
+                ):
+                    # If entries is a list but we expect a single account, return the first one
+                    logger.info(
+                        "Successfully retrieved account details from entries array"
+                    )
+                    return data["ok"]["entries"][0]
+                elif (
+                    data.get("success")
+                    and "ok" in data
+                    and isinstance(data["ok"], dict)
+                ):
+                    logger.info("Successfully retrieved account details")
+                    return data["ok"]
+                elif data.get("success") and isinstance(data.get("account"), dict):
+                    # If account is at the top level
+                    logger.info("Successfully retrieved account details from top level")
+                    return data["account"]
+                elif data.get("success") and isinstance(data.get("entry"), dict):
+                    # If entry is at the top level
+                    logger.info(
+                        "Successfully retrieved account details from entry field"
+                    )
+                    return data["entry"]
+                elif (
+                    data.get("success")
+                    and isinstance(data.get("entries"), list)
+                    and len(data.get("entries")) > 0
+                ):
+                    # If entries is at the top level
+                    logger.info(
+                        "Successfully retrieved account details from entries array at top level"
+                    )
+                    return data["entries"][0]
+                elif "account" in data and isinstance(data["account"], dict):
+                    # If only account field exists
+                    logger.info("Successfully retrieved account from direct field")
+                    return data["account"]
+                elif "entry" in data and isinstance(data["entry"], dict):
+                    # If only entry field exists
+                    logger.info(
+                        "Successfully retrieved account from direct entry field"
+                    )
+                    return data["entry"]
+                elif (
+                    "entries" in data
+                    and isinstance(data["entries"], list)
+                    and len(data["entries"]) > 0
+                ):
+                    # If only entries field exists
+                    logger.info(
+                        "Successfully retrieved account from direct entries field"
+                    )
+                    return data["entries"][0]
+                else:
+                    # If we can't find account details in the expected structure, log the full response
+                    logger.error(
+                        "Could not find account details in response structure.
Full response: %s", + data, + ) + error_msg = data.get( + "error", "Unknown error - check API response format" + ) + logger.error("Error in API response: %s", error_msg) + return f"Error fetching account details: {error_msg}" + + except httpx.HTTPStatusError as e: + logger.error("HTTP error: %d - %s", e.response.status_code, e.response.text) + return f"HTTP error occurred: {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + logger.error("Request error: %s", str(e)) + return f"Request error occurred: {str(e)}" + except Exception as e: + logger.exception("Unexpected error occurred") + return f"An unexpected error occurred: {str(e)}" diff --git a/intentkit/skills/cookiefun/get_account_feed.py b/intentkit/skills/cookiefun/get_account_feed.py new file mode 100644 index 00000000..901372a0 --- /dev/null +++ b/intentkit/skills/cookiefun/get_account_feed.py @@ -0,0 +1,280 @@ +from enum import IntEnum +from typing import Any, Dict, List, Optional, Type, Union + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.cookiefun.base import CookieFunBaseTool, logger +from intentkit.skills.cookiefun.constants import DEFAULT_HEADERS, ENDPOINTS + + +class TweetType(IntEnum): + """Tweet type for filtering.""" + + Original = 0 + Reply = 1 + Quote = 2 + + +class SortBy(IntEnum): + """Sort options for tweets.""" + + CreatedDate = 0 + Impressions = 1 + + +class SortOrder(IntEnum): + """Sort order options.""" + + Ascending = 0 + Descending = 1 + + +class GetAccountFeedInput(BaseModel): + """Input for GetAccountFeed tool.""" + + username: Optional[str] = Field( + default=None, + description="Twitter username (either username or userId is required)", + ) + + userId: Optional[str] = Field( + default=None, + description="Twitter user ID (either username or userId is required)", + ) + + startDate: Optional[str] = Field( + default=None, + description="Start date for filtering in format dd/mm/yyyy (default: 30 days ago)", + ) + + endDate: Optional[str] = Field( + default=None, + description="End date for filtering in format dd/mm/yyyy (default: now)", + ) + + type: Optional[int] = Field( + default=None, + description="Type of tweets to filter: 0 for Original, 1 for Reply, 2 for Quote (leave empty for all types)", + ) + + hasMedia: Optional[bool] = Field( + default=None, + description="Filter to only include tweets with media if true", + ) + + sortBy: Optional[int] = Field( + default=None, + description="Sort by: 0 for CreatedDate, 1 for Impressions", + ) + + sortOrder: Optional[int] = Field( + default=None, + description="Sort order: 0 for Ascending, 1 for Descending", + ) + + +class GetAccountFeed(CookieFunBaseTool): + """Tool to get the feed (tweets) of a Twitter account.""" + + name: str = "cookiefun_get_account_feed" + description: str = "Retrieves a list of tweets for a specific Twitter account with various filtering options." + args_schema: Type[BaseModel] = GetAccountFeedInput + + async def _arun( + self, + username: Optional[str] = None, + userId: Optional[str] = None, + startDate: Optional[str] = None, + endDate: Optional[str] = None, + type: Optional[int] = None, + hasMedia: Optional[bool] = None, + sortBy: Optional[int] = None, + sortOrder: Optional[int] = None, + **kwargs, + ) -> Union[List[Dict[str, Any]], str]: + """ + Get the feed (tweets) of a Twitter account. 
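+
+        Dates use the dd/mm/yyyy format (e.g. startDate="01/05/2023" for
+        1 May 2023); when omitted, the window defaults to the last 30 days.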
+ + Args: + username: Twitter username (either username or userId is required) + userId: Twitter user ID (either username or userId is required) + startDate: Start date for filtering in format dd/mm/yyyy (default: 30 days ago) + endDate: End date for filtering in format dd/mm/yyyy (default: now) + type: Type of tweets to filter (0=Original, 1=Reply, 2=Quote) + hasMedia: Filter to only include tweets with media if true + sortBy: Sort by field (0=CreatedDate, 1=Impressions) + sortOrder: Sort order (0=Ascending, 1=Descending) + + Returns: + List of tweets from the specified account matching the filter criteria. + """ + logger.info( + "Getting account feed for username=%s, userId=%s, startDate=%s, endDate=%s, type=%s, hasMedia=%s, sortBy=%s, sortOrder=%s", + username, + userId, + startDate, + endDate, + type, + hasMedia, + sortBy, + sortOrder, + ) + + # Validate input parameters + if not username and not userId: + logger.error("Neither username nor userId provided") + return "Error: Either username or userId must be provided." + + try: + # Get context to retrieve API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No API key provided for CookieFun API") + return "Error: No API key provided for CookieFun API. Please configure the API key in the agent settings." + + # Prepare request payload + payload = {} + if username: + payload["username"] = username + if userId: + payload["userId"] = userId + if startDate: + payload["startDate"] = startDate + if endDate: + payload["endDate"] = endDate + if type is not None: + payload["type"] = type + if hasMedia is not None: + payload["hasMedia"] = hasMedia + if sortBy is not None: + payload["sortBy"] = sortBy + if sortOrder is not None: + payload["sortOrder"] = sortOrder + + # Make API request + headers = {**DEFAULT_HEADERS, "x-api-key": api_key} + + async with httpx.AsyncClient() as client: + response = await client.post( + ENDPOINTS["account_feed"], headers=headers, json=payload + ) + logger.debug( + "Received response with status code: %d", response.status_code + ) + + response.raise_for_status() + data = response.json() + + # Check different possible response structures + if data.get("success") and "ok" in data and "entries" in data["ok"]: + tweets = data["ok"]["entries"] + logger.info( + "Successfully retrieved %d tweets from entries field", + len(tweets), + ) + return tweets + elif data.get("success") and "ok" in data and "tweets" in data["ok"]: + tweets = data["ok"]["tweets"] + logger.info("Successfully retrieved %d tweets", len(tweets)) + return tweets + elif data.get("success") and "ok" in data and "posts" in data["ok"]: + tweets = data["ok"]["posts"] + logger.info( + "Successfully retrieved %d tweets from posts field", len(tweets) + ) + return tweets + elif data.get("success") and "ok" in data and "feed" in data["ok"]: + tweets = data["ok"]["feed"] + logger.info( + "Successfully retrieved %d tweets from feed field", len(tweets) + ) + return tweets + elif ( + data.get("success") + and "ok" in data + and isinstance(data["ok"], list) + ): + tweets = data["ok"] + logger.info( + "Successfully retrieved %d tweets from ok list", len(tweets) + ) + return tweets + elif data.get("success") and isinstance(data.get("tweets"), list): + tweets = data["tweets"] + logger.info( + "Successfully retrieved %d tweets from top level tweets", + len(tweets), + ) + return tweets + elif data.get("success") and isinstance(data.get("posts"), list): + tweets = data["posts"] + logger.info( + "Successfully retrieved %d tweets from top level posts", + 
len(tweets), + ) + return tweets + elif data.get("success") and isinstance(data.get("feed"), list): + tweets = data["feed"] + logger.info( + "Successfully retrieved %d tweets from top level feed", + len(tweets), + ) + return tweets + elif data.get("success") and isinstance(data.get("entries"), list): + tweets = data["entries"] + logger.info( + "Successfully retrieved %d tweets from top level entries", + len(tweets), + ) + return tweets + elif "tweets" in data and isinstance(data["tweets"], list): + tweets = data["tweets"] + logger.info( + "Successfully retrieved %d tweets from direct tweets field", + len(tweets), + ) + return tweets + elif "posts" in data and isinstance(data["posts"], list): + tweets = data["posts"] + logger.info( + "Successfully retrieved %d tweets from direct posts field", + len(tweets), + ) + return tweets + elif "feed" in data and isinstance(data["feed"], list): + tweets = data["feed"] + logger.info( + "Successfully retrieved %d tweets from direct feed field", + len(tweets), + ) + return tweets + elif "entries" in data and isinstance(data["entries"], list): + tweets = data["entries"] + logger.info( + "Successfully retrieved %d tweets from direct entries field", + len(tweets), + ) + return tweets + else: + # If we can't find tweets in the expected structure, log the full response + logger.error( + "Could not find tweets in response structure. Full response: %s", + data, + ) + error_msg = data.get( + "error", "Unknown error - check API response format" + ) + logger.error("Error in API response: %s", error_msg) + return f"Error fetching account feed: {error_msg}" + + except httpx.HTTPStatusError as e: + logger.error("HTTP error: %d - %s", e.response.status_code, e.response.text) + return f"HTTP error occurred: {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + logger.error("Request error: %s", str(e)) + return f"Request error occurred: {str(e)}" + except Exception as e: + logger.exception("Unexpected error occurred") + return f"An unexpected error occurred: {str(e)}" diff --git a/intentkit/skills/cookiefun/get_account_smart_followers.py b/intentkit/skills/cookiefun/get_account_smart_followers.py new file mode 100644 index 00000000..df11bf3d --- /dev/null +++ b/intentkit/skills/cookiefun/get_account_smart_followers.py @@ -0,0 +1,179 @@ +from typing import Any, Dict, List, Optional, Type, Union + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.cookiefun.base import CookieFunBaseTool, logger +from intentkit.skills.cookiefun.constants import DEFAULT_HEADERS, ENDPOINTS + + +class GetAccountSmartFollowersInput(BaseModel): + """Input for GetAccountSmartFollowers tool.""" + + username: Optional[str] = Field( + default=None, + description="Twitter username (either username or userId is required)", + ) + + userId: Optional[str] = Field( + default=None, + description="Twitter user ID (either username or userId is required)", + ) + + +class GetAccountSmartFollowers(CookieFunBaseTool): + """Tool to get smart followers for a Twitter account.""" + + name: str = "cookiefun_get_account_smart_followers" + description: str = "Returns a list of top smart followers for a specific Twitter account, with detailed metrics about these followers." + args_schema: Type[BaseModel] = GetAccountSmartFollowersInput + + async def _arun( + self, + username: Optional[str] = None, + userId: Optional[str] = None, + **kwargs, + ) -> Union[List[Dict[str, Any]], str]: + """ + Get smart followers for a Twitter account. 
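+
+        "Smart followers" are the followers CookieFun ranks highest on its
+        influence and engagement metrics.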
+ + Args: + username: Twitter username (either username or userId is required) + userId: Twitter user ID (either username or userId is required) + + Returns: + List of top smart followers with their metrics. + """ + logger.info( + "Getting smart followers for username=%s, userId=%s", username, userId + ) + + # Validate input parameters + if not username and not userId: + logger.error("Neither username nor userId provided") + return "Error: Either username or userId must be provided." + + try: + # Get context to retrieve API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No API key provided for CookieFun API") + return "Error: No API key provided for CookieFun API. Please configure the API key in the agent settings." + + # Prepare request payload + payload = {} + if username: + payload["username"] = username + if userId: + payload["userId"] = userId + + # Make API request + headers = {**DEFAULT_HEADERS, "x-api-key": api_key} + + async with httpx.AsyncClient() as client: + response = await client.post( + ENDPOINTS["smart_followers"], headers=headers, json=payload + ) + logger.debug( + "Received response with status code: %d", response.status_code + ) + + response.raise_for_status() + data = response.json() + + # Check different possible response structures + if data.get("success") and "ok" in data and "entries" in data["ok"]: + followers = data["ok"]["entries"] + logger.info( + "Successfully retrieved %d smart followers from entries field", + len(followers), + ) + return followers + elif data.get("success") and "ok" in data and "accounts" in data["ok"]: + followers = data["ok"]["accounts"] + logger.info( + "Successfully retrieved %d smart followers", len(followers) + ) + return followers + elif data.get("success") and "ok" in data and "followers" in data["ok"]: + followers = data["ok"]["followers"] + logger.info( + "Successfully retrieved %d smart followers from followers field", + len(followers), + ) + return followers + elif ( + data.get("success") + and "ok" in data + and isinstance(data["ok"], list) + ): + followers = data["ok"] + logger.info( + "Successfully retrieved %d smart followers from ok list", + len(followers), + ) + return followers + elif data.get("success") and isinstance(data.get("accounts"), list): + followers = data["accounts"] + logger.info( + "Successfully retrieved %d smart followers from top level accounts", + len(followers), + ) + return followers + elif data.get("success") and isinstance(data.get("followers"), list): + followers = data["followers"] + logger.info( + "Successfully retrieved %d smart followers from top level followers", + len(followers), + ) + return followers + elif data.get("success") and isinstance(data.get("entries"), list): + followers = data["entries"] + logger.info( + "Successfully retrieved %d smart followers from top level entries", + len(followers), + ) + return followers + elif "followers" in data and isinstance(data["followers"], list): + followers = data["followers"] + logger.info( + "Successfully retrieved %d smart followers from direct followers field", + len(followers), + ) + return followers + elif "accounts" in data and isinstance(data["accounts"], list): + followers = data["accounts"] + logger.info( + "Successfully retrieved %d smart followers from direct accounts field", + len(followers), + ) + return followers + elif "entries" in data and isinstance(data["entries"], list): + followers = data["entries"] + logger.info( + "Successfully retrieved %d smart followers from direct entries field", + len(followers), + 
) + return followers + else: + # If we can't find followers in the expected structure, log the full response + logger.error( + "Could not find smart followers in response structure. Full response: %s", + data, + ) + error_msg = data.get( + "error", "Unknown error - check API response format" + ) + logger.error("Error in API response: %s", error_msg) + return f"Error fetching smart followers: {error_msg}" + + except httpx.HTTPStatusError as e: + logger.error("HTTP error: %d - %s", e.response.status_code, e.response.text) + return f"HTTP error occurred: {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + logger.error("Request error: %s", str(e)) + return f"Request error occurred: {str(e)}" + except Exception as e: + logger.exception("Unexpected error occurred") + return f"An unexpected error occurred: {str(e)}" diff --git a/intentkit/skills/cookiefun/get_sectors.py b/intentkit/skills/cookiefun/get_sectors.py new file mode 100644 index 00000000..db8b2353 --- /dev/null +++ b/intentkit/skills/cookiefun/get_sectors.py @@ -0,0 +1,127 @@ +from typing import Any, Dict, List, Type + +import httpx +from pydantic import BaseModel + +from intentkit.skills.cookiefun.base import CookieFunBaseTool, logger +from intentkit.skills.cookiefun.constants import DEFAULT_HEADERS, ENDPOINTS + + +class GetSectorsInput(BaseModel): + """Input for GetSectors tool.""" + + pass + + +class GetSectors(CookieFunBaseTool): + """Tool to get all available sectors from the CookieFun API.""" + + name: str = "cookiefun_get_sectors" + description: str = ( + "Returns a list of all available sectors in the CookieFun system." + ) + args_schema: Type[BaseModel] = GetSectorsInput + + async def _arun(self, **kwargs) -> List[Dict[str, Any]]: + """ + Get all available sectors from the CookieFun API. + + Returns: + List of sector objects, each containing id, name, and other metadata. + """ + logger.info("Getting sectors from CookieFun API") + try: + # Get API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No API key provided for CookieFun API") + return "Error: No API key provided for CookieFun API. Please configure the API key in the agent settings." 
+ + # Make API request + headers = {**DEFAULT_HEADERS, "x-api-key": api_key} + + async with httpx.AsyncClient() as client: + response = await client.get(ENDPOINTS["sectors"], headers=headers) + logger.debug( + "Received response with status code: %d", response.status_code + ) + + response.raise_for_status() + data = response.json() + + # Check different possible response structures + if data.get("success") and "ok" in data and "entries" in data["ok"]: + sectors = data["ok"]["entries"] + logger.info( + "Successfully retrieved %d sectors from entries field", + len(sectors), + ) + return sectors + elif data.get("success") and "ok" in data and "sectors" in data["ok"]: + sectors = data["ok"]["sectors"] + logger.info("Successfully retrieved %d sectors", len(sectors)) + return sectors + elif ( + data.get("success") + and "ok" in data + and isinstance(data["ok"], list) + ): + # If "ok" is directly a list + sectors = data["ok"] + logger.info( + "Successfully retrieved %d sectors from ok list", len(sectors) + ) + return sectors + elif data.get("success") and isinstance(data.get("sectors"), list): + # If sectors is at the top level + sectors = data["sectors"] + logger.info( + "Successfully retrieved %d sectors from top level", len(sectors) + ) + return sectors + elif data.get("success") and isinstance(data.get("entries"), list): + # If entries is at the top level + sectors = data["entries"] + logger.info( + "Successfully retrieved %d sectors from entries top level", + len(sectors), + ) + return sectors + elif "sectors" in data and isinstance(data["sectors"], list): + # If only sectors field exists + sectors = data["sectors"] + logger.info( + "Successfully retrieved %d sectors from direct field", + len(sectors), + ) + return sectors + elif "entries" in data and isinstance(data["entries"], list): + # If only entries field exists + sectors = data["entries"] + logger.info( + "Successfully retrieved %d sectors from direct entries field", + len(sectors), + ) + return sectors + else: + # If we can't find sectors in the expected structure, log the full response for debugging + logger.error( + "Could not find sectors in response structure. Full response: %s", + data, + ) + error_msg = data.get( + "error", "Unknown error - check API response format" + ) + logger.error("Error in API response: %s", error_msg) + return f"Error fetching sectors: {error_msg}" + + except httpx.HTTPStatusError as e: + logger.error("HTTP error: %d - %s", e.response.status_code, e.response.text) + return f"HTTP error occurred: {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + logger.error("Request error: %s", str(e)) + return f"Request error occurred: {str(e)}" + except Exception as e: + logger.exception("Unexpected error occurred") + return f"An unexpected error occurred: {str(e)}" diff --git a/intentkit/skills/cookiefun/schema.json b/intentkit/skills/cookiefun/schema.json new file mode 100644 index 00000000..b49c89fe --- /dev/null +++ b/intentkit/skills/cookiefun/schema.json @@ -0,0 +1,155 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "CookieFun Skills", + "description": "Access Twitter/X analytics and insights using CookieFun API. 
Get data about accounts, tweets, followers, and trends across different industry sectors.", + "x-icon": "https://ai.service.crestal.dev/skills/cookiefun/cookiefun.png", + "x-tags": [ + "Twitter", + "Social Media", + "Analytics", + "X" + ], + "x-nft-requirement": 10, + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Toggle to enable or disable all CookieFun skills", + "default": false + }, + "states": { + "type": "object", + "properties": { + "get_sectors": { + "type": "string", + "title": "Get Sectors", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve a list of all available industry sectors in CookieFun, useful for exploring trending topics and categorization", + "default": "disabled" + }, + "get_account_details": { + "type": "string", + "title": "Get Account Details", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch comprehensive metrics about any Twitter account including followers, engagement rates, impressions, and other analytics", + "default": "disabled" + }, + "get_account_smart_followers": { + "type": "string", + "title": "Get Account Smart Followers", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Identify the most valuable followers of any Twitter account based on influence, engagement, and reach metrics", + "default": "disabled" + }, + "search_accounts": { + "type": "string", + "title": "Search Accounts", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Find Twitter accounts posting about specific topics with filtering by engagement, impressions, and tweet types", + "default": "disabled" + }, + "get_account_feed": { + "type": "string", + "title": "Get Account Feed", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Access a Twitter account's feed with powerful filtering by date range, media content, tweet type, and sorting options", + "default": "disabled" + } + }, + "description": "Configure access levels for each CookieFun skill - disabled, available to all users, or restricted to agent owner only" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Choose whether to use a platform-provided API key or provide your own CookieFun API key", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "CookieFun API Key", + "description": "Your personal CookieFun API key, required when using Owner Provided option (sign up at cookie.fun)", + "x-sensitive": true, + "x-link": "[Get your API key](https://cookie.fun/)" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/cookiefun/search_accounts.py 
b/intentkit/skills/cookiefun/search_accounts.py new file mode 100644 index 00000000..3360dfef --- /dev/null +++ b/intentkit/skills/cookiefun/search_accounts.py @@ -0,0 +1,223 @@ +from enum import IntEnum +from typing import Any, Dict, List, Optional, Type, Union + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.cookiefun.base import CookieFunBaseTool, logger +from intentkit.skills.cookiefun.constants import DEFAULT_HEADERS, ENDPOINTS + + +class TweetType(IntEnum): + """Tweet type for filtering.""" + + Original = 0 + Reply = 1 + Quote = 2 + + +class SortBy(IntEnum): + """Sort options for account search results.""" + + SmartEngagementPoints = 0 + Impressions = 1 + MatchingTweetsCount = 2 + + +class SortOrder(IntEnum): + """Sort order options.""" + + Ascending = 0 + Descending = 1 + + +class SearchAccountsInput(BaseModel): + """Input for SearchAccounts tool.""" + + searchQuery: str = Field( + description="Search query to find Twitter accounts that authored tweets matching the criteria" + ) + + type: Optional[int] = Field( + default=None, + description="Type of tweets to search for: 0 for Original, 1 for Reply, 2 for Quote (leave empty for all types)", + ) + + sortBy: Optional[int] = Field( + default=None, + description="Sort by: 0 for SmartEngagementPoints, 1 for Impressions, 2 for MatchingTweetsCount", + ) + + sortOrder: Optional[int] = Field( + default=None, + description="Sort order: 0 for Ascending, 1 for Descending", + ) + + +class SearchAccounts(CookieFunBaseTool): + """Tool to search for Twitter accounts based on tweet content.""" + + name: str = "cookiefun_search_accounts" + description: str = "Searches for Twitter accounts that authored tweets matching specified search criteria." + args_schema: Type[BaseModel] = SearchAccountsInput + + async def _arun( + self, + searchQuery: str, + type: Optional[int] = None, + sortBy: Optional[int] = None, + sortOrder: Optional[int] = None, + **kwargs, + ) -> Union[List[Dict[str, Any]], str]: + """ + Search for Twitter accounts based on tweet content. + + Args: + searchQuery: Search query to match tweet content + type: Type of tweets to search for (0=Original, 1=Reply, 2=Quote) + sortBy: Sort by field (0=SmartEngagementPoints, 1=Impressions, 2=MatchingTweetsCount) + sortOrder: Sort order (0=Ascending, 1=Descending) + + Returns: + List of Twitter accounts matching the search criteria with metrics. + """ + logger.info( + "Searching accounts with query=%s, type=%s, sortBy=%s, sortOrder=%s", + searchQuery, + type, + sortBy, + sortOrder, + ) + + if not searchQuery: + logger.error("No search query provided") + return "Error: searchQuery is required." + + try: + # Get context to retrieve API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No API key provided for CookieFun API") + return "Error: No API key provided for CookieFun API. Please configure the API key in the agent settings." 
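+
+            # The integer codes accepted below correspond to the TweetType,
+            # SortBy, and SortOrder enums defined at the top of this module.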
+ + # Prepare request payload + payload = {"searchQuery": searchQuery} + + # Add optional parameters if provided + if type is not None: + payload["type"] = type + if sortBy is not None: + payload["sortBy"] = sortBy + if sortOrder is not None: + payload["sortOrder"] = sortOrder + + # Make API request + headers = {**DEFAULT_HEADERS, "x-api-key": api_key} + + async with httpx.AsyncClient() as client: + response = await client.post( + ENDPOINTS["search_accounts"], headers=headers, json=payload + ) + logger.debug( + "Received response with status code: %d", response.status_code + ) + + response.raise_for_status() + data = response.json() + + # Check different possible response structures + if data.get("success") and "ok" in data and "entries" in data["ok"]: + accounts = data["ok"]["entries"] + logger.info( + "Successfully retrieved %d matching accounts from entries field", + len(accounts), + ) + return accounts + elif data.get("success") and "ok" in data and "accounts" in data["ok"]: + accounts = data["ok"]["accounts"] + logger.info( + "Successfully retrieved %d matching accounts", len(accounts) + ) + return accounts + elif data.get("success") and "ok" in data and "results" in data["ok"]: + accounts = data["ok"]["results"] + logger.info( + "Successfully retrieved %d matching accounts from results field", + len(accounts), + ) + return accounts + elif ( + data.get("success") + and "ok" in data + and isinstance(data["ok"], list) + ): + accounts = data["ok"] + logger.info( + "Successfully retrieved %d matching accounts from ok list", + len(accounts), + ) + return accounts + elif data.get("success") and isinstance(data.get("accounts"), list): + accounts = data["accounts"] + logger.info( + "Successfully retrieved %d matching accounts from top level accounts", + len(accounts), + ) + return accounts + elif data.get("success") and isinstance(data.get("results"), list): + accounts = data["results"] + logger.info( + "Successfully retrieved %d matching accounts from top level results", + len(accounts), + ) + return accounts + elif data.get("success") and isinstance(data.get("entries"), list): + accounts = data["entries"] + logger.info( + "Successfully retrieved %d matching accounts from top level entries", + len(accounts), + ) + return accounts + elif "accounts" in data and isinstance(data["accounts"], list): + accounts = data["accounts"] + logger.info( + "Successfully retrieved %d matching accounts from direct accounts field", + len(accounts), + ) + return accounts + elif "results" in data and isinstance(data["results"], list): + accounts = data["results"] + logger.info( + "Successfully retrieved %d matching accounts from direct results field", + len(accounts), + ) + return accounts + elif "entries" in data and isinstance(data["entries"], list): + accounts = data["entries"] + logger.info( + "Successfully retrieved %d matching accounts from direct entries field", + len(accounts), + ) + return accounts + else: + # If we can't find accounts in the expected structure, log the full response + logger.error( + "Could not find matching accounts in response structure. 
Full response: %s", + data, + ) + error_msg = data.get( + "error", "Unknown error - check API response format" + ) + logger.error("Error in API response: %s", error_msg) + return f"Error searching accounts: {error_msg}" + + except httpx.HTTPStatusError as e: + logger.error("HTTP error: %d - %s", e.response.status_code, e.response.text) + return f"HTTP error occurred: {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + logger.error("Request error: %s", str(e)) + return f"Request error occurred: {str(e)}" + except Exception as e: + logger.exception("Unexpected error occurred") + return f"An unexpected error occurred: {str(e)}" diff --git a/intentkit/skills/cryptocompare/__init__.py b/intentkit/skills/cryptocompare/__init__.py new file mode 100644 index 00000000..c7863637 --- /dev/null +++ b/intentkit/skills/cryptocompare/__init__.py @@ -0,0 +1,130 @@ +"""CryptoCompare skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool +from intentkit.skills.cryptocompare.fetch_news import CryptoCompareFetchNews +from intentkit.skills.cryptocompare.fetch_price import CryptoCompareFetchPrice +from intentkit.skills.cryptocompare.fetch_top_exchanges import ( + CryptoCompareFetchTopExchanges, +) +from intentkit.skills.cryptocompare.fetch_top_market_cap import ( + CryptoCompareFetchTopMarketCap, +) +from intentkit.skills.cryptocompare.fetch_top_volume import CryptoCompareFetchTopVolume +from intentkit.skills.cryptocompare.fetch_trading_signals import ( + CryptoCompareFetchTradingSignals, +) + +# Cache skills at the system level, because they are stateless +_cache: dict[str, CryptoCompareBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + fetch_news: SkillState + fetch_price: SkillState + fetch_trading_signals: SkillState + fetch_top_market_cap: SkillState + fetch_top_exchanges: SkillState + fetch_top_volume: SkillState + + +class Config(SkillConfig): + """Configuration for CryptoCompare skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[CryptoCompareBaseTool]: + """Get all CryptoCompare skills. + + Args: + config: The configuration for CryptoCompare skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of CryptoCompare skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_cryptocompare_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_cryptocompare_skill( + name: str, + store: SkillStoreABC, +) -> CryptoCompareBaseTool: + """Get a CryptoCompare skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested CryptoCompare skill + """ + + if name == "fetch_news": + if name not in _cache: + _cache[name] = CryptoCompareFetchNews( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_price": + if name not in _cache: + _cache[name] = CryptoCompareFetchPrice( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_trading_signals": + if name not in _cache: + _cache[name] = CryptoCompareFetchTradingSignals( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_top_market_cap": + if name not in _cache: + _cache[name] = CryptoCompareFetchTopMarketCap( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_top_exchanges": + if name not in _cache: + _cache[name] = CryptoCompareFetchTopExchanges( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_top_volume": + if name not in _cache: + _cache[name] = CryptoCompareFetchTopVolume( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown CryptoCompare skill: {name}") + return None diff --git a/intentkit/skills/cryptocompare/api.py b/intentkit/skills/cryptocompare/api.py new file mode 100644 index 00000000..18fdc91a --- /dev/null +++ b/intentkit/skills/cryptocompare/api.py @@ -0,0 +1,159 @@ +"""CryptoCompare API implementation and shared schemas.""" + +import time +from typing import List + +import httpx +from pydantic import BaseModel, Field + +CRYPTO_COMPARE_BASE_URL = "https://min-api.cryptocompare.com" + + +# Input Schemas +class FetchNewsInput(BaseModel): + """Input schema for fetching news.""" + + token: str = Field( + ..., description="Token symbol to fetch news for (e.g., BTC, ETH, SOL)" + ) + + +class FetchPriceInput(BaseModel): + """Input schema for fetching crypto prices.""" + + from_symbol: str = Field( + ..., + description="Base cryptocurrency symbol to get prices for (e.g., 'BTC', 'ETH')", + ) + to_symbols: List[str] = Field( + ..., + description="List of target currencies (fiat or crypto) (e.g., ['USD', 'EUR', 'JPY'])", + ) + + +class FetchTradingSignalsInput(BaseModel): + """Input schema for fetching trading signals.""" + + from_symbol: str = Field( + ..., + description="Cryptocurrency symbol to fetch trading signals for (e.g., 'BTC')", + ) + + +class FetchTopMarketCapInput(BaseModel): + """Input schema for fetching top cryptocurrencies by market cap.""" + + to_symbol: str = Field( + "USD", + description="Quote currency for market cap calculation (e.g., 'USD', 'EUR')", + ) + + +class FetchTopExchangesInput(BaseModel): + """Input schema for fetching top exchanges for a trading pair.""" + + from_symbol: str = Field( + ..., description="Base cryptocurrency symbol for the trading pair (e.g., 'BTC')" + ) + to_symbol: str = Field( + "USD", + description="Quote currency symbol for the trading pair. Defaults to 'USD'", + ) + + +class FetchTopVolumeInput(BaseModel): + """Input schema for fetching top cryptocurrencies by trading volume.""" + + to_symbol: str = Field( + "USD", description="Quote currency for volume calculation. Defaults to 'USD'" + ) + + +# API Functions +async def fetch_price(api_key: str, from_symbol: str, to_symbols: List[str]) -> dict: + """ + Fetch current price for a cryptocurrency in multiple currencies. 
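+
+    A minimal usage sketch (symbols and prices are illustrative, not real
+    market data):
+
+        data = await fetch_price(api_key, "BTC", ["USD", "EUR"])
+        # success -> {"USD": 64250.5, "EUR": 59180.25}
+        # failure -> {"error": "API returned status code 401"}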
+ """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/price" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + params = {"fsym": from_symbol.upper(), "tsyms": ",".join(to_symbols)} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_trading_signals(api_key: str, from_symbol: str) -> dict: + """ + Fetch the latest trading signals. + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/tradingsignals/intotheblock/latest" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + params = {"fsym": from_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_top_market_cap( + api_key: str, limit: int, to_symbol: str = "USD" +) -> dict: + """ + Fetch top cryptocurrencies by market cap. + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/top/mktcapfull" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + params = {"limit": limit, "tsym": to_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_top_exchanges( + api_key: str, from_symbol: str, to_symbol: str = "USD" +) -> dict: + """ + Fetch top exchanges for a cryptocurrency pair. + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/top/exchanges" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + params = {"fsym": from_symbol.upper(), "tsym": to_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_top_volume(api_key: str, limit: int, to_symbol: str = "USD") -> dict: + """ + Fetch top cryptocurrencies by total volume. + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/top/totalvolfull" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + params = {"limit": limit, "tsym": to_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_news(api_key: str, token: str, timestamp: int = None) -> dict: + """ + Fetch news for a specific token and timestamp. 
+ """ + if timestamp is None: + timestamp = int(time.time()) + url = f"{CRYPTO_COMPARE_BASE_URL}/data/v2/news/?lang=EN&lTs={timestamp}&categories={token}&sign=true" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + async with httpx.AsyncClient() as client: + response = await client.get(url, headers=headers) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() diff --git a/intentkit/skills/cryptocompare/base.py b/intentkit/skills/cryptocompare/base.py new file mode 100644 index 00000000..7e2f6363 --- /dev/null +++ b/intentkit/skills/cryptocompare/base.py @@ -0,0 +1,303 @@ +"""Base class for all CryptoCompare tools.""" + +import logging +from datetime import datetime, timedelta, timezone +from typing import Any, Dict, List, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.utils.error import RateLimitExceeded + +CRYPTO_COMPARE_BASE_URL = "https://min-api.cryptocompare.com" + +logger = logging.getLogger(__name__) + + +class CryptoCompareBaseTool(IntentKitSkill): + """Base class for CryptoCompare tools. + + This class provides common functionality for all CryptoCompare API tools: + - Rate limiting + - API client handling + - State management through skill_store + """ + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "cryptocompare" + + async def check_rate_limit( + self, agent_id: str, max_requests: int = 1, interval: int = 15 + ) -> None: + """Check if the rate limit has been exceeded. + + Args: + agent_id: The ID of the agent. + max_requests: Maximum number of requests allowed within the rate limit window. + interval: Time interval in minutes for the rate limit window. + + Raises: + RateLimitExceeded: If the rate limit has been exceeded. + """ + rate_limit = await self.skill_store.get_agent_skill_data( + agent_id, self.name, "rate_limit" + ) + + current_time = datetime.now(tz=timezone.utc) + + if ( + rate_limit + and rate_limit.get("reset_time") + and rate_limit["count"] is not None + and datetime.fromisoformat(rate_limit["reset_time"]) > current_time + ): + if rate_limit["count"] >= max_requests: + raise RateLimitExceeded("Rate limit exceeded") + + rate_limit["count"] += 1 + await self.skill_store.save_agent_skill_data( + agent_id, self.name, "rate_limit", rate_limit + ) + + return + + # If no rate limit exists or it has expired, create a new one + new_rate_limit = { + "count": 1, + "reset_time": (current_time + timedelta(minutes=interval)).isoformat(), + } + await self.skill_store.save_agent_skill_data( + agent_id, self.name, "rate_limit", new_rate_limit + ) + return + + async def fetch_price( + self, api_key: str, from_symbol: str, to_symbols: List[str] + ) -> dict: + """Fetch current price for a cryptocurrency in multiple currencies. 
+ + Args: + api_key: The CryptoCompare API key + from_symbol: Base cryptocurrency symbol to get prices for (e.g., 'BTC', 'ETH') + to_symbols: List of target currencies (fiat or crypto) (e.g., ['USD', 'EUR', 'JPY']) + + Returns: + Dict containing the price data + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/price" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + + # Ensure from_symbol is a string, not a list + if isinstance(from_symbol, list): + from_symbol = from_symbol[0] if from_symbol else "" + + # Ensure to_symbols is a list + if not isinstance(to_symbols, list): + to_symbols = [to_symbols] if to_symbols else ["USD"] + + params = { + "fsym": from_symbol.upper(), + "tsyms": ",".join([s.upper() for s in to_symbols]), + } + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + logger.error(f"API returned status code {response.status_code}") + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + async def fetch_trading_signals(self, api_key: str, from_symbol: str) -> dict: + """Fetch the latest trading signals. + + Args: + api_key: The CryptoCompare API key + from_symbol: Cryptocurrency symbol to fetch trading signals for (e.g., 'BTC') + + Returns: + Dict containing the trading signals data + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/tradingsignals/intotheblock/latest" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + + # Ensure from_symbol is a string, not a list + if isinstance(from_symbol, list): + from_symbol = from_symbol[0] if from_symbol else "" + + params = {"fsym": from_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + logger.error(f"API returned status code {response.status_code}") + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + async def fetch_top_market_cap( + self, api_key: str, limit: int, to_symbol: str = "USD" + ) -> dict: + """Fetch top cryptocurrencies by market cap. + + Args: + api_key: The CryptoCompare API key + limit: Number of cryptocurrencies to fetch + to_symbol: Quote currency for market cap calculation (e.g., 'USD', 'EUR') + + Returns: + Dict containing the top market cap data + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/top/mktcapfull" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + + # Ensure to_symbol is a string, not a list + if isinstance(to_symbol, list): + to_symbol = to_symbol[0] if to_symbol else "USD" + + params = {"limit": limit, "tsym": to_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + logger.error(f"API returned status code {response.status_code}") + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + async def fetch_top_exchanges( + self, api_key: str, from_symbol: str, to_symbol: str = "USD" + ) -> dict: + """Fetch top exchanges for a cryptocurrency pair. + + Args: + api_key: The CryptoCompare API key + from_symbol: Base cryptocurrency symbol for the trading pair (e.g., 'BTC') + to_symbol: Quote currency symbol for the trading pair. 
Defaults to 'USD' + + Returns: + Dict containing the top exchanges data + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/top/exchanges" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + + # Ensure from_symbol and to_symbol are strings, not lists + if isinstance(from_symbol, list): + from_symbol = from_symbol[0] if from_symbol else "" + if isinstance(to_symbol, list): + to_symbol = to_symbol[0] if to_symbol else "USD" + + params = {"fsym": from_symbol.upper(), "tsym": to_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + logger.error(f"API returned status code {response.status_code}") + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + async def fetch_top_volume( + self, api_key: str, limit: int, to_symbol: str = "USD" + ) -> dict: + """Fetch top cryptocurrencies by total volume. + + Args: + api_key: The CryptoCompare API key + limit: Number of cryptocurrencies to fetch + to_symbol: Quote currency for volume calculation. Defaults to 'USD' + + Returns: + Dict containing the top volume data + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/top/totalvolfull" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + + # Ensure to_symbol is a string, not a list + if isinstance(to_symbol, list): + to_symbol = to_symbol[0] if to_symbol else "USD" + + params = {"limit": limit, "tsym": to_symbol.upper()} + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + logger.error(f"API returned status code {response.status_code}") + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + async def fetch_news(self, api_key: str, token: str, timestamp: int = None) -> dict: + """Fetch news for a specific token and timestamp. 
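+
+        When ``timestamp`` is omitted, the ``lTs`` query parameter is simply
+        left off the request; the module-level helper in api.py instead fills
+        in the current time.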
+ + Args: + api_key: The CryptoCompare API key + token: Token symbol to fetch news for (e.g., BTC, ETH, SOL) + timestamp: Optional timestamp for fetching news + + Returns: + Dict containing the news data + """ + url = f"{CRYPTO_COMPARE_BASE_URL}/data/v2/news/" + headers = {"Accept": "application/json", "Authorization": f"Bearer {api_key}"} + + # Ensure token is a string, not a list + if isinstance(token, list): + token = token[0] if token else "" + + params = {"categories": token.upper()} + if timestamp: + params["lTs"] = timestamp + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params, headers=headers) + if response.status_code != 200: + logger.error(f"API returned status code {response.status_code}") + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +# Response Models +class CryptoPrice(BaseModel): + """Model representing a cryptocurrency price.""" + + from_symbol: str + to_symbol: str + price: float + + +class CryptoNews(BaseModel): + """Model representing a cryptocurrency news article.""" + + id: str + published_on: int + title: str + url: str + body: str + tags: str + categories: str + source: str + source_info: Dict[str, Any] = Field(default_factory=dict) + + +class CryptoExchange(BaseModel): + """Model representing a cryptocurrency exchange.""" + + exchange: str + from_symbol: str + to_symbol: str + volume24h: float + volume24h_to: float + + +class CryptoCurrency(BaseModel): + """Model representing a cryptocurrency.""" + + id: str + name: str + symbol: str + full_name: str + market_cap: float = 0 + volume24h: float = 0 + price: float = 0 + change24h: float = 0 diff --git a/intentkit/skills/cryptocompare/cryptocompare.png b/intentkit/skills/cryptocompare/cryptocompare.png new file mode 100644 index 00000000..1eec8eb8 Binary files /dev/null and b/intentkit/skills/cryptocompare/cryptocompare.png differ diff --git a/intentkit/skills/cryptocompare/fetch_news.py b/intentkit/skills/cryptocompare/fetch_news.py new file mode 100644 index 00000000..6a09585b --- /dev/null +++ b/intentkit/skills/cryptocompare/fetch_news.py @@ -0,0 +1,95 @@ +"""Tool for fetching cryptocurrency news via CryptoCompare API.""" + +import logging +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool, CryptoNews + +logger = logging.getLogger(__name__) + + +class CryptoCompareFetchNewsInput(BaseModel): + """Input for CryptoCompareFetchNews tool.""" + + token: str = Field( + ..., description="Token symbol to fetch news for (e.g., BTC, ETH, SOL)" + ) + + +class CryptoCompareFetchNews(CryptoCompareBaseTool): + """Tool for fetching cryptocurrency news from CryptoCompare. + + This tool uses the CryptoCompare API to retrieve the latest news articles + related to a specific cryptocurrency token. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "cryptocompare_fetch_news" + description: str = "Fetch the latest cryptocurrency news for a specific token" + args_schema: Type[BaseModel] = CryptoCompareFetchNewsInput + + async def _arun( + self, + token: str, + **kwargs, + ) -> List[CryptoNews]: + """Async implementation of the tool to fetch cryptocurrency news. + + Args: + token: Token symbol to fetch news for (e.g., BTC, ETH, SOL) + config: The configuration for the runnable, containing agent context. 
+ + Returns: + List[CryptoNews]: A list of cryptocurrency news articles. + + Raises: + Exception: If there's an error accessing the CryptoCompare API. + """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Check rate limit + await self.check_rate_limit(context.agent_id, max_requests=5, interval=60) + + # Get API key from context + api_key = skill_config.get("api_key") + if not api_key: + raise ValueError("CryptoCompare API key not found in configuration") + + # Fetch news data directly + news_data = await self.fetch_news(api_key, token) + + # Check for errors + if "error" in news_data: + raise ValueError(news_data["error"]) + + # Convert to list of CryptoNews objects + result = [] + if "Data" in news_data and news_data["Data"]: + for article in news_data["Data"]: + result.append( + CryptoNews( + id=str(article["id"]), + published_on=article["published_on"], + title=article["title"], + url=article["url"], + body=article["body"], + tags=article.get("tags", ""), + categories=article.get("categories", ""), + source=article["source"], + source_info=article.get("source_info", {}), + ) + ) + + return result + + except Exception as e: + logger.error("Error fetching news: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/cryptocompare/fetch_price.py b/intentkit/skills/cryptocompare/fetch_price.py new file mode 100644 index 00000000..8bf9aa42 --- /dev/null +++ b/intentkit/skills/cryptocompare/fetch_price.py @@ -0,0 +1,98 @@ +"""Tool for fetching cryptocurrency prices via CryptoCompare API.""" + +import logging +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool, CryptoPrice + +logger = logging.getLogger(__name__) + + +class CryptoCompareFetchPriceInput(BaseModel): + """Input for CryptoCompareFetchPrice tool.""" + + from_symbol: str = Field( + ..., + description="Base cryptocurrency symbol to get prices for (e.g., 'BTC', 'ETH')", + ) + to_symbols: List[str] = Field( + ..., + description="List of target currencies (fiat or crypto) (e.g., ['USD', 'EUR', 'JPY'])", + ) + + +class CryptoCompareFetchPrice(CryptoCompareBaseTool): + """Tool for fetching cryptocurrency prices from CryptoCompare. + + This tool uses the CryptoCompare API to retrieve real-time cryptocurrency price data + with multi-currency support. Provide a base currency (e.g., 'BTC', 'ETH') and a list + of target currencies (e.g., ['USD', 'EUR', 'JPY']) to get current exchange rates. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "cryptocompare_fetch_price" + description: str = ( + "Fetch real-time cryptocurrency price data with multi-currency support" + ) + args_schema: Type[BaseModel] = CryptoCompareFetchPriceInput + + async def _arun( + self, + from_symbol: str, + to_symbols: List[str], + **kwargs, + ) -> List[CryptoPrice]: + """Async implementation of the tool to fetch cryptocurrency prices. + + Args: + from_symbol: Base cryptocurrency symbol to get prices for (e.g., 'BTC', 'ETH') + to_symbols: List of target currencies (fiat or crypto) (e.g., ['USD', 'EUR', 'JPY']) + config: The configuration for the runnable, containing agent context. + + Returns: + List[CryptoPrice]: A list of cryptocurrency prices for each target currency. + + Raises: + Exception: If there's an error accessing the CryptoCompare API. 
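+
+        Example (hypothetical invocation, once the skill is configured):
+
+            prices = await skill._arun(from_symbol="BTC", to_symbols=["USD", "EUR"])
+            # -> [CryptoPrice(from_symbol="BTC", to_symbol="USD", price=...), ...]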
+ """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Check rate limit + await self.check_rate_limit(context.agent_id, max_requests=10, interval=60) + + # Get API key from context + api_key = skill_config.get("api_key") + if not api_key: + raise ValueError("CryptoCompare API key not found in configuration") + + # Fetch price data directly + price_data = await self.fetch_price(api_key, from_symbol, to_symbols) + + # Check for errors + if "error" in price_data: + raise ValueError(price_data["error"]) + + # Convert to list of CryptoPrice objects + result = [] + for to_symbol, price in price_data.items(): + result.append( + CryptoPrice( + from_symbol=from_symbol, + to_symbol=to_symbol, + price=price, + ) + ) + + return result + + except Exception as e: + logger.error("Error fetching price: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/cryptocompare/fetch_top_exchanges.py b/intentkit/skills/cryptocompare/fetch_top_exchanges.py new file mode 100644 index 00000000..e5dae545 --- /dev/null +++ b/intentkit/skills/cryptocompare/fetch_top_exchanges.py @@ -0,0 +1,112 @@ +"""Tool for fetching top exchanges for a cryptocurrency pair via CryptoCompare API.""" + +import logging +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool, CryptoExchange + +logger = logging.getLogger(__name__) + + +class CryptoCompareFetchTopExchangesInput(BaseModel): + """Input for CryptoCompareFetchTopExchanges tool.""" + + from_symbol: str = Field( + ..., description="Base cryptocurrency symbol for the trading pair (e.g., 'BTC')" + ) + to_symbol: str = Field( + "USD", + description="Quote currency symbol for the trading pair. Defaults to 'USD'", + ) + limit: int = Field( + 10, + description="Number of exchanges to fetch (max 100)", + ge=1, + le=100, + ) + + +class CryptoCompareFetchTopExchanges(CryptoCompareBaseTool): + """Tool for fetching top exchanges for a cryptocurrency pair from CryptoCompare. + + This tool uses the CryptoCompare API to retrieve the top exchanges + for a specific cryptocurrency trading pair, ranked by volume. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "cryptocompare_fetch_top_exchanges" + description: str = ( + "Fetch top exchanges for a cryptocurrency trading pair, ranked by volume" + ) + args_schema: Type[BaseModel] = CryptoCompareFetchTopExchangesInput + + async def _arun( + self, + from_symbol: str, + to_symbol: str = "USD", + limit: int = 10, + **kwargs, + ) -> List[CryptoExchange]: + """Async implementation of the tool to fetch top exchanges for a cryptocurrency pair. + + Args: + from_symbol: Base cryptocurrency symbol for the trading pair (e.g., 'BTC') + to_symbol: Quote currency symbol for the trading pair. Defaults to 'USD' + limit: Number of exchanges to fetch (max 100) + config: The configuration for the runnable, containing agent context. + + Returns: + List[CryptoExchange]: A list of top exchanges for the specified trading pair. + + Raises: + Exception: If there's an error accessing the CryptoCompare API. 
+ """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Check rate limit + await self.check_rate_limit(context.agent_id, max_requests=5, interval=60) + + # Get API key from context + api_key = skill_config.get("api_key") + if not api_key: + raise ValueError("CryptoCompare API key not found in configuration") + + # Fetch top exchanges data directly + exchanges_data = await self.fetch_top_exchanges( + api_key, from_symbol, to_symbol + ) + + # Check for errors + if "error" in exchanges_data: + raise ValueError(exchanges_data["error"]) + + # Convert to list of CryptoExchange objects + result = [] + if "Data" in exchanges_data and exchanges_data["Data"]: + for item in exchanges_data["Data"]: + if len(result) >= limit: + break + + result.append( + CryptoExchange( + exchange=item.get("exchange", ""), + from_symbol=from_symbol, + to_symbol=to_symbol, + volume24h=item.get("volume24h", 0), + volume24h_to=item.get("volume24hTo", 0), + ) + ) + + return result + + except Exception as e: + logger.error("Error fetching top exchanges: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/cryptocompare/fetch_top_market_cap.py b/intentkit/skills/cryptocompare/fetch_top_market_cap.py new file mode 100644 index 00000000..af862d98 --- /dev/null +++ b/intentkit/skills/cryptocompare/fetch_top_market_cap.py @@ -0,0 +1,108 @@ +"""Tool for fetching top cryptocurrencies by market cap via CryptoCompare API.""" + +import logging +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool, CryptoCurrency + +logger = logging.getLogger(__name__) + + +class CryptoCompareFetchTopMarketCapInput(BaseModel): + """Input for CryptoCompareFetchTopMarketCap tool.""" + + to_symbol: str = Field( + "USD", + description="Quote currency for market cap calculation (e.g., 'USD', 'EUR')", + ) + limit: int = Field( + 10, + description="Number of cryptocurrencies to fetch (max 100)", + ge=1, + le=100, + ) + + +class CryptoCompareFetchTopMarketCap(CryptoCompareBaseTool): + """Tool for fetching top cryptocurrencies by market cap from CryptoCompare. + + This tool uses the CryptoCompare API to retrieve the top cryptocurrencies + ranked by market capitalization in a specified quote currency. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "cryptocompare_fetch_top_market_cap" + description: str = "Fetch top cryptocurrencies ranked by market capitalization" + args_schema: Type[BaseModel] = CryptoCompareFetchTopMarketCapInput + + async def _arun( + self, + to_symbol: str = "USD", + limit: int = 10, + **kwargs, + ) -> List[CryptoCurrency]: + """Async implementation of the tool to fetch top cryptocurrencies by market cap. + + Args: + to_symbol: Quote currency for market cap calculation (e.g., 'USD', 'EUR') + limit: Number of cryptocurrencies to fetch (max 100) + config: The configuration for the runnable, containing agent context. + + Returns: + List[CryptoCurrency]: A list of top cryptocurrencies by market cap. + + Raises: + Exception: If there's an error accessing the CryptoCompare API. 
+ """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Check rate limit + await self.check_rate_limit(context.agent_id, max_requests=5, interval=60) + + # Get API key from context + api_key = skill_config.get("api_key") + if not api_key: + raise ValueError("CryptoCompare API key not found in configuration") + + # Fetch top market cap data directly + market_cap_data = await self.fetch_top_market_cap(api_key, limit, to_symbol) + + # Check for errors + if "error" in market_cap_data: + raise ValueError(market_cap_data["error"]) + + # Convert to list of CryptoCurrency objects + result = [] + if "Data" in market_cap_data and market_cap_data["Data"]: + for item in market_cap_data["Data"]: + coin_info = item.get("CoinInfo", {}) + raw_data = item.get("RAW", {}).get(to_symbol, {}) + + result.append( + CryptoCurrency( + id=str(coin_info.get("Id", "")), + name=coin_info.get("Name", ""), + symbol=coin_info.get( + "Name", "" + ), # API uses same field for symbol + full_name=coin_info.get("FullName", ""), + market_cap=raw_data.get("MKTCAP", 0), + volume24h=raw_data.get("VOLUME24HOUR", 0), + price=raw_data.get("PRICE", 0), + change24h=raw_data.get("CHANGEPCT24HOUR", 0), + ) + ) + + return result + + except Exception as e: + logger.error("Error fetching top market cap: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/cryptocompare/fetch_top_volume.py b/intentkit/skills/cryptocompare/fetch_top_volume.py new file mode 100644 index 00000000..1e8ad7f7 --- /dev/null +++ b/intentkit/skills/cryptocompare/fetch_top_volume.py @@ -0,0 +1,107 @@ +"""Tool for fetching top cryptocurrencies by trading volume via CryptoCompare API.""" + +import logging +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool, CryptoCurrency + +logger = logging.getLogger(__name__) + + +class CryptoCompareFetchTopVolumeInput(BaseModel): + """Input for CryptoCompareFetchTopVolume tool.""" + + to_symbol: str = Field( + "USD", description="Quote currency for volume calculation. Defaults to 'USD'" + ) + limit: int = Field( + 10, + description="Number of cryptocurrencies to fetch (max 100)", + ge=1, + le=100, + ) + + +class CryptoCompareFetchTopVolume(CryptoCompareBaseTool): + """Tool for fetching top cryptocurrencies by trading volume from CryptoCompare. + + This tool uses the CryptoCompare API to retrieve the top cryptocurrencies + ranked by 24-hour trading volume in a specified quote currency. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "cryptocompare_fetch_top_volume" + description: str = "Fetch top cryptocurrencies ranked by 24-hour trading volume" + args_schema: Type[BaseModel] = CryptoCompareFetchTopVolumeInput + + async def _arun( + self, + to_symbol: str = "USD", + limit: int = 10, + **kwargs, + ) -> List[CryptoCurrency]: + """Async implementation of the tool to fetch top cryptocurrencies by trading volume. + + Args: + to_symbol: Quote currency for volume calculation. Defaults to 'USD' + limit: Number of cryptocurrencies to fetch (max 100) + config: The configuration for the runnable, containing agent context. + + Returns: + List[CryptoCurrency]: A list of top cryptocurrencies by trading volume. + + Raises: + Exception: If there's an error accessing the CryptoCompare API. 
+ """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Check rate limit + await self.check_rate_limit(context.agent_id, max_requests=5, interval=60) + + # Get API key from context + api_key = skill_config.get("api_key") + if not api_key: + raise ValueError("CryptoCompare API key not found in configuration") + + # Fetch top volume data directly + volume_data = await self.fetch_top_volume(api_key, limit, to_symbol) + + # Check for errors + if "error" in volume_data: + raise ValueError(volume_data["error"]) + + # Convert to list of CryptoCurrency objects + result = [] + if "Data" in volume_data and volume_data["Data"]: + for item in volume_data["Data"]: + coin_info = item.get("CoinInfo", {}) + raw_data = item.get("RAW", {}).get(to_symbol, {}) + + result.append( + CryptoCurrency( + id=str(coin_info.get("Id", "")), + name=coin_info.get("Name", ""), + symbol=coin_info.get( + "Name", "" + ), # API uses same field for symbol + full_name=coin_info.get("FullName", ""), + market_cap=raw_data.get("MKTCAP", 0), + volume24h=raw_data.get("VOLUME24HOUR", 0), + price=raw_data.get("PRICE", 0), + change24h=raw_data.get("CHANGEPCT24HOUR", 0), + ) + ) + + return result + + except Exception as e: + logger.error("Error fetching top volume: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/cryptocompare/fetch_trading_signals.py b/intentkit/skills/cryptocompare/fetch_trading_signals.py new file mode 100644 index 00000000..dfdd96bb --- /dev/null +++ b/intentkit/skills/cryptocompare/fetch_trading_signals.py @@ -0,0 +1,106 @@ +"""Tool for fetching cryptocurrency trading signals via CryptoCompare API.""" + +import logging +from typing import Dict, List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.cryptocompare.base import CryptoCompareBaseTool + +logger = logging.getLogger(__name__) + + +class CryptoCompareFetchTradingSignalsInput(BaseModel): + """Input for CryptoCompareFetchTradingSignals tool.""" + + from_symbol: str = Field( + ..., + description="Cryptocurrency symbol to fetch trading signals for (e.g., 'BTC')", + ) + + +class TradingSignal(BaseModel): + """Model representing a cryptocurrency trading signal.""" + + symbol: str + indicator: str + value: float + signal: str + description: str + + +class CryptoCompareFetchTradingSignals(CryptoCompareBaseTool): + """Tool for fetching cryptocurrency trading signals from CryptoCompare. + + This tool uses the CryptoCompare API to retrieve the latest trading signals + for a specific cryptocurrency. These signals can help inform trading decisions. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "cryptocompare_fetch_trading_signals" + description: str = "Fetch the latest trading signals for a specific cryptocurrency" + args_schema: Type[BaseModel] = CryptoCompareFetchTradingSignalsInput + + async def _arun( + self, + from_symbol: str, + **kwargs, + ) -> List[TradingSignal]: + """Async implementation of the tool to fetch cryptocurrency trading signals. + + Args: + from_symbol: Cryptocurrency symbol to fetch trading signals for (e.g., 'BTC') + config: The configuration for the runnable, containing agent context. + + Returns: + List[TradingSignal]: A list of trading signals for the specified cryptocurrency. + + Raises: + Exception: If there's an error accessing the CryptoCompare API. 
+ """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Check rate limit + await self.check_rate_limit(context.agent_id, max_requests=5, interval=60) + + # Get API key from context + api_key = skill_config.get("api_key") + if not api_key: + raise ValueError("CryptoCompare API key not found in configuration") + + # Fetch trading signals data directly + signals_data = await self.fetch_trading_signals(api_key, from_symbol) + + # Check for errors + if "error" in signals_data: + raise ValueError(signals_data["error"]) + + # Convert to list of TradingSignal objects + result = [] + if "Data" in signals_data and signals_data["Data"]: + for indicator_name, indicator_data in signals_data["Data"].items(): + if ( + isinstance(indicator_data, Dict) + and "sentiment" in indicator_data + ): + result.append( + TradingSignal( + symbol=from_symbol, + indicator=indicator_name, + value=indicator_data.get("score", 0.0), + signal=indicator_data.get("sentiment", ""), + description=indicator_data.get("description", ""), + ) + ) + + return result + + except Exception as e: + logger.error("Error fetching trading signals: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/cryptocompare/schema.json b/intentkit/skills/cryptocompare/schema.json new file mode 100644 index 00000000..0a0781de --- /dev/null +++ b/intentkit/skills/cryptocompare/schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "CryptoCompare", + "description": "Integration with CryptoCompare API providing cryptocurrency market data, price information, and crypto news with rate limiting capabilities", + "x-icon": "https://ai.service.crestal.dev/skills/cryptocompare/cryptocompare.png", + "x-tags": [ + "Blockchain", + "Finance" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "fetch_news": { + "type": "string", + "title": "Fetch News", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool fetches the latest cryptocurrency news articles for a specific token.\nYou can optionally specify a timestamp to get historical news, otherwise it uses the current time.\nReturns articles in English with details like title, body, source, and publish time.", + "default": "disabled" + }, + "fetch_price": { + "type": "string", + "title": "Fetch Price", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Provides real-time cryptocurrency pricing with multi-exchange aggregation and historical comparisons", + "default": "disabled" + }, + "fetch_trading_signals": { + "type": "string", + "title": "Fetch Trading Signals", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generates technical analysis signals using indicators like RSI, MACD, and Bollinger Bands", + "default": "disabled" + }, + "fetch_top_market_cap": { + "type": "string", + "title": "Fetch Top Market Cap", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + 
"description": "Tracks top cryptocurrencies by market capitalization with sector breakdowns", + "default": "disabled" + }, + "fetch_top_exchanges": { + "type": "string", + "title": "Fetch Top Exchanges", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Ranks cryptocurrency exchanges by liquidity and trading volume with market pair analysis", + "default": "disabled" + }, + "fetch_top_volume": { + "type": "string", + "title": "Fetch Top Volume", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Analyzes 24h trading volume trends across exchanges and currency pairs", + "default": "disabled" + } + }, + "description": "States for each CryptoCompare skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key for AIXBT API service", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "CryptoCompare API Key", + "x-link": "[Get your API key](https://www.cryptocompare.com/cryptopian/api-keys)", + "x-sensitive": true, + "description": "CryptoCompare API key for authentication" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/cryptopanic/__init__.py b/intentkit/skills/cryptopanic/__init__.py new file mode 100644 index 00000000..65ce4963 --- /dev/null +++ b/intentkit/skills/cryptopanic/__init__.py @@ -0,0 +1,108 @@ +"""CryptoPanic skill module for IntentKit. + +Loads and initializes skills for fetching crypto news and providing market insights using CryptoPanic API. +""" + +import logging +from typing import Dict, List, Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState + +from .base import CryptopanicBaseTool + +logger = logging.getLogger(__name__) + +# Cache for skill instances +_skill_cache: Dict[str, CryptopanicBaseTool] = {} + + +class SkillStates(TypedDict): + """Type definition for CryptoPanic skill states.""" + + fetch_crypto_news: SkillState + fetch_crypto_sentiment: SkillState + + +class Config(SkillConfig): + """Configuration schema for CryptoPanic skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: Config, + is_private: bool, + store: SkillStoreABC, + **kwargs, +) -> List[CryptopanicBaseTool]: + """Load CryptoPanic skills based on configuration. + + Args: + config: Skill configuration with states and API key. + is_private: Whether the context is private (affects skill visibility). + store: Skill store for accessing other skills. + **kwargs: Additional keyword arguments. + + Returns: + List of loaded CryptoPanic skill instances. 
+ """ + logger.info("Loading CryptoPanic skills") + available_skills = [] + + for skill_name, state in config["states"].items(): + logger.debug("Checking skill: %s, state: %s", skill_name, state) + if state == "disabled": + continue + if state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + loaded_skills = [] + for name in available_skills: + skill = get_cryptopanic_skill(name, store) + if skill: + logger.info("Successfully loaded skill: %s", name) + loaded_skills.append(skill) + else: + logger.warning("Failed to load skill: %s", name) + + return loaded_skills + + +def get_cryptopanic_skill( + name: str, + store: SkillStoreABC, +) -> Optional[CryptopanicBaseTool]: + """Retrieve a CryptoPanic skill instance by name. + + Args: + name: Name of the skill (e.g., 'fetch_crypto_news', 'fetch_crypto_sentiment'). + store: Skill store for accessing other skills. + + Returns: + CryptoPanic skill instance or None if not found or import fails. + """ + if name in _skill_cache: + logger.debug("Retrieved cached skill: %s", name) + return _skill_cache[name] + + try: + if name == "fetch_crypto_news": + from .fetch_crypto_news import FetchCryptoNews + + _skill_cache[name] = FetchCryptoNews(skill_store=store) + elif name == "fetch_crypto_sentiment": + from .fetch_crypto_sentiment import FetchCryptoSentiment + + _skill_cache[name] = FetchCryptoSentiment(skill_store=store) + else: + logger.warning("Unknown CryptoPanic skill: %s", name) + return None + + logger.debug("Cached new skill instance: %s", name) + return _skill_cache[name] + + except ImportError as e: + logger.error("Failed to import CryptoPanic skill %s: %s", name, e) + return None diff --git a/intentkit/skills/cryptopanic/base.py b/intentkit/skills/cryptopanic/base.py new file mode 100644 index 00000000..e162d3b9 --- /dev/null +++ b/intentkit/skills/cryptopanic/base.py @@ -0,0 +1,55 @@ +"""Base module for CryptoPanic skills. + +Defines the base class and shared utilities for CryptoPanic skills. +""" + +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + +base_url = "https://cryptopanic.com/api/v1/posts/" + + +class CryptopanicBaseTool(IntentKitSkill): + """Base class for CryptoPanic skills. + + Provides common functionality for interacting with the CryptoPanic API, + including API key retrieval and skill store access. + """ + + name: str = Field(description="Tool name") + description: str = Field(description="Tool description") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field(description="Skill store for data persistence") + + def get_api_key(self) -> str: + """Retrieve the CryptoPanic API key from context. + + Returns: + API key string. + + Raises: + ToolException: If the API key is not found. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "agent_owner": + api_key = skill_config.get("api_key") + if api_key: + return api_key + else: + raise ToolException("No api_key found in agent_owner configuration") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}. Only 'agent_owner' is supported for CryptoPanic." 
+ ) + + @property + def category(self) -> str: + """Category of the skill.""" + return "cryptopanic" diff --git a/intentkit/skills/cryptopanic/cryptopanic.png b/intentkit/skills/cryptopanic/cryptopanic.png new file mode 100644 index 00000000..0d652a71 Binary files /dev/null and b/intentkit/skills/cryptopanic/cryptopanic.png differ diff --git a/intentkit/skills/cryptopanic/fetch_crypto_news.py b/intentkit/skills/cryptopanic/fetch_crypto_news.py new file mode 100644 index 00000000..6b236390 --- /dev/null +++ b/intentkit/skills/cryptopanic/fetch_crypto_news.py @@ -0,0 +1,150 @@ +"""Skill to fetch the latest crypto market news from CryptoPanic API. + +Fetches all news posts for BTC or ETH, sorted by publication date (newest first). +""" + +from typing import List, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.cryptopanic.base import CryptopanicBaseTool + +SUPPORTED_CURRENCIES = ["BTC", "ETH"] +BASE_URL = "https://cryptopanic.com/api/v1/posts/" + + +class CryptopanicNewsInput(BaseModel): + """Input schema for fetching crypto news.""" + + query: str = Field(description="Query to specify currency (e.g., 'btc news')") + currency: str = Field( + default="BTC", description="Currency to fetch news for (BTC or ETH)" + ) + + +class NewsItem(BaseModel): + """Data model for a single news item.""" + + title: str = Field(description="News headline") + published_at: str = Field(description="Publication timestamp") + source: str = Field(description="News source domain") + + +class CryptopanicNewsOutput(BaseModel): + """Output schema for fetching crypto news.""" + + currency: str = Field(description="Currency news was fetched for") + news_items: List[NewsItem] = Field(description="List of news items") + summary: str = Field(description="Summary of fetched news") + + +class FetchCryptoNews(CryptopanicBaseTool): + """Skill to fetch the latest crypto market news from CryptoPanic API.""" + + name: str = "fetch_crypto_news" + description: str = ( + "Fetches all crypto market news posts from CryptoPanic for BTC or ETH, " + "sorted by publication date (newest first). Defaults to BTC." + ) + args_schema: Type[BaseModel] = CryptopanicNewsInput + skill_store: SkillStoreABC = Field(description="Skill store for data persistence") + + async def fetch_news( + self, + currency: str, + api_key: str, + ) -> List[NewsItem]: + """Fetch the latest news for a specific currency from CryptoPanic API. + + Args: + currency: Currency to fetch news for (BTC or ETH). + api_key: CryptoPanic API key. + + Returns: + List of NewsItem objects, sorted by publication date (newest first). + + Raises: + ToolException: If the API request fails or data is invalid. 
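+
+        Example:
+            Rough sketch of a call (assumes a valid CryptoPanic API key):
+
+                items = await self.fetch_news("BTC", api_key)
+                # items[0] is the newest post, e.g.
+                # NewsItem(title="...", published_at="2024-...", source="coindesk.com")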
+ """ + from langchain.tools.base import ToolException + + if currency not in SUPPORTED_CURRENCIES: + raise ToolException(f"Unsupported currency: {currency}") + + params = { + "auth_token": api_key, + "public": "true", + "currencies": currency.upper(), + "sort": "-published_at", # Sort by newest first + } + + async with httpx.AsyncClient() as client: + try: + response = await client.get(BASE_URL, params=params, timeout=10) + response.raise_for_status() + data = response.json().get("results", []) + return [ + NewsItem( + title=post["title"], + published_at=post.get("published_at", "Unknown"), + source=post.get("source", {}).get("domain", "CryptoPanic"), + ) + for post in data + ] + except (httpx.RequestError, httpx.HTTPStatusError) as e: + raise ToolException(f"Error fetching news from CryptoPanic: {e}") + + async def _arun( + self, + query: str = "", + currency: str = "BTC", + **kwargs, + ) -> CryptopanicNewsOutput: + """Fetch the latest crypto news asynchronously. + + Args: + query: Query to specify currency (e.g., 'btc news'). + currency: Currency to fetch news for (defaults to BTC). + config: Runnable configuration. + **kwargs: Additional keyword arguments. + + Returns: + CryptopanicNewsOutput with news items and summary. + + Raises: + ToolException: If the API key is missing or request fails. + """ + + currency = currency.upper() if currency else "BTC" + if currency not in SUPPORTED_CURRENCIES: + currency = "BTC" + + api_key = self.get_api_key() + + news_items = await self.fetch_news(currency, api_key) + + # Deduplicate news items by title + seen_titles = set() + unique_news_items = [ + item + for item in news_items + if item.title not in seen_titles and not seen_titles.add(item.title) + ] + + total_items = len(unique_news_items) + summary = ( + f"Fetched {total_items} unique news posts for {currency}, sorted by recency." + if unique_news_items + else f"No news posts found for {currency}." 
+        )
+
+        return CryptopanicNewsOutput(
+            currency=currency,
+            news_items=unique_news_items,
+            summary=summary,
+        )
+
+    def _run(self, question: str):
+        raise NotImplementedError("Use _arun for async execution")
diff --git a/intentkit/skills/cryptopanic/fetch_crypto_sentiment.py b/intentkit/skills/cryptopanic/fetch_crypto_sentiment.py
new file mode 100644
index 00000000..245da088
--- /dev/null
+++ b/intentkit/skills/cryptopanic/fetch_crypto_sentiment.py
@@ -0,0 +1,133 @@
+"""Skill to provide AI-driven insights on crypto market conditions using CryptoPanic news."""
+
+from typing import ClassVar, List, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.cryptopanic.base import CryptopanicBaseTool
+
+SUPPORTED_CURRENCIES = ["BTC", "ETH"]
+
+
+class CryptopanicSentimentInput(BaseModel):
+    """Input schema for fetching crypto market insights."""
+
+    currency: str = Field(default="BTC", description="Currency to analyze (BTC or ETH)")
+
+
+class CryptopanicSentimentOutput(BaseModel):
+    """Output schema for crypto market insights."""
+
+    currency: str = Field(description="Currency analyzed")
+    total_posts: int = Field(description="Number of news items analyzed")
+    headlines: list[str] = Field(description="List of news headlines")
+    prompt: str = Field(description="Formatted prompt for LLM insights")
+    summary: str = Field(description="Summary of analysis process")
+
+
+class CryptopanicNewsOutput(BaseModel):
+    """Output schema for fetching crypto news (used internally)."""
+
+    currency: str = Field(description="Currency news was fetched for")
+    news_items: List[BaseModel] = Field(description="List of news items")
+    summary: str = Field(description="Summary of fetched news")
+
+
+class FetchCryptoSentiment(CryptopanicBaseTool):
+    """Skill to provide AI-driven insights on crypto market conditions using CryptoPanic news."""
+
+    name: str = "fetch_crypto_sentiment"
+    description: str = (
+        "Provides AI-driven insights on market conditions for BTC or ETH, including trends, "
+        "opportunities, risks, and outlook, based on news fetched from fetch_crypto_news "
+        "with all posts sorted by recency. Triggered by 'sentiment' or 'market state' queries. "
+        "Defaults to BTC."
+    )
+    args_schema: Type[BaseModel] = CryptopanicSentimentInput
+    skill_store: SkillStoreABC = Field(description="Skill store for data persistence")
+
+    INSIGHTS_PROMPT: ClassVar[str] = """
+CryptoPanic Headlines for {currency}:
+{headlines}
+
+Total Posts: {total_posts}
+Currency: {currency}
+
+Based on these headlines, provide AI-driven insights into the market conditions for {currency}.
+Summarize key trends (e.g., price movements, adoption, network developments) inferred from the news.
+Identify significant opportunities (e.g., growth potential) and risks (e.g., negative sentiment, competition).
+Classify the overall market outlook as Bullish or Bearish, and give an opinion on whether to buy, sell, or hold.
+Conclude with a short-term outlook for {currency}. Provide a concise, professional analysis without headings.
+    """
+
+    async def _arun(
+        self,
+        currency: str = "BTC",
+        **kwargs,
+    ) -> CryptopanicSentimentOutput:
+        """Generate AI-driven market insights asynchronously.
+
+        Args:
+            currency: Currency to analyze (defaults to BTC).
+            **kwargs: Additional keyword arguments.
+
+        Returns:
+            CryptopanicSentimentOutput with market insights.
+
+        Raises:
+            ToolException: If news fetching fails.
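+
+        Example:
+            Illustrative invocation (field values are examples only):
+
+                output = await skill._arun(currency="ETH")
+                output.total_posts  # e.g. 20
+                output.headlines    # up to 5 newest headlines
+                output.prompt       # filled INSIGHTS_PROMPT, ready for an LLM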
+ """ + from langchain.tools.base import ToolException + + from intentkit.skills.cryptopanic.fetch_crypto_news import ( + FetchCryptoNews, + ) # Import here to avoid circular import + + currency = currency.upper() if currency else "BTC" + if currency not in SUPPORTED_CURRENCIES: + currency = "BTC" + + # Instantiate FetchCryptoNews + news_skill = FetchCryptoNews(skill_store=self.skill_store) + + try: + news_output: CryptopanicNewsOutput = await news_skill._arun( + query=f"insights for {currency}", + currency=currency, + ) + except Exception as e: + raise ToolException(f"Failed to fetch news for analysis: {e}") + + news_items = news_output.news_items + total_posts = len(news_items) + + if total_posts == 0: + headlines = ["No recent news available"] + summary = f"No news found for {currency} to analyze." + else: + headlines = [item.title for item in news_items[:5]] # Limit to 5 + summary = f"Generated insights for {currency} based on {total_posts} news items sorted by recency." + + # Format headlines as numbered list + formatted_headlines = "\n".join( + f"{i + 1}. {headline}" for i, headline in enumerate(headlines) + ) + + prompt = self.INSIGHTS_PROMPT.format( + total_posts=total_posts, + currency=currency, + headlines=formatted_headlines, + ) + + return CryptopanicSentimentOutput( + currency=currency, + total_posts=total_posts, + headlines=headlines, + prompt=prompt, + summary=summary, + ) + + def _run(self, question: str): + raise NotImplementedError("Use _arun for async execution") diff --git a/intentkit/skills/cryptopanic/schema.json b/intentkit/skills/cryptopanic/schema.json new file mode 100644 index 00000000..8d689b2b --- /dev/null +++ b/intentkit/skills/cryptopanic/schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "CryptoPanic", + "description": "CryptoPanic is a news aggregator platform indicating impact on price and market for traders and cryptocurrency enthusiasts.", + "x-icon": "https://ai.service.crestal.dev/skills/cryptopanic/cryptopanic.png", + "x-tags": [ + "Data" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "fetch_crypto_sentiment": { + "type": "string", + "title": "Fetch Crypto Sentiment", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "default": "disabled", + "description": "Fetches recent CryptoPanic posts and defines market sentiment via LLM analysis." + }, + "fetch_crypto_news": { + "type": "string", + "title": "Fetch Crypto News", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "default": "disabled", + "description": "Fetches latest crypto market news from CryptoPanic across all filters." 
+        }
+      },
+      "description": "States for each CryptoPanic skill (disabled, public, or private)"
+    },
+    "api_key_provider": {
+      "type": "string",
+      "title": "API Key Provider",
+      "description": "Provider of the API key for the CryptoPanic API service",
+      "enum": [
+        "agent_owner"
+      ],
+      "x-enum-title": [
+        "Owner Provided"
+      ],
+      "default": "agent_owner"
+    }
+  },
+  "required": [
+    "states",
+    "enabled"
+  ],
+  "if": {
+    "properties": {
+      "api_key_provider": {
+        "const": "agent_owner"
+      }
+    }
+  },
+  "then": {
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "title": "CryptoPanic API Key",
+        "x-link": "[Get your API key](https://cryptopanic.com/developers/api/keys)",
+        "x-sensitive": true,
+        "description": "API key for accessing the CryptoPanic API"
+      }
+    },
+    "if": {
+      "properties": {
+        "enabled": {
+          "const": true
+        }
+      }
+    },
+    "then": {
+      "required": [
+        "api_key"
+      ]
+    }
+  },
+  "additionalProperties": true
+}
\ No newline at end of file
diff --git a/intentkit/skills/dapplooker/README.md b/intentkit/skills/dapplooker/README.md
new file mode 100644
index 00000000..997ae906
--- /dev/null
+++ b/intentkit/skills/dapplooker/README.md
@@ -0,0 +1,92 @@
+# DappLooker Skill
+
+This skill provides access to the DappLooker API for retrieving comprehensive data and analytics for AI agent tokens.
+
+## Features
+
+The DappLooker skill allows your agent to:
+
+- Retrieve detailed AI agent token market data using token tickers or contract addresses
+- Get real-time price information, market cap, and volume
+- Access technical indicators like support/resistance levels and RSI
+- View token holder insights including distribution and concentration metrics
+- Monitor smart money movements and developer wallet activity
+- Check supply information and token fundamentals
+
+## Key Data Points Available
+
+According to the [DappLooker documentation](https://docs.dapplooker.com/data-apis-for-ai/overview), the API provides:
+
+### Token & Market Data
+- **Token Metrics**: Price, volume, market cap, and liquidity
+- **Technical Indicators**: Support/resistance levels, RSI, SMA, and more
+- **Circulating & Burned Supply**: Information about inflation, scarcity, and value impact
+
+### Agent-Level Intelligence
+- **Agent Mindshare**: Measures visibility and traction across the ecosystem
+- **Confidence Score**: Evaluates project credibility, contract safety, and risks
+- **Historical Context**: Deployment details, contract origin, and dev history
+
+### Smart Money & Whale Flows
+- **Smart Netflows**: Token movements among top wallets (inflows vs. outflows)
+- **Whale Concentration**: Wallet clustering and impact analysis
+- **Top Holder Map**: Visual bubble distribution for wallet-wise token dominance
+
+### Holder Wallets Activity
+- **Wallets Tracking**: Number of linked wallets per protocol & holdings
+- **Token Holder Insights**: Token holder behavior, first 100 buyers data, including snipers
+- **Wallet Funding & Txns**: Inflow/outflow monitoring of developer-related transactions
+
+### Risk Management Analysis
+- **Rug Scanner**: Flags potential risks associated with agent investments
+- **Project Health Alerts**: Alerts for centralized token ownership and other risks
+
+## Important Note
+
+**This skill is specifically designed for AI agent tokens.** It may not return data for general cryptocurrencies like BTC, ETH, SOL, etc. DappLooker specializes in providing detailed analytics for AI-focused crypto projects.
+
+## Configuration
+
+To use this skill, you'll need to:
+
+1. Enable the skill in your agent configuration
+2. Obtain a DappLooker API key from [DappLooker](https://docs.dapplooker.com/dapplooker-ai/ai-apis)
+3. Configure the skill in your agent settings
+
+### Agent Configuration
+
+```yaml
+skills:
+  dapplooker:
+    enabled: true
+    api_key: "your_dapplooker_api_key" # Optional if using environment variable
+    states:
+      dapplooker_token_data: public # or "private" for agent owner only
+```
+
+### Environment Variable
+
+You can also set the DappLooker API key as an environment variable:
+
+```
+DAPPLOOKER_API_KEY=your_dapplooker_api_key
+```
+
+The skill will first check the agent configuration for the API key, and if not found, it will use the environment variable.
+
+## Usage
+
+Your agent can now access AI agent token data by using the `dapplooker_token_data` skill:
+
+- Query by token ticker (e.g., "aixbt,vader")
+- Query by contract address (e.g., "0x4F9Fd6Be4a90f2620860d680c0d4d5Fb53d1A825")
+- Specify the blockchain network (default is "base")
+
+## Example
+
+A user might ask:
+"What's the current price and market metrics for the AIXBT token on Base?"
+
+The agent would use the DappLooker skill to retrieve and present this information in a structured format.
+
+If a user asks about non-AI agent tokens like Bitcoin or Ethereum, the skill will inform them that DappLooker specializes in AI agent tokens and suggest querying for AI-focused projects instead.
\ No newline at end of file
diff --git a/intentkit/skills/dapplooker/__init__.py b/intentkit/skills/dapplooker/__init__.py
new file mode 100644
index 00000000..c5b8d1c3
--- /dev/null
+++ b/intentkit/skills/dapplooker/__init__.py
@@ -0,0 +1,83 @@
+"""DappLooker skills for crypto market data and analytics."""
+
+import logging
+from typing import NotRequired, Optional, TypedDict
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.dapplooker.base import DappLookerBaseTool
+from intentkit.skills.dapplooker.dapplooker_token_data import DappLookerTokenData
+
+# Cache skills at the system level, because they are stateless
+_cache: dict[str, DappLookerBaseTool] = {}
+
+logger = logging.getLogger(__name__)
+
+
+class SkillStates(TypedDict):
+    dapplooker_token_data: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for DappLooker skills."""
+
+    states: SkillStates
+    api_key: NotRequired[str]
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[DappLookerBaseTool]:
+    """Get all DappLooker skills.
+
+    Args:
+        config: The configuration for DappLooker skills.
+        is_private: Whether to include private skills.
+        store: The skill store for persisting data.
+
+    Returns:
+        A list of DappLooker skills.
+    """
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter
+    result = []
+    for name in available_skills:
+        skill = get_dapplooker_skill(name, store)
+        if skill:
+            result.append(skill)
+    return result
+
+
+def get_dapplooker_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> Optional[DappLookerBaseTool]:
+    """Get a DappLooker skill by name.
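+
+    Instances are cached in the module-level ``_cache`` dict, so repeated
+    calls with the same name return the same object. Illustrative:
+
+        skill = get_dapplooker_skill("dapplooker_token_data", store)
+        assert skill is get_dapplooker_skill("dapplooker_token_data", store)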
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested DappLooker skill + """ + if name == "dapplooker_token_data": + if name not in _cache: + _cache[name] = DappLookerTokenData( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown DappLooker skill: {name}") + return None diff --git a/intentkit/skills/dapplooker/base.py b/intentkit/skills/dapplooker/base.py new file mode 100644 index 00000000..55b4c66f --- /dev/null +++ b/intentkit/skills/dapplooker/base.py @@ -0,0 +1,36 @@ +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class DappLookerBaseTool(IntentKitSkill): + """Base class for DappLooker tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + return self.skill_store.get_system_config("dapplooker_api_key") + # for backward compatibility, may only have api_key in skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "dapplooker" diff --git a/intentkit/skills/dapplooker/dapplooker.jpg b/intentkit/skills/dapplooker/dapplooker.jpg new file mode 100644 index 00000000..bbf5387b Binary files /dev/null and b/intentkit/skills/dapplooker/dapplooker.jpg differ diff --git a/intentkit/skills/dapplooker/dapplooker_token_data.py b/intentkit/skills/dapplooker/dapplooker_token_data.py new file mode 100644 index 00000000..f33f2958 --- /dev/null +++ b/intentkit/skills/dapplooker/dapplooker_token_data.py @@ -0,0 +1,474 @@ +import json +import logging +from typing import Any, Dict, List, Optional, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.dapplooker.base import DappLookerBaseTool + +logger = logging.getLogger(__name__) + + +class DappLookerTokenDataInput(BaseModel): + """Input for DappLooker token data tool.""" + + token_tickers: Optional[str] = Field( + description="Comma-separated list of AI agent token tickers (e.g., 'aixbt,vader'). " + "Either token_tickers or token_addresses must be provided.", + default=None, + ) + token_addresses: Optional[str] = Field( + description="Comma-separated list of AI agent token contract addresses (e.g., '0x4F9Fd6Be4a90f2620860d680c0d4d5Fb53d1A825'). " + "Either token_tickers or token_addresses must be provided.", + default=None, + ) + chain: str = Field( + description="Blockchain network to query (e.g., 'base', 'ethereum').", + default="base", + ) + + +class DappLookerTokenData(DappLookerBaseTool): + """Tool for retrieving AI agent token data from DappLooker. + + This tool uses DappLooker's API to fetch comprehensive crypto market data and analytics + specifically for AI agent tokens. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
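+
+    Example:
+        Illustrative call; the exact text returned is produced by
+        ``_format_token_data`` below:
+
+            text = await tool._arun(token_tickers="aixbt", chain="base")
+            # "# AI Agent Token Market Data ..." on success, or an error string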
+ """ + + name: str = "dapplooker_token_data" + description: str = ( + "Retrieve detailed token market data and analytics for AI agent tokens using DappLooker. " + "Use this tool when you need current information about AI-focused crypto tokens, " + "including price, market cap, volume, technical indicators, holder insights, and developer activity.\n" + "You can query by token ticker (e.g., 'aixbt', 'vader') or by token contract address. " + "Note that this tool is specialized for AI agent tokens and may not return data for general cryptocurrencies like ETH, BTC, or SOL.\n" + "Either token_tickers or token_addresses must be provided." + ) + args_schema: Type[BaseModel] = DappLookerTokenDataInput + + async def _arun( + self, + token_tickers: Optional[str] = None, + token_addresses: Optional[str] = None, + chain: str = "base", + **kwargs, + ) -> str: + """Implementation of the DappLooker token data tool. + + Args: + token_tickers: Comma-separated list of token tickers. + token_addresses: Comma-separated list of token contract addresses. + chain: Blockchain network to query. + config: The configuration for the tool call. + + Returns: + str: Formatted token data with market metrics and analytics. + """ + context = self.get_context() + logger.debug( + f"dapplooker_token_data.py: Fetching token data with context {context}" + ) + + # Get the API key from the agent's configuration or environment variable + api_key = self.get_api_key() + if not api_key: + return "Error: No DappLooker API key provided in the configuration or environment." + + # Validate input + if not token_tickers and not token_addresses: + return "Error: Either token_tickers or token_addresses must be provided." + + # Check for common non-AI agent tokens that won't be in the database + # Only check if using token_tickers, not relevant for token_addresses + if ( + token_tickers + and not token_addresses + and token_tickers.lower() + in [ + "btc", + "eth", + "sol", + "bitcoin", + "ethereum", + "solana", + "bnb", + "xrp", + "ada", + "doge", + ] + ): + return ( + f"The token '{token_tickers}' is not an AI agent token and is not tracked by DappLooker. " + f"DappLooker specializes in AI agent tokens like 'aixbt', 'vader', and other AI-focused crypto projects. " + f"Please try querying for an AI agent token instead." 
+ ) + + # Set up the request parameters + params = { + "api_key": api_key, + "chain": chain, + } + + # Add either token_tickers or token_addresses to the parameters + if token_tickers: + params["token_tickers"] = token_tickers + if token_addresses: + params["token_addresses"] = token_addresses + + logger.debug(f"dapplooker_token_data.py: Request params: {params}") + + # Call DappLooker API + try: + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.get( + "https://api.dapplooker.com/v1/crypto-market/", + params=params, + ) + + logger.debug( + f"dapplooker_token_data.py: API response status: {response.status_code}" + ) + + if response.status_code != 200: + logger.error( + f"dapplooker_token_data.py: Error from DappLooker API: {response.status_code} - {response.text}" + ) + return f"Error retrieving token data: {response.status_code} - {response.text}" + + # Parse the API response + response_json = response.json() + logger.debug( + f"dapplooker_token_data.py: Response type: {type(response_json)}" + ) + + # Check if response is a string and try to parse it + if isinstance(response_json, str): + try: + response_json = json.loads(response_json) + logger.debug( + "dapplooker_token_data.py: Parsed string response as JSON" + ) + except json.JSONDecodeError as e: + logger.error( + f"dapplooker_token_data.py: Error parsing JSON: {e}" + ) + return f"Error processing token data: {e}" + + # Extract the data array from the response + # The API returns {"success": true, "data": [...]} + if isinstance(response_json, dict) and "data" in response_json: + data = response_json["data"] + logger.debug( + f"dapplooker_token_data.py: Found data array with {len(data)} items" + ) + else: + data = response_json # Fallback for backward compatibility + logger.debug( + "dapplooker_token_data.py: Using response as data (fallback)" + ) + + logger.debug( + f"dapplooker_token_data.py: Received data type: {type(data)}" + ) + + if not data or data == []: + query_type = ( + "tickers" + if token_tickers and not token_addresses + else "addresses" + ) + query_value = token_tickers if token_tickers else token_addresses + return ( + f"No results found for {query_type}: '{query_value}' on chain '{chain}'. " + f"This may be because:\n" + f"1. The token is not an AI agent token tracked by DappLooker\n" + f"2. The token ticker or address is incorrect\n" + f"3. The token exists on a different blockchain than '{chain}'\n\n" + f"DappLooker specializes in AI agent tokens like 'aixbt', 'vader', and other AI-focused crypto projects." + ) + + # Format the results + return self._format_token_data(data) + + except Exception as e: + logger.error( + f"dapplooker_token_data.py: Error retrieving token data: {e}", + exc_info=True, + ) + return ( + "An error occurred while retrieving token data. Please try again later." + ) + + def _format_token_data(self, data: List[Dict[str, Any]]) -> str: + """Format the token data for display. + + Args: + data: List of token data dictionaries from DappLooker API. + + Returns: + str: Formatted token data. + """ + if not data: + return "No token data available." 
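+
+        # Shape sketch of one element of `data`, inferred from the accessors
+        # below (illustrative, not the authoritative API contract):
+        #   {
+        #     "token_info": {"name": ..., "symbol": ..., "chain": ..., "ca": ...},
+        #     "token_metrics": {"usd_price": ..., "mcap": ..., "volume_24h": ...},
+        #     "technical_indicators": {"support": ..., "resistance": ..., "rsi": ...},
+        #     "token_holder_insights": {...},
+        #     "smart_money_insights": {...},
+        #     "dev_wallet_insights": {...},
+        #   }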
+ + # Ensure data is a list + if not isinstance(data, list): + # If data is a dict, wrap it in a list + if isinstance(data, dict): + data = [data] + else: + return f"Error: Unexpected data format received from API: {type(data)}" + + formatted_results = "# AI Agent Token Market Data\n\n" + + for token in data: + # Ensure token is a dict + if not isinstance(token, dict): + logger.error( + f"dapplooker_token_data.py: Token is not a dictionary: {token}" + ) + continue + + token_info = token.get("token_info", {}) + token_metrics = token.get("token_metrics", {}) + technical_indicators = token.get("technical_indicators", {}) + token_holder_insights = token.get("token_holder_insights", {}) + smart_money_insights = token.get("smart_money_insights", {}) + dev_wallet_insights = token.get("dev_wallet_insights", {}) + + # Token basic info + name = token_info.get("name", "Unknown") + symbol = token_info.get("symbol", "Unknown") + chain = token_info.get("chain", "Unknown") + address = token_info.get("ca", "Unknown") + ecosystem = token_info.get("ecosystem", "Unknown") + description = token_info.get("description", "") + handle = token_info.get("handle", "Unknown") + + formatted_results += f"## {name} ({symbol})\n" + formatted_results += f"Chain: {chain}\n" + formatted_results += f"Ecosystem: {ecosystem}\n" + formatted_results += f"Contract: {address}\n" + if handle: + formatted_results += f"Handle: {handle}\n" + if description: + formatted_results += f"Description: {description}\n" + formatted_results += "\n" + + # Price and market metrics + if token_metrics: + formatted_results += "### Market Metrics\n" + price = token_metrics.get("usd_price", "Unknown") + mcap = token_metrics.get("mcap", "Unknown") + fdv = token_metrics.get("fdv", "Unknown") + volume_24h = token_metrics.get("volume_24h", "Unknown") + total_liquidity = token_metrics.get("total_liquidity", "Unknown") + + formatted_results += f"Price: ${price}\n" + formatted_results += f"Market Cap: ${mcap}\n" + formatted_results += f"Fully Diluted Value: ${fdv}\n" + formatted_results += f"24h Volume: ${volume_24h}\n" + formatted_results += f"Total Liquidity: ${total_liquidity}\n" + + # Price changes + price_change_1h = token_metrics.get( + "price_change_percentage_1h", "Unknown" + ) + price_change_24h = token_metrics.get( + "price_change_percentage_24h", "Unknown" + ) + price_change_7d = token_metrics.get( + "price_change_percentage_7d", "Unknown" + ) + price_change_30d = token_metrics.get( + "price_change_percentage_30d", "Unknown" + ) + + formatted_results += f"Price Change 1h: {price_change_1h}%\n" + formatted_results += f"Price Change 24h: {price_change_24h}%\n" + formatted_results += f"Price Change 7d: {price_change_7d}%\n" + formatted_results += f"Price Change 30d: {price_change_30d}%\n" + + # Volume and Market Cap changes + volume_change_7d = token_metrics.get( + "volume_change_percentage_7d", "Unknown" + ) + volume_change_30d = token_metrics.get( + "volume_change_percentage_30d", "Unknown" + ) + mcap_change_7d = token_metrics.get( + "mcap_change_percentage_7d", "Unknown" + ) + mcap_change_30d = token_metrics.get( + "mcap_change_percentage_30d", "Unknown" + ) + + formatted_results += f"Volume Change 7d: {volume_change_7d}%\n" + formatted_results += f"Volume Change 30d: {volume_change_30d}%\n" + formatted_results += f"Market Cap Change 7d: {mcap_change_7d}%\n" + formatted_results += f"Market Cap Change 30d: {mcap_change_30d}%\n" + + # Price highs + price_high_24h = token_metrics.get("price_high_24h", "Unknown") + price_ath = 
token_metrics.get("price_ath", "Unknown") + + formatted_results += f"24h High: ${price_high_24h}\n" + formatted_results += f"All-Time High: ${price_ath}\n\n" + + # Technical indicators + if technical_indicators: + formatted_results += "### Technical Indicators\n" + support = technical_indicators.get("support", "Unknown") + resistance = technical_indicators.get("resistance", "Unknown") + rsi = technical_indicators.get("rsi", "Unknown") + sma = technical_indicators.get("sma", "Unknown") + + formatted_results += f"Support: ${support}\n" + formatted_results += f"Resistance: ${resistance}\n" + formatted_results += f"RSI: {rsi}\n" + formatted_results += f"SMA: ${sma}\n\n" + + # Token Holder Insights + if token_holder_insights: + formatted_results += "### Token Holder Insights\n" + total_holders = token_holder_insights.get( + "total_holder_count", "Unknown" + ) + holder_change_24h = token_holder_insights.get( + "holder_count_change_percentage_24h", "Unknown" + ) + fifty_percent_wallets = token_holder_insights.get( + "fifty_percentage_holding_wallet_count", "Unknown" + ) + + # First 100 buyers metrics + first_100_initial = token_holder_insights.get( + "first_100_buyers_initial_bought", "Unknown" + ) + first_100_initial_pct = token_holder_insights.get( + "first_100_buyers_initial_bought_percentage", "Unknown" + ) + first_100_current = token_holder_insights.get( + "first_100_buyers_current_holding", "Unknown" + ) + first_100_current_pct = token_holder_insights.get( + "first_100_buyers_current_holding_percentage", "Unknown" + ) + + # Top holders concentration + top_10_balance = token_holder_insights.get( + "top_10_holder_balance", "Unknown" + ) + top_10_pct = token_holder_insights.get( + "top_10_holder_percentage", "Unknown" + ) + top_50_balance = token_holder_insights.get( + "top_50_holder_balance", "Unknown" + ) + top_50_pct = token_holder_insights.get( + "top_50_holder_percentage", "Unknown" + ) + top_100_balance = token_holder_insights.get( + "top_100_holder_balance", "Unknown" + ) + top_100_pct = token_holder_insights.get( + "top_100_holder_percentage", "Unknown" + ) + + if total_holders != "Unknown": + formatted_results += f"Total Holders: {total_holders}\n" + formatted_results += f"Holder Change 24h: {holder_change_24h}%\n" + if fifty_percent_wallets != "Unknown": + formatted_results += ( + f"Wallets Holding 50%: {fifty_percent_wallets}\n" + ) + + formatted_results += f"First 100 Buyers Initial: {first_100_initial} ({first_100_initial_pct}%)\n" + formatted_results += f"First 100 Buyers Current: {first_100_current} ({first_100_current_pct}%)\n" + + formatted_results += ( + f"Top 10 Holders: {top_10_balance} ({top_10_pct}%)\n" + ) + formatted_results += ( + f"Top 50 Holders: {top_50_balance} ({top_50_pct}%)\n" + ) + formatted_results += ( + f"Top 100 Holders: {top_100_balance} ({top_100_pct}%)\n\n" + ) + + # Smart money insights + if smart_money_insights: + formatted_results += "### Smart Money Insights\n" + top_buys = smart_money_insights.get("top_25_holder_buy_24h", "Unknown") + top_sells = smart_money_insights.get( + "top_25_holder_sold_24h", "Unknown" + ) + + formatted_results += f"Top 25 Holders Buy 24h: {top_buys}\n" + formatted_results += f"Top 25 Holders Sell 24h: {top_sells}\n\n" + + # Developer Wallet Insights + if dev_wallet_insights: + formatted_results += "### Developer Wallet Insights\n" + wallet_address = dev_wallet_insights.get("wallet_address", "Unknown") + wallet_balance = dev_wallet_insights.get("wallet_balance", "Unknown") + wallet_percentage = dev_wallet_insights.get( + 
"dev_wallet_total_holding_percentage", "Unknown" + ) + outflow_txs = dev_wallet_insights.get( + "dev_wallet_outflow_txs_count_24h", "Unknown" + ) + outflow_amount = dev_wallet_insights.get( + "dev_wallet_outflow_amount_24h", "Unknown" + ) + fresh_wallet = dev_wallet_insights.get("fresh_wallet", False) + dev_sold = dev_wallet_insights.get("dev_sold", False) + dev_sold_percentage = dev_wallet_insights.get( + "dev_sold_percentage", "Unknown" + ) + bundle_wallet_count = dev_wallet_insights.get( + "bundle_wallet_count", "Unknown" + ) + bundle_wallet_supply = dev_wallet_insights.get( + "bundle_wallet_supply_percentage", "Unknown" + ) + + formatted_results += f"Developer Wallet: {wallet_address}\n" + if wallet_balance != "Unknown": + formatted_results += f"Wallet Balance: {wallet_balance}\n" + if wallet_percentage != "Unknown": + formatted_results += f"Wallet Holding %: {wallet_percentage}%\n" + if outflow_txs != "Unknown": + formatted_results += f"Outflow Txs 24h: {outflow_txs}\n" + if outflow_amount != "Unknown": + formatted_results += f"Outflow Amount 24h: {outflow_amount}\n" + formatted_results += f"Fresh Wallet: {fresh_wallet}\n" + formatted_results += f"Dev Has Sold: {dev_sold}\n" + formatted_results += f"Dev Sold %: {dev_sold_percentage}%\n" + formatted_results += f"Bundle Wallet Count: {bundle_wallet_count}\n" + formatted_results += f"Bundle Supply %: {bundle_wallet_supply}%\n\n" + + # Supply information + if token_metrics: + formatted_results += "### Supply Information\n" + circ_supply = token_metrics.get("circulating_supply", "Unknown") + total_supply = token_metrics.get("total_supply", "Unknown") + + formatted_results += f"Circulating Supply: {circ_supply}\n" + formatted_results += f"Total Supply: {total_supply}\n\n" + + # Last Updated + last_updated = token.get("last_updated_at", "Unknown") + if last_updated != "Unknown": + formatted_results += f"Last Updated: {last_updated}\n\n" + + # Add separator between tokens + formatted_results += "---\n\n" + + return formatted_results.strip() diff --git a/intentkit/skills/dapplooker/schema.json b/intentkit/skills/dapplooker/schema.json new file mode 100644 index 00000000..c173fbf2 --- /dev/null +++ b/intentkit/skills/dapplooker/schema.json @@ -0,0 +1,91 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "DappLooker", + "description": "Retrieve comprehensive market data and analytics for AI agent tokens using DappLooker. 
This API specializes in AI-focused crypto projects and may not provide data for general cryptocurrencies like BTC or ETH.", + "x-icon": "https://ai.service.crestal.dev/skills/dapplooker/dapplooker.jpg", + "x-tags": [ + "Crypto", + "Market Data", + "Token Metrics", + "AI Agents" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + "dapplooker_token_data": { + "type": "string", + "title": "AI Token Data", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve detailed market data and analytics for AI-focused tokens by ticker or address", + "default": "private" + } + }, + "description": "States for each DappLooker skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "DappLooker API Key", + "description": "API key for DappLooker service", + "x-sensitive": true + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/defillama/__init__.py b/intentkit/skills/defillama/__init__.py new file mode 100644 index 00000000..dfd3bf12 --- /dev/null +++ b/intentkit/skills/defillama/__init__.py @@ -0,0 +1,323 @@ +"""DeFi Llama skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.defillama.base import DefiLlamaBaseTool +from intentkit.skills.defillama.coins.fetch_batch_historical_prices import ( + DefiLlamaFetchBatchHistoricalPrices, +) +from intentkit.skills.defillama.coins.fetch_block import DefiLlamaFetchBlock + +# Coins Tools +from intentkit.skills.defillama.coins.fetch_current_prices import ( + DefiLlamaFetchCurrentPrices, +) +from intentkit.skills.defillama.coins.fetch_first_price import DefiLlamaFetchFirstPrice +from intentkit.skills.defillama.coins.fetch_historical_prices import ( + DefiLlamaFetchHistoricalPrices, +) +from intentkit.skills.defillama.coins.fetch_price_chart import DefiLlamaFetchPriceChart +from intentkit.skills.defillama.coins.fetch_price_percentage import ( + DefiLlamaFetchPricePercentage, +) + +# Fees Tools +from intentkit.skills.defillama.fees.fetch_fees_overview import ( + DefiLlamaFetchFeesOverview, +) +from intentkit.skills.defillama.stablecoins.fetch_stablecoin_chains import ( + DefiLlamaFetchStablecoinChains, +) +from intentkit.skills.defillama.stablecoins.fetch_stablecoin_charts import ( + DefiLlamaFetchStablecoinCharts, +) +from intentkit.skills.defillama.stablecoins.fetch_stablecoin_prices import ( + DefiLlamaFetchStablecoinPrices, +) + +# Stablecoins Tools +from intentkit.skills.defillama.stablecoins.fetch_stablecoins import ( + DefiLlamaFetchStablecoins, +) +from intentkit.skills.defillama.tvl.fetch_chain_historical_tvl import ( + 
DefiLlamaFetchChainHistoricalTvl, +) +from intentkit.skills.defillama.tvl.fetch_chains import DefiLlamaFetchChains +from intentkit.skills.defillama.tvl.fetch_historical_tvl import ( + DefiLlamaFetchHistoricalTvl, +) +from intentkit.skills.defillama.tvl.fetch_protocol import DefiLlamaFetchProtocol +from intentkit.skills.defillama.tvl.fetch_protocol_current_tvl import ( + DefiLlamaFetchProtocolCurrentTvl, +) + +# TVL Tools +from intentkit.skills.defillama.tvl.fetch_protocols import DefiLlamaFetchProtocols + +# Volumes Tools +from intentkit.skills.defillama.volumes.fetch_dex_overview import ( + DefiLlamaFetchDexOverview, +) +from intentkit.skills.defillama.volumes.fetch_dex_summary import ( + DefiLlamaFetchDexSummary, +) +from intentkit.skills.defillama.volumes.fetch_options_overview import ( + DefiLlamaFetchOptionsOverview, +) +from intentkit.skills.defillama.yields.fetch_pool_chart import DefiLlamaFetchPoolChart + +# Yields Tools +from intentkit.skills.defillama.yields.fetch_pools import DefiLlamaFetchPools + +# we cache skills in system level, because they are stateless +_cache: dict[str, DefiLlamaBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + # TVL Skills + fetch_protocols: SkillState + fetch_protocol: SkillState + fetch_historical_tvl: SkillState + fetch_chain_historical_tvl: SkillState + fetch_protocol_current_tvl: SkillState + fetch_chains: SkillState + + # Coins Skills + fetch_current_prices: SkillState + fetch_historical_prices: SkillState + fetch_batch_historical_prices: SkillState + fetch_price_chart: SkillState + fetch_price_percentage: SkillState + fetch_first_price: SkillState + fetch_block: SkillState + + # Stablecoins Skills + fetch_stablecoins: SkillState + fetch_stablecoin_charts: SkillState + fetch_stablecoin_chains: SkillState + fetch_stablecoin_prices: SkillState + + # Yields Skills + fetch_pools: SkillState + fetch_pool_chart: SkillState + + # Volumes Skills + fetch_dex_overview: SkillState + fetch_dex_summary: SkillState + fetch_options_overview: SkillState + + # Fees Skills + fetch_fees_overview: SkillState + + +class Config(SkillConfig): + """Configuration for DeFi Llama skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[DefiLlamaBaseTool]: + """Get all DeFi Llama skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_defillama_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_defillama_skill( + name: str, + store: SkillStoreABC, +) -> DefiLlamaBaseTool: + """Get a DeFi Llama skill by name. + + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested DeFi Llama skill + + Notes: + Each skill maps to a specific DeFi Llama API endpoint. Some skills handle both + base and chain-specific endpoints through optional parameters rather than + separate implementations. 
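+
+    Example:
+        Illustrative lookup; instances are cached module-wide, so repeated
+        calls return the same object:
+
+            skill = get_defillama_skill("fetch_protocols", store)
+            skill is get_defillama_skill("fetch_protocols", store)  # True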
+ """ + # TVL Skills + if name == "fetch_protocols": + if name not in _cache: + _cache[name] = DefiLlamaFetchProtocols( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_protocol": + if name not in _cache: + _cache[name] = DefiLlamaFetchProtocol( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_historical_tvl": + if name not in _cache: + _cache[name] = DefiLlamaFetchHistoricalTvl( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_chain_historical_tvl": + if name not in _cache: + _cache[name] = DefiLlamaFetchChainHistoricalTvl( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_protocol_current_tvl": + if name not in _cache: + _cache[name] = DefiLlamaFetchProtocolCurrentTvl( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_chains": + if name not in _cache: + _cache[name] = DefiLlamaFetchChains( + skill_store=store, + ) + return _cache[name] + + # Coins Skills + elif name == "fetch_current_prices": + if name not in _cache: + _cache[name] = DefiLlamaFetchCurrentPrices( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_historical_prices": + if name not in _cache: + _cache[name] = DefiLlamaFetchHistoricalPrices( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_batch_historical_prices": + if name not in _cache: + _cache[name] = DefiLlamaFetchBatchHistoricalPrices( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_price_chart": + if name not in _cache: + _cache[name] = DefiLlamaFetchPriceChart( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_price_percentage": + if name not in _cache: + _cache[name] = DefiLlamaFetchPricePercentage( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_first_price": + if name not in _cache: + _cache[name] = DefiLlamaFetchFirstPrice( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_block": + if name not in _cache: + _cache[name] = DefiLlamaFetchBlock( + skill_store=store, + ) + return _cache[name] + + # Stablecoins Skills + elif name == "fetch_stablecoins": + if name not in _cache: + _cache[name] = DefiLlamaFetchStablecoins( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_stablecoin_charts": + if name not in _cache: + _cache[name] = DefiLlamaFetchStablecoinCharts( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_stablecoin_chains": + if name not in _cache: + _cache[name] = DefiLlamaFetchStablecoinChains( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_stablecoin_prices": + if name not in _cache: + _cache[name] = DefiLlamaFetchStablecoinPrices( + skill_store=store, + ) + return _cache[name] + + # Yields Skills + elif name == "fetch_pools": + if name not in _cache: + _cache[name] = DefiLlamaFetchPools( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_pool_chart": + if name not in _cache: + _cache[name] = DefiLlamaFetchPoolChart( + skill_store=store, + ) + return _cache[name] + + # Volumes Skills + elif name == "fetch_dex_overview": # Handles both base and chain-specific overviews + if name not in _cache: + _cache[name] = DefiLlamaFetchDexOverview( + skill_store=store, + ) + return _cache[name] + elif name == "fetch_dex_summary": + if name not in _cache: + _cache[name] = DefiLlamaFetchDexSummary( + skill_store=store, + ) + return _cache[name] + elif ( + name == "fetch_options_overview" + ): # Handles both base and chain-specific overviews + if name not in _cache: + _cache[name] = 
DefiLlamaFetchOptionsOverview( + skill_store=store, + ) + return _cache[name] + + # Fees Skills + elif ( + name == "fetch_fees_overview" + ): # Handles both base and chain-specific overviews + if name not in _cache: + _cache[name] = DefiLlamaFetchFeesOverview( + skill_store=store, + ) + return _cache[name] + + else: + logger.warning(f"Unknown DeFi Llama skill: {name}") + return None diff --git a/intentkit/skills/defillama/api.py b/intentkit/skills/defillama/api.py new file mode 100644 index 00000000..45528ff9 --- /dev/null +++ b/intentkit/skills/defillama/api.py @@ -0,0 +1,315 @@ +"""DeFi Llama API implementation and shared schemas.""" + +from datetime import datetime +from typing import List, Optional + +import httpx + +DEFILLAMA_TVL_BASE_URL = "https://api.llama.fi" +DEFILLAMA_COINS_BASE_URL = "https://coins.llama.fi" +DEFILLAMA_STABLECOINS_BASE_URL = "https://stablecoins.llama.fi" +DEFILLAMA_YIELDS_BASE_URL = "https://yields.llama.fi" +DEFILLAMA_VOLUMES_BASE_URL = "https://api.llama.fi" +DEFILLAMA_FEES_BASE_URL = "https://api.llama.fi" + + +# TVL API Functions +async def fetch_protocols() -> dict: + """List all protocols on defillama along with their TVL.""" + url = f"{DEFILLAMA_TVL_BASE_URL}/protocols" + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_protocol(protocol: str) -> dict: + """Get historical TVL of a protocol and breakdowns by token and chain.""" + url = f"{DEFILLAMA_TVL_BASE_URL}/protocol/{protocol}" + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_historical_tvl() -> dict: + """Get historical TVL of DeFi on all chains.""" + url = f"{DEFILLAMA_TVL_BASE_URL}/v2/historicalChainTvl" + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_chain_historical_tvl(chain: str) -> dict: + """Get historical TVL of a specific chain.""" + url = f"{DEFILLAMA_TVL_BASE_URL}/v2/historicalChainTvl/{chain}" + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_protocol_current_tvl(protocol: str) -> dict: + """Get current TVL of a protocol.""" + url = f"{DEFILLAMA_TVL_BASE_URL}/tvl/{protocol}" + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_chains() -> dict: + """Get current TVL of all chains.""" + url = f"{DEFILLAMA_TVL_BASE_URL}/v2/chains" + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +# Coins API Functions +async def fetch_current_prices(coins: List[str]) -> dict: + """Get current prices of tokens by contract address using a 4-hour search window.""" + coins_str = ",".join(coins) + url = 
f"{DEFILLAMA_COINS_BASE_URL}/prices/current/{coins_str}?searchWidth=4h" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_historical_prices(timestamp: int, coins: List[str]) -> dict: + """Get historical prices of tokens by contract address using a 4-hour search window.""" + coins_str = ",".join(coins) + url = f"{DEFILLAMA_COINS_BASE_URL}/prices/historical/{timestamp}/{coins_str}?searchWidth=4h" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_batch_historical_prices(coins_timestamps: dict) -> dict: + """Get historical prices for multiple tokens at multiple timestamps.""" + url = f"{DEFILLAMA_COINS_BASE_URL}/batchHistorical" + + async with httpx.AsyncClient() as client: + response = await client.get( + url, params={"coins": coins_timestamps, "searchWidth": "600"} + ) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_price_chart(coins: List[str]) -> dict: + """Get historical price chart data from the past day for multiple tokens.""" + coins_str = ",".join(coins) + start_time = int(datetime.now().timestamp()) - 86400 # now - 1 day + + url = f"{DEFILLAMA_COINS_BASE_URL}/chart/{coins_str}" + params = {"start": start_time, "span": 10, "period": "2d", "searchWidth": "600"} + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_price_percentage(coins: List[str]) -> dict: + """Get price percentage changes for multiple tokens over a 24h period.""" + coins_str = ",".join(coins) + current_timestamp = int(datetime.now().timestamp()) + + url = f"{DEFILLAMA_COINS_BASE_URL}/percentage/{coins_str}" + params = {"timestamp": current_timestamp, "lookForward": "false", "period": "24h"} + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_first_price(coins: List[str]) -> dict: + """Get first recorded price data for multiple tokens.""" + coins_str = ",".join(coins) + url = f"{DEFILLAMA_COINS_BASE_URL}/prices/first/{coins_str}" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_block(chain: str) -> dict: + """Get current block data for a specific chain.""" + current_timestamp = int(datetime.now().timestamp()) + url = f"{DEFILLAMA_COINS_BASE_URL}/block/{chain}/{current_timestamp}" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +# Stablecoins API Functions +async def fetch_stablecoins() -> dict: + """Get comprehensive stablecoin data from DeFi Llama.""" + url = f"{DEFILLAMA_STABLECOINS_BASE_URL}/stablecoins" + params = {"includePrices": 
"true"} + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_stablecoin_charts( + stablecoin_id: str, chain: Optional[str] = None +) -> dict: + """Get historical circulating supply data for a stablecoin.""" + base_url = f"{DEFILLAMA_STABLECOINS_BASE_URL}/stablecoincharts" + + # If chain is specified, fetch chain-specific data, otherwise fetch all chains + endpoint = f"/{chain}" if chain else "/all" + url = f"{base_url}{endpoint}?stablecoin={stablecoin_id}" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_stablecoin_chains() -> dict: + """Get stablecoin distribution data across all chains.""" + url = f"{DEFILLAMA_STABLECOINS_BASE_URL}/stablecoinchains" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_stablecoin_prices() -> dict: + """Get current stablecoin price data. + + Returns: + Dictionary containing stablecoin prices with their dates + """ + url = f"{DEFILLAMA_STABLECOINS_BASE_URL}/stablecoinprices" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +# Yields API Functions +async def fetch_pools() -> dict: + """Get comprehensive data for all yield-generating pools.""" + url = f"{DEFILLAMA_YIELDS_BASE_URL}/pools" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_pool_chart(pool_id: str) -> dict: + """Get historical chart data for a specific pool.""" + url = f"{DEFILLAMA_YIELDS_BASE_URL}/chart/{pool_id}" + + async with httpx.AsyncClient() as client: + response = await client.get(url) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +# Volumes API Functions +async def fetch_dex_overview() -> dict: + """Get overview data for DEX protocols.""" + url = f"{DEFILLAMA_VOLUMES_BASE_URL}/overview/dexs" + params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyVolume", + } + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_dex_summary(protocol: str) -> dict: + """Get summary data for a specific DEX protocol.""" + url = f"{DEFILLAMA_VOLUMES_BASE_URL}/summary/dexs/{protocol}" + params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyVolume", + } + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +async def fetch_options_overview() -> dict: + """Get 
overview data for options protocols from DeFi Llama.""" + url = f"{DEFILLAMA_VOLUMES_BASE_URL}/overview/options" + params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyPremiumVolume", + } + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() + + +# Fees and Revenue API Functions +async def fetch_fees_overview() -> dict: + """Get overview data for fees from DeFi Llama. + + Returns: + Dictionary containing fees overview data + """ + url = f"{DEFILLAMA_FEES_BASE_URL}/overview/fees" + params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyFees", + } + + async with httpx.AsyncClient() as client: + response = await client.get(url, params=params) + if response.status_code != 200: + return {"error": f"API returned status code {response.status_code}"} + return response.json() diff --git a/intentkit/skills/defillama/base.py b/intentkit/skills/defillama/base.py new file mode 100644 index 00000000..4825d54a --- /dev/null +++ b/intentkit/skills/defillama/base.py @@ -0,0 +1,136 @@ +"""Base class for all DeFi Llama tools.""" + +from datetime import datetime, timedelta, timezone +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.graph import AgentContext +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.skills.defillama.config.chains import ( + get_chain_from_alias, +) + +DEFILLAMA_BASE_URL = "https://api.llama.fi" + + +class DefiLlamaBaseTool(IntentKitSkill): + """Base class for DeFi Llama tools. + + This class provides common functionality for all DeFi Llama API tools: + - Rate limiting + - State management + - Chain validation + - Error handling + """ + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + base_url: str = Field( + default=DEFILLAMA_BASE_URL, description="Base URL for DeFi Llama API" + ) + + @property + def category(self) -> str: + return "defillama" + + async def check_rate_limit( + self, context: AgentContext, max_requests: int = 30, interval: int = 5 + ) -> tuple[bool, str | None]: + """Check if the rate limit has been exceeded. 
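+
+        The limit is a simple fixed window persisted via the skill store: the
+        first call in a window stores a reset time `interval` minutes ahead,
+        and later calls increment a counter until `max_requests` is reached
+        or the window expires. Typical use inside a skill (sketch; the
+        response class is illustrative):
+
+            is_limited, error_msg = await self.check_rate_limit(context)
+            if is_limited:
+                return SomeToolResponse(error=error_msg)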
+ + Args: + context: Skill context + max_requests: Maximum requests allowed in the interval (default: 30) + interval: Time interval in minutes (default: 5) + + Returns: + Rate limit status and error message if limited + """ + rate_limit = await self.skill_store.get_agent_skill_data( + context.agent_id, self.name, "rate_limit" + ) + current_time = datetime.now(tz=timezone.utc) + + if ( + rate_limit + and rate_limit.get("reset_time") + and rate_limit.get("count") is not None + and datetime.fromisoformat(rate_limit["reset_time"]) > current_time + ): + if rate_limit["count"] >= max_requests: + return True, "Rate limit exceeded" + + rate_limit["count"] += 1 + await self.skill_store.save_agent_skill_data( + context.agent_id, self.name, "rate_limit", rate_limit + ) + return False, None + + new_rate_limit = { + "count": 1, + "reset_time": (current_time + timedelta(minutes=interval)).isoformat(), + } + await self.skill_store.save_agent_skill_data( + context.agent_id, self.name, "rate_limit", new_rate_limit + ) + return False, None + + async def validate_chain(self, chain: str | None) -> tuple[bool, str | None]: + """Validate and normalize chain parameter. + + Args: + chain: Chain name to validate + + Returns: + Tuple of (is_valid, normalized_chain_name) + """ + if chain is None: + return True, None + + normalized_chain = get_chain_from_alias(chain) + if normalized_chain is None: + return False, None + + return True, normalized_chain + + def get_endpoint_url(self, endpoint: str) -> str: + """Construct full endpoint URL. + + Args: + endpoint: API endpoint path + + Returns: + Complete URL for the endpoint + """ + return f"{self.base_url}/{endpoint.lstrip('/')}" + + def format_error_response(self, status_code: int, message: str) -> dict: + """Format error responses consistently. + + Args: + status_code: HTTP status code + message: Error message + + Returns: + Formatted error response dictionary + """ + return { + "error": True, + "status_code": status_code, + "message": message, + "timestamp": datetime.now(tz=timezone.utc).isoformat(), + } + + def get_current_timestamp(self) -> int: + """Get current timestamp in UTC. + + Returns: + Current Unix timestamp + """ + return int(datetime.now(tz=timezone.utc).timestamp()) diff --git a/intentkit/skills/defillama/coins/__init__.py b/intentkit/skills/defillama/coins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/coins/fetch_batch_historical_prices.py b/intentkit/skills/defillama/coins/fetch_batch_historical_prices.py new file mode 100644 index 00000000..02320ffa --- /dev/null +++ b/intentkit/skills/defillama/coins/fetch_batch_historical_prices.py @@ -0,0 +1,115 @@ +"""Tool for fetching batch historical token prices via DeFi Llama API.""" + +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_batch_historical_prices +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_BATCH_HISTORICAL_PRICES_PROMPT = """ +This tool fetches historical token prices from DeFi Llama for multiple tokens at multiple timestamps. 
+Provide a dictionary mapping token identifiers to lists of timestamps in the format: +- Ethereum tokens: {"ethereum:0x...": [timestamp1, timestamp2]} +- Other chains: {"chainname:0x...": [timestamp1, timestamp2]} +- CoinGecko IDs: {"coingecko:bitcoin": [timestamp1, timestamp2]} +Returns historical price data including: +- Prices in USD at each timestamp +- Token symbols +- Confidence scores for price data +Uses a 4-hour search window around each specified timestamp. +""" + + +class HistoricalPricePoint(BaseModel): + """Model representing a single historical price point.""" + + timestamp: int = Field(..., description="Unix timestamp of the price data") + price: float = Field(..., description="Token price in USD at the timestamp") + confidence: float = Field(..., description="Confidence score for the price data") + + +class TokenPriceHistory(BaseModel): + """Model representing historical price data for a single token.""" + + symbol: str = Field(..., description="Token symbol") + prices: List[HistoricalPricePoint] = Field( + ..., description="List of historical price points" + ) + + +class FetchBatchHistoricalPricesInput(BaseModel): + """Input schema for fetching batch historical token prices.""" + + coins_timestamps: Dict[str, List[int]] = Field( + ..., description="Dictionary mapping token identifiers to lists of timestamps" + ) + + +class FetchBatchHistoricalPricesResponse(BaseModel): + """Response schema for batch historical token prices.""" + + coins: Dict[str, TokenPriceHistory] = Field( + default_factory=dict, + description="Historical token prices keyed by token identifier", + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchBatchHistoricalPrices(DefiLlamaBaseTool): + """Tool for fetching batch historical token prices from DeFi Llama. + + This tool retrieves historical prices for multiple tokens at multiple + timestamps, using a 4-hour search window around each requested time. + + Example: + prices_tool = DefiLlamaFetchBatchHistoricalPrices( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await prices_tool._arun( + coins_timestamps={ + "ethereum:0x...": [1640995200, 1641081600], # Jan 1-2, 2022 + "coingecko:bitcoin": [1640995200, 1641081600] + } + ) + """ + + name: str = "defillama_fetch_batch_historical_prices" + description: str = FETCH_BATCH_HISTORICAL_PRICES_PROMPT + args_schema: Type[BaseModel] = FetchBatchHistoricalPricesInput + + async def _arun( + self, coins_timestamps: Dict[str, List[int]] + ) -> FetchBatchHistoricalPricesResponse: + """Fetch historical prices for the given tokens at specified timestamps. 
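+
+        Example (illustrative token identifier and timestamps):
+            result = await self._arun(
+                coins_timestamps={"coingecko:bitcoin": [1640995200, 1641081600]}
+            )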
+
+        Args:
+            coins_timestamps: Dictionary mapping token identifiers to lists of timestamps
+
+        Returns:
+            FetchBatchHistoricalPricesResponse containing historical token prices or error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return FetchBatchHistoricalPricesResponse(error=error_msg)
+
+            # Fetch batch historical prices from API
+            result = await fetch_batch_historical_prices(
+                coins_timestamps=coins_timestamps
+            )
+
+            # Check for API errors
+            if isinstance(result, dict) and "error" in result:
+                return FetchBatchHistoricalPricesResponse(error=result["error"])
+
+            # Return the response matching the API structure
+            return FetchBatchHistoricalPricesResponse(coins=result["coins"])
+
+        except Exception as e:
+            return FetchBatchHistoricalPricesResponse(error=str(e))
diff --git a/intentkit/skills/defillama/coins/fetch_block.py b/intentkit/skills/defillama/coins/fetch_block.py
new file mode 100644
index 00000000..ebf51278
--- /dev/null
+++ b/intentkit/skills/defillama/coins/fetch_block.py
@@ -0,0 +1,97 @@
+"""Tool for fetching current block data via DeFi Llama API."""
+
+from typing import Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_block
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_BLOCK_PROMPT = """
+This tool fetches current block data from DeFi Llama for a specific chain.
+Provide:
+- Chain name (e.g. "ethereum", "bsc", "solana")
+Returns:
+- Block height
+- Block timestamp
+"""
+
+
+class BlockData(BaseModel):
+    """Model representing block data."""
+
+    height: int = Field(..., description="Block height number")
+    timestamp: int = Field(..., description="Unix timestamp of the block")
+
+
+class FetchBlockInput(BaseModel):
+    """Input schema for fetching block data."""
+
+    chain: str = Field(..., description="Chain name to fetch block data for")
+
+
+class FetchBlockResponse(BaseModel):
+    """Response schema for block data."""
+
+    chain: str = Field(..., description="Normalized chain name")
+    height: Optional[int] = Field(None, description="Block height number")
+    timestamp: Optional[int] = Field(None, description="Unix timestamp of the block")
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchBlock(DefiLlamaBaseTool):
+    """Tool for fetching current block data from DeFi Llama.
+
+    This tool retrieves current block data for a specific chain.
+
+    Example:
+        block_tool = DefiLlamaFetchBlock(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await block_tool._arun(chain="ethereum")
+    """
+
+    name: str = "defillama_fetch_block"
+    description: str = FETCH_BLOCK_PROMPT
+    args_schema: Type[BaseModel] = FetchBlockInput
+
+    async def _arun(self, chain: str) -> FetchBlockResponse:
+        """Fetch current block data for the given chain.
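+
+        The lookup always uses the current wall-clock time, so the API
+        returns the chain's block closest to "now" (see fetch_block in
+        api.py).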
+
+        Args:
+            chain: Chain name to fetch block data for
+
+        Returns:
+            FetchBlockResponse containing block data or error
+        """
+        try:
+            # Validate chain parameter
+            is_valid, normalized_chain = await self.validate_chain(chain)
+            if not is_valid or normalized_chain is None:
+                return FetchBlockResponse(chain=chain, error=f"Invalid chain: {chain}")
+
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return FetchBlockResponse(chain=normalized_chain, error=error_msg)
+
+            # Fetch block data from API
+            result = await fetch_block(chain=normalized_chain)
+
+            # Check for API errors
+            if isinstance(result, dict) and "error" in result:
+                return FetchBlockResponse(chain=normalized_chain, error=result["error"])
+
+            # Return the response matching the API structure
+            return FetchBlockResponse(
+                chain=normalized_chain,
+                height=result["height"],
+                timestamp=result["timestamp"],
+            )
+
+        except Exception as e:
+            return FetchBlockResponse(chain=chain, error=str(e))
diff --git a/intentkit/skills/defillama/coins/fetch_current_prices.py b/intentkit/skills/defillama/coins/fetch_current_prices.py
new file mode 100644
index 00000000..8835ee79
--- /dev/null
+++ b/intentkit/skills/defillama/coins/fetch_current_prices.py
@@ -0,0 +1,102 @@
+"""Tool for fetching token prices via DeFi Llama API."""
+
+from typing import Dict, List, Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_current_prices
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_PRICES_PROMPT = """
+This tool fetches current token prices from DeFi Llama with a 4-hour search window.
+Provide a list of token identifiers in the format:
+- Ethereum tokens: 'ethereum:0x...'
+- Other chains: 'chainname:0x...'
+- CoinGecko IDs: 'coingecko:bitcoin'
+Returns price data including:
+- Current price in USD
+- Token symbol
+- Price confidence score
+- Token decimals (if available)
+- Last update timestamp
+"""
+
+
+class TokenPrice(BaseModel):
+    """Model representing token price data."""
+
+    price: float = Field(..., description="Current token price in USD")
+    symbol: str = Field(..., description="Token symbol")
+    timestamp: int = Field(..., description="Unix timestamp of last price update")
+    confidence: float = Field(..., description="Confidence score for the price data")
+    decimals: Optional[int] = Field(None, description="Token decimals, if available")
+
+
+class FetchCurrentPricesInput(BaseModel):
+    """Input schema for fetching current token prices with a 4-hour search window."""
+
+    coins: List[str] = Field(
+        ...,
+        description="List of token identifiers (e.g. 'ethereum:0x...', 'coingecko:ethereum')",
+    )
+
+
+class FetchCurrentPricesResponse(BaseModel):
+    """Response schema for current token prices."""
+
+    coins: Dict[str, TokenPrice] = Field(
+        default_factory=dict, description="Token prices keyed by token identifier"
+    )
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchCurrentPrices(DefiLlamaBaseTool):
+    """Tool for fetching current token prices from DeFi Llama.
+
+    This tool retrieves current prices for multiple tokens in a single request,
+    using a 4-hour search window to ensure fresh data.
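+
+    The 4-hour window is passed to the API as searchWidth=4h; tokens with no
+    recorded price update inside that window may be missing from the result.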
+ + Example: + prices_tool = DefiLlamaFetchCurrentPrices( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await prices_tool._arun( + coins=["ethereum:0x...", "coingecko:bitcoin"] + ) + """ + + name: str = "defillama_fetch_current_prices" + description: str = FETCH_PRICES_PROMPT + args_schema: Type[BaseModel] = FetchCurrentPricesInput + + async def _arun(self, coins: List[str]) -> FetchCurrentPricesResponse: + """Fetch current prices for the given tokens. + + Args: + config: Runnable configuration + coins: List of token identifiers to fetch prices for + + Returns: + FetchCurrentPricesResponse containing token prices or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchCurrentPricesResponse(error=error_msg) + + # Fetch prices from API + result = await fetch_current_prices(coins=coins) + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchCurrentPricesResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchCurrentPricesResponse(coins=result["coins"]) + + except Exception as e: + return FetchCurrentPricesResponse(error=str(e)) diff --git a/intentkit/skills/defillama/coins/fetch_first_price.py b/intentkit/skills/defillama/coins/fetch_first_price.py new file mode 100644 index 00000000..3943b2cc --- /dev/null +++ b/intentkit/skills/defillama/coins/fetch_first_price.py @@ -0,0 +1,97 @@ +"""Tool for fetching first recorded token prices via DeFi Llama API.""" + +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_first_price +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_FIRST_PRICE_PROMPT = """ +This tool fetches the first recorded price data from DeFi Llama for multiple tokens. +Provide a list of token identifiers in the format: +- Ethereum tokens: 'ethereum:0x...' +- Other chains: 'chainname:0x...' +- CoinGecko IDs: 'coingecko:bitcoin' +Returns first price data including: +- Initial price in USD +- Token symbol +- Timestamp of first recorded price +""" + + +class FirstPriceData(BaseModel): + """Model representing first price data for a single token.""" + + symbol: str = Field(..., description="Token symbol") + price: float = Field(..., description="First recorded price in USD") + timestamp: int = Field(..., description="Unix timestamp of first recorded price") + + +class FetchFirstPriceInput(BaseModel): + """Input schema for fetching first token prices.""" + + coins: List[str] = Field( + ..., description="List of token identifiers to fetch first prices for" + ) + + +class FetchFirstPriceResponse(BaseModel): + """Response schema for first token prices.""" + + coins: Dict[str, FirstPriceData] = Field( + default_factory=dict, description="First price data keyed by token identifier" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchFirstPrice(DefiLlamaBaseTool): + """Tool for fetching first recorded token prices from DeFi Llama. + + This tool retrieves the first price data recorded for multiple tokens, + including the initial price, symbol, and timestamp. 
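+
+    One common follow-up (sketch, assuming the current-prices tool is also
+    enabled) is to combine this with defillama_fetch_current_prices and
+    estimate an all-time price multiple as current_price / first_price.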
+ + Example: + first_price_tool = DefiLlamaFetchFirstPrice( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await first_price_tool._arun( + coins=["ethereum:0x...", "coingecko:ethereum"] + ) + """ + + name: str = "defillama_fetch_first_price" + description: str = FETCH_FIRST_PRICE_PROMPT + args_schema: Type[BaseModel] = FetchFirstPriceInput + + async def _arun(self, coins: List[str]) -> FetchFirstPriceResponse: + """Fetch first recorded prices for the given tokens. + + Args: + config: Runnable configuration + coins: List of token identifiers to fetch first prices for + + Returns: + FetchFirstPriceResponse containing first price data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchFirstPriceResponse(error=error_msg) + + # Fetch first price data from API + result = await fetch_first_price(coins=coins) + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchFirstPriceResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchFirstPriceResponse(coins=result["coins"]) + + except Exception as e: + return FetchFirstPriceResponse(error=str(e)) diff --git a/intentkit/skills/defillama/coins/fetch_historical_prices.py b/intentkit/skills/defillama/coins/fetch_historical_prices.py new file mode 100644 index 00000000..8dabc998 --- /dev/null +++ b/intentkit/skills/defillama/coins/fetch_historical_prices.py @@ -0,0 +1,109 @@ +"""Tool for fetching historical token prices via DeFi Llama API.""" + +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_historical_prices +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_HISTORICAL_PRICES_PROMPT = """ +This tool fetches historical token prices from DeFi Llama for a specific timestamp. +Provide a timestamp and list of token identifiers in the format: +- Ethereum tokens: 'ethereum:0x...' +- Other chains: 'chainname:0x...' +- CoinGecko IDs: 'coingecko:bitcoin' +Returns historical price data including: +- Price in USD at the specified time +- Token symbol +- Token decimals (if available) +- Actual timestamp of the price data +Uses a 4-hour search window around the specified timestamp. +""" + + +class HistoricalTokenPrice(BaseModel): + """Model representing historical token price data.""" + + price: float = Field(..., description="Token price in USD at the specified time") + symbol: Optional[str] = Field(None, description="Token symbol") + timestamp: int = Field(..., description="Unix timestamp of the price data") + decimals: Optional[int] = Field(None, description="Token decimals, if available") + + +class FetchHistoricalPricesInput(BaseModel): + """Input schema for fetching historical token prices.""" + + timestamp: int = Field( + ..., description="Unix timestamp for historical price lookup" + ) + coins: List[str] = Field( + ..., + description="List of token identifiers (e.g. 
'ethereum:0x...', 'coingecko:ethereum')", + ) + + +class FetchHistoricalPricesResponse(BaseModel): + """Response schema for historical token prices.""" + + coins: Dict[str, HistoricalTokenPrice] = Field( + default_factory=dict, + description="Historical token prices keyed by token identifier", + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchHistoricalPrices(DefiLlamaBaseTool): + """Tool for fetching historical token prices from DeFi Llama. + + This tool retrieves historical prices for multiple tokens at a specific + timestamp, using a 4-hour search window around the requested time. + + Example: + prices_tool = DefiLlamaFetchHistoricalPrices( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await prices_tool._arun( + timestamp=1640995200, # Jan 1, 2022 + coins=["ethereum:0x...", "coingecko:bitcoin"] + ) + """ + + name: str = "defillama_fetch_historical_prices" + description: str = FETCH_HISTORICAL_PRICES_PROMPT + args_schema: Type[BaseModel] = FetchHistoricalPricesInput + + async def _arun( + self, timestamp: int, coins: List[str] + ) -> FetchHistoricalPricesResponse: + """Fetch historical prices for the given tokens at the specified time. + + Args: + config: Runnable configuration + timestamp: Unix timestamp for historical price lookup + coins: List of token identifiers to fetch prices for + + Returns: + FetchHistoricalPricesResponse containing historical token prices or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchHistoricalPricesResponse(error=error_msg) + + # Fetch historical prices from API + result = await fetch_historical_prices(timestamp=timestamp, coins=coins) + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchHistoricalPricesResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchHistoricalPricesResponse(coins=result["coins"]) + + except Exception as e: + return FetchHistoricalPricesResponse(error=str(e)) diff --git a/intentkit/skills/defillama/coins/fetch_price_chart.py b/intentkit/skills/defillama/coins/fetch_price_chart.py new file mode 100644 index 00000000..14f21873 --- /dev/null +++ b/intentkit/skills/defillama/coins/fetch_price_chart.py @@ -0,0 +1,106 @@ +"""Tool for fetching token price charts via DeFi Llama API.""" + +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_price_chart +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_PRICE_CHART_PROMPT = """ +This tool fetches price chart data from DeFi Llama for multiple tokens. +Provide a list of token identifiers in the format: +- Ethereum tokens: 'ethereum:0x...' +- Other chains: 'chainname:0x...' 
+- CoinGecko IDs: 'coingecko:bitcoin' +Returns price chart data including: +- Historical price points for the last 24 hours +- Token symbol and metadata +- Confidence scores for price data +- Token decimals (if available) +""" + + +class PricePoint(BaseModel): + """Model representing a single price point in the chart.""" + + timestamp: int = Field(..., description="Unix timestamp of the price data") + price: float = Field(..., description="Token price in USD at the timestamp") + + +class TokenPriceChart(BaseModel): + """Model representing price chart data for a single token.""" + + symbol: str = Field(..., description="Token symbol") + confidence: float = Field(..., description="Confidence score for the price data") + decimals: Optional[int] = Field(None, description="Token decimals") + prices: List[PricePoint] = Field(..., description="List of historical price points") + + +class FetchPriceChartInput(BaseModel): + """Input schema for fetching token price charts.""" + + coins: List[str] = Field( + ..., description="List of token identifiers to fetch price charts for" + ) + + +class FetchPriceChartResponse(BaseModel): + """Response schema for token price charts.""" + + coins: Dict[str, TokenPriceChart] = Field( + default_factory=dict, description="Price chart data keyed by token identifier" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchPriceChart(DefiLlamaBaseTool): + """Tool for fetching token price charts from DeFi Llama. + + This tool retrieves price chart data for multiple tokens over the last 24 hours, + including historical price points and token metadata. + + Example: + chart_tool = DefiLlamaFetchPriceChart( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await chart_tool._arun( + coins=["ethereum:0x...", "coingecko:ethereum"] + ) + """ + + name: str = "defillama_fetch_price_chart" + description: str = FETCH_PRICE_CHART_PROMPT + args_schema: Type[BaseModel] = FetchPriceChartInput + + async def _arun(self, coins: List[str]) -> FetchPriceChartResponse: + """Fetch price charts for the given tokens. 
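+
+        Example (illustrative):
+            result = await self._arun(coins=["coingecko:ethereum"])
+            chart = result.coins.get("coingecko:ethereum")
+            # chart.prices is a list of PricePoint(timestamp=..., price=...)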
+
+        Args:
+            coins: List of token identifiers to fetch price charts for
+
+        Returns:
+            FetchPriceChartResponse containing price chart data or error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return FetchPriceChartResponse(error=error_msg)
+
+            # Fetch price chart data from API
+            result = await fetch_price_chart(coins=coins)
+
+            # Check for API errors
+            if isinstance(result, dict) and "error" in result:
+                return FetchPriceChartResponse(error=result["error"])
+
+            # Return the response matching the API structure
+            return FetchPriceChartResponse(coins=result["coins"])
+
+        except Exception as e:
+            return FetchPriceChartResponse(error=str(e))
diff --git a/intentkit/skills/defillama/coins/fetch_price_percentage.py b/intentkit/skills/defillama/coins/fetch_price_percentage.py
new file mode 100644
index 00000000..7d22c18c
--- /dev/null
+++ b/intentkit/skills/defillama/coins/fetch_price_percentage.py
@@ -0,0 +1,90 @@
+"""Tool for fetching token price percentage changes via DeFi Llama API."""
+
+from typing import Dict, List, Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_price_percentage
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_PRICE_PERCENTAGE_PROMPT = """
+This tool fetches 24-hour price percentage changes from DeFi Llama for multiple tokens.
+Provide a list of token identifiers in the format:
+- Ethereum tokens: 'ethereum:0x...'
+- Other chains: 'chainname:0x...'
+- CoinGecko IDs: 'coingecko:bitcoin'
+Returns price percentage changes:
+- Negative values indicate price decrease
+- Positive values indicate price increase
+- Changes are calculated looking back from the current time
+"""
+
+
+class FetchPricePercentageInput(BaseModel):
+    """Input schema for fetching token price percentage changes."""
+
+    coins: List[str] = Field(
+        ..., description="List of token identifiers to fetch price changes for"
+    )
+
+
+class FetchPricePercentageResponse(BaseModel):
+    """Response schema for token price percentage changes."""
+
+    coins: Dict[str, float] = Field(
+        default_factory=dict,
+        description="Price percentage changes keyed by token identifier",
+    )
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchPricePercentage(DefiLlamaBaseTool):
+    """Tool for fetching token price percentage changes from DeFi Llama.
+
+    This tool retrieves 24-hour price percentage changes for multiple tokens,
+    calculated from the current time.
+
+    Example:
+        percentage_tool = DefiLlamaFetchPricePercentage(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await percentage_tool._arun(
+            coins=["ethereum:0x...", "coingecko:ethereum"]
+        )
+    """
+
+    name: str = "defillama_fetch_price_percentage"
+    description: str = FETCH_PRICE_PERCENTAGE_PROMPT
+    args_schema: Type[BaseModel] = FetchPricePercentageInput
+
+    async def _arun(self, coins: List[str]) -> FetchPricePercentageResponse:
+        """Fetch price percentage changes for the given tokens.
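+
+        The returned mapping holds plain percentage values: for example, a
+        value of -2.5 for "coingecko:bitcoin" means a 2.5% price drop over
+        the 24-hour period.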
+ + Args: + config: Runnable configuration + coins: List of token identifiers to fetch price changes for + + Returns: + FetchPricePercentageResponse containing price percentage changes or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchPricePercentageResponse(error=error_msg) + + # Fetch price percentage data from API + result = await fetch_price_percentage(coins=coins) + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchPricePercentageResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchPricePercentageResponse(coins=result["coins"]) + + except Exception as e: + return FetchPricePercentageResponse(error=str(e)) diff --git a/intentkit/skills/defillama/config/__init__.py b/intentkit/skills/defillama/config/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/config/chains.py b/intentkit/skills/defillama/config/chains.py new file mode 100644 index 00000000..c63b002e --- /dev/null +++ b/intentkit/skills/defillama/config/chains.py @@ -0,0 +1,433 @@ +"""Chain configuration for DeFi Llama integration. + +This module contains the valid chains and their aliases for use with the DeFi Llama API. +The VALID_CHAINS dictionary maps primary chain identifiers to their known aliases. +""" + +from typing import Dict, List + +# Chain configuration with aliases +VALID_CHAINS: Dict[str, List[str]] = { + "ethereum": ["eth", "eth1", "eth2"], + "solana": ["sol"], + "bitcoin": ["btc"], + "bsc": ["bnb", "bsc"], + "tron": ["trx"], + "base": ["base"], + "berachain": ["bera"], + "arbitrum": ["arb"], + "sui": ["sui"], + "avalanche": ["avax", "ava"], + "aptos": ["apt"], + "polygon": ["matic", "polygon"], + "hyperliquid": ["hyper"], + "op_mainnet": ["op"], + "sonic": ["sonic"], + "core": ["core"], + "zircuit": ["zircuit"], + "cronos": ["cro"], + "bitlayer": ["bit"], + "cardano": ["ada"], + "bsquared": ["b2"], + "mantle": ["mntl"], + "pulsechain": ["pulse"], + "gnosis": ["gnosis"], + "dydx": ["dydx"], + "taiko": ["tk"], + "bob": ["bob"], + "zksync_era": ["zk", "zkSync"], + "linea": ["linea"], + "blast": ["blast"], + "rootstock": ["rs"], + "thorchain": ["thor"], + "ailayer": ["ai"], + "sei": ["sei"], + "eos": ["eos"], + "ton": ["ton"], + "near": ["near"], + "merlin": ["merlin"], + "kava": ["kava"], + "algorand": ["algo"], + "starknet": ["stark"], + "hedera": ["hbar"], + "mixin": ["mixin"], + "scroll": ["scroll"], + "kaia": ["kaia"], + "stacks": ["stx"], + "ronin": ["ronin"], + "osmosis": ["osmo"], + "verus": ["verus"], + "multiversx": ["x"], + "celo": ["celo"], + "xrpl": ["xrpl"], + "fraxtal": ["frax"], + "stellar": ["xlm"], + "bouncebit": ["bounce"], + "wemix3_0": ["wemix"], + "filecoin": ["fil"], + "hydration": ["hydra"], + "fantom": ["ftm"], + "iota_evm": ["iot"], + "manta": ["manta"], + "eclipse": ["eclp"], + "flow": ["flow"], + "injective": ["inj"], + "tezos": ["xtz"], + "soneium": ["son"], + "neutron": ["neut"], + "icp": ["icp"], + "iotex": ["iotex"], + "metis": ["metis"], + "opbnb": ["opbnb"], + "bifrost_network": ["bifrost"], + "flare": ["flare"], + "xdc": ["xdc"], + "morph": ["morph"], + "waves": ["waves"], + "conflux": ["conflux"], + "corn": ["corn"], + "reya_network": ["reya"], + "mode": ["mode"], + "cronos_zkevm": ["cronoszk"], + "telos": ["telos"], + "rollux": ["rollux"], + "zetachain": ["zeta"], + "chainflip": ["flip"], + "fuel_ignition": ["fuel"], + 
"aurora": ["aurora"], + "map_protocol": ["map"], + "kujira": ["kujira"], + "astar": ["astar"], + "moonbeam": ["moonbeam"], + "story": ["story"], + "abstract": ["abstract"], + "radix": ["radix"], + "zklink_nova": ["zklink"], + "duckchain": ["duck"], + "swellchain": ["swell"], + "apechain": ["ape"], + "icon": ["icx"], + "immutable_zkevm": ["immutable"], + "eos_evm": ["eosevm"], + "bifrost": ["bifrost"], + "k2": ["k2"], + "aelf": ["aelf"], + "fsc": ["fsc"], + "proton": ["proton"], + "secret": ["secret"], + "unichain": ["unichain"], + "neo": ["neo"], + "mayachain": ["maya"], + "canto": ["canto"], + "chiliz": ["chz"], + "x_layer": ["xlayer"], + "polynomial": ["poly"], + "ontology": ["ont"], + "onus": ["onus"], + "bitcoincash": ["bch"], + "terra2": ["terra2"], + "polygon_zkevm": ["polyzk"], + "ink": ["ink"], + "sophon": ["sophon"], + "venom": ["venom"], + "dexalot": ["dexalot"], + "bahamut": ["bahamut"], + "vite": ["vite"], + "dfs_network": ["dfs"], + "ergo": ["ergo"], + "wanchain": ["wan"], + "mantra": ["mantra"], + "doge": ["doge"], + "lisk": ["lisk"], + "alephium": ["alephium"], + "vision": ["vision"], + "dogechain": ["dogechain"], + "nuls": ["nuls"], + "agoric": ["agoric"], + "defichain": ["defi"], + "dymension": ["dym"], + "thundercore": ["tc"], + "godwokenv1": ["godwoken"], + "bevm": ["bevm"], + "litecoin": ["ltc"], + "ux": ["ux"], + "functionx": ["fx"], + "oraichain": ["oraichain"], + "dfk": ["dfk"], + "carbon": ["carbon"], + "beam": ["beam"], + "gravity": ["gravity"], + "horizen_eon": ["horizen"], + "moonriver": ["movr"], + "real": ["real"], + "oasys": ["oasys"], + "hydra": ["hydra"], + "oktchain": ["okt"], + "shibarium": ["shib"], + "world_chain": ["world"], + "interlay": ["interlay"], + "acala": ["acala"], + "elys": ["elys"], + "boba": ["boba"], + "vana": ["vana"], + "harmony": ["harmony"], + "lachain_network": ["lachain"], + "theta": ["theta"], + "ab": ["ab"], + "defiverse": ["defiverse"], + "kcc": ["kcc"], + "oasis_sapphire": ["oasis"], + "etherlink": ["etherlink"], + "wax": ["wax"], + "archway": ["archway"], + "redbelly": ["redbelly"], + "velas": ["velas"], + "equilibrium": ["equilibrium"], + "unit0": ["unit0"], + "ql1": ["ql1"], + "songbird": ["songbird"], + "zilliqa": ["zil"], + "rangers": ["rangers"], + "odyssey": ["odyssey"], + "terra_classic": ["terra"], + "kadena": ["kadena"], + "zero_network": ["zero"], + "elastos": ["elastos"], + "fluence": ["fluence"], + "idex": ["idex"], + "xpla": ["xpla"], + "milkomeda_c1": ["milkomeda"], + "taraxa": ["taraxa"], + "bitrock": ["bitrock"], + "persistence_one": ["persistence"], + "meter": ["meter"], + "arbitrum_nova": ["arbitrumnova"], + "everscale": ["everscale"], + "ultron": ["ultron"], + "fuse": ["fuse"], + "vechain": ["vet"], + "renec": ["renec"], + "shimmerevm": ["shimmer"], + "obyte": ["obyte"], + "nolus": ["nolus"], + "airdao": ["airdao"], + "elysium": ["elysium"], + "xai": ["xai"], + "starcoin": ["starcoin"], + "oasis_emerald": ["oasisem"], + "haqq": ["haqq"], + "nos": ["nos"], + "neon": ["neon"], + "bittorrent": ["btt"], + "csc": ["csc"], + "satoshivm": ["satv"], + "naka": ["naka"], + "edu_chain": ["edu"], + "kintsugi": ["kintsugi"], + "energi": ["energi"], + "rss3": ["rss3"], + "sx_rollup": ["sx"], + "cosmoshub": ["cosmos"], + "saakuru": ["saakuru"], + "boba_bnb": ["boba_bnb"], + "ethereumclassic": ["etc"], + "skale_europa": ["skale"], + "degen": ["degen"], + "mint": ["mint"], + "juno": ["juno"], + "viction": ["viction"], + "evmos": ["evmos"], + "enuls": ["enuls"], + "lightlink": ["lightlink"], + "sanko": ["sanko"], + "karura": 
["karura"], + "kardia": ["kardia"], + "superposition": ["super"], + "crab": ["crab"], + "genesys": ["genesys"], + "matchain": ["matchain"], + "chihuahua": ["chihuahua"], + "massa": ["massa"], + "kroma": ["kroma"], + "tombchain": ["tomb"], + "smartbch": ["bchsmart"], + "ancient8": ["ancient8"], + "penumbra": ["penumbra"], + "ethpow": ["ethpow"], + "omax": ["omax"], + "migaloo": ["migaloo"], + "bostrom": ["bostrom"], + "energyweb": ["energyweb"], + "libre": ["libre"], + "defichain_evm": ["defievm"], + "artela": ["artela"], + "dash": ["dash"], + "sora": ["sora"], + "step": ["step"], + "nibiru": ["nibiru"], + "zkfair": ["zkfair"], + "hela": ["hela"], + "godwoken": ["godwoken"], + "shape": ["shape"], + "stargaze": ["stargaze"], + "crossfi": ["crossfi"], + "bitkub_chain": ["bitkub"], + "q_protocol": ["qprotocol"], + "loop": ["loop"], + "parex": ["parex"], + "alv": ["alv"], + "nahmii": ["nahmii"], + "shido": ["shido"], + "electroneum": ["etn"], + "zora": ["zora"], + "astar_zkevm": ["astark"], + "comdex": ["comdex"], + "stratis": ["stratis"], + "polkadex": ["polkadex"], + "meer": ["meer"], + "neo_x_mainnet": ["neo_x"], + "aura_network": ["aura"], + "findora": ["findora"], + "shiden": ["shiden"], + "swan": ["swan"], + "crescent": ["crescent"], + "rari": ["rari"], + "cyber": ["cyber"], + "redstone": ["redstone"], + "silicon_zkevm": ["silicon"], + "endurance": ["endurance"], + "inevm": ["inevm"], + "grove": ["grove"], + "areon_network": ["areon"], + "jbc": ["jbc"], + "planq": ["planq"], + "lachain": ["lachain"], + "rei": ["rei"], + "multivac": ["multivac"], + "cube": ["cube"], + "syscoin": ["syscoin"], + "vinuchain": ["vinuchain"], + "callisto": ["callisto"], + "hpb": ["hpb"], + "ham": ["ham"], + "ethf": ["ethf"], + "gochain": ["gochain"], + "darwinia": ["darwinia"], + "sx_network": ["sx"], + "manta_atlantic": ["atlantic"], + "ontologyevm": ["ontEvm"], + "mvc": ["mvc"], + "sifchain": ["sifchain"], + "plume": ["plume"], + "bitgert": ["bitgert"], + "reichain": ["reichain"], + "bitnet": ["bitnet"], + "tenet": ["tenet"], + "milkomeda_a1": ["milkomedaA1"], + "aeternity": ["aeternity"], + "palm": ["palm"], + "concordium": ["concordium"], + "kopi": ["kopi"], + "asset_chain": ["asset"], + "pego": ["pego"], + "waterfall": ["waterfall"], + "heco": ["heco"], + "exsat": ["exsat"], + "goerli": ["goerli"], + "celestia": ["celestia"], + "bandchain": ["band"], + "sommelier": ["sommelier"], + "stride": ["stride"], + "polkadot": ["dot"], + "kusama": ["kusama"], + "dexit": ["dexit"], + "fusion": ["fusion"], + "boba_avax": ["boba"], + "stafi": ["stafi"], + "empire": ["empire"], + "oxfun": ["oxfun"], + "pryzm": ["pryzm"], + "hoo": ["hoo"], + "echelon": ["echelon"], + "quicksilver": ["quick"], + "clv": ["clv"], + "pokt": ["pokt"], + "dsc": ["dsc"], + "zksync_lite": ["zkLite"], + "nova_network": ["nova"], + "cmp": ["cmp"], + "genshiro": ["genshiro"], + "lamden": ["lamden"], + "polis": ["polis"], + "zyx": ["zyx"], + "ubiq": ["ubiq"], + "heiko": ["heiko"], + "parallel": ["parallel"], + "coti": ["coti"], + "kekchain": ["kek"], + "muuchain": ["muuchain"], + "tlchain": ["tlchain"], + "zeniql": ["zeniql"], + "bitindi": ["bitindi"], + "lung": ["lung"], + "bone": ["bone"], + "lukso": ["lukso"], + "joltify": ["joltify"], +} + + +def get_chain_from_alias(alias: str) -> str | None: + """Get the main chain identifier from an alias. 
+ + Args: + alias: The chain alias to look up + + Returns: + The main chain identifier if found, None otherwise + """ + normalized_alias = alias.lower().strip() + + # Check if it's a main chain name + if normalized_alias in VALID_CHAINS: + return normalized_alias + + # Check aliases + for chain, aliases in VALID_CHAINS.items(): + if normalized_alias in [a.lower() for a in aliases]: + return chain + + return None + + +def is_valid_chain(chain: str) -> bool: + """Check if a chain identifier is valid. + + Args: + chain: The chain identifier to validate + + Returns: + True if the chain is valid, False otherwise + """ + return get_chain_from_alias(chain) is not None + + +def get_all_chains() -> list[str]: + """Get a list of all valid main chain identifiers. + + Returns: + List of all main chain identifiers + """ + return list(VALID_CHAINS.keys()) + + +def get_chain_aliases(chain: str) -> list[str]: + """Get all aliases for a given chain. + + Args: + chain: The main chain identifier + + Returns: + List of aliases for the chain, empty list if chain not found + """ + normalized_chain = chain.lower().strip() + return VALID_CHAINS.get(normalized_chain, []) diff --git a/intentkit/skills/defillama/defillama.jpeg b/intentkit/skills/defillama/defillama.jpeg new file mode 100644 index 00000000..f5004df6 Binary files /dev/null and b/intentkit/skills/defillama/defillama.jpeg differ diff --git a/intentkit/skills/defillama/fees/__init__.py b/intentkit/skills/defillama/fees/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/fees/fetch_fees_overview.py b/intentkit/skills/defillama/fees/fetch_fees_overview.py new file mode 100644 index 00000000..e5bf1f44 --- /dev/null +++ b/intentkit/skills/defillama/fees/fetch_fees_overview.py @@ -0,0 +1,129 @@ +"""Tool for fetching fees overview data via DeFi Llama API.""" + +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_fees_overview +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_FEES_OVERVIEW_PROMPT = """ +This tool fetches comprehensive overview data for protocol fees from DeFi Llama. 
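+Figures reflect the dailyFees data type aggregated across all tracked chains.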
+Returns detailed metrics including:
+- Total fees across different timeframes
+- Change percentages
+- Protocol-specific data
+- Chain breakdowns
+"""
+
+
+class ProtocolMethodology(BaseModel):
+    """Model representing protocol methodology data."""
+
+    UserFees: Optional[str] = Field(None, description="Description of user fees")
+    Fees: Optional[str] = Field(None, description="Description of fees")
+    Revenue: Optional[str] = Field(None, description="Description of revenue")
+    ProtocolRevenue: Optional[str] = Field(
+        None, description="Description of protocol revenue"
+    )
+    HoldersRevenue: Optional[str] = Field(
+        None, description="Description of holders revenue"
+    )
+    SupplySideRevenue: Optional[str] = Field(
+        None, description="Description of supply side revenue"
+    )
+
+
+class Protocol(BaseModel):
+    """Model representing protocol data."""
+
+    name: str = Field(..., description="Protocol name")
+    displayName: str = Field(..., description="Display name of protocol")
+    category: str = Field(..., description="Protocol category")
+    logo: str = Field(..., description="Logo URL")
+    chains: List[str] = Field(..., description="Supported chains")
+    module: str = Field(..., description="Protocol module")
+    total24h: Optional[float] = Field(None, description="24-hour total fees")
+    total7d: Optional[float] = Field(None, description="7-day total fees")
+    total30d: Optional[float] = Field(None, description="30-day total fees")
+    total1y: Optional[float] = Field(None, description="1-year total fees")
+    totalAllTime: Optional[float] = Field(None, description="All-time total fees")
+    change_1d: Optional[float] = Field(None, description="24-hour change percentage")
+    change_7d: Optional[float] = Field(None, description="7-day change percentage")
+    change_1m: Optional[float] = Field(None, description="30-day change percentage")
+    methodology: Optional[ProtocolMethodology] = Field(
+        None, description="Protocol methodology"
+    )
+    breakdown24h: Optional[Dict[str, Dict[str, float]]] = Field(
+        None, description="24-hour breakdown by chain"
+    )
+    breakdown30d: Optional[Dict[str, Dict[str, float]]] = Field(
+        None, description="30-day breakdown by chain"
+    )
+
+
+class FetchFeesOverviewResponse(BaseModel):
+    """Response schema for fees overview data.
+
+    All aggregate fields are optional so that an error response can be
+    constructed with only the error message set.
+    """
+
+    total24h: Optional[float] = Field(None, description="Total fees in last 24 hours")
+    total7d: Optional[float] = Field(None, description="Total fees in last 7 days")
+    total30d: Optional[float] = Field(None, description="Total fees in last 30 days")
+    total1y: Optional[float] = Field(None, description="Total fees in last year")
+    change_1d: Optional[float] = Field(None, description="24-hour change percentage")
+    change_7d: Optional[float] = Field(None, description="7-day change percentage")
+    change_1m: Optional[float] = Field(None, description="30-day change percentage")
+    allChains: List[str] = Field(default_factory=list, description="List of all chains")
+    protocols: List[Protocol] = Field(
+        default_factory=list, description="List of protocols"
+    )
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchFeesOverview(DefiLlamaBaseTool):
+    """Tool for fetching fees overview data from DeFi Llama.
+
+    This tool retrieves comprehensive data about protocol fees,
+    including fee metrics, change percentages, and detailed protocol information.
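+
+    On any failure (rate limiting, an API error, or an exception) the
+    aggregate fields stay unset and only the error field is populated.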
+ + Example: + overview_tool = DefiLlamaFetchFeesOverview( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await overview_tool._arun() + """ + + name: str = "defillama_fetch_fees_overview" + description: str = FETCH_FEES_OVERVIEW_PROMPT + + class EmptyArgsSchema(BaseModel): + """Empty schema for no input parameters.""" + + pass + + args_schema: Type[BaseModel] = EmptyArgsSchema + + async def _arun(self, **kwargs) -> FetchFeesOverviewResponse: + """Fetch overview data for protocol fees. + + Returns: + FetchFeesOverviewResponse containing comprehensive fee data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchFeesOverviewResponse(error=error_msg) + + # Fetch fees data from API + result = await fetch_fees_overview() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchFeesOverviewResponse(error=result["error"]) + + # Return the parsed response + return FetchFeesOverviewResponse(**result) + + except Exception as e: + return FetchFeesOverviewResponse(error=str(e)) diff --git a/intentkit/skills/defillama/schema.json b/intentkit/skills/defillama/schema.json new file mode 100644 index 00000000..65bd678a --- /dev/null +++ b/intentkit/skills/defillama/schema.json @@ -0,0 +1,383 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "DeFiLlama", + "x-icon": "https://ai.service.crestal.dev/skills/defillama/defillama.jpeg", + "description": "Integration with DeFi Llama API providing comprehensive decentralized finance data including token prices, protocol TVL, DEX volumes, and stablecoin metrics", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "fetch_batch_historical_prices": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch batch historical prices", + "default": "disabled" + }, + "fetch_block": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch block", + "default": "disabled" + }, + "fetch_current_prices": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch current prices", + "default": "disabled" + }, + "fetch_first_price": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves inaugural trading price data for assets with historical context", + "default": "disabled" + }, + "fetch_historical_prices": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch historical prices", + "default": "disabled" + }, + "fetch_price_chart": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch price chart", 
+ "default": "disabled" + }, + "fetch_price_percentage": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch price percentage", + "default": "disabled" + }, + "fetch_fees_overview": { + "type": "string", + "title": "Fetch Fees Overview", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Provides aggregated fee statistics across DEXs, lending protocols, and other fee-generating DeFi primitives", + "default": "disabled" + }, + "fetch_stablecoin_chains": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Shows blockchain distribution of stablecoin supply with chain-specific circulation metrics", + "default": "disabled" + }, + "fetch_stablecoin_charts": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generates historical charts for stablecoin market cap, supply changes, and chain dominance", + "default": "disabled" + }, + "fetch_stablecoin_prices": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch stablecoin prices", + "default": "disabled" + }, + "fetch_stablecoins": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch stablecoins", + "default": "disabled" + }, + "fetch_chain_historical_tvl": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves historical TVL data for specific blockchain chains with daily granularity", + "default": "disabled" + }, + "fetch_chains": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch chains", + "default": "disabled" + }, + "fetch_historical_tvl": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Provides historical TVL trends for the entire DeFi ecosystem or specific protocol categories", + "default": "disabled" + }, + "fetch_protocol": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch protocol", + "default": "disabled" + }, + "fetch_protocol_current_tvl": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch protocol current TVL", + "default": "disabled" + }, + "fetch_protocols": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves comprehensive list of all tracked 
DeFi protocols with their category, chain, and TVL data", + "default": "disabled" + }, + "fetch_dex_overview": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Provides aggregated DEX metrics including total volume, top trading pairs, and liquidity distribution", + "default": "disabled" + }, + "fetch_dex_summary": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch dex summary", + "default": "disabled" + }, + "fetch_options_overview": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch options overview", + "default": "disabled" + }, + "fetch_pool_chart": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch pool chart", + "default": "disabled" + }, + "fetch_pools": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch pools", + "default": "disabled" + } + }, + "description": "States for each Defillama skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform" + ], + "x-enum-title": [ + "Nation Hosted" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ] +} \ No newline at end of file diff --git a/intentkit/skills/defillama/stablecoins/__init__.py b/intentkit/skills/defillama/stablecoins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/stablecoins/fetch_stablecoin_chains.py b/intentkit/skills/defillama/stablecoins/fetch_stablecoin_chains.py new file mode 100644 index 00000000..355fff64 --- /dev/null +++ b/intentkit/skills/defillama/stablecoins/fetch_stablecoin_chains.py @@ -0,0 +1,99 @@ +"""Tool for fetching stablecoin chains data via DeFi Llama API.""" + +from typing import List, Optional + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_stablecoin_chains +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_STABLECOIN_CHAINS_PROMPT = """ +This tool fetches stablecoin distribution data across all chains from DeFi Llama. +Returns: +- List of chains with stablecoin circulating amounts +- Token information for each chain +- Peg type circulating amounts (USD, EUR, etc.) 
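+Amounts are grouped by peg currency (peggedUSD, peggedEUR, peggedJPY, ...).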
+""" + + +class CirculatingUSD(BaseModel): + """Model representing circulating amounts in different pegs.""" + + peggedUSD: Optional[float] = Field(None, description="Amount pegged to USD") + peggedEUR: Optional[float] = Field(None, description="Amount pegged to EUR") + peggedVAR: Optional[float] = Field(None, description="Amount in variable pegs") + peggedJPY: Optional[float] = Field(None, description="Amount pegged to JPY") + peggedCHF: Optional[float] = Field(None, description="Amount pegged to CHF") + peggedCAD: Optional[float] = Field(None, description="Amount pegged to CAD") + peggedGBP: Optional[float] = Field(None, description="Amount pegged to GBP") + peggedAUD: Optional[float] = Field(None, description="Amount pegged to AUD") + peggedCNY: Optional[float] = Field(None, description="Amount pegged to CNY") + peggedREAL: Optional[float] = Field( + None, description="Amount pegged to Brazilian Real" + ) + + +class ChainData(BaseModel): + """Model representing stablecoin data for a single chain.""" + + gecko_id: Optional[str] = Field(None, description="CoinGecko ID of the chain") + totalCirculatingUSD: CirculatingUSD = Field( + ..., description="Total circulating amounts in different pegs" + ) + tokenSymbol: Optional[str] = Field(None, description="Native token symbol") + name: str = Field(..., description="Chain name") + + +class FetchStablecoinChainsResponse(BaseModel): + """Response schema for stablecoin chains data.""" + + chains: List[ChainData] = Field( + default_factory=list, description="List of chains with their stablecoin data" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchStablecoinChains(DefiLlamaBaseTool): + """Tool for fetching stablecoin distribution across chains from DeFi Llama. + + This tool retrieves data about how stablecoins are distributed across different + blockchain networks, including circulation amounts and token information. + + Example: + chains_tool = DefiLlamaFetchStablecoinChains( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await chains_tool._arun() + """ + + name: str = "defillama_fetch_stablecoin_chains" + description: str = FETCH_STABLECOIN_CHAINS_PROMPT + args_schema: None = None # No input parameters needed + + async def _arun(self, **kwargs) -> FetchStablecoinChainsResponse: + """Fetch stablecoin distribution data across chains. 
+ + Returns: + FetchStablecoinChainsResponse containing chain data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchStablecoinChainsResponse(error=error_msg) + + # Fetch chains data from API + result = await fetch_stablecoin_chains() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchStablecoinChainsResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchStablecoinChainsResponse(chains=result) + + except Exception as e: + return FetchStablecoinChainsResponse(error=str(e)) diff --git a/intentkit/skills/defillama/stablecoins/fetch_stablecoin_charts.py b/intentkit/skills/defillama/stablecoins/fetch_stablecoin_charts.py new file mode 100644 index 00000000..035d0ec7 --- /dev/null +++ b/intentkit/skills/defillama/stablecoins/fetch_stablecoin_charts.py @@ -0,0 +1,128 @@ +"""Tool for fetching stablecoin charts via DeFi Llama API.""" + +from typing import List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_stablecoin_charts +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_STABLECOIN_CHARTS_PROMPT = """ +This tool fetches historical circulating supply data from DeFi Llama for a specific stablecoin. +Required: +- Stablecoin ID +Optional: +- Chain name for chain-specific data +Returns historical data including: +- Total circulating supply +- Circulating supply in USD +- Daily data points +""" + + +class CirculatingSupply(BaseModel): + """Model representing circulating supply amounts.""" + + peggedUSD: float = Field(..., description="Amount pegged to USD") + + +class StablecoinDataPoint(BaseModel): + """Model representing a single historical data point.""" + + date: str = Field(..., description="Unix timestamp of the data point") + totalCirculating: CirculatingSupply = Field( + ..., description="Total circulating supply" + ) + totalCirculatingUSD: CirculatingSupply = Field( + ..., description="Total circulating supply in USD" + ) + + +class FetchStablecoinChartsInput(BaseModel): + """Input schema for fetching stablecoin chart data.""" + + stablecoin_id: str = Field( + ..., description="ID of the stablecoin to fetch data for" + ) + chain: Optional[str] = Field( + None, description="Optional chain name for chain-specific data" + ) + + +class FetchStablecoinChartsResponse(BaseModel): + """Response schema for stablecoin chart data.""" + + data: List[StablecoinDataPoint] = Field( + default_factory=list, description="List of historical data points" + ) + chain: Optional[str] = Field( + None, description="Chain name if chain-specific data was requested" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchStablecoinCharts(DefiLlamaBaseTool): + """Tool for fetching stablecoin chart data from DeFi Llama. + + This tool retrieves historical circulating supply data for a specific stablecoin, + optionally filtered by chain. 
+
+ Example:
+ charts_tool = DefiLlamaFetchStablecoinCharts(
+ skill_store=store,
+ agent_id="agent_123",
+ agent_store=agent_store
+ )
+ # Get all chains data
+ result = await charts_tool._arun(stablecoin_id="1")
+ # Get chain-specific data
+ result = await charts_tool._arun(stablecoin_id="1", chain="ethereum")
+ """
+
+ name: str = "defillama_fetch_stablecoin_charts"
+ description: str = FETCH_STABLECOIN_CHARTS_PROMPT
+ args_schema: Type[BaseModel] = FetchStablecoinChartsInput
+
+ async def _arun(
+ self, stablecoin_id: str, chain: Optional[str] = None
+ ) -> FetchStablecoinChartsResponse:
+ """Fetch historical chart data for the given stablecoin.
+
+ Args:
+ stablecoin_id: ID of the stablecoin to fetch data for
+ chain: Optional chain name for chain-specific data
+
+ Returns:
+ FetchStablecoinChartsResponse containing historical data or error
+ """
+ try:
+ # Validate chain if provided
+ if chain:
+ is_valid, normalized_chain = await self.validate_chain(chain)
+ if not is_valid:
+ return FetchStablecoinChartsResponse(
+ error=f"Invalid chain: {chain}"
+ )
+ chain = normalized_chain
+
+ # Check rate limiting
+ context = self.get_context()
+ is_rate_limited, error_msg = await self.check_rate_limit(context)
+ if is_rate_limited:
+ return FetchStablecoinChartsResponse(error=error_msg)
+
+ # Fetch chart data from API
+ result = await fetch_stablecoin_charts(
+ stablecoin_id=stablecoin_id, chain=chain
+ )
+
+ # Check for API errors
+ if isinstance(result, dict) and "error" in result:
+ return FetchStablecoinChartsResponse(error=result["error"])
+
+ # Parse response data
+ return FetchStablecoinChartsResponse(data=result, chain=chain)
+
+ except Exception as e:
+ return FetchStablecoinChartsResponse(error=str(e))
diff --git a/intentkit/skills/defillama/stablecoins/fetch_stablecoin_prices.py b/intentkit/skills/defillama/stablecoins/fetch_stablecoin_prices.py
new file mode 100644
index 00000000..d65da002
--- /dev/null
+++ b/intentkit/skills/defillama/stablecoins/fetch_stablecoin_prices.py
@@ -0,0 +1,82 @@
+"""Tool for fetching stablecoin prices via DeFi Llama API."""
+
+from typing import Dict, List, Optional
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_stablecoin_prices
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_STABLECOIN_PRICES_PROMPT = """
+This tool fetches current price data for stablecoins from DeFi Llama.
+Returns:
+- Historical price points with timestamps
+- Current prices for each stablecoin
+- Prices indexed by stablecoin identifier
+"""
+
+
+class PriceDataPoint(BaseModel):
+ """Model representing a price data point."""
+
+ date: str = Field(..., description="Unix timestamp for the price data")
+ prices: Dict[str, float] = Field(
+ ..., description="Dictionary of stablecoin prices indexed by identifier"
+ )
+
+
+class FetchStablecoinPricesResponse(BaseModel):
+ """Response schema for stablecoin prices data."""
+
+ data: List[PriceDataPoint] = Field(
+ default_factory=list, description="List of price data points"
+ )
+ error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchStablecoinPrices(DefiLlamaBaseTool):
+ """Tool for fetching stablecoin prices from DeFi Llama.
+
+ This tool retrieves current price data for stablecoins, including historical
+ price points and their timestamps.
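+ Each data point maps stablecoin identifiers to their USD prices at a given timestamp.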
+ + Example: + prices_tool = DefiLlamaFetchStablecoinPrices( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await prices_tool._arun() + """ + + name: str = "defillama_fetch_stablecoin_prices" + description: str = FETCH_STABLECOIN_PRICES_PROMPT + args_schema: None = None # No input parameters needed + + async def _arun(self, **kwargs) -> FetchStablecoinPricesResponse: + """Fetch stablecoin price data. + + Returns: + FetchStablecoinPricesResponse containing price data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchStablecoinPricesResponse(error=error_msg) + + # Fetch price data from API + result = await fetch_stablecoin_prices() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchStablecoinPricesResponse(error=result["error"]) + + # Parse results into models + data_points = [PriceDataPoint(**point) for point in result] + + return FetchStablecoinPricesResponse(data=data_points) + + except Exception as e: + return FetchStablecoinPricesResponse(error=str(e)) diff --git a/intentkit/skills/defillama/stablecoins/fetch_stablecoins.py b/intentkit/skills/defillama/stablecoins/fetch_stablecoins.py new file mode 100644 index 00000000..5a0ef6df --- /dev/null +++ b/intentkit/skills/defillama/stablecoins/fetch_stablecoins.py @@ -0,0 +1,125 @@ +"""Tool for fetching stablecoin data via DeFi Llama API.""" + +from typing import Dict, List, Optional + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_stablecoins +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_STABLECOINS_PROMPT = """ +This tool fetches comprehensive stablecoin data from DeFi Llama. +Returns: +- List of stablecoins with details like name, symbol, market cap +- Per-chain circulating amounts +- Historical circulating amounts (day/week/month) +- Current prices and price history +- Peg mechanism and type information +""" + + +class CirculatingAmount(BaseModel): + """Model representing circulating amounts for a specific peg type.""" + + peggedUSD: float = Field(..., description="Amount pegged to USD") + + +class ChainCirculating(BaseModel): + """Model representing circulating amounts on a specific chain.""" + + current: CirculatingAmount = Field(..., description="Current circulating amount") + circulatingPrevDay: CirculatingAmount = Field( + ..., description="Circulating amount from previous day" + ) + circulatingPrevWeek: CirculatingAmount = Field( + ..., description="Circulating amount from previous week" + ) + circulatingPrevMonth: CirculatingAmount = Field( + ..., description="Circulating amount from previous month" + ) + + +class Stablecoin(BaseModel): + """Model representing a single stablecoin's data.""" + + id: str = Field(..., description="Unique identifier") + name: str = Field(..., description="Stablecoin name") + symbol: str = Field(..., description="Token symbol") + gecko_id: Optional[str] = Field(None, description="CoinGecko ID if available") + pegType: str = Field(..., description="Type of peg (e.g. 
peggedUSD)") + priceSource: str = Field(..., description="Source of price data") + pegMechanism: str = Field(..., description="Mechanism maintaining the peg") + circulating: CirculatingAmount = Field( + ..., description="Current total circulating amount" + ) + circulatingPrevDay: CirculatingAmount = Field( + ..., description="Total circulating amount from previous day" + ) + circulatingPrevWeek: CirculatingAmount = Field( + ..., description="Total circulating amount from previous week" + ) + circulatingPrevMonth: CirculatingAmount = Field( + ..., description="Total circulating amount from previous month" + ) + chainCirculating: Dict[str, ChainCirculating] = Field( + ..., description="Circulating amounts per chain" + ) + chains: List[str] = Field( + ..., description="List of chains where the stablecoin is present" + ) + price: float = Field(..., description="Current price in USD") + + +class FetchStablecoinsResponse(BaseModel): + """Response schema for stablecoin data.""" + + peggedAssets: List[Stablecoin] = Field( + default_factory=list, description="List of stablecoins with their data" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchStablecoins(DefiLlamaBaseTool): + """Tool for fetching stablecoin data from DeFi Llama. + + This tool retrieves comprehensive data about stablecoins, including their + circulating supply across different chains, price information, and peg details. + + Example: + stablecoins_tool = DefiLlamaFetchStablecoins( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await stablecoins_tool._arun() + """ + + name: str = "defillama_fetch_stablecoins" + description: str = FETCH_STABLECOINS_PROMPT + args_schema: None = None # No input parameters needed + + async def _arun(self, **kwargs) -> FetchStablecoinsResponse: + """Fetch stablecoin data. 
+ + Returns: + FetchStablecoinsResponse containing stablecoin data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchStablecoinsResponse(error=error_msg) + + # Fetch stablecoin data from API + result = await fetch_stablecoins() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchStablecoinsResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchStablecoinsResponse(**result) + + except Exception as e: + return FetchStablecoinsResponse(error=str(e)) diff --git a/intentkit/skills/defillama/tests/__init__.py b/intentkit/skills/defillama/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/tests/api_integration.test.py b/intentkit/skills/defillama/tests/api_integration.test.py new file mode 100644 index 00000000..d6f7d12f --- /dev/null +++ b/intentkit/skills/defillama/tests/api_integration.test.py @@ -0,0 +1,192 @@ +import asyncio +import logging +import unittest +from datetime import datetime, timedelta +from unittest.runner import TextTestResult + +# Import all functions from your API module +from intentkit.skills.defillama.api import ( + fetch_chains, + fetch_current_prices, + fetch_dex_overview, + fetch_dex_summary, + fetch_fees_overview, + fetch_historical_prices, + fetch_historical_tvl, + fetch_price_chart, + fetch_protocol, + fetch_protocols, + fetch_stablecoin_chains, + fetch_stablecoin_prices, + fetch_stablecoins, +) + +# Configure logging to only show warnings and errors +logging.basicConfig(level=logging.WARNING) + + +class QuietTestResult(TextTestResult): + """Custom TestResult class that minimizes output unless there's a failure""" + + def startTest(self, test): + self._started_at = datetime.now() + super().startTest(test) + + def addSuccess(self, test): + super().addSuccess(test) + if self.showAll: + self.stream.write("") + self.stream.flush() + + def addError(self, test, err): + super().addError(test, err) + self.stream.write("\n") + self.stream.write(self.separator1 + "\n") + self.stream.write(f"ERROR: {self.getDescription(test)}\n") + self.stream.write(self.separator2 + "\n") + self.stream.write(self._exc_info_to_string(err, test)) + self.stream.write("\n") + self.stream.flush() + + def addFailure(self, test, err): + super().addFailure(test, err) + self.stream.write("\n") + self.stream.write(self.separator1 + "\n") + self.stream.write(f"FAIL: {self.getDescription(test)}\n") + self.stream.write(self.separator2 + "\n") + self.stream.write(self._exc_info_to_string(err, test)) + self.stream.write("\n") + self.stream.flush() + + +class QuietTestRunner(unittest.TextTestRunner): + """Custom TestRunner that uses QuietTestResult""" + + resultclass = QuietTestResult + + +class TestDefiLlamaAPI(unittest.TestCase): + """Integration tests for DeFi Llama API client""" + + def setUp(self): + """Set up the async event loop""" + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + self.timeout = 3000 + + def tearDown(self): + """Clean up the event loop""" + self.loop.close() + + def run_async(self, coro): + """Helper to run async functions in test methods with timeout""" + try: + return self.loop.run_until_complete( + asyncio.wait_for(coro, timeout=self.timeout) + ) + except asyncio.TimeoutError: + raise AssertionError(f"Test timed out after {self.timeout} seconds") + except Exception as e: + raise 
AssertionError(f"Test failed with exception: {str(e)}") + + def assert_successful_response(self, response): + """Helper to check if response contains an error""" + if isinstance(response, dict) and "error" in response: + raise AssertionError(f"API request failed: {response['error']}") + + def test_tvl_endpoints(self): + """Test TVL-related endpoints""" + # Test fetch_protocols + protocols = self.run_async(fetch_protocols()) + self.assert_successful_response(protocols) + self.assertIsInstance(protocols, list) + if len(protocols) > 0: + self.assertIn("tvl", protocols[0]) + + # Test fetch_protocol using Aave as an example + protocol_data = self.run_async(fetch_protocol("aave")) + self.assert_successful_response(protocol_data) + self.assertIsInstance(protocol_data, dict) + + # Test fetch_historical_tvl + historical_tvl = self.run_async(fetch_historical_tvl()) + self.assert_successful_response(historical_tvl) + self.assertIsInstance(historical_tvl, list) + # Verify the structure of historical TVL data points + if len(historical_tvl) > 0: + self.assertIn("date", historical_tvl[0]) + self.assertIn("tvl", historical_tvl[0]) + + # Test fetch_chains + chains = self.run_async(fetch_chains()) + self.assert_successful_response(chains) + self.assertIsInstance(chains, list) + + def test_coins_endpoints(self): + """Test coin price-related endpoints""" + test_coins = ["ethereum:0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"] + + # Test fetch_current_prices + current_prices = self.run_async(fetch_current_prices(test_coins)) + self.assert_successful_response(current_prices) + self.assertIsInstance(current_prices, dict) + + # Test fetch_historical_prices + timestamp = int((datetime.now() - timedelta(days=1)).timestamp()) + historical_prices = self.run_async( + fetch_historical_prices(timestamp, test_coins) + ) + self.assert_successful_response(historical_prices) + self.assertIsInstance(historical_prices, dict) + + # Test fetch_price_chart + price_chart = self.run_async(fetch_price_chart(test_coins)) + self.assert_successful_response(price_chart) + self.assertIsInstance(price_chart, dict) + self.assertIn("coins", price_chart) + # Verify the structure of the response + coin_data = price_chart["coins"].get(test_coins[0]) + self.assertIsNotNone(coin_data) + self.assertIn("prices", coin_data) + self.assertIsInstance(coin_data["prices"], list) + + def test_stablecoin_endpoints(self): + """Test stablecoin-related endpoints""" + # Test fetch_stablecoins + stablecoins = self.run_async(fetch_stablecoins()) + self.assert_successful_response(stablecoins) + self.assertIsInstance(stablecoins, dict) + + # Test fetch_stablecoin_chains + chains = self.run_async(fetch_stablecoin_chains()) + self.assert_successful_response(chains) + self.assertIsInstance(chains, list) + + # Test fetch_stablecoin_prices + prices = self.run_async(fetch_stablecoin_prices()) + self.assert_successful_response(prices) + self.assertIsInstance(prices, list) + + def test_volume_endpoints(self): + """Test volume-related endpoints""" + # Test fetch_dex_overview + dex_overview = self.run_async(fetch_dex_overview()) + self.assert_successful_response(dex_overview) + self.assertIsInstance(dex_overview, dict) + + # Test fetch_dex_summary using Uniswap as example + dex_summary = self.run_async(fetch_dex_summary("uniswap")) + self.assert_successful_response(dex_summary) + self.assertIsInstance(dex_summary, dict) + + def test_fees_endpoint(self): + """Test fees endpoint""" + fees_overview = self.run_async(fetch_fees_overview()) + 
self.assert_successful_response(fees_overview)
+ self.assertIsInstance(fees_overview, dict)
+
+
+if __name__ == "__main__":
+ # Use the quiet test runner
+ runner = QuietTestRunner(verbosity=1)
+ unittest.main(testRunner=runner)
diff --git a/intentkit/skills/defillama/tests/api_unit.test.py b/intentkit/skills/defillama/tests/api_unit.test.py
new file mode 100644
index 00000000..98a44040
--- /dev/null
+++ b/intentkit/skills/defillama/tests/api_unit.test.py
@@ -0,0 +1,583 @@
+import unittest
+from unittest.mock import AsyncMock, patch
+
+# Import the endpoints from your module.
+# Adjust the import path if your module has a different name or location.
+from intentkit.skills.defillama.api import (
+ fetch_batch_historical_prices,
+ fetch_block,
+ fetch_chain_historical_tvl,
+ fetch_chains,
+ fetch_current_prices,
+ # Volume related functions
+ fetch_dex_overview,
+ fetch_dex_summary,
+ # Fees related functions
+ fetch_fees_overview,
+ fetch_first_price,
+ fetch_historical_prices,
+ fetch_historical_tvl,
+ fetch_options_overview,
+ fetch_pool_chart,
+ # Yields related functions
+ fetch_pools,
+ # Price related functions
+ fetch_price_chart,
+ fetch_price_percentage,
+ fetch_protocol,
+ fetch_protocol_current_tvl,
+ # Original functions
+ fetch_protocols,
+ fetch_stablecoin_chains,
+ fetch_stablecoin_charts,
+ fetch_stablecoin_prices,
+ # Stablecoin related functions
+ fetch_stablecoins,
+)
+
+
+# Dummy response to simulate httpx responses.
+class DummyResponse:
+ def __init__(self, status_code, json_data):
+ self.status_code = status_code
+ self._json_data = json_data
+
+ def json(self):
+ return self._json_data
+
+
+class TestDefiLlamaAPI(unittest.IsolatedAsyncioTestCase):
+ @classmethod
+ def setUpClass(cls):
+ # Set up a fixed timestamp that all tests will use
+ cls.mock_timestamp = 1677648000 # Fixed timestamp
+
+ async def asyncSetUp(self):
+ # Start the patcher before each test
+ self.datetime_patcher = patch("intentkit.skills.defillama.api.datetime")
+ self.mock_datetime = self.datetime_patcher.start()
+ # Configure the mock to return our fixed timestamp
+ self.mock_datetime.now.return_value.timestamp.return_value = self.mock_timestamp
+
+ async def asyncTearDown(self):
+ # Stop the patcher after each test
+ self.datetime_patcher.stop()
+
+ # Helper method to patch httpx.AsyncClient and set up the dummy client.
+ async def _run_with_dummy(
+ self, func, expected_url, dummy_response, *args, expected_kwargs=None
+ ):
+ if expected_kwargs is None:
+ expected_kwargs = {}
+ with patch("httpx.AsyncClient") as MockClient:
+ client_instance = AsyncMock()
+ client_instance.get.return_value = dummy_response
+ # Ensure that __aenter__ returns our dummy client.
+ MockClient.return_value.__aenter__.return_value = client_instance
+ result = await func(*args)
+ # Check that the get call was made with the expected URL (and parameters, if any).
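+ # expected_kwargs lets callers assert extra keyword arguments (e.g. query params) passed to get().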
+ client_instance.get.assert_called_once_with(expected_url, **expected_kwargs) + return result + + # --- Tests for fetch_protocols --- + async def test_fetch_protocols_success(self): + dummy = DummyResponse(200, {"protocols": []}) + result = await self._run_with_dummy( + fetch_protocols, + "https://api.llama.fi/protocols", + dummy, + ) + self.assertEqual(result, {"protocols": []}) + + async def test_fetch_protocols_error(self): + dummy = DummyResponse(404, None) + result = await self._run_with_dummy( + fetch_protocols, + "https://api.llama.fi/protocols", + dummy, + ) + self.assertEqual(result, {"error": "API returned status code 404"}) + + # --- Tests for fetch_protocol --- + async def test_fetch_protocol_success(self): + protocol = "testprotocol" + dummy = DummyResponse(200, {"protocol": protocol}) + expected_url = f"https://api.llama.fi/protocol/{protocol}" + result = await self._run_with_dummy( + fetch_protocol, expected_url, dummy, protocol + ) + self.assertEqual(result, {"protocol": protocol}) + + async def test_fetch_protocol_error(self): + protocol = "testprotocol" + dummy = DummyResponse(500, None) + expected_url = f"https://api.llama.fi/protocol/{protocol}" + result = await self._run_with_dummy( + fetch_protocol, expected_url, dummy, protocol + ) + self.assertEqual(result, {"error": "API returned status code 500"}) + + # --- Tests for fetch_historical_tvl --- + async def test_fetch_historical_tvl_success(self): + dummy = DummyResponse(200, {"historical": "data"}) + expected_url = "https://api.llama.fi/v2/historicalChainTvl" + result = await self._run_with_dummy( + fetch_historical_tvl, + expected_url, + dummy, + ) + self.assertEqual(result, {"historical": "data"}) + + async def test_fetch_historical_tvl_error(self): + dummy = DummyResponse(400, None) + expected_url = "https://api.llama.fi/v2/historicalChainTvl" + result = await self._run_with_dummy( + fetch_historical_tvl, + expected_url, + dummy, + ) + self.assertEqual(result, {"error": "API returned status code 400"}) + + # --- Tests for fetch_chain_historical_tvl --- + async def test_fetch_chain_historical_tvl_success(self): + chain = "ethereum" + dummy = DummyResponse(200, {"chain": chain}) + expected_url = f"https://api.llama.fi/v2/historicalChainTvl/{chain}" + result = await self._run_with_dummy( + fetch_chain_historical_tvl, expected_url, dummy, chain + ) + self.assertEqual(result, {"chain": chain}) + + async def test_fetch_chain_historical_tvl_error(self): + chain = "ethereum" + dummy = DummyResponse(503, None) + expected_url = f"https://api.llama.fi/v2/historicalChainTvl/{chain}" + result = await self._run_with_dummy( + fetch_chain_historical_tvl, expected_url, dummy, chain + ) + self.assertEqual(result, {"error": "API returned status code 503"}) + + # --- Tests for fetch_protocol_current_tvl --- + async def test_fetch_protocol_current_tvl_success(self): + protocol = "testprotocol" + dummy = DummyResponse(200, {"current_tvl": 12345}) + expected_url = f"https://api.llama.fi/tvl/{protocol}" + result = await self._run_with_dummy( + fetch_protocol_current_tvl, expected_url, dummy, protocol + ) + self.assertEqual(result, {"current_tvl": 12345}) + + async def test_fetch_protocol_current_tvl_error(self): + protocol = "testprotocol" + dummy = DummyResponse(418, None) + expected_url = f"https://api.llama.fi/tvl/{protocol}" + result = await self._run_with_dummy( + fetch_protocol_current_tvl, expected_url, dummy, protocol + ) + self.assertEqual(result, {"error": "API returned status code 418"}) + + # --- Tests for fetch_chains --- + 
async def test_fetch_chains_success(self): + dummy = DummyResponse(200, {"chains": ["eth", "bsc"]}) + expected_url = "https://api.llama.fi/v2/chains" + result = await self._run_with_dummy( + fetch_chains, + expected_url, + dummy, + ) + self.assertEqual(result, {"chains": ["eth", "bsc"]}) + + async def test_fetch_chains_error(self): + dummy = DummyResponse(404, None) + expected_url = "https://api.llama.fi/v2/chains" + result = await self._run_with_dummy( + fetch_chains, + expected_url, + dummy, + ) + self.assertEqual(result, {"error": "API returned status code 404"}) + + # --- Tests for fetch_current_prices --- + async def test_fetch_current_prices_success(self): + coins = ["coin1", "coin2"] + coins_str = ",".join(coins) + dummy = DummyResponse(200, {"prices": "data"}) + expected_url = f"https://api.llama.fi/prices/current/{coins_str}?searchWidth=4h" + result = await self._run_with_dummy( + fetch_current_prices, expected_url, dummy, coins + ) + self.assertEqual(result, {"prices": "data"}) + + async def test_fetch_current_prices_error(self): + coins = ["coin1", "coin2"] + coins_str = ",".join(coins) + dummy = DummyResponse(500, None) + expected_url = f"https://api.llama.fi/prices/current/{coins_str}?searchWidth=4h" + result = await self._run_with_dummy( + fetch_current_prices, expected_url, dummy, coins + ) + self.assertEqual(result, {"error": "API returned status code 500"}) + + # --- Tests for fetch_historical_prices --- + async def test_fetch_historical_prices_success(self): + timestamp = 1609459200 + coins = ["coin1", "coin2"] + coins_str = ",".join(coins) + dummy = DummyResponse(200, {"historical_prices": "data"}) + expected_url = f"https://api.llama.fi/prices/historical/{timestamp}/{coins_str}?searchWidth=4h" + result = await self._run_with_dummy( + fetch_historical_prices, expected_url, dummy, timestamp, coins + ) + self.assertEqual(result, {"historical_prices": "data"}) + + async def test_fetch_historical_prices_error(self): + timestamp = 1609459200 + coins = ["coin1", "coin2"] + coins_str = ",".join(coins) + dummy = DummyResponse(400, None) + expected_url = f"https://api.llama.fi/prices/historical/{timestamp}/{coins_str}?searchWidth=4h" + result = await self._run_with_dummy( + fetch_historical_prices, expected_url, dummy, timestamp, coins + ) + self.assertEqual(result, {"error": "API returned status code 400"}) + + # --- Tests for fetch_batch_historical_prices --- + async def test_fetch_batch_historical_prices_success(self): + coins_timestamps = {"coin1": [1609459200, 1609545600], "coin2": [1609459200]} + dummy = DummyResponse(200, {"batch": "data"}) + expected_url = "https://api.llama.fi/batchHistorical" + # For this endpoint, a params dict is sent. 
+ expected_params = {"coins": coins_timestamps, "searchWidth": "600"} + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_batch_historical_prices(coins_timestamps) + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"batch": "data"}) + + async def test_fetch_batch_historical_prices_error(self): + coins_timestamps = {"coin1": [1609459200], "coin2": [1609459200]} + dummy = DummyResponse(503, None) + expected_url = "https://api.llama.fi/batchHistorical" + expected_params = {"coins": coins_timestamps, "searchWidth": "600"} + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_batch_historical_prices(coins_timestamps) + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"error": "API returned status code 503"}) + + async def test_fetch_price_chart_success(self): + coins = ["bitcoin", "ethereum"] + coins_str = ",".join(coins) + dummy = DummyResponse(200, {"chart": "data"}) + expected_url = f"https://api.llama.fi/chart/{coins_str}" + + # Calculate start time based on mock timestamp + start_time = self.mock_timestamp - 86400 # mock timestamp - 1 day + expected_params = { + "start": start_time, + "span": 10, + "period": "2d", + "searchWidth": "600", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_price_chart(coins) + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"chart": "data"}) + + async def test_fetch_price_chart_error(self): + coins = ["bitcoin", "ethereum"] + coins_str = ",".join(coins) + dummy = DummyResponse(500, None) + expected_url = f"https://api.llama.fi/chart/{coins_str}" + + # Calculate start time based on mock timestamp + start_time = self.mock_timestamp - 86400 # mock timestamp - 1 day + expected_params = { + "start": start_time, + "span": 10, + "period": "2d", + "searchWidth": "600", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_price_chart(coins) + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"error": "API returned status code 500"}) + + # --- Tests for fetch_price_percentage --- + async def test_fetch_price_percentage_success(self): + coins = ["bitcoin", "ethereum"] + coins_str = ",".join(coins) + dummy = DummyResponse(200, {"percentage": "data"}) + expected_url = f"https://api.llama.fi/percentage/{coins_str}" + + mock_timestamp = 1677648000 # Fixed timestamp + with patch("skills.defillama.api.datetime") as mock_datetime: + mock_datetime.now.return_value.timestamp.return_value = mock_timestamp + expected_params = { + "timestamp": mock_timestamp, + "lookForward": "false", + "period": "24h", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + 
MockClient.return_value.__aenter__.return_value = client_instance
+ result = await fetch_price_percentage(coins)
+ client_instance.get.assert_called_once_with(
+ expected_url, params=expected_params
+ )
+ self.assertEqual(result, {"percentage": "data"})
+
+ async def test_fetch_price_percentage_error(self):
+ coins = ["bitcoin", "ethereum"]
+ coins_str = ",".join(coins)
+ dummy = DummyResponse(404, None)
+ expected_url = f"https://api.llama.fi/percentage/{coins_str}"
+
+ expected_params = {
+ "timestamp": self.mock_timestamp,
+ "lookForward": "false",
+ "period": "24h",
+ }
+
+ with patch("httpx.AsyncClient") as MockClient:
+ client_instance = AsyncMock()
+ client_instance.get.return_value = dummy
+ MockClient.return_value.__aenter__.return_value = client_instance
+ result = await fetch_price_percentage(coins)
+ client_instance.get.assert_called_once_with(
+ expected_url, params=expected_params
+ )
+ self.assertEqual(result, {"error": "API returned status code 404"})
+
+ async def test_fetch_price_percentage_error2(self):
+ coins = ["bitcoin", "ethereum"]
+ coins_str = ",".join(coins)
+ dummy = DummyResponse(404, None)
+ expected_url = f"https://api.llama.fi/percentage/{coins_str}"
+
+ with patch("intentkit.skills.defillama.api.datetime") as mock_datetime:
+ mock_datetime.now.return_value.timestamp.return_value = 1677648000
+ expected_params = {
+ "timestamp": 1677648000,
+ "lookForward": "false",
+ "period": "24h",
+ }
+
+ with patch("httpx.AsyncClient") as MockClient:
+ client_instance = AsyncMock()
+ client_instance.get.return_value = dummy
+ MockClient.return_value.__aenter__.return_value = client_instance
+ result = await fetch_price_percentage(coins)
+ client_instance.get.assert_called_once_with(
+ expected_url, params=expected_params
+ )
+ self.assertEqual(result, {"error": "API returned status code 404"})
+
+ # --- Tests for fetch_first_price ---
+ async def test_fetch_first_price_success(self):
+ coins = ["bitcoin", "ethereum"]
+ coins_str = ",".join(coins)
+ dummy = DummyResponse(200, {"first_prices": "data"})
+ expected_url = f"https://api.llama.fi/prices/first/{coins_str}"
+ result = await self._run_with_dummy(
+ fetch_first_price, expected_url, dummy, coins
+ )
+ self.assertEqual(result, {"first_prices": "data"})
+
+ async def test_fetch_first_price_error(self):
+ coins = ["bitcoin", "ethereum"]
+ coins_str = ",".join(coins)
+ dummy = DummyResponse(500, None)
+ expected_url = f"https://api.llama.fi/prices/first/{coins_str}"
+ result = await self._run_with_dummy(
+ fetch_first_price, expected_url, dummy, coins
+ )
+ self.assertEqual(result, {"error": "API returned status code 500"})
+
+ # --- Tests for fetch_block ---
+ async def test_fetch_block_success(self):
+ chain = "ethereum"
+ dummy = DummyResponse(200, {"block": 123456})
+ mock_timestamp = 1677648000 # Fixed timestamp
+
+ with patch("intentkit.skills.defillama.api.datetime") as mock_datetime:
+ mock_datetime.now.return_value.timestamp.return_value = mock_timestamp
+ expected_url = f"https://api.llama.fi/block/{chain}/{mock_timestamp}"
+ result = await self._run_with_dummy(fetch_block, expected_url, dummy, chain)
+ self.assertEqual(result, {"block": 123456})
+
+ async def test_fetch_block_error(self):
+ chain = "ethereum"
+ dummy = DummyResponse(404, None)
+ mock_timestamp = 1677648000 # Fixed timestamp
+
+ with patch("intentkit.skills.defillama.api.datetime") as mock_datetime:
+ mock_datetime.now.return_value.timestamp.return_value = mock_timestamp
+ expected_url = f"https://api.llama.fi/block/{chain}/{mock_timestamp}"
+ result = await
self._run_with_dummy(fetch_block, expected_url, dummy, chain) + self.assertEqual(result, {"error": "API returned status code 404"}) + + # --- Tests for Stablecoins API --- + async def test_fetch_stablecoins_success(self): + dummy = DummyResponse(200, {"stablecoins": "data"}) + expected_url = "https://api.llama.fi/stablecoins" + expected_params = {"includePrices": "true"} + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_stablecoins() + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"stablecoins": "data"}) + + async def test_fetch_stablecoin_charts_success(self): + stablecoin_id = "USDT" + chain = "ethereum" + dummy = DummyResponse(200, {"charts": "data"}) + expected_url = ( + f"https://api.llama.fi/stablecoincharts/{chain}?stablecoin={stablecoin_id}" + ) + result = await self._run_with_dummy( + fetch_stablecoin_charts, expected_url, dummy, stablecoin_id, chain + ) + self.assertEqual(result, {"charts": "data"}) + + async def test_fetch_stablecoin_chains_success(self): + dummy = DummyResponse(200, {"chains": "data"}) + expected_url = "https://api.llama.fi/stablecoinchains" + result = await self._run_with_dummy( + fetch_stablecoin_chains, expected_url, dummy + ) + self.assertEqual(result, {"chains": "data"}) + + async def test_fetch_stablecoin_prices_success(self): + dummy = DummyResponse(200, {"prices": "data"}) + expected_url = "https://api.llama.fi/stablecoinprices" + result = await self._run_with_dummy( + fetch_stablecoin_prices, expected_url, dummy + ) + self.assertEqual(result, {"prices": "data"}) + + # --- Tests for Yields API --- + async def test_fetch_pools_success(self): + dummy = DummyResponse(200, {"pools": "data"}) + expected_url = "https://api.llama.fi/pools" + result = await self._run_with_dummy(fetch_pools, expected_url, dummy) + self.assertEqual(result, {"pools": "data"}) + + async def test_fetch_pool_chart_success(self): + pool_id = "compound-usdc" + dummy = DummyResponse(200, {"chart": "data"}) + expected_url = f"https://api.llama.fi/chart/{pool_id}" + result = await self._run_with_dummy( + fetch_pool_chart, expected_url, dummy, pool_id + ) + self.assertEqual(result, {"chart": "data"}) + + # --- Tests for Volumes API --- + async def test_fetch_dex_overview_success(self): + dummy = DummyResponse(200, {"overview": "data"}) + expected_url = "https://api.llama.fi/overview/dexs" + expected_params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyVolume", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_dex_overview() + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"overview": "data"}) + + async def test_fetch_dex_summary_success(self): + protocol = "uniswap" + dummy = DummyResponse(200, {"summary": "data"}) + expected_url = f"https://api.llama.fi/summary/dexs/{protocol}" + expected_params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyVolume", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + 
MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_dex_summary(protocol) + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"summary": "data"}) + + async def test_fetch_options_overview_success(self): + dummy = DummyResponse(200, {"options": "data"}) + expected_url = "https://api.llama.fi/overview/options" + expected_params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyPremiumVolume", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_options_overview() + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"options": "data"}) + + # --- Tests for Fees API --- + async def test_fetch_fees_overview_success(self): + dummy = DummyResponse(200, {"fees": "data"}) + expected_url = "https://api.llama.fi/overview/fees" + expected_params = { + "excludeTotalDataChart": "true", + "excludeTotalDataChartBreakdown": "true", + "dataType": "dailyFees", + } + + with patch("httpx.AsyncClient") as MockClient: + client_instance = AsyncMock() + client_instance.get.return_value = dummy + MockClient.return_value.__aenter__.return_value = client_instance + result = await fetch_fees_overview() + client_instance.get.assert_called_once_with( + expected_url, params=expected_params + ) + self.assertEqual(result, {"fees": "data"}) + + +if __name__ == "__main__": + unittest.main() diff --git a/intentkit/skills/defillama/tvl/__init__.py b/intentkit/skills/defillama/tvl/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/tvl/fetch_chain_historical_tvl.py b/intentkit/skills/defillama/tvl/fetch_chain_historical_tvl.py new file mode 100644 index 00000000..5d4e919d --- /dev/null +++ b/intentkit/skills/defillama/tvl/fetch_chain_historical_tvl.py @@ -0,0 +1,103 @@ +"""Tool for fetching chain historical TVL via DeFiLlama API.""" + +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_chain_historical_tvl +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_HISTORICAL_TVL_PROMPT = """ +This tool fetches historical Total Value Locked (TVL) data for a specific blockchain. +Provide the chain name (e.g., "ethereum", "solana") to get its TVL history. +Returns a time series of TVL values with their corresponding dates. 
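+Dates are returned as Unix timestamps.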
+""" + + +class HistoricalTVLDataPoint(BaseModel): + """Model representing a single TVL data point.""" + + date: int = Field(..., description="Unix timestamp of the TVL measurement") + tvl: float = Field(..., description="Total Value Locked in USD at this timestamp") + + +class FetchChainHistoricalTVLInput(BaseModel): + """Input schema for fetching chain-specific historical TVL data.""" + + chain: str = Field( + ..., description="Chain name to fetch TVL for (e.g., 'ethereum', 'solana')" + ) + + +class FetchChainHistoricalTVLResponse(BaseModel): + """Response schema for chain-specific historical TVL data.""" + + chain: str = Field(..., description="Normalized chain name") + data: List[HistoricalTVLDataPoint] = Field( + default_factory=list, description="List of historical TVL data points" + ) + error: str | None = Field(default=None, description="Error message if any") + + +class DefiLlamaFetchChainHistoricalTvl(DefiLlamaBaseTool): + """Tool for fetching historical TVL data for a specific blockchain. + + This tool fetches the complete Total Value Locked (TVL) history for a given + blockchain using the DeFiLlama API. It includes rate limiting and chain + validation to ensure reliable data retrieval. + + Example: + tvl_tool = DefiLlamaFetchChainHistoricalTvl( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await tvl_tool._arun(chain="ethereum") + """ + + name: str = "defillama_fetch_chain_historical_tvl" + description: str = FETCH_HISTORICAL_TVL_PROMPT + args_schema: Type[BaseModel] = FetchChainHistoricalTVLInput + + async def _arun(self, chain: str) -> FetchChainHistoricalTVLResponse: + """Fetch historical TVL data for the given chain. + + Args: + config: Runnable configuration + chain: Blockchain name (e.g., "ethereum", "solana") + + Returns: + FetchChainHistoricalTVLResponse containing chain name, TVL history or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchChainHistoricalTVLResponse(chain=chain, error=error_msg) + + # Validate chain parameter + is_valid, normalized_chain = await self.validate_chain(chain) + if not is_valid or normalized_chain is None: + return FetchChainHistoricalTVLResponse( + chain=chain, error=f"Invalid chain: {chain}" + ) + + # Fetch TVL history from API + result = await fetch_chain_historical_tvl(normalized_chain) + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchChainHistoricalTVLResponse( + chain=normalized_chain, error=result["error"] + ) + + # Parse response into our schema + data_points = [HistoricalTVLDataPoint(**point) for point in result] + + return FetchChainHistoricalTVLResponse( + chain=normalized_chain, data=data_points + ) + + except Exception as e: + return FetchChainHistoricalTVLResponse(chain=chain, error=str(e)) diff --git a/intentkit/skills/defillama/tvl/fetch_chains.py b/intentkit/skills/defillama/tvl/fetch_chains.py new file mode 100644 index 00000000..88153bef --- /dev/null +++ b/intentkit/skills/defillama/tvl/fetch_chains.py @@ -0,0 +1,106 @@ +"""Tool for fetching chain TVL data via DeFi Llama API.""" + +from typing import List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_chains +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_CHAINS_PROMPT = """ +This tool fetches current Total Value Locked (TVL) data for all blockchains tracked by DeFi Llama. 
+No input parameters are required. Returns a comprehensive list including: +- Chain name and identifiers +- Current TVL in USD +- Chain metadata (token symbol, IDs) +- Aggregated total TVL across all chains +Returns the complete list of chains and total TVL or an error if the request fails. +""" + + +class ChainTVLData(BaseModel): + """Model representing TVL data for a single chain.""" + + name: str = Field(..., description="Chain name") + tvl: float = Field(..., description="Total Value Locked in USD") + gecko_id: Optional[str] = Field(None, description="CoinGecko identifier") + token_symbol: Optional[str] = Field( + None, alias="tokenSymbol", description="Native token symbol" + ) + cmc_id: Optional[str] = Field( + None, alias="cmcId", description="CoinMarketCap identifier" + ) + chain_id: Optional[int | str] = Field( + None, alias="chainId", description="Chain identifier" + ) + + +class FetchChainsInput(BaseModel): + """Input schema for fetching all chains' TVL data. + + This endpoint doesn't require any parameters as it returns + TVL data for all chains. + """ + + pass + + +class FetchChainsResponse(BaseModel): + """Response schema for all chains' TVL data.""" + + chains: List[ChainTVLData] = Field( + default_factory=list, description="List of chains with their TVL data" + ) + total_tvl: float = Field(..., description="Total TVL across all chains in USD") + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchChains(DefiLlamaBaseTool): + """Tool for fetching current TVL data for all blockchains. + + This tool retrieves the current Total Value Locked (TVL) for all chains + tracked by DeFi Llama, including chain identifiers and metadata. + + Example: + chains_tool = DefiLlamaFetchChains( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await chains_tool._arun() + """ + + name: str = "defillama_fetch_chains" + description: str = FETCH_CHAINS_PROMPT + args_schema: Type[BaseModel] = FetchChainsInput + + async def _arun(self, **kwargs) -> FetchChainsResponse: + """Fetch TVL data for all chains. 
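+ No arguments are needed; total TVL is computed by summing all chains.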
+ + Returns: + FetchChainsResponse containing chain TVL data and total TVL or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchChainsResponse(chains=[], total_tvl=0, error=error_msg) + + # Fetch chains data from API + result = await fetch_chains() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchChainsResponse( + chains=[], total_tvl=0, error=result["error"] + ) + + # Parse chains data and calculate total TVL + chains = [ChainTVLData(**chain_data) for chain_data in result] + total_tvl = sum(chain.tvl for chain in chains) + + return FetchChainsResponse(chains=chains, total_tvl=total_tvl) + + except Exception as e: + return FetchChainsResponse(chains=[], total_tvl=0, error=str(e)) diff --git a/intentkit/skills/defillama/tvl/fetch_historical_tvl.py b/intentkit/skills/defillama/tvl/fetch_historical_tvl.py new file mode 100644 index 00000000..aa191dfa --- /dev/null +++ b/intentkit/skills/defillama/tvl/fetch_historical_tvl.py @@ -0,0 +1,90 @@ +"""Tool for fetching total historical TVL via DeFiLlama API.""" + +from typing import List, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_historical_tvl +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_TOTAL_HISTORICAL_TVL_PROMPT = """ +This tool fetches historical Total Value Locked (TVL) data across all blockchains. +Returns a time series of aggregate TVL values with their corresponding dates. +No input parameters are required as this endpoint returns global DeFi TVL data. +""" + + +class HistoricalTVLDataPoint(BaseModel): + """Model representing a single TVL data point.""" + + date: int = Field(..., description="Unix timestamp of the TVL measurement") + tvl: float = Field(..., description="Total Value Locked in USD at this timestamp") + + +class FetchHistoricalTVLInput(BaseModel): + """Input schema for fetching historical TVL data. + + This endpoint doesn't require any parameters as it returns + global TVL data across all chains. + """ + + pass + + +class FetchHistoricalTVLResponse(BaseModel): + """Response schema for historical TVL data.""" + + data: List[HistoricalTVLDataPoint] = Field( + default_factory=list, + description="List of historical TVL data points across all chains", + ) + error: str | None = Field(default=None, description="Error message if any") + + +class DefiLlamaFetchHistoricalTvl(DefiLlamaBaseTool): + """Tool for fetching historical TVL data across all blockchains. + + This tool fetches the complete Total Value Locked (TVL) history aggregated + across all chains using the DeFiLlama API. It includes rate limiting to + ensure reliable data retrieval. + + Example: + tvl_tool = DefiLlamaFetchHistoricalTvl( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await tvl_tool._arun() + """ + + name: str = "defillama_fetch_total_historical_tvl" + description: str = FETCH_TOTAL_HISTORICAL_TVL_PROMPT + args_schema: Type[BaseModel] = FetchHistoricalTVLInput + + async def _arun(self, **kwargs) -> FetchHistoricalTVLResponse: + """Fetch historical TVL data across all chains. 
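+ No arguments are needed; the series is aggregated across all tracked chains.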
+ + Returns: + FetchHistoricalTVLResponse containing TVL history or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchHistoricalTVLResponse(error=error_msg) + + # Fetch TVL history from API + result = await fetch_historical_tvl() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchHistoricalTVLResponse(error=result["error"]) + + # Parse response into our schema + data_points = [HistoricalTVLDataPoint(**point) for point in result] + + return FetchHistoricalTVLResponse(data=data_points) + + except Exception as e: + return FetchHistoricalTVLResponse(error=str(e)) diff --git a/intentkit/skills/defillama/tvl/fetch_protocol.py b/intentkit/skills/defillama/tvl/fetch_protocol.py new file mode 100644 index 00000000..b11f2eef --- /dev/null +++ b/intentkit/skills/defillama/tvl/fetch_protocol.py @@ -0,0 +1,204 @@ +"""Tool for fetching specific protocol details via DeFi Llama API.""" + +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_protocol +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_PROTOCOL_PROMPT = """ +This tool fetches comprehensive details about a specific DeFi protocol. +Provide the protocol identifier (e.g., "aave", "curve") to get detailed information including: +- Basic protocol information (name, description, website) +- TVL data across different chains +- Token information and historical amounts +- Social media and development links +- Funding history and significant events +- Market metrics and related protocols +Returns complete protocol details or an error if the protocol is not found. 
+""" + + +class TokenAmount(BaseModel): + """Model representing token amounts at a specific date.""" + + date: int = Field(..., description="Unix timestamp") + tokens: Dict[str, float] = Field(..., description="Token amounts keyed by symbol") + + +class ChainTVLData(BaseModel): + """Model representing TVL data for a specific chain.""" + + tvl: List[Dict[str, float]] = Field(..., description="Historical TVL data points") + tokens: Optional[Dict[str, float]] = Field( + None, description="Current token amounts" + ) + tokensInUsd: Optional[Dict[str, float]] = Field( + None, description="Current token amounts in USD" + ) + + +class HistoricalTVL(BaseModel): + """Model representing a historical TVL data point.""" + + date: int = Field(..., description="Unix timestamp") + totalLiquidityUSD: float = Field(..., description="Total TVL in USD") + + +class Raise(BaseModel): + """Model representing a funding round.""" + + date: int = Field(..., description="Funding date") + name: str = Field(..., description="Protocol name") + round: str = Field(..., description="Funding round type") + amount: float = Field(..., description="Amount raised in millions") + chains: List[str] = Field(..., description="Chains involved") + sector: str = Field(..., description="Business sector") + category: str = Field(..., description="Protocol category") + categoryGroup: str = Field(..., description="Category group") + source: str = Field(..., description="Information source") + leadInvestors: List[str] = Field(default_factory=list, description="Lead investors") + otherInvestors: List[str] = Field( + default_factory=list, description="Other investors" + ) + valuation: Optional[float] = Field(None, description="Valuation at time of raise") + defillamaId: Optional[str] = Field(None, description="DefiLlama ID") + + +class Hallmark(BaseModel): + """Model representing a significant protocol event.""" + + timestamp: int + description: str + + +class ProtocolDetail(BaseModel): + """Model representing detailed protocol information.""" + + # Basic Info + id: str = Field(..., description="Protocol unique identifier") + name: str = Field(..., description="Protocol name") + address: Optional[str] = Field(None, description="Protocol address") + symbol: str = Field(..., description="Protocol token symbol") + url: str = Field(..., description="Protocol website") + description: str = Field(..., description="Protocol description") + logo: str = Field(..., description="Logo URL") + + # Chain Info + chains: List[str] = Field(default_factory=list, description="Supported chains") + currentChainTvls: Dict[str, float] = Field(..., description="Current TVL by chain") + chainTvls: Dict[str, ChainTVLData] = Field( + ..., description="Historical TVL data by chain" + ) + + # Identifiers + gecko_id: Optional[str] = Field(None, description="CoinGecko ID") + cmcId: Optional[str] = Field(None, description="CoinMarketCap ID") + + # Social & Development + twitter: Optional[str] = Field(None, description="Twitter handle") + treasury: Optional[str] = Field(None, description="Treasury information") + governanceID: Optional[List[str]] = Field( + None, description="Governance identifiers" + ) + github: Optional[List[str]] = Field(None, description="GitHub repositories") + + # Protocol Relationships + isParentProtocol: Optional[bool] = Field( + None, description="Whether this is a parent protocol" + ) + otherProtocols: Optional[List[str]] = Field(None, description="Related protocols") + + # Historical Data + tokens: List[TokenAmount] = Field( + 
default_factory=list, description="Historical token amounts"
+    )
+    tvl: List[HistoricalTVL] = Field(..., description="Historical TVL data points")
+    raises: Optional[List[Raise]] = Field(None, description="Funding rounds")
+    hallmarks: Optional[List[Hallmark]] = Field(None, description="Significant events")
+
+    # Market Data
+    mcap: Optional[float] = Field(None, description="Market capitalization")
+    metrics: Dict = Field(default_factory=dict, description="Additional metrics")
+
+
+class DefiLlamaProtocolInput(BaseModel):
+    """Input model for fetching protocol details."""
+
+    protocol: str = Field(..., description="Protocol identifier to fetch")
+
+
+class DefiLlamaProtocolOutput(BaseModel):
+    """Output model for the protocol fetching tool."""
+
+    protocol: Optional[ProtocolDetail] = Field(None, description="Protocol details")
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchProtocol(DefiLlamaBaseTool):
+    """Tool for fetching detailed protocol information from DeFi Llama.
+
+    This tool retrieves comprehensive information about a specific protocol,
+    including TVL history, token breakdowns, and metadata.
+
+    Example:
+        protocol_tool = DefiLlamaFetchProtocol(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await protocol_tool._arun(protocol="aave")
+    """
+
+    name: str = "defillama_fetch_protocol"
+    description: str = FETCH_PROTOCOL_PROMPT
+    args_schema: Type[BaseModel] = DefiLlamaProtocolInput
+
+    async def _arun(self, protocol: str) -> DefiLlamaProtocolOutput:
+        """Fetch detailed information about a specific protocol.
+
+        Args:
+            protocol: Protocol identifier to fetch
+
+        Returns:
+            DefiLlamaProtocolOutput containing protocol details or error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return DefiLlamaProtocolOutput(error=error_msg)
+
+            # Fetch protocol data from API
+            result = await fetch_protocol(protocol)
+
+            if isinstance(result, dict) and "error" in result:
+                return DefiLlamaProtocolOutput(error=result["error"])
+
+            # Process hallmarks if present and non-empty (the API may return null)
+            hallmarks = None
+            if result.get("hallmarks"):
+                hallmarks = [
+                    Hallmark(timestamp=h[0], description=h[1])
+                    for h in result["hallmarks"]
+                ]
+
+            # Create raises objects if present and non-empty
+            raises = None
+            if result.get("raises"):
+                raises = [Raise(**r) for r in result["raises"]]
+
+            # Create protocol detail object
+            protocol_detail = ProtocolDetail(
+                **{k: v for k, v in result.items() if k not in ["hallmarks", "raises"]},
+                hallmarks=hallmarks,
+                raises=raises,
+            )
+
+            return DefiLlamaProtocolOutput(protocol=protocol_detail)
+
+        except Exception as e:
+            return DefiLlamaProtocolOutput(error=str(e))
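DeFi Llama encodes each hallmark as a `[timestamp, description]` pair, which the tool above unpacks into `Hallmark` models. A self-contained sketch of that transformation, using made-up data:

```python
from typing import List

from pydantic import BaseModel


class Hallmark(BaseModel):
    timestamp: int
    description: str


# Illustrative payload shaped like the API's "hallmarks" field.
raw_hallmarks = [
    [1598412419, "Mainnet launch"],
    [1651276800, "V2 release"],
]

# Index 0 is the unix timestamp, index 1 the event description.
hallmarks: List[Hallmark] = [
    Hallmark(timestamp=h[0], description=h[1]) for h in raw_hallmarks
]
assert hallmarks[0].description == "Mainnet launch"
```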
diff --git a/intentkit/skills/defillama/tvl/fetch_protocol_current_tvl.py b/intentkit/skills/defillama/tvl/fetch_protocol_current_tvl.py
new file mode 100644
index 00000000..e40d888a
--- /dev/null
+++ b/intentkit/skills/defillama/tvl/fetch_protocol_current_tvl.py
@@ -0,0 +1,90 @@
+"""Tool for fetching protocol TVL via DeFiLlama API."""
+
+from typing import Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_protocol_current_tvl
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_TVL_PROMPT = """
+This tool fetches the current Total Value Locked (TVL) for a specific DeFi protocol.
+Provide the protocol slug (e.g., "aave", "curve") to get its current TVL in USD.
+Returns the normalized protocol slug and its TVL value.
+"""
+
+
+class FetchProtocolCurrentTVLInput(BaseModel):
+    """Input schema for fetching current protocol TVL."""
+
+    protocol: str = Field(
+        ..., description="Protocol slug to fetch TVL for (e.g., 'aave', 'curve')"
+    )
+
+
+class FetchProtocolCurrentTVLResponse(BaseModel):
+    """Response schema for current protocol TVL."""
+
+    protocol: str = Field(..., description="Normalized protocol slug")
+    tvl: float = Field(..., description="Current Total Value Locked in USD")
+    error: str | None = Field(default=None, description="Error message if any")
+
+
+class DefiLlamaFetchProtocolCurrentTvl(DefiLlamaBaseTool):
+    """Tool for fetching current TVL of a specific DeFi protocol.
+
+    This tool fetches the current Total Value Locked (TVL) for a given protocol
+    using the DeFiLlama API. It includes rate limiting to avoid API abuse.
+
+    Example:
+        tvl_tool = DefiLlamaFetchProtocolCurrentTvl(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await tvl_tool._arun(protocol="aave")
+    """
+
+    name: str = "defillama_fetch_protocol_tvl"
+    description: str = FETCH_TVL_PROMPT
+    args_schema: Type[BaseModel] = FetchProtocolCurrentTVLInput
+
+    async def _arun(self, protocol: str) -> FetchProtocolCurrentTVLResponse:
+        """Fetch current TVL for the given protocol.
+
+        Args:
+            protocol: DeFi protocol slug (e.g., "aave", "curve")
+
+        Returns:
+            FetchProtocolCurrentTVLResponse containing the normalized slug and
+            TVL value, or an error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return FetchProtocolCurrentTVLResponse(
+                    protocol=protocol, tvl=0, error=error_msg
+                )
+
+            # Normalize protocol slug
+            normalized_protocol = protocol.lower().replace(" ", "-")
+
+            # Fetch TVL from API
+            result = await fetch_protocol_current_tvl(normalized_protocol)
+
+            # Check for API errors
+            if isinstance(result, dict) and "error" in result:
+                return FetchProtocolCurrentTVLResponse(
+                    protocol=normalized_protocol, tvl=0, error=result["error"]
+                )
+
+            return FetchProtocolCurrentTVLResponse(
+                protocol=normalized_protocol, tvl=float(result)
+            )
+
+        except Exception as e:
+            return FetchProtocolCurrentTVLResponse(
+                protocol=protocol, tvl=0, error=str(e)
+            )
diff --git a/intentkit/skills/defillama/tvl/fetch_protocols.py b/intentkit/skills/defillama/tvl/fetch_protocols.py
new file mode 100644
index 00000000..9281a64b
--- /dev/null
+++ b/intentkit/skills/defillama/tvl/fetch_protocols.py
@@ -0,0 +1,195 @@
+"""Tool for fetching all protocols via DeFi Llama API."""
+
+import logging
+from typing import Dict, List, Optional, Type, Union
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_protocols
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+logger = logging.getLogger(__name__)
+
+FETCH_PROTOCOLS_PROMPT = """
+This tool fetches information about all protocols tracked by DeFi Llama.
+No input parameters are required.
Returns comprehensive data for each protocol including: +- Basic information (name, description, website, logo) +- TVL metrics (total and per-chain breakdowns) +- Audit status and security information +- Token details and market metrics +- Chain support and deployment information +- Social media and development links +- Protocol relationships (forks, oracles) +- Historical events and significant updates +Returns the complete list of protocols or an error if the request fails. +""" + + +class Hallmark(BaseModel): + """Model representing a protocol hallmark (significant event).""" + + timestamp: int + description: str + + +class Protocol(BaseModel): + """Model representing a DeFi protocol.""" + + # Basic Info + id: str = Field(..., description="Protocol unique identifier") + name: str = Field(..., description="Protocol name") + address: Optional[str] = Field(None, description="Protocol's main contract address") + symbol: str = Field(..., description="Protocol token symbol") + url: Optional[str] = Field(None, description="Protocol website") + description: Optional[str] = Field(None, description="Protocol description") + chain: Optional[str] = Field(None, description="Main chain of the protocol") + logo: Optional[str] = Field(None, description="URL to protocol logo") + + # Audit Information + audits: Union[str, int] = Field("0", description="Number of audits") + audit_note: Optional[str] = Field(None, description="Additional audit information") + audit_links: Optional[List[str]] = Field(None, description="Links to audit reports") + + # External IDs + gecko_id: Optional[str] = Field(None, description="CoinGecko ID") + cmcId: Optional[Union[str, int]] = Field(None, description="CoinMarketCap ID") + + # Classification + category: str = Field(..., description="Protocol category") + chains: List[str] = Field( + default_factory=list, description="Chains the protocol operates on" + ) + + # Module and Related Info + module: str = Field(..., description="Module name in DefiLlama") + parentProtocol: Optional[str] = Field( + None, description="Parent protocol identifier" + ) + + # Social and Development + twitter: Optional[str] = Field(None, description="Twitter handle") + github: Optional[List[str]] = Field(None, description="GitHub organization names") + + # Protocol Relationships + oracles: List[str] = Field(default_factory=list, description="Oracle services used") + forkedFrom: List[str] = Field( + default_factory=list, description="Protocols this one was forked from" + ) + + # Additional Metadata + methodology: Optional[str] = Field(None, description="TVL calculation methodology") + listedAt: Optional[int] = Field( + None, description="Timestamp when protocol was listed" + ) + openSource: Optional[bool] = Field( + None, description="Whether protocol is open source" + ) + treasury: Optional[str] = Field(None, description="Treasury information") + misrepresentedTokens: Optional[bool] = Field( + None, description="Whether tokens are misrepresented" + ) + hallmarks: Optional[List[Hallmark]] = Field( + None, description="Significant protocol events" + ) + + # TVL Related Data + tvl: Optional[float] = Field(None, description="Total Value Locked in USD") + chainTvls: Dict[str, float] = Field( + default_factory=dict, + description="TVL breakdown by chain including special types (staking, borrowed, etc.)", + ) + change_1h: Optional[float] = Field(None, description="1 hour TVL change percentage") + change_1d: Optional[float] = Field(None, description="1 day TVL change percentage") + change_7d: Optional[float] 
= Field(None, description="7 day TVL change percentage")
+
+    # Additional TVL Components
+    staking: Optional[float] = Field(None, description="Value in staking")
+    pool2: Optional[float] = Field(None, description="Value in pool2")
+    borrowed: Optional[float] = Field(None, description="Value borrowed")
+
+    # Token Information
+    tokenBreakdowns: Dict[str, float] = Field(
+        default_factory=dict, description="TVL breakdown by token"
+    )
+    mcap: Optional[float] = Field(None, description="Market capitalization")
+
+
+class DefiLlamaProtocolsOutput(BaseModel):
+    """Output model for the protocols fetching tool."""
+
+    protocols: List[Protocol] = Field(
+        default_factory=list, description="List of fetched protocols"
+    )
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchProtocols(DefiLlamaBaseTool):
+    """Tool for fetching all protocols from DeFi Llama.
+
+    This tool retrieves information about all protocols tracked by DeFi Llama,
+    including their TVL, supported chains, and related metrics.
+
+    Example:
+        protocols_tool = DefiLlamaFetchProtocols(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await protocols_tool._arun()
+    """
+
+    name: str = "defillama_fetch_protocols"
+    description: str = FETCH_PROTOCOLS_PROMPT
+
+    class EmptyArgsSchema(BaseModel):
+        """Empty schema for no input parameters."""
+
+        pass
+
+    args_schema: Type[BaseModel] = EmptyArgsSchema
+
+    async def _arun(self, **kwargs) -> DefiLlamaProtocolsOutput:
+        """Fetch information about all protocols.
+
+        Returns:
+            DefiLlamaProtocolsOutput containing list of protocols or error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return DefiLlamaProtocolsOutput(error=error_msg)
+
+            # Fetch protocols from API
+            result = await fetch_protocols()
+
+            if isinstance(result, dict) and "error" in result:
+                return DefiLlamaProtocolsOutput(error=result["error"])
+
+            # Convert raw data to Protocol models
+            protocols = []
+            for protocol_data in result:
+                try:
+                    # Process hallmarks if present
+                    hallmarks = None
+                    if "hallmarks" in protocol_data and protocol_data["hallmarks"]:
+                        hallmarks = [
+                            Hallmark(timestamp=h[0], description=h[1])
+                            for h in protocol_data["hallmarks"]
+                        ]
+
+                    # Create protocol model
+                    protocol = Protocol(
+                        **{k: v for k, v in protocol_data.items() if k != "hallmarks"},
+                        hallmarks=hallmarks,
+                    )
+                    protocols.append(protocol)
+                except Exception as e:
+                    # Log the failed protocol but continue processing the others
+                    logger.warning(
+                        f"Error processing protocol {protocol_data.get('name', 'unknown')}: {str(e)}"
+                    )
+                    continue
+
+            return DefiLlamaProtocolsOutput(protocols=protocols)
+
+        except Exception as e:
+            return DefiLlamaProtocolsOutput(error=str(e))
diff --git a/intentkit/skills/defillama/volumes/__init__.py b/intentkit/skills/defillama/volumes/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/intentkit/skills/defillama/volumes/fetch_dex_overview.py b/intentkit/skills/defillama/volumes/fetch_dex_overview.py
new file mode 100644
index 00000000..ab126cf5
--- /dev/null
+++ b/intentkit/skills/defillama/volumes/fetch_dex_overview.py
@@ -0,0 +1,156 @@
+"""Tool for fetching DEX overview data via DeFi Llama API."""
+
+from typing import Dict, List, Optional
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_dex_overview
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+ +FETCH_DEX_OVERVIEW_PROMPT = """ +This tool fetches comprehensive overview data for DEX protocols from DeFi Llama. +Returns: +- Chain statistics and breakdowns +- Protocol-specific metrics +- Change percentages +- Total volume data +""" + + +class MethodologyInfo(BaseModel): + """Model representing methodology information.""" + + UserFees: Optional[str] = Field(None, description="User fee information") + Fees: Optional[str] = Field(None, description="Fee structure") + Revenue: Optional[str] = Field(None, description="Revenue model") + ProtocolRevenue: Optional[str] = Field(None, description="Protocol revenue info") + HoldersRevenue: Optional[str] = Field(None, description="Holder revenue info") + SupplySideRevenue: Optional[str] = Field( + None, description="Supply side revenue info" + ) + + +class ProtocolInfo(BaseModel): + """Model representing individual protocol data.""" + + total24h: Optional[float] = Field(None, description="24h total") + total48hto24h: Optional[float] = Field(None, description="48h to 24h total") + total7d: Optional[float] = Field(None, description="7d total") + total14dto7d: Optional[float] = Field(None, description="14d to 7d total") + total60dto30d: Optional[float] = Field(None, description="60d to 30d total") + total30d: Optional[float] = Field(None, description="30d total") + total1y: Optional[float] = Field(None, description="1y total") + totalAllTime: Optional[float] = Field(None, description="All time total") + average1y: Optional[float] = Field(None, description="1y average") + change_1d: Optional[float] = Field(None, description="1d change") + change_7d: Optional[float] = Field(None, description="7d change") + change_1m: Optional[float] = Field(None, description="1m change") + change_7dover7d: Optional[float] = Field(None, description="7d over 7d change") + change_30dover30d: Optional[float] = Field(None, description="30d over 30d change") + breakdown24h: Optional[Dict[str, Dict[str, float]]] = Field( + None, description="24h breakdown by chain" + ) + breakdown30d: Optional[Dict[str, Dict[str, float]]] = Field( + None, description="30d breakdown by chain" + ) + total7DaysAgo: Optional[float] = Field(None, description="Total 7 days ago") + total30DaysAgo: Optional[float] = Field(None, description="Total 30 days ago") + defillamaId: Optional[str] = Field(None, description="DeFi Llama ID") + name: str = Field(..., description="Protocol name") + displayName: str = Field(..., description="Display name") + module: str = Field(..., description="Module name") + category: str = Field(..., description="Protocol category") + logo: Optional[str] = Field(None, description="Logo URL") + chains: List[str] = Field(..., description="Supported chains") + protocolType: str = Field(..., description="Protocol type") + methodologyURL: Optional[str] = Field(None, description="Methodology URL") + methodology: Optional[MethodologyInfo] = Field( + None, description="Methodology details" + ) + latestFetchIsOk: bool = Field(..., description="Latest fetch status") + disabled: Optional[bool] = Field(None, description="Whether protocol is disabled") + parentProtocol: Optional[str] = Field(None, description="Parent protocol") + slug: str = Field(..., description="Protocol slug") + linkedProtocols: Optional[List[str]] = Field(None, description="Linked protocols") + id: str = Field(..., description="Protocol ID") + + +class FetchDexOverviewResponse(BaseModel): + """Response schema for DEX overview data.""" + + totalDataChart: List = Field( + default_factory=list, description="Total data 
chart points"
+    )
+    totalDataChartBreakdown: List = Field(
+        default_factory=list, description="Total data chart breakdown"
+    )
+    breakdown24h: Optional[Dict[str, Dict[str, float]]] = Field(
+        None, description="24h breakdown by chain"
+    )
+    breakdown30d: Optional[Dict[str, Dict[str, float]]] = Field(
+        None, description="30d breakdown by chain"
+    )
+    chain: Optional[str] = Field(None, description="Specific chain")
+    # Aggregates are optional with safe defaults so that error-only responses
+    # (e.g. rate limiting) can be constructed without API data.
+    allChains: List[str] = Field(default_factory=list, description="List of all chains")
+    total24h: Optional[float] = Field(None, description="24h total")
+    total48hto24h: Optional[float] = Field(None, description="48h to 24h total")
+    total7d: Optional[float] = Field(None, description="7d total")
+    total14dto7d: Optional[float] = Field(None, description="14d to 7d total")
+    total60dto30d: Optional[float] = Field(None, description="60d to 30d total")
+    total30d: Optional[float] = Field(None, description="30d total")
+    total1y: Optional[float] = Field(None, description="1y total")
+    change_1d: Optional[float] = Field(None, description="1d change")
+    change_7d: Optional[float] = Field(None, description="7d change")
+    change_1m: Optional[float] = Field(None, description="1m change")
+    change_7dover7d: Optional[float] = Field(None, description="7d over 7d change")
+    change_30dover30d: Optional[float] = Field(None, description="30d over 30d change")
+    total7DaysAgo: Optional[float] = Field(None, description="Total 7 days ago")
+    total30DaysAgo: Optional[float] = Field(None, description="Total 30 days ago")
+    protocols: List[ProtocolInfo] = Field(
+        default_factory=list, description="List of protocol data"
+    )
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchDexOverview(DefiLlamaBaseTool):
+    """Tool for fetching DEX overview data from DeFi Llama.
+
+    This tool retrieves comprehensive data about DEX protocols, including
+    volumes, metrics, and chain breakdowns.
+
+    Example:
+        overview_tool = DefiLlamaFetchDexOverview(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await overview_tool._arun()
+    """
+
+    name: str = "defillama_fetch_dex_overview"
+    description: str = FETCH_DEX_OVERVIEW_PROMPT
+    args_schema: None = None  # No input parameters needed
+
+    async def _arun(self, **kwargs) -> FetchDexOverviewResponse:
+        """Fetch DEX overview data.
+
+        Returns:
+            FetchDexOverviewResponse containing overview data or error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return FetchDexOverviewResponse(error=error_msg)
+
+            # Fetch overview data from API
+            result = await fetch_dex_overview()
+
+            # Check for API errors
+            if isinstance(result, dict) and "error" in result:
+                return FetchDexOverviewResponse(error=result["error"])
+
+            # Return the response matching the API structure
+            return FetchDexOverviewResponse(**result)
+
+        except Exception as e:
+            return FetchDexOverviewResponse(error=str(e))
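Making the aggregate fields optional lets a single response model carry either a parsed overview payload or a bare error. A trimmed-down, self-contained sketch of that pattern (the model here is an illustrative stand-in, not the class above):

```python
from typing import List, Optional

from pydantic import BaseModel, Field


class OverviewResponse(BaseModel):
    """Stand-in for FetchDexOverviewResponse with the same optionality rules."""

    allChains: List[str] = Field(default_factory=list)
    total24h: Optional[float] = None
    error: Optional[str] = None


# Success path: fields populated from API data.
ok = OverviewResponse(allChains=["ethereum", "solana"], total24h=1.2e9)

# Error path: validates even though no totals are supplied.
failed = OverviewResponse(error="Rate limit exceeded")

assert ok.error is None
assert failed.total24h is None
```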
diff --git a/intentkit/skills/defillama/volumes/fetch_dex_summary.py b/intentkit/skills/defillama/volumes/fetch_dex_summary.py
new file mode 100644
index 00000000..10892dc1
--- /dev/null
+++ b/intentkit/skills/defillama/volumes/fetch_dex_summary.py
@@ -0,0 +1,120 @@
+"""Tool for fetching DEX protocol summary data via DeFi Llama API."""
+
+from typing import Dict, List, Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_dex_summary
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_DEX_SUMMARY_PROMPT = """
+This tool fetches summary data for a specific DEX protocol from DeFi Llama.
+Required:
+- Protocol identifier
+Returns:
+- Protocol details and metadata
+- Volume metrics
+- Social links and identifiers
+- Child protocols and versions
+"""
+
+
+class FetchDexSummaryInput(BaseModel):
+    """Input schema for fetching DEX protocol summary."""
+
+    protocol: str = Field(..., description="Protocol identifier (e.g. 'uniswap')")
+
+
+class FetchDexSummaryResponse(BaseModel):
+    """Response schema for DEX protocol summary data.
+
+    Identity fields are optional with None defaults so that error-only
+    responses (e.g. rate limiting) can be constructed without API data.
+    """
+
+    id: Optional[str] = Field(None, description="Protocol ID")
+    name: Optional[str] = Field(None, description="Protocol name")
+    url: Optional[str] = Field(None, description="Protocol website URL")
+    description: Optional[str] = Field(None, description="Protocol description")
+    logo: Optional[str] = Field(None, description="Logo URL")
+    gecko_id: Optional[str] = Field(None, description="CoinGecko ID")
+    cmcId: Optional[str] = Field(None, description="CoinMarketCap ID")
+    chains: List[str] = Field(default_factory=list, description="Supported chains")
+    twitter: Optional[str] = Field(None, description="Twitter handle")
+    treasury: Optional[str] = Field(None, description="Treasury identifier")
+    governanceID: Optional[List[str]] = Field(None, description="Governance IDs")
+    github: Optional[List[str]] = Field(None, description="GitHub organizations")
+    childProtocols: Optional[List[str]] = Field(None, description="Child protocols")
+    linkedProtocols: Optional[List[str]] = Field(None, description="Linked protocols")
+    disabled: Optional[bool] = Field(None, description="Whether protocol is disabled")
+    displayName: Optional[str] = Field(None, description="Display name")
+    module: Optional[str] = Field(None, description="Module name")
+    category: Optional[str] = Field(None, description="Protocol category")
+    methodologyURL: Optional[str] = Field(None, description="Methodology URL")
+    methodology: Optional[Dict] = Field(None, description="Methodology details")
+    forkedFrom: Optional[List[str]] = Field(None, description="Forked from protocols")
+    audits: Optional[str] = Field(None, description="Audit information")
+    address: Optional[str] = Field(None, description="Contract address")
+    audit_links: Optional[List[str]] = Field(None, description="Audit links")
+    versionKey: Optional[str] = Field(None, description="Version key")
+    parentProtocol: Optional[str] = Field(None, description="Parent protocol")
+    previousNames: Optional[List[str]] = Field(None, description="Previous names")
+    latestFetchIsOk: Optional[bool] = Field(None, description="Latest fetch status")
+    slug: Optional[str] = Field(None, description="Protocol slug")
+    protocolType: Optional[str] = Field(None, description="Protocol type")
+    total24h: Optional[float] = Field(None, description="24h total volume")
+    total48hto24h: Optional[float] = Field(None, description="48h to 24h total volume")
+    total7d: Optional[float] = Field(None, description="7d total volume")
+    totalAllTime: Optional[float] = Field(None, description="All time total volume")
+    totalDataChart: List = Field(default_factory=list, description="Total data chart")
+    totalDataChartBreakdown: List = Field(
+        default_factory=list, description="Chart breakdown"
+    )
+    change_1d: Optional[float] = Field(None, description="1d change percentage")
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchDexSummary(DefiLlamaBaseTool):
+    """Tool for fetching DEX protocol summary data from DeFi Llama.
+
+    This tool retrieves detailed information about a specific DEX protocol,
+    including metadata, metrics, and related protocols.
+
+    Example:
+        summary_tool = DefiLlamaFetchDexSummary(
+            skill_store=store,
+            agent_id="agent_123",
+            agent_store=agent_store
+        )
+        result = await summary_tool._arun(protocol="uniswap")
+    """
+
+    name: str = "defillama_fetch_dex_summary"
+    description: str = FETCH_DEX_SUMMARY_PROMPT
+    args_schema: Type[BaseModel] = FetchDexSummaryInput
+
+    async def _arun(self, protocol: str) -> FetchDexSummaryResponse:
+        """Fetch summary data for the given DEX protocol.
+
+        Args:
+            protocol: Protocol identifier
+
+        Returns:
+            FetchDexSummaryResponse containing protocol data or error
+        """
+        try:
+            # Check rate limiting
+            context = self.get_context()
+            is_rate_limited, error_msg = await self.check_rate_limit(context)
+            if is_rate_limited:
+                return FetchDexSummaryResponse(error=error_msg)
+
+            # Fetch protocol data from API
+            result = await fetch_dex_summary(protocol=protocol)
+
+            # Check for API errors
+            if isinstance(result, dict) and "error" in result:
+                return FetchDexSummaryResponse(error=result["error"])
+
+            # Return the response matching the API structure
+            return FetchDexSummaryResponse(**result)
+
+        except Exception as e:
+            return FetchDexSummaryResponse(error=str(e))
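These `_arun` methods all rely on the same convention from the `api` module: helpers return a dict containing an `"error"` key on failure rather than raising. A self-contained sketch of that branch (the stub below is a hypothetical stand-in, not the real `fetch_dex_summary`):

```python
import asyncio
from typing import Any, Dict


async def fetch_summary_stub(protocol: str) -> Dict[str, Any]:
    # Hypothetical stand-in for an api helper that signals failure
    # by returning {"error": ...} instead of raising.
    if protocol != "uniswap":
        return {"error": f"unknown protocol: {protocol}"}
    return {"name": "Uniswap", "slug": "uniswap", "total24h": 1.2e9}


async def main() -> None:
    for slug in ("uniswap", "bogus"):
        result = await fetch_summary_stub(slug)
        # Same check the tools above use to separate failures from payloads.
        if isinstance(result, dict) and "error" in result:
            print(f"{slug}: error -> {result['error']}")
        else:
            print(f"{slug}: payload -> {result}")


asyncio.run(main())
```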
diff --git a/intentkit/skills/defillama/volumes/fetch_options_overview.py b/intentkit/skills/defillama/volumes/fetch_options_overview.py
new file mode 100644
index 00000000..b927ffe0
--- /dev/null
+++ b/intentkit/skills/defillama/volumes/fetch_options_overview.py
@@ -0,0 +1,130 @@
+"""Tool for fetching options overview data via DeFi Llama API."""
+
+from typing import Dict, List, Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.defillama.api import fetch_options_overview
+from intentkit.skills.defillama.base import DefiLlamaBaseTool
+
+FETCH_OPTIONS_OVERVIEW_PROMPT = """
+This tool fetches comprehensive overview data for all options protocols from DeFi Llama.
+Returns detailed metrics including:
+- Total volumes across different timeframes
+- Change percentages
+- Protocol-specific data
+- Chain breakdowns
+"""
+
+
+class ProtocolMethodology(BaseModel):
+    """Model representing protocol methodology data."""
+
+    UserFees: Optional[str] = Field(None, description="User fees description")
+    Fees: Optional[str] = Field(None, description="Fees description")
+    Revenue: Optional[str] = Field(None, description="Revenue description")
+    ProtocolRevenue: Optional[str] = Field(
+        None, description="Protocol revenue description"
+    )
+    HoldersRevenue: Optional[str] = Field(
+        None, description="Holders revenue description"
+    )
+    SupplySideRevenue: Optional[str] = Field(
+        None, description="Supply side revenue description"
+    )
+
+
+class Protocol(BaseModel):
+    """Model representing protocol data."""
+
+    name: str = Field(..., description="Protocol name")
+    displayName: str = Field(..., description="Display name of protocol")
+    defillamaId: str = Field(..., description="DeFi Llama ID")
+    category: str = Field(..., description="Protocol category")
+    logo: str = Field(..., description="Logo URL")
+    chains: List[str] = Field(..., description="Supported chains")
+    module: str = Field(..., description="Protocol module")
+    total24h: Optional[float] = Field(None, description="24-hour total")
+    total7d: Optional[float] = Field(None, description="7-day total")
+    total30d: Optional[float] = Field(None, description="30-day total")
+    total1y: Optional[float] = Field(None, description="1-year total")
+    totalAllTime: Optional[float] = Field(None, description="All-time total")
+    change_1d: Optional[float] = Field(None, description="24-hour change percentage")
+    change_7d: Optional[float] = Field(None, description="7-day change percentage")
+    change_1m: Optional[float] = Field(None, description="30-day change percentage")
+    methodology: Optional[ProtocolMethodology] = Field(
+        None, description="Protocol methodology"
+    )
+    breakdown24h: Optional[Dict[str, Dict[str, float]]] = Field(
+        None, description="24-hour breakdown by chain"
+    )
+    breakdown30d: Optional[Dict[str, Dict[str, float]]] = Field(
+        None, description="30-day breakdown by chain"
+    )
+
+
+class FetchOptionsOverviewResponse(BaseModel):
+    """Response schema for options overview data.
+
+    Aggregates are optional with safe defaults so that error-only responses
+    (e.g. rate limiting) can be constructed without API data.
+    """
+
+    total24h: Optional[float] = Field(None, description="Total volume in last 24 hours")
+    total7d: Optional[float] = Field(None, description="Total volume in last 7 days")
+    total30d: Optional[float] = Field(None, description="Total volume in last 30 days")
+    total1y: Optional[float] = Field(None, description="Total volume in last year")
+    change_1d: Optional[float] = Field(None, description="24-hour change percentage")
+    change_7d: Optional[float] = Field(None, description="7-day change percentage")
+    change_1m: Optional[float] = Field(None, description="30-day change percentage")
+    allChains: List[str] = Field(default_factory=list, description="List of all chains")
+    protocols: List[Protocol] = Field(
+        default_factory=list, description="List of protocols"
+    )
+    error: Optional[str] = Field(None, description="Error message if any")
+
+
+class DefiLlamaFetchOptionsOverview(DefiLlamaBaseTool):
+    """Tool for fetching options overview data from DeFi Llama.
+
+    This tool retrieves comprehensive data about all options protocols,
+    including volume metrics, change percentages, and detailed protocol information.
+ + Example: + overview_tool = DefiLlamaFetchOptionsOverview( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await overview_tool._arun() + """ + + name: str = "defillama_fetch_options_overview" + description: str = FETCH_OPTIONS_OVERVIEW_PROMPT + + class EmptyArgsSchema(BaseModel): + """Empty schema for no input parameters.""" + + pass + + args_schema: Type[BaseModel] = EmptyArgsSchema + + async def _arun(self, **kwargs) -> FetchOptionsOverviewResponse: + """Fetch overview data for all options protocols. + + Returns: + FetchOptionsOverviewResponse containing comprehensive overview data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchOptionsOverviewResponse(error=error_msg) + + # Fetch overview data from API + result = await fetch_options_overview() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchOptionsOverviewResponse(error=result["error"]) + + # Return the parsed response + return FetchOptionsOverviewResponse(**result) + + except Exception as e: + return FetchOptionsOverviewResponse(error=str(e)) diff --git a/intentkit/skills/defillama/yields/__init__.py b/intentkit/skills/defillama/yields/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/defillama/yields/fetch_pool_chart.py b/intentkit/skills/defillama/yields/fetch_pool_chart.py new file mode 100644 index 00000000..77b0d641 --- /dev/null +++ b/intentkit/skills/defillama/yields/fetch_pool_chart.py @@ -0,0 +1,97 @@ +"""Tool for fetching pool chart data via DeFi Llama API.""" + +from typing import List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_pool_chart +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_POOL_CHART_PROMPT = """ +This tool fetches historical chart data from DeFi Llama for a specific pool. +Required: +- Pool ID +Returns historical data including: +- TVL in USD +- APY metrics (base, reward, total) +- Timestamps for each data point +""" + + +class PoolDataPoint(BaseModel): + """Model representing a single historical data point.""" + + timestamp: str = Field(..., description="ISO formatted timestamp of the data point") + tvlUsd: float = Field(..., description="Total Value Locked in USD") + apy: Optional[float] = Field(None, description="Total APY including rewards") + apyBase: Optional[float] = Field(None, description="Base APY without rewards") + apyReward: Optional[float] = Field(None, description="Additional APY from rewards") + il7d: Optional[float] = Field(None, description="7-day impermanent loss") + apyBase7d: Optional[float] = Field(None, description="7-day base APY") + + +class FetchPoolChartInput(BaseModel): + """Input schema for fetching pool chart data.""" + + pool_id: str = Field(..., description="ID of the pool to fetch chart data for") + + +class FetchPoolChartResponse(BaseModel): + """Response schema for pool chart data.""" + + status: str = Field("success", description="Response status") + data: List[PoolDataPoint] = Field( + default_factory=list, description="List of historical data points" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchPoolChart(DefiLlamaBaseTool): + """Tool for fetching pool chart data from DeFi Llama. 
+ + This tool retrieves historical data for a specific pool, including + TVL and APY metrics over time. + + Example: + chart_tool = DefiLlamaFetchPoolChart( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await chart_tool._arun( + pool_id="747c1d2a-c668-4682-b9f9-296708a3dd90" + ) + """ + + name: str = "defillama_fetch_pool_chart" + description: str = FETCH_POOL_CHART_PROMPT + args_schema: Type[BaseModel] = FetchPoolChartInput + + async def _arun(self, pool_id: str) -> FetchPoolChartResponse: + """Fetch historical chart data for the given pool. + + Args: + pool_id: ID of the pool to fetch chart data for + + Returns: + FetchPoolChartResponse containing historical data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchPoolChartResponse(error=error_msg) + + # Fetch chart data from API + result = await fetch_pool_chart(pool_id=pool_id) + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchPoolChartResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchPoolChartResponse(**result) + + except Exception as e: + return FetchPoolChartResponse(error=str(e)) diff --git a/intentkit/skills/defillama/yields/fetch_pools.py b/intentkit/skills/defillama/yields/fetch_pools.py new file mode 100644 index 00000000..a5c76c98 --- /dev/null +++ b/intentkit/skills/defillama/yields/fetch_pools.py @@ -0,0 +1,125 @@ +"""Tool for fetching pool data via DeFi Llama API.""" + +from typing import Optional + +from pydantic import BaseModel, Field + +from intentkit.skills.defillama.api import fetch_pools +from intentkit.skills.defillama.base import DefiLlamaBaseTool + +FETCH_POOLS_PROMPT = """ +This tool fetches comprehensive data about yield-generating pools from DeFi Llama. 
+Returns data including: +- Pool details (chain, project, symbol) +- TVL and APY information +- Statistical metrics (mean, standard deviation) +- Risk assessments and predictions +- Historical performance data +""" + + +class PredictionData(BaseModel): + """Model representing prediction data for a pool.""" + + predictedClass: Optional[str] = Field( + None, description="Predicted direction of APY movement" + ) + predictedProbability: Optional[float] = Field( + None, description="Probability of the prediction" + ) + binnedConfidence: Optional[int] = Field(None, description="Confidence level bucket") + + +class PoolData(BaseModel): + """Model representing a single pool's data.""" + + chain: str = Field(..., description="Blockchain network") + project: str = Field(..., description="Protocol or project name") + symbol: str = Field(..., description="Token or pool symbol") + tvlUsd: float = Field(..., description="Total Value Locked in USD") + apyBase: Optional[float] = Field(None, description="Base APY without rewards") + apyReward: Optional[float] = Field(None, description="Additional APY from rewards") + apy: Optional[float] = Field(None, description="Total APY including rewards") + rewardTokens: Optional[list[str]] = Field( + None, description="List of reward token addresses" + ) + pool: Optional[str] = Field(None, description="Pool identifier") + apyPct1D: Optional[float] = Field(None, description="1-day APY percentage change") + apyPct7D: Optional[float] = Field(None, description="7-day APY percentage change") + apyPct30D: Optional[float] = Field(None, description="30-day APY percentage change") + stablecoin: bool = Field(False, description="Whether pool involves stablecoins") + ilRisk: str = Field("no", description="Impermanent loss risk assessment") + exposure: str = Field("single", description="Asset exposure type") + predictions: Optional[PredictionData] = Field( + None, description="APY movement predictions" + ) + poolMeta: Optional[str] = Field(None, description="Additional pool metadata") + mu: Optional[float] = Field(None, description="Mean APY value") + sigma: Optional[float] = Field(None, description="APY standard deviation") + count: Optional[int] = Field(None, description="Number of data points") + outlier: bool = Field(False, description="Whether pool is an outlier") + underlyingTokens: Optional[list[str]] = Field( + None, description="List of underlying token addresses" + ) + il7d: Optional[float] = Field(None, description="7-day impermanent loss") + apyBase7d: Optional[float] = Field(None, description="7-day base APY") + apyMean30d: Optional[float] = Field(None, description="30-day mean APY") + volumeUsd1d: Optional[float] = Field(None, description="24h volume in USD") + volumeUsd7d: Optional[float] = Field(None, description="7-day volume in USD") + apyBaseInception: Optional[float] = Field( + None, description="Base APY since inception" + ) + + +class FetchPoolsResponse(BaseModel): + """Response schema for pool data.""" + + status: str = Field("success", description="Response status") + data: list[PoolData] = Field(default_factory=list, description="List of pool data") + error: Optional[str] = Field(None, description="Error message if any") + + +class DefiLlamaFetchPools(DefiLlamaBaseTool): + """Tool for fetching pool data from DeFi Llama. + + This tool retrieves comprehensive data about yield-generating pools, + including TVL, APYs, risk metrics, and predictions. 
+ + Example: + pools_tool = DefiLlamaFetchPools( + skill_store=store, + agent_id="agent_123", + agent_store=agent_store + ) + result = await pools_tool._arun() + """ + + name: str = "defillama_fetch_pools" + description: str = FETCH_POOLS_PROMPT + args_schema: None = None # No input parameters needed + + async def _arun(self, **kwargs) -> FetchPoolsResponse: + """Fetch pool data. + + Returns: + FetchPoolsResponse containing pool data or error + """ + try: + # Check rate limiting + context = self.get_context() + is_rate_limited, error_msg = await self.check_rate_limit(context) + if is_rate_limited: + return FetchPoolsResponse(error=error_msg) + + # Fetch pool data from API + result = await fetch_pools() + + # Check for API errors + if isinstance(result, dict) and "error" in result: + return FetchPoolsResponse(error=result["error"]) + + # Return the response matching the API structure + return FetchPoolsResponse(**result) + + except Exception as e: + return FetchPoolsResponse(error=str(e)) diff --git a/intentkit/skills/dexscreener/README.md b/intentkit/skills/dexscreener/README.md new file mode 100644 index 00000000..e7d9c782 --- /dev/null +++ b/intentkit/skills/dexscreener/README.md @@ -0,0 +1,154 @@ +# DexScreener Skill + +The DexScreener skill provides integration with the DexScreener API to search for cryptocurrency token pairs and retrieve market data including prices, volume, liquidity, and other trading metrics. + +## Overview + +DexScreener is a popular platform for tracking decentralized exchange (DEX) trading pairs across multiple blockchain networks. This skill enables your agent to search for token information and provide users with real-time market data. + +## Skills Available + +### `search_token` + +Searches DexScreener for token pairs matching a query string. Supports searching by: + +- Token symbol (e.g., "WIF", "DOGE") +- Token name (e.g., "Dogwifhat") +- Token address (e.g., "0x...") +- Exact ticker matching with "$" prefix (e.g., "$WIF") +- Pair address + +**Parameters:** + +- `query` (required): The search query string +- `sort_by` (optional): Sort results by "liquidity" (default) or "volume" +- `volume_timeframe` (optional): When sorting by volume, use "24_hour" (default), "6_hour", "1_hour", or "5_minutes" + +### `get_pair_info` + +Retrieves detailed information about a specific trading pair using chain ID and pair address. + +**Parameters:** + +- `chain_id` (required): The blockchain chain ID (e.g., "ethereum", "solana", "bsc", "polygon", "arbitrum", "base", "avalanche") +- `pair_address` (required): The trading pair contract address + +### `get_token_pairs` + +Finds all trading pairs for a specific token using chain ID and token address. + +**Parameters:** + +- `chain_id` (required): The blockchain chain ID +- `token_address` (required): The token contract address + +### `get_tokens_info` + +Retrieves detailed trading pair information for multiple tokens at once (up to 30 tokens). 
+ +**Parameters:** + +- `chain_id` (required): The blockchain chain ID +- `token_addresses` (required): List of token contract addresses (maximum 30) + +## Configuration + +Add to your agent configuration: + +```yaml +skills: + dexscreener: + enabled: true + states: + search_token: public # or "private" or "disabled" + get_pair_info: public # or "private" or "disabled" + get_token_pairs: public # or "private" or "disabled" + get_tokens_info: public # or "private" or "disabled" +``` + +## Example Prompts + +Here are some example prompts that will trigger the DexScreener skill: + +### Basic Token Search + +- "What's the current price of WIF?" +- "Show me information about Dogwifhat token" +- "Find trading pairs for PEPE" +- "Search for Solana tokens with high volume" + +### Address-Based Search + +- "Get token info for address 0x1234567890abcdef1234567890abcdef12345678" +- "Look up this token contract: 0xabc123..." +- "Find pairs for token address 0x..." + +### Exact Ticker Matching + +- "Show me all $WIF pairs" (matches only tokens with symbol "WIF") +- "Find $DOGE trading data" +- "$SOL price and volume information" + +### Sorting and Filtering + +- "Find highest volume tokens in the last hour" +- "Show me tokens sorted by liquidity" +- "Get 6-hour volume data for trending tokens" + +### Market Analysis + +- "What are the most liquid trading pairs right now?" +- "Find new token launches with high volume" +- "Show me tokens with significant price changes in the last 24 hours" +- "Compare liquidity across different DEXes for a token" + +### Specific Pair Analysis + +- "Get detailed info for pair address 0x1234... on Ethereum" +- "Show me the liquidity and volume for this Uniswap pair" +- "Analyze this specific trading pair on Solana" +- "What's the current price and 24h change for pair 0xabc..." + +### Token Pair Discovery + +- "Find all trading pairs for token 0x1234... on Ethereum" +- "Where can I trade this token? Show me all available pairs" +- "List all DEXes that have liquidity for this token address" +- "Find the best liquidity pools for token 0xabc..." + +### Multi-Token Analysis + +- "Compare these 5 token addresses: [list of addresses]" +- "Get trading data for my entire portfolio: [token addresses]" +- "Analyze liquidity across these tokens on Ethereum" +- "Show me pair information for these 10 tokens at once" + +### Portfolio Research + +- "Research this token before I invest: [token name/symbol]" +- "Is this token legitimate? Check its social links" +- "What DEXes is this token trading on?" +- "Show me the trading activity for this pair" + +## Response Format + +The skill returns structured JSON data containing: + +- Token pair information (base/quote tokens) +- Current prices in USD and native currency +- Volume data across different timeframes +- Liquidity information +- Price change percentages +- Market cap and fully diluted valuation +- Social links and website information +- Trading transaction counts + +## Rate Limits + +- 300 requests per minute across all users +- Built-in rate limiting prevents exceeding API limits +- Requests are queued and processed efficiently + +## Data Sources + +All data is sourced from the official DexScreener API, which aggregates information from major decentralized exchanges across multiple blockchain networks including Ethereum, Solana, Binance Smart Chain, and others. 
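+
+## Programmatic Usage
+
+The snippet below is a minimal, illustrative sketch of loading the skills from Python rather than through agent configuration; `store` stands in for whatever `SkillStoreABC` implementation your deployment provides:
+
+```python
+from intentkit.skills.dexscreener import get_skills
+
+config = {
+    "enabled": True,
+    "states": {
+        "search_token": "public",
+        "get_pair_info": "public",
+        "get_token_pairs": "private",
+        "get_tokens_info": "disabled",
+    },
+}
+
+# Inside an async function:
+skills = await get_skills(config, is_private=True, store=store)
+# -> search_token, get_pair_info, get_token_pairs ("disabled" skills are skipped)
+```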
diff --git a/intentkit/skills/dexscreener/__init__.py b/intentkit/skills/dexscreener/__init__.py new file mode 100644 index 00000000..89588ea4 --- /dev/null +++ b/intentkit/skills/dexscreener/__init__.py @@ -0,0 +1,102 @@ +import logging +from typing import Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.dexscreener.base import DexScreenerBaseTool +from intentkit.skills.dexscreener.get_pair_info import GetPairInfo +from intentkit.skills.dexscreener.get_token_pairs import GetTokenPairs +from intentkit.skills.dexscreener.get_tokens_info import GetTokensInfo +from intentkit.skills.dexscreener.search_token import SearchToken + +# Cache skills at the system level, because they are stateless +_cache: dict[str, DexScreenerBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + search_token: SkillState + get_pair_info: SkillState + get_token_pairs: SkillState + get_tokens_info: SkillState + + +_SKILL_NAME_TO_CLASS_MAP: dict[str, type[DexScreenerBaseTool]] = { + "search_token": SearchToken, + "get_pair_info": GetPairInfo, + "get_token_pairs": GetTokenPairs, + "get_tokens_info": GetTokensInfo, +} + + +class Config(SkillConfig): + """Configuration for DexScreener skills.""" + + enabled: bool + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[DexScreenerBaseTool]: + """Get all DexScreener skills. + + Args: + config: The configuration for DexScreener skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of DexScreener skills. + """ + + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + logger.debug(f"Available Skills {available_skills}") + logger.debug(f"Hardcoded Skills {_SKILL_NAME_TO_CLASS_MAP}") + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_dexscreener_skills(name, store) + if skill: + result.append(skill) + return result + + +def get_dexscreener_skills( + name: str, + store: SkillStoreABC, +) -> Optional[DexScreenerBaseTool]: + """Get a DexScreener skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested DexScreener skill + """ + + # Return from cache immediately if already exists + if name in _cache: + return _cache[name] + + skill_class = _SKILL_NAME_TO_CLASS_MAP.get(name) + if not skill_class: + logger.warning(f"Unknown Dexscreener skill: {name}") + return None + + _cache[name] = skill_class(skill_store=store) + return _cache[name] diff --git a/intentkit/skills/dexscreener/base.py b/intentkit/skills/dexscreener/base.py new file mode 100644 index 00000000..5127717c --- /dev/null +++ b/intentkit/skills/dexscreener/base.py @@ -0,0 +1,130 @@ +import json +import logging +from typing import Any, Dict, Optional, Tuple + +import httpx +from pydantic import Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.skills.dexscreener.utils import DEXSCREENER_BASE_URL + +logger = logging.getLogger(__name__) + +# ApiResult still represents (success_data, error_data) +ApiResult = Tuple[Optional[Dict[str, Any]], Optional[Dict[str, Any]]] + + +class DexScreenerBaseTool(IntentKitSkill): + """ + Generic base class for tools interacting with the Dex Screener API. + Handles shared logic like API calls and error reporting via return values. + """ + + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data and configs." + ) + base_url: str = DEXSCREENER_BASE_URL + + @property + def category(self) -> str: + return "dexscreener" + + async def _get( + self, + path: str, + params: Optional[Dict[str, Any]] = None, + ) -> ApiResult: + """ + Makes an asynchronous GET request to the DexScreener API. + + Args: + path: The API endpoint path (e.g., "/dex/search"). + params: Optional dictionary of query parameters. + + Returns: + A tuple (data, error_details): + - (dict, None): On HTTP 2xx success with valid JSON response. + - (None, dict): On any error (API error, connection error, + JSON parsing error, unexpected error). The dict + contains details including an 'error_type'. + """ + if not path.startswith("/"): + path = "/" + path + + url = f"{self.base_url}{path}" + headers = {"Accept": "application/json"} + method = "GET" + + logger.debug(f"Calling DexScreener API: {method} {url} with params: {params}") + response = None # Define response outside try block for access in except + + try: + async with httpx.AsyncClient() as client: + response = await client.request( + method, url, params=params, headers=headers + ) + + # Attempt to parse JSON response text + try: + response_data = response.json() + except json.JSONDecodeError as json_err: + logger.error( + f"Failed to parse JSON response from {url}. Status: {response.status_code}. 
Response text: {response.text}", + exc_info=True, + ) + error_details = { + "error": "Failed to parse DexScreener API response", + "error_type": "parsing_error", + "status_code": response.status_code, + "details": response.text, # Raw text causing the error + "original_exception": str(json_err), + "url": url, + } + return None, error_details # Return parsing error + + # Check HTTP status *after* attempting JSON parse + if response.is_success: # 2xx + logger.debug( + f"DexScreener API success response status: {response.status_code}" + ) + return response_data, None # Success + else: # 4xx/5xx + logger.warning( + f"DexScreener API returned error status: {response.status_code} - {response.text}" + ) + error_details = { + "error": "DexScreener API request failed", + "error_type": "api_error", + "status_code": response.status_code, + "response_body": response_data, # Parsed error body if available + "url": url, + } + return None, error_details # Return API error + + except httpx.RequestError as req_err: + logger.error( + f"Request error connecting to DexScreener API: {req_err}", exc_info=True + ) + error_details = { + "error": "Failed to connect to DexScreener API", + "error_type": "connection_error", + "details": str(req_err), + "url": url, + } + return None, error_details # Return connection error + + except Exception as e: + # Catch any other unexpected errors during the process + logger.exception( + f"An unexpected error occurred during DexScreener API GET call: {e}" + ) + status_code = response.status_code if response else None + error_details = { + "error": "An unexpected error occurred during API call", + "error_type": "unexpected_error", + "status_code": status_code, # Include if available + "details": str(e), + "url": url, + } + return None, error_details # Return unexpected error diff --git a/intentkit/skills/dexscreener/dexscreener.png b/intentkit/skills/dexscreener/dexscreener.png new file mode 100644 index 00000000..f352cfa9 Binary files /dev/null and b/intentkit/skills/dexscreener/dexscreener.png differ diff --git a/intentkit/skills/dexscreener/get_pair_info.py b/intentkit/skills/dexscreener/get_pair_info.py new file mode 100644 index 00000000..cc794fe2 --- /dev/null +++ b/intentkit/skills/dexscreener/get_pair_info.py @@ -0,0 +1,159 @@ +import logging +from typing import Any, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.dexscreener.base import DexScreenerBaseTool +from intentkit.skills.dexscreener.model.search_token_response import PairModel +from intentkit.skills.dexscreener.utils import ( + API_ENDPOINTS, + RATE_LIMITS, + create_error_response, + format_success_response, + truncate_large_fields, +) + +logger = logging.getLogger(__name__) + + +class GetPairInfoInput(BaseModel): + """Input schema for the DexScreener get_pair_info tool.""" + + chain_id: str = Field( + description="The blockchain chain ID (e.g., 'ethereum', 'solana', 'bsc', 'polygon', 'arbitrum', 'base', 'avalanche')" + ) + pair_address: str = Field( + description="The trading pair contract address (e.g., '0x1234...abcd' for Ethereum-based chains)" + ) + + +class GetPairInfo(DexScreenerBaseTool): + """ + Tool to get detailed information about a specific trading pair on DexScreener. + """ + + name: str = "dexscreener_get_pair_info" + description: str = ( + "Retrieves detailed information about a specific trading pair using chain ID and pair address. 
" + "Returns comprehensive data including current price, volume, liquidity, price changes, " + "market cap, FDV, transaction counts, and social links. " + "Use this tool when you have a specific pair address and need detailed trading metrics." + ) + args_schema: Type[BaseModel] = GetPairInfoInput + + async def _arun( + self, + chain_id: str, + pair_address: str, + **kwargs: Any, + ) -> str: + """Implementation to get specific pair information.""" + + # Apply rate limiting + await self.user_rate_limit_by_category( + user_id=f"{self.category}{self.name}", + limit=RATE_LIMITS["pairs"], + minutes=1, + ) + + logger.info( + f"Executing DexScreener get_pair_info tool with chain_id: '{chain_id}', " + f"pair_address: '{pair_address}'" + ) + + try: + # Construct API path + api_path = f"{API_ENDPOINTS['pairs']}/{chain_id}/{pair_address}" + + data, error_details = await self._get(path=api_path) + + if error_details: + return await self._handle_error_response(error_details) + + if not data: + logger.error(f"No data returned for pair {pair_address} on {chain_id}") + return create_error_response( + error_type="empty_success", + message="API call returned empty success response.", + additional_data={ + "chain_id": chain_id, + "pair_address": pair_address, + }, + ) + + # The API returns a single pair object, not wrapped in a pairs array + if not isinstance(data, dict): + return create_error_response( + error_type="format_error", + message="Unexpected response format - expected object", + additional_data={ + "chain_id": chain_id, + "pair_address": pair_address, + }, + ) + + try: + # Validate the response using our existing PairModel + pair_data = PairModel.model_validate(data) + logger.info( + f"Successfully retrieved pair info for {pair_address} on {chain_id}" + ) + + return format_success_response( + { + "pair": pair_data.model_dump(), + "chain_id": chain_id, + "pair_address": pair_address, + } + ) + + except Exception as validation_error: + logger.error( + f"Failed to validate pair response for {pair_address} on {chain_id}: {validation_error}", + exc_info=True, + ) + # Return raw data if validation fails + return format_success_response( + { + "pair": data, + "chain_id": chain_id, + "pair_address": pair_address, + "validation_warning": "Response structure may have changed", + } + ) + + except Exception as e: + return await self._handle_unexpected_runtime_error( + e, f"{chain_id}/{pair_address}" + ) + + async def _handle_error_response(self, error_details: dict) -> str: + """Formats error details (from _get) into a JSON string.""" + if error_details.get("error_type") in [ + "connection_error", + "parsing_error", + "unexpected_error", + ]: + logger.error( + f"DexScreener get_pair_info tool encountered an error: {error_details}" + ) + else: # api_error + logger.warning(f"DexScreener API returned an error: {error_details}") + + # Truncate potentially large fields before returning to user/LLM + truncated_details = truncate_large_fields(error_details) + return format_success_response(truncated_details) + + async def _handle_unexpected_runtime_error( + self, e: Exception, query_info: str + ) -> str: + """Formats unexpected runtime exception details into a JSON string.""" + logger.exception( + f"An unexpected runtime error occurred in get_pair_info tool _arun method for {query_info}: {e}" + ) + return create_error_response( + error_type="runtime_error", + message="An unexpected internal error occurred processing the pair info request", + details=str(e), + additional_data={"query_info": query_info}, + ) diff 
--git a/intentkit/skills/dexscreener/get_token_pairs.py b/intentkit/skills/dexscreener/get_token_pairs.py new file mode 100644 index 00000000..56a89103 --- /dev/null +++ b/intentkit/skills/dexscreener/get_token_pairs.py @@ -0,0 +1,166 @@ +import logging +from typing import Any, Type + +from pydantic import BaseModel, Field, ValidationError + +from intentkit.skills.dexscreener.base import DexScreenerBaseTool +from intentkit.skills.dexscreener.model.search_token_response import ( + SearchTokenResponseModel, +) +from intentkit.skills.dexscreener.utils import ( + API_ENDPOINTS, + RATE_LIMITS, + create_error_response, + create_no_results_response, + format_success_response, + get_liquidity_value, + handle_validation_error, + truncate_large_fields, +) + +logger = logging.getLogger(__name__) + + +class GetTokenPairsInput(BaseModel): + """Input schema for the DexScreener get_token_pairs tool.""" + + chain_id: str = Field( + description="The blockchain chain ID (e.g., 'ethereum', 'solana', 'bsc', 'polygon', 'arbitrum', 'base', 'avalanche')" + ) + token_address: str = Field( + description="The token contract address (e.g., '0x1234...abcd' for Ethereum-based chains)" + ) + + +class GetTokenPairs(DexScreenerBaseTool): + """ + Tool to get all trading pairs for a specific token on DexScreener. + """ + + name: str = "dexscreener_get_token_pairs" + description: str = ( + "Finds all trading pairs for a specific token using chain ID and token address. " + "Returns a list of all pools/pairs where this token is traded, including pair addresses, " + "DEX information, liquidity, volume, and pricing data for each pair. " + "Use this tool to analyze all available trading venues and liquidity sources for a specific token." + ) + args_schema: Type[BaseModel] = GetTokenPairsInput + + async def _arun( + self, + chain_id: str, + token_address: str, + **kwargs: Any, + ) -> str: + """Implementation to get all pairs for a specific token.""" + + # Apply rate limiting + await self.user_rate_limit_by_category( + user_id=f"{self.category}{self.name}", + limit=RATE_LIMITS["token_pairs"], + minutes=1, + ) + + logger.info( + f"Executing DexScreener get_token_pairs tool with chain_id: '{chain_id}', " + f"token_address: '{token_address}'" + ) + + try: + # Construct API path + api_path = f"{API_ENDPOINTS['token_pairs']}/{chain_id}/{token_address}" + + data, error_details = await self._get(path=api_path) + + if error_details: + return await self._handle_error_response(error_details) + + if not data: + logger.error( + f"No data returned for token {token_address} on {chain_id}" + ) + return create_error_response( + error_type="empty_success", + message="API call returned empty success response.", + additional_data={ + "chain_id": chain_id, + "token_address": token_address, + }, + ) + + try: + # Validate response using SearchTokenResponseModel since API returns similar structure + result = SearchTokenResponseModel.model_validate(data) + except ValidationError as e: + return handle_validation_error( + e, f"{chain_id}/{token_address}", len(str(data)) + ) + + if not result.pairs: + return create_no_results_response( + f"{chain_id}/{token_address}", + reason="no trading pairs found for this token", + ) + + pairs_list = [p for p in result.pairs if p is not None] + + if not pairs_list: + return create_no_results_response( + f"{chain_id}/{token_address}", + reason="all pairs were null or invalid", + ) + + # Sort pairs by liquidity (highest first) for better UX + try: + pairs_list.sort(key=get_liquidity_value, reverse=True) + except Exception 
as sort_err: + logger.warning(f"Failed to sort pairs by liquidity: {sort_err}") + + logger.info( + f"Found {len(pairs_list)} pairs for token {token_address} on {chain_id}" + ) + + return format_success_response( + { + "pairs": [p.model_dump() for p in pairs_list], + "chain_id": chain_id, + "token_address": token_address, + "total_pairs": len(pairs_list), + } + ) + + except Exception as e: + return await self._handle_unexpected_runtime_error( + e, f"{chain_id}/{token_address}" + ) + + async def _handle_error_response(self, error_details: dict) -> str: + """Formats error details (from _get) into a JSON string.""" + if error_details.get("error_type") in [ + "connection_error", + "parsing_error", + "unexpected_error", + ]: + logger.error( + f"DexScreener get_token_pairs tool encountered an error: {error_details}" + ) + else: # api_error + logger.warning(f"DexScreener API returned an error: {error_details}") + + # Truncate potentially large fields before returning to user/LLM + truncated_details = truncate_large_fields(error_details) + return format_success_response(truncated_details) + + async def _handle_unexpected_runtime_error( + self, e: Exception, query_info: str + ) -> str: + """Formats unexpected runtime exception details into a JSON string.""" + logger.exception( + f"An unexpected runtime error occurred in get_token_pairs tool _arun method for {query_info}: {e}" + ) + return create_error_response( + error_type="runtime_error", + message="An unexpected internal error occurred processing the token pairs request", + details=str(e), + additional_data={"query_info": query_info}, + ) diff --git a/intentkit/skills/dexscreener/get_tokens_info.py b/intentkit/skills/dexscreener/get_tokens_info.py new file mode 100644 index 00000000..eb75f59a --- /dev/null +++ b/intentkit/skills/dexscreener/get_tokens_info.py @@ -0,0 +1,213 @@ +import logging +from typing import Any, List, Type + +from pydantic import BaseModel, Field, ValidationError, field_validator + +from intentkit.skills.dexscreener.base import DexScreenerBaseTool +from intentkit.skills.dexscreener.model.search_token_response import ( + SearchTokenResponseModel, +) +from intentkit.skills.dexscreener.utils import ( + API_ENDPOINTS, + MAX_TOKENS_BATCH, + RATE_LIMITS, + create_error_response, + create_no_results_response, + format_success_response, + get_liquidity_value, + group_pairs_by_token, + handle_validation_error, + truncate_large_fields, +) + +logger = logging.getLogger(__name__) + + +class GetTokensInfoInput(BaseModel): + """Input schema for the DexScreener get_tokens_info tool.""" + + chain_id: str = Field( + description="The blockchain chain ID (e.g., 'ethereum', 'solana', 'bsc', 'polygon', 'arbitrum', 'base', 'avalanche')" + ) + token_addresses: List[str] = Field( + description=f"List of token contract addresses to retrieve info for (maximum {MAX_TOKENS_BATCH} addresses). " + "Each address should be in the format '0x1234...abcd' for Ethereum-based chains." 
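+        # Note (descriptive comment): the validator below enforces MAX_TOKENS_BATCH
+        # on the raw input list first, then removes duplicate addresses while
+        # preserving their original order.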
+ ) + + @field_validator("token_addresses") + @classmethod + def validate_token_addresses(cls, v: List[str]) -> List[str]: + if not v: + raise ValueError("At least one token address is required") + if len(v) > MAX_TOKENS_BATCH: + raise ValueError(f"Maximum {MAX_TOKENS_BATCH} token addresses allowed") + # Remove duplicates while preserving order + seen = set() + unique_addresses = [] + for addr in v: + if addr not in seen: + seen.add(addr) + unique_addresses.append(addr) + return unique_addresses + + +class GetTokensInfo(DexScreenerBaseTool): + """ + Tool to get detailed information for multiple tokens at once on DexScreener. + """ + + name: str = "dexscreener_get_tokens_info" + description: str = ( + f"Retrieves detailed trading pair information for multiple tokens (up to {MAX_TOKENS_BATCH}) " + "using chain ID and a list of token addresses. For each token, returns all available " + "trading pairs with price, volume, liquidity, market data, and DEX information. " + "This is more efficient than making individual calls when you need info for multiple tokens. " + "Use this tool for portfolio analysis or comparing multiple tokens at once." + ) + args_schema: Type[BaseModel] = GetTokensInfoInput + + async def _arun( + self, + chain_id: str, + token_addresses: List[str], + **kwargs: Any, + ) -> str: + """Implementation to get information for multiple tokens.""" + + # Apply rate limiting + await self.user_rate_limit_by_category( + user_id=f"{self.category}{self.name}", + limit=RATE_LIMITS["tokens"], + minutes=1, + ) + + logger.info( + f"Executing DexScreener get_tokens_info tool with chain_id: '{chain_id}', " + f"token_addresses: {len(token_addresses)} tokens" + ) + + try: + # Construct API path - addresses are comma-separated + addresses_param = ",".join(token_addresses) + api_path = f"{API_ENDPOINTS['tokens']}/{chain_id}/{addresses_param}" + + data, error_details = await self._get(path=api_path) + + if error_details: + return await self._handle_error_response(error_details) + + if not data: + logger.error(f"No data returned for tokens on {chain_id}") + return create_error_response( + error_type="empty_success", + message="API call returned empty success response.", + additional_data={ + "chain_id": chain_id, + "token_addresses": token_addresses, + }, + ) + + try: + # Validate response using SearchTokenResponseModel since API returns similar structure + result = SearchTokenResponseModel.model_validate(data) + except ValidationError as e: + return handle_validation_error( + e, f"{chain_id}/{len(token_addresses)} tokens", len(str(data)) + ) + + if not result.pairs: + return create_no_results_response( + f"{chain_id} - {len(token_addresses)} tokens", + reason="no trading pairs found for any of the specified tokens", + additional_data={ + "chain_id": chain_id, + "requested_addresses": token_addresses, + "tokens_data": {}, + "all_pairs": [], + "found_tokens": 0, + "total_pairs": 0, + }, + ) + + pairs_list = [p for p in result.pairs if p is not None] + + if not pairs_list: + return create_no_results_response( + f"{chain_id} - {len(token_addresses)} tokens", + reason="all pairs were null or invalid", + additional_data={ + "chain_id": chain_id, + "requested_addresses": token_addresses, + "tokens_data": {}, + "all_pairs": [], + "found_tokens": 0, + "total_pairs": 0, + }, + ) + + # Group pairs by token address for better organization + tokens_data = group_pairs_by_token(pairs_list) + + # Sort pairs within each token by liquidity (highest first) + for token_addr, pairs in tokens_data.items(): + try: + 
pairs.sort(key=get_liquidity_value, reverse=True) + except Exception as sort_err: + logger.warning( + f"Failed to sort pairs for token {token_addr}: {sort_err}" + ) + + logger.info( + f"Found {len(pairs_list)} total pairs across {len(tokens_data)} tokens " + f"for {len(token_addresses)} requested addresses on {chain_id}" + ) + + return format_success_response( + { + "tokens_data": { + addr: [p.model_dump() for p in pairs] + for addr, pairs in tokens_data.items() + }, + "all_pairs": [p.model_dump() for p in pairs_list], + "chain_id": chain_id, + "requested_addresses": token_addresses, + "found_tokens": len(tokens_data), + "total_pairs": len(pairs_list), + } + ) + + except Exception as e: + return await self._handle_unexpected_runtime_error( + e, f"{chain_id}/{len(token_addresses)} tokens" + ) + + async def _handle_error_response(self, error_details: dict) -> str: + """Formats error details (from _get) into a JSON string.""" + if error_details.get("error_type") in [ + "connection_error", + "parsing_error", + "unexpected_error", + ]: + logger.error( + f"DexScreener get_tokens_info tool encountered an error: {error_details}" + ) + else: # api_error + logger.warning(f"DexScreener API returned an error: {error_details}") + + # Truncate potentially large fields before returning to user/LLM + truncated_details = truncate_large_fields(error_details) + return format_success_response(truncated_details) + + async def _handle_unexpected_runtime_error( + self, e: Exception, query_info: str + ) -> str: + """Formats unexpected runtime exception details into a JSON string.""" + logger.exception( + f"An unexpected runtime error occurred in get_tokens_info tool _arun method for {query_info}: {e}" + ) + return create_error_response( + error_type="runtime_error", + message="An unexpected internal error occurred processing the tokens info request", + details=str(e), + additional_data={"query_info": query_info}, + ) diff --git a/intentkit/skills/dexscreener/model/__init__.py b/intentkit/skills/dexscreener/model/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/dexscreener/model/search_token_response.py b/intentkit/skills/dexscreener/model/search_token_response.py new file mode 100644 index 00000000..cd179bee --- /dev/null +++ b/intentkit/skills/dexscreener/model/search_token_response.py @@ -0,0 +1,82 @@ +from typing import List, Optional + +from pydantic import BaseModel + + +class TokenModel(BaseModel): + address: Optional[str] = None + name: Optional[str] = None + symbol: Optional[str] = None + + +class TxnsDetailsModel(BaseModel): + buys: Optional[int] = None + sells: Optional[int] = None + + +class TxnsModel(BaseModel): + m5: Optional[TxnsDetailsModel] = None + h1: Optional[TxnsDetailsModel] = None + h6: Optional[TxnsDetailsModel] = None + h24: Optional[TxnsDetailsModel] = None + + +class VolumeModel(BaseModel): + h24: Optional[float] = None + h6: Optional[float] = None + h1: Optional[float] = None + m5: Optional[float] = None + + +class PriceChangeModel(BaseModel): + m5: Optional[float] = None + h1: Optional[float] = None + h6: Optional[float] = None + h24: Optional[float] = None + + +class LiquidityModel(BaseModel): + usd: Optional[float] = None + base: Optional[float] = None + quote: Optional[float] = None + + +class WebsiteModel(BaseModel): + label: Optional[str] = None + url: Optional[str] = None + + +class SocialModel(BaseModel): + type: Optional[str] = None + url: Optional[str] = None + + +class InfoModel(BaseModel): + imageUrl: Optional[str] = None + websites: 
Optional[List[Optional[WebsiteModel]]] = None + socials: Optional[List[Optional[SocialModel]]] = None + + +class PairModel(BaseModel): + chainId: Optional[str] = None + dexId: Optional[str] = None + url: Optional[str] = None + pairAddress: Optional[str] = None + labels: Optional[List[Optional[str]]] = None + baseToken: Optional[TokenModel] = None + quoteToken: Optional[TokenModel] = None + priceNative: Optional[str] = None + priceUsd: Optional[str] = None + txns: Optional[TxnsModel] = None + volume: Optional[VolumeModel] = None + priceChange: Optional[PriceChangeModel] = None + liquidity: Optional[LiquidityModel] = None + fdv: Optional[float] = None + marketCap: Optional[float] = None + pairCreatedAt: Optional[int] = None + info: Optional[InfoModel] = None + + +class SearchTokenResponseModel(BaseModel): + schemaVersion: Optional[str] = None + pairs: Optional[List[Optional[PairModel]]] = None diff --git a/intentkit/skills/dexscreener/schema.json b/intentkit/skills/dexscreener/schema.json new file mode 100644 index 00000000..6fb34a20 --- /dev/null +++ b/intentkit/skills/dexscreener/schema.json @@ -0,0 +1,93 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Dexscreener", + "description": "Integration with DexScreener API, enabling crypto token pair information", + "type": "object", + "x-icon": "https://ai.service.crestal.dev/skills/dexscreener/dexscreener.png", + "x-tags": [ + "Crypto", + "Market Data", + "Finance", + "Blockchain" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Enable or disable the Dexscreener skill.", + "default": false + }, + "states": { + "type": "object", + "title": "Skill States", + "description": "Enable/disable specific tools. Only enable one if you want a consistent characteristic for your agent", + "properties": { + "search_token": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Searches on DexScreener for token pairs matching a query (symbol, name, address). Returns up to 25 pairs sorted by 'liquidity' or 'volume' with timeframe options, including price, volume, etc. Use this tool to find token information based on user queries.", + "default": "disabled" + }, + "get_pair_info": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves detailed information about a specific trading pair using chain ID and pair address. Returns comprehensive data including current price, volume, liquidity, price changes, market cap, FDV, transaction counts, and social links.", + "default": "disabled" + }, + "get_token_pairs": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Finds all trading pairs for a specific token using chain ID and token address. 
Returns a list of all pools/pairs where this token is traded, including pair addresses, DEX information, liquidity, volume, and pricing data for each pair.", + "default": "disabled" + }, + "get_tokens_info": { + "type": "string", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves detailed trading pair information for multiple tokens (up to 30) using chain ID and a list of token addresses. More efficient than making individual calls when you need info for multiple tokens. Use for portfolio analysis or comparing multiple tokens at once.", + "default": "disabled" + } + } + } + }, + "required": [ + "enabled", + "states" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/dexscreener/search_token.py b/intentkit/skills/dexscreener/search_token.py new file mode 100644 index 00000000..f0c200a7 --- /dev/null +++ b/intentkit/skills/dexscreener/search_token.py @@ -0,0 +1,184 @@ +import logging +from typing import Any, Optional, Type + +from pydantic import BaseModel, Field, ValidationError + +from intentkit.skills.dexscreener.base import DexScreenerBaseTool +from intentkit.skills.dexscreener.model.search_token_response import ( + SearchTokenResponseModel, +) +from intentkit.skills.dexscreener.utils import ( + API_ENDPOINTS, + MAX_SEARCH_RESULTS, + SEARCH_DISCLAIMER, + QueryType, + SortBy, + VolumeTimeframe, + create_error_response, + create_no_results_response, + determine_query_type, + filter_address_pairs, + filter_ticker_pairs, + format_success_response, + handle_validation_error, + sort_pairs_by_criteria, + truncate_large_fields, +) + +logger = logging.getLogger(__name__) + + +class SearchTokenInput(BaseModel): + """Input schema for the DexScreener search_token tool.""" + + query: str = Field( + description="The search query string (e.g., token symbol 'WIF', pair address, token address '0x...', token name 'Dogwifhat', or ticker '$WIF'). Prefixing with '$' filters results to match the base token symbol exactly (case-insensitive)." + ) + sort_by: Optional[SortBy] = Field( + default=SortBy.LIQUIDITY, + description="Sort preference for the results. Options: 'liquidity' (default) or 'volume'", + ) + volume_timeframe: Optional[VolumeTimeframe] = Field( + default=VolumeTimeframe.TWENTY_FOUR_HOUR, + description="Defines which timeframe to use when 'sort_by' is 'volume'. Available options: '5_minutes', '1_hour', '6_hour', '24_hour'", + ) + + +class SearchToken(DexScreenerBaseTool): + """ + Tool to search for token pairs on DexScreener based on a query string. + """ + + name: str = "dexscreener_search_token" + description: str = ( + f"Searches DexScreener for token pairs matching the provided query string " + f"(e.g., token symbol like 'WIF', pair address, token name like 'Dogwifhat', or ticker like '$WIF'). " + f"If the query starts with '$', it filters results to only include pairs where the base token symbol exactly matches the ticker (case-insensitive). " + f"Returns a list of matching pairs with details like price, volume, liquidity, etc., " + f"sorted by the specified criteria (via 'sort_by': 'liquidity', 'volume'; defaults to 'liquidity'), " + f"limited to the top {MAX_SEARCH_RESULTS}. " + f"Use this tool to find token information based on user queries."
+ ) + args_schema: Type[BaseModel] = SearchTokenInput + + async def _arun( + self, + query: str, + sort_by: Optional[SortBy] = SortBy.LIQUIDITY, + volume_timeframe: Optional[VolumeTimeframe] = VolumeTimeframe.TWENTY_FOUR_HOUR, + **kwargs: Any, + ) -> str: + """Implementation to search token, with filtering based on query type.""" + + # DexScreener allows 300 requests per minute (shared across all users), per its docs: + # https://docs.dexscreener.com/api/reference#get-latest-dex-search + await self.user_rate_limit_by_category( + # use a hardcoded user_id so the limit applies across all users + user_id=f"{self.category}{self.name}", + limit=300, + minutes=1, + ) + + sort_by = sort_by or SortBy.LIQUIDITY + volume_timeframe = volume_timeframe or VolumeTimeframe.TWENTY_FOUR_HOUR + + # Determine query type + query_type = determine_query_type(query) + + # Process query based on type + if query_type == QueryType.TICKER: + search_query = query[1:] # Remove the '$' prefix + target_ticker = search_query.upper() + else: + search_query = query + target_ticker = None + + logger.info( + f"Executing DexScreener search_token tool with query: '{query}' " + f"(interpreted as {query_type.value} search for '{search_query}'), " + f"sort_by: {sort_by}" + ) + + try: + data, error_details = await self._get( + path=API_ENDPOINTS["search"], params={"q": search_query} + ) + + if error_details: + return await self._handle_error_response(error_details) + if not data: + logger.error(f"No data or error details returned for query '{query}'") + return create_error_response( + error_type="empty_success", + message="API call returned empty success response.", + additional_data={"query": query}, + ) + + try: + result = SearchTokenResponseModel.model_validate(data) + except ValidationError as e: + return handle_validation_error(e, query, len(str(data))) + + if not result.pairs: + return create_no_results_response( + query, reason="returned null or empty for pairs" + ) + + pairs_list = [p for p in result.pairs if p is not None] + + # Apply filtering based on query type + if query_type == QueryType.TICKER and target_ticker: + pairs_list = filter_ticker_pairs(pairs_list, target_ticker) + if not pairs_list: + return create_no_results_response( + query, reason=f"no match for ticker '${target_ticker}'" + ) + elif query_type == QueryType.ADDRESS: + pairs_list = filter_address_pairs(pairs_list, search_query) + if not pairs_list: + return create_no_results_response( + query, reason=f"no match for address '{search_query}'" + ) + + # Sort pairs by specified criteria + pairs_list = sort_pairs_by_criteria(pairs_list, sort_by, volume_timeframe) + + # If sorting failed, pairs_list will be returned unchanged by the utility function + + final_count = min(len(pairs_list), MAX_SEARCH_RESULTS) + logger.info(f"Returning {final_count} pairs for query '{query}'") + return format_success_response( + { + **SEARCH_DISCLAIMER, + "pairs": [p.model_dump() for p in pairs_list[:MAX_SEARCH_RESULTS]], + } + ) + except Exception as e: + return await self._handle_unexpected_runtime_error(e, query) + + async def _handle_error_response(self, error_details: dict) -> str: + """Formats error details (from _get) into a JSON string.""" + if error_details.get("error_type") in [ + "connection_error", + "parsing_error", + "unexpected_error", + ]: + logger.error(f"DexScreener tool encountered an error: {error_details}") + else: # api_error + logger.warning(f"DexScreener API returned an error: {error_details}") + + # Truncate potentially large fields before returning to
user/LLM + truncated_details = truncate_large_fields(error_details) + return format_success_response(truncated_details) + + async def _handle_unexpected_runtime_error(self, e: Exception, query: str) -> str: + """Formats unexpected runtime exception details into a JSON string.""" + logger.exception( + f"An unexpected runtime error occurred in search_token tool _arun method for query '{query}': {e}" + ) + return create_error_response( + error_type="runtime_error", + message="An unexpected internal error occurred processing the search request", + details=str(e), + additional_data={"query": query}, + ) diff --git a/intentkit/skills/dexscreener/utils.py b/intentkit/skills/dexscreener/utils.py new file mode 100644 index 00000000..f3cf307e --- /dev/null +++ b/intentkit/skills/dexscreener/utils.py @@ -0,0 +1,419 @@ +""" +Utility functions and constants for DexScreener skills. +""" + +import json +import logging +from enum import Enum +from typing import Any, Callable, Dict, List, Optional + +from pydantic import ValidationError + +from intentkit.skills.dexscreener.model.search_token_response import PairModel + +logger = logging.getLogger(__name__) + +# API Base URL +DEXSCREENER_BASE_URL = "https://api.dexscreener.com" + +# API Endpoints +API_ENDPOINTS = { + "search": "/latest/dex/search", + "pairs": "/latest/dex/pairs", + "token_pairs": "/token-pairs/v1", + "tokens": "/tokens/v1", + "token_profiles": "/token-profiles/latest/v1", + "token_boosts_latest": "/token-boosts/latest/v1", + "token_boosts_top": "/token-boosts/top/v1", + "orders": "/orders/v1", +} + +# Rate Limits (requests per minute) +RATE_LIMITS = { + "search": 300, + "pairs": 300, + "token_pairs": 300, + "tokens": 300, + "token_profiles": 60, + "token_boosts": 60, + "orders": 60, +} + +# Limits +MAX_SEARCH_RESULTS = 25 +MAX_TOKENS_BATCH = 30 + +# Common disclaimer for search results +SEARCH_DISCLAIMER = { + "disclaimer": ( + "Search results may include unofficial, duplicate, or potentially malicious tokens. " + "If multiple unrelated tokens share a similar name or ticker, ask the user for the exact token address. " + "If the correct token is not found, re-run the tool using the provided address. " + "Also advise the user to verify the token's legitimacy via its official social links included in the result." + ) +} + + +# Query Types +class QueryType(str, Enum): + TEXT = "TEXT" + TICKER = "TICKER" + ADDRESS = "ADDRESS" + + +# Sort Options +class SortBy(str, Enum): + LIQUIDITY = "liquidity" + VOLUME = "volume" + + +# Volume Timeframes +class VolumeTimeframe(str, Enum): + FIVE_MINUTES = "5_minutes" + ONE_HOUR = "1_hour" + SIX_HOUR = "6_hour" + TWENTY_FOUR_HOUR = "24_hour" + + +# Supported Chain IDs +SUPPORTED_CHAINS = [ + "ethereum", + "bsc", + "polygon", + "avalanche", + "fantom", + "cronos", + "arbitrum", + "optimism", + "base", + "solana", + "sui", + "tron", + "ton", +] + + +def determine_query_type(query: str) -> QueryType: + """ + Determine whether the query is a TEXT, TICKER, or ADDRESS. + + Args: + query: The search query string + + Returns: + QueryType enum value + """ + if query.startswith("0x"): + return QueryType.ADDRESS + if query.startswith("$"): + return QueryType.TICKER + return QueryType.TEXT + + +def get_liquidity_value(pair: PairModel) -> float: + """ + Extract liquidity USD value from a pair, defaulting to 0.0 if not available. 
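+
+    Illustrative example (hypothetical values; pydantic coerces the nested
+    dict into a LiquidityModel):
+
+        >>> get_liquidity_value(PairModel(liquidity={"usd": 1234.5}))
+        1234.5
+        >>> get_liquidity_value(PairModel())
+        0.0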
+ + Args: + pair: PairModel instance + + Returns: + Liquidity value in USD as float + """ + return ( + pair.liquidity.usd if pair.liquidity and pair.liquidity.usd is not None else 0.0 + ) + + +def get_volume_value( + pair: PairModel, timeframe: VolumeTimeframe = VolumeTimeframe.TWENTY_FOUR_HOUR +) -> float: + """ + Extract volume value from a pair for the specified timeframe. + + Args: + pair: PairModel instance + timeframe: VolumeTimeframe enum value + + Returns: + Volume value as float + """ + if not pair.volume: + return 0.0 + + volume_map = { + VolumeTimeframe.FIVE_MINUTES: pair.volume.m5, + VolumeTimeframe.ONE_HOUR: pair.volume.h1, + VolumeTimeframe.SIX_HOUR: pair.volume.h6, + VolumeTimeframe.TWENTY_FOUR_HOUR: pair.volume.h24, + } + + return volume_map.get(timeframe, 0.0) or 0.0 + + +def get_sort_function( + sort_by: SortBy, + volume_timeframe: VolumeTimeframe = VolumeTimeframe.TWENTY_FOUR_HOUR, +) -> Callable[[PairModel], float]: + """ + Get the appropriate sorting function based on sort criteria. + + Args: + sort_by: SortBy enum value + volume_timeframe: VolumeTimeframe enum value (used when sorting by volume) + + Returns: + Callable function that takes a PairModel and returns a float for sorting + """ + if sort_by == SortBy.LIQUIDITY: + return get_liquidity_value + elif sort_by == SortBy.VOLUME: + return lambda pair: get_volume_value(pair, volume_timeframe) + else: + logger.warning(f"Invalid sort_by value '{sort_by}', defaulting to liquidity.") + return get_liquidity_value + + +def sort_pairs_by_criteria( + pairs: List[PairModel], + sort_by: SortBy = SortBy.LIQUIDITY, + volume_timeframe: VolumeTimeframe = VolumeTimeframe.TWENTY_FOUR_HOUR, + reverse: bool = True, +) -> List[PairModel]: + """ + Sort pairs by the specified criteria. + + Args: + pairs: List of PairModel instances to sort + sort_by: Sorting criteria (liquidity or volume) + volume_timeframe: Timeframe for volume sorting + reverse: Sort in descending order if True + + Returns: + Sorted list of PairModel instances + """ + try: + sort_func = get_sort_function(sort_by, volume_timeframe) + return sorted(pairs, key=sort_func, reverse=reverse) + except Exception as e: + logger.error(f"Failed to sort pairs: {e}", exc_info=True) + return pairs # Return original list if sorting fails + + +def filter_ticker_pairs(pairs: List[PairModel], target_ticker: str) -> List[PairModel]: + """ + Filter pairs to only include those where base token symbol matches target ticker. + + Args: + pairs: List of PairModel instances + target_ticker: Target ticker symbol (case-insensitive) + + Returns: + Filtered list of PairModel instances + """ + target_ticker_upper = target_ticker.upper() + return [ + p + for p in pairs + if p.baseToken + and p.baseToken.symbol + and p.baseToken.symbol.upper() == target_ticker_upper + ] + + +def filter_address_pairs( + pairs: List[PairModel], target_address: str +) -> List[PairModel]: + """ + Filter pairs to only include those matching the target address. + Checks pairAddress, baseToken.address, and quoteToken.address. 
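+
+    Matching is case-insensitive, so checksummed and lowercase forms of the
+    same address compare equal. Illustrative example (hypothetical address):
+
+        >>> pair = PairModel(pairAddress="0xAbC123")
+        >>> len(filter_address_pairs([pair], "0XABC123"))
+        1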
+ + Args: + pairs: List of PairModel instances + target_address: Target address (case-insensitive) + + Returns: + Filtered list of PairModel instances + """ + target_address_lower = target_address.lower() + return [ + p + for p in pairs + if (p.pairAddress and p.pairAddress.lower() == target_address_lower) + or ( + p.baseToken + and p.baseToken.address + and p.baseToken.address.lower() == target_address_lower + ) + or ( + p.quoteToken + and p.quoteToken.address + and p.quoteToken.address.lower() == target_address_lower + ) + ] + + +def create_error_response( + error_type: str, + message: str, + details: Optional[str] = None, + additional_data: Optional[Dict[str, Any]] = None, +) -> str: + """ + Create a standardized error response in JSON format. + + Args: + error_type: Type/category of error + message: Human-readable error message + details: Optional additional details about the error + additional_data: Optional dictionary of additional data to include + + Returns: + JSON string containing error information + """ + response = { + "error": message, + "error_type": error_type, + } + + if details: + response["details"] = details + + if additional_data: + response.update(additional_data) + + return json.dumps(response, indent=2) + + +def create_no_results_response( + query_info: str, + reason: str = "no results found", + additional_data: Optional[Dict[str, Any]] = None, +) -> str: + """ + Create a standardized "no results found" response. + + Args: + query_info: Information about the query that was performed + reason: Reason why no results were found + additional_data: Optional additional data to include + + Returns: + JSON string containing no results information + """ + response = { + "message": f"No results found for the query. Reason: {reason}.", + "query_info": query_info, + "pairs": [], + } + + if additional_data: + response.update(additional_data) + + return json.dumps(response, indent=2) + + +def handle_validation_error( + error: ValidationError, query_info: str, data_length: Optional[int] = None +) -> str: + """ + Handle validation errors in a standardized way. + + Args: + error: The ValidationError that occurred + query_info: Information about the query being processed + data_length: Optional length of the data that failed validation + + Returns: + JSON error response string + """ + log_message = f"Failed to validate DexScreener response structure for {query_info}. Error: {error}" + if data_length: + log_message += f". Raw data length: {data_length}" + + logger.error(log_message, exc_info=True) + + return create_error_response( + error_type="validation_error", + message="Failed to parse successful DexScreener API response", + details=str(error.errors()), + additional_data={"query_info": query_info}, + ) + + +def truncate_large_fields( + data: Dict[str, Any], max_length: int = 500 +) -> Dict[str, Any]: + """ + Truncate large string fields in error response data to avoid overwhelming the LLM. + + Args: + data: Dictionary potentially containing large string fields + max_length: Maximum length for string fields before truncation + + Returns: + Dictionary with truncated fields + """ + truncated = data.copy() + + for key in ["details", "response_body"]: + if isinstance(truncated.get(key), str) and len(truncated[key]) > max_length: + truncated[key] = truncated[key][:max_length] + "... (truncated)" + + return truncated + + +def group_pairs_by_token(pairs: List[PairModel]) -> Dict[str, List[PairModel]]: + """ + Group pairs by token address for better organization in multi-token responses. 
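+
+    Note that a pair is indexed under both its base and quote token addresses
+    (lowercased), so a single pair can appear in two buckets. Illustrative
+    example (hypothetical addresses):
+
+        >>> pair = PairModel(baseToken={"address": "0xAAA"}, quoteToken={"address": "0xBBB"})
+        >>> sorted(group_pairs_by_token([pair]).keys())
+        ['0xaaa', '0xbbb']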
+ + Args: + pairs: List of PairModel instances + + Returns: + Dictionary mapping lowercase token addresses to lists of pairs + """ + tokens_data = {} + + for pair in pairs: + # Group by base token address + if pair.baseToken and pair.baseToken.address: + base_addr = pair.baseToken.address.lower() + if base_addr not in tokens_data: + tokens_data[base_addr] = [] + tokens_data[base_addr].append(pair) + + # Group by quote token address + if pair.quoteToken and pair.quoteToken.address: + quote_addr = pair.quoteToken.address.lower() + if quote_addr not in tokens_data: + tokens_data[quote_addr] = [] + tokens_data[quote_addr].append(pair) + + return tokens_data + + +def validate_chain_id(chain_id: str) -> bool: + """ + Validate if the chain ID is supported. + + Args: + chain_id: Chain ID to validate + + Returns: + True if chain ID is supported, False otherwise + """ + return chain_id.lower() in SUPPORTED_CHAINS + + +def format_success_response(data: Dict[str, Any]) -> str: + """ + Format a successful response as JSON string. + + Args: + data: Response data dictionary + + Returns: + JSON formatted string + """ + return json.dumps(data, indent=2) diff --git a/intentkit/skills/dune_analytics/__init__.py b/intentkit/skills/dune_analytics/__init__.py new file mode 100644 index 00000000..55e8d048 --- /dev/null +++ b/intentkit/skills/dune_analytics/__init__.py @@ -0,0 +1,103 @@ +"""Dune Analytics skill module for IntentKit. + +Loads and initializes skills for fetching data from Dune Analytics API. +""" + +import logging +from typing import Dict, List, Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.dune_analytics.base import DuneBaseTool + +logger = logging.getLogger(__name__) + +# Cache for skill instances +_skill_cache: Dict[str, DuneBaseTool] = {} + + +class SkillStates(TypedDict): + """Type definition for Dune Analytics skill states.""" + + fetch_nation_metrics: SkillState + fetch_kol_buys: SkillState + + +class Config(SkillConfig): + """Configuration schema for Dune Analytics skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: Config, + is_private: bool, + store: SkillStoreABC, + **kwargs, +) -> List[DuneBaseTool]: + """Load Dune Analytics skills based on configuration. + + Args: + config: Skill configuration with states and API key. + is_private: Whether the context is private (affects skill visibility). + store: Skill store for accessing other skills. + **kwargs: Additional keyword arguments. + + Returns: + List of loaded Dune Analytics skill instances. + """ + logger.info("Loading Dune Analytics skills") + available_skills = [] + + for skill_name, state in config["states"].items(): + logger.debug("Checking skill: %s, state: %s", skill_name, state) + if state == "disabled": + continue + if state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + loaded_skills = [] + for name in available_skills: + skill = get_dune_skill(name, store) + if skill: + logger.info("Successfully loaded skill: %s", name) + loaded_skills.append(skill) + else: + logger.warning("Failed to load skill: %s", name) + + return loaded_skills + + +def get_dune_skill(name: str, store: SkillStoreABC) -> Optional[DuneBaseTool]: + """Retrieve a Dune Analytics skill instance by name. + + Args: + name: Name of the skill (e.g., 'fetch_nation_metrics', 'fetch_kol_buys'). + store: Skill store for accessing other skills. 
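+
+    Illustrative usage (assuming `store` is a concrete SkillStoreABC
+    implementation):
+
+        skill = get_dune_skill("fetch_kol_buys", store)
+        # A second call with the same name returns the cached instance.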
+ + Returns: + Dune Analytics skill instance or None if not found or import fails. + """ + if name in _skill_cache: + logger.debug("Retrieved cached skill: %s", name) + return _skill_cache[name] + + try: + if name == "fetch_nation_metrics": + from .fetch_nation_metrics import FetchNationMetrics + + _skill_cache[name] = FetchNationMetrics(skill_store=store) + elif name == "fetch_kol_buys": + from .fetch_kol_buys import FetchKOLBuys + + _skill_cache[name] = FetchKOLBuys(skill_store=store) + else: + logger.warning("Unknown Dune Analytics skill: %s", name) + return None + + logger.debug("Cached new skill instance: %s", name) + return _skill_cache[name] + except ImportError as e: + logger.error("Failed to import skill %s: %s", name, e) + return None diff --git a/intentkit/skills/dune_analytics/base.py b/intentkit/skills/dune_analytics/base.py new file mode 100644 index 00000000..85ada391 --- /dev/null +++ b/intentkit/skills/dune_analytics/base.py @@ -0,0 +1,52 @@ +"""Base module for Dune Analytics skills. + +Provides shared functionality for interacting with the Dune Analytics API. +""" + +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class DuneBaseTool(IntentKitSkill): + """Base class for Dune Analytics skills. + + Offers common functionality like API key retrieval and Dune API interaction. + """ + + name: str = Field(description="Tool name") + description: str = Field(description="Tool description") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field(description="Skill store for data persistence") + + def get_api_key(self) -> str: + """Retrieve the Dune Analytics API key from context. + + Returns: + API key string. + + Raises: + ToolException: If the API key is not found. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "agent_owner": + api_key = skill_config.get("api_key") + if api_key: + return api_key + else: + raise ToolException("No api_key found in agent_owner configuration") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}. Only 'agent_owner' is supported for Dune Analytics." + ) + + @property + def category(self) -> str: + """Category of the skill.""" + return "dune_analytics" diff --git a/intentkit/skills/dune_analytics/dune.png b/intentkit/skills/dune_analytics/dune.png new file mode 100644 index 00000000..c9fa2c99 Binary files /dev/null and b/intentkit/skills/dune_analytics/dune.png differ diff --git a/intentkit/skills/dune_analytics/fetch_kol_buys.py b/intentkit/skills/dune_analytics/fetch_kol_buys.py new file mode 100644 index 00000000..bf585643 --- /dev/null +++ b/intentkit/skills/dune_analytics/fetch_kol_buys.py @@ -0,0 +1,125 @@ +"""Skill to fetch KOL memecoin buys on Solana from Dune Analytics API. + +Uses query ID 4832844 to retrieve a list of KOL buy transactions. 
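+
+The Dune results endpoint returns JSON shaped roughly as follows (the column
+names are those consumed by this skill; the values are hypothetical):
+
+    {
+        "result": {
+            "rows": [
+                {
+                    "kol_with_link": "<kol name with link>",
+                    "token_with_chart": "<token with chart link>",
+                    "amount_usd": 1234.56,
+                    "buy_time": "2025-01-01 00:00:00"
+                }
+            ]
+        }
+    }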
+""" + +from typing import Any, Dict, Type + +import httpx +from pydantic import BaseModel, Field +from tenacity import retry, stop_after_attempt, wait_exponential + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.dune_analytics.base import DuneBaseTool + +BASE_URL = "https://api.dune.com/api/v1/query" +KOL_BUYS_QUERY_ID = 4832844 + + +class KOLBuysInput(BaseModel): + """Input schema for fetching KOL memecoin buys.""" + + limit: int = Field( + default=10, + description="Maximum number of buy transactions to fetch (default 10).", + ge=1, + ) + + +class KOLBuyData(BaseModel): + """Data model for KOL buy results.""" + + data: Dict[str, Any] = Field(description="KOL buy data from Dune API") + error: str = Field(default="", description="Error message if fetch failed") + + +class KOLBuysOutput(BaseModel): + """Output schema for KOL memecoin buys.""" + + buys: KOLBuyData = Field(description="KOL buy transaction data") + summary: str = Field(description="Summary of fetched data") + + +class FetchKOLBuys(DuneBaseTool): + """Skill to fetch KOL memecoin buys on Solana from Dune Analytics API.""" + + name: str = "dune_fetch_kol_buys" + description: str = ( + "Fetches a list of KOL memecoin buy transactions on Solana from Dune Analytics API using query ID 4832844. " + "Supports a configurable limit for the number of results. Handles rate limits with retries." + ) + args_schema: Type[BaseModel] = KOLBuysInput + skill_store: SkillStoreABC = Field(description="Skill store for data persistence") + + @retry( + stop=stop_after_attempt(3), wait=wait_exponential(multiplier=5, min=5, max=60) + ) + async def fetch_data( + self, query_id: int, api_key: str, limit: int = 10 + ) -> Dict[str, Any]: + """Fetch data for a specific Dune query. + + Args: + query_id: Dune query ID. + api_key: Dune API key. + limit: Maximum number of results (default 10). + + Returns: + Dictionary of query results. + + Raises: + ToolException: If the API request fails. + """ + from langchain.tools.base import ToolException + + url = f"{BASE_URL}/{query_id}/results?limit={limit}" + headers = {"X-Dune-API-Key": api_key} + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, timeout=10) + response.raise_for_status() + return response.json().get("result", {}) + except (httpx.RequestError, httpx.HTTPStatusError) as e: + raise ToolException(f"Error fetching data from Dune API: {e}") + + async def _arun( + self, + limit: int = 10, + **kwargs, + ) -> str: + """Fetch KOL memecoin buys asynchronously and return formatted output. + + Args: + limit: Maximum number of buy transactions to fetch (default 10). + config: Runnable configuration. + **kwargs: Additional keyword arguments. + + Returns: + Formatted string with KOL buy transactions or error message. + """ + import logging + + logger = logging.getLogger(__name__) + api_key = self.get_api_key() + + try: + data = await self.fetch_data(KOL_BUYS_QUERY_ID, api_key, limit) + rows = data.get("rows", []) + if not rows: + return "No KOL buy transactions found." 
+ + output = f"Fetched {len(rows)} KOL memecoin buy transactions:\n" + for row in rows: + output += ( + f"- {row['kol_with_link']} bought {row['token_with_chart']} " + f"(${row['amount_usd']:.2f}) at {row['buy_time']}\n" + ) + return output.strip() + except Exception as e: + error_msg = f"Error fetching KOL memecoin buys: {str(e)}" + logger.warning(error_msg) + return error_msg + + def _run(self, question: str): + raise NotImplementedError("Use _arun for async execution") diff --git a/intentkit/skills/dune_analytics/fetch_nation_metrics.py b/intentkit/skills/dune_analytics/fetch_nation_metrics.py new file mode 100644 index 00000000..49b11cd2 --- /dev/null +++ b/intentkit/skills/dune_analytics/fetch_nation_metrics.py @@ -0,0 +1,234 @@ +"""Skill to fetch Crestal Nation metrics from Dune Analytics API. + +Supports predefined metrics (e.g., total_users, unique_ai_citizens) or direct query IDs. +""" + +import difflib +import re +from typing import Any, Dict, Type + +import httpx +from pydantic import BaseModel, Field +from tenacity import retry, stop_after_attempt, wait_exponential + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.dune_analytics.base import DuneBaseTool + +SUPPORTED_QUERIES = { + "total_users": 4858003, + "weekly_active_users": 4867200, + "unique_ai_citizens": 4857629, + "unique_creators": 4844506, + "ai_citizens_over_time": 4857629, + "chat_messages_over_time": 4857870, + "onchain_transactions": 4859895, + "total_chat_messages": 4857870, + "daily_skill_executions": 4861785, + "goods_services": 4859895, + "agent_tvl": 4859887, + "citizen_market_cap": 4859887, +} +QUERY_ALIASES = { + "agents": "unique_ai_citizens", + "citizens": "unique_ai_citizens", + "market_cap": "citizen_market_cap", + "nation_market_cap": "citizen_market_cap", + "number_of_agents": "unique_ai_citizens", + "number_of_citizens": "unique_ai_citizens", + "ai_citizens": "unique_ai_citizens", + "users": "total_users", + "active_users": "weekly_active_users", + "creators": "unique_creators", + "transactions": "onchain_transactions", + "messages": "total_chat_messages", + "skill_executions": "daily_skill_executions", + "tvl": "agent_tvl", +} +BASE_URL = "https://api.dune.com/api/v1/query" + + +class NationMetricsInput(BaseModel): + """Input schema for fetching Crestal Nation metrics.""" + + metric: str = Field( + default="", + description="Metric name (e.g., total_users, agents) or query ID (e.g., 4858003). Empty for all configured metrics.", + ) + limit: int = Field( + default=1000, description="Maximum number of results to fetch (default 1000)." + ) + + +class MetricData(BaseModel): + """Data model for a single metric result.""" + + metric: str = Field(description="Metric name or query ID") + data: Dict[str, Any] = Field(description="Metric data from Dune API") + error: str = Field(default="", description="Error message if fetch failed") + + +class NationMetricsOutput(BaseModel): + """Output schema for Crestal Nation metrics.""" + + metrics: Dict[str, MetricData] = Field( + description="Dictionary of metric names or query IDs to their data" + ) + summary: str = Field(description="Summary of fetched metrics") + + +class FetchNationMetrics(DuneBaseTool): + """Skill to fetch Crestal Nation metrics from Dune Analytics API.""" + + name: str = "dune_fetch_nation_metrics" + description: str = ( + "Fetches Crestal Nation metrics (e.g., total_users, agents/citizens, market_cap) from Dune Analytics API. 
" + "Supports predefined metrics, direct query IDs, or all configured metrics if none specified. " + "Handles rate limits with retries." + ) + args_schema: Type[BaseModel] = NationMetricsInput + skill_store: SkillStoreABC = Field(description="Skill store for data persistence") + + def normalize_metric(self, metric: str) -> str: + """Normalize a metric string for matching. + + Args: + metric: Raw metric string from input. + + Returns: + Normalized metric string (lowercase, underscores, no punctuation). + """ + if not metric: + return "" + metric = re.sub(r"[^\w\s]", "", metric.lower()).replace(" ", "_") + return re.sub(r"_+", "_", metric).strip("_") + + def find_closest_metrics(self, metric: str, max_suggestions: int = 3) -> list[str]: + """Find the closest matching metrics using fuzzy matching. + + Args: + metric: Input metric to match against. + max_suggestions: Maximum number of suggestions to return. + + Returns: + List of closest metric names. + """ + all_metrics = list(SUPPORTED_QUERIES.keys()) + list(QUERY_ALIASES.keys()) + if not metric or not all_metrics: + return [] + return difflib.get_close_matches( + metric, all_metrics, n=max_suggestions, cutoff=0.6 + ) + + @retry( + stop=stop_after_attempt(3), wait=wait_exponential(multiplier=5, min=5, max=60) + ) + async def fetch_data( + self, query_id: int, api_key: str, limit: int = 1000 + ) -> Dict[str, Any]: + """Fetch data for a specific Dune query. + + Args: + query_id: Dune query ID. + api_key: Dune API key. + limit: Maximum number of results (default 1000). + + Returns: + Dictionary of query results. + + Raises: + ToolException: If the API request fails. + """ + from langchain.tools.base import ToolException + + url = f"{BASE_URL}/{query_id}/results?limit={limit}" + headers = {"X-Dune-API-Key": api_key} + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, timeout=10) + response.raise_for_status() + return response.json().get("result", {}) + except (httpx.RequestError, httpx.HTTPStatusError) as e: + raise ToolException(f"Error fetching data from Dune API: {e}") + + async def _arun( + self, + metric: str = "", + limit: int = 1000, + **kwargs, + ) -> NationMetricsOutput: + """Fetch Crestal Nation metrics asynchronously. + + Args: + metric: Metric name (e.g., total_users) or query ID (e.g., 4858003). Empty for all configured metrics. + limit: Maximum number of results (default 1000). + config: Runnable configuration. + **kwargs: Additional keyword arguments. + + Returns: + NationMetricsOutput with metric data and summary. + """ + import logging + + logger = logging.getLogger(__name__) + api_key = self.get_api_key() + + metric = self.normalize_metric(metric) + metric = QUERY_ALIASES.get(metric, metric) + + results = {} + metrics_to_fetch = {} + + try: + query_id = int(metric) + metrics_to_fetch[str(query_id)] = query_id + except (ValueError, TypeError): + metrics_to_fetch = ( + SUPPORTED_QUERIES + if not metric + else ( + {metric: SUPPORTED_QUERIES[metric]} + if metric in SUPPORTED_QUERIES + else {} + ) + ) + + if not metrics_to_fetch: + closest_metrics = self.find_closest_metrics(metric) + supported = ", ".join(SUPPORTED_QUERIES.keys()) + suggestions = ( + f" Did you mean: {', '.join(closest_metrics)}?" + if closest_metrics + else "" + ) + logger.warning( + "Unrecognized metric or query ID: %s. Suggested: %s", + metric, + closest_metrics, + ) + return NationMetricsOutput( + metrics={}, + summary=( + f"Invalid metric or query ID: {metric}. 
Supported metrics include: {supported}.{suggestions} " + "Try 'fetch nation metrics total_users' or a valid query ID, or submit a feature request at " + "https://github.com/crestalnetwork/intentkit." + ), + ) + + for metric_name, query_id in metrics_to_fetch.items(): + try: + data = await self.fetch_data(query_id, api_key, limit) + results[metric_name] = MetricData(metric=metric_name, data=data) + except Exception as e: + results[metric_name] = MetricData( + metric=metric_name, data={}, error=str(e) + ) + + summary = f"Fetched data for {len([m for m in results.values() if not m.error])}/{len(metrics_to_fetch)} metrics." + if any(m.error for m in results.values()): + summary += f" Errors occurred for: {', '.join(m.metric for m in results.values() if m.error)}." + + return NationMetricsOutput(metrics=results, summary=summary) + + def _run(self, question: str): + raise NotImplementedError("Use _arun for async execution") diff --git a/intentkit/skills/dune_analytics/schema.json b/intentkit/skills/dune_analytics/schema.json new file mode 100644 index 00000000..e3a8fe89 --- /dev/null +++ b/intentkit/skills/dune_analytics/schema.json @@ -0,0 +1,99 @@ +{ + "title": "Dune Analytics", + "description": "Dune Analytics skills to fetch data from Dune Analytics API.", + "x-icon": "https://ai.service.crestal.dev/skills/dune_analytics/dune.png", + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "title": "Skill States", + "type": "object", + "properties": { + "fetch_nation_metrics": { + "type": "string", + "title": "Fetch Crestal Nation Metrics", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetches Crestal Nation metrics (e.g., total_users, agents/citizens, market_cap) from Dune Analytics API.", + "default": "disabled" + }, + "fetch_kol_buys": { + "type": "string", + "title": "Fetch KOL Memecoin Buys", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetches a list of KOL memecoin buy transactions on Solana from Dune Analytics API.", + "default": "disabled" + } + } + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key for Dune service", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Dune API Key", + "description": "API key for Dune Analytics (X-Dune-API-Key).", + "x-link": "[Get your API key](https://docs.dune.com/api-reference/overview/authentication)", + "x-sensitive": true + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/elfa/README.md b/intentkit/skills/elfa/README.md new file mode 100644 index 00000000..68069764 --- /dev/null +++ b/intentkit/skills/elfa/README.md @@ -0,0 +1,152 @@ +# Elfa Skills - Social Media Intelligence + +Integration with [Elfa AI API v2](https://api.elfa.ai/v2) providing real-time social media data analysis 
and processing capabilities for crypto and stock market sentiment tracking. + +**Important: V2 API Changes** +- **No Raw Content**: V2 API removes raw tweet content for platform compliance +- **Sanitized Data**: Returns engagement metrics, timestamps, and account metadata only +- **New Endpoints**: Updated endpoints with consistent response format +- **Enhanced Pagination**: Different pagination patterns for search vs aggregation endpoints + +## Setup + +Add your Elfa API key to your environment: +```bash +ELFA_API_KEY=your_elfa_api_key_here +``` + +## Available Skills + +### 1. Get Trending Tokens (`get_trending_tokens`) + +Ranks the most discussed tokens based on smart mentions count for a given period, updated every 5 minutes. + +**Endpoint**: `v2/aggregations/trending-tokens` - Direct Migration + +**Example Prompts:** +``` +"What are the trending crypto tokens in the last 24 hours?" +"Get trending tokens with minimum 50 mentions in the past week" +``` + +**Parameters:** +- `timeWindow`: "30m", "1h", "4h", "24h", "7d", "30d" (default: "7d") +- `page`: Page number for pagination (default: 1) +- `pageSize`: Number of items per page (default: 50) +- `minMentions`: Minimum mentions required (default: 5) + +**V2 Changes:** +- Same functionality and parameters +- Enhanced response format with metadata +- Uses page+pageSize pagination for aggregations + +--- + +### 2. Get Top Mentions (`get_top_mentions`) + +Queries tweets mentioning a specific stock/crypto ticker, ranked by view count for market sentiment analysis. + +**Endpoint**: `v2/data/top-mentions` - Breaking Changes + +**Example Prompts:** +``` +"Get the top mentions for Bitcoin in the last 24 hours" +"Show me engagement metrics for tweets about $ETH today" +``` + +**Parameters:** +- `ticker`: Stock/crypto symbol (e.g., "BTC", "$ETH", "AAPL") - required +- `timeWindow`: "1h", "24h", "7d" (default: "1h") +- `page`: Page number for pagination (default: 1) +- `pageSize`: Number of items per page (default: 10) + +**V2 Changes:** +- **Removed**: Raw tweet content/text +- **Removed**: `includeAccountDetails` parameter (always included) +- **Preserved**: Engagement metrics (view_count, like_count, etc.) +- **Preserved**: Account information and verification status +- **Enhanced**: Account tags (e.g., "smart" accounts) + +--- + +### 3. Search Mentions (`search_mentions`) + +Searches tweets mentioning up to 5 keywords or from specific accounts with sanitized engagement data. + +**Endpoint**: `v2/data/keyword-mentions` - Breaking Changes + +**Example Prompts:** +``` +"Search for engagement metrics of tweets mentioning 'DeFi, NFT, blockchain'" +"Find tweets from account 'elonmusk' about cryptocurrency" +``` + +**Parameters:** +- `keywords`: Up to 5 keywords (comma-separated, phrases accepted) - optional if accountName provided +- `accountName`: Account username to filter by - optional if keywords provided +- `timeWindow`: Time window for search (default: "7d") +- `limit`: Number of results to return, max 30 (default: 20) +- `searchType`: Type of search - "and" or "or" (default: "or") +- `cursor`: Cursor for pagination (optional) + +**V2 Changes:** +- **Removed**: Raw tweet content/text +- **Preserved**: Engagement metrics and sentiment analysis +- **Enhanced**: Account filtering with `accountName` parameter +- **Updated**: Uses limit+cursor pagination for search +- **Added**: Account tags and metadata + +--- + +### 4. 
Get Smart Stats (`get_smart_stats`) + +Retrieves key social media metrics for a specific username including engagement ratios and smart following count. + +**Endpoint**: `v2/account/smart-stats` - Direct Migration + +**Example Prompts:** +``` +"Get smart stats for @elonmusk" +"Analyze the social metrics for username 'VitalikButerin'" +``` + +**Parameters:** +- `username`: Twitter username (with or without @) - required + +**V2 Changes:** +- Same functionality and parameters +- Consistent response format with metadata + +## V2 Response Format + +All V2 endpoints return a consistent format: + +```json +{ + "success": boolean, + "data": [...], // Array or object with actual data + "metadata": { // Pagination and additional info + "total": number, + "page": number, + "pageSize": number, + "cursor": "string" // For search endpoints + } +} +``` + +## Migration Notes + +### What's Removed in V2: +- Raw tweet content/text (compliance requirement) +- Direct access to tweet body/message content +- `includeAccountDetails` parameter (always included) +- **Deprecated**: `get_mentions` skill (no v2 equivalent) + +### What's Preserved: +- Engagement metrics (likes, views, reposts, replies) +- Account information and verification status +- Timestamps and metadata +- Sentiment analysis +- Core functionality for trend analysis + + diff --git a/intentkit/skills/elfa/__init__.py b/intentkit/skills/elfa/__init__.py new file mode 100644 index 00000000..662667d7 --- /dev/null +++ b/intentkit/skills/elfa/__init__.py @@ -0,0 +1,114 @@ +"""Elfa skills.""" + +import logging +from typing import NotRequired, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.elfa.base import ElfaBaseTool +from intentkit.skills.elfa.mention import ( + ElfaGetTopMentions, + ElfaSearchMentions, +) +from intentkit.skills.elfa.stats import ElfaGetSmartStats +from intentkit.skills.elfa.tokens import ElfaGetTrendingTokens + +# Cache skills at the system level, because they are stateless +_cache: dict[str, ElfaBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + get_top_mentions: SkillState + search_mentions: SkillState + get_trending_tokens: SkillState + get_smart_stats: SkillState + + +class Config(SkillConfig): + """Configuration for Elfa skills.""" + + states: SkillStates + api_key: NotRequired[str] + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[ElfaBaseTool]: + """Get all Elfa skills. + + Args: + config: The configuration for Elfa skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Elfa skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_elfa_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_elfa_skill( + name: str, + store: SkillStoreABC, +) -> ElfaBaseTool: + """Get an Elfa skill by name. 
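+
+    Instances are cached in the module-level _cache dict (the tools are
+    stateless), so repeated calls with the same name return the same object.
+    Illustrative usage (assuming `store` is a SkillStoreABC implementation):
+
+        skill = get_elfa_skill("get_trending_tokens", store)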
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Elfa skill + """ + + if name == "get_top_mentions": + if name not in _cache: + _cache[name] = ElfaGetTopMentions( + skill_store=store, + ) + return _cache[name] + + elif name == "search_mentions": + if name not in _cache: + _cache[name] = ElfaSearchMentions( + skill_store=store, + ) + return _cache[name] + + elif name == "get_trending_tokens": + if name not in _cache: + _cache[name] = ElfaGetTrendingTokens( + skill_store=store, + ) + return _cache[name] + + elif name == "get_smart_stats": + if name not in _cache: + _cache[name] = ElfaGetSmartStats( + skill_store=store, + ) + return _cache[name] + + else: + logger.warning(f"Unknown Elfa skill: {name}") + return None diff --git a/intentkit/skills/elfa/base.py b/intentkit/skills/elfa/base.py new file mode 100644 index 00000000..fcc166eb --- /dev/null +++ b/intentkit/skills/elfa/base.py @@ -0,0 +1,38 @@ +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + +base_url = "https://api.elfa.ai/v2" + + +class ElfaBaseTool(IntentKitSkill): + """Base class for Elfa tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + return self.skill_store.get_system_config("elfa_api_key") + # for backward compatibility, may only have api_key in skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "elfa" diff --git a/intentkit/skills/elfa/elfa.jpg b/intentkit/skills/elfa/elfa.jpg new file mode 100644 index 00000000..276d5e18 Binary files /dev/null and b/intentkit/skills/elfa/elfa.jpg differ diff --git a/intentkit/skills/elfa/mention.py b/intentkit/skills/elfa/mention.py new file mode 100644 index 00000000..dde1ca26 --- /dev/null +++ b/intentkit/skills/elfa/mention.py @@ -0,0 +1,218 @@ +"""Mention-related skills for Elfa AI API.""" + +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from .base import ElfaBaseTool +from .utils import MentionData, make_elfa_request + + +class ElfaGetTopMentionsInput(BaseModel): + """Input parameters for top mentions.""" + + ticker: str = Field(description="Stock ticker symbol (e.g., ETH, $ETH, BTC, $BTC)") + timeWindow: Optional[str] = Field( + "1h", description="Time window (e.g., '1h', '24h', '7d')" + ) + page: Optional[int] = Field(1, description="Page number for pagination") + pageSize: Optional[int] = Field(10, description="Number of items per page") + + +class ElfaGetTopMentionsOutput(BaseModel): + """Output structure for top mentions response.""" + + success: bool + data: Optional[List[MentionData]] = Field(None, description="List of top mentions") + metadata: Optional[Dict[str, Any]] = Field(None, description="Response metadata") + + +class ElfaGetTopMentions(ElfaBaseTool): + """ 
+ Get top mentions for a specific ticker. + + This tool uses the Elfa API to query tweets mentioning a specific stock ticker. + The tweets are ranked by view count, providing insight into the most visible and + potentially influential discussions surrounding the stock. Results are updated hourly. + + Use Cases: + - Real-time sentiment analysis: Track changes in public opinion about a stock + - News monitoring: Identify trending news and discussions related to a specific ticker + - Investor insights: Monitor conversations and opinions of investors and traders + """ + + name: str = "elfa_get_top_mentions" + description: str = """Get top mentions for a specific ticker symbol ranked by view count. + Updated hourly. Returns engagement metrics and account information for market sentiment analysis. + + Use this to track public opinion, identify trending news, and monitor investor discussions.""" + args_schema: Type[BaseModel] = ElfaGetTopMentionsInput + + async def _arun( + self, + ticker: str, + timeWindow: str = "1h", + page: int = 1, + pageSize: int = 10, + **kwargs, + ) -> ElfaGetTopMentionsOutput: + """ + Execute the top mentions request. + + Args: + ticker: Stock ticker symbol + timeWindow: Time window for mentions (default: 1h) + page: Page number for pagination (default: 1) + pageSize: Items per page (default: 10) + config: LangChain runnable configuration + **kwargs: Additional parameters + + Returns: + ElfaGetTopMentionsOutput: Structured response with top mentions + + Raises: + ValueError: If API key is not found + ToolException: If there's an error with the API request + """ + api_key = self.get_api_key() + + # Prepare parameters according to API spec + params = { + "ticker": ticker, + "timeWindow": timeWindow, + "page": page, + "pageSize": pageSize, + } + + # Make API request using shared utility + response = await make_elfa_request( + endpoint="data/top-mentions", api_key=api_key, params=params + ) + + # Parse response data into MentionData objects + mentions = [] + if response.data and isinstance(response.data, list): + mentions = [MentionData(**item) for item in response.data] + + return ElfaGetTopMentionsOutput( + success=response.success, data=mentions, metadata=response.metadata + ) + + +class ElfaSearchMentionsInput(BaseModel): + """Input parameters for search mentions.""" + + keywords: Optional[str] = Field( + None, + description="Up to 5 keywords to search for, separated by commas. Phrases accepted", + ) + accountName: Optional[str] = Field( + None, + description="Account username to filter by (optional if keywords provided)", + ) + timeWindow: Optional[str] = Field("7d", description="Time window for search") + limit: Optional[int] = Field(20, description="Number of results to return (max 30)") + searchType: Optional[str] = Field( + "or", description="Type of search ('and' or 'or')" + ) + cursor: Optional[str] = Field(None, description="Cursor for pagination") + + +class ElfaSearchMentionsOutput(BaseModel): + """Output structure for search mentions response.""" + + success: bool + data: Optional[List[MentionData]] = Field( + None, description="List of matching mentions" + ) + metadata: Optional[Dict[str, Any]] = Field( + None, description="Response metadata with cursor" + ) + + +class ElfaSearchMentions(ElfaBaseTool): + """ + Search mentions by keywords or account name. + + This tool uses the Elfa API to search tweets mentioning up to five keywords or from specific accounts. + It can search within the past 30 days of data (updated every 5 minutes) or access historical data. 
+ Returns sanitized engagement metrics and sentiment data. + + Use Cases: + - Market research: Track conversations and sentiment around specific products or industries + - Brand monitoring: Monitor mentions of your brand and identify potential PR issues + - Public opinion tracking: Analyze public opinion on various topics + - Competitive analysis: See what people are saying about your competitors + """ + + name: str = "elfa_search_mentions" + description: str = """Search tweets by keywords or account name with engagement data and sentiment analysis. + Updated every 5 minutes. Access 30 days of recent data or historical archives. + + Use this for market research, brand monitoring, opinion tracking, and competitive analysis.""" + args_schema: Type[BaseModel] = ElfaSearchMentionsInput + + async def _arun( + self, + keywords: Optional[str] = None, + accountName: Optional[str] = None, + timeWindow: str = "7d", + limit: int = 20, + searchType: str = "or", + cursor: Optional[str] = None, + **kwargs, + ) -> ElfaSearchMentionsOutput: + """ + Execute the search mentions request. + + Args: + keywords: Keywords to search for (optional if accountName provided) + accountName: Account username to filter by (optional if keywords provided) + timeWindow: Time window for search (default: 7d) + limit: Number of results to return (default: 20, max 30) + searchType: Type of search - 'and' or 'or' (default: 'or') + cursor: Pagination cursor (optional) + config: LangChain runnable configuration + **kwargs: Additional parameters + + Returns: + ElfaSearchMentionsOutput: Structured response with matching mentions + + Raises: + ValueError: If API key is not found or neither keywords nor accountName provided + ToolException: If there's an error with the API request + """ + api_key = self.get_api_key() + + # Validate that at least one search criteria is provided + if not keywords and not accountName: + raise ValueError("Either keywords or accountName must be provided") + + # Prepare parameters according to API spec + params = { + "timeWindow": timeWindow, + "limit": min(limit, 30), # API max is 30 + "searchType": searchType, + } + + # Add optional parameters + if keywords: + params["keywords"] = keywords + if accountName: + params["accountName"] = accountName + if cursor: + params["cursor"] = cursor + + # Make API request using shared utility + response = await make_elfa_request( + endpoint="data/keyword-mentions", api_key=api_key, params=params + ) + + # Parse response data into MentionData objects + mentions = [] + if response.data and isinstance(response.data, list): + mentions = [MentionData(**item) for item in response.data] + + return ElfaSearchMentionsOutput( + success=response.success, data=mentions, metadata=response.metadata + ) diff --git a/intentkit/skills/elfa/schema.json b/intentkit/skills/elfa/schema.json new file mode 100644 index 00000000..cc8e9ba0 --- /dev/null +++ b/intentkit/skills/elfa/schema.json @@ -0,0 +1,137 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Elfa", + "description": "Integration with Elfa AI API providing data analysis and processing capabilities with secure authentication for advanced data operations", + "x-icon": "https://ai.service.crestal.dev/skills/elfa/elfa.jpg", + "x-tags": [ + "Data" + ], + "x-nft-requirement": 1, + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + 
"get_top_mentions": { + "type": "string", + "title": "Get Top Mentions", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool uses the Elfa API to query tweets mentioning a specific stock ticker. The tweets are ranked by view count, providing insight into the most visible and potentially influential discussions surrounding the stock. The results are updated hourly, allowing for real-time monitoring of market sentiment.", + "default": "private" + }, + "search_mentions": { + "type": "string", + "title": "Search Mentions", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool uses the Elfa API to search tweets mentioning up to five keywords or from specific accounts. It can search within the past 30 days of data, which is updated every 5 minutes, or access up to six months of historical tweet data.", + "default": "private" + }, + "get_trending_tokens": { + "type": "string", + "title": "Get Trending Tokens", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool ranks the most discussed tokens based on smart mentions count for a given period, with updates every 5 minutes via the Elfa API. Smart mentions provide a more sophisticated measure of discussion volume than simple keyword counts.", + "default": "disabled" + }, + "get_smart_stats": { + "type": "string", + "title": "Get Smart Stats", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool uses the Elfa API to retrieve key social media metrics for a given username. 
These metrics include Smart Following Count, Engagement Score, and Engagement Ratio.", + "default": "private" + } + }, + "description": "States for each Elfa skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Elfa API Key", + "x-sensitive": true, + "description": "Elfa API key for authentication" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/elfa/stats.py b/intentkit/skills/elfa/stats.py new file mode 100644 index 00000000..a1b7a4e0 --- /dev/null +++ b/intentkit/skills/elfa/stats.py @@ -0,0 +1,81 @@ +"""Smart stats skill for Elfa AI API.""" + +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + +from .base import ElfaBaseTool +from .utils import SmartStatsData, make_elfa_request + + +class ElfaGetSmartStatsInput(BaseModel): + """Input parameters for smart stats.""" + + username: str = Field(description="Account username to get stats for") + + +class ElfaGetSmartStatsOutput(BaseModel): + """Output structure for smart stats response.""" + + success: bool + data: Optional[SmartStatsData] = Field(None, description="Smart stats data") + metadata: Optional[Dict[str, Any]] = Field(None, description="Response metadata") + + +class ElfaGetSmartStats(ElfaBaseTool): + """ + Get smart stats for a specific username. + + This tool uses the Elfa API to retrieve key social media metrics for a given username including: + - Smart Following Count: Number of high-quality or influential followers + - Average Engagement: Composite score reflecting interaction with user's content + - Average Reach: Average reach of the user's content + - Smart Follower Count: Number of smart followers + - Follower Count: Total follower count + + Use Cases: + - Competitor analysis: Compare social media performance to competitors + - Influencer identification: Identify influential users in your niche + - Social media audits: Assess overall health and effectiveness of social media presence + """ + + name: str = "elfa_get_smart_stats" + description: str = """Get comprehensive social media metrics for a username including smart following count, + engagement scores, and follower analytics. Use this for competitor analysis, influencer identification, + and social media performance audits.""" + args_schema: Type[BaseModel] = ElfaGetSmartStatsInput + + async def _arun(self, username: str, **kwargs) -> ElfaGetSmartStatsOutput: + """ + Execute the smart stats request. 
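+
+        An illustrative call, assuming the skill instance came from get_elfa_skill:
+
+            out = await skill._arun(username="VitalikButerin")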
+ + Args: + username: The username to check stats for + config: LangChain runnable configuration + **kwargs: Additional parameters + + Returns: + ElfaGetSmartStatsOutput: Structured response with smart stats + + Raises: + ValueError: If API key is not found + ToolException: If there's an error with the API request + """ + api_key = self.get_api_key() + + # Prepare parameters according to API spec + params = {"username": username} + + # Make API request using shared utility + response = await make_elfa_request( + endpoint="account/smart-stats", api_key=api_key, params=params + ) + + # Parse response data into SmartStatsData object + stats_data = None + if response.data and isinstance(response.data, dict): + stats_data = SmartStatsData(**response.data) + + return ElfaGetSmartStatsOutput( + success=response.success, data=stats_data, metadata=response.metadata + ) diff --git a/intentkit/skills/elfa/tokens.py b/intentkit/skills/elfa/tokens.py new file mode 100644 index 00000000..f9f52dd1 --- /dev/null +++ b/intentkit/skills/elfa/tokens.py @@ -0,0 +1,119 @@ +"""Trending tokens skill for Elfa AI API.""" + +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from .base import ElfaBaseTool +from .utils import make_elfa_request + + +class ElfaGetTrendingTokensInput(BaseModel): + """Input parameters for trending tokens.""" + + timeWindow: Optional[str] = Field( + "7d", + description="Time window for trending analysis (e.g., '30m', '1h', '4h', '24h', '7d', '30d')", + ) + page: Optional[int] = Field(1, description="Page number for pagination") + pageSize: Optional[int] = Field(50, description="Number of items per page") + minMentions: Optional[int] = Field( + 5, description="Minimum number of mentions required" + ) + + +class TrendingToken(BaseModel): + """Individual trending token data.""" + + token: Optional[str] = Field(None, description="Token symbol") + current_count: Optional[int] = Field(None, description="Current mention count") + previous_count: Optional[int] = Field(None, description="Previous mention count") + change_percent: Optional[float] = Field(None, description="Change percentage") + + +class ElfaGetTrendingTokensOutput(BaseModel): + """Output structure for trending tokens response.""" + + success: bool + data: Optional[List[TrendingToken]] = Field( + None, description="List of trending tokens" + ) + metadata: Optional[Dict[str, Any]] = Field(None, description="Response metadata") + + +class ElfaGetTrendingTokens(ElfaBaseTool): + """ + Get trending tokens based on smart mentions count. + + This tool ranks the most discussed tokens based on smart mentions count for a given period, + with updates every 5 minutes via the Elfa API. Smart mentions provide a more sophisticated + measure of discussion volume than simple keyword counts. + + Use Cases: + - Identify trending tokens: Quickly see which tokens are gaining traction in online discussions + - Gauge market sentiment: Track changes in smart mention counts to understand shifts in market opinion + - Research potential investments: Use the ranking as a starting point for further due diligence + """ + + name: str = "elfa_get_trending_tokens" + description: str = """Get trending tokens ranked by smart mentions count for a given time period. + Updated every 5 minutes. Smart mentions provide sophisticated discussion volume measurement beyond simple keyword counts. 
+ + Use this to identify tokens gaining traction, gauge market sentiment, and research potential investments.""" + args_schema: Type[BaseModel] = ElfaGetTrendingTokensInput + + async def _arun( + self, + timeWindow: str = "7d", + page: int = 1, + pageSize: int = 50, + minMentions: int = 5, + **kwargs, + ) -> ElfaGetTrendingTokensOutput: + """ + Execute the trending tokens request. + + Args: + timeWindow: Time window for analysis (default: 7d) + page: Page number for pagination (default: 1) + pageSize: Items per page (default: 50) + minMentions: Minimum mentions required (default: 5) + config: LangChain runnable configuration + **kwargs: Additional parameters + + Returns: + ElfaGetTrendingTokensOutput: Structured response with trending tokens + + Raises: + ValueError: If API key is not found + ToolException: If there's an error with the API request + """ + api_key = self.get_api_key() + + # Prepare parameters according to API spec + params = { + "timeWindow": timeWindow, + "page": page, + "pageSize": pageSize, + "minMentions": minMentions, + } + + # Make API request using shared utility + response = await make_elfa_request( + endpoint="aggregations/trending-tokens", api_key=api_key, params=params + ) + + # Parse response data into TrendingToken objects + trending_tokens = [] + if response.data: + if isinstance(response.data, list): + trending_tokens = [TrendingToken(**item) for item in response.data] + elif isinstance(response.data, dict) and "data" in response.data: + # Handle nested data structure if present + trending_tokens = [ + TrendingToken(**item) for item in response.data["data"] + ] + + return ElfaGetTrendingTokensOutput( + success=response.success, data=trending_tokens, metadata=response.metadata + ) diff --git a/intentkit/skills/elfa/utils.py b/intentkit/skills/elfa/utils.py new file mode 100644 index 00000000..9e00df3d --- /dev/null +++ b/intentkit/skills/elfa/utils.py @@ -0,0 +1,129 @@ +"""Utility functions for Elfa skills.""" + +from typing import Any, Dict, Optional + +import httpx +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from .base import base_url + + +class ElfaResponse(BaseModel): + """Standard Elfa API v2 response format.""" + + success: bool + data: Any = None + metadata: Optional[Dict[str, Any]] = None + + +async def make_elfa_request( + endpoint: str, + api_key: str, + params: Optional[Dict[str, Any]] = None, + timeout: int = 30, +) -> ElfaResponse: + """ + Make a standardized request to the Elfa API. 
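+
+    A minimal usage sketch (the endpoint is the one used by the stats skill; the key is a placeholder):
+
+        resp = await make_elfa_request(
+            endpoint="account/smart-stats",
+            api_key="<ELFA_API_KEY>",
+            params={"username": "vitalik"},
+        )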
+ + Args: + endpoint: API endpoint path (e.g., "aggregations/trending-tokens") + api_key: Elfa API key + params: Query parameters + timeout: Request timeout in seconds + + Returns: + ElfaResponse: Standardized response object + + Raises: + ToolException: If there's an error with the API request + """ + if not api_key: + raise ValueError("Elfa API key not found") + + url = f"{base_url}/{endpoint}" + headers = { + "accept": "application/json", + "x-elfa-api-key": api_key, + } + + # Clean up params - remove None values + if params: + params = {k: v for k, v in params.items() if v is not None} + + async with httpx.AsyncClient() as client: + try: + response = await client.get( + url, headers=headers, timeout=timeout, params=params + ) + response.raise_for_status() + json_dict = response.json() + + # Handle v2 response format + if isinstance(json_dict, dict) and "success" in json_dict: + return ElfaResponse( + success=json_dict["success"], + data=json_dict.get("data"), + metadata=json_dict.get("metadata", {}), + ) + else: + # Fallback for unexpected format + return ElfaResponse(success=True, data=json_dict, metadata={}) + + except httpx.RequestError as req_err: + raise ToolException(f"Request error from Elfa API: {req_err}") from req_err + except httpx.HTTPStatusError as http_err: + raise ToolException(f"HTTP error from Elfa API: {http_err}") from http_err + except Exception as e: + raise ToolException(f"Error from Elfa API: {e}") from e + + +# Common Pydantic models for v2 API responses +class RepostBreakdown(BaseModel): + """Repost breakdown data.""" + + smart: Optional[int] = None + ct: Optional[int] = None + + +class Account(BaseModel): + """Account information.""" + + username: Optional[str] = None + isVerified: Optional[bool] = None + + +class MentionData(BaseModel): + """Base mention data structure used across multiple endpoints.""" + + tweetId: Optional[str] = Field(None, description="Tweet ID") + link: Optional[str] = Field(None, description="Link to the tweet") + likeCount: Optional[int] = Field(None, description="Number of likes") + repostCount: Optional[int] = Field(None, description="Number of reposts") + viewCount: Optional[int] = Field(None, description="Number of views") + quoteCount: Optional[int] = Field(None, description="Number of quotes") + replyCount: Optional[int] = Field(None, description="Number of replies") + bookmarkCount: Optional[int] = Field(None, description="Number of bookmarks") + mentionedAt: Optional[str] = Field(None, description="When mentioned") + type: Optional[str] = Field(None, description="Post type") + account: Optional[Account] = Field(None, description="Account information") + repostBreakdown: Optional[RepostBreakdown] = Field( + None, description="Repost breakdown" + ) + + +class SmartStatsData(BaseModel): + """Smart stats data structure.""" + + smartFollowingCount: Optional[int] = Field( + None, description="Smart following count" + ) + averageEngagement: Optional[float] = Field(None, description="Average engagement") + averageReach: Optional[float] = Field(None, description="Average reach") + smartFollowerCount: Optional[int] = Field(None, description="Smart follower count") + followerCount: Optional[int] = Field(None, description="Total follower count") + + +def clean_params(params: Dict[str, Any]) -> Dict[str, Any]: + """Remove None values from parameters dict.""" + return {k: v for k, v in params.items() if v is not None} diff --git a/intentkit/skills/enso/README.md b/intentkit/skills/enso/README.md new file mode 100644 index 00000000..d5300546 
--- /dev/null +++ b/intentkit/skills/enso/README.md @@ -0,0 +1,75 @@ +# Enso Finance Skills + +Integration with Enso Finance API for DeFi protocols, portfolio management, and yield optimization. + +## Skills + +| Skill | Description | +|-------|-------------| +| `enso_get_networks` | List supported networks | +| `enso_get_tokens` | Get token info (APY, symbol, address) | +| `enso_get_prices` | Get token prices | +| `enso_get_wallet_balances` | Get wallet token balances | +| `enso_get_wallet_approvals` | Get token spend approvals | +| `enso_wallet_approve` | Broadcast approval transactions | +| `enso_route_shortcut` | Broadcast route transactions | +| `enso_get_best_yield` | Find best yield options across protocols | + +## Configuration + +```yaml +# Agent configuration example +skills: + enso: + enabled: true + api_token: "${ENSO_API_TOKEN}" # Optional if set at system level + main_tokens: ["USDC", "ETH", "USDT"] + states: + get_networks: public + get_tokens: public + get_prices: public + get_best_yield: public + # Sensitive operations should be private or disabled + get_wallet_approvals: private + get_wallet_balances: private + wallet_approve: private + route_shortcut: disabled +``` + +## Get Best Yield Skill + +Finds highest yield options for a token across protocols. Default: USDC on Base network. + +### Parameters + +- `token_symbol`: Token symbol (default: "USDC") +- `chain_id`: Blockchain network ID (default: 8453 for Base) +- `top_n`: Number of options to return (default: 5) + +### Example + +``` +# Query: What are the best USDC yield options on Base? + +# Response format: +{ + "best_options": [ + { + "protocol_name": "Protocol Name", + "token_name": "Token Name", + "apy": 12.5, + "tvl": 5000000, + "underlying_tokens": ["USDC"] + }, + // Additional results... + ], + "token_symbol": "USDC", + "chain_name": "Base" +} +``` + +The skill fetches protocols, retrieves token data, filters for the target token, and sorts by APY. + +## Authentication + +Requires an Enso API token in agent config or system config. 
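+
+A sketch of the lookup order, mirroring `EnsoBaseTool.get_api_key` later in this PR (`resolve_enso_token` itself is illustrative, not part of the codebase):
+
+```python
+def resolve_enso_token(skill_config: dict, system_config: dict) -> str:
+    # "platform" keys are read from system config; otherwise fall back
+    # to an owner-provided api_token in the agent's skill config.
+    if skill_config.get("api_key_provider") == "platform":
+        return system_config["enso_api_token"]
+    if skill_config.get("api_token"):
+        return skill_config["api_token"]
+    raise ValueError("No Enso API token configured")
+```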
\ No newline at end of file diff --git a/intentkit/skills/enso/__init__.py b/intentkit/skills/enso/__init__.py new file mode 100644 index 00000000..28158774 --- /dev/null +++ b/intentkit/skills/enso/__init__.py @@ -0,0 +1,114 @@ +"""Enso skills.""" + +import logging +from typing import List, NotRequired, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.enso.base import EnsoBaseTool +from intentkit.skills.enso.best_yield import EnsoGetBestYield +from intentkit.skills.enso.networks import EnsoGetNetworks +from intentkit.skills.enso.prices import EnsoGetPrices +from intentkit.skills.enso.route import EnsoRouteShortcut +from intentkit.skills.enso.tokens import EnsoGetTokens +from intentkit.skills.enso.wallet import ( + EnsoGetWalletApprovals, + EnsoGetWalletBalances, + EnsoWalletApprove, +) + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + get_networks: SkillState + get_tokens: SkillState + get_prices: SkillState + get_wallet_approvals: SkillState + get_wallet_balances: SkillState + wallet_approve: SkillState + route_shortcut: SkillState + get_best_yield: SkillState + + +class Config(SkillConfig): + """Configuration for Enso skills.""" + + states: SkillStates + api_token: NotRequired[str] + main_tokens: List[str] + + +async def get_skills( + config: Config, + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[EnsoBaseTool]: + """Get all Enso skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_enso_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_enso_skill( + name: str, + skill_store: SkillStoreABC, +) -> EnsoBaseTool: + """Get an Enso skill by name. 
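+
+    A minimal usage sketch (assumes a concrete SkillStoreABC instance named skill_store):
+
+        skill = get_enso_skill("get_networks", skill_store)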
+ + Args: + name: The name of the skill to get + skill_store: The skill store for persisting data + + Returns: + The requested Enso skill + """ + if name == "get_networks": + return EnsoGetNetworks( + skill_store=skill_store, + ) + if name == "get_tokens": + return EnsoGetTokens( + skill_store=skill_store, + ) + if name == "get_prices": + return EnsoGetPrices( + skill_store=skill_store, + ) + if name == "get_wallet_approvals": + return EnsoGetWalletApprovals( + skill_store=skill_store, + ) + if name == "get_wallet_balances": + return EnsoGetWalletBalances( + skill_store=skill_store, + ) + if name == "wallet_approve": + return EnsoWalletApprove( + skill_store=skill_store, + ) + if name == "route_shortcut": + return EnsoRouteShortcut( + skill_store=skill_store, + ) + if name == "get_best_yield": + return EnsoGetBestYield( + skill_store=skill_store, + ) + else: + logger.warning(f"Unknown Enso skill: {name}") + return None diff --git a/intentkit/skills/enso/abi/__init__.py b/intentkit/skills/enso/abi/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/enso/abi/approval.py b/intentkit/skills/enso/abi/approval.py new file mode 100644 index 00000000..d277bb93 --- /dev/null +++ b/intentkit/skills/enso/abi/approval.py @@ -0,0 +1,279 @@ +ABI_APPROVAL = [ + { + "inputs": [ + {"internalType": "address", "name": "owner_", "type": "address"}, + {"internalType": "address", "name": "executor_", "type": "address"}, + ], + "stateMutability": "nonpayable", + "type": "constructor", + }, + { + "inputs": [ + {"internalType": "uint256", "name": "command_index", "type": "uint256"}, + {"internalType": "address", "name": "target", "type": "address"}, + {"internalType": "string", "name": "message", "type": "string"}, + ], + "name": "ExecutionFailed", + "type": "error", + }, + {"inputs": [], "name": "InvalidAccount", "type": "error"}, + {"inputs": [], "name": "InvalidArrayLength", "type": "error"}, + {"inputs": [], "name": "NotPermitted", "type": "error"}, + {"inputs": [], "name": "UnsafeSetting", "type": "error"}, + {"inputs": [], "name": "WithdrawFailed", "type": "error"}, + { + "anonymous": False, + "inputs": [ + { + "indexed": False, + "internalType": "bytes32", + "name": "role", + "type": "bytes32", + }, + { + "indexed": False, + "internalType": "address", + "name": "account", + "type": "address", + }, + { + "indexed": False, + "internalType": "bool", + "name": "permission", + "type": "bool", + }, + ], + "name": "PermissionSet", + "type": "event", + }, + { + "inputs": [], + "name": "EXECUTOR_ROLE", + "outputs": [{"internalType": "bytes32", "name": "", "type": "bytes32"}], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": [], + "name": "MODULE_ROLE", + "outputs": [{"internalType": "bytes32", "name": "", "type": "bytes32"}], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": [], + "name": "OWNER_ROLE", + "outputs": [{"internalType": "bytes32", "name": "", "type": "bytes32"}], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": [ + {"internalType": "bytes32[]", "name": "commands", "type": "bytes32[]"}, + {"internalType": "bytes[]", "name": "state", "type": "bytes[]"}, + ], + "name": "executeShortcut", + "outputs": [{"internalType": "bytes[]", "name": "", "type": "bytes[]"}], + "stateMutability": "payable", + "type": "function", + }, + { + "inputs": [], + "name": "executor", + "outputs": [{"internalType": "address", "name": "", "type": "address"}], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": 
[ + {"internalType": "bytes32", "name": "role", "type": "bytes32"}, + {"internalType": "address", "name": "account", "type": "address"}, + ], + "name": "getPermission", + "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": [ + {"internalType": "address", "name": "", "type": "address"}, + {"internalType": "address", "name": "", "type": "address"}, + {"internalType": "uint256[]", "name": "", "type": "uint256[]"}, + {"internalType": "uint256[]", "name": "", "type": "uint256[]"}, + {"internalType": "bytes", "name": "", "type": "bytes"}, + ], + "name": "onERC1155BatchReceived", + "outputs": [{"internalType": "bytes4", "name": "", "type": "bytes4"}], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "address", "name": "", "type": "address"}, + {"internalType": "address", "name": "", "type": "address"}, + {"internalType": "uint256", "name": "", "type": "uint256"}, + {"internalType": "uint256", "name": "", "type": "uint256"}, + {"internalType": "bytes", "name": "", "type": "bytes"}, + ], + "name": "onERC1155Received", + "outputs": [{"internalType": "bytes4", "name": "", "type": "bytes4"}], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "address", "name": "", "type": "address"}, + {"internalType": "address", "name": "", "type": "address"}, + {"internalType": "uint256", "name": "", "type": "uint256"}, + {"internalType": "bytes", "name": "", "type": "bytes"}, + ], + "name": "onERC721Received", + "outputs": [{"internalType": "bytes4", "name": "", "type": "bytes4"}], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "enum MinimalWallet.Protocol", + "name": "protocol", + "type": "uint8", + }, + {"internalType": "address", "name": "token", "type": "address"}, + { + "internalType": "address[]", + "name": "operators", + "type": "address[]", + }, + ], + "internalType": "struct MinimalWallet.ApprovalNote[]", + "name": "notes", + "type": "tuple[]", + } + ], + "name": "revokeApprovals", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC1155", "name": "erc1155", "type": "address"}, + {"internalType": "address[]", "name": "operators", "type": "address[]"}, + ], + "name": "revokeERC1155Approvals", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC20", "name": "erc20", "type": "address"}, + {"internalType": "address[]", "name": "operators", "type": "address[]"}, + ], + "name": "revokeERC20Approvals", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC721", "name": "erc721", "type": "address"}, + {"internalType": "address[]", "name": "operators", "type": "address[]"}, + ], + "name": "revokeERC721Approvals", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "bytes32", "name": "role", "type": "bytes32"}, + {"internalType": "address", "name": "account", "type": "address"}, + {"internalType": "bool", "name": "permission", "type": "bool"}, + ], + "name": "setPermission", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [{"internalType": "bytes4", "name": "interfaceId", "type": "bytes4"}], + "name": "supportsInterface", 
+ "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "enum MinimalWallet.Protocol", + "name": "protocol", + "type": "uint8", + }, + {"internalType": "address", "name": "token", "type": "address"}, + {"internalType": "uint256[]", "name": "ids", "type": "uint256[]"}, + { + "internalType": "uint256[]", + "name": "amounts", + "type": "uint256[]", + }, + ], + "internalType": "struct MinimalWallet.TransferNote[]", + "name": "notes", + "type": "tuple[]", + } + ], + "name": "withdraw", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC1155", "name": "erc1155", "type": "address"}, + {"internalType": "uint256[]", "name": "ids", "type": "uint256[]"}, + {"internalType": "uint256[]", "name": "amounts", "type": "uint256[]"}, + ], + "name": "withdrawERC1155s", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + { + "internalType": "contract IERC20[]", + "name": "erc20s", + "type": "address[]", + }, + {"internalType": "uint256[]", "name": "amounts", "type": "uint256[]"}, + ], + "name": "withdrawERC20s", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC721", "name": "erc721", "type": "address"}, + {"internalType": "uint256[]", "name": "ids", "type": "uint256[]"}, + ], + "name": "withdrawERC721s", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + { + "inputs": [{"internalType": "uint256", "name": "amount", "type": "uint256"}], + "name": "withdrawETH", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function", + }, + {"stateMutability": "payable", "type": "receive"}, +] diff --git a/intentkit/skills/enso/abi/erc20.py b/intentkit/skills/enso/abi/erc20.py new file mode 100644 index 00000000..8a3d45f3 --- /dev/null +++ b/intentkit/skills/enso/abi/erc20.py @@ -0,0 +1,14 @@ +ABI_ERC20 = [ + { + "constant": False, + "inputs": [ + {"internalType": "address", "name": "spender", "type": "address"}, + {"internalType": "uint256", "name": "value", "type": "uint256"}, + ], + "name": "approve", + "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], + "payable": False, + "stateMutability": "nonpayable", + "type": "function", + } +] diff --git a/intentkit/skills/enso/abi/route.py b/intentkit/skills/enso/abi/route.py new file mode 100644 index 00000000..ee76ce3e --- /dev/null +++ b/intentkit/skills/enso/abi/route.py @@ -0,0 +1,129 @@ +ABI_ROUTE = [ + { + "inputs": [{"internalType": "address", "name": "owner_", "type": "address"}], + "stateMutability": "nonpayable", + "type": "constructor", + }, + { + "inputs": [{"internalType": "address", "name": "token", "type": "address"}], + "name": "AmountTooLow", + "type": "error", + }, + { + "inputs": [{"internalType": "address", "name": "token", "type": "address"}], + "name": "Duplicate", + "type": "error", + }, + { + "inputs": [ + {"internalType": "uint256", "name": "value", "type": "uint256"}, + {"internalType": "uint256", "name": "amount", "type": "uint256"}, + ], + "name": "WrongValue", + "type": "error", + }, + { + "inputs": [], + "name": "enso", + "outputs": [ + {"internalType": "contract EnsoShortcuts", "name": "", "type": "address"} + ], + "stateMutability": "view", + "type": "function", + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "contract IERC20", + "name": 
"token", + "type": "address", + }, + {"internalType": "uint256", "name": "amount", "type": "uint256"}, + ], + "internalType": "struct Token[]", + "name": "tokensIn", + "type": "tuple[]", + }, + {"internalType": "bytes32[]", "name": "commands", "type": "bytes32[]"}, + {"internalType": "bytes[]", "name": "state", "type": "bytes[]"}, + ], + "name": "routeMulti", + "outputs": [ + {"internalType": "bytes[]", "name": "returnData", "type": "bytes[]"} + ], + "stateMutability": "payable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC20", "name": "tokenIn", "type": "address"}, + {"internalType": "uint256", "name": "amountIn", "type": "uint256"}, + {"internalType": "bytes32[]", "name": "commands", "type": "bytes32[]"}, + {"internalType": "bytes[]", "name": "state", "type": "bytes[]"}, + ], + "name": "routeSingle", + "outputs": [ + {"internalType": "bytes[]", "name": "returnData", "type": "bytes[]"} + ], + "stateMutability": "payable", + "type": "function", + }, + { + "inputs": [ + { + "components": [ + { + "internalType": "contract IERC20", + "name": "token", + "type": "address", + }, + {"internalType": "uint256", "name": "amount", "type": "uint256"}, + ], + "internalType": "struct Token[]", + "name": "tokensIn", + "type": "tuple[]", + }, + { + "components": [ + { + "internalType": "contract IERC20", + "name": "token", + "type": "address", + }, + {"internalType": "uint256", "name": "amount", "type": "uint256"}, + ], + "internalType": "struct Token[]", + "name": "tokensOut", + "type": "tuple[]", + }, + {"internalType": "address", "name": "receiver", "type": "address"}, + {"internalType": "bytes32[]", "name": "commands", "type": "bytes32[]"}, + {"internalType": "bytes[]", "name": "state", "type": "bytes[]"}, + ], + "name": "safeRouteMulti", + "outputs": [ + {"internalType": "bytes[]", "name": "returnData", "type": "bytes[]"} + ], + "stateMutability": "payable", + "type": "function", + }, + { + "inputs": [ + {"internalType": "contract IERC20", "name": "tokenIn", "type": "address"}, + {"internalType": "contract IERC20", "name": "tokenOut", "type": "address"}, + {"internalType": "uint256", "name": "amountIn", "type": "uint256"}, + {"internalType": "uint256", "name": "minAmountOut", "type": "uint256"}, + {"internalType": "address", "name": "receiver", "type": "address"}, + {"internalType": "bytes32[]", "name": "commands", "type": "bytes32[]"}, + {"internalType": "bytes[]", "name": "state", "type": "bytes[]"}, + ], + "name": "safeRouteSingle", + "outputs": [ + {"internalType": "bytes[]", "name": "returnData", "type": "bytes[]"} + ], + "stateMutability": "payable", + "type": "function", + }, +] diff --git a/intentkit/skills/enso/base.py b/intentkit/skills/enso/base.py new file mode 100644 index 00000000..3da96706 --- /dev/null +++ b/intentkit/skills/enso/base.py @@ -0,0 +1,79 @@ +from typing import Optional, Type + +from cdp import EvmServerAccount +from coinbase_agentkit import CdpEvmServerWalletProvider +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.graph import AgentContext +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.clients import CdpClient, get_cdp_client +from intentkit.skills.base import IntentKitSkill +from intentkit.utils.chain import ChainProvider, NetworkId + +base_url = "https://api.enso.finance" +default_chain_id = int(NetworkId.BaseMainnet) + + +class EnsoBaseTool(IntentKitSkill): + """Base class for Enso tools.""" + + name: str = Field(description="The name of the 
tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + async def get_account(self, context: AgentContext) -> Optional[EvmServerAccount]: + """Get the account object from the CDP client. + + Args: + context: The skill context containing agent information. + + Returns: + Optional[EvmServerAccount]: The account object if available. + """ + client: CdpClient = await get_cdp_client(context.agent.id, self.skill_store) + return await client.get_account() + + async def get_wallet_provider( + self, context: AgentContext + ) -> Optional[CdpEvmServerWalletProvider]: + """Get the wallet provider from the CDP client. + + Args: + context: The skill context containing agent information. + + Returns: + Optional[CdpEvmServerWalletProvider]: The wallet provider if available. + """ + client: CdpClient = await get_cdp_client(context.agent.id, self.skill_store) + return await client.get_wallet_provider() + + def get_chain_provider(self, context: AgentContext) -> Optional[ChainProvider]: + return self.skill_store.get_system_config("chain_provider") + + def get_main_tokens(self, context: AgentContext) -> list[str]: + skill_config = context.agent.skill_config(self.category) + if "main_tokens" in skill_config and skill_config["main_tokens"]: + return skill_config["main_tokens"] + return [] + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + return self.skill_store.get_system_config("enso_api_token") + # for backward compatibility, may only have api_token in skill_config + elif skill_config.get("api_token"): + return skill_config.get("api_token") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_token in config" + ) + + @property + def category(self) -> str: + return "enso" diff --git a/intentkit/skills/enso/best_yield.py b/intentkit/skills/enso/best_yield.py new file mode 100644 index 00000000..ea409918 --- /dev/null +++ b/intentkit/skills/enso/best_yield.py @@ -0,0 +1,283 @@ +from typing import List, Optional, Type + +import httpx +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.skills.enso.base import ( + EnsoBaseTool, + base_url, +) +from intentkit.utils.chain import NetworkId + +# Chain ID for Base Mainnet +BASE_CHAIN_ID = int(NetworkId.BaseMainnet) + + +class EnsoGetBestYieldInput(BaseModel): + """Input for finding the best yield for a token on a specific chain.""" + + token_symbol: str = Field( + "USDC", + description="Symbol of the token to find the best yield for (e.g., 'USDC', 'ETH', 'USDT')", + ) + chain_id: int = Field( + BASE_CHAIN_ID, + description="The blockchain chain ID. 
Default is Base Mainnet (8453)", + ) + top_n: int = Field( + 5, + description="Number of top yield options to return", + ) + + +class YieldOption(BaseModel): + """Represents a yield option for a token.""" + + protocol_name: str = Field( + None, description="Name of the protocol offering the yield" + ) + protocol_slug: str = Field(None, description="Slug of the protocol") + token_name: str = Field(None, description="Name of the yield-bearing token") + token_symbol: str = Field(None, description="Symbol of the yield-bearing token") + token_address: str = Field( + None, description="Contract address of the yield-bearing token" + ) + primary_address: str = Field( + None, description="Primary contract address for interacting with the protocol" + ) + apy: float = Field(None, description="Annual Percentage Yield") + tvl: Optional[float] = Field(None, description="Total Value Locked in the protocol") + underlying_tokens: List[str] = Field( + [], description="List of underlying token symbols" + ) + + +class EnsoGetBestYieldOutput(BaseModel): + """Output containing the best yield options.""" + + best_options: List[YieldOption] = Field( + [], description="List of best yield options sorted by APY (descending)" + ) + token_symbol: str = Field(None, description="Symbol of the token searched for") + chain_id: int = Field(None, description="Chain ID searched") + chain_name: str = Field(None, description="Name of the chain searched") + + +class EnsoGetBestYield(EnsoBaseTool): + """ + Tool for finding the best yield options for a specific token across all protocols on a blockchain network. + This tool analyzes yield data from various DeFi protocols and returns the top options sorted by APY. + """ + + name: str = "enso_get_best_yield" + description: str = ( + "Find the best yield options for a specific token (default: USDC) across all protocols " + "on a blockchain network (default: Base). Results are sorted by APY in descending order." + ) + args_schema: Type[BaseModel] = EnsoGetBestYieldInput + + async def _arun( + self, + token_symbol: str = "USDC", + chain_id: int = BASE_CHAIN_ID, + top_n: int = 5, + **kwargs, + ) -> EnsoGetBestYieldOutput: + """ + Run the tool to find the best yield options. + + Args: + token_symbol (str): Symbol of the token to find the best yield for (default: USDC) + chain_id (int): The chain id of the network (default: Base Mainnet) + top_n (int): Number of top yield options to return + + Returns: + EnsoGetBestYieldOutput: A structured output containing the top yield options. + + Raises: + ToolException: If there's an error accessing the Enso API. 
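+
+        An illustrative call (8453 is the Base Mainnet chain ID used as the default):
+
+            out = await skill._arun(token_symbol="USDC", chain_id=8453, top_n=3)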
+
+        """
+        # Resolve the API key via the shared base-class helper
+        api_token = self.get_api_key()
+
+        if not api_token:
+            raise ToolException("No API token found for Enso Finance")
+
+        # Get the chain name for the given chain ID
+        chain_name = await self._get_chain_name(api_token, chain_id)
+
+        # Get all protocols on the specified chain
+        protocols = await self._get_protocols(api_token, chain_id)
+
+        # Collect all yield options from all protocols
+        all_yield_options = []
+
+        for protocol in protocols:
+            protocol_slug = protocol.get("slug")
+            protocol_name = protocol.get("name")
+
+            # Get yield-bearing tokens for this protocol
+            tokens = await self._get_protocol_tokens(
+                api_token, chain_id, protocol_slug, token_symbol
+            )
+
+            # Process tokens to extract yield options
+            for token in tokens:
+                # Skip tokens without APY information
+                if token.get("apy") is None:
+                    continue
+
+                # Check whether the target token appears among the underlying tokens
+                has_target_token = False
+                underlying_token_symbols = []
+
+                if token.get("underlyingTokens"):
+                    for underlying in token.get("underlyingTokens", []):
+                        underlying_symbol = underlying.get("symbol")
+                        underlying_token_symbols.append(underlying_symbol)
+                        if (
+                            underlying_symbol
+                            and underlying_symbol.upper() == token_symbol.upper()
+                        ):
+                            has_target_token = True
+
+                # Skip if the token doesn't have the target token as underlying
+                if not has_target_token and token.get("symbol") != token_symbol.upper():
+                    continue
+
+                # Create a yield option
+                yield_option = YieldOption(
+                    protocol_name=protocol_name,
+                    protocol_slug=protocol_slug,
+                    token_name=token.get("name"),
+                    token_symbol=token.get("symbol"),
+                    token_address=token.get("address"),
+                    primary_address=token.get("primaryAddress"),
+                    apy=token.get("apy"),
+                    tvl=token.get("tvl"),
+                    underlying_tokens=underlying_token_symbols,
+                )
+
+                all_yield_options.append(yield_option)
+
+        # Sort yield options by APY (descending)
+        sorted_options = sorted(all_yield_options, key=lambda x: x.apy, reverse=True)
+
+        # Take the top N options
+        top_options = sorted_options[:top_n]
+
+        return EnsoGetBestYieldOutput(
+            best_options=top_options,
+            token_symbol=token_symbol,
+            chain_id=chain_id,
+            chain_name=chain_name,
+        )
+
+    async def _get_chain_name(self, api_token: str, chain_id: int) -> str:
+        """
+        Get the name of a chain by its ID.
+
+        Args:
+            api_token (str): The Enso API token
+            chain_id (int): The chain ID to look up
+
+        Returns:
+            str: The name of the chain, or "Unknown" if not found
+        """
+        url = f"{base_url}/api/v1/networks"
+
+        headers = {
+            "accept": "application/json",
+            "Authorization": f"Bearer {api_token}",
+        }
+
+        async with httpx.AsyncClient() as client:
+            try:
+                response = await client.get(url, headers=headers)
+                response.raise_for_status()
+                networks = response.json()
+
+                for network in networks:
+                    if network.get("id") == chain_id:
+                        return network.get("name", "Unknown")
+
+                return "Unknown"
+            except Exception:
+                return "Unknown"
+
+    async def _get_protocols(self, api_token: str, chain_id: int) -> list:
+        """
+        Get all protocols available on a specific chain.
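+
+        An illustrative call:
+
+            protocols = await self._get_protocols(api_token, chain_id=8453)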
+ + Args: + api_token (str): The Enso API token + chain_id (int): Chain ID to filter protocols by + + Returns: + list: List of protocol data + """ + url = f"{base_url}/api/v1/protocols" + + headers = { + "accept": "application/json", + "Authorization": f"Bearer {api_token}", + } + + params = {"chainId": chain_id} + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() + return response.json() + except httpx.RequestError as req_err: + raise ToolException( + f"Request error from Enso API: {req_err}" + ) from req_err + except httpx.HTTPStatusError as http_err: + raise ToolException( + f"HTTP error from Enso API: {http_err}" + ) from http_err + except Exception as e: + raise ToolException(f"Error from Enso API: {e}") from e + + async def _get_protocol_tokens( + self, api_token: str, chain_id: int, protocol_slug: str, token_symbol: str + ) -> list: + """ + Get tokens for a specific protocol that involve the target token. + + Args: + api_token (str): The Enso API token + chain_id (int): Chain ID for the tokens + protocol_slug (str): Protocol slug to filter tokens by + token_symbol (str): Symbol of the token to search for + + Returns: + list: List of token data + """ + url = f"{base_url}/api/v1/tokens" + + headers = { + "accept": "application/json", + "Authorization": f"Bearer {api_token}", + } + + params = { + "chainId": chain_id, + "protocolSlug": protocol_slug, + "includeMetadata": True, + } + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() + return response.json().get("data", []) + except httpx.RequestError: + return [] + except httpx.HTTPStatusError: + return [] + except Exception: + return [] diff --git a/intentkit/skills/enso/enso.jpg b/intentkit/skills/enso/enso.jpg new file mode 100644 index 00000000..72ddef37 Binary files /dev/null and b/intentkit/skills/enso/enso.jpg differ diff --git a/intentkit/skills/enso/networks.py b/intentkit/skills/enso/networks.py new file mode 100644 index 00000000..887ce7ae --- /dev/null +++ b/intentkit/skills/enso/networks.py @@ -0,0 +1,102 @@ +import logging +from typing import Type + +import httpx +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from .base import EnsoBaseTool, base_url + +logger = logging.getLogger(__name__) + + +class EnsoGetNetworksInput(BaseModel): + """ + Input model for retrieving networks. + """ + + +class ConnectedNetwork(BaseModel): + """ + Represents a single network connection. + """ + + id: int | None = Field(None, description="Unique identifier of the network") + name: str | None = Field(None, description="Name of the network") + isConnected: bool | None = Field( + None, description="Indicates if the network is connected" + ) + + +class EnsoGetNetworksOutput(BaseModel): + """ + Output model for retrieving networks. + """ + + res: list[ConnectedNetwork] | None = Field( + None, description="Response containing networks and metadata" + ) + + +class EnsoGetNetworks(EnsoBaseTool): + """ + Tool for retrieving networks and their corresponding chainId, the output should be kept. + """ + + name: str = "enso_get_networks" + description: str = "Retrieve networks supported by the Enso API" + args_schema: Type[BaseModel] = EnsoGetNetworksInput + + async def _arun(self, **kwargs) -> EnsoGetNetworksOutput: + """ + Function to request the list of supported networks and their chain id and name. 
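+
+        An illustrative call; the fetched list is also cached in agent skill data:
+
+            out = await skill._arun()
+            chain_ids = [n.id for n in out.res]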
+
+        Returns:
+            EnsoGetNetworksOutput: A structured output containing the network list or an error message.
+        """
+        url = f"{base_url}/api/v1/networks"
+
+        context = self.get_context()
+        # Resolve the API key via the shared base-class helper; never log the key itself
+        api_token = self.get_api_key()
+        logger.debug("requesting supported networks from Enso")
+        headers = {
+            "accept": "application/json",
+            "Authorization": f"Bearer {api_token}",
+        }
+
+        async with httpx.AsyncClient() as client:
+            try:
+                # Send the GET request
+                response = await client.get(url, headers=headers)
+                response.raise_for_status()
+
+                # Parse the response JSON into ConnectedNetwork models
+                json_dict = response.json()
+
+                networks = []
+                networks_memory = {}
+                for item in json_dict:
+                    network = ConnectedNetwork(**item)
+                    networks.append(network)
+                    networks_memory[str(network.id)] = network.model_dump(
+                        exclude_none=True
+                    )
+
+                await self.skill_store.save_agent_skill_data(
+                    context.agent_id,
+                    "enso_get_networks",
+                    "networks",
+                    networks_memory,
+                )
+
+                return EnsoGetNetworksOutput(res=networks)
+            except httpx.RequestError as req_err:
+                raise ToolException(
+                    f"request error from Enso API: {req_err}"
+                ) from req_err
+            except httpx.HTTPStatusError as http_err:
+                raise ToolException(
+                    f"http error from Enso API: {http_err}"
+                ) from http_err
+            except Exception as e:
+                raise ToolException(f"error from Enso API: {e}") from e
diff --git a/intentkit/skills/enso/prices.py b/intentkit/skills/enso/prices.py
new file mode 100644
index 00000000..4ab278fd
--- /dev/null
+++ b/intentkit/skills/enso/prices.py
@@ -0,0 +1,89 @@
+from typing import Type
+
+import httpx
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from .base import EnsoBaseTool, base_url, default_chain_id
+
+
+class EnsoGetPricesInput(BaseModel):
+    chainId: int = Field(
+        default_chain_id, description="Blockchain chain ID of the token"
+    )
+    address: str = Field(
+        "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+        description="Contract address of the token",
+    )
+
+
+class EnsoGetPricesOutput(BaseModel):
+    decimals: int | None = Field(None, ge=0, description="Number of decimals")
+    price: float | None = Field(None, gt=0, description="Price in the smallest unit")
+    address: str | None = Field(None, description="Contract address")
+    symbol: str | None = Field(None, description="Token symbol")
+    timestamp: int | None = Field(None, ge=0, description="Timestamp in seconds")
+    chainId: int | None = Field(None, ge=0, description="Chain ID")
+
+
+class EnsoGetPrices(EnsoBaseTool):
+    """
+    Tool that fetches the USD price of a token on a given blockchain.
+
+    Attributes:
+        name (str): Name of the tool, specifically "enso_get_prices".
+        description (str): Comprehensive description of the tool's purpose and functionality.
+        args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters.
+    """
+
+    name: str = "enso_get_prices"
+    description: str = "Retrieve the price of a token by chain ID and contract address"
+    args_schema: Type[BaseModel] = EnsoGetPricesInput
+
+    async def _arun(
+        self,
+        address: str,
+        chainId: int = default_chain_id,
+        **kwargs,
+    ) -> EnsoGetPricesOutput:
+        """
+        Asynchronous function to request the token price from the API.
+
+        Args:
+            chainId (int): The blockchain's chain ID.
+            address (str): Contract address of the token.
+
+        Returns:
+            EnsoGetPricesOutput: Token price response or error message.
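+
+        An illustrative call; the 0xeeee... address is the native-ETH placeholder used as the default above:
+
+            out = await skill._arun(
+                address="0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
+                chainId=8453,
+            )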
+ """ + url = f"{base_url}/api/v1/prices/{str(chainId)}/{address}" + + context = self.get_context() + api_token = self.get_api_token(context) + + headers = { + "accept": "application/json", + "Authorization": f"Bearer {api_token}", + } + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers) + response.raise_for_status() + json_dict = response.json() + + # Parse the response into a `PriceInfo` object + res = EnsoGetPricesOutput(**json_dict) + + # Return the parsed response + return res + except httpx.RequestError as req_err: + raise ToolException( + f"request error from Enso API: {req_err}" + ) from req_err + except httpx.HTTPStatusError as http_err: + raise ToolException( + f"http error from Enso API: {http_err}" + ) from http_err + except Exception as e: + raise ToolException(f"error from Enso API: {e}") from e diff --git a/intentkit/skills/enso/route.py b/intentkit/skills/enso/route.py new file mode 100644 index 00000000..49e00319 --- /dev/null +++ b/intentkit/skills/enso/route.py @@ -0,0 +1,298 @@ +from typing import Type + +import httpx +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.skills.enso.networks import EnsoGetNetworks + +from .base import EnsoBaseTool, base_url, default_chain_id + + +class EnsoRouteShortcutInput(BaseModel): + """ + Input model for finding best route for swap or deposit. + """ + + broadcast_requested: bool = Field( + False, + description="Whether to broadcast the transaction or not, this is false by default.", + ) + chainId: int = Field( + default_chain_id, + description="(Optional) Chain ID of the network to execute the transaction on. the default value is the chain_id extracted from networks according to tokenIn and tokenOut", + ) + amountIn: list[int] = Field( + description="Amount of tokenIn to swap in wei, you should multiply user's requested value by token decimals." + ) + tokenIn: list[str] = Field( + description="Ethereum address of the token to swap or enter into a position from (For ETH, use 0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee)." + ) + tokenOut: list[str] = Field( + description="Ethereum address of the token to swap or enter into a position to (For ETH, use 0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee)." + ) + # Optional inputs + # routingStrategy: Literal["router", "delegate", "ensowallet", None] = Field( + # None, + # description="(Optional) Routing strategy to use. Options: 'ensowallet', 'router', 'delegate'.", + # ) + # receiver: str | None = Field( + # None, description="(Optional) Ethereum address of the receiver of the tokenOut." + # ) + # spender: str | None = Field( + # None, description="(Optional) Ethereum address of the spender of the tokenIn." + # ) + # amountOut: list[str] | None = Field( + # None, description="(Optional) Amount of tokenOut to receive." + # ) + # minAmountOut: list[str] | None = Field( + # None, + # description="(Optional) Minimum amount out in wei. If specified, slippage should not be specified.", + # ) + # slippage: str | None = Field( + # None, + # description="(Optional) Slippage in basis points (1/10000). 
If specified, minAmountOut should not be specified.",
+    # )
+    # fee: list[str] | None = Field(
+    #     None,
+    #     description="(Optional) Fee in basis points (1/10000) for each amountIn value.",
+    # )
+    # feeReceiver: str | None = Field(
+    #     None,
+    #     description="(Optional) Ethereum address that will receive the collected fee if fee was provided.",
+    # )
+    # disableRFQs: bool | None = Field(
+    #     None, description="(Optional) Exclude RFQ sources from routes."
+    # )
+    # ignoreAggregators: list[str] | None = Field(
+    #     None, description="(Optional) List of swap aggregators to ignore."
+    # )
+    # ignoreStandards: list[str] | None = Field(
+    #     None, description="(Optional) List of standards to ignore."
+    # )
+    # variableEstimates: dict | None = Field(
+    #     None, description="Variable estimates for the route."
+    # )
+
+
+class Route(BaseModel):
+    tokenIn: list[str] | None = Field(
+        None,
+        description="Ethereum address of the token to swap or enter into a position from.",
+    )
+    tokenOut: list[str] | None = Field(
+        None,
+        description="Ethereum address of the token to swap or enter into a position to.",
+    )
+    protocol: str | None = Field(None, description="Protocol used for finding route.")
+    action: str | None = Field(
+        None, description="Action performed by the route (e.g. swap)."
+    )
+    # internalRoutes: list[str] | None = Field(
+    #     None, description="Internal routes needed for the route."
+    # )
+
+
+class EnsoRouteShortcutOutput(BaseModel):
+    """
+    Output model for broadcasting a transaction.
+    """
+
+    network: str | None = Field(
+        None,
+        description="The network name of the transaction.",
+    )
+    amountOut: str | dict | None = Field(
+        None,
+        description="The final calculated amountOut; the tool divides the raw value by the tokenOut decimals before returning it.",
+    )
+    priceImpact: float | None = Field(
+        None,
+        description="Price impact in basis points, it is null if USD price is not found.",
+    )
+    txHash: str | None = Field(
+        None, description="The transaction hash of the broadcasted transaction."
+    )
+    # gas: str | None = Field(
+    #     None,
+    #     description="Estimated gas amount for the transaction.",
+    # )
+    # feeAmount: list[str] | None = Field(
+    #     None,
+    #     description="An array of the fee amounts collected for each tokenIn.",
+    # )
+    # createdAt: int | None = Field(
+    #     None, description="Block number the transaction was created on."
+    # )
+    # route: list[Route] | None = Field(
+    #     None, description="Route that the shortcut will use."
+    # )
+
+    # def __str__(self):
+    #     """
+    #     Returns the summary attribute as a string.
+    #     """
+    #     return f"network:{self.network}, amount out: {self.amountOut}, price impact: {self.priceImpact}, tx hash: {self.txHash}"
+
+
+class EnsoRouteShortcut(EnsoBaseTool):
+    """
+    This tool finds the optimal execution route path for a swap or deposit across a multitude of DeFi protocols such as liquidity pools,
+    lending platforms, automated market makers, yield optimizers, and more. This allows for maximized capital efficiency
+    and yield optimization, taking into account return rates, gas costs, and slippage.
+
+    Important: the amountOut should be divided by the tokenOut decimals before the result is returned.
+
+    This tool is able to broadcast the transaction to the network if the user explicitly requests it. Otherwise,
+    broadcast_requested is always false.
+
+    Deposit means to supply the underlying token to its parent token (e.g. deposit USDC to receive aBasUSDC).
+
+    Attributes:
+        name (str): Name of the tool, specifically "enso_route_shortcut".
+ description (str): Comprehensive description of the tool's purpose and functionality. + args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters. + """ + + name: str = "enso_route_shortcut" + description: str = "This tool is used specifically for broadcasting a route transaction calldata to the network. It should only be used when the user explicitly requests to broadcast a route transaction with routeId." + args_schema: Type[BaseModel] = EnsoRouteShortcutInput + + async def _arun( + self, + amountIn: list[int], + tokenIn: list[str], + tokenOut: list[str], + chainId: int = default_chain_id, + broadcast_requested: bool = False, + **kwargs, + ) -> EnsoRouteShortcutOutput: + """ + Run the tool to get swap route information. + + Args: + amountIn (list[int]): Amount of tokenIn to swap in wei, you should multiply user's requested value by token decimals. + tokenIn (list[str]): Ethereum address of the token to swap or enter into a position from (For ETH, use 0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee). + tokenOut (list[str]): Ethereum address of the token to swap or enter into a position to (For ETH, use 0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee). + chainId (int): The chain id of the network to be used for swap, deposit and routing. + broadcast_requested (bool): User should ask for broadcasting the transaction explicitly, otherwise it is always false. + + Returns: + EnsoRouteShortcutOutput: The response containing route shortcut information. + """ + + context = self.get_context() + agent_id = context.agent_id + api_token = self.get_api_token(context) + account = await self.get_account(context) + + async with httpx.AsyncClient() as client: + try: + network_name = None + networks = await self.skill_store.get_agent_skill_data( + agent_id, "enso_get_networks", "networks" + ) + + if networks: + network_name = ( + networks.get(str(chainId)).get("name") + if networks.get(str(chainId)) + else None + ) + if network_name is None: + networks = await EnsoGetNetworks( + skill_store=self.skill_store, + ).arun() + + for network in networks.res: + if network.id == chainId: + network_name = network.name + + if not network_name: + raise ToolException( + f"network name not found for chainId: {chainId}" + ) + + headers = { + "accept": "application/json", + "Authorization": f"Bearer {api_token}", + } + + token_decimals = await self.skill_store.get_agent_skill_data( + agent_id, + "enso_get_tokens", + "decimals", + ) + + if not token_decimals: + raise ToolException( + "there is not enough information, enso_get_tokens should be called for data, at first." 
+ ) + + if not token_decimals.get(tokenOut[0]): + raise ToolException( + f"token decimals information for token {tokenOut[0]} not found" + ) + + if not token_decimals.get(tokenIn[0]): + raise ToolException( + f"token decimals information for token {tokenIn[0]} not found" + ) + + url = f"{base_url}/api/v1/shortcuts/route" + + # Prepare query parameters + params = EnsoRouteShortcutInput( + chainId=chainId, + amountIn=amountIn, + tokenIn=tokenIn, + tokenOut=tokenOut, + ).model_dump(exclude_none=True) + + params["fromAddress"] = account.address + + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() # Raise HTTPError for non-2xx responses + json_dict = response.json() + + res = EnsoRouteShortcutOutput(**json_dict) + res.network = network_name + + res.amountOut = str( + float(res.amountOut) / 10 ** token_decimals[tokenOut[0]] + ) + + if broadcast_requested: + # Use the wallet provider to send the transaction + wallet_provider = await self.get_wallet_provider(context) + + # Extract transaction data from the Enso API response + tx_data = json_dict.get("tx", {}) + if tx_data: + # Send the transaction using the wallet provider + tx_hash = wallet_provider.send_transaction( + { + "to": tx_data.get("to"), + "data": tx_data.get("data", "0x"), + "value": tx_data.get("value", 0), + } + ) + + # Wait for transaction confirmation + wallet_provider.wait_for_transaction_receipt(tx_hash) + res.txHash = tx_hash + else: + # For now, return a placeholder transaction hash if no tx data + res.txHash = "0x0000000000000000000000000000000000000000000000000000000000000000" + + return res + + except httpx.RequestError as req_err: + raise ToolException( + f"request error from Enso API: {req_err}" + ) from req_err + except httpx.HTTPStatusError as http_err: + raise ToolException( + f"http error from Enso API: {http_err}" + ) from http_err + except Exception as e: + raise ToolException(f"error from Enso API: {e}") from e diff --git a/intentkit/skills/enso/schema.json b/intentkit/skills/enso/schema.json new file mode 100644 index 00000000..1d465268 --- /dev/null +++ b/intentkit/skills/enso/schema.json @@ -0,0 +1,212 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Enso Finance", + "description": "Integration with Enso Finance API providing DeFi trading and portfolio management capabilities across multiple blockchain networks", + "x-icon": "https://ai.service.crestal.dev/skills/enso/enso.jpg", + "x-tags": [ + "Blockchain" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "get_networks": { + "type": "string", + "title": "Get Networks", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve networks supported by the Enso API", + "default": "disabled" + }, + "get_tokens": { + "type": "string", + "title": "Get Tokens", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Enso Finance Token Information Tool: Retrieves detailed token information from the Enso Finance API, including APY, symbol, address, protocol slug, token type, and underlying tokens.", + "default": "disabled" + }, + "get_prices": { + "type": "string", + "title": "Get Prices", + "enum": [ + 
"disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve the price of a token by chain ID and contract address", + "default": "disabled" + }, + "get_wallet_approvals": { + "type": "string", + "title": "Get Wallet Approvals", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve token spend approvals for a wallet on a specified blockchain network.", + "default": "disabled" + }, + "get_wallet_balances": { + "type": "string", + "title": "Get Wallet Balances", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve token balances of a wallet on a specified blockchain network.", + "default": "disabled" + }, + "wallet_approve": { + "type": "string", + "title": "Wallet Approve", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool is used specifically for broadcasting a ERC20 token spending approval transaction to the network. It should only be used when the user explicitly requests to broadcast an approval transaction with a specific amount for a certain token.", + "default": "disabled" + }, + "route_shortcut": { + "type": "string", + "title": "Route Shortcut", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "This tool is used specifically for broadcasting a route transaction calldata to the network. It should only be used when the user explicitly requests to broadcast a route transaction with routeId.", + "default": "disabled" + }, + "get_best_yield": { + "type": "string", + "title": "Get Best Yield", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Find the best yield options for a specific token (default: USDC) across all protocols on a blockchain network (default: Base). 
Results are sorted by APY in descending order.",
+                    "default": "disabled"
+                }
+            },
+            "description": "States for each Enso skill (disabled, public, or private)"
+        },
+        "main_tokens": {
+            "type": "array",
+            "items": {
+                "type": "string"
+            },
+            "description": "List of main tokens to use",
+            "default": [
+                "ETH",
+                "USDC",
+                "USDT"
+            ]
+        },
+        "api_key_provider": {
+            "type": "string",
+            "title": "API Key Provider",
+            "description": "Provider of the API key",
+            "enum": [
+                "platform",
+                "agent_owner"
+            ],
+            "x-enum-title": [
+                "Nation Hosted",
+                "Owner Provided"
+            ],
+            "default": "platform"
+        }
+    },
+    "required": [
+        "states",
+        "main_tokens",
+        "enabled"
+    ],
+    "if": {
+        "properties": {
+            "api_key_provider": {
+                "const": "agent_owner"
+            }
+        }
+    },
+    "then": {
+        "properties": {
+            "api_token": {
+                "type": "string",
+                "title": "API Token",
+                "description": "Enso API token for authentication"
+            }
+        },
+        "if": {
+            "properties": {
+                "enabled": {
+                    "const": true
+                }
+            }
+        },
+        "then": {
+            "required": [
+                "api_token"
+            ]
+        }
+    },
+    "additionalProperties": true
+}
\ No newline at end of file
diff --git a/intentkit/skills/enso/tokens.py b/intentkit/skills/enso/tokens.py
new file mode 100644
index 00000000..73c69af8
--- /dev/null
+++ b/intentkit/skills/enso/tokens.py
@@ -0,0 +1,220 @@
+from typing import Type
+
+import httpx
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from intentkit.skills.enso.base import (
+    EnsoBaseTool,
+    base_url,
+    default_chain_id,
+)
+
+# Actual Enso output types
+# class UnderlyingToken(BaseModel):
+#     address: str | None = Field(None, description="The address of the token")
+#     chainId: int | None = Field(None, description="The blockchain chain ID")
+#     type: str | None = Field(None, description="The type of the token (e.g., base token)")
+#     decimals: int | None = Field(None, description="The number of decimals for the token")
+#     name: str | None = Field(None, description="The name of the token")
+#     symbol: str | None = Field(None, description="The symbol of the token")
+#     logosUri: list[HttpUrl] | None = Field(None, description="List of URLs to token's logos")
+#
+#
+# class TokenData(BaseModel):
+#     chainId: int | None = Field(None, description="The blockchain chain ID")
+#     address: str | None = Field(None, description="The address of the token")
+#     decimals: int | None = Field(None, description="The number of decimals for the token")
+#     name: str | None = Field(None, description="The name of the token")
+#     symbol: str | None = Field(None, description="The symbol of the token")
+#     logosUri: list[HttpUrl] | None = Field(None, description="List of URLs to token's logos")
+#     type: str | None = Field(None, description="The type of the token (e.g., defi, base, etc.)")
+#     protocolSlug: str | None = Field(None, description="The protocol slug associated with the token")
+#     underlyingTokens: list[UnderlyingToken] | None = Field(None, description="List of underlying tokens")
+#     primaryAddress: str | None = Field(None, description="The primary address associated with the token")
+#     apy: float | None = Field(None, description="The annual percentage yield (APY) for the token")
+#
+#
+# class MetaData(BaseModel):
+#     total: int | None = Field(None, description="Total number of records")
+#     lastPage: int | None = Field(None, description="Last page of the data")
+#     currentPage: int | None = Field(None, description="Current page of the data")
+#     perPage: int | None = Field(None, description="Number of records per page")
+#     prev: int | None = Field(None, 
description="Previous page number, if applicable") +# next: int | None = Field(None, description="Next page number, if applicable") +# +# +# class TokenResponse(BaseModel): +# data: list[TokenData] | None = Field(None, description="List of token data") +# meta: MetaData | None = Field(None, description="Metadata regarding pagination") + + +class EnsoGetTokensInput(BaseModel): + chainId: int = Field( + default_chain_id, + description="The blockchain chain ID", + ) + protocolSlug: str | None = Field( + None, + description="The protocol slug (e.g., 'aave-v2', 'aave-v3', 'compound-v2')", + ) + # address: str | None = Field( + # None, + # description="Ethereum address of the token", + # ) + # underlyingTokens: str | list[str] | None = Field( + # None, + # description="Underlying tokens (e.g. 0xdAC17F958D2ee523a2206206994597C13D831ec7)", + # ) + # primaryAddress: str | None = Field( + # None, + # description="Ethereum address for contract interaction of defi token", + # ) + # type: Literal["defi", "base"] | None = Field( + # None, + # description="The type of the token (e.g., 'defi', 'base'). Note: Base Network also exists, it should not be confused with type.", + # ) + + +class UnderlyingTokenCompact(BaseModel): + address: str | None = Field(None, description="The address of the token") + type: str | None = Field( + None, description="The type of the token (e.g., base token)" + ) + name: str | None = Field(None, description="The name of the token") + symbol: str | None = Field(None, description="The symbol of the token") + decimals: int | None = Field( + None, description="The number of decimals for the token" + ) + + +class TokenResponseCompact(BaseModel): + name: str | None = Field(None, description="The name of the token") + symbol: str | None = Field(None, description="The symbol of the token") + address: str | None = Field(None, description="The address of the token") + primaryAddress: str | None = Field( + None, description="The primary address associated with the token" + ) + type: str | None = Field( + None, description="The type of the token (e.g., defi, base, etc.)" + ) + apy: float | None = Field( + None, description="The annual percentage yield (APY) for the token" + ) + underlyingTokens: list[UnderlyingTokenCompact] | None = Field( + None, description="List of underlying tokens" + ) + decimals: int | None = Field( + None, description="The number of decimals for the token" + ) + + +class EnsoGetTokensOutput(BaseModel): + res: list[TokenResponseCompact] | None + + +class EnsoGetTokens(EnsoBaseTool): + """ + Tool for interacting with the Enso API to retrieve cryptocurrency token information, including APY, symbol, address, + protocol slug, token type, and underlying tokens. + + This class is designed to provide detailed insights into tokens managed by the Enso platform. + It integrates with the Enso API and offers various options for filtering tokens based on optional inputs such as + chain ID, protocol slug, token type, and underlying tokens. The main objective is to retrieve APY data + and relevant information for the specified tokens, delivering structured output for further processing. + + Attributes: + name (str): Name of the tool, specifically "enso_get_tokens". + description (str): Comprehensive description of the tool's purpose and functionality. + args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters. 
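+
+    Example (illustrative; assumes a configured ``skill_store`` and API token):
+
+        >>> tool = EnsoGetTokens(skill_store=skill_store)
+        >>> out = await tool.arun({"protocolSlug": "aave-v3"})  # doctest: +SKIP
+        >>> [(t.symbol, t.apy) for t in out.res]  # doctest: +SKIP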
+ """ + + name: str = "enso_get_tokens" + description: str = ( + "Enso Finance Token Information Tool: Retrieves detailed token information from the Enso Finance API, " + "including APY, symbol, address, protocol slug, token type, and underlying tokens." + ) + args_schema: Type[BaseModel] = EnsoGetTokensInput + + async def _arun( + self, + chainId: int = default_chain_id, + protocolSlug: str | None = None, + **kwargs, + ) -> EnsoGetTokensOutput: + """Run the tool to get Tokens and APY. + Args: + chainId (int): The chain id of the network. + protocolSlug (str): The protocol slug (e.g., 'aave-v2', 'aave-v3', 'compound-v2'). + Returns: + EnsoGetTokensOutput: A structured output containing the tokens APY data. + + Raises: + Exception: If there's an error accessing the Enso API. + """ + url = f"{base_url}/api/v1/tokens" + + context = self.get_context() + agent_id = context.agent_id + api_token = self.get_api_token(context) + main_tokens = self.get_main_tokens(context) + headers = { + "accept": "application/json", + "Authorization": f"Bearer {api_token}", + } + + params = EnsoGetTokensInput( + chainId=chainId, + protocolSlug=protocolSlug, + ).model_dump(exclude_none=True) + + params["page"] = 1 + params["includeMetadata"] = "true" + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() + json_dict = response.json() + + token_decimals = await self.skill_store.get_agent_skill_data( + agent_id, + "enso_get_tokens", + "decimals", + ) + if not token_decimals: + token_decimals = {} + + # filter the main tokens from config or the ones that have apy assigned. + res = EnsoGetTokensOutput(res=list[TokenResponseCompact]()) + for item in json_dict["data"]: + main_tokens = [item.upper() for item in main_tokens] + if item.get("apy") or (item.get("symbol").upper() in main_tokens): + token_response = TokenResponseCompact(**item) + res.res.append(token_response) + token_decimals[token_response.address] = token_response.decimals + if ( + token_response.underlyingTokens + and len(token_response.underlyingTokens) > 0 + ): + for u_token in token_response.underlyingTokens: + token_decimals[u_token.address] = u_token.decimals + + await self.skill_store.save_agent_skill_data( + agent_id, + "enso_get_tokens", + "decimals", + token_decimals, + ) + + return res + except httpx.RequestError as req_err: + raise ToolException( + f"request error from Enso API: {req_err}" + ) from req_err + except httpx.HTTPStatusError as http_err: + raise ToolException( + f"http error from Enso API: {http_err}" + ) from http_err + except Exception as e: + raise ToolException(f"error from Enso API: {e}") from e diff --git a/intentkit/skills/enso/wallet.py b/intentkit/skills/enso/wallet.py new file mode 100644 index 00000000..a9a77193 --- /dev/null +++ b/intentkit/skills/enso/wallet.py @@ -0,0 +1,379 @@ +from typing import Literal, Tuple, Type + +import httpx +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from .base import EnsoBaseTool, base_url, default_chain_id + + +class EnsoGetBalancesInput(BaseModel): + """ + Input model for retrieving wallet balances. 
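+
+    The wallet address does not need to be supplied: the tool resolves it from
+    the agent's account and sends it as ``eoaAddress``.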
+ """ + + chainId: int = Field( + default_chain_id, description="Chain ID of the blockchain network" + ) + # eoaAddress: str = Field( + # description="Address of the eoa with which to associate the ensoWallet for balances" + # ) + # useEoa: bool = Field( + # description="If true returns balances for the provided eoaAddress, instead of the associated ensoWallet" + # ) + + +class WalletBalance(BaseModel): + token: str | None = Field(None, description="The address of the token") + amount: str | None = Field(None, description="The unformatted balance of the token") + decimals: int | None = Field(None, ge=0, description="The number of decimals") + price: float | None = Field(None, description="Price of the token in usd") + + +class EnsoGetBalancesOutput(BaseModel): + """ + Output model for retrieving wallet balances. + """ + + res: list[WalletBalance] | None = Field( + None, description="The wallet's balances along with token details." + ) + + +class EnsoGetWalletBalances(EnsoBaseTool): + """ + This tool allows querying for first 20 token balances of a specific wallet + and blockchain network. + + Attributes: + name (str): Name of the tool, specifically "enso_get_wallet_balances". + description (str): Comprehensive description of the tool's purpose and functionality. + args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters. + """ + + name: str = "enso_get_wallet_balances" + description: str = ( + "Retrieve token balances of a wallet on a specified blockchain network." + ) + args_schema: Type[BaseModel] = EnsoGetBalancesInput + + async def _arun( + self, + chainId: int = default_chain_id, + **kwargs, + ) -> EnsoGetBalancesOutput: + """ + Run the tool to get token balances of a wallet. + + Args: + chainId (int): Chain ID of the blockchain network. + + Returns: + EnsoGetBalancesOutput: The list of balances or an error message. + """ + url = f"{base_url}/api/v1/wallet/balances" + + context = self.get_context() + api_token = self.get_api_token(context) + account = await self.get_account(context) + headers = { + "accept": "application/json", + "Authorization": f"Bearer {api_token}", + } + + params = EnsoGetBalancesInput(chainId=chainId).model_dump(exclude_none=True) + params["eoaAddress"] = account.address + params["useEoa"] = True + + async with httpx.AsyncClient() as client: + try: + # Send the GET request + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() + + # Map the response JSON into the WalletBalance model + json_dict = response.json()[:20] + res = [WalletBalance(**item) for item in json_dict] + + # Return the parsed response + return EnsoGetBalancesOutput(res=res) + except httpx.RequestError as req_err: + raise ToolException("request error from Enso API") from req_err + except httpx.HTTPStatusError as http_err: + raise ToolException("http error from Enso API") from http_err + except Exception as e: + raise ToolException(f"error from Enso API: {e}") from e + + +class EnsoGetApprovalsInput(BaseModel): + """ + Input model for retrieving wallet approvals. 
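+
+    The ``fromAddress`` value is filled in from the agent's account inside the
+    tool, so callers only need to supply the chain.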
+    """
+
+    chainId: int = Field(
+        default_chain_id, description="Chain ID of the blockchain network"
+    )
+    fromAddress: str = Field(description="Address of the wallet")
+    routingStrategy: Literal["ensowallet", "router", "delegate"] | None = Field(
+        None, description="Routing strategy to use"
+    )
+
+
+class WalletAllowance(BaseModel):
+    token: str | None = Field(None, description="The token address")
+    allowance: str | None = Field(None, description="The amount of tokens approved")
+    spender: str | None = Field(None, description="The spender address")
+
+
+class EnsoGetApprovalsOutput(BaseModel):
+    """
+    Output model for retrieving wallet approvals.
+    """
+
+    res: list[WalletAllowance] | None = Field(
+        None, description="Response containing the list of token approvals."
+    )
+
+
+class EnsoGetWalletApprovals(EnsoBaseTool):
+    """
+    This tool allows querying for the first 50 token spend approvals associated with a specific wallet
+    and blockchain network.
+
+    Attributes:
+        name (str): Name of the tool, specifically "enso_get_wallet_approvals".
+        description (str): Comprehensive description of the tool's purpose and functionality.
+        args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters.
+    """
+
+    name: str = "enso_get_wallet_approvals"
+    description: str = (
+        "Retrieve token spend approvals for a wallet on a specified blockchain network."
+    )
+    args_schema: Type[BaseModel] = EnsoGetApprovalsInput
+
+    async def _arun(
+        self,
+        chainId: int = default_chain_id,
+        **kwargs,
+    ) -> EnsoGetApprovalsOutput:
+        """
+        Run the tool to get token approvals for a wallet.
+
+        Args:
+            chainId (int): Chain ID of the blockchain network.
+            **kwargs: optional kwargs for the tool with args schema defined in EnsoGetApprovalsInput.
+
+        Returns:
+            EnsoGetApprovalsOutput: The list of approvals or an error message.
+        """
+        url = f"{base_url}/api/v1/wallet/approvals"
+
+        context = self.get_context()
+        api_token = self.get_api_token(context)
+        account = await self.get_account(context)
+
+        headers = {
+            "accept": "application/json",
+            "Authorization": f"Bearer {api_token}",
+        }
+
+        params = EnsoGetApprovalsInput(
+            chainId=chainId,
+            fromAddress=account.address,
+        )
+
+        if kwargs.get("routingStrategy"):
+            params.routingStrategy = kwargs["routingStrategy"]
+
+        async with httpx.AsyncClient() as client:
+            try:
+                # Send the GET request
+                response = await client.get(
+                    url, headers=headers, params=params.model_dump(exclude_none=True)
+                )
+                response.raise_for_status()
+
+                # Map the response JSON into the ApprovalsResponse model
+                json_dict = response.json()[:50]
+                res = [WalletAllowance(**item) for item in json_dict]
+
+                # Return the parsed response
+                return EnsoGetApprovalsOutput(res=res)
+            except httpx.RequestError as req_err:
+                raise ToolException(
+                    f"request error from Enso API: {req_err}"
+                ) from req_err
+            except httpx.HTTPStatusError as http_err:
+                raise ToolException(
+                    f"http error from Enso API: {http_err}"
+                ) from http_err
+            except Exception as e:
+                raise ToolException(f"error from Enso API: {e}") from e
+
+
+class EnsoWalletApproveInput(BaseModel):
+    """
+    Input model for approving token spending from the wallet.
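+
+    Note: ``amount`` is denominated in the token's base units, so approving
+    10 USDC (a 6-decimal token) corresponds to ``amount=10_000_000``.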
+ """ + + tokenAddress: str = Field(description="ERC20 token address of the token to approve") + amount: int = Field(description="Amount of tokens to approve in wei") + chainId: int = Field( + default_chain_id, description="Chain ID of the blockchain network" + ) + routingStrategy: Literal["ensowallet", "router", "delegate"] | None = Field( + None, description="Routing strategy to use" + ) + + +class EnsoWalletApproveOutput(BaseModel): + """ + Output model for approve token for the wallet. + """ + + gas: str | None = Field(None, description="The gas estimate for the transaction") + token: str | None = Field(None, description="The token address to approve") + amount: str | None = Field(None, description="The amount of tokens to approve") + spender: str | None = Field(None, description="The spender address to approve") + + +class EnsoWalletApproveArtifact(BaseModel): + """ + Output model for approve token for the wallet. + """ + + tx: object | None = Field(None, description="The tx object to use in `ethers`") + txHash: str | None = Field(None, description="The transaction hash") + + +class EnsoWalletApprove(EnsoBaseTool): + """ + This tool is used specifically for broadcasting a ERC20 token spending approval transaction to the network. + It should only be used when the user explicitly requests to broadcast an approval transaction with a specific amount for a certain token. + + **Example Usage:** + + "Broadcast an approval transaction for 10 USDC to the wallet." + + **Important:** + - This tool should be used with extreme caution. + - Approving token spending grants another account permission to spend your tokens. + + Attributes: + name (str): Name of the tool, specifically "enso_wallet_approve". + description (str): Comprehensive description of the tool's purpose and functionality. + args_schema (Type[BaseModel]): Schema for input arguments, specifying expected parameters. + """ + + name: str = "enso_wallet_approve" + description: str = "This tool is used specifically for broadcasting a ERC20 token spending approval transaction to the network. It should only be used when the user explicitly requests to broadcast an approval transaction with a specific amount for a certain token." + args_schema: Type[BaseModel] = EnsoWalletApproveInput + response_format: str = "content_and_artifact" + + # def _run( + # self, + # tokenAddress: str, + # amount: int, + # chainId: int = default_chain_id, + # **kwargs, + # ) -> Tuple[EnsoBroadcastWalletApproveOutput, EnsoBroadcastWalletApproveArtifact]: + # """Run the tool to approve enso router for a wallet. + + # Returns: + # Tuple[EnsoBroadcastWalletApproveOutput, EnsoBroadcastWalletApproveArtifact]: A structured output containing the result of token approval. + + # Raises: + # Exception: If there's an error accessing the Enso API. + # """ + # raise NotImplementedError("Use _arun instead") + + async def _arun( + self, + tokenAddress: str, + amount: int, + chainId: int = default_chain_id, + **kwargs, + ) -> Tuple[EnsoWalletApproveOutput, EnsoWalletApproveArtifact]: + """ + Run the tool to approve enso router for a wallet. + + Args: + tokenAddress (str): ERC20 token address of the token to approve. + amount (int): Amount of tokens to approve in wei. + chainId (int): Chain ID of the blockchain network. + **kwargs: optional kwargs for the tool with args schema defined in EnsoGetApproveInput. + + Returns: + Tuple[EnsoBroadcastWalletApproveOutput, EnsoBroadcastWalletApproveArtifact]: The list of approve transaction output or an error message. 
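+
+        Example (illustrative; ``usdc_address`` is a hypothetical 6-decimal
+        token address, and ``skill_store`` is assumed to be configured):
+
+            >>> tool = EnsoWalletApprove(skill_store=skill_store)
+            >>> content, artifact = await tool._arun(
+            ...     tokenAddress=usdc_address, amount=10 * 10**6
+            ... )  # doctest: +SKIP
+            >>> artifact.txHash  # doctest: +SKIP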
+        """
+        url = f"{base_url}/api/v1/wallet/approve"
+        context = self.get_context()
+        api_token = self.get_api_token(context)
+        account = await self.get_account(context)
+
+        headers = {
+            "accept": "application/json",
+            "Authorization": f"Bearer {api_token}",
+        }
+
+        from_address = account.address
+
+        params = EnsoWalletApproveInput(
+            tokenAddress=tokenAddress,
+            amount=amount,
+            chainId=chainId,
+        )
+
+        if kwargs.get("routingStrategy"):
+            params.routingStrategy = kwargs["routingStrategy"]
+
+        params = params.model_dump(exclude_none=True)
+
+        params["fromAddress"] = from_address
+
+        async with httpx.AsyncClient() as client:
+            try:
+                # Send the GET request
+                response = await client.get(url, headers=headers, params=params)
+                response.raise_for_status()
+
+                # Map the response JSON into the approval output and artifact models
+                json_dict = response.json()
+                content = EnsoWalletApproveOutput(**json_dict)
+                artifact = EnsoWalletApproveArtifact(**json_dict)
+
+                # Use the wallet provider to send the transaction
+                wallet_provider = await self.get_wallet_provider(context)
+
+                # Extract transaction data from the Enso API response
+                tx_data = json_dict.get("tx", {})
+                if tx_data:
+                    # Send the transaction using the wallet provider
+                    tx_hash = wallet_provider.send_transaction(
+                        {
+                            "to": tx_data.get("to"),
+                            "data": tx_data.get("data", "0x"),
+                            "value": tx_data.get("value", 0),
+                        }
+                    )
+
+                    # Wait for transaction confirmation
+                    wallet_provider.wait_for_transaction_receipt(tx_hash)
+                    artifact.txHash = tx_hash
+                else:
+                    # For now, return without executing the transaction if no tx data
+                    artifact.txHash = "0x0000000000000000000000000000000000000000000000000000000000000000"
+
+                # Return the parsed response
+                return (content, artifact)
+            except httpx.RequestError as req_err:
+                raise ToolException(
+                    f"request error from Enso API: {req_err}"
+                ) from req_err
+            except httpx.HTTPStatusError as http_err:
+                raise ToolException(
+                    f"http error from Enso API: {http_err}"
+                ) from http_err
+            except Exception as e:
+                raise ToolException(f"error from Enso API: {e}") from e
diff --git a/intentkit/skills/firecrawl/README.md b/intentkit/skills/firecrawl/README.md
new file mode 100644
index 00000000..113b0bed
--- /dev/null
+++ b/intentkit/skills/firecrawl/README.md
@@ -0,0 +1,217 @@
+# Firecrawl Skills
+
+The Firecrawl skills provide advanced web scraping and content indexing capabilities using the Firecrawl API. These skills can handle JavaScript-heavy websites, PDFs, and provide automatic content indexing for intelligent querying.
+
+## Skills Overview
+
+### 1. firecrawl_scrape
+Scrapes a single webpage and REPLACES any existing indexed content for that URL, preventing duplicates.
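+
+Example prompt (illustrative):
+
+```
+Prompt: "Use firecrawl_scrape to scrape https://example.com and index the content for future querying"
+```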
+ +**Parameters:** +- `url` (required): The URL to scrape +- `formats` (optional): Output formats - markdown, html, rawHtml, screenshot, links, json (default: ["markdown"]) +- `only_main_content` (optional): Extract only main content (default: true) +- `include_tags` (optional): HTML tags to include (e.g., ["h1", "h2", "p"]) +- `exclude_tags` (optional): HTML tags to exclude +- `wait_for` (optional): Wait time in milliseconds before scraping +- `timeout` (optional): Maximum timeout in milliseconds (default: 30000) +- `index_content` (optional): Whether to index content for querying (default: true) +- `chunk_size` (optional): Size of text chunks for indexing (default: 1000) +- `chunk_overlap` (optional): Overlap between chunks (default: 200) + +**Use Case:** Use this when you want to refresh/update content from a URL that was previously scraped, ensuring no duplicate or stale content remains. + +### 2. firecrawl_crawl +Crawls multiple pages from a website and indexes all content. + +**Parameters:** +- `url` (required): The base URL to start crawling +- `include_paths` (optional): URL patterns to include (e.g., ["/docs/*"]) +- `exclude_paths` (optional): URL patterns to exclude +- `max_depth` (optional): Maximum crawl depth (default: 2) +- `limit` (optional): Maximum number of pages to crawl (default: 5) +- `index_content` (optional): Whether to index content for querying (default: true) +- `chunk_size` (optional): Size of text chunks for indexing (default: 1000) +- `chunk_overlap` (optional): Overlap between chunks (default: 200) + +### 3. firecrawl_query_indexed_content +Queries previously indexed Firecrawl content using semantic search. + +**Parameters:** +- `query` (required): The search query +- `limit` (optional): Maximum number of results to return (1-10, default: 4) + +### 4. firecrawl_clear_indexed_content +Clears all previously indexed Firecrawl content from the vector store. + +**Parameters:** +- `confirm` (required): Must be set to true to confirm the deletion (default: false) + +**Note:** This action is permanent and cannot be undone. Use when you want to start fresh with new content. + +## API Key Configuration +Set your Firecrawl API key as an environment variable: +```bash +export FIRECRAWL_API_KEY=fc-your-api-key-here +``` + +## Testing Instructions + +### Step 1: Create an Agent with Firecrawl Skills + +1. **Create a new agent** via the API or UI with the following skills: + ```json + { + "skills": [ + "firecrawl_scrape", + "firecrawl_crawl", + "firecrawl_query_indexed_content", + "firecrawl_clear_indexed_content" + ] + } + ``` + +2. **Note the agent ID** for testing + +### Step 2: Test Single Page Scraping + +**Test scraping a documentation homepage:** +``` +Prompt: "Use firecrawl_scrape to scrape https://docs.joincommonwealth.xyz/ and index the content for future querying" +``` + +**Expected Result:** +- Content successfully scraped +- Content automatically indexed with metadata +- Confirmation of chunk creation and indexing + +### Step 3: Test Content Crawling + +**Test crawling multiple pages:** +``` +Prompt: "Use firecrawl_crawl to crawl https://docs.joincommonwealth.xyz/ with max_depth=2 and limit=3 to index multiple documentation pages" +``` + +**Expected Result:** +- Multiple pages crawled and scraped +- Each page indexed separately +- Batch processing confirmation + +### Step 4: Test Content Querying + +**Test querying indexed content:** +``` +Prompt: "Use firecrawl_query_indexed_content to search for 'What is All Street and what is its purpose?' 
in the indexed content" +``` + +**Expected Result:** +- Relevant content retrieved from indexed documents +- Results tagged with [Firecrawl Scrape] or [Firecrawl Crawl] +- Source URLs and metadata included + +### Step 5: Test Advanced Scraping Options + +**Test with specific formatting:** +``` +Prompt: "Use firecrawl_scrape to scrape https://docs.joincommonwealth.xyz/all-street-manifesto with formats=['markdown', 'html'] and include_tags=['h1', 'h2', 'p'] and index_content=true" +``` + +**Expected Result:** +- Content in both markdown and HTML formats +- Only specified HTML tags included +- Content indexed for querying + +### Step 6: Test Multiple Queries + +**Test different query types:** +``` +Prompt: "Use firecrawl_query_indexed_content to search for 'democratize finance' in the indexed content" +``` + +**Expected Result:** +- Relevant content retrieved from Firecrawl's independent vector store +- Results tagged with [Firecrawl Scrape] or [Firecrawl Crawl] +- Source URLs and metadata included + +### Step 7: Test Clear Indexed Content + +**Test clearing all indexed content:** +``` +Prompt: "Use firecrawl_clear_indexed_content with confirm=true to clear all indexed content" +``` + +**Expected Result:** +- All indexed content removed from vector store +- Confirmation message displayed +- Subsequent queries return no results + +### Step 8: Test Re-indexing After Clear + +**Test that content can be re-indexed after clearing:** +``` +Prompt: "Use firecrawl_scrape to scrape https://example.com and index the content" +``` + +**Expected Result:** +- Content successfully scraped and indexed +- Fresh vector store created +- Content available for querying again + +## Common Use Cases + +### Documentation Indexing +``` +1. Scrape main documentation page +2. Crawl related documentation sections +3. Use scrape again to update changed pages (replaces old content) +4. Query for specific technical information +``` + +### Competitive Analysis +``` +1. Scrape competitor websites +2. Index product information and features +3. Query for specific comparisons +``` + +### Research and Knowledge Base +``` +1. Crawl research papers or articles +2. Index academic or technical content +3. Query for specific concepts or methodologies +``` + +## Troubleshooting + +### Common Issues + +1. **API Key Not Found** + - Ensure `FIRECRAWL_API_KEY` environment variable is set + - Restart the IntentKit server after setting the key + +2. **Scraping Failures** + - Check if the URL is accessible + - Verify Firecrawl API quota and limits + - Some websites may block scraping + +3. **Indexing Errors** + - Ensure OpenAI API key is configured for embeddings + - Check if content is too large for processing + - Verify vector store permissions + +4. 
**Query Returns No Results** + - Ensure content was successfully indexed + - Try broader or different search terms + - Check if vector store contains data + +## Features and Benefits + +- **JavaScript Rendering**: Handles SPAs and dynamic content +- **PDF Support**: Can scrape and index PDF documents +- **Intelligent Chunking**: Optimized text splitting for better search +- **Independent Storage**: Uses its own dedicated vector store for Firecrawl content +- **Content Replacement**: Replace mode prevents duplicate/stale content +- **Metadata Rich**: Includes source URLs, timestamps, and content types +- **Semantic Search**: Uses OpenAI embeddings for intelligent querying +- **Batch Processing**: Efficient handling of multiple pages +- **Content Filtering**: Flexible include/exclude options for targeted scraping \ No newline at end of file diff --git a/intentkit/skills/firecrawl/__init__.py b/intentkit/skills/firecrawl/__init__.py new file mode 100644 index 00000000..8b464d15 --- /dev/null +++ b/intentkit/skills/firecrawl/__init__.py @@ -0,0 +1,107 @@ +"""Firecrawl skills for web scraping and crawling.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.firecrawl.base import FirecrawlBaseTool +from intentkit.skills.firecrawl.clear import FirecrawlClearIndexedContent +from intentkit.skills.firecrawl.crawl import FirecrawlCrawl +from intentkit.skills.firecrawl.query import FirecrawlQueryIndexedContent +from intentkit.skills.firecrawl.scrape import FirecrawlScrape + +# Cache skills at the system level, because they are stateless +_cache: dict[str, FirecrawlBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + firecrawl_scrape: SkillState + firecrawl_crawl: SkillState + firecrawl_query_indexed_content: SkillState + firecrawl_clear_indexed_content: SkillState + + +class Config(SkillConfig): + """Configuration for Firecrawl skills.""" + + states: SkillStates + api_key: str = "" + api_key_provider: str = "agent_owner" + rate_limit_number: int = 100 + rate_limit_minutes: int = 60 + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[FirecrawlBaseTool]: + """Get all Firecrawl skills. + + Args: + config: The configuration for Firecrawl skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Firecrawl skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + return [get_firecrawl_skill(name, store) for name in available_skills] + + +def get_firecrawl_skill( + name: str, + store: SkillStoreABC, +) -> FirecrawlBaseTool: + """Get a Firecrawl skill by name. 
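+
+    Example (illustrative; ``store`` is any ``SkillStoreABC`` implementation):
+
+        >>> skill = get_firecrawl_skill("firecrawl_scrape", store)  # doctest: +SKIP
+        >>> skill.name  # doctest: +SKIP
+        'firecrawl_scrape'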
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Firecrawl skill + + Raises: + ValueError: If the skill name is unknown + """ + if name == "firecrawl_scrape": + if name not in _cache: + _cache[name] = FirecrawlScrape( + skill_store=store, + ) + return _cache[name] + elif name == "firecrawl_crawl": + if name not in _cache: + _cache[name] = FirecrawlCrawl( + skill_store=store, + ) + return _cache[name] + elif name == "firecrawl_query_indexed_content": + if name not in _cache: + _cache[name] = FirecrawlQueryIndexedContent( + skill_store=store, + ) + return _cache[name] + elif name == "firecrawl_clear_indexed_content": + if name not in _cache: + _cache[name] = FirecrawlClearIndexedContent( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown Firecrawl skill: {name}") diff --git a/intentkit/skills/firecrawl/base.py b/intentkit/skills/firecrawl/base.py new file mode 100644 index 00000000..cbc9ef9f --- /dev/null +++ b/intentkit/skills/firecrawl/base.py @@ -0,0 +1,38 @@ +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class FirecrawlBaseTool(IntentKitSkill): + """Base class for Firecrawl tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + """Get the Firecrawl API key from configuration.""" + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "agent_owner": + api_key = skill_config.get("api_key") + if api_key: + return api_key + else: + raise ToolException("No api_key found in agent_owner configuration") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}. Only 'agent_owner' is supported for Firecrawl." + ) + + @property + def category(self) -> str: + return "firecrawl" diff --git a/intentkit/skills/firecrawl/clear.py b/intentkit/skills/firecrawl/clear.py new file mode 100644 index 00000000..b103a0fa --- /dev/null +++ b/intentkit/skills/firecrawl/clear.py @@ -0,0 +1,85 @@ +import logging +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.skills.firecrawl.base import FirecrawlBaseTool + +logger = logging.getLogger(__name__) + + +class FirecrawlClearInput(BaseModel): + """Input for Firecrawl clear tool.""" + + confirm: bool = Field( + description="Confirmation to clear all indexed content (must be true)", + default=False, + ) + + +class FirecrawlClearIndexedContent(FirecrawlBaseTool): + """Tool for clearing all indexed Firecrawl content. + + This tool removes all previously indexed content from the Firecrawl vector store, + allowing for a fresh start with new content. + """ + + name: str = "firecrawl_clear_indexed_content" + description: str = ( + "Clear all previously indexed Firecrawl content from the vector store.\n" + "This will permanently delete all indexed content and cannot be undone.\n" + "Use this tool when you want to start fresh with new content." 
+ ) + args_schema: Type[BaseModel] = FirecrawlClearInput + + async def _arun( + self, + confirm: bool = False, + **kwargs, + ) -> str: + """Clear all indexed Firecrawl content for the agent. + + Args: + confirm: Must be True to confirm the deletion + config: The configuration for the tool call + + Returns: + str: Confirmation message + """ + context = self.get_context() + agent_id = context.agent_id + + if not agent_id: + return "Error: Agent ID not available for clearing content." + + if not confirm: + return "Error: You must set confirm=true to clear all indexed content." + + logger.info( + f"firecrawl_clear: Starting clear indexed content operation for agent {agent_id}" + ) + + try: + # Delete vector store data (using web_scraper storage format for compatibility) + vector_store_key = f"vector_store_{agent_id}" + await self.skill_store.delete_agent_skill_data( + agent_id, "web_scraper", vector_store_key + ) + + # Delete metadata + metadata_key = f"indexed_urls_{agent_id}" + await self.skill_store.delete_agent_skill_data( + agent_id, "web_scraper", metadata_key + ) + + logger.info( + f"firecrawl_clear: Successfully cleared all indexed content for agent {agent_id}" + ) + return "Successfully cleared all Firecrawl indexed content. The vector store is now empty and ready for new content." + + except Exception as e: + logger.error( + f"firecrawl_clear: Error clearing indexed content for agent {agent_id}: {e}", + exc_info=True, + ) + return f"Error clearing indexed content: {str(e)}" diff --git a/intentkit/skills/firecrawl/crawl.py b/intentkit/skills/firecrawl/crawl.py new file mode 100644 index 00000000..e7d6ab11 --- /dev/null +++ b/intentkit/skills/firecrawl/crawl.py @@ -0,0 +1,398 @@ +import asyncio +import logging +from typing import List, Optional, Type + +import httpx +from langchain_core.documents import Document +from pydantic import BaseModel, Field + +from intentkit.skills.firecrawl.base import FirecrawlBaseTool + +logger = logging.getLogger(__name__) + + +class FirecrawlCrawlInput(BaseModel): + """Input for Firecrawl crawl tool.""" + + url: str = Field( + description="The base URL to crawl. All accessible subpages will be crawled." + ) + limit: int = Field( + description="Maximum number of pages to crawl", default=10, ge=1, le=1000 + ) + formats: List[str] = Field( + description="Output formats to include in the response. 
Options: 'markdown', 'html', 'rawHtml', 'screenshot', 'links', 'json'", + default=["markdown"], + ) + include_paths: Optional[List[str]] = Field( + description="Regex patterns to include in the crawl (e.g., ['^/blog/.*$'])", + default=None, + ) + exclude_paths: Optional[List[str]] = Field( + description="Regex patterns to exclude from the crawl (e.g., ['^/admin/.*$'])", + default=None, + ) + max_depth: Optional[int] = Field( + description="Maximum depth to crawl from the base URL", + default=None, + ge=1, + le=10, + ) + allow_backward_links: bool = Field( + description="Allow crawling parent and sibling URLs, not just child paths", + default=False, + ) + allow_external_links: bool = Field( + description="Allow crawling external domains (use with caution)", default=False + ) + allow_subdomains: bool = Field( + description="Allow crawling subdomains of the main domain", default=False + ) + only_main_content: bool = Field( + description="Whether to extract only the main content (excluding headers, footers, navigation, etc.)", + default=True, + ) + index_content: bool = Field( + description="Whether to index the crawled content for later querying (default: True)", + default=True, + ) + chunk_size: int = Field( + description="Size of text chunks for indexing (default: 1000)", + default=1000, + ge=100, + le=4000, + ) + chunk_overlap: int = Field( + description="Overlap between chunks (default: 200)", + default=200, + ge=0, + le=1000, + ) + + +class FirecrawlCrawl(FirecrawlBaseTool): + """Tool for crawling entire websites using Firecrawl. + + This tool uses Firecrawl's API to crawl websites and extract content from multiple pages. + It can handle JavaScript-rendered content, follow links, and extract structured data + from entire websites. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "firecrawl_crawl" + description: str = ( + "Crawl an entire website and extract content from multiple pages. " + "This tool can follow links, handle JavaScript-rendered content, and extract " + "structured data from all accessible pages on a website. " + "Optionally indexes all crawled content for later querying using the firecrawl_query_indexed_content tool. " + "Use this when you need to gather comprehensive information from a website." + ) + args_schema: Type[BaseModel] = FirecrawlCrawlInput + + async def _arun( + self, + url: str, + limit: int = 10, + formats: List[str] = None, + include_paths: Optional[List[str]] = None, + exclude_paths: Optional[List[str]] = None, + max_depth: Optional[int] = None, + allow_backward_links: bool = False, + allow_external_links: bool = False, + allow_subdomains: bool = False, + only_main_content: bool = True, + index_content: bool = True, + chunk_size: int = 1000, + chunk_overlap: int = 200, + **kwargs, + ) -> str: + """Implementation of the Firecrawl crawl tool. + + Args: + url: The base URL to crawl. + limit: Maximum number of pages to crawl. + formats: Output formats to include in the response. + include_paths: Regex patterns to include in the crawl. + exclude_paths: Regex patterns to exclude from the crawl. + max_depth: Maximum depth to crawl from the base URL. + allow_backward_links: Allow crawling parent and sibling URLs. + allow_external_links: Allow crawling external domains. + allow_subdomains: Allow crawling subdomains. + only_main_content: Whether to extract only main content. + config: The configuration for the tool call. 
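+            index_content: Whether to index the crawled content for later querying.
+            chunk_size: Size of text chunks for indexing.
+            chunk_overlap: Overlap between chunks.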
+ + Returns: + str: Formatted crawled content from all pages. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + logger.debug(f"firecrawl_crawl: Running crawl with context {context}") + + if skill_config.get("api_key_provider") == "agent_owner": + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return "Error: No Firecrawl API key provided in the configuration." + + # Validate and set defaults + if formats is None: + formats = ["markdown"] + + # Validate formats + valid_formats = ["markdown", "html", "rawHtml", "screenshot", "links", "json"] + formats = [f for f in formats if f in valid_formats] + if not formats: + formats = ["markdown"] + + # Prepare the request payload + payload = { + "url": url, + "limit": min(limit, 1000), # Cap at 1000 for safety + "scrapeOptions": {"formats": formats, "onlyMainContent": only_main_content}, + } + + if include_paths: + payload["includePaths"] = include_paths + if exclude_paths: + payload["excludePaths"] = exclude_paths + if max_depth: + payload["maxDepth"] = max_depth + if allow_backward_links: + payload["allowBackwardLinks"] = allow_backward_links + if allow_external_links: + payload["allowExternalLinks"] = allow_external_links + if allow_subdomains: + payload["allowSubdomains"] = allow_subdomains + + # Call Firecrawl crawl API + try: + async with httpx.AsyncClient(timeout=120.0) as client: + # Start the crawl + response = await client.post( + "https://api.firecrawl.dev/v1/crawl", + json=payload, + headers={ + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + }, + ) + + if response.status_code != 200: + logger.error( + f"firecrawl_crawl: Error from Firecrawl API: {response.status_code} - {response.text}" + ) + return f"Error starting crawl: {response.status_code} - {response.text}" + + crawl_data = response.json() + + if not crawl_data.get("success"): + error_msg = crawl_data.get("error", "Unknown error occurred") + return f"Error starting crawl: {error_msg}" + + crawl_id = crawl_data.get("id") + if not crawl_id: + return "Error: No crawl ID returned from Firecrawl API" + + # Poll for crawl completion + max_polls = 60 # Maximum 5 minutes of polling (60 * 5 seconds) + poll_count = 0 + + while poll_count < max_polls: + # Check crawl status + status_response = await client.get( + f"https://api.firecrawl.dev/v1/crawl/{crawl_id}", + headers={ + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + }, + ) + + if status_response.status_code != 200: + logger.error( + f"firecrawl_crawl: Error checking crawl status: {status_response.status_code} - {status_response.text}" + ) + return f"Error checking crawl status: {status_response.status_code} - {status_response.text}" + + status_data = status_response.json() + status = status_data.get("status") + + if status == "completed": + # Crawl completed successfully + pages_data = status_data.get("data", []) + total_pages = status_data.get("total", 0) + completed_pages = status_data.get("completed", 0) + + # Format the results + formatted_result = f"Successfully crawled: {url}\n" + formatted_result += f"Total pages found: {total_pages}\n" + formatted_result += f"Pages completed: {completed_pages}\n\n" + + # Process each page + for i, 
page_data in enumerate( + pages_data[:10], 1 + ): # Limit to first 10 pages for output + page_url = page_data.get("metadata", {}).get( + "sourceURL", "Unknown URL" + ) + formatted_result += f"## Page {i}: {page_url}\n" + + if "markdown" in formats and page_data.get("markdown"): + content = page_data["markdown"][ + :500 + ] # Limit content length + formatted_result += f"{content}" + if len(page_data["markdown"]) > 500: + formatted_result += "... (content truncated)" + formatted_result += "\n\n" + + # Add page metadata + metadata = page_data.get("metadata", {}) + if metadata.get("title"): + formatted_result += f"Title: {metadata['title']}\n" + if metadata.get("description"): + formatted_result += ( + f"Description: {metadata['description']}\n" + ) + formatted_result += "\n" + + if len(pages_data) > 10: + formatted_result += ( + f"... and {len(pages_data) - 10} more pages\n" + ) + + # Index content if requested + if index_content and pages_data: + try: + # Import indexing utilities from firecrawl utils + from intentkit.skills.firecrawl.utils import ( + FirecrawlMetadataManager, + index_documents, + ) + + # Create documents from crawled content + documents = [] + for page_data in pages_data: + if page_data.get("markdown"): + metadata = page_data.get("metadata", {}) + document = Document( + page_content=page_data["markdown"], + metadata={ + "source": metadata.get( + "sourceURL", "Unknown URL" + ), + "title": metadata.get("title", ""), + "description": metadata.get( + "description", "" + ), + "language": metadata.get( + "language", "" + ), + "source_type": "firecrawl_crawl", + "indexed_at": str(context.agent_id), + }, + ) + documents.append(document) + + # Get agent ID for indexing + agent_id = context.agent_id + if agent_id and documents: + # Index all documents + total_chunks, was_merged = await index_documents( + documents, + agent_id, + self.skill_store, + chunk_size, + chunk_overlap, + ) + + # Update metadata + metadata_manager = FirecrawlMetadataManager( + self.skill_store + ) + urls = [doc.metadata["source"] for doc in documents] + new_metadata = metadata_manager.create_url_metadata( + urls, documents, "firecrawl_crawl" + ) + await metadata_manager.update_metadata( + agent_id, new_metadata + ) + + formatted_result += "\n## Content Indexing\n" + formatted_result += "Successfully indexed crawled content into vector store:\n" + formatted_result += ( + f"- Pages indexed: {len(documents)}\n" + ) + formatted_result += ( + f"- Total chunks created: {total_chunks}\n" + ) + formatted_result += f"- Chunk size: {chunk_size}\n" + formatted_result += ( + f"- Chunk overlap: {chunk_overlap}\n" + ) + formatted_result += f"- Content merged with existing: {'Yes' if was_merged else 'No'}\n" + formatted_result += "Use the 'firecrawl_query_indexed_content' skill to search this content.\n" + + logger.info( + f"firecrawl_crawl: Successfully indexed {len(documents)} pages with {total_chunks} total chunks" + ) + else: + formatted_result += "\n## Content Indexing\n" + formatted_result += "Warning: Could not index content - agent ID not available or no content to index.\n" + + except Exception as index_error: + logger.error( + f"firecrawl_crawl: Error indexing content: {index_error}" + ) + formatted_result += "\n## Content Indexing\n" + formatted_result += f"Warning: Failed to index content for later querying: {str(index_error)}\n" + + return formatted_result.strip() + + elif status == "failed": + error_msg = status_data.get("error", "Crawl failed") + return f"Crawl failed: {error_msg}" + + elif status in 
["scraping", "active"]: + # Still in progress, wait and poll again + completed = status_data.get("completed", 0) + total = status_data.get("total", 0) + logger.debug( + f"firecrawl_crawl: Crawl in progress: {completed}/{total} pages" + ) + + # Wait 5 seconds before next poll + await asyncio.sleep(5) + poll_count += 1 + + else: + # Unknown status + logger.warning( + f"firecrawl_crawl: Unknown crawl status: {status}" + ) + await asyncio.sleep(5) + poll_count += 1 + + # If we've exceeded max polls, return partial results + return f"Crawl timeout: The crawl of {url} is taking longer than expected. Please try again later or reduce the crawl limit." + + except httpx.TimeoutException: + logger.error(f"firecrawl_crawl: Timeout crawling URL: {url}") + return ( + f"Timeout error: The request to crawl {url} took too long to complete." + ) + except Exception as e: + logger.error(f"firecrawl_crawl: Error crawling URL: {e}", exc_info=True) + return f"An error occurred while crawling the URL: {str(e)}" diff --git a/intentkit/skills/firecrawl/firecrawl.png b/intentkit/skills/firecrawl/firecrawl.png new file mode 100644 index 00000000..78ee35c5 Binary files /dev/null and b/intentkit/skills/firecrawl/firecrawl.png differ diff --git a/intentkit/skills/firecrawl/query.py b/intentkit/skills/firecrawl/query.py new file mode 100644 index 00000000..685519d0 --- /dev/null +++ b/intentkit/skills/firecrawl/query.py @@ -0,0 +1,121 @@ +import logging +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.skills.firecrawl.base import FirecrawlBaseTool + +logger = logging.getLogger(__name__) + + +class FirecrawlQueryInput(BaseModel): + """Input for Firecrawl query tool.""" + + query: str = Field( + description="Question or query to search in the indexed content", + min_length=1, + max_length=500, + ) + max_results: int = Field( + description="Maximum number of relevant documents to return (default: 4)", + default=4, + ge=1, + le=10, + ) + + +class FirecrawlQueryIndexedContent(FirecrawlBaseTool): + """Tool for querying previously indexed Firecrawl content. + + This tool searches through content that was previously scraped and indexed + using the firecrawl_scrape or firecrawl_crawl tools to answer questions or find relevant information. + """ + + name: str = "firecrawl_query_indexed_content" + description: str = ( + "Query previously indexed Firecrawl content to find relevant information and answer questions.\n" + "Use this tool to search through content that was previously scraped and indexed using Firecrawl tools.\n" + "This tool can help answer questions based on the indexed web content from Firecrawl scraping/crawling." 
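The crawl endpoint is asynchronous: the initial POST only returns a job id, and results arrive through the status endpoint. Stripped of result formatting and indexing, the control flow implemented above reduces to roughly this sketch (success-flag checks and logging elided):

```python
import asyncio

import httpx

CRAWL_API = "https://api.firecrawl.dev/v1/crawl"


async def crawl_and_wait(url: str, api_key: str, limit: int = 10) -> list[dict]:
    headers = {"Authorization": f"Bearer {api_key}"}
    async with httpx.AsyncClient(timeout=120.0) as client:
        # Kick off the crawl job; the response carries only an id.
        start = await client.post(
            CRAWL_API, json={"url": url, "limit": limit}, headers=headers
        )
        crawl_id = start.json()["id"]

        # Poll the status endpoint, capped at 60 polls (~5 minutes).
        for _ in range(60):
            status = (await client.get(f"{CRAWL_API}/{crawl_id}", headers=headers)).json()
            if status.get("status") == "completed":
                return status.get("data", [])
            if status.get("status") == "failed":
                raise RuntimeError(status.get("error", "crawl failed"))
            await asyncio.sleep(5)
    raise TimeoutError(f"crawl of {url} did not finish within the polling budget")
```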
+ ) + args_schema: Type[BaseModel] = FirecrawlQueryInput + + async def _arun( + self, + query: str, + max_results: int = 4, + **kwargs, + ) -> str: + """Query the indexed Firecrawl content.""" + try: + # Get agent context - throw error if not available + # Configuration is always available in new runtime + pass + + context = self.get_context() + if not context or not context.agent_id: + raise ValueError("Agent ID is required but not found in configuration") + + agent_id = context.agent_id + + logger.info(f"[{agent_id}] Starting Firecrawl query operation: '{query}'") + + # Import query utilities from firecrawl utils + from intentkit.skills.firecrawl.utils import ( + FirecrawlDocumentProcessor, + query_indexed_content, + ) + + # Query the indexed content + docs = await query_indexed_content( + query, agent_id, self.skill_store, max_results + ) + + if not docs: + logger.info(f"[{agent_id}] No relevant documents found for query") + return f"No relevant information found for your query: '{query}'. The indexed content may not contain information related to your search." + + # Format results + results = [] + for i, doc in enumerate(docs, 1): + # Sanitize content to prevent database storage errors + content = FirecrawlDocumentProcessor.sanitize_for_database( + doc.page_content.strip() + ) + source = doc.metadata.get("source", "Unknown") + source_type = doc.metadata.get("source_type", "unknown") + + # Add source type indicator for Firecrawl content + if source_type.startswith("firecrawl"): + source_indicator = ( + f"[Firecrawl {source_type.replace('firecrawl_', '').title()}]" + ) + else: + source_indicator = "" + + results.append( + f"**Source {i}:** {source} {source_indicator}\n{content}" + ) + + response = "\n\n".join(results) + logger.info( + f"[{agent_id}] Firecrawl query completed successfully, returning {len(response)} chars" + ) + + return response + + except Exception as e: + # Extract agent_id for error logging if possible + agent_id = "UNKNOWN" + try: + # TODO: Fix config reference + context = self.get_context() + if context and context.agent_id: + agent_id = context.agent_id + except Exception: + pass + + logger.error( + f"[{agent_id}] Error in FirecrawlQueryIndexedContent: {e}", + exc_info=True, + ) + raise type(e)(f"[agent:{agent_id}]: {e}") from e diff --git a/intentkit/skills/firecrawl/schema.json b/intentkit/skills/firecrawl/schema.json new file mode 100644 index 00000000..689c1520 --- /dev/null +++ b/intentkit/skills/firecrawl/schema.json @@ -0,0 +1,153 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Firecrawl Web Scraping and Crawling", + "description": "AI-powered web scraping and crawling capabilities using Firecrawl", + "x-icon": "https://ai.service.crestal.dev/skills/firecrawl/firecrawl.png", + "x-tags": [ + "Web Scraping", + "Crawling", + "Content Extraction", + "Data Mining", + "Website Analysis" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "firecrawl_scrape": { + "type": "string", + "title": "Firecrawl Scrape", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Scrape single web pages and REPLACE any existing indexed content for that URL. Unlike regular scrape, this prevents duplicate content when re-scraping the same page. 
Use this to refresh/update content from a previously scraped URL.", + "default": "private" + }, + "firecrawl_crawl": { + "type": "string", + "title": "Firecrawl Crawl", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Crawl entire websites and extract content from multiple pages. Can follow links, handle JavaScript-rendered content, and extract structured data from entire websites.", + "default": "private" + }, + "firecrawl_query_indexed_content": { + "type": "string", + "title": "Query Indexed Content", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Query previously indexed Firecrawl content to find relevant information and answer questions. Use this to search through content that was scraped and indexed using Firecrawl tools.", + "default": "private" + }, + "firecrawl_clear_indexed_content": { + "type": "string", + "title": "Clear Indexed Content", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Clear all previously indexed Firecrawl content from the vector store. This will permanently delete all indexed content and cannot be undone. Use this tool when you want to start fresh with new content.", + "default": "private" + } + }, + "description": "States for each Firecrawl skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + }, + "api_key": { + "type": "string", + "title": "Firecrawl API Key", + "description": "API key for Firecrawl services", + "x-link": "[Get your API key](https://firecrawl.dev/)", + "x-sensitive": true + }, + "rate_limit_number": { + "type": "integer", + "title": "Rate Limit Number", + "description": "Number of requests allowed per time window", + "minimum": 1, + "maximum": 1000, + "default": 100 + }, + "rate_limit_minutes": { + "type": "integer", + "title": "Rate Limit Minutes", + "description": "Time window in minutes for rate limiting", + "minimum": 1, + "maximum": 1440, + "default": 60 + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/firecrawl/scrape.py b/intentkit/skills/firecrawl/scrape.py new file mode 100644 index 00000000..aada0d12 --- /dev/null +++ b/intentkit/skills/firecrawl/scrape.py @@ -0,0 +1,432 @@ +import logging +from typing import List, Optional, Type + +import httpx +from langchain_core.documents import Document +from pydantic import BaseModel, Field + +from intentkit.skills.firecrawl.base import FirecrawlBaseTool + +logger = logging.getLogger(__name__) + + +class FirecrawlScrapeInput(BaseModel): + """Input for Firecrawl scrape tool.""" + + url: str = Field( + description="The URL to scrape. Must be a valid HTTP or HTTPS URL." + ) + formats: List[str] = Field( + description="Output formats to include in the response. 
Options: 'markdown', 'html', 'rawHtml', 'screenshot', 'links', 'json'", + default=["markdown"], + ) + only_main_content: bool = Field( + description="Whether to extract only the main content (excluding headers, footers, navigation, etc.)", + default=True, + ) + include_tags: Optional[List[str]] = Field( + description="HTML tags, classes, or IDs to include in the response (e.g., ['h1', 'p', '.main-content'])", + default=None, + ) + exclude_tags: Optional[List[str]] = Field( + description="HTML tags, classes, or IDs to exclude from the response (e.g., ['#ad', '#footer'])", + default=None, + ) + wait_for: int = Field( + description="Wait time in milliseconds before scraping (use only as last resort)", + default=0, + ge=0, + ) + timeout: int = Field( + description="Maximum timeout in milliseconds for the scraping operation", + default=30000, + ge=1000, + le=120000, + ) + index_content: bool = Field( + description="Whether to index the scraped content for later querying (default: True)", + default=True, + ) + chunk_size: int = Field( + description="Size of text chunks for indexing (default: 1000)", + default=1000, + ge=100, + le=4000, + ) + chunk_overlap: int = Field( + description="Overlap between chunks (default: 200)", + default=200, + ge=0, + le=1000, + ) + + +class FirecrawlScrape(FirecrawlBaseTool): + """Tool for scraping web pages using Firecrawl with REPLACE behavior. + + This tool uses Firecrawl's API to scrape web pages and REPLACES any existing + indexed content for the same URL instead of appending to it. This prevents + duplicate content when re-scraping the same page. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "firecrawl_scrape" + description: str = ( + "Scrape a single web page and REPLACE any existing indexed content for that URL. " + "Unlike regular scrape, this tool removes old content before adding new content, preventing duplicates. " + "This tool can handle JavaScript-rendered content, PDFs, and dynamic websites. " + "Use this when you want to refresh/update content from a URL that was previously scraped." + ) + args_schema: Type[BaseModel] = FirecrawlScrapeInput + + async def _arun( + self, + url: str, + formats: List[str] = None, + only_main_content: bool = True, + include_tags: Optional[List[str]] = None, + exclude_tags: Optional[List[str]] = None, + wait_for: int = 0, + timeout: int = 30000, + index_content: bool = True, + chunk_size: int = 1000, + chunk_overlap: int = 200, + **kwargs, + ) -> str: + """Implementation of the Firecrawl scrape tool. + + Args: + url: The URL to scrape. + formats: Output formats to include in the response. + only_main_content: Whether to extract only main content. + include_tags: HTML tags/classes/IDs to include. + exclude_tags: HTML tags/classes/IDs to exclude. + wait_for: Wait time in milliseconds before scraping. + timeout: Maximum timeout in milliseconds. + index_content: Whether to index the content for later querying. + chunk_size: Size of text chunks for indexing. + chunk_overlap: Overlap between chunks. + config: The configuration for the tool call. + + Returns: + str: Formatted scraped content based on the requested formats. 
+ """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + logger.debug(f"firecrawl_scrape: Running scrape with context {context}") + + if skill_config.get("api_key_provider") == "agent_owner": + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return "Error: No Firecrawl API key provided in the configuration." + + # Validate and set defaults + if formats is None: + formats = ["markdown"] + + # Validate formats + valid_formats = ["markdown", "html", "rawHtml", "screenshot", "links", "json"] + formats = [f for f in formats if f in valid_formats] + if not formats: + formats = ["markdown"] + + # Prepare the request payload + payload = { + "url": url, + "formats": formats, + "onlyMainContent": only_main_content, + "timeout": timeout, + } + + if include_tags: + payload["includeTags"] = include_tags + if exclude_tags: + payload["excludeTags"] = exclude_tags + if wait_for > 0: + payload["waitFor"] = wait_for + + # Call Firecrawl scrape API + try: + async with httpx.AsyncClient(timeout=timeout / 1000 + 10) as client: + response = await client.post( + "https://api.firecrawl.dev/v1/scrape", + json=payload, + headers={ + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + }, + ) + + if response.status_code != 200: + logger.error( + f"firecrawl_scrape: Error from Firecrawl API: {response.status_code} - {response.text}" + ) + return ( + f"Error scraping URL: {response.status_code} - {response.text}" + ) + + data = response.json() + + if not data.get("success"): + error_msg = data.get("error", "Unknown error occurred") + return f"Error scraping URL: {error_msg}" + + result_data = data.get("data", {}) + + # Format the results based on requested formats + formatted_result = f"Successfully scraped (REPLACE mode): {url}\n\n" + + if "markdown" in formats and result_data.get("markdown"): + formatted_result += "## Markdown Content\n" + formatted_result += result_data["markdown"][:2000] # Limit length + if len(result_data["markdown"]) > 2000: + formatted_result += "... (content truncated)" + formatted_result += "\n\n" + + if "html" in formats and result_data.get("html"): + formatted_result += "## HTML Content\n" + formatted_result += f"HTML content available ({len(result_data['html'])} characters)\n\n" + + if "links" in formats and result_data.get("links"): + formatted_result += "## Extracted Links\n" + links = result_data["links"][:10] # Limit to first 10 links + for link in links: + formatted_result += f"- {link}\n" + if len(result_data["links"]) > 10: + formatted_result += ( + f"... and {len(result_data['links']) - 10} more links\n" + ) + formatted_result += "\n" + + if "json" in formats and result_data.get("json"): + formatted_result += "## Structured Data (JSON)\n" + formatted_result += str(result_data["json"])[:1000] # Limit length + if len(str(result_data["json"])) > 1000: + formatted_result += "... 
(data truncated)" + formatted_result += "\n\n" + + if "screenshot" in formats and result_data.get("screenshot"): + formatted_result += "## Screenshot\n" + formatted_result += ( + f"Screenshot available at: {result_data['screenshot']}\n\n" + ) + + # Add metadata information + metadata = result_data.get("metadata", {}) + if metadata: + formatted_result += "## Page Metadata\n" + if metadata.get("title"): + formatted_result += f"Title: {metadata['title']}\n" + if metadata.get("description"): + formatted_result += f"Description: {metadata['description']}\n" + if metadata.get("language"): + formatted_result += f"Language: {metadata['language']}\n" + formatted_result += "\n" + + # Index content if requested - REPLACE MODE + if index_content and result_data.get("markdown"): + try: + # Import indexing utilities + from langchain_community.vectorstores import FAISS + + from intentkit.skills.firecrawl.utils import ( + FirecrawlDocumentProcessor, + FirecrawlMetadataManager, + FirecrawlVectorStoreManager, + ) + + # Create document from scraped content + document = Document( + page_content=result_data["markdown"], + metadata={ + "source": url, + "title": metadata.get("title", ""), + "description": metadata.get("description", ""), + "language": metadata.get("language", ""), + "source_type": "firecrawl_scrape", + "indexed_at": str(context.agent_id), + }, + ) + + # Get agent ID for indexing + agent_id = context.agent_id + if agent_id: + # Initialize managers + vs_manager = FirecrawlVectorStoreManager(self.skill_store) + metadata_manager = FirecrawlMetadataManager( + self.skill_store + ) + + # Load existing vector store + existing_vector_store = await vs_manager.load_vector_store( + agent_id + ) + + # Split the new document into chunks + split_docs = FirecrawlDocumentProcessor.split_documents( + [document], chunk_size, chunk_overlap + ) + + # Create embeddings + embeddings = vs_manager.create_embeddings() + + if existing_vector_store: + # Get all existing documents and filter out those from the same URL + try: + # Try to access documents directly if available + if hasattr( + existing_vector_store, "docstore" + ) and hasattr( + existing_vector_store.docstore, "_dict" + ): + # Access FAISS documents directly + all_docs = list( + existing_vector_store.docstore._dict.values() + ) + else: + # Fallback: use a reasonable k value for similarity search + # Use a dummy query to retrieve documents + all_docs = existing_vector_store.similarity_search( + "dummy", # Use a dummy query instead of empty string + k=1000, # Use reasonable upper bound + ) + + # Filter out documents from the same URL + preserved_docs = [ + doc + for doc in all_docs + if doc.metadata.get("source") != url + ] + + logger.info( + f"firecrawl_scrape: Preserving {len(preserved_docs)} docs from other URLs, " + f"replacing content from {url}" + ) + + # Create new vector store with preserved docs + new docs + if preserved_docs: + # Combine preserved and new documents + all_documents = preserved_docs + split_docs + new_vector_store = FAISS.from_documents( + all_documents, embeddings + ) + formatted_result += "\n## Content Replacement\n" + formatted_result += f"Replaced existing content for URL: {url}\n" + num_preserved_urls = len( + set( + doc.metadata.get("source", "") + for doc in preserved_docs + ) + ) + formatted_result += f"Preserved content from {num_preserved_urls} other URLs\n" + else: + # No other documents to preserve, just create from new docs + new_vector_store = FAISS.from_documents( + split_docs, embeddings + ) + formatted_result += "\n## 
Content Replacement\n" + formatted_result += f"Created new index with content from: {url}\n" + except Exception as e: + logger.warning( + f"Could not preserve other URLs, creating fresh index: {e}" + ) + # Fallback: create new store with just the new documents + new_vector_store = FAISS.from_documents( + split_docs, embeddings + ) + formatted_result += "\n## Content Replacement\n" + formatted_result += f"Created fresh index with content from: {url}\n" + else: + # No existing store, create new one + new_vector_store = FAISS.from_documents( + split_docs, embeddings + ) + formatted_result += "\n## Content Indexing\n" + formatted_result += ( + f"Created new index with content from: {url}\n" + ) + + # Save the new vector store + await vs_manager.save_vector_store( + agent_id, new_vector_store, chunk_size, chunk_overlap + ) + + # Update metadata to track all URLs + # Get existing metadata to preserve other URLs + metadata_key = f"indexed_urls_{agent_id}" + existing_metadata = ( + await self.skill_store.get_agent_skill_data( + agent_id, "firecrawl", metadata_key + ) + ) + + if existing_metadata and existing_metadata.get("urls"): + # Remove the current URL and add it back (to update timestamp) + existing_urls = [ + u for u in existing_metadata["urls"] if u != url + ] + existing_urls.append(url) + updated_metadata = { + "urls": existing_urls, + "document_count": len(existing_urls), + "source_type": "firecrawl_mixed", + "indexed_at": str(len(existing_urls)), + } + else: + # Create new metadata + updated_metadata = metadata_manager.create_url_metadata( + [url], [document], "firecrawl_scrape" + ) + + await metadata_manager.update_metadata( + agent_id, updated_metadata + ) + + formatted_result += "\n## Content Indexing (REPLACE MODE)\n" + formatted_result += "Successfully REPLACED indexed content in vector store:\n" + formatted_result += f"- Chunks created: {len(split_docs)}\n" + formatted_result += f"- Chunk size: {chunk_size}\n" + formatted_result += f"- Chunk overlap: {chunk_overlap}\n" + formatted_result += ( + "- Previous content for this URL: REPLACED\n" + ) + formatted_result += "Use the 'firecrawl_query_indexed_content' skill to search this content.\n" + + logger.info( + f"firecrawl_scrape: Successfully replaced content for {url} with {len(split_docs)} chunks" + ) + else: + formatted_result += "\n## Content Indexing\n" + formatted_result += "Warning: Could not index content - agent ID not available.\n" + + except Exception as index_error: + logger.error( + f"firecrawl_scrape: Error indexing content: {index_error}" + ) + formatted_result += "\n## Content Indexing\n" + formatted_result += f"Warning: Failed to index content for later querying: {str(index_error)}\n" + + return formatted_result.strip() + + except httpx.TimeoutException: + logger.error(f"firecrawl_scrape: Timeout scraping URL: {url}") + return ( + f"Timeout error: The request to scrape {url} took too long to complete." 
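The whole REPLACE step above condenses to one transformation over the FAISS docstore: keep every chunk whose `source` differs from the re-scraped URL, then rebuild the index with the fresh chunks appended. A condensed sketch, relying on the same `docstore._dict` internal the handler already reads:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document


def replace_url_docs(
    existing: FAISS, new_chunks: list[Document], url: str, embeddings
) -> FAISS:
    # FAISS keeps its documents in an in-memory docstore we can read directly.
    kept = [
        doc
        for doc in existing.docstore._dict.values()
        if doc.metadata.get("source") != url
    ]
    return FAISS.from_documents(kept + new_chunks, embeddings)
```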
+ ) + except Exception as e: + logger.error(f"firecrawl_scrape: Error scraping URL: {e}", exc_info=True) + return f"An error occurred while scraping the URL: {str(e)}" diff --git a/intentkit/skills/firecrawl/utils.py b/intentkit/skills/firecrawl/utils.py new file mode 100644 index 00000000..620da2b3 --- /dev/null +++ b/intentkit/skills/firecrawl/utils.py @@ -0,0 +1,306 @@ +"""Utilities for Firecrawl skill content indexing and querying.""" + +import logging +import re +from typing import Any, Dict, List, Optional, Tuple + +from langchain.text_splitter import RecursiveCharacterTextSplitter +from langchain_community.vectorstores import FAISS +from langchain_core.documents import Document +from langchain_openai import OpenAIEmbeddings + +from intentkit.abstracts.skill import SkillStoreABC + +logger = logging.getLogger(__name__) + + +class FirecrawlDocumentProcessor: + """Handles document processing and sanitization for Firecrawl content.""" + + @staticmethod + def sanitize_for_database(text: str) -> str: + """Sanitize text content to prevent database storage errors.""" + if not text: + return "" + + # Remove null bytes and other problematic characters + text = text.replace("\x00", "") + text = re.sub(r"[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f]", "", text) + + # Normalize whitespace + text = re.sub(r"\s+", " ", text) + text = text.strip() + + return text + + @staticmethod + def split_documents( + documents: List[Document], chunk_size: int = 1000, chunk_overlap: int = 200 + ) -> List[Document]: + """Split documents into smaller chunks for better indexing.""" + text_splitter = RecursiveCharacterTextSplitter( + chunk_size=chunk_size, + chunk_overlap=chunk_overlap, + length_function=len, + ) + + split_docs = [] + for doc in documents: + # Sanitize content before splitting + sanitized_content = FirecrawlDocumentProcessor.sanitize_for_database( + doc.page_content + ) + doc.page_content = sanitized_content + + # Split the document + chunks = text_splitter.split_documents([doc]) + split_docs.extend(chunks) + + return split_docs + + +class FirecrawlVectorStoreManager: + """Manages vector store operations for Firecrawl content.""" + + def __init__(self, skill_store: SkillStoreABC): + self.skill_store = skill_store + + def create_embeddings(self) -> OpenAIEmbeddings: + """Create OpenAI embeddings instance.""" + openai_api_key = self.skill_store.get_system_config("openai_api_key") + if not openai_api_key: + raise ValueError("OpenAI API key not found in system configuration") + + return OpenAIEmbeddings( + openai_api_key=openai_api_key, model="text-embedding-3-small" + ) + + def encode_vector_store(self, vector_store: FAISS) -> Dict[str, str]: + """Encode FAISS vector store to base64 for storage (compatible with web_scraper).""" + import base64 + import os + import tempfile + + try: + with tempfile.TemporaryDirectory() as temp_dir: + vector_store.save_local(temp_dir) + + encoded_files = {} + for filename in os.listdir(temp_dir): + file_path = os.path.join(temp_dir, filename) + if os.path.isfile(file_path): + with open(file_path, "rb") as f: + encoded_files[filename] = base64.b64encode(f.read()).decode( + "utf-8" + ) + + return encoded_files + except Exception as e: + logger.error(f"Error encoding vector store: {e}") + raise + + def decode_vector_store( + self, encoded_files: Dict[str, str], embeddings: OpenAIEmbeddings + ) -> FAISS: + """Decode base64 files back to FAISS vector store (compatible with web_scraper).""" + import base64 + import os + import tempfile + + try: + with tempfile.TemporaryDirectory() as 
temp_dir: + # Decode and write files + for filename, encoded_content in encoded_files.items(): + file_path = os.path.join(temp_dir, filename) + with open(file_path, "wb") as f: + f.write(base64.b64decode(encoded_content)) + + # Load vector store + return FAISS.load_local( + temp_dir, + embeddings, + allow_dangerous_deserialization=True, + ) + except Exception as e: + logger.error(f"Error decoding vector store: {e}") + raise + + async def load_vector_store(self, agent_id: str) -> Optional[FAISS]: + """Load existing vector store for an agent.""" + try: + vector_store_key = f"vector_store_{agent_id}" + stored_data = await self.skill_store.get_agent_skill_data( + agent_id, "web_scraper", vector_store_key + ) + + if not stored_data or "faiss_files" not in stored_data: + return None + + embeddings = self.create_embeddings() + return self.decode_vector_store(stored_data["faiss_files"], embeddings) + + except Exception as e: + logger.error(f"Error loading vector store for agent {agent_id}: {e}") + return None + + async def save_vector_store( + self, + agent_id: str, + vector_store: FAISS, + chunk_size: int = 1000, + chunk_overlap: int = 200, + ) -> None: + """Save vector store for an agent (compatible with web_scraper format).""" + try: + vector_store_key = f"vector_store_{agent_id}" + encoded_files = self.encode_vector_store(vector_store) + + # Use the same data structure as web_scraper + storage_data = { + "faiss_files": encoded_files, + "chunk_size": chunk_size, + "chunk_overlap": chunk_overlap, + } + + await self.skill_store.save_agent_skill_data( + agent_id, "web_scraper", vector_store_key, storage_data + ) + + except Exception as e: + logger.error(f"Error saving vector store for agent {agent_id}: {e}") + raise + + +class FirecrawlMetadataManager: + """Manages metadata for Firecrawl indexed content.""" + + def __init__(self, skill_store: SkillStoreABC): + self.skill_store = skill_store + + def create_url_metadata( + self, urls: List[str], documents: List[Document], source_type: str + ) -> Dict[str, Any]: + """Create metadata for indexed URLs.""" + return { + "urls": urls, + "document_count": len(documents), + "source_type": source_type, + "indexed_at": str(len(urls)), # Simple counter + } + + async def update_metadata( + self, agent_id: str, new_metadata: Dict[str, Any] + ) -> None: + """Update metadata for an agent.""" + try: + metadata_key = f"indexed_urls_{agent_id}" + await self.skill_store.save_agent_skill_data( + agent_id, "web_scraper", metadata_key, new_metadata + ) + except Exception as e: + logger.error(f"Error updating metadata for agent {agent_id}: {e}") + raise + + +async def index_documents( + documents: List[Document], + agent_id: str, + skill_store: SkillStoreABC, + chunk_size: int = 1000, + chunk_overlap: int = 200, +) -> Tuple[int, bool]: + """ + Index documents into the Firecrawl vector store. 
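FAISS only persists to disk, so the encode/decode pair above shuttles its files through a temporary directory and base64 to make the whole index fit into the key-value skill store. A quick round-trip sketch, assuming an OpenAI key is available in the environment and with `skill_store` standing in for any `SkillStoreABC` implementation:

```python
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings

from intentkit.skills.firecrawl.utils import FirecrawlVectorStoreManager

embeddings = OpenAIEmbeddings(model="text-embedding-3-small")
store = FAISS.from_documents([Document(page_content="hello world")], embeddings)

manager = FirecrawlVectorStoreManager(skill_store)  # skill_store: SkillStoreABC
encoded = manager.encode_vector_store(store)
print(sorted(encoded))  # ['index.faiss', 'index.pkl'] -- FAISS's on-disk files

restored = manager.decode_vector_store(encoded, embeddings)
print(restored.similarity_search("hello", k=1)[0].page_content)  # 'hello world'
```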
+ + Args: + documents: List of documents to index + agent_id: Agent ID for storage + skill_store: Skill store for persistence + chunk_size: Size of text chunks + chunk_overlap: Overlap between chunks + + Returns: + Tuple of (total_chunks, was_merged_with_existing) + """ + try: + # Initialize managers + vs_manager = FirecrawlVectorStoreManager(skill_store) + + # Split documents into chunks + split_docs = FirecrawlDocumentProcessor.split_documents( + documents, chunk_size, chunk_overlap + ) + + if not split_docs: + logger.warning("No documents to index after splitting") + return 0, False + + # Create embeddings + embeddings = vs_manager.create_embeddings() + + # Try to load existing vector store + existing_vector_store = await vs_manager.load_vector_store(agent_id) + + if existing_vector_store: + # Add to existing vector store + existing_vector_store.add_documents(split_docs) + vector_store = existing_vector_store + was_merged = True + else: + # Create new vector store + vector_store = FAISS.from_documents(split_docs, embeddings) + was_merged = False + + # Save the vector store + await vs_manager.save_vector_store( + agent_id, vector_store, chunk_size, chunk_overlap + ) + + logger.info( + f"Successfully indexed {len(split_docs)} chunks for agent {agent_id}" + ) + return len(split_docs), was_merged + + except Exception as e: + logger.error(f"Error indexing documents for agent {agent_id}: {e}") + raise + + +async def query_indexed_content( + query: str, + agent_id: str, + skill_store: SkillStoreABC, + max_results: int = 4, +) -> List[Document]: + """ + Query the Firecrawl indexed content. + + Args: + query: Search query + agent_id: Agent ID + skill_store: Skill store for persistence + max_results: Maximum number of results to return + + Returns: + List of relevant documents + """ + try: + # Initialize vector store manager + vs_manager = FirecrawlVectorStoreManager(skill_store) + + # Load vector store + vector_store = await vs_manager.load_vector_store(agent_id) + + if not vector_store: + logger.warning(f"No vector store found for agent {agent_id}") + return [] + + # Perform similarity search + docs = vector_store.similarity_search(query, k=max_results) + + logger.info(f"Found {len(docs)} documents for query: {query}") + return docs + + except Exception as e: + logger.error(f"Error querying indexed content for agent {agent_id}: {e}") + raise diff --git a/intentkit/skills/github/README.md b/intentkit/skills/github/README.md new file mode 100644 index 00000000..3c4354e0 --- /dev/null +++ b/intentkit/skills/github/README.md @@ -0,0 +1,63 @@ +# GitHub Skill + +This skill enables agents to search GitHub for repositories, users, and code using GitHub's public API endpoints. 
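Under the hood each search is a single unauthenticated request to GitHub's REST search API; a minimal sketch of the equivalent repository query:

```python
# Roughly what the skill sends for a repository search (no token required).
import httpx

resp = httpx.get(
    "https://api.github.com/search/repositories",
    headers={"Accept": "application/vnd.github.v3+json"},
    params={"q": "blockchain", "per_page": 5},
)
for item in resp.json().get("items", []):
    print(item["full_name"], item["stargazers_count"])
```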
+ +## Features + +- Search GitHub repositories by name, description, or topics +- Search GitHub users by username or real name +- Search code snippets across GitHub repositories +- No authentication required (uses public API endpoints) +- Rate limit aware (respects GitHub's public API limits) + +## Configuration + +Add the GitHub skill to your agent's configuration: + +```yaml +skills: + github: + states: + github_search: public # or private if you want to restrict access +``` + +## Usage Examples + +The agent can use the GitHub skill to answer questions like: + +- "Find repositories about blockchain development" +- "Search for users who work on web3 projects" +- "Find code examples of smart contracts in Solidity" +- "Show me popular Python machine learning repositories" +- "Find developers who contribute to Ethereum" + +## Rate Limits + +The skill uses GitHub's public API which has the following rate limits: +- 60 requests per hour per IP address +- No authentication required +- Results are limited to public repositories and users + +## Implementation Details + +The skill uses the following GitHub API endpoints: +- `/search/repositories` - For searching repositories +- `/search/users` - For searching users +- `/search/code` - For searching code + +Each search result includes: +- For repositories: name, description, language, stars count, and URL +- For users: username, name, bio, and profile URL +- For code: repository name, file path, and URL + +## Error Handling + +The skill handles various error cases: +- API rate limits +- Network errors +- Invalid queries +- No results found + +## Logging + +All operations are logged with the prefix `github_search.py:` for easy debugging and monitoring. \ No newline at end of file diff --git a/intentkit/skills/github/__init__.py b/intentkit/skills/github/__init__.py new file mode 100644 index 00000000..ef8c8d88 --- /dev/null +++ b/intentkit/skills/github/__init__.py @@ -0,0 +1,54 @@ +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.github.base import GitHubBaseTool +from intentkit.skills.github.github_search import GitHubSearch + +# Cache skills at the system level, because they are stateless +_cache: dict[str, GitHubBaseTool] = {} + + +class SkillStates(TypedDict): + github_search: SkillState + + +class Config(SkillConfig): + """Configuration for GitHub skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[GitHubBaseTool]: + """Get all GitHub skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + return [get_github_skill(name, store) for name in available_skills] + + +def get_github_skill( + name: str, + store: SkillStoreABC, +) -> GitHubBaseTool: + """Get a GitHub skill by name.""" + if name == "github_search": + if name not in _cache: + _cache[name] = GitHubSearch( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown GitHub skill: {name}") diff --git a/intentkit/skills/github/base.py b/intentkit/skills/github/base.py new file mode 100644 index 00000000..91c69f1a --- /dev/null +++ b/intentkit/skills/github/base.py @@ -0,0 +1,21 @@ +from typing import Type + 
+from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class GitHubBaseTool(IntentKitSkill): + """Base class for GitHub tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "github" diff --git a/intentkit/skills/github/github.jpg b/intentkit/skills/github/github.jpg new file mode 100644 index 00000000..e28a8373 Binary files /dev/null and b/intentkit/skills/github/github.jpg differ diff --git a/intentkit/skills/github/github_search.py b/intentkit/skills/github/github_search.py new file mode 100644 index 00000000..1b218b58 --- /dev/null +++ b/intentkit/skills/github/github_search.py @@ -0,0 +1,181 @@ +import logging +from enum import Enum +from typing import Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.github.base import GitHubBaseTool + +logger = logging.getLogger(__name__) + + +class SearchType(str, Enum): + REPOSITORIES = "repositories" + USERS = "users" + CODE = "code" + + +class GitHubSearchInput(BaseModel): + """Input for GitHub search tool.""" + + query: str = Field( + description="The search query to look up on GitHub.", + ) + search_type: SearchType = Field( + description="Type of GitHub search to perform (repositories, users, or code).", + default=SearchType.REPOSITORIES, + ) + max_results: int = Field( + description="Maximum number of search results to return (1-30).", + default=5, + ge=1, + le=30, + ) + + +class GitHubSearch(GitHubBaseTool): + """Tool for searching GitHub. + + This tool uses GitHub's public REST API to search for repositories, users, and code. + No authentication is required as it uses public endpoints. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "github_search" + description: str = ( + "Search GitHub for repositories, users, or code. Use this tool when you need to find:\n" + "- GitHub repositories by name, description, or topics\n" + "- GitHub users by username or real name\n" + "- Code snippets across GitHub repositories\n" + "You must call this tool whenever the user asks about finding something on GitHub." + ) + args_schema: Type[BaseModel] = GitHubSearchInput + + async def _arun( + self, + query: str, + search_type: SearchType = SearchType.REPOSITORIES, + max_results: int = 5, + **kwargs, + ) -> str: + """Implementation of the GitHub search tool. + + Args: + query: The search query to look up. + search_type: Type of search to perform (repositories, users, or code). + max_results: Maximum number of search results to return (1-30). + config: The configuration for the tool call. + + Returns: + str: Formatted search results based on the search type. 
+ """ + context = self.get_context() + logger.debug(f"github_search.py: Running GitHub search with context {context}") + + # Limit max_results to a reasonable range + max_results = max(1, min(max_results, 30)) + + headers = { + "Accept": "application/vnd.github.v3+json", + } + + # Build the search URL based on search type + base_url = "https://api.github.com/search" + search_url = f"{base_url}/{search_type.value}" + logger.debug(f"github_search.py: Searching GitHub at {search_url}") + + try: + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.get( + search_url, + headers=headers, + params={"q": query, "per_page": max_results}, + ) + + if response.status_code == 403: + rate_limit = response.headers.get( + "X-RateLimit-Remaining", "unknown" + ) + reset_time = response.headers.get("X-RateLimit-Reset", "unknown") + logger.warning( + f"github_search.py: Rate limit reached. Remaining: {rate_limit}, Reset: {reset_time}" + ) + return ( + "GitHub API rate limit reached. Please try again in a few minutes. " + "The rate limit resets every hour for unauthenticated requests." + ) + elif response.status_code != 200: + logger.error( + f"github_search.py: Error from GitHub API: {response.status_code} - {response.text}" + ) + return f"Error searching GitHub: {response.status_code} - {response.text}" + + data = response.json() + items = data.get("items", []) + + if not items: + return f"No results found for query: '{query}'" + + # Format results based on search type + formatted_results = ( + f"GitHub search results for '{query}' ({search_type.value}):\n\n" + ) + + for i, item in enumerate(items, 1): + if search_type == SearchType.REPOSITORIES: + name = item.get("full_name", "No name") + description = item.get("description", "No description") + url = item.get("html_url", "No URL") + stars = item.get("stargazers_count", 0) + language = item.get("language", "Not specified") + + formatted_results += f"{i}. {name}\n" + formatted_results += f"Description: {description}\n" + formatted_results += f"Language: {language} | Stars: {stars}\n" + formatted_results += f"URL: {url}\n\n" + + elif search_type == SearchType.USERS: + login = item.get("login", "No username") + name = item.get("name", "No name") + bio = item.get("bio", "No bio") + url = item.get("html_url", "No URL") + followers = item.get("followers", 0) + public_repos = item.get("public_repos", 0) + + formatted_results += f"{i}. {login}" + if name: + formatted_results += f" ({name})" + formatted_results += "\n" + if bio: + formatted_results += f"Bio: {bio}\n" + formatted_results += ( + f"Followers: {followers} | Public Repos: {public_repos}\n" + ) + formatted_results += f"URL: {url}\n\n" + + elif search_type == SearchType.CODE: + repo = item.get("repository", {}).get( + "full_name", "No repository" + ) + path = item.get("path", "No path") + url = item.get("html_url", "No URL") + + formatted_results += f"{i}. Found in {repo}\n" + formatted_results += f"File: {path}\n" + formatted_results += f"URL: {url}\n\n" + + return formatted_results.strip() + + except httpx.TimeoutException: + logger.error("github_search.py: Request timed out") + return "The request to GitHub timed out. Please try again later." + except Exception as e: + logger.error( + f"github_search.py: Error searching GitHub: {e}", exc_info=True + ) + return "An error occurred while searching GitHub. Please try again later." 
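When the 403 branch above fires, the reset time comes from GitHub's `X-RateLimit-*` response headers. The dedicated `/rate_limit` endpoint exposes the same counters without consuming quota, which is handy for checking headroom before a burst of searches:

```python
import datetime

import httpx

# Unauthenticated clients get 60 core requests/hour; 'reset' is a Unix timestamp.
resp = httpx.get("https://api.github.com/rate_limit")
core = resp.json()["resources"]["core"]
print(core["remaining"], "requests left; window resets at",
      datetime.datetime.fromtimestamp(core["reset"]))
```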
diff --git a/intentkit/skills/github/schema.json b/intentkit/skills/github/schema.json new file mode 100644 index 00000000..43357700 --- /dev/null +++ b/intentkit/skills/github/schema.json @@ -0,0 +1,59 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "GitHub", + "description": "Search capabilities for GitHub repositories, users, and code", + "x-icon": "https://ai.service.crestal.dev/skills/github/github.jpg", + "x-tags": [ + "GitHub", + "Search", + "Code" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "github_search": { + "type": "string", + "title": "GitHub Search", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Search GitHub for repositories, users, and code", + "default": "disabled" + } + }, + "description": "States for each GitHub search skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform" + ], + "x-enum-title": [ + "Nation Hosted" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/heurist/__init__.py b/intentkit/skills/heurist/__init__.py new file mode 100644 index 00000000..889702bc --- /dev/null +++ b/intentkit/skills/heurist/__init__.py @@ -0,0 +1,143 @@ +"""Heurist AI skills.""" + +import logging +from typing import NotRequired, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.skills.heurist.image_generation_animagine_xl import ( + ImageGenerationAnimagineXL, +) +from intentkit.skills.heurist.image_generation_arthemy_comics import ( + ImageGenerationArthemyComics, +) +from intentkit.skills.heurist.image_generation_arthemy_real import ( + ImageGenerationArthemyReal, +) +from intentkit.skills.heurist.image_generation_braindance import ( + ImageGenerationBrainDance, +) +from intentkit.skills.heurist.image_generation_cyber_realistic_xl import ( + ImageGenerationCyberRealisticXL, +) +from intentkit.skills.heurist.image_generation_flux_1_dev import ImageGenerationFlux1Dev +from intentkit.skills.heurist.image_generation_sdxl import ImageGenerationSDXL + +# Cache skills at the system level, because they are stateless +_cache: dict[str, HeuristBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + image_generation_animagine_xl: SkillState + image_generation_arthemy_comics: SkillState + image_generation_arthemy_real: SkillState + image_generation_braindance: SkillState + image_generation_cyber_realistic_xl: SkillState + image_generation_flux_1_dev: SkillState + image_generation_sdxl: SkillState + + +class Config(SkillConfig): + """Configuration for Heurist AI skills.""" + + states: SkillStates + api_key: NotRequired[str] + rate_limit_number: NotRequired[int] + rate_limit_minutes: NotRequired[int] + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[HeuristBaseTool]: + """Get all Heurist AI skills. + + Args: + config: The configuration for Heurist AI skills. 
+ is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Heurist AI skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_heurist_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_heurist_skill( + name: str, + store: SkillStoreABC, +) -> HeuristBaseTool: + """Get a Heurist AI skill by name. + + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Heurist AI skill + """ + if name == "image_generation_animagine_xl": + if name not in _cache: + _cache[name] = ImageGenerationAnimagineXL( + skill_store=store, + ) + return _cache[name] + elif name == "image_generation_arthemy_comics": + if name not in _cache: + _cache[name] = ImageGenerationArthemyComics( + skill_store=store, + ) + return _cache[name] + elif name == "image_generation_arthemy_real": + if name not in _cache: + _cache[name] = ImageGenerationArthemyReal( + skill_store=store, + ) + return _cache[name] + elif name == "image_generation_braindance": + if name not in _cache: + _cache[name] = ImageGenerationBrainDance( + skill_store=store, + ) + return _cache[name] + elif name == "image_generation_cyber_realistic_xl": + if name not in _cache: + _cache[name] = ImageGenerationCyberRealisticXL( + skill_store=store, + ) + return _cache[name] + elif name == "image_generation_flux_1_dev": + if name not in _cache: + _cache[name] = ImageGenerationFlux1Dev( + skill_store=store, + ) + return _cache[name] + elif name == "image_generation_sdxl": + if name not in _cache: + _cache[name] = ImageGenerationSDXL( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown Heurist skill: {name}") + return None diff --git a/intentkit/skills/heurist/base.py b/intentkit/skills/heurist/base.py new file mode 100644 index 00000000..188ccc4a --- /dev/null +++ b/intentkit/skills/heurist/base.py @@ -0,0 +1,41 @@ +"""Base class for Heurist AI skills.""" + +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class HeuristBaseTool(IntentKitSkill): + """Base class for all Heurist AI skills. + + This class provides common functionality for all Heurist AI skills. 
+ """ + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + return self.skill_store.get_system_config("heurist_api_key") + # for backward compatibility, may only have api_key in skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "heurist" diff --git a/intentkit/skills/heurist/heurist.png b/intentkit/skills/heurist/heurist.png new file mode 100644 index 00000000..ab836372 Binary files /dev/null and b/intentkit/skills/heurist/heurist.png differ diff --git a/intentkit/skills/heurist/image_generation_animagine_xl.py b/intentkit/skills/heurist/image_generation_animagine_xl.py new file mode 100644 index 00000000..b587073e --- /dev/null +++ b/intentkit/skills/heurist/image_generation_animagine_xl.py @@ -0,0 +1,161 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationAnimagineXLInput(BaseModel): + """Input for ImageGenerationAnimagineXL tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="Negative prompt describing what to avoid in the generated image.", + ) + width: Optional[int] = Field( + default=1024, + le=1024, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=680, + le=1024, + description="Height of the generated image.", + ) + + +class ImageGenerationAnimagineXL(HeuristBaseTool): + """Tool for generating Japanese anime-style images using Heurist AI's AnimagineXL model. + + This tool takes a text prompt and uses Heurist's API to generate + a Japanese anime-style image based on the description. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "heurist_image_generation_animagine_xl" + description: str = ( + "Generate Japanese anime-style images using Heurist AI's AnimagineXL model.\n" + "Provide a text prompt describing the anime-style image you want to generate.\n" + "AnimagineXL specializes in creating high-quality Japanese anime-style illustrations.\n" + "If you have height and width, remember to specify them.\n" + ) + args_schema: Type[BaseModel] = ImageGenerationAnimagineXLInput + + async def _arun( + self, + prompt: str, + neg_prompt: Optional[str] = "(worst quality: 1.4), bad quality, nsfw", + width: Optional[int] = 1024, + height: Optional[int] = 680, + **kwargs, + ) -> str: + """Implementation of the tool to generate Japanese anime-style images using Heurist AI's AnimagineXL model. + + Args: + prompt: Text prompt describing the image to generate. 
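The Heurist sequencer takes a one-shot job submission and answers with the generated image's URL as a quoted string, which is why the handler below calls `response.text.strip('"')`. A bare-bones synchronous sketch of that call (`uuid` stands in for the `XID` job ids used in this PR):

```python
import uuid

import httpx


def generate(prompt: str, api_key: str) -> str:
    # Field values mirror the AnimagineXL payload built in _arun below.
    payload = {
        "job_id": str(uuid.uuid4()),  # stand-in for str(XID())
        "model_input": {
            "SD": {
                "prompt": prompt,
                "neg_prompt": "(worst quality: 1.4), bad quality, nsfw",
                "num_iterations": 25,
                "width": 1024,
                "height": 680,
                "guidance_scale": 5,
                "seed": -1,
            }
        },
        "model_id": "AnimagineXL",
        "deadline": 180,
        "priority": 1,
    }
    resp = httpx.post(
        "http://sequencer.heurist.xyz/submit_job",
        json=payload,
        headers={"Authorization": f"Bearer {api_key}"},
        timeout=180,
    )
    resp.raise_for_status()
    return resp.text.strip('"')  # the sequencer returns a quoted image URL
```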
+ neg_prompt: Negative prompt describing what to avoid in the generated image. + width: Width of the generated image. + height: Height of the generated image. + config: Configuration for the runnable. + tool_call_id: The ID of the tool call, can be used for tracking or correlation. + + Returns: + str: URL of the generated image. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + skill_config = skill_config + + # Get the Heurist API key from the skill store + if "api_key" in skill_config and skill_config["api_key"]: + api_key = skill_config["api_key"] + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + else: + api_key = self.skill_store.get_system_config("heurist_api_key") + await self.user_rate_limit_by_category(context.user_id, 10, 1440) + + # Generate a unique job ID + job_id = str(XID()) + + # Prepare the request payload + payload = { + "job_id": job_id, + "model_input": { + "SD": { + "prompt": prompt, + "neg_prompt": neg_prompt, + "num_iterations": 25, + "width": width, + "height": height, + "guidance_scale": 5, + "seed": -1, + } + }, + "model_id": "AnimagineXL", + "deadline": 180, + "priority": 1, + } + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + try: + # Make the API request + async with httpx.AsyncClient() as client: + response = await client.post( + "http://sequencer.heurist.xyz/submit_job", + json=payload, + headers=headers, + timeout=180, + ) + logger.debug(f"Heurist API response: {response.text}") + response.raise_for_status() + + # Store the image URL + image_url = response.text.strip('"') + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/heurist/{job_id}" + # Store the image and get the CDN URL + stored_url = await store_image(image_url, image_key) + + # Return the stored image URL + return stored_url + + except httpx.HTTPStatusError as e: + # Extract error details from response + try: + error_json = e.response.json() + error_code = error_json.get("error", "") + error_message = error_json.get("message", "") + full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}" + except Exception: + full_error = f"Heurist API error: {e}" + + logger.error(full_error) + raise Exception(full_error) + + except Exception as e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/image_generation_arthemy_comics.py b/intentkit/skills/heurist/image_generation_arthemy_comics.py new file mode 100644 index 00000000..87d3eac4 --- /dev/null +++ b/intentkit/skills/heurist/image_generation_arthemy_comics.py @@ -0,0 +1,161 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationArthemyComicsInput(BaseModel): + """Input for ImageGenerationArthemyComics tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="Negative prompt describing what to avoid in the generated 
image.", + ) + width: Optional[int] = Field( + default=1024, + le=1024, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=1024, + le=1024, + description="Height of the generated image.", + ) + + +class ImageGenerationArthemyComics(HeuristBaseTool): + """Tool for generating comic-style images using Heurist AI's ArthemyComics model. + + This tool takes a text prompt and uses Heurist's API to generate + a comic-style image based on the description. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "heurist_image_generation_arthemy_comics" + description: str = ( + "Generate comic-style images using Heurist AI's ArthemyComics model.\n" + "Provide a text prompt describing the comic-style image you want to generate.\n" + "ArthemyComics specializes in creating vibrant, stylized comic book illustrations.\n" + "If you have height and width, remember to specify them.\n" + ) + args_schema: Type[BaseModel] = ImageGenerationArthemyComicsInput + + async def _arun( + self, + prompt: str, + neg_prompt: Optional[str] = "(worst quality: 1.4), bad quality, nsfw", + width: Optional[int] = 1024, + height: Optional[int] = 680, + **kwargs, + ) -> str: + """Implementation of the tool to generate comic-style images using Heurist AI's ArthemyComics model. + + Args: + prompt: Text prompt describing the image to generate. + neg_prompt: Negative prompt describing what to avoid in the generated image. + width: Width of the generated image. + height: Height of the generated image. + config: Configuration for the runnable. + + Returns: + str: URL of the generated image. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + skill_config = skill_config + + # Get the Heurist API key from the skill store + if "api_key" in skill_config and skill_config["api_key"]: + api_key = skill_config["api_key"] + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + else: + api_key = self.skill_store.get_system_config("heurist_api_key") + await self.user_rate_limit_by_category(context.user_id, 10, 1440) + + # Generate a unique job ID + job_id = str(XID()) + + # Prepare the request payload + payload = { + "job_id": job_id, + "model_input": { + "SD": { + "prompt": prompt, + "neg_prompt": neg_prompt, + "num_iterations": 25, + "width": width, + "height": height, + "guidance_scale": 5, + "seed": -1, + } + }, + "model_id": "ArthemyComics", + "deadline": 120, + "priority": 1, + } + logger.debug(f"Heurist API payload: {payload}") + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + try: + # Make the API request + async with httpx.AsyncClient() as client: + response = await client.post( + "http://sequencer.heurist.xyz/submit_job", + json=payload, + headers=headers, + timeout=120, + ) + logger.debug(f"Heurist API response: {response.text}") + response.raise_for_status() + + # Store the image URL + image_url = response.text.strip('"') + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/heurist/{job_id}" + # Store the image and get the CDN URL + stored_url = await store_image(image_url, image_key) + + # Return the stored image URL + return stored_url + + except httpx.HTTPStatusError as e: 
+ # Extract error details from response + try: + error_json = e.response.json() + error_code = error_json.get("error", "") + error_message = error_json.get("message", "") + full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}" + except Exception: + full_error = f"Heurist API error: {e}" + + logger.error(full_error) + raise Exception(full_error) + + except Exception as e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/image_generation_arthemy_real.py b/intentkit/skills/heurist/image_generation_arthemy_real.py new file mode 100644 index 00000000..f3c6bbce --- /dev/null +++ b/intentkit/skills/heurist/image_generation_arthemy_real.py @@ -0,0 +1,161 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationArthemyRealInput(BaseModel): + """Input for ImageGenerationArthemyReal tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="Negative prompt describing what to avoid in the generated image.", + ) + width: Optional[int] = Field( + default=1024, + le=1024, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=1024, + le=1024, + description="Height of the generated image.", + ) + + +class ImageGenerationArthemyReal(HeuristBaseTool): + """Tool for generating realistic images using Heurist AI's ArthemyReal model. + + This tool takes a text prompt and uses Heurist's API to generate + a realistic image based on the description. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "heurist_image_generation_arthemy_real" + description: str = ( + "Generate realistic images using Heurist AI's ArthemyReal model.\n" + "Provide a text prompt describing the realistic image you want to generate.\n" + "ArthemyReal specializes in creating photorealistic, lifelike images with fine details.\n" + "If you have height and width, remember to specify them.\n" + ) + args_schema: Type[BaseModel] = ImageGenerationArthemyRealInput + + async def _arun( + self, + prompt: str, + neg_prompt: Optional[str] = "(worst quality: 1.4), bad quality, nsfw", + width: Optional[int] = 1024, + height: Optional[int] = 680, + **kwargs, + ) -> str: + """Implementation of the tool to generate realistic images using Heurist AI's ArthemyReal model. + + Args: + prompt: Text prompt describing the image to generate. + neg_prompt: Negative prompt describing what to avoid in the generated image. + width: Width of the generated image. + height: Height of the generated image. + config: Configuration for the runnable. + + Returns: + str: URL of the generated image. 
+ """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + skill_config = skill_config + + # Get the Heurist API key from the skill store + if "api_key" in skill_config and skill_config["api_key"]: + api_key = skill_config["api_key"] + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + else: + api_key = self.skill_store.get_system_config("heurist_api_key") + await self.user_rate_limit_by_category(context.user_id, 10, 1440) + + # Generate a unique job ID + job_id = str(XID()) + + # Prepare the request payload + payload = { + "job_id": job_id, + "model_input": { + "SD": { + "prompt": prompt, + "neg_prompt": neg_prompt, + "num_iterations": 25, + "width": width, + "height": height, + "guidance_scale": 5, + "seed": -1, + } + }, + "model_id": "ArthemyReal", + "deadline": 120, + "priority": 1, + } + logger.debug(f"Heurist API payload: {payload}") + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + try: + # Make the API request + async with httpx.AsyncClient() as client: + response = await client.post( + "http://sequencer.heurist.xyz/submit_job", + json=payload, + headers=headers, + timeout=120, + ) + logger.debug(f"Heurist API response: {response.text}") + response.raise_for_status() + + # Store the image URL + image_url = response.text.strip('"') + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/heurist/{job_id}" + # Store the image and get the CDN URL + stored_url = await store_image(image_url, image_key) + + # Return the stored image URL + return stored_url + + except httpx.HTTPStatusError as e: + # Extract error details from response + try: + error_json = e.response.json() + error_code = error_json.get("error", "") + error_message = error_json.get("message", "") + full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}" + except Exception: + full_error = f"Heurist API error: {e}" + + logger.error(full_error) + raise Exception(full_error) + + except Exception as e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/image_generation_braindance.py b/intentkit/skills/heurist/image_generation_braindance.py new file mode 100644 index 00000000..029fe6cf --- /dev/null +++ b/intentkit/skills/heurist/image_generation_braindance.py @@ -0,0 +1,161 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationBrainDanceInput(BaseModel): + """Input for ImageGenerationBrainDance tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="Negative prompt describing what to avoid in the generated image.", + ) + width: Optional[int] = Field( + default=1024, + le=1024, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=1024, + le=1024, + description="Height of the generated image.", + ) + + +class ImageGenerationBrainDance(HeuristBaseTool): + """Tool for 
+
+    This tool takes a text prompt and uses Heurist's API to generate
+    an artistic image based on the description.
+
+    Attributes:
+        name: The name of the tool.
+        description: A description of what the tool does.
+        args_schema: The schema for the tool's input arguments.
+    """
+
+    name: str = "heurist_image_generation_braindance"
+    description: str = (
+        "Generate artistic images using Heurist AI's BrainDance model.\n"
+        "Provide a text prompt describing the artistic image you want to generate.\n"
+        "BrainDance specializes in creating unique, artistic interpretations with creative flair.\n"
+        "If you have height and width, remember to specify them.\n"
+    )
+    args_schema: Type[BaseModel] = ImageGenerationBrainDanceInput
+
+    async def _arun(
+        self,
+        prompt: str,
+        neg_prompt: Optional[str] = "(worst quality: 1.4), bad quality, nsfw",
+        width: Optional[int] = 1024,
+        height: Optional[int] = 680,
+        **kwargs,
+    ) -> str:
+        """Implementation of the tool to generate artistic images using Heurist AI's BrainDance model.
+
+        Args:
+            prompt: Text prompt describing the image to generate.
+            neg_prompt: Negative prompt describing what to avoid in the generated image.
+            width: Width of the generated image.
+            height: Height of the generated image.
+            config: Configuration for the runnable.
+
+        Returns:
+            str: URL of the generated image.
+        """
+        context = self.get_context()
+        skill_config = context.agent.skill_config(self.category)
+
+        # Resolve the Heurist API key: prefer the agent owner's key, otherwise
+        # fall back to the platform key.
+        if "api_key" in skill_config and skill_config["api_key"]:
+            api_key = skill_config["api_key"]
+            if skill_config.get("rate_limit_number") and skill_config.get(
+                "rate_limit_minutes"
+            ):
+                await self.user_rate_limit_by_category(
+                    context.user_id,
+                    skill_config["rate_limit_number"],
+                    skill_config["rate_limit_minutes"],
+                )
+        else:
+            api_key = self.skill_store.get_system_config("heurist_api_key")
+            # Platform key: apply the default limit of 10 requests per day.
+            await self.user_rate_limit_by_category(context.user_id, 10, 1440)
+
+        # Generate a unique job ID
+        job_id = str(XID())
+
+        # Prepare the request payload
+        payload = {
+            "job_id": job_id,
+            "model_input": {
+                "SD": {
+                    "prompt": prompt,
+                    "neg_prompt": neg_prompt,
+                    "num_iterations": 25,
+                    "width": width,
+                    "height": height,
+                    "guidance_scale": 5,
+                    "seed": -1,
+                }
+            },
+            "model_id": "BrainDance",
+            "deadline": 120,
+            "priority": 1,
+        }
+        logger.debug(f"Heurist API payload: {payload}")
+
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+        }
+
+        try:
+            # Make the API request
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://sequencer.heurist.xyz/submit_job",
+                    json=payload,
+                    headers=headers,
+                    timeout=120,
+                )
+                logger.debug(f"Heurist API response: {response.text}")
+                response.raise_for_status()
+
+                # Store the image URL
+                image_url = response.text.strip('"')
+                # Generate a key with agent ID as prefix
+                image_key = f"{context.agent_id}/heurist/{job_id}"
+                # Store the image and get the CDN URL
+                stored_url = await store_image(image_url, image_key)
+
+                # Return the stored image URL
+                return stored_url
+
+        except httpx.HTTPStatusError as e:
+            # Extract error details from response
+            try:
+                error_json = e.response.json()
+                error_code = error_json.get("error", "")
+                error_message = error_json.get("message", "")
+                full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}"
+            except Exception:
+                full_error = f"Heurist API error: {e}"
+
+            
logger.error(full_error) + raise Exception(full_error) + + except Exception as e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/image_generation_cyber_realistic_xl.py b/intentkit/skills/heurist/image_generation_cyber_realistic_xl.py new file mode 100644 index 00000000..d010c65e --- /dev/null +++ b/intentkit/skills/heurist/image_generation_cyber_realistic_xl.py @@ -0,0 +1,161 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationCyberRealisticXLInput(BaseModel): + """Input for ImageGenerationCyberRealisticXL tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="Negative prompt describing what to avoid in the generated image.", + ) + width: Optional[int] = Field( + default=1024, + le=1024, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=680, + le=1024, + description="Height of the generated image.", + ) + + +class ImageGenerationCyberRealisticXL(HeuristBaseTool): + """Tool for generating hyperrealistic cyberpunk images using Heurist AI's CyberRealisticXL model. + + This tool takes a text prompt and uses Heurist's API to generate + a hyperrealistic photograph with a cyberpunk aesthetic based on the description. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "heurist_image_generation_cyber_realistic_xl" + description: str = ( + "Generate hyperrealistic cyberpunk photography using Heurist AI's CyberRealisticXL model.\n" + "Provide a text prompt describing the hyperrealistic cyberpunk image you want to generate.\n" + "CyberRealisticXL specializes in creating high-quality hyperrealistic photographs with a cyberpunk aesthetic.\n" + "If you have height and width, remember to specify them.\n" + ) + args_schema: Type[BaseModel] = ImageGenerationCyberRealisticXLInput + + async def _arun( + self, + prompt: str, + neg_prompt: Optional[str] = "(worst quality: 1.4), bad quality, nsfw", + width: Optional[int] = 1024, + height: Optional[int] = 680, + **kwargs, + ) -> str: + """Implementation of the tool to generate hyperrealistic cyberpunk images using Heurist AI's CyberRealisticXL model. + + Args: + prompt: Text prompt describing the image to generate. + neg_prompt: Negative prompt describing what to avoid in the generated image. + width: Width of the generated image. + height: Height of the generated image. + config: Configuration for the runnable. + + Returns: + str: URL of the generated image. 
+ """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + skill_config = skill_config + + # Get the Heurist API key from the skill store + if "api_key" in skill_config and skill_config["api_key"]: + api_key = skill_config["api_key"] + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + else: + api_key = self.skill_store.get_system_config("heurist_api_key") + await self.user_rate_limit_by_category(context.user_id, 10, 1440) + + # Generate a unique job ID + job_id = str(XID()) + + # Prepare the request payload + payload = { + "job_id": job_id, + "model_input": { + "SD": { + "prompt": prompt, + "neg_prompt": neg_prompt, + "num_iterations": 25, + "width": width, + "height": height, + "guidance_scale": 5, + "seed": -1, + } + }, + "model_id": "CyberRealisticXL", + "deadline": 180, + "priority": 1, + } + logger.debug(f"Heurist API payload: {payload}") + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + try: + # Make the API request + async with httpx.AsyncClient() as client: + response = await client.post( + "http://sequencer.heurist.xyz/submit_job", + json=payload, + headers=headers, + timeout=120, + ) + logger.debug(f"Heurist API response: {response.text}") + response.raise_for_status() + + # Store the image URL + image_url = response.text.strip('"') + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/heurist/{job_id}" + # Store the image and get the CDN URL + stored_url = await store_image(image_url, image_key) + + # Return the stored image URL + return stored_url + + except httpx.HTTPStatusError as e: + # Extract error details from response + try: + error_json = e.response.json() + error_code = error_json.get("error", "") + error_message = error_json.get("message", "") + full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}" + except Exception: + full_error = f"Heurist API error: {e}" + + logger.error(full_error) + raise Exception(full_error) + + except Exception as e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/image_generation_flux_1_dev.py b/intentkit/skills/heurist/image_generation_flux_1_dev.py new file mode 100644 index 00000000..d14af4be --- /dev/null +++ b/intentkit/skills/heurist/image_generation_flux_1_dev.py @@ -0,0 +1,161 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationFlux1DevInput(BaseModel): + """Input for ImageGenerationFlux1Dev tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="", + description="Negative prompt describing what to avoid in the generated image.", + ) + width: Optional[int] = Field( + default=1024, + le=2048, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=1024, + le=2048, + description="Height of the generated image.", + ) + + +class ImageGenerationFlux1Dev(HeuristBaseTool): + """Tool for generating versatile images using 
+
+    This tool takes a text prompt and uses Heurist's API to generate
+    an image based on the description using the versatile Flux.1-dev model.
+
+    Attributes:
+        name: The name of the tool.
+        description: A description of what the tool does.
+        args_schema: The schema for the tool's input arguments.
+    """
+
+    name: str = "heurist_image_generation_flux_1_dev"
+    description: str = (
+        "Generate images using Heurist AI's Flux.1-dev model.\n"
+        "Provide a text prompt describing the image you want to generate.\n"
+        "Flux.1-dev is a versatile, general-purpose model capable of generating images in any style.\n"
+        "If you have height and width, remember to specify them.\n"
+    )
+    args_schema: Type[BaseModel] = ImageGenerationFlux1DevInput
+
+    async def _arun(
+        self,
+        prompt: str,
+        neg_prompt: Optional[str] = "",
+        width: Optional[int] = 1024,
+        height: Optional[int] = 680,
+        **kwargs,
+    ) -> str:
+        """Implementation of the tool to generate images using Heurist AI's Flux.1-dev model.
+
+        Args:
+            prompt: Text prompt describing the image to generate.
+            neg_prompt: Negative prompt describing what to avoid in the generated image.
+            width: Width of the generated image.
+            height: Height of the generated image.
+            config: Configuration for the runnable.
+
+        Returns:
+            str: URL of the generated image.
+        """
+        context = self.get_context()
+        skill_config = context.agent.skill_config(self.category)
+
+        # Resolve the Heurist API key: prefer the agent owner's key, otherwise
+        # fall back to the platform key.
+        if "api_key" in skill_config and skill_config["api_key"]:
+            api_key = skill_config["api_key"]
+            if skill_config.get("rate_limit_number") and skill_config.get(
+                "rate_limit_minutes"
+            ):
+                await self.user_rate_limit_by_category(
+                    context.user_id,
+                    skill_config["rate_limit_number"],
+                    skill_config["rate_limit_minutes"],
+                )
+        else:
+            api_key = self.skill_store.get_system_config("heurist_api_key")
+            # Platform key: apply the default limit of 10 requests per day.
+            await self.user_rate_limit_by_category(context.user_id, 10, 1440)
+
+        # Generate a unique job ID
+        job_id = str(XID())
+
+        # Prepare the request payload
+        payload = {
+            "job_id": job_id,
+            "model_input": {
+                "SD": {
+                    "prompt": prompt,
+                    "neg_prompt": neg_prompt,
+                    "num_iterations": 22,
+                    "width": width,
+                    "height": height,
+                    "guidance_scale": 3,
+                    "seed": -1,
+                }
+            },
+            "model_id": "Flux.1-dev",
+            "deadline": 180,
+            "priority": 1,
+        }
+        logger.debug(f"Heurist API payload: {payload}")
+
+        headers = {
+            "Authorization": f"Bearer {api_key}",
+            "Content-Type": "application/json",
+        }
+
+        try:
+            # Make the API request
+            async with httpx.AsyncClient() as client:
+                response = await client.post(
+                    "http://sequencer.heurist.xyz/submit_job",
+                    json=payload,
+                    headers=headers,
+                    timeout=120,
+                )
+                logger.debug(f"Heurist API response: {response.text}")
+                response.raise_for_status()
+
+                # Store the image URL
+                image_url = response.text.strip('"')
+                # Generate a key with agent ID as prefix
+                image_key = f"{context.agent_id}/heurist/{job_id}"
+                # Store the image and get the CDN URL
+                stored_url = await store_image(image_url, image_key)
+
+                # Return the stored image URL
+                return stored_url
+
+        except httpx.HTTPStatusError as e:
+            # Extract error details from response
+            try:
+                error_json = e.response.json()
+                error_code = error_json.get("error", "")
+                error_message = error_json.get("message", "")
+                full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}"
+            except Exception:
+                full_error = f"Heurist API error: {e}"
+
+            logger.error(full_error)
+            raise Exception(full_error)
+
+        except Exception as 
e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/image_generation_sdxl.py b/intentkit/skills/heurist/image_generation_sdxl.py new file mode 100644 index 00000000..ce13a09f --- /dev/null +++ b/intentkit/skills/heurist/image_generation_sdxl.py @@ -0,0 +1,160 @@ +import logging +from typing import Optional, Type + +import httpx +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.heurist.base import HeuristBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class ImageGenerationSDXLInput(BaseModel): + """Input for ImageGenerationSDXL tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + neg_prompt: Optional[str] = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="Negative prompt describing what to avoid in the generated image.", + ) + width: Optional[int] = Field( + default=1024, + le=1024, + description="Width of the generated image.", + ) + height: Optional[int] = Field( + default=1024, + le=1024, + description="Height of the generated image.", + ) + + +class ImageGenerationSDXL(HeuristBaseTool): + """Tool for generating high-quality images using Heurist AI's SDXL model. + + This tool takes a text prompt and uses Heurist's API to generate + an image based on the description using the versatile SDXL model. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "heurist_image_generation_sdxl" + description: str = ( + "Generate images using Heurist AI's SDXL model.\n" + "Provide a text prompt describing the image you want to generate.\n" + "SDXL is a versatile, general-purpose model capable of generating high-quality images in any style.\n" + "If you have height and width, remember to specify them.\n" + ) + args_schema: Type[BaseModel] = ImageGenerationSDXLInput + + async def _arun( + self, + prompt: str, + neg_prompt: Optional[str] = "(worst quality: 1.4), bad quality, nsfw", + width: Optional[int] = 1024, + height: Optional[int] = 680, + **kwargs, + ) -> str: + """Implementation of the tool to generate images using Heurist AI's SDXL model. + + Args: + prompt: Text prompt describing the image to generate. + neg_prompt: Negative prompt describing what to avoid in the generated image. + width: Width of the generated image. + height: Height of the generated image. + config: Configuration for the runnable. + + Returns: + str: URL of the generated image. 
+ """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + skill_config = skill_config + + # Get the Heurist API key from the skill store + if "api_key" in skill_config and skill_config["api_key"]: + api_key = skill_config["api_key"] + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + else: + api_key = self.skill_store.get_system_config("heurist_api_key") + await self.user_rate_limit_by_category(context.user_id, 10, 1440) + + # Generate a unique job ID + job_id = str(XID()) + + # Prepare the request payload + payload = { + "job_id": job_id, + "model_input": { + "SD": { + "prompt": prompt, + "neg_prompt": neg_prompt, + "num_iterations": 25, + "width": width, + "height": height, + "guidance_scale": 5, + "seed": -1, + } + }, + "model_id": "SDXL", + "deadline": 180, + "priority": 1, + } + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + try: + # Make the API request + async with httpx.AsyncClient() as client: + response = await client.post( + "http://sequencer.heurist.xyz/submit_job", + json=payload, + headers=headers, + timeout=120, + ) + logger.debug(f"Heurist API response: {response.text}") + response.raise_for_status() + + # Store the image URL + image_url = response.text.strip('"') + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/heurist/{job_id}" + # Store the image and get the CDN URL + stored_url = await store_image(image_url, image_key) + + # Return the stored image URL + return stored_url + + except httpx.HTTPStatusError as e: + # Extract error details from response + try: + error_json = e.response.json() + error_code = error_json.get("error", "") + error_message = error_json.get("message", "") + full_error = f"Heurist API error: Error code: {error_code}, Message: {error_message}" + except Exception: + full_error = f"Heurist API error: {e}" + + logger.error(full_error) + raise Exception(full_error) + + except Exception as e: + logger.error(f"Error generating image with Heurist: {e}") + raise Exception(f"Error generating image with Heurist: {str(e)}") diff --git a/intentkit/skills/heurist/schema.json b/intentkit/skills/heurist/schema.json new file mode 100644 index 00000000..fee599b5 --- /dev/null +++ b/intentkit/skills/heurist/schema.json @@ -0,0 +1,196 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Heurist AI", + "description": "Skills for interacting with Heurist AI services, including image generation and other AI capabilities", + "x-icon": "https://ai.service.crestal.dev/skills/heurist/heurist.png", + "x-tags": [ + "AI", + "Image Generation" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "image_generation_animagine_xl": { + "type": "string", + "title": "Japanese Anime Image Generation", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate Japanese anime-style images using Heurist's AnimagineXL model based on text prompts", + "default": "disabled" + }, + "image_generation_arthemy_comics": { + "type": "string", + "title": "Comic Style Image Generation", + "enum": [ 
+ "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate comic-style images using Heurist's ArthemyComics model based on text prompts", + "default": "disabled" + }, + "image_generation_arthemy_real": { + "type": "string", + "title": "Realistic Image Generation", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate realistic images using Heurist's ArthemyReal model based on text prompts", + "default": "disabled" + }, + "image_generation_braindance": { + "type": "string", + "title": "Artistic Image Generation", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate artistic images using Heurist's BrainDance model based on text prompts", + "default": "disabled" + }, + "image_generation_cyber_realistic_xl": { + "type": "string", + "title": "Cyberpunk Hyperrealistic Image Generation", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate hyperrealistic photographs with a cyberpunk aesthetic using Heurist's CyberRealisticXL model based on text prompts", + "default": "disabled" + }, + "image_generation_flux_1_dev": { + "type": "string", + "title": "Versatile Image Generation (Flux)", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate versatile images in any style using Heurist's Flux.1-dev model based on text prompts", + "default": "disabled" + }, + "image_generation_sdxl": { + "type": "string", + "title": "High-Quality Image Generation (SDXL)", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate high-quality images in any style using Heurist's SDXL model based on text prompts", + "default": "disabled" + } + }, + "description": "States for each Heurist AI skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Heurist API Key", + "x-link": "[Get your API key](https://dev-api-form.heurist.ai/)", + "x-sensitive": true, + "description": "API key for Heurist AI services, if you have one, you can set the rate limit for your user" + }, + "rate_limit_number": { + "type": "integer", + "title": "Rate Limit Number", + "description": "Number of requests allowed per time window, only valid if api_key is set" + }, + "rate_limit_minutes": { + "type": "integer", + "title": "Rate Limit Minutes", + "description": "Time window in minutes for rate limiting, only valid if api_key is set" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff 
--git a/intentkit/skills/http/README.md b/intentkit/skills/http/README.md new file mode 100644 index 00000000..d2ec1065 --- /dev/null +++ b/intentkit/skills/http/README.md @@ -0,0 +1,78 @@ +# HTTP Client Skills + +This skill category provides HTTP client functionality for making web requests using the httpx async client library. + +## Available Skills + +### http_get +Make HTTP GET requests to fetch data from web APIs and websites. + +**Parameters:** +- `url` (string, required): The URL to send the GET request to +- `headers` (dict, optional): Custom headers to include in the request +- `params` (dict, optional): Query parameters to include in the request +- `timeout` (float, optional): Request timeout in seconds (default: 30) + +**Example usage:** +``` +Fetch data from https://api.example.com/users with timeout of 10 seconds +``` + +### http_post +Make HTTP POST requests to send data to web APIs and submit forms. + +**Parameters:** +- `url` (string, required): The URL to send the POST request to +- `data` (dict or string, optional): The data to send in the request body +- `headers` (dict, optional): Custom headers to include in the request +- `params` (dict, optional): Query parameters to include in the request +- `timeout` (float, optional): Request timeout in seconds (default: 30) + +**Example usage:** +``` +Send a POST request to https://api.example.com/users with JSON data {"name": "John", "email": "john@example.com"} +``` + +### http_put +Make HTTP PUT requests to update or replace data on web APIs. + +**Parameters:** +- `url` (string, required): The URL to send the PUT request to +- `data` (dict or string, optional): The data to send in the request body +- `headers` (dict, optional): Custom headers to include in the request +- `params` (dict, optional): Query parameters to include in the request +- `timeout` (float, optional): Request timeout in seconds (default: 30) + +**Example usage:** +``` +Update user data at https://api.example.com/users/123 with {"name": "Jane Doe"} +``` + +## Features + +- **Async Support**: All HTTP operations are asynchronous using httpx +- **Automatic JSON Handling**: Dictionary data is automatically sent as JSON with proper Content-Type headers +- **Error Handling**: Comprehensive error handling for timeouts, HTTP errors, and connection issues +- **Flexible Data Types**: Support for both JSON (dict) and raw string data in POST/PUT requests +- **Custom Headers**: Support for custom headers in all request types +- **Query Parameters**: Support for URL query parameters +- **Configurable Timeouts**: Customizable request timeouts + +## Configuration + +Each skill can be configured with one of three states: +- `disabled`: Skill is not available +- `public`: Available to both agent owner and all users +- `private`: Available only to the agent owner + +## Natural Language Usage + +These skills are designed to work seamlessly with natural language instructions: + +- "Get the weather data from the API" +- "Send a POST request to create a new user" +- "Update the user profile using PUT request" +- "Fetch the latest news from the RSS feed" +- "Submit the form data to the webhook" + +The AI agent will automatically select the appropriate HTTP method and construct the proper request based on your natural language description. 
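+
+## Implementation Sketch
+
+Each skill wraps a single `httpx.AsyncClient` call, as implemented in `get.py`, `post.py`, and `put.py`. The snippet below is a minimal sketch of that request path outside the agent; the function name `fetch_url` and the example endpoint are illustrative, not part of the skill API:
+
+```
+import asyncio
+
+import httpx
+
+
+async def fetch_url(url: str, params: dict | None = None, timeout: float = 30.0) -> str:
+    """Mirror of the http_get skill's request path."""
+    async with httpx.AsyncClient() as client:
+        response = await client.get(url, params=params, timeout=timeout)
+        # Raise for 4xx/5xx, matching the skill's error handling
+        response.raise_for_status()
+        return f"Status: {response.status_code}\nContent: {response.text}"
+
+
+if __name__ == "__main__":
+    print(asyncio.run(fetch_url("https://httpbin.org/get", {"q": "demo"})))
+```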
\ No newline at end of file diff --git a/intentkit/skills/http/__init__.py b/intentkit/skills/http/__init__.py new file mode 100644 index 00000000..84f604e1 --- /dev/null +++ b/intentkit/skills/http/__init__.py @@ -0,0 +1,100 @@ +"""HTTP client skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.http.base import HttpBaseTool +from intentkit.skills.http.get import HttpGet +from intentkit.skills.http.post import HttpPost +from intentkit.skills.http.put import HttpPut + +# Cache skills at the system level, because they are stateless +_cache: dict[str, HttpBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + """Type definition for HTTP skill states.""" + + http_get: SkillState + http_post: SkillState + http_put: SkillState + + +class Config(SkillConfig): + """Configuration for HTTP client skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[HttpBaseTool]: + """Get all HTTP client skills. + + Args: + config: The configuration for HTTP client skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of HTTP client skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_http_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_http_skill( + name: str, + store: SkillStoreABC, +) -> HttpBaseTool: + """Get an HTTP client skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested HTTP client skill + """ + if name == "http_get": + if name not in _cache: + _cache[name] = HttpGet( + skill_store=store, + ) + return _cache[name] + elif name == "http_post": + if name not in _cache: + _cache[name] = HttpPost( + skill_store=store, + ) + return _cache[name] + elif name == "http_put": + if name not in _cache: + _cache[name] = HttpPut( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown HTTP skill: {name}") + return None diff --git a/intentkit/skills/http/base.py b/intentkit/skills/http/base.py new file mode 100644 index 00000000..41299c23 --- /dev/null +++ b/intentkit/skills/http/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class HttpBaseTool(IntentKitSkill): + """Base class for HTTP client tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "http" diff --git a/intentkit/skills/http/get.py b/intentkit/skills/http/get.py new file mode 100644 index 00000000..87f90407 --- /dev/null +++ b/intentkit/skills/http/get.py @@ -0,0 +1,94 @@ +import logging +from typing import Any, Dict, Optional, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.http.base import HttpBaseTool + +logger = logging.getLogger(__name__) + + +class HttpGetInput(BaseModel): + """Input for HTTP GET request.""" + + url: str = Field(description="The URL to send the GET request to") + headers: Optional[Dict[str, str]] = Field( + description="Optional headers to include in the request", + default=None, + ) + params: Optional[Dict[str, Any]] = Field( + description="Optional query parameters to include in the request", + default=None, + ) + timeout: Optional[float] = Field( + description="Request timeout in seconds (default: 30)", + default=30.0, + ) + + +class HttpGet(HttpBaseTool): + """Tool for making HTTP GET requests. + + This tool allows you to make HTTP GET requests to any URL with optional + headers and query parameters. It returns the response content as a string. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "http_get" + description: str = ( + "Make an HTTP GET request to a specified URL. " + "You can include custom headers and query parameters. " + "Returns the response content as text. " + "Use this when you need to fetch data from web APIs or websites." + ) + args_schema: Type[BaseModel] = HttpGetInput + + async def _arun( + self, + url: str, + headers: Optional[Dict[str, str]] = None, + params: Optional[Dict[str, Any]] = None, + timeout: float = 30.0, + **kwargs, + ) -> str: + """Implementation of the HTTP GET request. + + Args: + url: The URL to send the GET request to. + headers: Optional headers to include in the request. + params: Optional query parameters to include in the request. + timeout: Request timeout in seconds. + config: The runnable config (unused but required by interface). + + Returns: + str: The response content as text, or error message if request fails. 
+ """ + try: + async with httpx.AsyncClient() as client: + response = await client.get( + url=url, + headers=headers, + params=params, + timeout=timeout, + ) + + # Raise an exception for bad status codes + response.raise_for_status() + + # Return response content + return f"Status: {response.status_code}\nContent: {response.text}" + + except httpx.TimeoutException: + return f"Error: Request to {url} timed out after {timeout} seconds" + except httpx.HTTPStatusError as e: + return f"Error: HTTP {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + return f"Error: Failed to connect to {url} - {str(e)}" + except Exception as e: + logger.error(f"Unexpected error in HTTP GET request: {e}") + return f"Error: Unexpected error occurred - {str(e)}" diff --git a/intentkit/skills/http/http.svg b/intentkit/skills/http/http.svg new file mode 100644 index 00000000..64302f57 --- /dev/null +++ b/intentkit/skills/http/http.svg @@ -0,0 +1,15 @@ + + + + + + HTTP + + + + + + + + + \ No newline at end of file diff --git a/intentkit/skills/http/post.py b/intentkit/skills/http/post.py new file mode 100644 index 00000000..b3c85476 --- /dev/null +++ b/intentkit/skills/http/post.py @@ -0,0 +1,111 @@ +import logging +from typing import Any, Dict, Optional, Type, Union + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.http.base import HttpBaseTool + +logger = logging.getLogger(__name__) + + +class HttpPostInput(BaseModel): + """Input for HTTP POST request.""" + + url: str = Field(description="The URL to send the POST request to") + data: Optional[Union[Dict[str, Any], str]] = Field( + description="The data to send in the request body. Can be a dictionary (will be sent as JSON) or a string", + default=None, + ) + headers: Optional[Dict[str, str]] = Field( + description="Optional headers to include in the request", + default=None, + ) + params: Optional[Dict[str, Any]] = Field( + description="Optional query parameters to include in the request", + default=None, + ) + timeout: Optional[float] = Field( + description="Request timeout in seconds (default: 30)", + default=30.0, + ) + + +class HttpPost(HttpBaseTool): + """Tool for making HTTP POST requests. + + This tool allows you to make HTTP POST requests to any URL with optional + headers, query parameters, and request body data. It returns the response content as a string. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "http_post" + description: str = ( + "Make an HTTP POST request to a specified URL. " + "You can include custom headers, query parameters, and request body data. " + "Data can be provided as a dictionary (sent as JSON) or as a string. " + "Returns the response content as text. " + "Use this when you need to send data to web APIs or submit forms." + ) + args_schema: Type[BaseModel] = HttpPostInput + + async def _arun( + self, + url: str, + data: Optional[Union[Dict[str, Any], str]] = None, + headers: Optional[Dict[str, str]] = None, + params: Optional[Dict[str, Any]] = None, + timeout: float = 30.0, + **kwargs, + ) -> str: + """Implementation of the HTTP POST request. + + Args: + url: The URL to send the POST request to. + data: The data to send in the request body. + headers: Optional headers to include in the request. + params: Optional query parameters to include in the request. + timeout: Request timeout in seconds. 
+ config: The runnable config (unused but required by interface). + + Returns: + str: The response content as text, or error message if request fails. + """ + try: + # Prepare headers + request_headers = headers or {} + + # If data is a dictionary, send as JSON + if isinstance(data, dict): + if "content-type" not in {k.lower() for k in request_headers.keys()}: + request_headers["Content-Type"] = "application/json" + + async with httpx.AsyncClient() as client: + response = await client.post( + url=url, + json=data if isinstance(data, dict) else None, + content=data if isinstance(data, str) else None, + headers=request_headers, + params=params, + timeout=timeout, + ) + + # Raise an exception for bad status codes + response.raise_for_status() + + # Return response content + return f"Status: {response.status_code}\nContent: {response.text}" + + except httpx.TimeoutException: + return f"Error: Request to {url} timed out after {timeout} seconds" + except httpx.HTTPStatusError as e: + return f"Error: HTTP {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + return f"Error: Failed to connect to {url} - {str(e)}" + except Exception as e: + logger.error(f"Unexpected error in HTTP POST request: {e}") + return f"Error: Unexpected error occurred - {str(e)}" diff --git a/intentkit/skills/http/put.py b/intentkit/skills/http/put.py new file mode 100644 index 00000000..709430e2 --- /dev/null +++ b/intentkit/skills/http/put.py @@ -0,0 +1,111 @@ +import logging +from typing import Any, Dict, Optional, Type, Union + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.http.base import HttpBaseTool + +logger = logging.getLogger(__name__) + + +class HttpPutInput(BaseModel): + """Input for HTTP PUT request.""" + + url: str = Field(description="The URL to send the PUT request to") + data: Optional[Union[Dict[str, Any], str]] = Field( + description="The data to send in the request body. Can be a dictionary (will be sent as JSON) or a string", + default=None, + ) + headers: Optional[Dict[str, str]] = Field( + description="Optional headers to include in the request", + default=None, + ) + params: Optional[Dict[str, Any]] = Field( + description="Optional query parameters to include in the request", + default=None, + ) + timeout: Optional[float] = Field( + description="Request timeout in seconds (default: 30)", + default=30.0, + ) + + +class HttpPut(HttpBaseTool): + """Tool for making HTTP PUT requests. + + This tool allows you to make HTTP PUT requests to any URL with optional + headers, query parameters, and request body data. It returns the response content as a string. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "http_put" + description: str = ( + "Make an HTTP PUT request to a specified URL. " + "You can include custom headers, query parameters, and request body data. " + "Data can be provided as a dictionary (sent as JSON) or as a string. " + "Returns the response content as text. " + "Use this when you need to update or replace data on web APIs." + ) + args_schema: Type[BaseModel] = HttpPutInput + + async def _arun( + self, + url: str, + data: Optional[Union[Dict[str, Any], str]] = None, + headers: Optional[Dict[str, str]] = None, + params: Optional[Dict[str, Any]] = None, + timeout: float = 30.0, + **kwargs, + ) -> str: + """Implementation of the HTTP PUT request. + + Args: + url: The URL to send the PUT request to. 
+ data: The data to send in the request body. + headers: Optional headers to include in the request. + params: Optional query parameters to include in the request. + timeout: Request timeout in seconds. + config: The runnable config (unused but required by interface). + + Returns: + str: The response content as text, or error message if request fails. + """ + try: + # Prepare headers + request_headers = headers or {} + + # If data is a dictionary, send as JSON + if isinstance(data, dict): + if "content-type" not in {k.lower() for k in request_headers.keys()}: + request_headers["Content-Type"] = "application/json" + + async with httpx.AsyncClient() as client: + response = await client.put( + url=url, + json=data if isinstance(data, dict) else None, + content=data if isinstance(data, str) else None, + headers=request_headers, + params=params, + timeout=timeout, + ) + + # Raise an exception for bad status codes + response.raise_for_status() + + # Return response content + return f"Status: {response.status_code}\nContent: {response.text}" + + except httpx.TimeoutException: + return f"Error: Request to {url} timed out after {timeout} seconds" + except httpx.HTTPStatusError as e: + return f"Error: HTTP {e.response.status_code} - {e.response.text}" + except httpx.RequestError as e: + return f"Error: Failed to connect to {url} - {str(e)}" + except Exception as e: + logger.error(f"Unexpected error in HTTP PUT request: {e}") + return f"Error: Unexpected error occurred - {str(e)}" diff --git a/intentkit/skills/http/schema.json b/intentkit/skills/http/schema.json new file mode 100644 index 00000000..88121f51 --- /dev/null +++ b/intentkit/skills/http/schema.json @@ -0,0 +1,80 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "HTTP Client", + "description": "HTTP client skills for making web requests", + "x-icon": "https://ai.service.crestal.dev/skills/http/http.svg", + "x-tags": [ + "HTTP", + "Web", + "API", + "Client" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "http_get": { + "type": "string", + "title": "HTTP GET", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Make HTTP GET requests to fetch data from web APIs and websites", + "default": "private" + }, + "http_post": { + "type": "string", + "title": "HTTP POST", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Make HTTP POST requests to send data to web APIs and submit forms", + "default": "private" + }, + "http_put": { + "type": "string", + "title": "HTTP PUT", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Make HTTP PUT requests to update or replace data on web APIs", + "default": "private" + } + }, + "description": "States for each HTTP client skill (disabled, public, or private)" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/lifi/README.md b/intentkit/skills/lifi/README.md new file mode 100644 index 00000000..9ffade04 --- /dev/null +++ b/intentkit/skills/lifi/README.md @@ -0,0 +1,294 @@ +# LiFi Skills 
+Cross-chain token transfers and swaps using the LiFi protocol with CDP wallet integration.
+
+## Features
+
+- **Token Quotes**: Get real-time quotes for token swaps and cross-chain transfers
+- **Token Execution**: Execute token swaps and transfers with automatic transaction handling
+- **Explorer URLs**: Get direct blockchain explorer links for all transactions
+- **Multi-chain Support**: Works with 15+ blockchains including Base, Ethereum, Arbitrum, Polygon
+- **Testnet Support**: Full support for testnet operations (Base Sepolia, Ethereum Sepolia, etc.)
+
+## Skills Available
+
+1. **`token_quote`** - Get quotes for token transfers (public access)
+2. **`token_execute`** - Execute token transfers (requires CDP wallet)
+
+## Test Prompts
+
+Use these exact prompts to test the LiFi skills:
+
+### 1. Check Wallet Address
+```
+what is my wallet address
+```
+
+### 2. Check Balances
+```
+tell me my balance in eth and usdc
+```
+
+### 3. Get Quote for Token Swap
+```
+now get me quote to swap my usdc to eth on the same chain that is base mainnet
+```
+
+### 4. Execute Token Swap
+```
+yes do the swap now
+```
+
+### 5. Verify Transaction
+```
+tell me my balance in eth and usdc
+```
+
+## Expected Results
+
+### Quote Response
+```
+### Token Transfer Quote
+
+**From:** 2.2019 USDC on Base
+**To:** 0.00100259 ETH on Base
+**Minimum Received:** 0.00097251 ETH
+**Bridge/Exchange:** sushiswap
+
+**Value:** $2.2017 → $2.2067
+**Estimated Time:** 30 seconds
+
+**Gas Cost:**
+- SEND: 0.00001005 ETH ($0.0221)
+- **Total Gas:** ~$0.0221
+```
+
+### Execution Response
+```
+**Token Swap Executed Successfully**
+
+Transaction successful!
+Transaction Hash: 0xe7d026c7598699909794df9f7858e48cc56c03e4d428f5cc62f51c1979617fd1
+Network: Base
+Explorer: https://basescan.org/tx/0xe7d026c7598699909794df9f7858e48cc56c03e4d428f5cc62f51c1979617fd1
+Token: USDC → ETH
+Amount: 2.2019
+
+**Status:** Completed (same-chain swap)
+```
+
+## Supported Chains
+
+**Mainnet**: Ethereum, Base, Arbitrum, Optimism, Polygon, Avalanche, Fantom, BSC, Linea, zkSync Era, Scroll
+
+**Testnet**: Ethereum Sepolia, Base Sepolia, Arbitrum Sepolia, Optimism Sepolia, Polygon Mumbai
+
+## Prerequisites
+
+- CDP wallet configured and funded
+- Agent with LiFi skills enabled
+- Sufficient token balance for swaps/transfers
+- Network gas tokens for transaction fees
+
+## Configuration
+
+The skills are automatically configured with:
+- Default slippage: 3%
+- Maximum execution time: 300 seconds
+- Support for all major tokens (ETH, USDC, USDT, DAI, WETH, etc.)
+
+## Error Handling
+
+The skills handle common errors automatically:
+- Invalid chain identifiers
+- Insufficient balances
+- Network connectivity issues
+- Transaction failures with detailed error messages
+
+### CDP Wallet Requirements
+
+To use the `token_execute` skill, your agent must have:
+
+1. **CDP Wallet Configuration**: A properly configured CDP wallet with `cdp_wallet_data` set
+2. **Sufficient Funds**: Enough tokens for the transfer amount plus gas fees
+3. **Network Configuration**: Proper network settings matching your intended chains
+
+## Usage Examples
+
+### Token Quote Examples
+
+#### Cross-Chain Transfer Quote
+```
+"Get a quote for transferring 100 USDC from Ethereum to Polygon"
+```
+
+#### Same-Chain Swap Quote
+```
+"What's the rate for swapping 0.5 ETH to USDC on Ethereum?"
+``` + +#### Fee Analysis +``` +"Check the fees for sending 1000 DAI from Arbitrum to Base" +``` + +#### Amount Calculation +``` +"How much MATIC would I get if I transfer 50 USDC from Ethereum to Polygon?" +``` + +### Token Execute Examples + +#### Execute Cross-Chain Transfer +``` +"Execute a transfer of 100 USDC from Ethereum to Polygon" +``` + +#### Execute Same-Chain Swap +``` +"Swap 0.1 ETH for USDC on Base" +``` + +#### Execute with Custom Slippage +``` +"Transfer 500 DAI from Arbitrum to Optimism with 1% slippage" +``` + +## Supported Networks + +### Major Networks +- **Ethereum** (ETH) - Chain ID: 1 +- **Polygon** (POL) - Chain ID: 137 +- **Arbitrum One** (ARB) - Chain ID: 42161 +- **Optimism** (OPT) - Chain ID: 10 +- **Base** (BASE) - Chain ID: 8453 +- **BNB Chain** (BSC) - Chain ID: 56 +- **Avalanche** (AVAX) - Chain ID: 43114 +- **Gnosis Chain** (DAI) - Chain ID: 100 + +### Layer 2 Networks +- **Linea** - Chain ID: 59144 +- **zkSync Era** - Chain ID: 324 +- **Polygon zkEVM** - Chain ID: 1101 +- **Scroll** - Chain ID: 534352 + +## How It Works + +### Token Quote Process + +1. **Validates** input parameters (chains, tokens, amounts, slippage) +2. **Queries** LiFi API for the best route and pricing +3. **Formats** comprehensive quote information including: + - Token amounts and conversion rates + - Detailed fee breakdown (LP fees, bridge fees, etc.) + - Gas cost estimates in native tokens and USD + - Execution time estimates + - Routing path through bridges/exchanges + - USD value equivalents + +### Token Execute Process + +1. **Gets Quote** - Retrieves routing and pricing information +2. **Checks Wallet** - Validates CDP wallet configuration and funds +3. **Sets Approval** - Automatically approves ERC20 tokens if needed +4. **Executes Transaction** - Sends the transfer transaction +5. **Monitors Status** - Tracks cross-chain transfer completion +6. **Reports Results** - Provides transaction hash and final status + +## Troubleshooting + +### Common Issues + +#### "CDP client not available" +**Problem**: Agent doesn't have CDP wallet configuration +**Solution**: +- Set `wallet_provider: "cdp"` in agent configuration +- Ensure CDP credentials are properly configured +- Use `token_quote` for research without requiring a wallet + +#### "No route found" +**Problem**: LiFi cannot find a path for the requested transfer +**Solutions**: +- Try different token pairs +- Use more liquid tokens (USDC, ETH, etc.) 
+- Check if both chains support the requested tokens +- Reduce transfer amount if liquidity is limited + +#### "Invalid request: Token not supported" +**Problem**: Token symbol or address not recognized +**Solutions**: +- Use popular token symbols (USDC, ETH, DAI, MATIC) +- Verify token exists on the source chain +- Use full token contract address instead of symbol + +#### "Failed to approve token" +**Problem**: ERC20 token approval failed +**Solutions**: +- Ensure wallet has enough native tokens for gas +- Check if token contract allows approvals +- Try again with a smaller amount + +#### "Transfer pending" (taking too long) +**Problem**: Cross-chain transfer is slow +**Solutions**: +- Wait longer (some bridges take 10-30 minutes) +- Check the explorer link for detailed status +- Contact LiFi support if transfer is stuck + +### Configuration Issues + +#### Invalid Slippage +``` +Error: "Invalid slippage: must be between 0.001 (0.1%) and 0.5 (50%)" +``` +**Solution**: Use slippage between 0.1% and 50% (e.g., 0.03 for 3%) + +#### Chain Restrictions +``` +Error: "Source chain 'ETH' is not allowed" +``` +**Solution**: Update `allowed_chains` in configuration or remove the restriction + +#### Execution Timeout +``` +Status: "Still pending - transfer may take longer to complete" +``` +**Solution**: Increase `max_execution_time` or wait for manual completion + +## Best Practices + +### For Token Quotes +- Use quotes to compare different routes before executing +- Check gas costs and fees before large transfers +- Consider execution time for time-sensitive operations + +### For Token Execution +- Always get a quote first to understand costs +- Ensure sufficient gas tokens in your wallet +- Use appropriate slippage (1-3% for stable pairs, 3-5% for volatile pairs) +- Monitor large transfers using the explorer link + +### For Production Use +- Set reasonable `allowed_chains` to prevent unexpected transfers +- Use `private` state for execution skills in production +- Monitor transfer status for cross-chain operations +- Keep some native tokens for gas in each chain you use + +## API Reference + +### Token Quote Parameters +- `from_chain`: Source blockchain (string) +- `to_chain`: Destination blockchain (string) +- `from_token`: Token to send (symbol or address) +- `to_token`: Token to receive (symbol or address) +- `from_amount`: Amount in smallest unit (string) +- `slippage`: Slippage tolerance 0.001-0.5 (float, optional) + +### Token Execute Parameters +Same as Token Quote - the skill handles the execution automatically. + +### Response Format + +**Quote Response**: Detailed markdown with transfer details, fees, gas costs, and routing information. + +**Execute Response**: Transaction hash, status monitoring, and complete transfer summary. 
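+
+### Direct API Sketch
+
+For reference, `token_quote` ultimately issues a single GET request against the public LiFi `/quote` endpoint. The sketch below reproduces that request outside the skill framework; it is a minimal illustration, not part of the skill API. The endpoint and parameter names come from `LIFI_API_URL` and `build_quote_params` in `utils.py`, and the placeholder sender reuses the `DUMMY_ADDRESS` constant, since quotes do not require a funded wallet:
+
+```
+import asyncio
+
+import httpx
+
+LIFI_API_URL = "https://li.quest/v1"
+# DUMMY_ADDRESS from utils.py; quotes do not require a funded wallet.
+DUMMY_ADDRESS = "0x552008c0f6870c2f77e5cC1d2eb9bdff03e30Ea0"
+
+
+async def fetch_quote() -> dict:
+    # Same-chain swap on Base: 2.2 USDC -> ETH. The raw API expects the
+    # amount in the token's smallest unit; inside the skill,
+    # convert_amount_to_wei() derives this from a human-readable amount.
+    params = {
+        "fromChain": 8453,
+        "toChain": 8453,
+        "fromToken": "USDC",
+        "toToken": "ETH",
+        "fromAmount": "2200000",  # 2.2 USDC with 6 decimals
+        "fromAddress": DUMMY_ADDRESS,
+        "slippage": 0.03,  # 3%
+    }
+    async with httpx.AsyncClient() as client:
+        response = await client.get(
+            f"{LIFI_API_URL}/quote", params=params, timeout=30.0
+        )
+        response.raise_for_status()
+        return response.json()
+
+
+if __name__ == "__main__":
+    quote = asyncio.run(fetch_quote())
+    # The skill's formatter reads these same fields (see format_quote_basic_info).
+    print(quote["estimate"]["toAmount"], quote["action"]["toToken"]["symbol"])
+```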
diff --git a/intentkit/skills/lifi/__init__.py b/intentkit/skills/lifi/__init__.py new file mode 100644 index 00000000..aea64b90 --- /dev/null +++ b/intentkit/skills/lifi/__init__.py @@ -0,0 +1,141 @@ +import logging +from typing import List, Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.lifi.base import LiFiBaseTool +from intentkit.skills.lifi.token_execute import TokenExecute +from intentkit.skills.lifi.token_quote import TokenQuote + +# Cache skills at the system level, because they are stateless +_cache: dict[str, LiFiBaseTool] = {} + +# Set up logging +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + token_quote: SkillState + token_execute: SkillState + + +class Config(SkillConfig): + """Configuration for LiFi skills.""" + + states: SkillStates + default_slippage: Optional[float] = 0.03 + allowed_chains: Optional[List[str]] = None + max_execution_time: Optional[int] = 300 + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[LiFiBaseTool]: + """Get all LiFi skills.""" + available_skills = [] + + # Log configuration + logger.info(f"[LiFi_Skills] Initializing with config: {config}") + logger.info(f"[LiFi_Skills] Is private session: {is_private}") + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + logger.info(f"[LiFi_Skills] Skipping disabled skill: {skill_name}") + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + logger.info(f"[LiFi_Skills] Including skill: {skill_name} (state: {state})") + else: + logger.info( + f"[LiFi_Skills] Skipping private skill in public session: {skill_name}" + ) + + logger.info(f"[LiFi_Skills] Available skills: {available_skills}") + + # Get each skill using the cached getter + skills = [] + for name in available_skills: + try: + skill = get_lifi_skill(name, store, config) + skills.append(skill) + logger.info(f"[LiFi_Skills] Successfully loaded skill: {name}") + except Exception as e: + logger.error(f"[LiFi_Skills] Failed to load skill {name}: {str(e)}") + # Continue loading other skills even if one fails + + logger.info(f"[LiFi_Skills] Total skills loaded: {len(skills)}") + return skills + + +def get_lifi_skill( + name: str, + store: SkillStoreABC, + config: Config, +) -> LiFiBaseTool: + """Get a LiFi skill by name.""" + # Create a cache key that includes configuration to ensure skills + # with different configurations are treated as separate instances + cache_key = f"{name}_{id(config)}" + + # Extract configuration options with proper defaults + default_slippage = config.get("default_slippage", 0.03) + allowed_chains = config.get("allowed_chains", None) + max_execution_time = config.get("max_execution_time", 300) + + # Validate configuration + if default_slippage < 0.001 or default_slippage > 0.5: + logger.warning( + f"[LiFi_Skills] Invalid default_slippage: {default_slippage}, using 0.03" + ) + default_slippage = 0.03 + + if max_execution_time < 60 or max_execution_time > 1800: + logger.warning( + f"[LiFi_Skills] Invalid max_execution_time: {max_execution_time}, using 300" + ) + max_execution_time = 300 + + if name == "token_quote": + if cache_key not in _cache: + logger.info( + f"[LiFi_Skills] Initializing token_quote skill with slippage: {default_slippage}" + ) + if allowed_chains: + logger.info(f"[LiFi_Skills] Allowed chains: 
{allowed_chains}") + + _cache[cache_key] = TokenQuote( + skill_store=store, + default_slippage=default_slippage, + allowed_chains=allowed_chains, + ) + return _cache[cache_key] + + elif name == "token_execute": + if cache_key not in _cache: + logger.info("[LiFi_Skills] Initializing token_execute skill") + logger.info( + f"[LiFi_Skills] Configuration - slippage: {default_slippage}, max_time: {max_execution_time}" + ) + if allowed_chains: + logger.info(f"[LiFi_Skills] Allowed chains: {allowed_chains}") + + # Log a warning about CDP wallet requirements + logger.warning( + "[LiFi_Skills] token_execute requires a properly configured CDP wallet with sufficient funds" + ) + + _cache[cache_key] = TokenExecute( + skill_store=store, + default_slippage=default_slippage, + allowed_chains=allowed_chains, + max_execution_time=max_execution_time, + ) + return _cache[cache_key] + + else: + logger.error(f"[LiFi_Skills] Unknown LiFi skill requested: {name}") + raise ValueError(f"Unknown LiFi skill: {name}") diff --git a/intentkit/skills/lifi/base.py b/intentkit/skills/lifi/base.py new file mode 100644 index 00000000..bc56cdcd --- /dev/null +++ b/intentkit/skills/lifi/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class LiFiBaseTool(IntentKitSkill): + """Base class for LiFi tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "lifi" diff --git a/intentkit/skills/lifi/lifi.png b/intentkit/skills/lifi/lifi.png new file mode 100644 index 00000000..90a84191 Binary files /dev/null and b/intentkit/skills/lifi/lifi.png differ diff --git a/intentkit/skills/lifi/schema.json b/intentkit/skills/lifi/schema.json new file mode 100644 index 00000000..92da0f41 --- /dev/null +++ b/intentkit/skills/lifi/schema.json @@ -0,0 +1,89 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "LiFi Token Transfer", + "description": "Cross-chain token transfer and swap capabilities using the LiFi protocol", + "x-icon": "https://ai.service.crestal.dev/skills/lifi/lifi.png", + "x-tags": [ + "DeFi", + "Blockchain", + "Token Transfer", + "Cross-chain" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "token_quote": { + "type": "string", + "title": "Token Quote", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "default": "public", + "description": "Get token transfer quotes without executing transactions" + }, + "token_execute": { + "type": "string", + "title": "Token Execute", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "default": "private", + "description": "Execute token transfers (requires CDP wallet and cdp skills enabled)" + } + }, + "required": ["token_quote", "token_execute"], + "description": "States for each LiFi skill" + }, + "default_slippage": { + "type": "number", + "title": "Default Slippage", + "description": "Default 
slippage tolerance for token transfers (e.g., 0.03 for 3%)", + "default": 0.03, + "minimum": 0.001, + "maximum": 0.5, + "x-step": 0.001 + }, + "allowed_chains": { + "type": "array", + "title": "Allowed Chains", + "description": "List of blockchain networks that can be used (if empty, all supported chains are allowed)", + "items": { + "type": "string", + "examples": ["ETH", "POL", "ARB", "OPT", "DAI"] + }, + "uniqueItems": true + }, + "max_execution_time": { + "type": "integer", + "title": "Maximum Execution Time", + "description": "Maximum time (in seconds) to wait for transaction confirmation for token_execute", + "default": 300, + "minimum": 60, + "maximum": 1800 + } + }, + "required": ["states", "enabled"], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/lifi/token_execute.py b/intentkit/skills/lifi/token_execute.py new file mode 100644 index 00000000..85826877 --- /dev/null +++ b/intentkit/skills/lifi/token_execute.py @@ -0,0 +1,470 @@ +import asyncio +from typing import Any, Dict, List, Optional, Type + +import httpx +from pydantic import BaseModel, Field +from web3 import Web3 + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.clients import get_cdp_client +from intentkit.skills.lifi.base import LiFiBaseTool +from intentkit.skills.lifi.token_quote import TokenQuote +from intentkit.skills.lifi.utils import ( + ERC20_ABI, + LIFI_API_URL, + build_quote_params, + convert_chain_to_id, + create_erc20_approve_data, + format_amount, + format_transaction_result, + handle_api_response, + is_native_token, + prepare_transaction_params, + validate_inputs, +) + + +class TokenExecuteInput(BaseModel): + """Input for the TokenExecute skill.""" + + from_chain: str = Field( + description="The source chain (e.g., 'ETH', 'POL', 'ARB', 'DAI'). Can be chain ID or chain key." + ) + to_chain: str = Field( + description="The destination chain (e.g., 'ETH', 'POL', 'ARB', 'DAI'). Can be chain ID or chain key." + ) + from_token: str = Field( + description="The token to send (e.g., 'USDC', 'ETH', 'DAI'). Can be token address or symbol." + ) + to_token: str = Field( + description="The token to receive (e.g., 'USDC', 'ETH', 'DAI'). Can be token address or symbol." + ) + from_amount: str = Field( + description="The amount to send, including all decimals (e.g., '1000000' for 1 USDC with 6 decimals)." + ) + slippage: float = Field( + default=0.03, + description="Maximum acceptable slippage as a decimal (e.g., 0.03 for 3%). Default is 3%.", + ) + + +class TokenExecute(LiFiBaseTool): + """Tool for executing token transfers across chains using LiFi. + + This tool executes actual token transfers and swaps using the CDP wallet provider. + Requires a properly configured CDP wallet to work. + """ + + name: str = "lifi_token_execute" + description: str = ( + "Execute a token transfer across blockchains or swap tokens on the same chain.\n" + "This requires a CDP wallet with sufficient funds and proper network configuration.\n" + "Use token_quote first to check rates and fees before executing.\n" + "Supports all major chains like Ethereum, Polygon, Arbitrum, Optimism, Base, and more." 
+ ) + args_schema: Type[BaseModel] = TokenExecuteInput + api_url: str = LIFI_API_URL + + # Configuration options + default_slippage: float = 0.03 + allowed_chains: Optional[List[str]] = None + max_execution_time: int = 300 + quote_tool: TokenQuote = Field(default=None, exclude=True) + + def __init__( + self, + skill_store: SkillStoreABC, + default_slippage: float = 0.03, + allowed_chains: Optional[List[str]] = None, + max_execution_time: int = 300, + ): + """Initialize the TokenExecute skill with configuration options.""" + super().__init__(skill_store=skill_store) + self.default_slippage = default_slippage + self.allowed_chains = allowed_chains + self.max_execution_time = max_execution_time + # Initialize quote tool if not set + if not self.quote_tool: + self.quote_tool = TokenQuote( + skill_store=skill_store, + default_slippage=default_slippage, + allowed_chains=allowed_chains, + ) + + def _format_quote_result(self, data: Dict[str, Any]) -> str: + """Format the quote result in a readable format.""" + # Use the same formatting as token_quote + return self.quote_tool._format_quote_result(data) + + async def _arun( + self, + from_chain: str, + to_chain: str, + from_token: str, + to_token: str, + from_amount: str, + slippage: float = None, + **kwargs, + ) -> str: + """Execute a token transfer.""" + try: + # Use provided slippage or default + if slippage is None: + slippage = self.default_slippage + + # Validate all inputs + validation_error = validate_inputs( + from_chain, + to_chain, + from_token, + to_token, + from_amount, + slippage, + self.allowed_chains, + ) + if validation_error: + return validation_error + + # Get agent context for CDP wallet + context = self.get_context() + agent_id = context.agent_id + + self.logger.info( + f"Executing LiFi transfer: {from_amount} {from_token} on {from_chain} -> {to_token} on {to_chain}" + ) + + # Get CDP wallet provider + cdp_wallet_provider = await self._get_cdp_wallet_provider(agent_id) + if isinstance(cdp_wallet_provider, str): # Error message + return cdp_wallet_provider + + # Get wallet address + from_address = cdp_wallet_provider.get_address() + if not from_address: + return "No wallet address available. Please check your CDP wallet configuration." + + # Get quote and execute transfer + async with httpx.AsyncClient() as client: + # Step 1: Get quote + quote_data = await self._get_quote( + client, + from_chain, + to_chain, + from_token, + to_token, + from_amount, + slippage, + from_address, + ) + if isinstance(quote_data, str): # Error message + return quote_data + + # Step 2: Handle token approval if needed + approval_result = await self._handle_token_approval( + cdp_wallet_provider, quote_data + ) + if approval_result: + self.logger.info(f"Token approval completed: {approval_result}") + + # Step 3: Execute transaction + tx_hash = await self._execute_transfer_transaction( + cdp_wallet_provider, quote_data + ) + + # Step 4: Monitor status and return result + return await self._finalize_transfer( + client, tx_hash, from_chain, to_chain, quote_data + ) + + except Exception as e: + self.logger.error("LiFi_Error: %s", str(e)) + return f"An unexpected error occurred: {str(e)}" + + async def _get_cdp_wallet_provider(self, agent_id: str): + """Get CDP wallet provider with error handling.""" + try: + cdp_client = await get_cdp_client(agent_id, self.skill_store) + if not cdp_client: + return "CDP client not available. Please ensure your agent has CDP wallet configuration." 
+ + cdp_wallet_provider = await cdp_client.get_wallet_provider() + if not cdp_wallet_provider: + return "CDP wallet provider not configured. Please set up your agent's CDP wallet first." + + return cdp_wallet_provider + + except Exception as e: + self.logger.error("LiFi_CDP_Error: %s", str(e)) + return f"Cannot access CDP wallet: {str(e)}\n\nPlease ensure your agent has a properly configured CDP wallet with sufficient funds." + + async def _get_quote( + self, + client: httpx.AsyncClient, + from_chain: str, + to_chain: str, + from_token: str, + to_token: str, + from_amount: str, + slippage: float, + from_address: str, + ) -> Dict[str, Any]: + """Get quote from LiFi API.""" + api_params = build_quote_params( + from_chain, + to_chain, + from_token, + to_token, + from_amount, + slippage, + from_address, + ) + + try: + response = await client.get( + f"{self.api_url}/quote", + params=api_params, + timeout=30.0, + ) + except httpx.TimeoutException: + return "Request timed out. The LiFi service might be temporarily unavailable. Please try again." + except httpx.ConnectError: + return "Connection error. Unable to reach LiFi service. Please check your internet connection." + except Exception as e: + self.logger.error("LiFi_API_Error: %s", str(e)) + return f"Error making API request: {str(e)}" + + # Handle response + data, error = handle_api_response( + response, from_token, from_chain, to_token, to_chain + ) + if error: + self.logger.error("LiFi_API_Error: %s", error) + return error + + # Validate transaction request + transaction_request = data.get("transactionRequest") + if not transaction_request: + return "No transaction request found in the quote. Cannot execute transfer." + + return data + + async def _handle_token_approval( + self, wallet_provider, quote_data: Dict[str, Any] + ) -> Optional[str]: + """Handle ERC20 token approval if needed.""" + estimate = quote_data.get("estimate", {}) + approval_address = estimate.get("approvalAddress") + from_token_info = quote_data.get("action", {}).get("fromToken", {}) + from_token_address = from_token_info.get("address", "") + from_amount = quote_data.get("action", {}).get("fromAmount", "0") + + # Skip approval for native tokens + if is_native_token(from_token_address) or not approval_address: + return None + + self.logger.info("Checking token approval for ERC20 transfer...") + + try: + return await self._check_and_set_allowance( + wallet_provider, from_token_address, approval_address, from_amount + ) + except Exception as e: + self.logger.error("LiFi_Token_Approval_Error: %s", str(e)) + raise Exception(f"Failed to approve token: {str(e)}") + + async def _execute_transfer_transaction( + self, wallet_provider, quote_data: Dict[str, Any] + ) -> str: + """Execute the main transfer transaction.""" + transaction_request = quote_data.get("transactionRequest") + + try: + tx_params = prepare_transaction_params(transaction_request) + self.logger.info( + f"Sending transaction to {tx_params['to']} with value {tx_params['value']}" + ) + + # Send transaction + tx_hash = wallet_provider.send_transaction(tx_params) + + # Wait for confirmation + receipt = wallet_provider.wait_for_transaction_receipt(tx_hash) + if not receipt or receipt.get("status") == 0: + raise Exception(f"Transaction failed: {tx_hash}") + + return tx_hash + + except Exception as e: + self.logger.error("LiFi_Execution_Error: %s", str(e)) + raise Exception(f"Failed to execute transaction: {str(e)}") + + async def _finalize_transfer( + self, + client: httpx.AsyncClient, + tx_hash: str, + from_chain: 
str, + to_chain: str, + quote_data: Dict[str, Any], + ) -> str: + """Finalize transfer and return formatted result.""" + self.logger.info(f"Transaction sent: {tx_hash}") + + # Get chain ID for explorer URL + from_chain_id = convert_chain_to_id(from_chain) + + # Extract token info for result formatting + action = quote_data.get("action", {}) + from_token_info = action.get("fromToken", {}) + to_token_info = action.get("toToken", {}) + + token_info = { + "symbol": f"{from_token_info.get('symbol', 'Unknown')} → {to_token_info.get('symbol', 'Unknown')}", + "amount": format_amount( + action.get("fromAmount", "0"), from_token_info.get("decimals", 18) + ), + } + + # Format transaction result with explorer URL + transaction_result = format_transaction_result( + tx_hash, from_chain_id, token_info + ) + + # Format quote details + formatted_quote = self._format_quote_result(quote_data) + + # Handle cross-chain vs same-chain transfers + if from_chain.lower() != to_chain.lower(): + self.logger.info("Monitoring cross-chain transfer status...") + status_result = await self._monitor_transfer_status( + client, tx_hash, from_chain, to_chain + ) + + return f"""**Token Transfer Executed Successfully** + +{transaction_result} +{status_result} + +{formatted_quote} +""" + else: + return f"""**Token Swap Executed Successfully** + +{transaction_result} +**Status:** Completed (same-chain swap) + +{formatted_quote} +""" + + async def _monitor_transfer_status( + self, client: httpx.AsyncClient, tx_hash: str, from_chain: str, to_chain: str + ) -> str: + """Monitor the status of a cross-chain transfer.""" + max_attempts = min(self.max_execution_time // 10, 30) # Check every 10 seconds + attempt = 0 + + while attempt < max_attempts: + try: + status_response = await client.get( + f"{self.api_url}/status", + params={ + "txHash": tx_hash, + "fromChain": from_chain, + "toChain": to_chain, + }, + timeout=10.0, + ) + + if status_response.status_code == 200: + status_data = status_response.json() + status = status_data.get("status", "UNKNOWN") + + if status == "DONE": + receiving_tx = status_data.get("receiving", {}).get("txHash") + if receiving_tx: + return ( + f"**Status:** Complete (destination tx: {receiving_tx})" + ) + else: + return "**Status:** Complete" + elif status == "FAILED": + return "**Status:** Failed" + elif status in ["PENDING", "NOT_FOUND"]: + # Continue monitoring + pass + else: + return f"**Status:** {status}" + + except Exception as e: + self.logger.warning( + f"Status check failed (attempt {attempt + 1}): {str(e)}" + ) + + attempt += 1 + if attempt < max_attempts: + await asyncio.sleep(10) # Wait 10 seconds before next check + + return "**Status:** Processing (monitoring timed out, but transfer may still complete)" + + async def _check_and_set_allowance( + self, + wallet_provider, + token_address: str, + approval_address: str, + amount: str, + ) -> Optional[str]: + """Check if token allowance is sufficient and set approval if needed.""" + try: + # Normalize addresses + token_address = Web3.to_checksum_address(token_address) + approval_address = Web3.to_checksum_address(approval_address) + wallet_address = wallet_provider.get_address() + + # Check current allowance + try: + current_allowance = wallet_provider.read_contract( + contract_address=token_address, + abi=ERC20_ABI, + function_name="allowance", + args=[wallet_address, approval_address], + ) + + required_amount = int(amount) + + if current_allowance >= required_amount: + self.logger.info( + f"Sufficient allowance already exists: {current_allowance}" 
+ ) + return None # No approval needed + + except Exception as e: + self.logger.warning(f"Could not check current allowance: {str(e)}") + # Continue with approval anyway + + # Set approval for the required amount + self.logger.info( + f"Setting token approval for {amount} tokens to {approval_address}" + ) + + # Create approval transaction + approve_data = create_erc20_approve_data(approval_address, amount) + + # Send approval transaction + approval_tx_hash = wallet_provider.send_transaction( + { + "to": token_address, + "data": approve_data, + "value": 0, + } + ) + + # Wait for approval transaction confirmation + receipt = wallet_provider.wait_for_transaction_receipt(approval_tx_hash) + + if not receipt or receipt.get("status") == 0: + raise Exception(f"Approval transaction failed: {approval_tx_hash}") + + return approval_tx_hash + + except Exception as e: + self.logger.error(f"Token approval failed: {str(e)}") + raise Exception(f"Failed to approve token transfer: {str(e)}") diff --git a/intentkit/skills/lifi/token_quote.py b/intentkit/skills/lifi/token_quote.py new file mode 100644 index 00000000..1c6be1a4 --- /dev/null +++ b/intentkit/skills/lifi/token_quote.py @@ -0,0 +1,188 @@ +from typing import Any, Dict, List, Optional, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.lifi.base import LiFiBaseTool +from intentkit.skills.lifi.utils import ( + LIFI_API_URL, + build_quote_params, + format_duration, + format_fees_and_gas, + format_quote_basic_info, + format_route_info, + handle_api_response, + validate_inputs, +) + + +class TokenQuoteInput(BaseModel): + """Input for the TokenQuote skill.""" + + from_chain: str = Field( + description="The source chain (e.g., 'ETH', 'POL', 'ARB', 'DAI'). Can be chain ID or chain key." + ) + to_chain: str = Field( + description="The destination chain (e.g., 'ETH', 'POL', 'ARB', 'DAI'). Can be chain ID or chain key." + ) + from_token: str = Field( + description="The token to send (e.g., 'USDC', 'ETH', 'DAI'). Can be token address or symbol." + ) + to_token: str = Field( + description="The token to receive (e.g., 'USDC', 'ETH', 'DAI'). Can be token address or symbol." + ) + from_amount: str = Field( + description="The amount to send, including all decimals (e.g., '1000000' for 1 USDC with 6 decimals)." + ) + slippage: float = Field( + default=0.03, + description="The maximum allowed slippage for the transaction (0.03 represents 3%).", + ) + + +class TokenQuote(LiFiBaseTool): + """Tool for getting token transfer quotes across chains using LiFi. + + This tool provides quotes for token transfers and swaps without executing transactions. + """ + + name: str = "lifi_token_quote" + description: str = ( + "Get a quote for transferring tokens across blockchains or swapping tokens.\n" + "Use this tool to check rates, fees, and estimated time for token transfers without executing them.\n" + "Supports all major chains like Ethereum, Polygon, Arbitrum, Optimism, Base, and more." 
+ ) + args_schema: Type[BaseModel] = TokenQuoteInput + api_url: str = LIFI_API_URL + + # Configuration options + default_slippage: float = 0.03 + allowed_chains: Optional[List[str]] = None + + def __init__( + self, + skill_store: SkillStoreABC, + default_slippage: float = 0.03, + allowed_chains: Optional[List[str]] = None, + ): + """Initialize the TokenQuote skill with configuration options.""" + super().__init__(skill_store=skill_store) + self.default_slippage = default_slippage + self.allowed_chains = allowed_chains + + async def _arun( + self, + from_chain: str, + to_chain: str, + from_token: str, + to_token: str, + from_amount: str, + slippage: float = None, + **kwargs, + ) -> str: + """Get a quote for token transfer.""" + try: + # Use provided slippage or default + if slippage is None: + slippage = self.default_slippage + + # Validate all inputs + validation_error = validate_inputs( + from_chain, + to_chain, + from_token, + to_token, + from_amount, + slippage, + self.allowed_chains, + ) + if validation_error: + return validation_error + + self.logger.info( + f"Requesting LiFi quote: {from_amount} {from_token} on {from_chain} -> {to_token} on {to_chain}" + ) + + # Build API parameters + api_params = build_quote_params( + from_chain, to_chain, from_token, to_token, from_amount, slippage + ) + + # Make API request + async with httpx.AsyncClient() as client: + try: + response = await client.get( + f"{self.api_url}/quote", + params=api_params, + timeout=30.0, + ) + except httpx.TimeoutException: + return "Request timed out. The LiFi service might be temporarily unavailable. Please try again." + except httpx.ConnectError: + return "Connection error. Unable to reach LiFi service. Please check your internet connection." + except Exception as e: + self.logger.error("LiFi_API_Error: %s", str(e)) + return f"Error making API request: {str(e)}" + + # Handle response + data, error = handle_api_response( + response, from_token, from_chain, to_token, to_chain + ) + if error: + self.logger.error("LiFi_API_Error: %s", error) + return error + + # Format the quote result + return self._format_quote_result(data) + + except Exception as e: + self.logger.error("LiFi_Error: %s", str(e)) + return f"An unexpected error occurred: {str(e)}" + + def _format_quote_result(self, data: Dict[str, Any]) -> str: + """Format quote result into human-readable text.""" + try: + # Get basic info + info = format_quote_basic_info(data) + + # Build result string + result = "### Token Transfer Quote\n\n" + result += f"**From:** {info['from_amount']} {info['from_token']} on {info['from_chain']}\n" + result += f"**To:** {info['to_amount']} {info['to_token']} on {info['to_chain']}\n" + result += ( + f"**Minimum Received:** {info['to_amount_min']} {info['to_token']}\n" + ) + result += f"**Bridge/Exchange:** {info['tool']}\n\n" + + # Add USD values if available + if info["from_amount_usd"] and info["to_amount_usd"]: + result += f"**Value:** ${info['from_amount_usd']} → ${info['to_amount_usd']}\n\n" + + # Add execution time estimate + if info["execution_duration"]: + time_str = format_duration(info["execution_duration"]) + result += f"**Estimated Time:** {time_str}\n\n" + + # Add fees and gas costs + fees_text, gas_text = format_fees_and_gas(data) + if fees_text: + result += fees_text + "\n" + if gas_text: + result += gas_text + "\n" + + # Add route information + route_text = format_route_info(data) + if route_text: + result += route_text + "\n" + + result += "---\n" + result += ( + "*Use token_execute to perform this transfer with 
your CDP wallet*" + ) + + return result + + except Exception as e: + self.logger.error("Format_Error: %s", str(e)) + return f"Quote received but formatting failed: {str(e)}\nRaw data: {str(data)[:500]}..." diff --git a/intentkit/skills/lifi/utils.py b/intentkit/skills/lifi/utils.py new file mode 100644 index 00000000..9005dfd1 --- /dev/null +++ b/intentkit/skills/lifi/utils.py @@ -0,0 +1,656 @@ +""" +LiFi Skills Utilities + +Common utilities and helper functions for LiFi token transfer skills. +""" + +from typing import Any, Dict, List, Optional, Tuple + +import httpx +from web3 import Web3 + +# Constants +LIFI_API_URL = "https://li.quest/v1" +DUMMY_ADDRESS = "0x552008c0f6870c2f77e5cC1d2eb9bdff03e30Ea0" # For quotes + +# Chain ID to name mapping (includes mainnet and testnet) +CHAIN_NAMES = { + # Mainnet chains + 1: "Ethereum", + 10: "Optimism", + 56: "BNB Chain", + 100: "Gnosis Chain", + 137: "Polygon", + 250: "Fantom", + 8453: "Base", + 42161: "Arbitrum One", + 43114: "Avalanche", + 59144: "Linea", + 324: "zkSync Era", + 1101: "Polygon zkEVM", + 534352: "Scroll", + # Testnet chains + 11155111: "Ethereum Sepolia", + 84532: "Base Sepolia", + 421614: "Arbitrum Sepolia", + 11155420: "Optimism Sepolia", + 80001: "Polygon Mumbai", + 5: "Ethereum Goerli", # Legacy testnet + 420: "Optimism Goerli", # Legacy testnet +} + +# Standard ERC20 ABI for allowance and approve functions +ERC20_ABI = [ + { + "constant": True, + "inputs": [ + {"name": "_owner", "type": "address"}, + {"name": "_spender", "type": "address"}, + ], + "name": "allowance", + "outputs": [{"name": "", "type": "uint256"}], + "type": "function", + }, + { + "constant": False, + "inputs": [ + {"name": "_spender", "type": "address"}, + {"name": "_value", "type": "uint256"}, + ], + "name": "approve", + "outputs": [{"name": "", "type": "bool"}], + "type": "function", + }, +] + + +def validate_inputs( + from_chain: str, + to_chain: str, + from_token: str, + to_token: str, + from_amount: str, + slippage: float, + allowed_chains: Optional[List[str]] = None, +) -> Optional[str]: + """ + Validate all input parameters for LiFi operations. + + Returns: + None if valid, error message string if invalid + """ + # Validate slippage + if slippage < 0.001 or slippage > 0.5: + return "Invalid slippage: must be between 0.001 (0.1%) and 0.5 (50%)" + + # Validate chain identifiers can be converted to chain IDs + try: + convert_chain_to_id(from_chain) + except ValueError as e: + return f"Invalid source chain: {str(e)}" + + try: + convert_chain_to_id(to_chain) + except ValueError as e: + return f"Invalid destination chain: {str(e)}" + + # Validate chains if restricted (use original chain names for restriction check) + if allowed_chains: + if from_chain not in allowed_chains: + return f"Source chain '{from_chain}' is not allowed. Allowed chains: {', '.join(allowed_chains)}" + if to_chain not in allowed_chains: + return f"Destination chain '{to_chain}' is not allowed. Allowed chains: {', '.join(allowed_chains)}" + + # Validate amount is numeric and positive + try: + amount_float = float(from_amount) + if amount_float <= 0: + return "Amount must be greater than 0" + except ValueError: + return f"Invalid amount format: {from_amount}. Must be a numeric value." + + return None + + +def format_amount(amount: str, decimals: int) -> str: + """ + Format amount from wei/smallest unit to human readable. 
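+
+    Example:
+        format_amount("1000000", 6) -> "1.0000"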
+ + Args: + amount: Amount in smallest unit (wei/satoshi/etc) + decimals: Number of decimal places for the token + + Returns: + Formatted amount string + """ + try: + amount_int = int(amount) + amount_float = amount_int / (10**decimals) + + # Format with appropriate precision + if amount_float >= 1000: + return f"{amount_float:,.2f}" + elif amount_float >= 1: + return f"{amount_float:.4f}" + elif amount_float >= 0.01: + return f"{amount_float:.6f}" + else: + return f"{amount_float:.8f}" + except (ValueError, TypeError): + return str(amount) + + +def get_chain_name(chain_id: int) -> str: + """ + Get human readable chain name from chain ID. + + Args: + chain_id: Blockchain chain ID + + Returns: + Human readable chain name + """ + return CHAIN_NAMES.get(chain_id, f"Chain {chain_id}") + + +def format_duration(duration: int) -> str: + """ + Format duration in seconds to human readable format. + + Args: + duration: Duration in seconds + + Returns: + Formatted duration string + """ + if duration < 60: + return f"{duration} seconds" + elif duration < 3600: + return f"{duration // 60} minutes {duration % 60} seconds" + else: + hours = duration // 3600 + minutes = (duration % 3600) // 60 + return f"{hours} hours {minutes} minutes" + + +def handle_api_response( + response: httpx.Response, + from_token: str, + from_chain: str, + to_token: str, + to_chain: str, +) -> Tuple[Optional[Dict], Optional[str]]: + """ + Handle LiFi API response and return data or error message. + + Args: + response: HTTP response from LiFi API + from_token, from_chain, to_token, to_chain: Transfer parameters for error messages + + Returns: + Tuple of (data, error_message). One will be None. + """ + if response.status_code == 400: + try: + error_data = response.json() + error_message = error_data.get("message", response.text) + return None, f"Invalid request: {error_message}" + except (ValueError, TypeError, AttributeError): + return None, f"Invalid request: {response.text}" + elif response.status_code == 404: + return ( + None, + f"No route found for transfer from {from_token} on {from_chain} to {to_token} on {to_chain}. Try different tokens or chains.", + ) + elif response.status_code != 200: + return None, f"LiFi API error ({response.status_code}): {response.text}" + + try: + data = response.json() + if not isinstance(data, dict): + return None, "Invalid response format from LiFi API." + return data, None + except Exception: + return None, "Invalid response from LiFi API. Please try again." + + +def convert_chain_to_id(chain: str) -> int: + """ + Convert chain identifier to numeric chain ID. 
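+
+    Example:
+        convert_chain_to_id("base") -> 8453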
+ + Args: + chain: Chain identifier (can be name, key, or numeric ID as string) + + Returns: + Numeric chain ID + + Raises: + ValueError: If chain identifier is not recognized + """ + # If it's already a number, return it + if chain.isdigit(): + return int(chain) + + # Chain name/key to ID mapping + chain_mapping = { + # Mainnet chains + "ethereum": 1, + "eth": 1, + "1": 1, + "optimism": 10, + "opt": 10, + "10": 10, + "binance": 56, + "bsc": 56, + "bnb": 56, + "56": 56, + "gnosis": 100, + "100": 100, + "polygon": 137, + "pol": 137, + "matic": 137, + "137": 137, + "fantom": 250, + "ftm": 250, + "250": 250, + "base": 8453, + "base-mainnet": 8453, + "8453": 8453, + "arbitrum": 42161, + "arb": 42161, + "42161": 42161, + "avalanche": 43114, + "avax": 43114, + "43114": 43114, + "linea": 59144, + "59144": 59144, + "zksync": 324, + "324": 324, + "polygon-zkevm": 1101, + "1101": 1101, + "scroll": 534352, + "534352": 534352, + # Testnet chains + "ethereum-sepolia": 11155111, + "sepolia": 11155111, + "11155111": 11155111, + "base-sepolia": 84532, + "84532": 84532, + "arbitrum-sepolia": 421614, + "421614": 421614, + "optimism-sepolia": 11155420, + "11155420": 11155420, + "polygon-mumbai": 80001, + "mumbai": 80001, + "80001": 80001, + } + + chain_lower = chain.lower() + if chain_lower in chain_mapping: + return chain_mapping[chain_lower] + + raise ValueError(f"Unsupported chain identifier: {chain}") + + +def convert_amount_to_wei(amount: str, token_symbol: str = "ETH") -> str: + """ + Convert human-readable amount to wei format for LiFi API. + + Args: + amount: Amount in human readable format (e.g., "0.0015") + token_symbol: Token symbol to determine decimals + + Returns: + Amount in wei format as string + """ + # Default decimals for common tokens + token_decimals = { + "ETH": 18, + "USDC": 6, + "USDT": 6, + "DAI": 18, + "WETH": 18, + "MATIC": 18, + "BNB": 18, + "AVAX": 18, + } + + decimals = token_decimals.get(token_symbol.upper(), 18) + + try: + # Convert string to float, then to wei + amount_float = float(amount) + amount_wei = int(amount_float * (10**decimals)) + return str(amount_wei) + except (ValueError, TypeError): + # If conversion fails, return original amount + return amount + + +def build_quote_params( + from_chain: str, + to_chain: str, + from_token: str, + to_token: str, + from_amount: str, + slippage: float, + from_address: Optional[str] = None, +) -> Dict[str, Any]: + """ + Build parameters for LiFi quote API request. + + Args: + from_chain, to_chain, from_token, to_token, from_amount: Transfer parameters + slippage: Slippage tolerance + from_address: Wallet address (uses dummy if None) + + Returns: + Dictionary of API parameters + + Raises: + ValueError: If chain identifiers are not recognized + """ + # Convert amount to wei format for API + wei_amount = convert_amount_to_wei(from_amount, from_token) + + return { + "fromChain": convert_chain_to_id(from_chain), + "toChain": convert_chain_to_id(to_chain), + "fromToken": from_token, + "toToken": to_token, + "fromAmount": wei_amount, + "fromAddress": from_address or DUMMY_ADDRESS, + "slippage": slippage, + } + + +def is_native_token(token_address: str) -> bool: + """ + Check if token address represents a native token (ETH, MATIC, etc). 
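+
+    Example:
+        is_native_token("0x0000000000000000000000000000000000000000") -> True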
+ + Args: + token_address: Token contract address + + Returns: + True if native token, False if ERC20 + """ + return ( + token_address == "0x0000000000000000000000000000000000000000" + or token_address == "" + or token_address.lower() == "0x0" + ) + + +def prepare_transaction_params(transaction_request: Dict[str, Any]) -> Dict[str, Any]: + """ + Prepare transaction parameters for CDP wallet provider. + + Args: + transaction_request: Transaction request from LiFi API + + Returns: + Formatted transaction parameters + + Raises: + Exception: If required parameters are missing + """ + to_address = transaction_request.get("to") + value = transaction_request.get("value", "0") + data = transaction_request.get("data", "0x") + + if not to_address: + raise Exception("No destination address in transaction request") + + # Convert value to integer if it's a string + if isinstance(value, str): + value = int(value, 16) if value.startswith("0x") else int(value) + + return { + "to": Web3.to_checksum_address(to_address), + "value": value, + "data": data, + } + + +def format_quote_basic_info(data: Dict[str, Any]) -> Dict[str, Any]: + """ + Extract and format basic quote information. + + Args: + data: Quote response from LiFi API + + Returns: + Dictionary with formatted basic info + """ + action = data.get("action", {}) + estimate = data.get("estimate", {}) + + from_token_info = action.get("fromToken", {}) + to_token_info = action.get("toToken", {}) + + from_amount = action.get("fromAmount", "0") + to_amount = estimate.get("toAmount", "0") + to_amount_min = estimate.get("toAmountMin", "0") + + from_token_decimals = from_token_info.get("decimals", 18) + to_token_decimals = to_token_info.get("decimals", 18) + + return { + "from_token": from_token_info.get("symbol", "Unknown"), + "to_token": to_token_info.get("symbol", "Unknown"), + "from_chain": get_chain_name(action.get("fromChainId")), + "to_chain": get_chain_name(action.get("toChainId")), + "from_amount": format_amount(from_amount, from_token_decimals), + "to_amount": format_amount(to_amount, to_token_decimals), + "to_amount_min": format_amount(to_amount_min, to_token_decimals), + "tool": data.get("tool", "Unknown"), + "from_amount_usd": estimate.get("fromAmountUSD"), + "to_amount_usd": estimate.get("toAmountUSD"), + "execution_duration": estimate.get("executionDuration"), + } + + +def format_fees_and_gas(data: Dict[str, Any]) -> Tuple[str, str]: + """ + Format fee and gas cost information from quote data. 
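+
+    Fee costs are aggregated from every entry in "includedSteps"
+    (each step's estimate.feeCosts); gas costs come from the top-level
+    estimate.gasCosts.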
+ + Args: + data: Quote response from LiFi API + + Returns: + Tuple of (fees_text, gas_text) + """ + estimate = data.get("estimate", {}) + + # Extract gas and fee costs + gas_costs = estimate.get("gasCosts", []) + fee_costs = [] + + # Collect fee information from included steps + for step in data.get("includedSteps", []): + step_fees = step.get("estimate", {}).get("feeCosts", []) + if step_fees: + fee_costs.extend(step_fees) + + # Format fees + fees_text = "" + if fee_costs: + fees_text = "**Fees:**\n" + total_fee_usd = 0 + for fee in fee_costs: + fee_name = fee.get("name", "Unknown fee") + fee_amount = fee.get("amount", "0") + fee_token = fee.get("token", {}).get("symbol", "") + fee_decimals = fee.get("token", {}).get("decimals", 18) + fee_percentage = fee.get("percentage", "0") + fee_usd = fee.get("amountUSD", "0") + + fee_amount_formatted = format_amount(fee_amount, fee_decimals) + percentage_str = ( + f" ({float(fee_percentage) * 100:.3f}%)" + if fee_percentage != "0" + else "" + ) + fees_text += ( + f"- {fee_name}: {fee_amount_formatted} {fee_token}{percentage_str}" + ) + + if fee_usd and float(fee_usd) > 0: + fees_text += f" (${fee_usd})" + total_fee_usd += float(fee_usd) + + fees_text += "\n" + + if total_fee_usd > 0: + fees_text += f"- **Total Fees:** ~${total_fee_usd:.4f}\n" + + # Format gas costs + gas_text = "" + if gas_costs: + gas_text = "**Gas Cost:**\n" + total_gas_usd = 0 + for gas in gas_costs: + gas_amount = gas.get("amount", "0") + gas_token = gas.get("token", {}).get("symbol", "ETH") + gas_decimals = gas.get("token", {}).get("decimals", 18) + gas_usd = gas.get("amountUSD", "0") + gas_type = gas.get("type", "SEND") + + gas_amount_formatted = format_amount(gas_amount, gas_decimals) + gas_text += f"- {gas_type}: {gas_amount_formatted} {gas_token}" + + if gas_usd and float(gas_usd) > 0: + gas_text += f" (${gas_usd})" + total_gas_usd += float(gas_usd) + + gas_text += "\n" + + if total_gas_usd > 0: + gas_text += f"- **Total Gas:** ~${total_gas_usd:.4f}\n" + + return fees_text, gas_text + + +def format_route_info(data: Dict[str, Any]) -> str: + """ + Format routing information from quote data. + + Args: + data: Quote response from LiFi API + + Returns: + Formatted route information text + """ + included_steps = data.get("includedSteps", []) + if len(included_steps) <= 1: + return "" + + route_text = "**Route:**\n" + for i, step in enumerate(included_steps, 1): + step_tool = step.get("tool", "Unknown") + step_type = step.get("type", "unknown") + route_text += f"{i}. {step_tool} ({step_type})\n" + + return route_text + + +def create_erc20_approve_data(spender_address: str, amount: str) -> str: + """ + Create encoded data for ERC20 approve function call. + + Args: + spender_address: Address to approve + amount: Amount to approve + + Returns: + Encoded function call data + """ + contract = Web3().eth.contract( + address=Web3.to_checksum_address("0x0000000000000000000000000000000000000000"), + abi=ERC20_ABI, + ) + return contract.encode_abi("approve", [spender_address, int(amount)]) + + +def get_api_error_message(response: httpx.Response) -> str: + """ + Extract error message from API response. + + Args: + response: HTTP response + + Returns: + Formatted error message + """ + try: + error_data = response.json() + return error_data.get("message", response.text) + except (ValueError, TypeError, AttributeError): + return response.text + + +def get_explorer_url(chain_id: int, tx_hash: str) -> str: + """ + Generate blockchain explorer URL for a transaction. 
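+
+    Example:
+        get_explorer_url(8453, "0x123...") -> "https://basescan.org/tx/0x123..."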
+ + Args: + chain_id: Blockchain chain ID + tx_hash: Transaction hash + + Returns: + Explorer URL for the transaction + """ + # Explorer URLs for different chains + explorers = { + 1: "https://etherscan.io/tx/", # Ethereum + 10: "https://optimistic.etherscan.io/tx/", # Optimism + 56: "https://bscscan.com/tx/", # BSC + 100: "https://gnosisscan.io/tx/", # Gnosis + 137: "https://polygonscan.com/tx/", # Polygon + 250: "https://ftmscan.com/tx/", # Fantom + 8453: "https://basescan.org/tx/", # Base + 42161: "https://arbiscan.io/tx/", # Arbitrum + 43114: "https://snowtrace.io/tx/", # Avalanche + 59144: "https://lineascan.build/tx/", # Linea + 324: "https://explorer.zksync.io/tx/", # zkSync Era + 1101: "https://zkevm.polygonscan.com/tx/", # Polygon zkEVM + 534352: "https://scrollscan.com/tx/", # Scroll + # Testnet explorers + 11155111: "https://sepolia.etherscan.io/tx/", # Ethereum Sepolia + 84532: "https://sepolia.basescan.org/tx/", # Base Sepolia + 421614: "https://sepolia.arbiscan.io/tx/", # Arbitrum Sepolia + 11155420: "https://sepolia-optimism.etherscan.io/tx/", # Optimism Sepolia + 80001: "https://mumbai.polygonscan.com/tx/", # Polygon Mumbai + } + + base_url = explorers.get(chain_id, "https://etherscan.io/tx/") + return f"{base_url}{tx_hash}" + + +def format_transaction_result( + tx_hash: str, chain_id: int, token_info: dict = None +) -> str: + """ + Format transaction result with explorer link. + + Args: + tx_hash: Transaction hash + chain_id: Chain ID where transaction was executed + token_info: Optional token information for context + + Returns: + Formatted transaction result message + """ + explorer_url = get_explorer_url(chain_id, tx_hash) + chain_name = get_chain_name(chain_id) + + result = "Transaction successful!\n" + result += f"Transaction Hash: {tx_hash}\n" + result += f"Network: {chain_name}\n" + result += f"Explorer: {explorer_url}\n" + + if token_info: + result += f"Token: {token_info.get('symbol', 'Unknown')}\n" + result += f"Amount: {token_info.get('amount', 'Unknown')}\n" + + return result diff --git a/intentkit/skills/moralis/README.md b/intentkit/skills/moralis/README.md new file mode 100644 index 00000000..208c99aa --- /dev/null +++ b/intentkit/skills/moralis/README.md @@ -0,0 +1,490 @@ +# Wallet Portfolio & Blockchain Analysis Skills + +## Overview + +The Wallet Portfolio & Blockchain Analysis Skills module provides comprehensive blockchain wallet analysis and transaction exploration capabilities across EVM-compatible chains (Ethereum, BSC, Polygon, etc.) and Solana. This module integrates with Moralis API to fetch wallet balances, transaction data, block information, NFT holdings, and more. + +## Features + +- Multi-chain portfolio analysis +- Token balances with USD values +- Transaction history with detailed metadata +- Transaction exploration and decoding +- Block data retrieval and analysis +- NFT holdings with metadata +- Solana-specific portfolio analysis +- Token approval monitoring + +## Setup + +1. Obtain a Moralis API key from [Moralis](https://moralis.io/) +2. 
Configure the module with your API key: + +```json +{ + "api_key": "YOUR_MORALIS_API_KEY", + "states": { + "fetch_wallet_portfolio": "public", + "fetch_chain_portfolio": "public", + "fetch_nft_portfolio": "public", + "fetch_transaction_history": "public", + "fetch_solana_portfolio": "public", + "fetch_transaction_by_hash": "public", + "fetch_latest_block": "public", + "fetch_block_by_hash_or_number": "public", + "fetch_block_by_date": "public" + }, + "supported_chains": { + "evm": true, + "solana": true + } +} +``` + +## Wallet Portfolio Skills + +### 1. Fetch Wallet Portfolio (`fetch_wallet_portfolio`) + +Provides a comprehensive overview of a wallet's holdings across multiple chains. + +#### Sample Prompts: + +``` +What's my portfolio value across all chains for address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e? +``` + +``` +Show me the total value of my crypto holdings for 0x742d35Cc6634C0532925a3b844Bc454e4438f44e across Ethereum and BSC +``` + +#### Example Response: + +``` +I've analyzed the wallet portfolio for address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e across multiple chains. + +Portfolio Summary: +- Total Net Worth: $12,567.82 +- Ethereum: $8,934.21 +- BSC: $2,456.78 +- Polygon: $1,176.83 + +Top Holdings: +1. ETH (Ethereum): 1.53 ETH ($5,823.45) +2. USDT (Ethereum): 2,500.00 USDT ($2,500.00) +3. BNB (BSC): 4.25 BNB ($1,870.25) +4. MATIC (Polygon): 1,100.00 MATIC ($880.00) +5. AAVE (Ethereum): 3.45 AAVE ($610.76) + +Would you like me to provide more details about any specific token or chain? +``` + +### 2. Fetch Chain Portfolio (`fetch_chain_portfolio`) + +Provides detailed information about a wallet's holdings on a specific blockchain. + +#### Sample Prompts: + +``` +What tokens do I have on Ethereum for address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e? +``` + +``` +Show me my token approvals on BSC for wallet 0x742d35Cc6634C0532925a3b844Bc454e4438f44e +``` + +#### Example Response: + +``` +I've analyzed your holdings on Ethereum (Chain ID: 1) for address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e. + +Portfolio on Ethereum: +- Total Value: $8,934.21 + +Native Token: +- ETH: 1.53 ETH ($5,823.45) + +ERC-20 Tokens: +1. USDT: 2,500.00 USDT ($2,500.00) +2. AAVE: 3.45 AAVE ($610.76) +3. LINK: 0.01 LINK ($0.25) + +Token Approvals: +1. USDT approved to Uniswap Router (0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D) + - Unlimited approval (high risk) + +2. AAVE approved to AAVE lending pool (0x7d2768dE32b0b80b7a3454c06BdAc94A69DDc7A9) + - Amount: 10 AAVE +``` + +### 3. Fetch NFT Portfolio (`fetch_nft_portfolio`) + +Retrieves NFT holdings for a wallet address across different chains. + +#### Sample Prompts: + +``` +What NFTs do I own on address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e? +``` + +``` +Show me my Solana NFTs for wallet 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CYuH1Oss2nS +``` + +#### Example Response: + +``` +I've found the following NFTs for address 0x742d35Cc6634C0532925a3b844Bc454e4438f44e: + +Total NFTs: 7 (Ethereum: 4, Polygon: 3) + +Ethereum NFTs: +1. CryptoPunk #8291 + - Collection: CryptoPunks + - Token ID: 8291 + - Contract: 0xb47e3cd837dDF8e4c57F05d70Ab865de6e193BBB + - Traits: Mohawk, Earring + - Floor Price: 72.5 ETH + +2. Bored Ape Yacht Club #2534 + - Collection: Bored Ape Yacht Club + - Token ID: 2534 + - Contract: 0xBC4CA0EdA7647A8aB7C2061c2E118A18a936f13D + - Traits: Red Fur, 3D Glasses, Hat + - Floor Price: 38.2 ETH + +[2 more NFTs...] + +Polygon NFTs: +1. 
Sandbox Land (-12, 40) + - Collection: Sandbox + - Token ID: 43215 + - Contract: 0x5CC5B05a8A13E3fBDB0BB9FcCd98D38e50F90c38 + +[2 more NFTs...] +``` + +### 4. Fetch Transaction History (`fetch_transaction_history`) + +Retrieves detailed transaction history for a wallet address with enhanced analytics. + +#### Sample Prompts: + +``` +Show me the recent transactions for 0x742d35Cc6634C0532925a3b844Bc454e4438f44e +``` + +``` +What are my latest swaps on Ethereum for wallet 0x742d35Cc6634C0532925a3b844Bc454e4438f44e? +``` + +#### Example Response: + +``` +Here are the recent transactions for 0x742d35Cc6634C0532925a3b844Bc454e4438f44e on Ethereum: + +Transaction Statistics: +- Total Transactions: 156 +- Swaps: 42 +- Transfers: 87 +- Approvals: 12 +- Other: 15 + +Recent Activity: +1. Swap (2 hours ago) + - Hash: 0x3a5e...f781 + - Swapped 1,000 USDT for 0.25 ETH on Uniswap + - Fee: 0.005 ETH ($19.25) + - Function: swap(uint256,uint256,address[],address) + +2. Token Transfer (1 day ago) + - Hash: 0x8f72...d4e3 + - Sent 50 LINK to 0x91B...5a4c + - Fee: 0.002 ETH ($7.70) + +3. NFT Purchase (2 days ago) + - Hash: 0x6c9d...b2a1 + - Bought Bored Ape #2534 for 38.2 ETH + - Fee: 0.012 ETH ($46.20) + +4. Approval (3 days ago) + - Hash: 0x2d7f...a9b3 + - Approved USDT for Uniswap Router + - Fee: 0.003 ETH ($11.55) + +5. Deposit (5 days ago) + - Hash: 0x1e8c...f3d2 + - Received 2,000 USDT from Binance + - Fee: N/A (you were the recipient) + +Would you like to see more transactions or filter by a specific type? +``` + +### 5. Fetch Solana Portfolio (`fetch_solana_portfolio`) + +Provides detailed information about a Solana wallet's holdings. + +#### Sample Prompts: + +``` +What's in my Solana wallet 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CYuH1Oss2nS? +``` + +``` +Show me my SOL and SPL tokens for address 5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CYuH1Oss2nS +``` + +#### Example Response: + +``` +I've analyzed your Solana wallet (5YNmS1R9nNSCDzb5a7mMJ1dwK9uHeAAF4CYuH1Oss2nS): + +Portfolio Summary: +- Total Value: $5,342.67 + +Native SOL: +- Balance: 25.8 SOL ($2,322.00) + +SPL Tokens: +1. USDC: 1,500.00 USDC ($1,500.00) +2. RAY: 120.5 RAY ($960.78) +3. SRM: 300 SRM ($450.00) +4. MNGO: 4,500 MNGO ($45.00) +5. FIDA: 250 FIDA ($62.50) +6. STEP: 100 STEP ($2.40) + +NFTs: 3 found (enable include_nfts parameter for details) + +Associated Token Addresses: +- USDC: CK8a3uXCLZxmQZ9r7Q9eMEU9UKinLKzQi9VWNtmG9rB +- RAY: 7pbMGuVdG4rQt7QUjSYWQFEASVcS9eJtt7LUUVfZPsDR +[more addresses...] +``` + +## Blockchain Data Skills + +### 6. Fetch Transaction By Hash (`fetch_transaction_by_hash`) + +Retrieves detailed information about a specific transaction by its hash. + +#### Sample Prompts: + +``` +Show me details for transaction 0xfeda0e8f0d6e54112c28d319c0d303c065d1125c9197bd653682f5fcb0a6c81e +``` + +``` +What happened in this transaction: 0x1ed85b3757a6d31d01a4d6677fc52fd3911d649a0af21fe5ca3f886b153773ed? +``` + +#### Example Response: + +``` +Transaction 0x1ed85b3757a6d31d01a4d6677fc52fd3911d649a0af21fe5ca3f886b153773ed +Status: Success +Type: Transfer +From: 0x267be1c1d684f78cb4f6a176c4911b741e4ffdc0 (Binance 1) +To: 0x003dde3494f30d861d063232c6a8c04394b686ff (Binance 2) +Value: 0.115580 ETH +Block: 12386788 +Timestamp: 2021-05-07T11:08:35.000Z + +This transaction was a simple ETH transfer between two addresses. The transaction was successful and used 21,000 gas at a price of 52.5 Gwei, resulting in a fee of 0.0011025 ETH. 
+ +The transaction occurred on the Ethereum mainnet and did not involve any smart contract interactions or token transfers. +``` + +### 7. Fetch Latest Block (`fetch_latest_block`) + +Retrieves the latest block number from a blockchain network. + +#### Sample Prompts: + +``` +What's the latest block on Ethereum? +``` + +``` +Show me the current block height for BSC +``` + +#### Example Response: + +``` +The latest block on Ethereum (Chain ID: 1) is 18243567. + +This block was mined approximately 12 seconds ago. +``` + +### 8. Fetch Block By Hash or Number (`fetch_block_by_hash_or_number`) + +Retrieves detailed information about a block by its hash or number. + +#### Sample Prompts: + +``` +Show me block 17000000 on Ethereum +``` + +``` +Get details for block 0x9b559aef7ea858608c2e554246fe4a24287e7aeeb976848df2b9a2531f4b9171 +``` + +#### Example Response: + +``` +Block #17000000 on Ethereum (Chain ID: 1) + +Block Details: +- Hash: 0x2241c2a0926e7c876af6c0bb355461fe5ef7a682fa0441125575fa5c5af5fe90 +- Timestamp: 2023-06-13T10:42:15.000Z +- Miner: 0xea674fdde714fd979de3edf0f56aa9716b898ec8 +- Gas Used: 29,892,458 (99.87% of gas limit) +- Size: 142,157 bytes +- Transactions: 318 + +This block contains 318 transactions and was mined by Ethermine (0xea674fdde714fd979de3edf0f56aa9716b898ec8). The total gas used was 29,892,458, which is 99.87% of the block's gas limit. +``` + +### 9. Fetch Block By Date (`fetch_block_by_date`) + +Retrieves block information based on a specific date. + +#### Sample Prompts: + +``` +What block was mined on June 15, 2023 on Ethereum? +``` + +``` +Show me the blockchain state on 2023-01-01 +``` + +#### Example Response: + +``` +On June 15, 2023, block #17034268 was mined on Ethereum (Chain ID: 1). + +Block Details: +- Hash: 0x3a33181831a3d5c23072378163e43179743f79f8921c2a07e23da3c953502742 +- Timestamp: 2023-06-15T00:00:12.000Z +- Miner: 0xea674fdde714fd979de3edf0f56aa9716b898ec8 +- Gas Used: 29,956,842 (99.86% of gas limit) +- Size: 143,965 bytes +- Transactions: 327 + +This block was mined by Ethermine (0xea674fdde714fd979de3edf0f56aa9716b898ec8) at the beginning of June 15, 2023. +``` + +## Advanced Usage + +### Combining Skills + +The wallet portfolio and blockchain data skills can be combined for comprehensive analysis. For example: + +``` +Can you analyze wallet 0x742d35Cc6634C0532925a3b844Bc454e4438f44e and show me: +1. Total portfolio value +2. NFT holdings +3. Recent transactions and decode the last swap transaction +``` + +### Response Example: + +``` +I've performed a comprehensive analysis of wallet 0x742d35Cc6634C0532925a3b844Bc454e4438f44e: + +PORTFOLIO VALUE: +Total Net Worth: $12,567.82 +- Ethereum: $8,934.21 +- BSC: $2,456.78 +- Polygon: $1,176.83 + +Top Holdings: +1. ETH (Ethereum): 1.53 ETH ($5,823.45) +2. USDT (Ethereum): 2,500.00 USDT ($2,500.00) +3. BNB (BSC): 4.25 BNB ($1,870.25) + +NFT HOLDINGS: +Total NFTs: 7 (Ethereum: 4, Polygon: 3) + +Notable NFTs: +1. CryptoPunk #8291 (Ethereum) + - Floor Price: 72.5 ETH +2. Bored Ape Yacht Club #2534 (Ethereum) + - Floor Price: 38.2 ETH +3. 
Sandbox Land (-12, 40) (Polygon) + +RECENT TRANSACTIONS: +Most recent swap transaction (3 hours ago): +- Hash: 0x3a5e...f781 +- Function: swapExactTokensForETH(uint256,uint256,address[],address,uint256) +- Parameters: + - amountIn: 1000000000 (uint256) - 1,000 USDT with 6 decimals + - amountOutMin: 249158750524475790 (uint256) - Minimum 0.249 ETH expected + - path: ["0xdAC17F958D2ee523a2206206994597C13D831ec7","0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"] - USDT to WETH path + - to: "0x742d35Cc6634C0532925a3b844Bc454e4438f44e" - Your address + - deadline: 1689542498 (uint256) - Transaction deadline timestamp + +The transaction successfully swapped 1,000 USDT for 0.25 ETH using Uniswap Router. + +Would you like me to focus on any specific aspect of this wallet? +``` + +### Transaction Analysis + +The transaction analysis skills can provide deep insights into on-chain activity: + +``` +Can you analyze this transaction 0xfeda0e8f0d6e54112c28d319c0d303c065d1125c9197bd653682f5fcb0a6c81e and explain what happened in simple terms? +``` + +### Response Example: + +``` +I've analyzed transaction 0xfeda0e8f0d6e54112c28d319c0d303c065d1125c9197bd653682f5fcb0a6c81e: + +TRANSACTION SUMMARY: +This was an interaction with the Uniswap V2 Router on Ethereum, executed on May 7, 2021. + +WHAT HAPPENED: +In simple terms, someone swapped approximately 5,000 USDT for 1.89 ETH using Uniswap. + +DETAILS: +- Type: Token Swap (DEX) +- Platform: Uniswap V2 +- Function Called: swapExactTokensForETH +- Tokens Involved: + * Sent: 5,000 USDT + * Received: 1.89 ETH +- Fee Paid: 0.0084 ETH (approximately $21.50 at that time) +- Result: Successful + +This transaction represents a typical decentralized exchange swap where USDT stablecoin was exchanged for ETH. The transaction was initiated by a wallet associated with Binance and executed through the Uniswap V2 protocol. +``` + +## Error Handling + +The skills handle various error conditions gracefully: + +- Invalid addresses +- Unsupported chains +- API rate limiting +- Network issues +- Malformed transaction hashes +- Non-existent blocks + +Each skill includes an `error` field in the response that will be populated with error information when applicable. + +## Limitations + +- Data is only as current as the Moralis API +- Some price data may not be available for smaller or newer tokens +- Transactions are limited to 100 per request by default +- NFT metadata and images may not be available for all NFTs +- Token approvals analysis may not identify all high-risk approvals +- Transaction decoding depends on verified ABIs in the Moralis database +- Block data for very old blocks may be slower to retrieve + +## Contributing + +Contributions to improve the Wallet Portfolio & Blockchain Analysis Skills are welcome. Please ensure that your code follows the project's style and includes appropriate tests. 
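+
+## Programmatic Usage
+
+For local experimentation, the minimal sketch below shows one way to construct a
+skill directly and check the `error` field described under Error Handling. It is
+illustrative only: the wallet address is the example address used throughout this
+document, `my_store` stands in for whatever `SkillStoreABC` implementation your
+runtime provides, and the API key is a placeholder.
+
+```python
+import asyncio
+
+from intentkit.skills.moralis import get_wallet_skill
+
+
+async def check_portfolio(store) -> None:
+    # store: any SkillStoreABC implementation provided by your runtime.
+    skill = get_wallet_skill("fetch_chain_portfolio", "YOUR_MORALIS_API_KEY", store)
+    result = await skill._arun(
+        address="0x742d35Cc6634C0532925a3b844Bc454e4438f44e", chain_id=1
+    )
+    if result.error:
+        print(f"Lookup failed: {result.error}")
+    else:
+        print(f"Total value on Ethereum: ${result.total_usd_value:,.2f}")
+
+
+# asyncio.run(check_portfolio(my_store))  # my_store: your SkillStoreABC instance
+```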
\ No newline at end of file
diff --git a/intentkit/skills/moralis/__init__.py b/intentkit/skills/moralis/__init__.py
new file mode 100644
index 00000000..713a5ea5
--- /dev/null
+++ b/intentkit/skills/moralis/__init__.py
@@ -0,0 +1,110 @@
+"""Wallet Portfolio Skills for IntentKit."""
+
+import logging
+from typing import Dict, List, NotRequired, Optional, TypedDict
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.moralis.base import WalletBaseTool
+from intentkit.skills.moralis.fetch_chain_portfolio import FetchChainPortfolio
+from intentkit.skills.moralis.fetch_nft_portfolio import FetchNftPortfolio
+from intentkit.skills.moralis.fetch_solana_portfolio import FetchSolanaPortfolio
+from intentkit.skills.moralis.fetch_wallet_portfolio import FetchWalletPortfolio
+
+logger = logging.getLogger(__name__)
+
+
+class SkillStates(TypedDict):
+    """Configuration of states for wallet skills."""
+
+    fetch_wallet_portfolio: SkillState
+    fetch_chain_portfolio: SkillState
+    fetch_nft_portfolio: SkillState
+    fetch_solana_portfolio: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for Wallet Portfolio skills.
+
+    TypedDict fields cannot carry default values; when supported_chains is
+    omitted, both EVM and Solana are treated as enabled (see the .get() calls
+    in get_skills below). api_key is only required when api_key_provider is
+    "agent_owner".
+    """
+
+    api_key: NotRequired[str]
+    states: SkillStates
+    supported_chains: NotRequired[Dict[str, bool]]
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> List[WalletBaseTool]:
+    """Get all Wallet Portfolio skills.
+
+    Args:
+        config: Skill configuration
+        is_private: Whether the request is from an authenticated user
+        store: Skill store for persistence
+        **_: Additional arguments
+
+    Returns:
+        List of enabled wallet skills
+    """
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            # Check chain support for Solana-specific skills
+            if skill_name == "fetch_solana_portfolio" and not config.get(
+                "supported_chains", {}
+            ).get("solana", True):
+                continue
+
+            available_skills.append(skill_name)
+
+    # Resolve the API key once for all skills
+    if config.get("api_key_provider") == "agent_owner":
+        api_key = config.get("api_key")
+    else:
+        api_key = store.get_system_config("moralis_api_key")
+
+    # Get each skill using the getter
+    result = []
+    for name in available_skills:
+        skill = get_wallet_skill(name, api_key, store)
+        if skill:
+            result.append(skill)
+    return result
+
+
+def get_wallet_skill(
+    name: str,
+    api_key: str,
+    store: SkillStoreABC,
+) -> Optional[WalletBaseTool]:
+    """Get a specific Wallet Portfolio skill by name.
+ + Args: + name: Name of the skill to get + api_key: API key for Moralis + store: Skill store for persistence + + Returns: + The requested skill + """ + skill_classes = { + "fetch_wallet_portfolio": FetchWalletPortfolio, + "fetch_chain_portfolio": FetchChainPortfolio, + "fetch_nft_portfolio": FetchNftPortfolio, + "fetch_solana_portfolio": FetchSolanaPortfolio, + } + + if name not in skill_classes: + logger.warning(f"Unknown Wallet Portfolio skill: {name}") + return None + + return skill_classes[name]( + api_key=api_key, + skill_store=store, + ) diff --git a/intentkit/skills/moralis/api.py b/intentkit/skills/moralis/api.py new file mode 100644 index 00000000..c33d8750 --- /dev/null +++ b/intentkit/skills/moralis/api.py @@ -0,0 +1,281 @@ +"""API interface for wallet data providers (EVM chains and Solana).""" + +import logging +from typing import Dict + +import httpx + +from intentkit.skills.moralis.base import CHAIN_MAPPING + +logger = logging.getLogger(__name__) + +############################################# +# EVM Chains API (Ethereum, BSC, etc.) +############################################# + + +async def fetch_moralis_data( + api_key: str, endpoint: str, address: str, chain_id: int = None, params: dict = None +) -> dict: + """Base function for Moralis API calls. + + Args: + api_key: Moralis API key + endpoint: API endpoint (with {address} placeholder if needed) + address: Wallet address to query + chain_id: Blockchain network ID + params: Additional query parameters + + Returns: + Response data from the API or error + """ + + if not api_key: + logger.error("API key not configured") + return {"error": "API key not configured"} + + base_url = "https://deep-index.moralis.io/api/v2.2" + headers = {"X-API-Key": api_key} + + url = f"{base_url}/{endpoint.format(address=address)}" + + if chain_id: + params = params or {} + params["chain"] = CHAIN_MAPPING.get(chain_id, "eth") + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() + return response.json() + except httpx.RequestError as e: + logger.error(f"API request error: {e}") + return {"error": str(e)} + except httpx.HTTPStatusError as e: + logger.error(f"API error: {e.response.status_code} {e.response.text}") + return {"error": f"HTTP error {e.response.status_code}"} + + +# Wallet Balances +async def fetch_wallet_balances( + api_key: str, address: str, chain_id: int = None +) -> dict: + """Get token balances with prices. + + Args: + api_key: API key for the data provider + address: Wallet address to query + chain_id: Blockchain network ID + + Returns: + Token balances with additional data + """ + endpoint = "wallets/{address}/tokens" + return await fetch_moralis_data(api_key, endpoint, address, chain_id) + + +# NFT Balances +async def fetch_nft_data( + api_key: str, address: str, chain_id: int = None, params: dict = None +) -> dict: + """Get NFT balances. 
+ + Args: + api_key: API key for the data provider + address: Wallet address to query + chain_id: Blockchain network ID + params: Additional query parameters + + Returns: + NFT data including metadata + """ + endpoint = "{address}/nft" + default_params = {"normalizeMetadata": True} + if params: + default_params.update(params) + return await fetch_moralis_data( + api_key, endpoint, address, chain_id, default_params + ) + + +# Transaction History +async def fetch_transaction_history( + api_key: str, + address: str, + chain_id: int = None, + cursor: str = None, + limit: int = 100, +) -> dict: + """Get wallet transaction history. + + Args: + api_key: API key for the data provider + address: Wallet address to query + chain_id: Blockchain network ID + cursor: Pagination cursor + limit: Maximum number of transactions to return + + Returns: + Transaction history data + """ + endpoint = "wallets/{address}/history" + params = {"limit": limit} + if cursor: + params["cursor"] = cursor + return await fetch_moralis_data(api_key, endpoint, address, chain_id, params) + + +# Token Approvals +async def fetch_token_approvals( + api_key: str, address: str, chain_id: int = None +) -> dict: + """Get token approvals. + + Args: + api_key: API key for the data provider + address: Wallet address to query + chain_id: Blockchain network ID + + Returns: + Token approval data + """ + endpoint = "wallets/{address}/approvals" + return await fetch_moralis_data(api_key, endpoint, address, chain_id) + + +# Net Worth +async def fetch_net_worth(api_key: str, address: str) -> dict: + """Get wallet net worth. + + Args: + api_key: API key for the data provider + address: Wallet address to query + + Returns: + Net worth data across all chains + """ + endpoint = "wallets/{address}/net-worth" + return await fetch_moralis_data(api_key, endpoint, address) + + +############################################# +# Solana API +############################################# + + +async def fetch_solana_api(api_key: str, endpoint: str, params: Dict = None) -> Dict: + """Base function for Solana API calls using Moralis. + + Args: + api_key: API key for the data provider + endpoint: API endpoint + params: Additional query parameters + + Returns: + Response data from the API or error + """ + + if not api_key: + logger.error("API key not configured") + return {"error": "API key not configured"} + + base_url = "https://solana-gateway.moralis.io" + headers = {"X-API-Key": api_key} + url = f"{base_url}{endpoint}" + + async with httpx.AsyncClient() as client: + try: + response = await client.get(url, headers=headers, params=params) + response.raise_for_status() + return response.json() + except httpx.RequestError as e: + logger.error(f"Solana API request error: {e}") + return {"error": str(e)} + except httpx.HTTPStatusError as e: + logger.error( + f"Solana API error: {e.response.status_code} {e.response.text}" + ) + return {"error": f"HTTP error {e.response.status_code}: {e.response.text}"} + + +async def get_solana_portfolio( + api_key: str, address: str, network: str = "mainnet" +) -> Dict: + """Get complete portfolio for a Solana wallet. 
+
+    Args:
+        api_key: API key for the data provider
+        address: Solana wallet address
+        network: Solana network (mainnet or devnet)
+
+    Returns:
+        Complete portfolio including SOL and SPL tokens
+    """
+    endpoint = f"/account/{network}/{address}/portfolio"
+    return await fetch_solana_api(api_key, endpoint)
+
+
+async def get_solana_balance(
+    api_key: str, address: str, network: str = "mainnet"
+) -> Dict:
+    """Get native SOL balance.
+
+    Args:
+        api_key: API key for the data provider
+        address: Solana wallet address
+        network: Solana network (mainnet or devnet)
+
+    Returns:
+        Native SOL balance
+    """
+    endpoint = f"/account/{network}/{address}/balance"
+    return await fetch_solana_api(api_key, endpoint)
+
+
+async def get_solana_spl_tokens(
+    api_key: str, address: str, network: str = "mainnet"
+) -> Dict:
+    """Get SPL token balances.
+
+    Args:
+        api_key: API key for the data provider
+        address: Solana wallet address
+        network: Solana network (mainnet or devnet)
+
+    Returns:
+        SPL token balances
+    """
+    endpoint = f"/account/{network}/{address}/tokens"
+    return await fetch_solana_api(api_key, endpoint)
+
+
+async def get_solana_nfts(api_key: str, address: str, network: str = "mainnet") -> Dict:
+    """Get NFTs owned by a Solana wallet.
+
+    Args:
+        api_key: API key for the data provider
+        address: Solana wallet address
+        network: Solana network (mainnet or devnet)
+
+    Returns:
+        NFT holdings
+    """
+    endpoint = f"/account/{network}/{address}/nft"
+    return await fetch_solana_api(api_key, endpoint)
+
+
+async def get_token_price(
+    api_key: str, token_address: str, network: str = "mainnet"
+) -> Dict:
+    """Get token price by mint address.
+
+    Args:
+        api_key: API key for the data provider
+        token_address: Token mint address
+        network: Solana network (mainnet or devnet)
+
+    Returns:
+        Token price data
+    """
+    endpoint = f"/token/{network}/{token_address}/price"
+    return await fetch_solana_api(api_key, endpoint)
diff --git a/intentkit/skills/moralis/base.py b/intentkit/skills/moralis/base.py
new file mode 100644
index 00000000..ad625560
--- /dev/null
+++ b/intentkit/skills/moralis/base.py
@@ -0,0 +1,69 @@
+"""Base class for Wallet Portfolio tools."""
+
+from typing import List, Optional, Type
+
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+# Chain ID to chain name mapping for EVM chains
+CHAIN_MAPPING = {
+    1: "eth",
+    56: "bsc",
+    137: "polygon",
+    42161: "arbitrum",
+    10: "optimism",
+    43114: "avalanche",
+    250: "fantom",
+    8453: "base",
+}
+
+# Solana networks
+SOLANA_NETWORKS = ["mainnet", "devnet"]
+
+
+class WalletBaseTool(IntentKitSkill):
+    """Base class for all wallet portfolio tools."""
+
+    name: str = Field(description="The name of the tool")
+    description: str = Field(description="A description of what the tool does")
+    args_schema: Type[BaseModel]
+    skill_store: SkillStoreABC = Field(
+        description="The skill store for persisting data"
+    )
+    # Declared explicitly because get_wallet_skill constructs tools with
+    # api_key=... and the tools read self.api_key directly.
+    api_key: Optional[str] = Field(
+        default=None, description="Moralis API key used by this tool instance"
+    )
+
+    # Optional fields for blockchain providers
+    solana_networks: Optional[List[str]] = Field(
+        default=SOLANA_NETWORKS, description="Supported Solana networks"
+    )
+
+    def get_api_key(self) -> str:
+        context = self.get_context()
+        skill_config = context.agent.skill_config(self.category)
+        api_key_provider = skill_config.get("api_key_provider")
+        if api_key_provider == "platform":
+            return self.skill_store.get_system_config("moralis_api_key")
+        # for backward compatibility, may only have api_key in
skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "moralis" + + def _get_chain_name(self, chain_id: int) -> str: + """Convert chain ID to chain name for API calls. + + Args: + chain_id: The blockchain network ID + + Returns: + The chain name used by the API + """ + return CHAIN_MAPPING.get(chain_id, "eth") diff --git a/intentkit/skills/moralis/fetch_chain_portfolio.py b/intentkit/skills/moralis/fetch_chain_portfolio.py new file mode 100644 index 00000000..725a8773 --- /dev/null +++ b/intentkit/skills/moralis/fetch_chain_portfolio.py @@ -0,0 +1,191 @@ +"""fetching wallet portfolio for a specific blockchain.""" + +import logging +from typing import List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.moralis.api import fetch_token_approvals, fetch_wallet_balances +from intentkit.skills.moralis.base import WalletBaseTool + +logger = logging.getLogger(__name__) + + +class FetchChainPortfolioInput(BaseModel): + """Input for FetchChainPortfolio tool.""" + + address: str = Field(..., description="Wallet address") + chain_id: int = Field(..., description="Chain ID to fetch portfolio for") + include_approvals: bool = Field( + default=False, description="Whether to include token approvals in the response" + ) + + +class ChainTokenBalance(BaseModel): + """Model for token balance on a specific chain.""" + + contract_address: str = Field(..., description="Token contract address") + symbol: str = Field(..., description="Token symbol") + name: str = Field(..., description="Token name") + logo: Optional[str] = Field(None, description="Token logo URL") + decimals: int = Field(..., description="Token decimals") + balance: float = Field(..., description="Token balance") + balance_raw: str = Field(..., description="Raw token balance") + balance_usd: float = Field(0.0, description="USD value of token balance") + + +class TokenApproval(BaseModel): + """Model for token approval.""" + + token_address: str = Field(..., description="Token contract address") + token_symbol: Optional[str] = Field(None, description="Token symbol") + token_name: Optional[str] = Field(None, description="Token name") + spender: str = Field(..., description="Spender address (contract)") + spender_name: Optional[str] = Field(None, description="Spender name if known") + allowance: str = Field(..., description="Raw approval amount") + allowance_formatted: Optional[float] = Field( + None, description="Formatted approval amount" + ) + unlimited: bool = Field(False, description="Whether the approval is unlimited") + + +class ChainPortfolioOutput(BaseModel): + """Output for FetchChainPortfolio tool.""" + + address: str = Field(..., description="Wallet address") + chain_id: int = Field(..., description="Chain ID") + chain_name: str = Field(..., description="Chain name") + native_token: Optional[ChainTokenBalance] = Field( + None, description="Native token balance" + ) + tokens: List[ChainTokenBalance] = Field( + default_factory=list, description="List of token balances" + ) + total_usd_value: float = Field(0.0, description="Total USD value on this chain") + approvals: Optional[List[TokenApproval]] = Field( + None, description="Token approvals if requested" + ) + error: Optional[str] = Field(None, description="Error message if any") + + +class FetchChainPortfolio(WalletBaseTool): + """Tool for fetching wallet portfolio 
for a specific blockchain. + + This tool retrieves detailed information about a wallet's holdings on a specific + blockchain, including token balances, USD values, and optionally token approvals. + """ + + name: str = "moralis_fetch_chain_portfolio" + description: str = ( + "This tool fetches wallet portfolio data for a specific blockchain.\n" + "Provide a wallet address and chain ID to get detailed information about tokens and their values.\n" + "Returns:\n" + "- Token balances for the specified chain\n" + "- USD value of each token\n" + "- Total chain value\n" + "- Token metadata (symbol, name, decimals)\n" + "- Token approvals (optional)\n" + "Use this tool whenever a user wants to see their holdings on a specific blockchain." + ) + args_schema: Type[BaseModel] = FetchChainPortfolioInput + + async def _arun( + self, address: str, chain_id: int, include_approvals: bool = False, **kwargs + ) -> ChainPortfolioOutput: + """Fetch wallet portfolio for a specific chain. + + Args: + address: Wallet address to fetch portfolio for + chain_id: Chain ID to fetch portfolio for + include_approvals: Whether to include token approvals + + Returns: + ChainPortfolioOutput containing portfolio data for the specified chain + """ + try: + # Fetch wallet balances for the specified chain + balances = await fetch_wallet_balances(self.api_key, address, chain_id) + + if "error" in balances: + return ChainPortfolioOutput( + address=address, + chain_id=chain_id, + chain_name=self._get_chain_name(chain_id), + error=balances["error"], + ) + + # Process the data + portfolio = { + "address": address, + "chain_id": chain_id, + "chain_name": self._get_chain_name(chain_id), + "tokens": [], + "total_usd_value": 0.0, + } + + for token in balances.get("result", []): + token_balance = ChainTokenBalance( + contract_address=token["token_address"], + symbol=token.get("symbol", "UNKNOWN"), + name=token.get("name", "Unknown Token"), + logo=token.get("logo", None), + decimals=token.get("decimals", 18), + balance=float(token.get("balance_formatted", 0)), + balance_raw=token.get("balance", "0"), + balance_usd=float(token.get("usd_value", 0)), + ) + + # Identify native token + if token.get("native_token", False): + portfolio["native_token"] = token_balance + else: + portfolio["tokens"].append(token_balance) + + # Add to total USD value + portfolio["total_usd_value"] += token_balance.balance_usd + + # Fetch token approvals if requested + if include_approvals: + approvals_data = await fetch_token_approvals( + self.api_key, address, chain_id + ) + + if "error" not in approvals_data: + approvals = [] + + for approval in approvals_data.get("result", []): + # Determine if the approval is unlimited (max uint256) + allowance = approval.get("allowance", "0") + is_unlimited = ( + allowance + == "115792089237316195423570985008687907853269984665640564039457584007913129639935" + ) + + # Create approval object + token_approval = TokenApproval( + token_address=approval.get("token_address", ""), + token_symbol=approval.get("token_symbol"), + token_name=approval.get("token_name"), + spender=approval.get("spender", ""), + spender_name=approval.get("spender_name"), + allowance=allowance, + allowance_formatted=float( + approval.get("allowance_formatted", 0) + ), + unlimited=is_unlimited, + ) + + approvals.append(token_approval) + + portfolio["approvals"] = approvals + + return ChainPortfolioOutput(**portfolio) + + except Exception as e: + logger.error(f"Error fetching chain portfolio: {str(e)}") + return ChainPortfolioOutput( + address=address, + 
chain_id=chain_id, + chain_name=self._get_chain_name(chain_id), + error=str(e), + ) diff --git a/intentkit/skills/moralis/fetch_nft_portfolio.py b/intentkit/skills/moralis/fetch_nft_portfolio.py new file mode 100644 index 00000000..97ea296e --- /dev/null +++ b/intentkit/skills/moralis/fetch_nft_portfolio.py @@ -0,0 +1,284 @@ +"""fetching NFT portfolio for a wallet.""" + +import json +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.moralis.api import fetch_nft_data, get_solana_nfts +from intentkit.skills.moralis.base import WalletBaseTool + +logger = logging.getLogger(__name__) + + +class FetchNftPortfolioInput(BaseModel): + """Input for FetchNftPortfolio tool.""" + + address: str = Field(..., description="Wallet address") + chain_id: Optional[int] = Field( + None, + description="Chain ID (if not specified, fetches from all supported chains)", + ) + include_solana: bool = Field( + default=False, description="Whether to include Solana NFTs" + ) + solana_network: str = Field( + default="mainnet", description="Solana network to use (mainnet or devnet)" + ) + limit: Optional[int] = Field(100, description="Maximum number of NFTs to return") + normalize_metadata: bool = Field( + True, description="Whether to normalize metadata across different standards" + ) + + +class NftMetadata(BaseModel): + """Model for NFT metadata.""" + + name: Optional[str] = Field(None, description="NFT name") + description: Optional[str] = Field(None, description="NFT description") + image: Optional[str] = Field(None, description="NFT image URL") + animation_url: Optional[str] = Field(None, description="NFT animation URL") + attributes: Optional[List[Dict]] = Field(None, description="NFT attributes/traits") + external_url: Optional[str] = Field(None, description="External URL") + + +class NftItem(BaseModel): + """Model for an NFT item.""" + + token_id: str = Field(..., description="NFT token ID") + token_address: str = Field(..., description="NFT contract address") + contract_type: Optional[str] = Field( + None, description="NFT contract type (ERC721, ERC1155, etc.)" + ) + name: Optional[str] = Field(None, description="NFT name") + symbol: Optional[str] = Field(None, description="NFT symbol") + owner_of: str = Field(..., description="Owner address") + metadata: Optional[NftMetadata] = Field(None, description="NFT metadata") + floor_price: Optional[float] = Field(None, description="Floor price if available") + chain: str = Field("eth", description="Blockchain network") + + +class NftPortfolioOutput(BaseModel): + """Output for FetchNftPortfolio tool.""" + + address: str = Field(..., description="Wallet address") + nfts: List[NftItem] = Field(default_factory=list, description="List of NFT items") + total_count: int = Field(0, description="Total count of NFTs") + chains: List[str] = Field( + default_factory=list, description="Chains included in the response" + ) + cursor: Optional[str] = Field(None, description="Cursor for pagination") + error: Optional[str] = Field(None, description="Error message if any") + + +class FetchNftPortfolio(WalletBaseTool): + """Tool for fetching NFT portfolio for a wallet. + + This tool retrieves detailed information about NFTs owned by a wallet address, + including metadata, media URLs, and floor prices when available. 
+ """ + + name: str = "moralis_fetch_nft_portfolio" + description: str = ( + "This tool fetches NFT holdings for a wallet address.\n" + "Provide a wallet address and optionally a chain ID to get detailed information about NFTs.\n" + "Returns:\n" + "- NFT collection data\n" + "- NFT metadata and attributes\n" + "- Media URLs if available\n" + "- Floor prices if available\n" + "Use this tool whenever a user asks about their NFTs or digital collectibles." + ) + args_schema: Type[BaseModel] = FetchNftPortfolioInput + + async def _arun( + self, + address: str, + chain_id: Optional[int] = None, + include_solana: bool = False, + solana_network: str = "mainnet", + limit: int = 100, + normalize_metadata: bool = True, + **kwargs, + ) -> NftPortfolioOutput: + """Fetch NFT portfolio for a wallet. + + Args: + address: Wallet address to fetch NFTs for + chain_id: Chain ID to fetch NFTs for (if None, fetches from all supported chains) + include_solana: Whether to include Solana NFTs + solana_network: Solana network to use (mainnet or devnet) + limit: Maximum number of NFTs to return + normalize_metadata: Whether to normalize metadata across different standards + + Returns: + NftPortfolioOutput containing NFT portfolio data + """ + try: + # Initialize result + result = {"address": address, "nfts": [], "total_count": 0, "chains": []} + + # Fetch EVM NFTs + if chain_id is not None: + # Fetch from specific chain + await self._fetch_evm_nfts( + address, chain_id, limit, normalize_metadata, result + ) + else: + # Fetch from all supported chains + from intentkit.skills.moralis.base import CHAIN_MAPPING + + for chain_id in CHAIN_MAPPING.keys(): + await self._fetch_evm_nfts( + address, + chain_id, + limit // len(CHAIN_MAPPING), + normalize_metadata, + result, + ) + + # Fetch Solana NFTs if requested + if include_solana: + await self._fetch_solana_nfts(address, solana_network, limit, result) + + return NftPortfolioOutput(**result) + + except Exception as e: + logger.error(f"Error fetching NFT portfolio: {str(e)}") + return NftPortfolioOutput( + address=address, nfts=[], total_count=0, chains=[], error=str(e) + ) + + async def _fetch_evm_nfts( + self, + address: str, + chain_id: int, + limit: int, + normalize_metadata: bool, + result: Dict[str, Any], + ) -> None: + """Fetch NFTs from an EVM chain. 
+ + Args: + address: Wallet address + chain_id: Chain ID + limit: Maximum number of NFTs to return + normalize_metadata: Whether to normalize metadata + result: Result dictionary to update + """ + params = {"limit": limit, "normalizeMetadata": normalize_metadata} + + nft_data = await fetch_nft_data(self.api_key, address, chain_id, params) + + if "error" in nft_data: + return + + chain_name = self._get_chain_name(chain_id) + if chain_name not in result["chains"]: + result["chains"].append(chain_name) + + result["total_count"] += nft_data.get("total", 0) + + if "cursor" in nft_data: + result["cursor"] = nft_data["cursor"] + + for nft in nft_data.get("result", []): + # Extract metadata + metadata = None + if "metadata" in nft and nft["metadata"]: + try: + if isinstance(nft["metadata"], str): + metadata_dict = json.loads(nft["metadata"]) + else: + metadata_dict = nft["metadata"] + + metadata = NftMetadata( + name=metadata_dict.get("name"), + description=metadata_dict.get("description"), + image=metadata_dict.get("image"), + animation_url=metadata_dict.get("animation_url"), + attributes=metadata_dict.get("attributes"), + external_url=metadata_dict.get("external_url"), + ) + except Exception as e: + logger.warning(f"Error parsing NFT metadata: {str(e)}") + # If metadata parsing fails, continue without it + pass + + # Create NFT item + nft_item = NftItem( + token_id=nft.get("token_id", ""), + token_address=nft.get("token_address", ""), + contract_type=nft.get("contract_type"), + name=nft.get("name"), + symbol=nft.get("symbol"), + owner_of=nft.get("owner_of", address), + metadata=metadata, + floor_price=nft.get("floor_price"), + chain=chain_name, + ) + + result["nfts"].append(nft_item) + + async def _fetch_solana_nfts( + self, address: str, network: str, limit: int, result: Dict[str, Any] + ) -> None: + """Fetch NFTs from Solana. 
+ + Args: + address: Wallet address + network: Solana network + limit: Maximum number of NFTs to return + result: Result dictionary to update + """ + chain_name = "solana" + if chain_name not in result["chains"]: + result["chains"].append(chain_name) + + nfts_result = await get_solana_nfts(self.api_key, address, network) + + if "error" in nfts_result: + return + + if not isinstance(nfts_result, list): + return + + count = min(limit, len(nfts_result)) + result["total_count"] += count + + for i, nft in enumerate(nfts_result): + if i >= limit: + break + + # Create NFT item + metadata = None + if "metadata" in nft and nft["metadata"]: + try: + metadata_dict = nft["metadata"] + if isinstance(metadata_dict, str): + metadata_dict = json.loads(metadata_dict) + + metadata = NftMetadata( + name=metadata_dict.get("name"), + description=metadata_dict.get("description"), + image=metadata_dict.get("image"), + animation_url=metadata_dict.get("animation_url"), + attributes=metadata_dict.get("attributes"), + external_url=metadata_dict.get("external_url"), + ) + except Exception as e: + logger.warning(f"Error parsing Solana NFT metadata: {str(e)}") + pass + + nft_item = NftItem( + token_id=nft.get("mint", ""), # Use mint address as token ID + token_address=nft.get("mint", ""), # Use mint address as token address + name=nft.get("name"), + symbol=nft.get("symbol"), + owner_of=address, + metadata=metadata, + chain=chain_name, + ) + + result["nfts"].append(nft_item) diff --git a/intentkit/skills/moralis/fetch_solana_portfolio.py b/intentkit/skills/moralis/fetch_solana_portfolio.py new file mode 100644 index 00000000..a9be3d03 --- /dev/null +++ b/intentkit/skills/moralis/fetch_solana_portfolio.py @@ -0,0 +1,331 @@ +"""fetching Solana wallet portfolio.""" + +import logging +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.moralis.api import ( + get_solana_balance, + get_solana_nfts, + get_solana_portfolio, + get_solana_spl_tokens, + get_token_price, +) +from intentkit.skills.moralis.base import WalletBaseTool + +logger = logging.getLogger(__name__) + + +class SolanaPortfolioInput(BaseModel): + """Input for FetchSolanaPortfolio tool.""" + + address: str = Field(..., description="Solana wallet address") + network: str = Field( + default="mainnet", description="Solana network to use (mainnet or devnet)" + ) + include_nfts: bool = Field( + default=False, description="Whether to include NFTs in the response" + ) + include_price_data: bool = Field( + default=True, description="Whether to include price data for tokens" + ) + + +class SolanaTokenInfo(BaseModel): + """Model for Solana token information.""" + + symbol: str + name: str + decimals: int + mint: str + associated_token_address: str + + +class SolanaTokenBalance(BaseModel): + """Model for Solana token balance.""" + + token_info: SolanaTokenInfo + amount: float + amount_raw: str + usd_value: Optional[float] = 0.0 + + +class SolanaNftInfo(BaseModel): + """Model for Solana NFT information.""" + + mint: str + name: Optional[str] = None + symbol: Optional[str] = None + associated_token_address: str + metadata: Optional[Dict] = None + + +class SolanaPortfolioOutput(BaseModel): + """Output for FetchSolanaPortfolio tool.""" + + address: str + sol_balance: float + sol_balance_lamports: int + sol_price_usd: Optional[float] = None + sol_value_usd: Optional[float] = None + tokens: List[SolanaTokenBalance] = [] + nfts: List[SolanaNftInfo] = [] + total_value_usd: float = 0.0 + error: Optional[str] = None + + 
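+# NOTE: The payload shape this module expects from the Moralis Solana
+# portfolio endpoint, inferred from the parsing below and mirrored by the
+# accompanying tests (values are illustrative):
+#
+# {
+#     "nativeBalance": {"solana": 1.5, "lamports": 1500000000},
+#     "tokens": [
+#         {
+#             "symbol": "TEST",
+#             "name": "Test Token",
+#             "mint": "TokenMintAddress",
+#             "associatedTokenAddress": "AssocTokenAddress",
+#             "amount": 10,
+#             "decimals": 9,
+#             "amountRaw": "10000000000",
+#         }
+#     ],
+# }
+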
+class FetchSolanaPortfolio(WalletBaseTool): + """Tool for fetching Solana wallet portfolio. + + This tool retrieves detailed information about a Solana wallet's holdings, + including native SOL, SPL tokens, and optionally NFTs. + """ + + name: str = "moralis_fetch_solana_portfolio" + description: str = ( + "Get comprehensive portfolio data for a Solana wallet including:\n" + "- Native SOL balance\n" + "- SPL token balances\n" + "- NFT holdings (optional)\n" + "- USD values of assets\n" + "Use this tool whenever the user asks specifically about Solana holdings." + ) + args_schema: Type[BaseModel] = SolanaPortfolioInput + + async def _arun( + self, + address: str, + network: str = "mainnet", + include_nfts: bool = False, + include_price_data: bool = True, + **kwargs, + ) -> SolanaPortfolioOutput: + """Fetch Solana wallet portfolio data. + + Args: + address: Solana wallet address + network: Solana network to use (mainnet or devnet) + include_nfts: Whether to include NFTs in the response + include_price_data: Whether to include price data for tokens + + Returns: + SolanaPortfolioOutput containing the Solana wallet's portfolio data + """ + try: + # Try to get complete portfolio + sol_portfolio = await get_solana_portfolio(self.api_key, address, network) + + if "error" not in sol_portfolio: + return await self._process_portfolio_data( + address, network, sol_portfolio, include_nfts, include_price_data + ) + else: + # If portfolio endpoint fails, try to fetch data separately + return await self._fetch_separate_portfolio_data( + address, network, include_nfts, include_price_data + ) + + except Exception as e: + logger.error(f"Error fetching Solana portfolio: {str(e)}") + return SolanaPortfolioOutput( + address=address, sol_balance=0, sol_balance_lamports=0, error=str(e) + ) + + async def _process_portfolio_data( + self, + address: str, + network: str, + sol_portfolio: Dict, + include_nfts: bool, + include_price_data: bool, + ) -> SolanaPortfolioOutput: + """Process portfolio data from the API. 
+ + Args: + address: Solana wallet address + network: Solana network + sol_portfolio: Portfolio data from the API + include_nfts: Whether to include NFTs + include_price_data: Whether to include price data + + Returns: + SolanaPortfolioOutput with processed data + """ + result = SolanaPortfolioOutput( + address=address, + sol_balance=float(sol_portfolio.get("nativeBalance", {}).get("solana", 0)), + sol_balance_lamports=int( + sol_portfolio.get("nativeBalance", {}).get("lamports", 0) + ), + ) + + # Process tokens + tokens = [] + for token in sol_portfolio.get("tokens", []): + token_info = SolanaTokenInfo( + symbol=token.get("symbol", ""), + name=token.get("name", ""), + decimals=int(token.get("decimals", 0)), + mint=token.get("mint", ""), + associated_token_address=token.get("associatedTokenAddress", ""), + ) + + token_balance = SolanaTokenBalance( + token_info=token_info, + amount=float(token.get("amount", 0)), + amount_raw=token.get("amountRaw", "0"), + ) + + tokens.append(token_balance) + + result.tokens = tokens + + # Fetch NFTs if requested + if include_nfts: + nfts_result = await get_solana_nfts(self.api_key, address, network) + + if "error" not in nfts_result and isinstance(nfts_result, list): + nfts = [] + for nft in nfts_result: + nft_info = SolanaNftInfo( + mint=nft.get("mint", ""), + name=nft.get("name"), + symbol=nft.get("symbol"), + associated_token_address=nft.get("associatedTokenAddress", ""), + metadata=nft.get("metadata"), + ) + nfts.append(nft_info) + + result.nfts = nfts + + # Fetch price data if requested + if include_price_data: + # Fetch SOL price + sol_price_result = await get_token_price( + self.api_key, + "So11111111111111111111111111111111111111112", # SOL mint address + network, + ) + + if "error" not in sol_price_result: + sol_price_usd = float(sol_price_result.get("usdPrice", 0)) + result.sol_price_usd = sol_price_usd + result.sol_value_usd = sol_price_usd * result.sol_balance + result.total_value_usd += result.sol_value_usd or 0 + + # Fetch token prices + for token in result.tokens: + if token.token_info.mint: + price_result = await get_token_price( + self.api_key, token.token_info.mint, network + ) + + if "error" not in price_result: + token_price_usd = float(price_result.get("usdPrice", 0)) + token.usd_value = token_price_usd * token.amount + result.total_value_usd += token.usd_value + + return result + + async def _fetch_separate_portfolio_data( + self, address: str, network: str, include_nfts: bool, include_price_data: bool + ) -> SolanaPortfolioOutput: + """Fetch portfolio data using separate API calls. 
+ + Args: + address: Solana wallet address + network: Solana network + include_nfts: Whether to include NFTs + include_price_data: Whether to include price data + + Returns: + SolanaPortfolioOutput with processed data + """ + # Get SOL balance + balance_result = await get_solana_balance(self.api_key, address, network) + + if "error" in balance_result: + return SolanaPortfolioOutput( + address=address, + sol_balance=0, + sol_balance_lamports=0, + error=balance_result["error"], + ) + + result = SolanaPortfolioOutput( + address=address, + sol_balance=float(balance_result.get("solana", 0)), + sol_balance_lamports=int(balance_result.get("lamports", 0)), + ) + + # Get SPL tokens + tokens_result = await get_solana_spl_tokens(self.api_key, address, network) + + if "error" not in tokens_result and isinstance(tokens_result, list): + tokens = [] + for token in tokens_result: + token_info = SolanaTokenInfo( + symbol=token.get("symbol", ""), + name=token.get("name", ""), + decimals=int(token.get("decimals", 0)), + mint=token.get("mint", ""), + associated_token_address=token.get("associatedTokenAddress", ""), + ) + + token_balance = SolanaTokenBalance( + token_info=token_info, + amount=float(token.get("amount", 0)), + amount_raw=token.get("amountRaw", "0"), + ) + + tokens.append(token_balance) + + result.tokens = tokens + + # Fetch NFTs if requested + if include_nfts: + nfts_result = await get_solana_nfts(self.api_key, address, network) + + if "error" not in nfts_result and isinstance(nfts_result, list): + nfts = [] + for nft in nfts_result: + nft_info = SolanaNftInfo( + mint=nft.get("mint", ""), + name=nft.get("name"), + symbol=nft.get("symbol"), + associated_token_address=nft.get("associatedTokenAddress", ""), + metadata=nft.get("metadata"), + ) + nfts.append(nft_info) + + result.nfts = nfts + + # Fetch price data if requested + if include_price_data: + # Fetch SOL price + sol_price_result = await get_token_price( + self.api_key, + "So11111111111111111111111111111111111111112", # SOL mint address + network, + ) + + if "error" not in sol_price_result: + sol_price_usd = float(sol_price_result.get("usdPrice", 0)) + result.sol_price_usd = sol_price_usd + result.sol_value_usd = sol_price_usd * result.sol_balance + result.total_value_usd += result.sol_value_usd or 0 + + # Fetch token prices + for token in result.tokens: + if token.token_info.mint: + price_result = await get_token_price( + self.api_key, token.token_info.mint, network + ) + + if "error" not in price_result: + token_price_usd = float(price_result.get("usdPrice", 0)) + token.usd_value = token_price_usd * token.amount + result.total_value_usd += token.usd_value + + return result diff --git a/intentkit/skills/moralis/fetch_wallet_portfolio.py b/intentkit/skills/moralis/fetch_wallet_portfolio.py new file mode 100644 index 00000000..ee0b811f --- /dev/null +++ b/intentkit/skills/moralis/fetch_wallet_portfolio.py @@ -0,0 +1,301 @@ +"""fetching a complete wallet portfolio (EVM + Solana).""" + +import logging +from typing import Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.moralis.api import ( + fetch_net_worth, + fetch_wallet_balances, + get_solana_balance, + get_solana_portfolio, + get_solana_spl_tokens, + get_token_price, +) +from intentkit.skills.moralis.base import CHAIN_MAPPING, WalletBaseTool + +logger = logging.getLogger(__name__) + + +class FetchWalletPortfolioInput(BaseModel): + """Input for FetchWalletPortfolio tool.""" + + address: str = Field( + ..., description="Wallet address to analyze 
(Ethereum or Solana)" + ) + chains: Optional[List[int]] = Field( + default=None, + description="List of EVM chain IDs to check (default: all supported)", + ) + include_solana: bool = Field( + default=True, description="Whether to include Solana in the analysis" + ) + solana_network: str = Field( + default="mainnet", description="Solana network to use (mainnet or devnet)" + ) + + +class TokenBalance(BaseModel): + """Model for token balance.""" + + symbol: str + name: str + balance: float + usd_value: float + chain: str + + +class PortfolioOutput(BaseModel): + """Output for FetchWalletPortfolio tool.""" + + address: str + total_net_worth: float + chains: Dict[str, float] + tokens: List[TokenBalance] + error: Optional[str] = None + + +class FetchWalletPortfolio(WalletBaseTool): + """Tool for fetching a complete wallet portfolio across all chains (EVM + Solana). + + This tool retrieves detailed information about a wallet's holdings across + multiple blockchains, including token balances, USD values, and a summary + of the total portfolio value. + """ + + name: str = "moralis_fetch_wallet_portfolio" + description: str = ( + "Get comprehensive portfolio data for a wallet including:\n" + "- Token balances and prices across multiple chains (EVM and Solana)\n" + "- Total net worth estimation\n" + "- Chain distribution of assets\n" + "Use this tool whenever the user asks about their crypto holdings, portfolio value, " + "or wallet contents across multiple blockchains." + ) + args_schema: Type[BaseModel] = FetchWalletPortfolioInput + + async def _arun( + self, + address: str, + chains: Optional[List[int]] = None, + include_solana: bool = True, + solana_network: str = "mainnet", + **kwargs, + ) -> PortfolioOutput: + """Fetch wallet portfolio data across multiple chains. + + Args: + address: Wallet address to fetch portfolio for + chains: List of EVM chain IDs to check (if None, checks all supported chains) + include_solana: Whether to include Solana in the analysis + solana_network: Solana network to use (mainnet or devnet) + + Returns: + PortfolioOutput containing the wallet's portfolio data + """ + try: + # Initialize portfolio data + portfolio = {"tokens": [], "chains": {}, "total_net_worth": 0} + + # Get EVM chain portfolio + await self._fetch_evm_portfolio(address, chains, portfolio) + + # Get Solana portfolio if requested + if include_solana: + await self._fetch_solana_portfolio(address, solana_network, portfolio) + + return PortfolioOutput( + address=address, + total_net_worth=portfolio["total_net_worth"], + chains=portfolio["chains"], + tokens=portfolio["tokens"], + ) + + except Exception as e: + logger.error(f"Error fetching wallet portfolio: {str(e)}") + return PortfolioOutput( + address=address, total_net_worth=0, chains={}, tokens=[], error=str(e) + ) + + async def _fetch_evm_portfolio( + self, address: str, chains: Optional[List[int]], portfolio: Dict + ) -> None: + """Fetch portfolio data for EVM chains. 
+ + Args: + address: Wallet address to fetch portfolio for + chains: List of EVM chain IDs to check (if None, checks all supported chains) + portfolio: Portfolio data to update + """ + # Get chain IDs to query (use all supported chains if not specified) + chain_ids = chains or list(CHAIN_MAPPING.keys()) + + # Get balances for each chain + for chain_id in chain_ids: + balance_data = await fetch_wallet_balances(self.api_key, address, chain_id) + + if "error" in balance_data: + continue + + chain_name = self._get_chain_name(chain_id) + chain_total = 0 + + for token in balance_data.get("result", []): + if token.get("usd_value"): + portfolio["tokens"].append( + TokenBalance( + symbol=token.get("symbol", "UNKNOWN"), + name=token.get("name", "Unknown Token"), + balance=float(token.get("balance_formatted", 0)), + usd_value=token["usd_value"], + chain=chain_name, + ) + ) + chain_total += token["usd_value"] + + portfolio["chains"][chain_name] = chain_total + portfolio["total_net_worth"] += chain_total + + # Add net worth data if available + net_worth = await fetch_net_worth(self.api_key, address) + if "result" in net_worth: + portfolio["total_net_worth"] = net_worth["result"].get( + "total_networth_usd", portfolio["total_net_worth"] + ) + + async def _fetch_solana_portfolio( + self, address: str, network: str, portfolio: Dict + ) -> None: + """Fetch portfolio data for Solana. + + Args: + address: Wallet address to fetch portfolio for + network: Solana network to use (mainnet or devnet) + portfolio: Portfolio data to update + """ + chain_name = "solana" + chain_total = 0 + + # Try to get complete portfolio + sol_portfolio = await get_solana_portfolio(self.api_key, address, network) + + if "error" not in sol_portfolio: + # Process native SOL balance + if "nativeBalance" in sol_portfolio: + sol_balance = float(sol_portfolio["nativeBalance"].get("solana", 0)) + + # Get SOL price + sol_price_result = await get_token_price( + self.api_key, + "So11111111111111111111111111111111111111112", # SOL mint address + network, + ) + + sol_price_usd = 0 + if "error" not in sol_price_result: + sol_price_usd = float(sol_price_result.get("usdPrice", 0)) + + sol_value_usd = sol_balance * sol_price_usd + chain_total += sol_value_usd + + # Add SOL to tokens + portfolio["tokens"].append( + TokenBalance( + symbol="SOL", + name="Solana", + balance=sol_balance, + usd_value=sol_value_usd, + chain=chain_name, + ) + ) + + # Process SPL tokens + for token in sol_portfolio.get("tokens", []): + token_balance = { + "symbol": token.get("symbol", "UNKNOWN"), + "name": token.get("name", "Unknown Token"), + "balance": float(token.get("amount", 0)), + "usd_value": 0, # Will update if price is available + "chain": chain_name, + } + + # Try to get token price + if token.get("mint"): + price_result = await get_token_price( + self.api_key, token["mint"], network + ) + + if "error" not in price_result: + token_price_usd = float(price_result.get("usdPrice", 0)) + token_balance["usd_value"] = ( + token_balance["balance"] * token_price_usd + ) + chain_total += token_balance["usd_value"] + + portfolio["tokens"].append(TokenBalance(**token_balance)) + else: + # If portfolio endpoint fails, try to fetch balance and tokens separately + sol_balance_result = await get_solana_balance( + self.api_key, address, network + ) + + if "error" not in sol_balance_result: + sol_balance = float(sol_balance_result.get("solana", 0)) + + # Get SOL price + sol_price_result = await get_token_price( + self.api_key, + "So11111111111111111111111111111111111111112", # SOL 
mint address + network, + ) + + sol_price_usd = 0 + if "error" not in sol_price_result: + sol_price_usd = float(sol_price_result.get("usdPrice", 0)) + + sol_value_usd = sol_balance * sol_price_usd + chain_total += sol_value_usd + + # Add SOL to tokens + portfolio["tokens"].append( + TokenBalance( + symbol="SOL", + name="Solana", + balance=sol_balance, + usd_value=sol_value_usd, + chain=chain_name, + ) + ) + + # Get SPL tokens + tokens_result = await get_solana_spl_tokens(self.api_key, address, network) + + if "error" not in tokens_result and isinstance(tokens_result, list): + for token in tokens_result: + token_balance = { + "symbol": token.get("symbol", "UNKNOWN"), + "name": token.get("name", "Unknown Token"), + "balance": float(token.get("amount", 0)), + "usd_value": 0, # Will update if price is available + "chain": chain_name, + } + + # Try to get token price + if token.get("mint"): + price_result = await get_token_price( + self.api_key, token["mint"], network + ) + + if "error" not in price_result: + token_price_usd = float(price_result.get("usdPrice", 0)) + token_balance["usd_value"] = ( + token_balance["balance"] * token_price_usd + ) + chain_total += token_balance["usd_value"] + + portfolio["tokens"].append(TokenBalance(**token_balance)) + + # Update chain total and net worth + portfolio["chains"][chain_name] = chain_total + portfolio["total_net_worth"] += chain_total diff --git a/intentkit/skills/moralis/moralis.png b/intentkit/skills/moralis/moralis.png new file mode 100644 index 00000000..04f36ca7 Binary files /dev/null and b/intentkit/skills/moralis/moralis.png differ diff --git a/intentkit/skills/moralis/schema.json b/intentkit/skills/moralis/schema.json new file mode 100644 index 00000000..96035133 --- /dev/null +++ b/intentkit/skills/moralis/schema.json @@ -0,0 +1,156 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Moralis", + "x-icon": "https://ai.service.crestal.dev/skills/moralis/moralis.png", + "description": "Comprehensive blockchain data access via Moralis API providing wallet portfolio information, NFT data, and transaction details across multiple EVM chains and Solana networks", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "title": "Skill States", + "description": "States for each skill (disabled, public, or private)", + "properties": { + "fetch_wallet_portfolio": { + "type": "string", + "title": "Fetch Wallet Portfolio", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetches a complete wallet portfolio across all chains (EVM + Solana), retrieving detailed information about a wallet's holdings, token balances, USD values, and total portfolio value." + }, + "fetch_chain_portfolio": { + "type": "string", + "title": "Fetch Chain Portfolio", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetches wallet portfolio for a specific blockchain, retrieving detailed information about a wallet's holdings, token balances, USD values, and optionally token approvals." 
+        },
+        "fetch_nft_portfolio": {
+          "type": "string",
+          "title": "Fetch NFT Portfolio",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Fetches NFT portfolio for a wallet, retrieving detailed information about NFTs owned by a wallet address, including metadata, media URLs, and floor prices when available."
+        },
+        "fetch_solana_portfolio": {
+          "type": "string",
+          "title": "Fetch Solana Portfolio",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Fetches Solana wallet portfolio, retrieving detailed information about a Solana wallet's holdings, including native SOL, SPL tokens, and optionally NFTs."
+        }
+      },
+      "required": [
+        "fetch_wallet_portfolio",
+        "fetch_chain_portfolio",
+        "fetch_nft_portfolio",
+        "fetch_solana_portfolio"
+      ]
+    },
+    "supported_chains": {
+      "type": "object",
+      "title": "Supported Blockchain Networks",
+      "description": "Configure which blockchain networks are supported",
+      "properties": {
+        "evm": {
+          "type": "boolean",
+          "title": "EVM Chains",
+          "description": "Whether to support EVM-compatible chains (Ethereum, Binance Smart Chain, etc.)",
+          "default": true
+        },
+        "solana": {
+          "type": "boolean",
+          "title": "Solana",
+          "description": "Whether to support Solana blockchain",
+          "default": true
+        }
+      }
+    },
+    "api_key_provider": {
+      "type": "string",
+      "title": "API Key Provider",
+      "description": "Provider of the API key",
+      "enum": [
+        "platform",
+        "agent_owner"
+      ],
+      "x-enum-title": [
+        "Nation Hosted",
+        "Owner Provided"
+      ],
+      "default": "platform"
+    }
+  },
+  "required": [
+    "states",
+    "enabled"
+  ],
+  "if": {
+    "properties": {
+      "api_key_provider": {
+        "const": "agent_owner"
+      }
+    }
+  },
+  "then": {
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "title": "API Key",
+        "x-link": "[Get your API key](https://developers.moralis.com/)",
+        "x-sensitive": true,
+        "description": "Moralis API key for blockchain data access"
+      }
+    },
+    "if": {
+      "properties": {
+        "enabled": {
+          "const": true
+        }
+      }
+    },
+    "then": {
+      "required": [
+        "api_key"
+      ]
+    }
+  },
+  "additionalProperties": true
+}
\ No newline at end of file
diff --git a/intentkit/skills/moralis/tests/__init__.py b/intentkit/skills/moralis/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/intentkit/skills/moralis/tests/test_wallet.py b/intentkit/skills/moralis/tests/test_wallet.py
new file mode 100644
index 00000000..829b5dcb
--- /dev/null
+++ b/intentkit/skills/moralis/tests/test_wallet.py
@@ -0,0 +1,511 @@
+"""Tests for the Moralis Wallet Portfolio skills."""
+
+import asyncio
+import json
+import unittest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import httpx
+
+from intentkit.skills.moralis import (
+    FetchChainPortfolio,
+    FetchSolanaPortfolio,
+    FetchWalletPortfolio,
+    get_skills,
+)
+from intentkit.skills.moralis.api import (
+    fetch_moralis_data,
+    fetch_wallet_balances,
+    get_solana_portfolio,
+)
+from intentkit.skills.moralis.base import WalletBaseTool
+
+
+class DummyResponse:
+    """Mock HTTP response for testing."""
+
+    def __init__(self, status_code, json_data):
+        self.status_code = status_code
+        self._json_data = json_data
+        self.text = json.dumps(json_data) if json_data else ""
+
+    def json(self):
+        return self._json_data
+
+    def raise_for_status(self):
+        # httpx.Response.raise_for_status is synchronous, so this dummy must
+        # be synchronous too, and it must raise the exception type that
+        # fetch_moralis_data actually catches.
+        if self.status_code >= 400:
+            raise httpx.HTTPStatusError(
+                f"HTTP error {self.status_code}",
+                request=None,
+                response=self,
+            )
+
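+# These tests are self-contained: all network access is mocked. They can be
+# run with the standard library runner, e.g.:
+#   python -m unittest intentkit.skills.moralis.tests.test_wallet
+
+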
+class TestWalletBaseClass(unittest.TestCase):
+    """Test the base wallet portfolio tool class."""
+
+    def setUp(self):
+        self.loop = asyncio.new_event_loop()
+        asyncio.set_event_loop(self.loop)
+
+        self.mock_skill_store = MagicMock()
+
+    def tearDown(self):
+        self.loop.close()
+
+    def test_base_class_init(self):
+        """Test base class initialization."""
+
+        # Create a concrete subclass for testing
+        class TestTool(WalletBaseTool):
+            async def _arun(self, *args, **kwargs):
+                return "test"
+
+        tool = TestTool(
+            name="test_tool",
+            description="Test tool",
+            args_schema=MagicMock(),
+            api_key="test_key",
+            skill_store=self.mock_skill_store,
+            agent_id="test_agent",
+        )
+
+        self.assertEqual(tool.api_key, "test_key")
+        self.assertEqual(tool.agent_id, "test_agent")
+        self.assertEqual(tool.skill_store, self.mock_skill_store)
+        self.assertEqual(tool.category, "moralis")
+
+    def test_get_chain_name(self):
+        """Test chain name conversion."""
+
+        class TestTool(WalletBaseTool):
+            async def _arun(self, *args, **kwargs):
+                return "test"
+
+        tool = TestTool(
+            name="test_tool",
+            description="Test tool",
+            args_schema=MagicMock(),
+            api_key="test_key",
+            skill_store=self.mock_skill_store,
+            agent_id="test_agent",
+        )
+
+        # Test with known chain IDs
+        self.assertEqual(tool._get_chain_name(1), "eth")
+        self.assertEqual(tool._get_chain_name(56), "bsc")
+        self.assertEqual(tool._get_chain_name(137), "polygon")
+
+        # Test with unknown chain ID
+        self.assertEqual(tool._get_chain_name(999999), "eth")
+
+
+class TestAPIFunctions(unittest.IsolatedAsyncioTestCase):
+    """Test the API interaction functions."""
+
+    async def test_fetch_moralis_data(self):
+        """Test the base Moralis API function."""
+        with patch("httpx.AsyncClient") as MockClient:
+            client_instance = AsyncMock()
+            client_instance.get.return_value = DummyResponse(
+                200, {"success": True, "data": "test_data"}
+            )
+            MockClient.return_value.__aenter__.return_value = client_instance
+
+            result = await fetch_moralis_data(
+                "test_api_key", "test_endpoint", "0xAddress", 1
+            )
+
+            self.assertEqual(result, {"success": True, "data": "test_data"})
+
+            # Test error handling: DummyResponse raises httpx.HTTPStatusError
+            # from raise_for_status for 4xx/5xx status codes.
+            client_instance.get.return_value = DummyResponse(404, None)
+
+            result = await fetch_moralis_data(
+                "test_api_key", "test_endpoint", "0xAddress", 1
+            )
+            self.assertIn("error", result)
+
+    async def test_fetch_wallet_balances(self):
+        """Test fetching wallet balances."""
+        with patch("intentkit.skills.moralis.api.fetch_moralis_data") as mock_fetch:
+            mock_fetch.return_value = {
+                "result": [
+                    {
+                        "token_address": "0x123",
+                        "symbol": "TEST",
+                        "balance": "1000000",
+                        "usd_value": 100,
+                    }
+                ]
+            }
+
+            result = await fetch_wallet_balances("test_api_key", "0xAddress", 1)
+
+            self.assertEqual(result["result"][0]["symbol"], "TEST")
+            # fetch_wallet_balances forwards exactly four positional arguments.
+            mock_fetch.assert_called_once_with(
+                "test_api_key", "wallets/{address}/tokens", "0xAddress", 1
+            )
+
+    async def test_get_solana_portfolio(self):
+        """Test getting Solana portfolio."""
+        with patch("intentkit.skills.moralis.api.fetch_solana_api") as mock_fetch:
+            mock_fetch.return_value = {
+                "nativeBalance": {"solana": 1.5, "lamports": 1500000000},
+                "tokens": [
+                    {
+                        "symbol": "TEST",
+                        "name": "Test Token",
+                        "mint": "TokenMintAddress",
+                        "associatedTokenAddress": "AssocTokenAddress",
+                        "amount": 10,
+                        "decimals": 9,
+                        "amountRaw": "10000000000",
+                    }
+                ],
+            }
+
+            result = await get_solana_portfolio("test_api_key", "SolAddress", "mainnet")
+
+            mock_fetch.assert_called_once_with(
+                "test_api_key", "/account/mainnet/SolAddress/portfolio"
+            )
+            self.assertEqual(result["nativeBalance"]["solana"], 1.5)
+            self.assertEqual(len(result["tokens"]), 1)
+            self.assertEqual(result["tokens"][0]["symbol"], "TEST")
+
+
+class TestFetchWalletPortfolio(unittest.IsolatedAsyncioTestCase):
+    """Test the FetchWalletPortfolio skill."""
+
+    async def test_wallet_portfolio_success(self):
+        """Test successful wallet portfolio fetch."""
+        mock_skill_store = MagicMock()
+
+        with (
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_wallet_portfolio.fetch_wallet_balances"
+            ) as mock_balances,
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_wallet_portfolio.fetch_net_worth"
+            ) as mock_net_worth,
+        ):
+            # Mock successful responses
+            mock_balances.return_value = {
+                "result": [
+                    {
+                        "token_address": "0x123",
+                        "symbol": "TEST",
+                        "name": "Test Token",
+                        "balance": "1000000000000000000",
+                        "balance_formatted": "1.0",
+                        "usd_value": 100,
+                    }
+                ]
+            }
+            mock_net_worth.return_value = {"result": {"total_networth_usd": 1000}}
+
+            tool = FetchWalletPortfolio(
+                name="fetch_wallet_portfolio",
+                description="Test description",
+                args_schema=MagicMock(),
+                api_key="test_key",
+                skill_store=mock_skill_store,
+                agent_id="test_agent",
+            )
+
+            result = await tool._arun(address="0xAddress")
+
+            self.assertEqual(result.address, "0xAddress")
+            self.assertEqual(result.total_net_worth, 1000)
+            self.assertEqual(len(result.tokens), 1)
+            self.assertEqual(result.tokens[0].symbol, "TEST")
+
+    async def test_wallet_portfolio_with_solana(self):
+        """Test wallet portfolio with Solana support."""
+        mock_skill_store = MagicMock()
+
+        with (
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_wallet_portfolio.fetch_wallet_balances"
+            ) as mock_evm_balances,
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_wallet_portfolio.fetch_net_worth"
+            ) as mock_net_worth,
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_wallet_portfolio.get_solana_portfolio"
+            ) as mock_sol_portfolio,
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_wallet_portfolio.get_token_price"
+            ) as mock_token_price,
+        ):
+            # Mock EVM responses
+            mock_evm_balances.return_value = {
+                "result": [
+                    {
+                        "token_address": "0x123",
+                        "symbol": "ETH",
+                        "name": "Ethereum",
+                        "balance": "1000000000000000000",
+                        "balance_formatted": "1.0",
+                        "usd_value": 2000,
+                    }
+                ]
+            }
+            mock_net_worth.return_value = {"result": {"total_networth_usd": 3000}}
+
+            # Mock Solana responses
+            mock_sol_portfolio.return_value = {
+                "nativeBalance": {"solana": 2.0, "lamports": 2000000000},
+                "tokens": [
+                    {
+                        "symbol": "SOL",
+                        "name": "Solana",
+                        "mint": "So11111111111111111111111111111111111111112",
+                        "associatedTokenAddress": "AssocTokenAddress",
+                        "amount": 2.0,
+                        "decimals": 9,
+                        "amountRaw": "2000000000",
+                    }
+                ],
+            }
+
+            mock_token_price.return_value = {"usdPrice": 500}
+
+            tool = FetchWalletPortfolio(
+                name="fetch_wallet_portfolio",
+                description="Test description",
+                args_schema=MagicMock(),
+                api_key="test_key",
+                skill_store=mock_skill_store,
+                agent_id="test_agent",
+            )
+
+            result = await tool._arun(address="0xAddress", include_solana=True)
+
+            self.assertEqual(result.address, "0xAddress")
+            self.assertEqual(
+                result.total_net_worth, 3000
+            )  # Using the net worth from mock
+            self.assertIn("eth", result.chains)
+            self.assertIn("solana", result.chains)
+
+            # Check that we have both EVM and Solana tokens
+            token_symbols = [token.symbol for token in result.tokens]
+            self.assertIn("ETH", token_symbols)
+            self.assertIn("SOL", token_symbols)
+
+
+class TestFetchSolanaPortfolio(unittest.IsolatedAsyncioTestCase):
+    """Test the FetchSolanaPortfolio skill."""
+
+    async def test_solana_portfolio_success(self):
+        """Test successful Solana portfolio fetch."""
+        mock_skill_store = MagicMock()
+
+        with (
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_solana_portfolio.get_solana_portfolio"
+            ) as mock_portfolio,
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_solana_portfolio.get_solana_nfts"
+            ) as mock_nfts,
+            patch(
+                "intentkit.skills.moralis.moralis_fetch_solana_portfolio.get_token_price"
+            ) as mock_token_price,
+        ):
+            # Mock successful responses
+            mock_portfolio.return_value = {
+                "nativeBalance": {"solana": 1.5, "lamports": 1500000000},
+                "tokens": [
+                    {
+                        "symbol": "TEST",
+                        "name": "Test Token",
+                        "mint": "TokenMintAddress",
+                        "associatedTokenAddress": "AssocTokenAddress",
+                        "amount": 10,
+                        "decimals": 9,
+                        "amountRaw": "10000000000",
+                    }
+                ],
+            }
+
+            mock_nfts.return_value = [
+                {
+                    "mint": "NFTMintAddress",
+                    "name": "Test NFT",
+                    "symbol": "TNFT",
+                    "associatedTokenAddress": "AssocTokenAddress",
+                    "metadata": {"name": "Test NFT", "image": "image.png"},
+                }
+            ]
+
+            mock_token_price.return_value = {"usdPrice": 25}
+
+            tool = FetchSolanaPortfolio(
+                name="fetch_solana_portfolio",
+                description="Test description",
+                args_schema=MagicMock(),
+                api_key="test_key",
+                skill_store=mock_skill_store,
+                agent_id="test_agent",
+            )
+
+            result = await tool._arun(address="SolanaAddress", include_nfts=True)
+
+            self.assertEqual(result.address, "SolanaAddress")
+            self.assertEqual(result.sol_balance, 1.5)
+            self.assertEqual(len(result.tokens), 1)
+            self.assertEqual(result.tokens[0].token_info.symbol, "TEST")
+            self.assertEqual(len(result.nfts), 1)
+            self.assertEqual(result.nfts[0].name, "Test NFT")
+            self.assertEqual(result.sol_price_usd, 25)
+            self.assertEqual(result.sol_value_usd, 37.5)  # 1.5 SOL * $25
+
+
+class TestFetchChainPortfolio(unittest.IsolatedAsyncioTestCase):
+    """Test the FetchChainPortfolio skill."""
+
+    async def test_chain_portfolio_success(self):
+        """Test successful chain portfolio fetch."""
+        mock_skill_store = MagicMock()
+
+        with patch(
+            "intentkit.skills.moralis.moralis_fetch_chain_portfolio.fetch_wallet_balances"
+        ) as mock_balances:
+            # Mock successful responses
+            mock_balances.return_value = {
+                "result": [
+                    {
+                        "token_address": "0x123",
+                        "symbol": "ETH",
+                        "name": "Ethereum",
+                        "logo": "logo.png",
+                        "decimals": 18,
+                        "balance": "1000000000000000000",
+                        "balance_formatted": "1.0",
+                        "usd_value": 2000,
+                        "native_token": True,
+                    },
+                    {
+                        "token_address": "0x456",
+                        "symbol": "TOKEN",
+                        "name": "Test Token",
+                        "logo": "logo2.png",
+                        "decimals": 18,
+                        "balance": "2000000000000000000",
+                        "balance_formatted": "2.0",
+                        "usd_value": 200,
+                        "native_token": False,
+                    },
+                ]
+            }
+
+            tool = FetchChainPortfolio(
+                name="fetch_chain_portfolio",
+                description="Test description",
+                args_schema=MagicMock(),
+                api_key="test_key",
+                skill_store=mock_skill_store,
+                agent_id="test_agent",
+            )
+
+            result = await tool._arun(address="0xAddress", chain_id=1)
+
+            self.assertEqual(result.address, "0xAddress")
+            self.assertEqual(result.chain_id, 1)
+            self.assertEqual(result.chain_name, "eth")
+            self.assertEqual(result.total_usd_value, 2200)  # 2000 + 200
+            self.assertEqual(len(result.tokens), 1)  # Regular tokens, not native
+            self.assertIsNotNone(result.native_token)
+            self.assertEqual(result.native_token.symbol, "ETH")
+            self.assertEqual(result.tokens[0].symbol, "TOKEN")
+
+
+class TestSkillInitialization(unittest.TestCase):
+    """Test skill initialization and configuration."""
+
+    def setUp(self):
+        self.mock_skill_store = MagicMock()
+
+    def test_get_skills(self):
+        """Test getting multiple skills from config."""
+        config = {
+            "api_key": "test_api_key",
+            "states": {
+                "fetch_wallet_portfolio": "public",
+                "fetch_chain_portfolio": "public",
+                "fetch_nft_portfolio": "private",
+                "fetch_transaction_history": "private",
+                "fetch_solana_portfolio": "public",
+            },
+            "supported_chains": {"evm": True, "solana": True},
+        }
+
+        # Test with mock implementation
+        with patch("intentkit.skills.moralis.base.WalletBaseTool") as mock_tool:
+            mock_tool.return_value = MagicMock()
+
+            # This is just a test structure - actual implementation would create the skills
+            skills = get_skills(
+                config,
+                is_private=False,  # Only get public skills
+                skill_store=self.mock_skill_store,
+                agent_id="test_agent",
+            )
+
+            # In a real implementation, we'd test that the correct skills were returned
+            # For now, we just verify the function exists
+            self.assertIsNotNone(skills)
+
+
+class TestIntegration(unittest.TestCase):
+    """Integration tests for wallet skills."""
+
+    def test_wallet_skill_configuration(self):
+        """Test wallet skill configuration in agent config."""
+        # Example agent configuration
+        agent_config = {
+            "id": "crypto-agent",
+            "skills": {
+                "moralis": {
+                    "api_key": "test_api_key",
+                    "states": {
+                        "fetch_wallet_portfolio": "public",
+                        "fetch_chain_portfolio": "public",
+                        "fetch_nft_portfolio": "private",
+                        "fetch_transaction_history": "private",
+                        "fetch_solana_portfolio": "public",
+                    },
+                    "supported_chains": {"evm": True, "solana": True},
+                }
+            },
+        }
+
+        # Verify the configuration structure is valid
+        moralis_config = agent_config["skills"]["moralis"]
+        self.assertIn("api_key", moralis_config)
+        self.assertIn("states", moralis_config)
+        self.assertIn("supported_chains", moralis_config)
+
+        # Check that all required skills are configured
+        states = moralis_config["states"]
+        required_skills = [
+            "fetch_wallet_portfolio",
+            "fetch_chain_portfolio",
+            "fetch_nft_portfolio",
+            "fetch_transaction_history",
+            "fetch_solana_portfolio",
+        ]
+
+        for skill in required_skills:
+            self.assertIn(skill, states)
+            self.assertIn(states[skill], ["public", "private", "disabled"])
+
+        # Check chain configuration
+        chains = moralis_config["supported_chains"]
+        self.assertIn("evm", chains)
+        self.assertIn("solana", chains)
+        self.assertTrue(isinstance(chains["evm"], bool))
+        self.assertTrue(isinstance(chains["solana"], bool))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/intentkit/skills/nation/__init__.py b/intentkit/skills/nation/__init__.py
new file mode 100644
index 00000000..f276b384
--- /dev/null
+++ b/intentkit/skills/nation/__init__.py
@@ -0,0 +1,62 @@
+import logging
+from typing import Optional, TypedDict
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.nation.base import NationBaseTool
+from intentkit.skills.nation.nft_check import NftCheck
+
+logger = logging.getLogger(__name__)
+
+# Cache skills at the system level, because they are stateless
+_cache: dict[str, NationBaseTool] = {}
+
+
+class SkillStates(TypedDict):
+    nft_check: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for nation skills."""
+
+    states: SkillStates
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[NationBaseTool]:
+    """Get all nation skills."""
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter
+    return [
+        skill
+        for name in available_skills
+        if (skill := get_nation_skill(name, store)) is not None
+    ]
+
+
+def get_nation_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> Optional[NationBaseTool]:
+    """Get a nation skill by name."""
+    if name == "nft_check":
+        if name not in _cache:
+            _cache[name] = NftCheck(
+                skill_store=store,
+            )
+        return _cache[name]
+    else:
+        logger.error(f"Unknown Nation skill: {name}")
+        return None
diff --git a/intentkit/skills/nation/base.py b/intentkit/skills/nation/base.py
new file mode 100644
index 00000000..1c837963
--- /dev/null
+++ b/intentkit/skills/nation/base.py
@@ -0,0 +1,31 @@
+from typing import Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+default_nation_api_url = "http://backend-api"
+
+
+class NationBaseTool(IntentKitSkill):
+    """Base class for Nation tools."""
+
+    name: str = Field(description="The name of the tool")
+    description: str = Field(description="A description of what the tool does")
+    args_schema: Type[BaseModel]
+    skill_store: SkillStoreABC = Field(
+        description="The skill store for persisting data"
+    )
+
+    def get_api_key(self) -> str:
+        return self.skill_store.get_system_config("nation_api_key")
+
+    def get_base_url(self) -> str:
+        if self.skill_store.get_system_config("nation_api_url"):
+            return self.skill_store.get_system_config("nation_api_url")
+        return default_nation_api_url
+
+    @property
+    def category(self) -> str:
+        return "nation"
diff --git a/intentkit/skills/nation/nation.png b/intentkit/skills/nation/nation.png
new file mode 100644
index 00000000..ab26384c
Binary files /dev/null and b/intentkit/skills/nation/nation.png differ
diff --git a/intentkit/skills/nation/nft_check.py b/intentkit/skills/nation/nft_check.py
new file mode 100644
index 00000000..620d3e78
--- /dev/null
+++ b/intentkit/skills/nation/nft_check.py
@@ -0,0 +1,103 @@
+import logging
+from typing import Optional, Type
+
+import httpx
+from eth_utils import is_address
+from pydantic import BaseModel, Field
+
+from .base import NationBaseTool
+
+logger = logging.getLogger(__name__)
+
+
+class NftCheckInput(BaseModel):
+    nation_wallet_address: Optional[str] = Field(
+        default=None, description="Nation wallet address"
+    )
+
+
+class NftCheck(NationBaseTool):
+    name: str = "nft_check"
+    description: str = "Check a user's Nation Pass NFT stats, including usage status and linked agents. By default, the user_id is used as the wallet address; pass the nation_wallet_address parameter to check a different wallet."
+    args_schema: Type[BaseModel] = NftCheckInput
+
+    async def _arun(self, nation_wallet_address: Optional[str] = None) -> str:
+        """Implementation of the NFT Check tool.
+
+        Args:
+            nation_wallet_address: The wallet address to check (optional); if not passed, the user_id from the chat context is used as the wallet address.
+
+        Returns:
+            str: Formatted NFT check results based on the nation wallet address.
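+
+        Example (illustrative; the address is a placeholder):
+            result = await nft_check._arun(
+                nation_wallet_address="0x0000000000000000000000000000000000000001"
+            )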
+        """
+
+        context = self.get_context()
+        logger.debug(f"nft_check.py: Running NFT check with context {context}")
+
+        # Use the provided nation_wallet_address or fetch it from the context
+        if not nation_wallet_address:
+            nation_wallet_address = context.user_id
+            if not nation_wallet_address:
+                raise ValueError(
+                    "Nation wallet address is not provided and not found in context"
+                )
+
+        # Validate the wallet address
+        if not is_address(nation_wallet_address):
+            raise ValueError(
+                f"Invalid Ethereum wallet address: {nation_wallet_address}"
+            )
+
+        url = f"{self.get_base_url()}/v1/users/{nation_wallet_address}"
+
+        api_key = self.get_api_key()
+
+        if not api_key:
+            raise ValueError("Backend API key not found")
+
+        headers = {"Accept": "application/json", "x-api-key": api_key}
+
+        try:
+            async with httpx.AsyncClient(timeout=30.0) as client:
+                response = await client.get(url, headers=headers)
+
+                if response.status_code != 200:
+                    logger.error(
+                        f"nft_check.py: Error from API: {response.status_code} - {response.text}"
+                    )
+                    return f"Error fetching NFT data: {response.status_code} - {response.text}"
+
+                data = response.json()
+                nfts = data.get("nfts", [])
+
+                if not nfts:
+                    return f"No NFTs found for wallet address: {nation_wallet_address}"
+
+                # Format the NFT data
+                formatted_results = (
+                    f"NFTs for wallet address '{nation_wallet_address}':\n\n"
+                )
+
+                for i, nft in enumerate(nfts, 1):
+                    token_id = nft.get("token_id", "Unknown")
+                    used_by = nft.get("used_by", None)
+                    linked_agent_id = nft.get("linked_agent_id", "None")
+
+                    formatted_results += f"{i}. Token ID: {token_id}\n"
+                    if used_by:
+                        formatted_results += (
+                            f"   Status: Used by Agent ID {linked_agent_id}\n"
+                        )
+                    else:
+                        formatted_results += "   Status: Available\n"
+                    formatted_results += "\n"
+
+                return formatted_results.strip()
+
+        except httpx.TimeoutException:
+            logger.error("nft_check.py: Request timed out")
+            return "The request to the NFT API timed out. Please try again later."
+        except Exception as e:
+            logger.error(f"nft_check.py: Error fetching NFT data: {e}", exc_info=True)
+            return "An error occurred while fetching NFT data. Please try again later."
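+
+# Illustrative output shape (values are placeholders, not from a live call):
+#   NFTs for wallet address '0x0000...0001':
+#
+#   1. Token ID: 42
+#      Status: Available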
diff --git a/intentkit/skills/nation/schema.json b/intentkit/skills/nation/schema.json new file mode 100644 index 00000000..1418d9ef --- /dev/null +++ b/intentkit/skills/nation/schema.json @@ -0,0 +1,58 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Nation", + "description": "Check nation NFT stats", + "x-icon": "https://ai.service.crestal.dev/skills/nation/nation.png", + "x-tags": [ + "Nation", + "NFTChecker" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "nft_check": { + "type": "string", + "title": "Nation NFT Usage Check", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Check User Nation NFT Usage", + "default": "disabled" + } + }, + "description": "States for each Nation skill" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform" + ], + "x-enum-title": [ + "Nation Hosted" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/openai/__init__.py b/intentkit/skills/openai/__init__.py new file mode 100644 index 00000000..ae535294 --- /dev/null +++ b/intentkit/skills/openai/__init__.py @@ -0,0 +1,107 @@ +"""OpenAI skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.openai.base import OpenAIBaseTool +from intentkit.skills.openai.dalle_image_generation import DALLEImageGeneration +from intentkit.skills.openai.gpt_image_generation import GPTImageGeneration +from intentkit.skills.openai.gpt_image_to_image import GPTImageToImage +from intentkit.skills.openai.image_to_text import ImageToText + +# Cache skills at the system level, because they are stateless +_cache: dict[str, OpenAIBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + image_to_text: SkillState + dalle_image_generation: SkillState + gpt_image_generation: SkillState + gpt_image_to_image: SkillState + + +class Config(SkillConfig): + """Configuration for OpenAI skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[OpenAIBaseTool]: + """Get all OpenAI skills. + + Args: + config: The configuration for OpenAI skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of OpenAI skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_openai_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_openai_skill( + name: str, + store: SkillStoreABC, +) -> OpenAIBaseTool: + """Get an OpenAI skill by name. 
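+
+    Instances are cached in the module-level _cache, since the skills are stateless.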
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested OpenAI skill + """ + if name == "image_to_text": + if name not in _cache: + _cache[name] = ImageToText( + skill_store=store, + ) + return _cache[name] + elif name == "dalle_image_generation": + if name not in _cache: + _cache[name] = DALLEImageGeneration( + skill_store=store, + ) + return _cache[name] + elif name == "gpt_image_generation": + if name not in _cache: + _cache[name] = GPTImageGeneration( + skill_store=store, + ) + return _cache[name] + elif name == "gpt_image_to_image": + if name not in _cache: + _cache[name] = GPTImageToImage( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown OpenAI skill: {name}") + return None diff --git a/intentkit/skills/openai/base.py b/intentkit/skills/openai/base.py new file mode 100644 index 00000000..577f05ee --- /dev/null +++ b/intentkit/skills/openai/base.py @@ -0,0 +1,41 @@ +"""Base class for OpenAI skills.""" + +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class OpenAIBaseTool(IntentKitSkill): + """Base class for all OpenAI skills. + + This class provides common functionality for all OpenAI skills. + """ + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + return self.skill_store.get_system_config("openai_api_key") + # for backward compatibility, may only have api_key in skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "openai" diff --git a/intentkit/skills/openai/dalle_image_generation.py b/intentkit/skills/openai/dalle_image_generation.py new file mode 100644 index 00000000..a234d282 --- /dev/null +++ b/intentkit/skills/openai/dalle_image_generation.py @@ -0,0 +1,129 @@ +"""DALL-E image generation skill for OpenAI.""" + +import logging +from typing import Type + +import openai +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.openai.base import OpenAIBaseTool +from intentkit.utils.s3 import store_image + +logger = logging.getLogger(__name__) + + +class DALLEImageGenerationInput(BaseModel): + """Input for DALLEImageGeneration tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + size: str = Field( + default="1024x1024", + description="Size of the generated image. Options: 1024x1024, 1024x1792, 1792x1024", + ) + quality: str = Field( + default="hd", + description="Quality of the generated image. Options: standard, hd", + ) + style: str = Field( + default="vivid", + description="Style of the generated image. Options: vivid, natural", + ) + + +class DALLEImageGeneration(OpenAIBaseTool): + """Tool for generating high-quality images using OpenAI's DALL-E 3 model. 
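+
+    The generated image is stored via store_image and returned as a hosted URL,
+    rather than the short-lived URL returned by the OpenAI API.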
+ + This tool takes a text prompt and uses OpenAI's API to generate + an image based on the description using the DALL-E 3 model. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "dalle_image_generation" + description: str = ( + "Generate images using OpenAI's DALL-E 3 model.\n" + "Provide a text prompt describing the image you want to generate.\n" + "DALL-E 3 is a powerful image generation model capable of creating detailed, " + "high-quality images from text descriptions.\n" + "You can specify size, quality, and style parameters for more control.\n" + ) + args_schema: Type[BaseModel] = DALLEImageGenerationInput + + async def _arun( + self, + prompt: str, + size: str = "1024x1024", + quality: str = "hd", + style: str = "vivid", + **kwargs, + ) -> str: + """Implementation of the tool to generate images using OpenAI's DALL-E 3 model. + + Args: + prompt: Text prompt describing the image to generate. + size: Size of the generated image. Options: 1024x1024, 1024x1792, 1792x1024 + quality: Quality of the generated image. Options: standard, hd + style: Style of the generated image. Options: vivid, natural + + + Returns: + str: URL of the generated image. + + Raises: + Exception: If the image generation fails. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Get the OpenAI API key from the skill store + api_key = skill_config.get("api_key") or self.skill_store.get_system_config( + "openai_api_key" + ) + + # Generate a unique job ID + job_id = str(XID()) + + try: + # Initialize the OpenAI client + client = openai.OpenAI(api_key=api_key) + + # Make the API request to generate the image + response = client.images.generate( + model="dall-e-3", + prompt=prompt, + size=size, + quality=quality, + style=style, + n=1, + ) + + # Get the image URL from the response + image_url = response.data[0].url + + # Strip potential double quotes from the response + image_url = image_url.strip('"') + + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/dalle/{job_id}" + + # Store the image and get the CDN URL + stored_url = await store_image(image_url, image_key) + + # Return the stored image URL + return stored_url + + except openai.OpenAIError as e: + error_message = f"OpenAI API error: {str(e)}" + logger.error(error_message) + raise Exception(error_message) + + except Exception as e: + error_message = f"Error generating image with DALL-E: {str(e)}" + logger.error(error_message) + raise Exception(error_message) diff --git a/intentkit/skills/openai/gpt_image_generation.py b/intentkit/skills/openai/gpt_image_generation.py new file mode 100644 index 00000000..29895d42 --- /dev/null +++ b/intentkit/skills/openai/gpt_image_generation.py @@ -0,0 +1,153 @@ +"""GPT image generation skill for OpenAI.""" + +import base64 +import logging +from typing import Literal, Type + +import openai +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.openai.base import OpenAIBaseTool +from intentkit.utils.s3 import store_image_bytes + +logger = logging.getLogger(__name__) + + +class GPTImageGenerationInput(BaseModel): + """Input for GPTImageGeneration tool.""" + + prompt: str = Field( + description="Text prompt describing the image to generate.", + ) + size: Literal["1024x1024", "1536x1024", "1024x1536", "auto"] = Field( + default="auto", + description="Size of the generated image. 
Options: 1024x1024, 1536x1024, 1024x1536, auto", + ) + quality: Literal["high", "medium", "low", "auto"] = Field( + default="auto", + description="Quality of the generated image. Options: high, medium, low, auto", + ) + background: Literal["transparent", "opaque", "auto"] = Field( + default="auto", + description="Background transparency. Options: transparent, opaque, auto", + ) + + +class GPTImageGeneration(OpenAIBaseTool): + """Tool for generating high-quality images using OpenAI's GPT-Image-1 model. + + This tool takes a text prompt and uses OpenAI's API to generate + an image based on the description using the GPT-Image-1 model. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "gpt_image_generation" + description: str = ( + "Generate images using OpenAI's GPT-Image-1 model.\n" + "Provide a text prompt describing the image you want to generate.\n" + "GPT-Image-1 is a powerful image generation model capable of creating detailed, " + "high-quality images from text descriptions.\n" + "You can specify size, quality, and background parameters for more control.\n" + ) + args_schema: Type[BaseModel] = GPTImageGenerationInput + + async def _arun( + self, + prompt: str, + size: Literal["1024x1024", "1536x1024", "1024x1536", "auto"] = "auto", + quality: Literal["high", "medium", "low", "auto"] = "auto", + background: Literal["transparent", "opaque", "auto"] = "auto", + **kwargs, + ) -> str: + """Implementation of the tool to generate images using OpenAI's GPT-Image-1 model. + + Args: + prompt: Text prompt describing the image to generate. + size: Size of the generated image. Options: 1024x1024, 1536x1024, 1024x1536, auto + quality: Quality of the generated image. Options: high, medium, low, auto + background: Background transparency. Options: transparent, opaque, auto + + + Returns: + str: URL of the generated image. + + Raises: + Exception: If the image generation fails. 
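+
+        Example (illustrative sketch; assumes an API key is configured for the agent):
+            url = await tool._arun(
+                prompt="A watercolor fox in a misty forest",
+                size="1024x1024",
+                quality="high",
+            )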
+ """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Get the OpenAI API key from the skill store + api_key = skill_config.get("api_key") or self.skill_store.get_system_config( + "openai_api_key" + ) + + # Generate a unique job ID + job_id = str(XID()) + + try: + # Initialize the OpenAI client + client = openai.OpenAI(api_key=api_key) + + # Determine content type based on background setting + content_type = "image/png" if background == "transparent" else "image/jpeg" + + # Make the API request to generate the image + response = client.images.generate( + model="gpt-image-1", + prompt=prompt, + size=size, + quality=quality, + background=background, + moderation="low", # Using low moderation as specified + n=1, + ) + + # GPT-Image-1 always returns base64-encoded images + # Get the base64 image data from the response + base64_image = response.data[0].b64_json + + # Log the usage information if available + if hasattr(response, "usage") and response.usage: + usage = response.usage + logger.info( + f"GPT-Image-1 generation usage: " + f"input_tokens={usage.input_tokens}, " + f"output_tokens={usage.output_tokens}, " + f"total_tokens={usage.total_tokens}" + ) + + # Log detailed input tokens information if available + if ( + hasattr(usage, "input_tokens_details") + and usage.input_tokens_details + ): + details = usage.input_tokens_details + logger.info(f"Input tokens details: {details}") + + # Decode the base64 string to bytes + image_bytes = base64.b64decode(base64_image) + + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/gpt-image/{job_id}" + + # Store the image bytes and get the CDN URL + stored_url = await store_image_bytes(image_bytes, image_key, content_type) + + # Return the stored image URL + return stored_url + + except openai.OpenAIError as e: + error_message = f"OpenAI API error: {str(e)}" + logger.error(error_message) + raise Exception(error_message) + + except Exception as e: + error_message = f"Error generating image with GPT-Image-1: {str(e)}" + logger.error(error_message) + raise Exception(error_message) diff --git a/intentkit/skills/openai/gpt_image_to_image.py b/intentkit/skills/openai/gpt_image_to_image.py new file mode 100644 index 00000000..de805cc9 --- /dev/null +++ b/intentkit/skills/openai/gpt_image_to_image.py @@ -0,0 +1,187 @@ +"""GPT image-to-image generation skill for OpenAI.""" + +import base64 +import logging +from io import BytesIO +from typing import Literal, Type + +import httpx +import openai +from epyxid import XID +from pydantic import BaseModel, Field + +from intentkit.skills.openai.base import OpenAIBaseTool +from intentkit.utils.s3 import store_image_bytes + +logger = logging.getLogger(__name__) + + +class GPTImageToImageInput(BaseModel): + """Input for GPTImageToImage tool.""" + + image_url: str = Field( + description="URL of the source image to edit.", + ) + prompt: str = Field( + description="Text prompt describing the desired edits to the image.", + ) + size: Literal["1024x1024", "1536x1024", "1024x1536", "auto"] = Field( + default="auto", + description="Size of the generated image. Options: 1024x1024, 1536x1024, 1024x1536, auto", + ) + quality: Literal["high", "medium", "low", "auto"] = Field( + default="auto", + description="Quality of the generated image. Options: high, medium, low, auto", + ) + + +class GPTImageToImage(OpenAIBaseTool): + """Tool for editing images using OpenAI's GPT-Image-1 model. 
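+
+    The source image is first downloaded and converted to an RGB PNG with Pillow,
+    since the images.edit endpoint expects a named image file.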
+ + This tool takes a source image URL and a text prompt, then uses OpenAI's API to + generate an edited version of the image based on the description. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "gpt_image_to_image" + description: str = ( + "Edit images using OpenAI's GPT-Image-1 model.\n" + "Provide a source image URL and a text prompt describing the desired edits.\n" + "GPT-Image-1 is a powerful image editing model capable of transforming images " + "based on text descriptions.\n" + "You can specify size and quality parameters for more control.\n" + ) + args_schema: Type[BaseModel] = GPTImageToImageInput + + async def _arun( + self, + image_url: str, + prompt: str, + size: Literal["1024x1024", "1536x1024", "1024x1536", "auto"] = "auto", + quality: Literal["high", "medium", "low", "auto"] = "auto", + **kwargs, + ) -> str: + """Implementation of the tool to edit images using OpenAI's GPT-Image-1 model. + + Args: + image_url: URL of the source image to edit. + prompt: Text prompt describing the desired edits to the image. + size: Size of the generated image. Options: 1024x1024, 1536x1024, 1024x1536, auto + quality: Quality of the generated image. Options: high, medium, low, auto + + + Returns: + str: URL of the edited image. + + Raises: + Exception: If the image editing fails. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + + # Get the OpenAI API key from the skill store + api_key = skill_config.get("api_key") or self.skill_store.get_system_config( + "openai_api_key" + ) + + # Generate a unique job ID + job_id = str(XID()) + + try: + # Download the image from the URL asynchronously + async with httpx.AsyncClient() as client: + response = await client.get(image_url, follow_redirects=True) + response.raise_for_status() + image_data = response.content + + # Initialize the OpenAI client + client = openai.OpenAI(api_key=api_key) + + # Import required modules for file handling + import os + import tempfile + + from PIL import Image + + # Create a temporary file with .png extension + with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as temp_file: + temp_path = temp_file.name + + # Open the image, convert to RGB if needed, and save as PNG + img = Image.open(BytesIO(image_data)) + if img.mode != "RGB": + img = img.convert("RGB") + img.save(temp_path, format="PNG") + + # Open the temporary file in binary read mode + # This provides both .read() method and .name attribute that OpenAI SDK needs + image_file = open(temp_path, "rb") + + # Make the API request to edit the image + try: + response = client.images.edit( + model="gpt-image-1", + image=image_file, # Use the file object with .read() method and .name attribute + prompt=prompt, + size=size, + quality=quality, + n=1, + ) + + # GPT-Image-1 always returns base64-encoded images + # Get the base64 image data from the response + base64_image = response.data[0].b64_json + + # Log the usage information if available + if hasattr(response, "usage") and response.usage: + usage = response.usage + logger.info( + f"GPT-Image-1 edit usage: " + f"input_tokens={usage.input_tokens}, " + f"output_tokens={usage.output_tokens}, " + f"total_tokens={usage.total_tokens}" + ) + + # Log detailed input tokens information if available + if ( + hasattr(usage, "input_tokens_details") + and usage.input_tokens_details + ): + details = usage.input_tokens_details + logger.info(f"Input 
tokens details: {details}") + + # Decode the base64 string to bytes + image_bytes = base64.b64decode(base64_image) + + # Generate a key with agent ID as prefix + image_key = f"{context.agent_id}/gpt-image-edit/{job_id}" + + # Store the image bytes and get the CDN URL + stored_url = await store_image_bytes(image_bytes, image_key) + finally: + # Close and remove the temporary file + image_file.close() + if os.path.exists(temp_path): + os.unlink(temp_path) + + # Return the stored image URL + return stored_url + + except httpx.HTTPError as e: + error_message = f"Failed to download image from URL {image_url}: {str(e)}" + logger.error(error_message) + raise Exception(error_message) + + except openai.OpenAIError as e: + error_message = f"OpenAI API error: {str(e)}" + logger.error(error_message) + raise Exception(error_message) + + except Exception as e: + error_message = f"Error editing image with GPT-Image-1: {str(e)}" + logger.error(error_message) + raise Exception(error_message) diff --git a/intentkit/skills/openai/image_to_text.py b/intentkit/skills/openai/image_to_text.py new file mode 100644 index 00000000..16688aa0 --- /dev/null +++ b/intentkit/skills/openai/image_to_text.py @@ -0,0 +1,126 @@ +import io +import logging +from typing import Type + +import aiohttp +import openai +from PIL import Image +from pydantic import BaseModel, Field + +from intentkit.skills.openai.base import OpenAIBaseTool + +logger = logging.getLogger(__name__) + + +class ImageToTextInput(BaseModel): + """Input for ImageToText tool.""" + + image: str = Field( + description="URL of the image to convert to text.", + ) + + +class ImageToTextOutput(BaseModel): + """Output for ImageToText tool.""" + + description: str = Field(description="Detailed text description of the image.") + width: int = Field(description="Width of the processed image.") + height: int = Field(description="Height of the processed image.") + + +class ImageToText(OpenAIBaseTool): + """Tool for converting images to text using OpenAI's GPT-4o model. + + This tool takes an image URL and uses OpenAI's vision capabilities + to generate a detailed text description of the image content. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "image_to_text" + description: str = ( + "Convert an image to detailed text description.\n" + "Provide a URL to the image to analyze and get a comprehensive textual description.\n" + "Optimized for DALL-E generated images and preserves as many details as possible." + ) + args_schema: Type[BaseModel] = ImageToTextInput + + async def _arun(self, image: str, **kwargs) -> ImageToTextOutput: + """Implementation of the tool to convert images to text. + + Args: + image (str): URL of the image to convert to text. + + Returns: + ImageToTextOutput: Object containing the text description and image dimensions. 
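+
+        Example (illustrative; the URL is a placeholder):
+            output = await tool._arun(image="https://example.com/photo.png")
+            print(output.description, output.width, output.height)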
+ """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + logger.debug(f"context: {context}") + + # Get the OpenAI client from the skill store + api_key = skill_config.get("api_key") or self.skill_store.get_system_config( + "openai_api_key" + ) + client = openai.AsyncOpenAI(api_key=api_key) + + try: + # Download the image from the URL + async with aiohttp.ClientSession() as session: + async with session.get(image) as response: + if response.status != 200: + raise Exception( + f"Failed to download image from URL: {response.status}" + ) + + # Get image data + image_data = await response.read() + img = Image.open(io.BytesIO(image_data)) + + # Get original dimensions + orig_width, orig_height = img.size + + # Calculate new dimensions with longest side as 1024 (for reference only) + max_size = 1024 + if orig_width >= orig_height: + scaled_width = max_size + scaled_height = int(orig_height * (max_size / orig_width)) + else: + scaled_height = max_size + scaled_width = int(orig_width * (max_size / orig_height)) + + # Use OpenAI API to analyze the image (using original image) + response = await client.chat.completions.create( + model="gpt-4o", + messages=[ + { + "role": "system", + "content": "You are an expert image analyzer. Describe the image in great detail, capturing all visual elements, colors, composition, subjects, and context. If there are people in the picture, be sure to clearly describe the person's skin color, hair color, expression, direction, etc. For DALL-E generated images, pay special attention to artistic style, lighting effects, and fantastical elements. Preserve as many details as possible in your description.", + }, + { + "role": "user", + "content": [ + {"type": "text", "text": "Describe this image in detail:"}, + { + "type": "image_url", + "image_url": {"url": image, "detail": "high"}, + }, + ], + }, + ], + max_tokens=1000, + ) + + # Return the text description and scaled image dimensions + return ImageToTextOutput( + description=response.choices[0].message.content, + width=scaled_width, + height=scaled_height, + ) + + except Exception as e: + logger.error(f"Error converting image to text: {e}") + raise Exception(f"Error converting image to text: {str(e)}") diff --git a/intentkit/skills/openai/openai.png b/intentkit/skills/openai/openai.png new file mode 100644 index 00000000..5c8532ab Binary files /dev/null and b/intentkit/skills/openai/openai.png differ diff --git a/intentkit/skills/openai/schema.json b/intentkit/skills/openai/schema.json new file mode 100644 index 00000000..761fc439 --- /dev/null +++ b/intentkit/skills/openai/schema.json @@ -0,0 +1,139 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "OpenAI", + "description": "Skills for interacting with OpenAI services, including image generation, image-to-text conversion, and other AI capabilities", + "x-icon": "https://ai.service.crestal.dev/skills/openai/openai.png", + "x-tags": [ + "AI", + "Image Generation", + "Image Analysis" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + "image_to_text": { + "type": "string", + "title": "Image to Text", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Convert images to detailed text descriptions using OpenAI's GPT-4o model", + 
"default": "private" + }, + "dalle_image_generation": { + "type": "string", + "title": "Image Generation by DALL-E", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate images using OpenAI's DALL-E model based on text prompts", + "default": "disabled" + }, + "gpt_image_generation": { + "type": "string", + "title": "Image Generation by GPT", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Generate images using OpenAI's GPT-Image-1 model based on text prompts", + "default": "private" + }, + "gpt_image_to_image": { + "type": "string", + "title": "Image Editing by GPT", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Edit images using OpenAI's GPT-Image-1 model based on text prompts", + "default": "private" + } + }, + "description": "States for each OpenAI skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "API Key", + "x-link": "[Get your API key](https://platform.openai.com/)", + "x-sensitive": true, + "description": "OpenAI API key for authentication" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/portfolio/README.md b/intentkit/skills/portfolio/README.md new file mode 100644 index 00000000..f2d3c128 --- /dev/null +++ b/intentkit/skills/portfolio/README.md @@ -0,0 +1,55 @@ +# Blockchain Portfolio Analysis Skills + +A set of skills for analyzing blockchain wallets and portfolios through the Moralis API, allowing agents to retrieve wallet data, token balances, and investment performance across various blockchain networks. + +## Available Skills + +| Skill | Description | Endpoint | Example Prompts | +|-------|-------------|----------|----------------| +| `wallet_history` | Gets transaction history including sends, receives, token transfers | `GET /wallets/{address}/history` | "Show me all transactions for wallet 0x123..."
"What are the recent transactions for this ETH address?" | +| `token_balances` | Gets token balances and USD value with spam filtering options | `GET /wallets/{address}/tokens` | "What tokens does wallet 0x123 hold?"
"Show me token balances with USD values for this address" | +| `wallet_approvals` | Lists active ERC20 token approvals to identify spend permissions | `GET /wallets/{address}/approvals` | "Check what contracts have approval to spend from wallet 0x123"
"Has this wallet approved any token spending?" | +| `wallet_swaps` | Lists all swap-related transactions (buy/sell) for trade analysis | `GET /wallets/{address}/swaps` | "Show me all token swaps for wallet 0x123"
"What trading activity has this address performed?" | +| `wallet_net_worth` | Calculates total wallet value in USD across multiple chains | `GET /wallets/{address}/net-worth` | "What's the total value of wallet 0x123?"
"Calculate the net worth of this address across all chains" | +| `wallet_profitability_summary` | Provides overview of wallet profitability metrics | `GET /wallets/{address}/profitability/summary` | "Is wallet 0x123 profitable overall?"
"Give me a summary of trading performance for this address" | +| `wallet_profitability` | Delivers detailed profitability by token with buy/sell prices | `GET /wallets/{address}/profitability` | "Show detailed profit/loss for each token in wallet 0x123"
"What's the cost basis of tokens in this wallet?" | +| `wallet_stats` | Provides statistics about NFTs, collections, and transactions | `GET /wallets/{address}/stats` | "How many NFTs does wallet 0x123 have?"
"Give me stats about this wallet's activity" | +| `wallet_defi_positions` | Get DeFi positions for a wallet | `GET /wallets/{address}/defi/positions` | "What DeFi positions does my wallet have?"
"Show my liquidity positions." | +| `wallet_nfts` | Get NFTs owned by a wallet address | `GET /{address}/nft` | "What NFTs does wallet 0x123 own?"
"Show me all the NFTs in my wallet." | + +All endpoints use the base URL defined in `constants.py`: `https://deep-index.moralis.io/api/v2.2` + +## Migration Note + +If you're currently using the Moralis module, simply update your configuration by changing `moralis:` to `portfolio:`. All functionality remains identical. + +## Authentication + +All API requests include the Moralis API key in the header: +``` +X-API-Key: YOUR_MORALIS_API_KEY +``` + +## Supported Chains + +These skills support various EVM-compatible chains: +- Ethereum (eth) +- Polygon (polygon) +- Binance Smart Chain (bsc) +- Avalanche (avalanche) +- Arbitrum (arbitrum) +- Optimism (optimism) +- Base (base) + +## Key Parameters + +Most endpoints support these common parameters: +- `chain`: The chain to query (default: eth) +- `limit`: Number of results per page +- `cursor`: Pagination cursor for subsequent requests + +## Getting a Moralis API Key + +1. Create an account at [Moralis.io](https://moralis.io/) +2. Navigate to the API Keys section in your dashboard +3. Create a new key with appropriate permissions diff --git a/intentkit/skills/portfolio/__init__.py b/intentkit/skills/portfolio/__init__.py new file mode 100644 index 00000000..7ee368e6 --- /dev/null +++ b/intentkit/skills/portfolio/__init__.py @@ -0,0 +1,151 @@ +"""Portfolio skills for blockchain wallet analysis.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.token_balances import TokenBalances +from intentkit.skills.portfolio.wallet_approvals import WalletApprovals +from intentkit.skills.portfolio.wallet_defi_positions import WalletDefiPositions +from intentkit.skills.portfolio.wallet_history import WalletHistory +from intentkit.skills.portfolio.wallet_net_worth import WalletNetWorth +from intentkit.skills.portfolio.wallet_nfts import WalletNFTs +from intentkit.skills.portfolio.wallet_profitability import WalletProfitability +from intentkit.skills.portfolio.wallet_profitability_summary import ( + WalletProfitabilitySummary, +) +from intentkit.skills.portfolio.wallet_stats import WalletStats +from intentkit.skills.portfolio.wallet_swaps import WalletSwaps + +# Cache skills at the system level, because they are stateless +_cache: dict[str, PortfolioBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + """State configurations for Portfolio skills.""" + + wallet_history: SkillState + token_balances: SkillState + wallet_approvals: SkillState + wallet_swaps: SkillState + wallet_net_worth: SkillState + wallet_profitability_summary: SkillState + wallet_profitability: SkillState + wallet_stats: SkillState + wallet_defi_positions: SkillState + wallet_nfts: SkillState + + +class Config(SkillConfig): + """Configuration for Portfolio blockchain analysis skills.""" + + states: SkillStates + api_key: str + api_key_provider: str + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[PortfolioBaseTool]: + """Get all Portfolio blockchain analysis skills. + + Args: + config: The configuration for Portfolio skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Portfolio blockchain analysis skills. 
+ """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_portfolio_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_portfolio_skill( + name: str, + store: SkillStoreABC, +) -> PortfolioBaseTool: + """Get a portfolio skill by name.""" + if name == "wallet_history": + if name not in _cache: + _cache[name] = WalletHistory( + skill_store=store, + ) + return _cache[name] + elif name == "token_balances": + if name not in _cache: + _cache[name] = TokenBalances( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_approvals": + if name not in _cache: + _cache[name] = WalletApprovals( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_swaps": + if name not in _cache: + _cache[name] = WalletSwaps( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_net_worth": + if name not in _cache: + _cache[name] = WalletNetWorth( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_profitability_summary": + if name not in _cache: + _cache[name] = WalletProfitabilitySummary( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_profitability": + if name not in _cache: + _cache[name] = WalletProfitability( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_stats": + if name not in _cache: + _cache[name] = WalletStats( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_defi_positions": + if name not in _cache: + _cache[name] = WalletDefiPositions( + skill_store=store, + ) + return _cache[name] + elif name == "wallet_nfts": + if name not in _cache: + _cache[name] = WalletNFTs( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown portfolio skill: {name}") diff --git a/intentkit/skills/portfolio/base.py b/intentkit/skills/portfolio/base.py new file mode 100644 index 00000000..dae8d619 --- /dev/null +++ b/intentkit/skills/portfolio/base.py @@ -0,0 +1,108 @@ +"""Base classes for portfolio skills.""" + +import asyncio +import logging +from abc import ABC +from typing import Any, Dict, Type + +import aiohttp +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.skills.portfolio.constants import MORALIS_API_BASE_URL + +logger = logging.getLogger(__name__) + + +class PortfolioBaseTool(IntentKitSkill, ABC): + """Base class for portfolio analysis skills.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + if skill_config.get("api_key_provider") == "agent_owner": + return skill_config.get("api_key") + return self.skill_store.get_system_config("moralis_api_key") + + @property + def category(self) -> str: + return "portfolio" + + def _prepare_params(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Convert boolean values to lowercase strings for API compatibility. 
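+
+        For example, {"exclude_spam": True} becomes {"exclude_spam": "true"},
+        matching the string form the Moralis query API expects.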
+ + Args: + params: Dictionary with query parameters that may contain boolean values + + Returns: + Dictionary with boolean values converted to lowercase strings + """ + if not params: + return params + + result = {} + for key, value in params.items(): + if isinstance(value, bool): + result[key] = str(value).lower() + else: + result[key] = value + return result + + async def _make_request( + self, + method: str, + endpoint: str, + api_key: str, + params: Dict[str, Any] = None, + data: Dict[str, Any] = None, + ) -> Dict[str, Any]: + """Make a request to the Moralis API. + + Args: + method: HTTP method (GET, POST, etc.) + endpoint: API endpoint (without base URL) + api_key: Moralis API key + params: Query parameters + data: Request body data for POST requests + + Returns: + Response data as dictionary + """ + url = f"{MORALIS_API_BASE_URL}{endpoint}" + + headers = {"accept": "application/json", "X-API-Key": api_key} + + # Convert boolean params to strings + processed_params = self._prepare_params(params) if params else None + + logger.debug(f"portfolio/base.py: Making request to {url}") + + async with aiohttp.ClientSession() as session: + async with session.request( + method=method, + url=url, + headers=headers, + params=processed_params, + json=data, + ) as response: + if response.status >= 400: + error_text = await response.text() + logger.error(f"portfolio/base.py: API error: {error_text}") + return { + "error": f"API error: {response.status}", + "details": error_text, + } + + return await response.json() + + def _run(self, *args: Any, **kwargs: Any) -> Any: + """Execute the tool synchronously by running the async version in a loop.""" + return asyncio.run(self._arun(*args, **kwargs)) diff --git a/intentkit/skills/portfolio/constants.py b/intentkit/skills/portfolio/constants.py new file mode 100644 index 00000000..33bea3e1 --- /dev/null +++ b/intentkit/skills/portfolio/constants.py @@ -0,0 +1,9 @@ +"""Constants for the portfolio skills module.""" + +# Base URLs +MORALIS_API_BASE_URL = "https://deep-index.moralis.io/api/v2.2" + +# Default parameters +DEFAULT_CHAIN = "base" +DEFAULT_LIMIT = 10 +DEFAULT_ORDER = "DESC" diff --git a/intentkit/skills/portfolio/moralis.png b/intentkit/skills/portfolio/moralis.png new file mode 100644 index 00000000..04f36ca7 Binary files /dev/null and b/intentkit/skills/portfolio/moralis.png differ diff --git a/intentkit/skills/portfolio/schema.json b/intentkit/skills/portfolio/schema.json new file mode 100644 index 00000000..7c0e870a --- /dev/null +++ b/intentkit/skills/portfolio/schema.json @@ -0,0 +1,237 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Portfolio Analysis", + "description": "Access blockchain wallet data and analytics through Moralis APIs for portfolio tracking, token balances, and investment performance", + "x-icon": "https://ai.service.crestal.dev/skills/portfolio/moralis.png", + "x-tags": [ + "Blockchain", + "Web3", + "Crypto", + "Portfolio", + "Wallet" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + "wallet_history": { + "type": "string", + "title": "Wallet Transaction History", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve the full transaction history of a specified wallet address", + "default": "private" + }, + 
"token_balances": { + "type": "string", + "title": "Token Balances", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get token balances for a specific wallet address and their token prices in USD", + "default": "private" + }, + "wallet_approvals": { + "type": "string", + "title": "Wallet Approvals", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve active ERC20 token approvals for a specified wallet address", + "default": "disabled" + }, + "wallet_swaps": { + "type": "string", + "title": "Wallet Swaps", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get all swap-related transactions (buy, sell) for a wallet address", + "default": "disabled" + }, + "wallet_net_worth": { + "type": "string", + "title": "Wallet Net Worth", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get the net worth of a wallet in USD across multiple chains", + "default": "private" + }, + "wallet_profitability_summary": { + "type": "string", + "title": "Wallet Profitability Summary", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve a summary of wallet profitability", + "default": "private" + }, + "wallet_profitability": { + "type": "string", + "title": "Wallet Profitability Breakdown", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieve detailed profitability breakdown for a wallet", + "default": "private" + }, + "wallet_stats": { + "type": "string", + "title": "Wallet Stats", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get statistical information about a wallet", + "default": "disabled" + }, + "wallet_defi_positions": { + "type": "string", + "title": "Wallet DeFi Positions", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get the positions summary of a wallet address", + "default": "disabled" + }, + "wallet_nfts": { + "type": "string", + "title": "Wallet NFTs", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get NFTs owned by a given wallet address", + "default": "disabled" + } + }, + "description": "States for each portfolio blockchain analysis skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Moralis API Key", + "description": 
"API key for Moralis API service", + "x-link": "[Get your API key](https://moralis.io/)", + "x-sensitive": true + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/portfolio/token_balances.py b/intentkit/skills/portfolio/token_balances.py new file mode 100644 index 00000000..7da061d6 --- /dev/null +++ b/intentkit/skills/portfolio/token_balances.py @@ -0,0 +1,153 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import ( + DEFAULT_CHAIN, + DEFAULT_LIMIT, +) + +logger = logging.getLogger(__name__) + + +class TokenBalancesInput(BaseModel): + """Input for token balances tool.""" + + address: str = Field(description="The wallet address to check token balances for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + to_block: Optional[int] = Field( + description="The block number up to which the balances will be checked.", + default=None, + ) + token_addresses: Optional[List[str]] = Field( + description="The specific token addresses to get balances for.", + default=None, + ) + exclude_spam: Optional[bool] = Field( + description="Exclude spam tokens from the result.", + default=True, + ) + exclude_unverified_contracts: Optional[bool] = Field( + description="Exclude unverified contracts from the result.", + default=True, + ) + cursor: Optional[str] = Field( + description="The cursor for pagination.", + default=None, + ) + limit: Optional[int] = Field( + description="The number of results per page.", + default=DEFAULT_LIMIT, + ) + exclude_native: Optional[bool] = Field( + description="Exclude native balance from the result.", + default=None, + ) + max_token_inactivity: Optional[int] = Field( + description="Exclude tokens inactive for more than the given amount of days.", + default=None, + ) + min_pair_side_liquidity_usd: Optional[float] = Field( + description="Exclude tokens with liquidity less than the specified amount in USD.", + default=None, + ) + + +class TokenBalances(PortfolioBaseTool): + """Tool for retrieving native and ERC20 token balances using Moralis. + + This tool uses Moralis' API to fetch token balances for a specific wallet address + and their token prices in USD. + """ + + name: str = "portfolio_token_balances" + description: str = ( + "Get token balances for a specific wallet address and their token prices in USD. " + "Includes options to exclude spam and unverified contracts." + ) + args_schema: Type[BaseModel] = TokenBalancesInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + to_block: Optional[int] = None, + token_addresses: Optional[List[str]] = None, + exclude_spam: Optional[bool] = True, + exclude_unverified_contracts: Optional[bool] = True, + cursor: Optional[str] = None, + limit: Optional[int] = DEFAULT_LIMIT, + exclude_native: Optional[bool] = None, + max_token_inactivity: Optional[int] = None, + min_pair_side_liquidity_usd: Optional[float] = None, + **kwargs, + ) -> Dict[str, Any]: + """Fetch token balances from Moralis. 
+ + Args: + address: The wallet address to get balances for + chain: The blockchain to query + to_block: Block number up to which balances will be checked + token_addresses: Specific token addresses to get balances for + exclude_spam: Whether to exclude spam tokens + exclude_unverified_contracts: Whether to exclude unverified contracts + cursor: Pagination cursor + limit: Number of results per page + exclude_native: Whether to exclude native balance + max_token_inactivity: Exclude tokens inactive for more than the given days + min_pair_side_liquidity_usd: Exclude tokens with liquidity less than specified + config: The configuration for the tool call + + Returns: + Dict containing token balances data + """ + context = self.get_context() + logger.debug( + f"token_balances.py: Fetching token balances with context {context}" + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + "limit": limit, + "exclude_spam": exclude_spam, + "exclude_unverified_contracts": exclude_unverified_contracts, + } + + # Add optional parameters if they exist + if to_block: + params["to_block"] = to_block + if token_addresses: + params["token_addresses"] = token_addresses + if cursor: + params["cursor"] = cursor + if exclude_native is not None: + params["exclude_native"] = exclude_native + if max_token_inactivity: + params["max_token_inactivity"] = max_token_inactivity + if min_pair_side_liquidity_usd: + params["min_pair_side_liquidity_usd"] = min_pair_side_liquidity_usd + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/tokens" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"token_balances.py: Error fetching token balances: {e}", exc_info=True + ) + return { + "error": "An error occurred while fetching token balances. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_approvals.py b/intentkit/skills/portfolio/wallet_approvals.py new file mode 100644 index 00000000..9bc178f6 --- /dev/null +++ b/intentkit/skills/portfolio/wallet_approvals.py @@ -0,0 +1,100 @@ +import logging +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import ( + DEFAULT_CHAIN, + DEFAULT_LIMIT, +) + +logger = logging.getLogger(__name__) + + +class WalletApprovalsInput(BaseModel): + """Input for wallet token approvals tool.""" + + address: str = Field(description="The wallet address to check token approvals for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + cursor: Optional[str] = Field( + description="The cursor for pagination.", + default=None, + ) + limit: Optional[int] = Field( + description="The number of results per page.", + default=DEFAULT_LIMIT, + ) + + +class WalletApprovals(PortfolioBaseTool): + """Tool for retrieving token approvals for a wallet using Moralis. + + This tool uses Moralis' API to fetch active ERC20 token approvals for the + specified wallet address. + """ + + name: str = "portfolio_wallet_approvals" + description: str = ( + "Retrieve active ERC20 token approvals for the specified wallet address. " + "This helps identify which contracts have permission to spend tokens." 
+ ) + args_schema: Type[BaseModel] = WalletApprovalsInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + cursor: Optional[str] = None, + limit: Optional[int] = DEFAULT_LIMIT, + **kwargs, + ) -> Dict[str, Any]: + """Fetch wallet token approvals from Moralis. + + Args: + address: The wallet address to get approvals for + chain: The blockchain to query + cursor: Pagination cursor + limit: Number of results per page + config: The configuration for the tool call + + Returns: + Dict containing wallet approvals data + """ + context = self.get_context() + logger.debug( + f"wallet_approvals.py: Fetching wallet approvals with context {context}" + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + "limit": limit, + } + + # Add optional parameters if they exist + if cursor: + params["cursor"] = cursor + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/approvals" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_approvals.py: Error fetching wallet approvals: {e}", + exc_info=True, + ) + return { + "error": "An error occurred while fetching wallet approvals. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_defi_positions.py b/intentkit/skills/portfolio/wallet_defi_positions.py new file mode 100644 index 00000000..8c49f3ae --- /dev/null +++ b/intentkit/skills/portfolio/wallet_defi_positions.py @@ -0,0 +1,79 @@ +import logging +from typing import Any, Dict, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import DEFAULT_CHAIN + +logger = logging.getLogger(__name__) + + +class WalletDefiPositionsInput(BaseModel): + """Input for wallet DeFi positions tool.""" + + address: str = Field(description="The wallet address to get DeFi positions for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + + +class WalletDefiPositions(PortfolioBaseTool): + """Tool for retrieving DeFi positions by wallet using Moralis. + + This tool uses Moralis' API to fetch the positions summary of a wallet address, + including liquidity positions, staking, lending, and other DeFi activities. + """ + + name: str = "portfolio_wallet_defi_positions" + description: str = ( + "Get the DeFi positions summary of a wallet address. " + "Returns information about liquidity positions, staking, lending, and other DeFi activities." + ) + args_schema: Type[BaseModel] = WalletDefiPositionsInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + **kwargs, + ) -> Dict[str, Any]: + """Fetch wallet DeFi positions from Moralis. 
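+
+        Example (editor's sketch; assumes a configured skill instance, and the
+        address value is hypothetical):
+            positions = await skill._arun(
+                address="0x1234567890abcdef1234567890abcdef12345678",
+                chain="eth",
+            )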
+
+        Args:
+            address: The wallet address
+            chain: The blockchain to query
+            config: The configuration for the tool call
+
+        Returns:
+            Dict containing DeFi positions data
+        """
+        context = self.get_context()
+        logger.debug(
+            f"wallet_defi_positions.py: Fetching wallet DeFi positions with context {context}"
+        )
+
+        # Get the API key from the agent's configuration, falling back to the
+        # platform key (matches the other portfolio skills)
+        api_key = self.get_api_key()
+        if not api_key:
+            return {"error": "No Moralis API key provided in the configuration."}
+
+        # Build query parameters
+        params = {"chain": chain}
+
+        # Call Moralis API
+        try:
+            endpoint = f"/wallets/{address}/defi/positions"
+            return await self._make_request(
+                method="GET", endpoint=endpoint, api_key=api_key, params=params
+            )
+        except Exception as e:
+            logger.error(
+                f"wallet_defi_positions.py: Error fetching wallet DeFi positions: {e}",
+                exc_info=True,
+            )
+            return {
+                "error": "An error occurred while fetching wallet DeFi positions. Please try again later."
+            }
diff --git a/intentkit/skills/portfolio/wallet_history.py b/intentkit/skills/portfolio/wallet_history.py
new file mode 100644
index 00000000..76137389
--- /dev/null
+++ b/intentkit/skills/portfolio/wallet_history.py
@@ -0,0 +1,153 @@
+import logging
+from typing import Any, Dict, Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.portfolio.base import PortfolioBaseTool
+from intentkit.skills.portfolio.constants import (
+    DEFAULT_CHAIN,
+    DEFAULT_LIMIT,
+    DEFAULT_ORDER,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class WalletHistoryInput(BaseModel):
+    """Input for wallet transaction history tool."""
+
+    address: str = Field(
+        description="The address of the wallet to get transaction history for."
+    )
+    chain: str = Field(
+        description="The chain to query (e.g., 'eth', 'bsc', 'polygon').",
+        default=DEFAULT_CHAIN,
+    )
+    limit: Optional[int] = Field(
+        description="The desired page size of the result.",
+        default=DEFAULT_LIMIT,
+    )
+    cursor: Optional[str] = Field(
+        description="The cursor returned in the previous response (for pagination).",
+        default=None,
+    )
+    from_block: Optional[int] = Field(
+        description="The minimum block number to get transactions from.",
+        default=None,
+    )
+    to_block: Optional[int] = Field(
+        description="The maximum block number to get transactions from.",
+        default=None,
+    )
+    from_date: Optional[str] = Field(
+        description="The start date to get transactions from (format in seconds or datestring).",
+        default=None,
+    )
+    to_date: Optional[str] = Field(
+        description="The end date to get transactions from (format in seconds or datestring).",
+        default=None,
+    )
+    include_internal_transactions: Optional[bool] = Field(
+        description="If the result should contain the internal transactions.",
+        default=None,
+    )
+    nft_metadata: Optional[bool] = Field(
+        description="If the result should contain the NFT metadata.",
+        default=None,
+    )
+    order: Optional[str] = Field(
+        description="The order of the result, in ascending (ASC) or descending (DESC).",
+        default=DEFAULT_ORDER,
+    )
+
+
+class WalletHistory(PortfolioBaseTool):
+    """Tool for retrieving wallet transaction history using Moralis.
+
+    This tool uses Moralis' API to fetch the full transaction history of a specified wallet address,
+    including sends, receives, token and NFT transfers, and contract interactions.
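+
+    Example (editor's sketch; assumes a configured skill instance, and the
+    address value is hypothetical):
+        history = await skill._arun(
+            address="0x1234567890abcdef1234567890abcdef12345678",
+            chain="base",
+            limit=10,
+            order="DESC",
+        )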
+ """ + + name: str = "portfolio_wallet_history" + description: str = ( + "Retrieve the full transaction history of a specified wallet address, including sends, " + "receives, token and NFT transfers, and contract interactions." + ) + args_schema: Type[BaseModel] = WalletHistoryInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + limit: Optional[int] = DEFAULT_LIMIT, + cursor: Optional[str] = None, + from_block: Optional[int] = None, + to_block: Optional[int] = None, + from_date: Optional[str] = None, + to_date: Optional[str] = None, + include_internal_transactions: Optional[bool] = None, + nft_metadata: Optional[bool] = None, + order: Optional[str] = DEFAULT_ORDER, + **kwargs, + ) -> Dict[str, Any]: + """Fetch wallet transaction history from Moralis. + + Args: + address: The wallet address to get history for + chain: The blockchain to query + limit: Number of results per page + cursor: Pagination cursor + from_block: Minimum block number + to_block: Maximum block number + from_date: Start date for transactions + to_date: End date for transactions + include_internal_transactions: Include internal txs + nft_metadata: Include NFT metadata + order: Order of results (ASC/DESC) + config: The configuration for the tool call + + Returns: + Dict containing transaction history data + """ + context = self.get_context() + logger.debug( + f"wallet_history.py: Fetching wallet history with context {context}" + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = {"chain": chain, "limit": limit, "order": order} + + # Add optional parameters if they exist + if cursor: + params["cursor"] = cursor + if from_block: + params["from_block"] = from_block + if to_block: + params["to_block"] = to_block + if from_date: + params["from_date"] = from_date + if to_date: + params["to_date"] = to_date + if include_internal_transactions is not None: + params["include_internal_transactions"] = include_internal_transactions + if nft_metadata is not None: + params["nft_metadata"] = nft_metadata + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/history" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_history.py: Error fetching wallet history: {e}", exc_info=True + ) + return { + "error": "An error occurred while fetching wallet history. Please try again later." 
+ } diff --git a/intentkit/skills/portfolio/wallet_net_worth.py b/intentkit/skills/portfolio/wallet_net_worth.py new file mode 100644 index 00000000..9e488b87 --- /dev/null +++ b/intentkit/skills/portfolio/wallet_net_worth.py @@ -0,0 +1,110 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool + +logger = logging.getLogger(__name__) + + +class WalletNetWorthInput(BaseModel): + """Input for wallet net worth tool.""" + + address: str = Field(description="The wallet address to calculate net worth for.") + chains: Optional[List[str]] = Field( + description="The chains to query (e.g., ['eth', 'bsc', 'polygon']).", + default=None, + ) + exclude_spam: Optional[bool] = Field( + description="Exclude spam tokens from the result.", + default=True, + ) + exclude_unverified_contracts: Optional[bool] = Field( + description="Exclude unverified contracts from the result.", + default=True, + ) + max_token_inactivity: Optional[int] = Field( + description="Exclude tokens inactive for more than the given amount of days.", + default=1, + ) + min_pair_side_liquidity_usd: Optional[float] = Field( + description="Exclude tokens with liquidity less than the specified amount in USD.", + default=1000, + ) + + +class WalletNetWorth(PortfolioBaseTool): + """Tool for calculating a wallet's total net worth using Moralis. + + This tool uses Moralis' API to calculate the net worth of a wallet in USD across + multiple chains, with options to filter out spam and low-liquidity tokens. + """ + + name: str = "portfolio_wallet_net_worth" + description: str = ( + "Get the net worth of a wallet in USD across multiple chains. " + "Filters out spam tokens and low-liquidity assets for more accurate results." + ) + args_schema: Type[BaseModel] = WalletNetWorthInput + + async def _arun( + self, + address: str, + chains: Optional[List[str]] = None, + exclude_spam: Optional[bool] = True, + exclude_unverified_contracts: Optional[bool] = True, + max_token_inactivity: Optional[int] = 1, + min_pair_side_liquidity_usd: Optional[float] = 1000, + **kwargs, + ) -> Dict[str, Any]: + """Calculate wallet net worth from Moralis. 
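+
+        Example (editor's sketch; assumes a configured skill instance, and the
+        address value is hypothetical):
+            net_worth = await skill._arun(
+                address="0x1234567890abcdef1234567890abcdef12345678",
+                chains=["eth", "base"],
+            )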
+ + Args: + address: The wallet address to calculate net worth for + chains: List of chains to query + exclude_spam: Whether to exclude spam tokens + exclude_unverified_contracts: Whether to exclude unverified contracts + max_token_inactivity: Exclude tokens inactive for more than the given days + min_pair_side_liquidity_usd: Exclude tokens with liquidity less than specified + config: The configuration for the tool call + + Returns: + Dict containing wallet net worth data + """ + context = self.get_context() + logger.debug( + f"wallet_net_worth.py: Calculating wallet net worth with context {context}" + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "exclude_spam": exclude_spam, + "exclude_unverified_contracts": exclude_unverified_contracts, + "max_token_inactivity": max_token_inactivity, + "min_pair_side_liquidity_usd": min_pair_side_liquidity_usd, + } + + # Add chains if specified + if chains: + params["chains"] = chains + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/net-worth" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_net_worth.py: Error calculating wallet net worth: {e}", + exc_info=True, + ) + return { + "error": "An error occurred while calculating wallet net worth. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_nfts.py b/intentkit/skills/portfolio/wallet_nfts.py new file mode 100644 index 00000000..1a297e5e --- /dev/null +++ b/intentkit/skills/portfolio/wallet_nfts.py @@ -0,0 +1,137 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import DEFAULT_CHAIN, DEFAULT_LIMIT + +logger = logging.getLogger(__name__) + + +class WalletNFTsInput(BaseModel): + """Input for wallet NFTs tool.""" + + address: str = Field(description="The address of the wallet to get NFTs for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'base', 'polygon').", + default=DEFAULT_CHAIN, + ) + format: Optional[str] = Field( + description="The format of the token ID ('decimal' or 'hex').", + default="decimal", + ) + limit: Optional[int] = Field( + description="The desired page size of the result.", + default=DEFAULT_LIMIT, + ) + exclude_spam: Optional[bool] = Field( + description="Should spam NFTs be excluded from the result?", + default=True, + ) + token_addresses: Optional[List[str]] = Field( + description="The non-fungible token (NFT) addresses to get balances for.", + default=None, + ) + cursor: Optional[str] = Field( + description="The cursor returned in the previous response (for pagination).", + default=None, + ) + normalize_metadata: Optional[bool] = Field( + description="The option to enable metadata normalization.", + default=True, + ) + media_items: Optional[bool] = Field( + description="Should preview media data be returned?", + default=False, + ) + include_prices: Optional[bool] = Field( + description="Should NFT last sale prices be included in the result?", + default=False, + ) + + +class WalletNFTs(PortfolioBaseTool): + """Tool for retrieving NFTs owned by a wallet using Moralis. 
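+
+    Example (editor's sketch; assumes a configured skill instance, and the
+    address value is hypothetical):
+        nfts = await skill._arun(
+            address="0x1234567890abcdef1234567890abcdef12345678",
+            chain="base",
+            limit=10,
+        )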
+ + This tool uses Moralis' API to fetch NFTs owned by a given address, with options + to filter and format the results. + """ + + name: str = "portfolio_wallet_nfts" + description: str = ( + "Get NFTs owned by a given wallet address. Results include token details, " + "metadata, collection information, and optionally prices." + ) + args_schema: Type[BaseModel] = WalletNFTsInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + format: Optional[str] = "decimal", + limit: Optional[int] = DEFAULT_LIMIT, + exclude_spam: Optional[bool] = True, + token_addresses: Optional[List[str]] = None, + cursor: Optional[str] = None, + normalize_metadata: Optional[bool] = True, + media_items: Optional[bool] = False, + include_prices: Optional[bool] = False, + **kwargs, + ) -> Dict[str, Any]: + """Fetch NFTs owned by a wallet from Moralis. + + Args: + address: The wallet address + chain: The blockchain to query + format: The format of the token ID ('decimal' or 'hex') + limit: Number of results per page + exclude_spam: Whether to exclude spam NFTs + token_addresses: Specific NFT contracts to filter by + cursor: Pagination cursor + normalize_metadata: Enable metadata normalization + media_items: Include preview media data + include_prices: Include NFT last sale prices + config: The configuration for the tool call + + Returns: + Dict containing wallet NFTs data + """ + context = self.get_context() + logger.debug(f"wallet_nfts.py: Fetching wallet NFTs with context {context}") + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + "format": format, + "limit": limit, + "exclude_spam": exclude_spam, + "normalizeMetadata": normalize_metadata, + "media_items": media_items, + "include_prices": include_prices, + } + + # Add optional parameters if they exist + if token_addresses: + params["token_addresses"] = token_addresses + if cursor: + params["cursor"] = cursor + + # Call Moralis API + try: + endpoint = f"/{address}/nft" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_nfts.py: Error fetching wallet NFTs: {e}", exc_info=True + ) + return { + "error": "An error occurred while fetching wallet NFTs. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_profitability.py b/intentkit/skills/portfolio/wallet_profitability.py new file mode 100644 index 00000000..48d730ab --- /dev/null +++ b/intentkit/skills/portfolio/wallet_profitability.py @@ -0,0 +1,99 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import DEFAULT_CHAIN + +logger = logging.getLogger(__name__) + + +class WalletProfitabilityInput(BaseModel): + """Input for wallet profitability breakdown tool.""" + + address: str = Field( + description="The wallet address to get profitability breakdown for." + ) + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + days: Optional[str] = Field( + description="Timeframe in days for which profitability is calculated. 
Options: 'all', '7', '30', '60', '90'.", + default="all", + ) + token_addresses: Optional[List[str]] = Field( + description="The token addresses list to filter the result with.", + default=None, + ) + + +class WalletProfitability(PortfolioBaseTool): + """Tool for retrieving detailed wallet profitability breakdown using Moralis. + + This tool uses Moralis' API to retrieve detailed profitability information for a + specific wallet address, with the option to filter by one or more tokens. + """ + + name: str = "portfolio_wallet_profitability" + description: str = ( + "Retrieve detailed profitability breakdown for a wallet, including profit/loss per token, " + "average buy/sell prices, and realized profits. Can be filtered by specific tokens." + ) + args_schema: Type[BaseModel] = WalletProfitabilityInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + days: Optional[str] = "all", + token_addresses: Optional[List[str]] = None, + **kwargs, + ) -> Dict[str, Any]: + """Fetch detailed wallet profitability from Moralis. + + Args: + address: The wallet address to get profitability for + chain: The blockchain to query + days: Timeframe in days for the profitability data + token_addresses: List of token addresses to filter results + config: The configuration for the tool call + + Returns: + Dict containing wallet profitability breakdown data + """ + context = self.get_context() + logger.debug( + f"wallet_profitability.py: Fetching profitability breakdown with context {context}" + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + "days": days, + } + + # Add token_addresses if specified + if token_addresses: + params["token_addresses"] = token_addresses + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/profitability" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_profitability.py: Error fetching profitability breakdown: {e}", + exc_info=True, + ) + return { + "error": "An error occurred while fetching profitability breakdown. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_profitability_summary.py b/intentkit/skills/portfolio/wallet_profitability_summary.py new file mode 100644 index 00000000..6c42aa60 --- /dev/null +++ b/intentkit/skills/portfolio/wallet_profitability_summary.py @@ -0,0 +1,89 @@ +import logging +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import DEFAULT_CHAIN + +logger = logging.getLogger(__name__) + + +class WalletProfitabilitySummaryInput(BaseModel): + """Input for wallet profitability summary tool.""" + + address: str = Field( + description="The wallet address to get profitability summary for." + ) + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + days: Optional[str] = Field( + description="Timeframe in days for the profitability summary. Options: 'all', '7', '30', '60', '90'.", + default="all", + ) + + +class WalletProfitabilitySummary(PortfolioBaseTool): + """Tool for retrieving wallet profitability summary using Moralis. 
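+
+    Example (editor's sketch; assumes a configured skill instance, and the
+    address value is hypothetical):
+        summary = await skill._arun(
+            address="0x1234567890abcdef1234567890abcdef12345678",
+            chain="eth",
+            days="30",
+        )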
+ + This tool uses Moralis' API to retrieve a summary of wallet profitability + based on specified parameters. + """ + + name: str = "portfolio_wallet_profitability_summary" + description: str = ( + "Retrieve a summary of wallet profitability including total profit/loss, " + "trade volume, and other metrics. Filter by time period." + ) + args_schema: Type[BaseModel] = WalletProfitabilitySummaryInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + days: Optional[str] = "all", + **kwargs, + ) -> Dict[str, Any]: + """Fetch wallet profitability summary from Moralis. + + Args: + address: The wallet address to get profitability for + chain: The blockchain to query + days: Timeframe in days for the summary + config: The configuration for the tool call + + Returns: + Dict containing wallet profitability summary data + """ + context = self.get_context() + logger.debug( + f"wallet_profitability_summary.py: Fetching profitability summary with context {context}" + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + "days": days, + } + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/profitability/summary" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_profitability_summary.py: Error fetching profitability summary: {e}", + exc_info=True, + ) + return { + "error": "An error occurred while fetching profitability summary. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_stats.py b/intentkit/skills/portfolio/wallet_stats.py new file mode 100644 index 00000000..2298efda --- /dev/null +++ b/intentkit/skills/portfolio/wallet_stats.py @@ -0,0 +1,77 @@ +import logging +from typing import Any, Dict, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import DEFAULT_CHAIN + +logger = logging.getLogger(__name__) + + +class WalletStatsInput(BaseModel): + """Input for wallet stats tool.""" + + address: str = Field(description="The wallet address to get stats for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + + +class WalletStats(PortfolioBaseTool): + """Tool for retrieving wallet statistics using Moralis. + + This tool uses Moralis' API to get high-level statistical information about + a wallet, including NFT counts, collection counts, and transaction counts. + """ + + name: str = "portfolio_wallet_stats" + description: str = ( + "Get statistical information about a wallet, including the number of NFTs, " + "collections, and transaction counts." + ) + args_schema: Type[BaseModel] = WalletStatsInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + **kwargs, + ) -> Dict[str, Any]: + """Fetch wallet stats from Moralis. 
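+
+        Example (editor's sketch; assumes a configured skill instance, and the
+        address value is hypothetical):
+            stats = await skill._arun(
+                address="0x1234567890abcdef1234567890abcdef12345678",
+                chain="base",
+            )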
+ + Args: + address: The wallet address to get stats for + chain: The blockchain to query + config: The configuration for the tool call + + Returns: + Dict containing wallet stats data + """ + context = self.get_context() + logger.debug(f"wallet_stats.py: Fetching wallet stats with context {context}") + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + } + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/stats" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_stats.py: Error fetching wallet stats: {e}", exc_info=True + ) + return { + "error": "An error occurred while fetching wallet stats. Please try again later." + } diff --git a/intentkit/skills/portfolio/wallet_swaps.py b/intentkit/skills/portfolio/wallet_swaps.py new file mode 100644 index 00000000..6ab61c19 --- /dev/null +++ b/intentkit/skills/portfolio/wallet_swaps.py @@ -0,0 +1,145 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.portfolio.base import PortfolioBaseTool +from intentkit.skills.portfolio.constants import ( + DEFAULT_CHAIN, + DEFAULT_LIMIT, + DEFAULT_ORDER, +) + +logger = logging.getLogger(__name__) + + +class WalletSwapsInput(BaseModel): + """Input for wallet swaps tool.""" + + address: str = Field(description="The wallet address to get swap transactions for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + cursor: Optional[str] = Field( + description="The cursor for pagination.", + default=None, + ) + limit: Optional[int] = Field( + description="The number of results per page.", + default=DEFAULT_LIMIT, + ) + from_block: Optional[str] = Field( + description="The minimum block number to get transactions from.", + default=None, + ) + to_block: Optional[str] = Field( + description="The maximum block number to get transactions from.", + default=None, + ) + from_date: Optional[str] = Field( + description="The start date to get transactions from (format in seconds or datestring).", + default=None, + ) + to_date: Optional[str] = Field( + description="The end date to get transactions from (format in seconds or datestring).", + default=None, + ) + order: Optional[str] = Field( + description="The order of the result (ASC or DESC).", + default=DEFAULT_ORDER, + ) + transaction_types: Optional[List[str]] = Field( + description="Array of transaction types. Allowed values are 'buy', 'sell'.", + default=None, + ) + + +class WalletSwaps(PortfolioBaseTool): + """Tool for retrieving swap-related transactions for a wallet using Moralis. + + This tool uses Moralis' API to fetch all swap-related (buy, sell) transactions + for a specific wallet address. + """ + + name: str = "portfolio_wallet_swaps" + description: str = ( + "Get all swap-related transactions (buy, sell) for a wallet address. " + "Note that swaps data is only available from September 2024 onwards." 
+ ) + args_schema: Type[BaseModel] = WalletSwapsInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + cursor: Optional[str] = None, + limit: Optional[int] = DEFAULT_LIMIT, + from_block: Optional[str] = None, + to_block: Optional[str] = None, + from_date: Optional[str] = None, + to_date: Optional[str] = None, + order: Optional[str] = DEFAULT_ORDER, + transaction_types: Optional[List[str]] = None, + **kwargs, + ) -> Dict[str, Any]: + """Fetch wallet swap transactions from Moralis. + + Args: + address: The wallet address to get swaps for + chain: The blockchain to query + cursor: Pagination cursor + limit: Number of results per page + from_block: Minimum block number for transactions + to_block: Maximum block number for transactions + from_date: Start date for transactions + to_date: End date for transactions + order: Order of results (ASC/DESC) + transaction_types: Types of transactions to include ('buy', 'sell') + config: The configuration for the tool call + + Returns: + Dict containing wallet swaps data + """ + context = self.get_context() + logger.debug(f"wallet_swaps.py: Fetching wallet swaps with context {context}") + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = { + "chain": chain, + "limit": limit, + "order": order, + } + + # Add optional parameters if they exist + if cursor: + params["cursor"] = cursor + if from_block: + params["fromBlock"] = from_block + if to_block: + params["toBlock"] = to_block + if from_date: + params["fromDate"] = from_date + if to_date: + params["toDate"] = to_date + if transaction_types: + params["transactionTypes"] = transaction_types + + # Call Moralis API + try: + endpoint = f"/wallets/{address}/swaps" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error( + f"wallet_swaps.py: Error fetching wallet swaps: {e}", exc_info=True + ) + return { + "error": "An error occurred while fetching wallet swaps. Please try again later." 
Please try again later."
+ } diff --git a/intentkit/skills/skills.toml b/intentkit/skills/skills.toml new file mode 100644 index 00000000..dcbb3f52 --- /dev/null +++ b/intentkit/skills/skills.toml @@ -0,0 +1,107 @@ +[acolyt] +author_github = "TxCorpi0x" +author_wallet = "0x2Bd32A312280bF5A01140e68ca630fB76cE8A3De" + +[aixbt] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[allora] +author_github = "TxCorpi0x" +author_wallet = "0x2Bd32A312280bF5A01140e68ca630fB76cE8A3De" + +[cdp] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[casino] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[chainlist] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[common] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[cryptocompare] +author_github = "0xkieranwilliams" +author_wallet = "0x91D43BfDc698b1e510efa0811e2e07F628D02e6b" + +[cryptopanic] +author_github = "v1ktorrr0x" +author_wallet = "0x178741Fc5BA9B77147398853c28736eEFe5fCff1" + +[dapplooker] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[defillama] +author_github = "0xkieranwilliams" +author_wallet = "0x91D43BfDc698b1e510efa0811e2e07F628D02e6b" + +[elfa] +author_github = "TxCorpi0x" +author_wallet = "0x2Bd32A312280bF5A01140e68ca630fB76cE8A3De" + +[enso] +author_github = "TxCorpi0x" +author_wallet = "0x2Bd32A312280bF5A01140e68ca630fB76cE8A3De" + +[github] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[goat] +author_github = "TxCorpi0x" +author_wallet = "0x2Bd32A312280bF5A01140e68ca630fB76cE8A3De" + +[heurist] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[moralis] +author_github = "developerfred" +author_wallet = "0xd1a8Dd23e356B9fAE27dF5DeF9ea025A602EC81e" + +[nation] +author_github = "spidemen" +author_wallet = "0x275960ad41DbE218bBf72cDF612F88b5C6f40648" + +[openai] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[slack] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[system] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[tavily] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[twitter] +author_github = "hyacinthus" +author_wallet = "0x445750026A4a1906b61302442E085f9cbAfe206a" + +[unrealspeech] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[venice_image] +author_github = "yornfifty" +author_wallet = "0xF60D4B6780D5D51827602D7aC319458bc9e921F4" + +[portfolio] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" + +[token] +author_github = "bluntbrain" +author_wallet = "0x3cdd051eeC909f94965F9c1c657f5b70a172B2C0" diff --git a/intentkit/skills/slack/__init__.py b/intentkit/skills/slack/__init__.py new file mode 100644 index 00000000..ea3d068d --- /dev/null +++ b/intentkit/skills/slack/__init__.py @@ -0,0 +1,98 @@ +"""Slack skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.slack.base import SlackBaseTool +from intentkit.skills.slack.get_channel import SlackGetChannel +from 
intentkit.skills.slack.get_message import SlackGetMessage
+from intentkit.skills.slack.schedule_message import SlackScheduleMessage
+from intentkit.skills.slack.send_message import SlackSendMessage
+
+# We cache skills at the system level because they are stateless
+_cache: dict[str, SlackBaseTool] = {}
+
+logger = logging.getLogger(__name__)
+
+
+class SkillStates(TypedDict):
+    get_channel: SkillState
+    get_message: SkillState
+    schedule_message: SkillState
+    send_message: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for Slack skills."""
+
+    states: SkillStates
+    slack_bot_token: str
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[SlackBaseTool]:
+    """Get all Slack skills."""
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter
+    result = []
+    for name in available_skills:
+        skill = get_slack_skill(name, store)
+        if skill:
+            result.append(skill)
+    return result
+
+
+# Map skill names to their classes so the getter stays declarative
+_SKILL_CLASSES: dict[str, type[SlackBaseTool]] = {
+    "get_channel": SlackGetChannel,
+    "get_message": SlackGetMessage,
+    "schedule_message": SlackScheduleMessage,
+    "send_message": SlackSendMessage,
+}
+
+
+def get_slack_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> SlackBaseTool | None:
+    """Get a Slack skill by name.
+
+    Args:
+        name: The name of the skill to get
+        store: The skill store for persisting data
+
+    Returns:
+        The requested Slack skill, or None if the name is unknown
+    """
+    if name not in _SKILL_CLASSES:
+        logger.warning(f"Unknown Slack skill: {name}")
+        return None
+    if name not in _cache:
+        _cache[name] = _SKILL_CLASSES[name](skill_store=store)
+    return _cache[name]
diff --git a/intentkit/skills/slack/base.py b/intentkit/skills/slack/base.py
new file mode 100644
index 00000000..8b22f40c
--- /dev/null
+++ b/intentkit/skills/slack/base.py
@@ -0,0 +1,73 @@
+from typing import Optional, Type
+
+from langchain.tools.base import ToolException
+from pydantic import BaseModel, Field
+from slack_sdk import WebClient
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import IntentKitSkill
+
+
+class SlackBaseTool(IntentKitSkill):
+    """Base class for Slack tools."""
+
+    name: str = Field(description="The name of the tool")
+    description: str = Field(description="A description of what the tool does")
+    args_schema: Type[BaseModel]
+    skill_store: SkillStoreABC = Field(
+        description="The skill store for persisting data"
+    )
+
+    def get_api_key(self) -> str:
+        context = self.get_context()
+        skill_config = context.agent.skill_config(self.category)
+        api_key_provider = skill_config.get("api_key_provider")
+        if api_key_provider == "agent_owner":
+            slack_bot_token = skill_config.get("slack_bot_token")
+            if slack_bot_token:
+                return slack_bot_token
+            else:
+                raise ToolException(
+                    "No slack_bot_token found in agent_owner configuration"
+                )
+        else:
+            raise ToolException(
+                f"Invalid API key provider: {api_key_provider}. Only 'agent_owner' is supported for Slack."
+ ) + + @property + def category(self) -> str: + return "slack" + + def get_client(self, token: str) -> WebClient: + """Get a Slack WebClient instance. + + Args: + token: The Slack bot token to use + + Returns: + WebClient: A configured Slack client + """ + return WebClient(token=token) + + +class SlackChannel(BaseModel): + """Model representing a Slack channel.""" + + id: str + name: str + is_private: bool + created: int + creator: str + is_archived: bool + members: list[str] = [] + + +class SlackMessage(BaseModel): + """Model representing a Slack message.""" + + ts: str + text: str + user: str + channel: str + thread_ts: Optional[str] = None diff --git a/intentkit/skills/slack/get_channel.py b/intentkit/skills/slack/get_channel.py new file mode 100644 index 00000000..1f28c54e --- /dev/null +++ b/intentkit/skills/slack/get_channel.py @@ -0,0 +1,108 @@ +from typing import Any, Dict, Optional, Type, Union + +from pydantic import BaseModel, Field + +from intentkit.skills.slack.base import SlackBaseTool, SlackChannel + + +class SlackGetChannelSchema(BaseModel): + """Input schema for SlackGetChannel.""" + + channel_id: Optional[str] = Field( + None, + description="The ID of the channel to get information about. Provide either channel_id or channel_name.", + ) + channel_name: Optional[str] = Field( + None, + description="The name of the channel to get information about. Provide either channel_id or channel_name.", + ) + + +class SlackGetChannel(SlackBaseTool): + """Tool for getting information about a Slack channel.""" + + name: str = "slack_get_channel" + description: str = "Get information about a Slack channel by ID or name" + args_schema: Type[BaseModel] = SlackGetChannelSchema + + async def _arun( + self, + channel_id: Optional[str] = None, + channel_name: Optional[str] = None, + **kwargs, + ) -> Union[SlackChannel, Dict[str, SlackChannel]]: + """Run the tool to get information about a Slack channel. 
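+
+        Example (editor's sketch; the channel name is hypothetical):
+            channel = await skill._arun(channel_name="general")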
+ + Args: + channel_id: The ID of the channel to get information about + channel_name: The name of the channel to get information about + + Returns: + Information about the requested channel or all channels if no ID/name provided + + Raises: + ValueError: If neither channel_id nor channel_name is provided + Exception: If an error occurs getting the channel information + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + client = self.get_client(skill_config.get("slack_bot_token")) + + try: + # If no channel specified, return a dict of all channels + if not channel_id and not channel_name: + # Get all channels + response = client.conversations_list() + if response["ok"]: + channels = {} + for channel in response["channels"]: + channels[channel["id"]] = self._format_channel(channel) + return channels + else: + raise Exception(f"Error getting channels: {response['error']}") + + # First try to find by channel_id if provided + if channel_id: + response = client.conversations_info(channel=channel_id) + if response["ok"]: + return self._format_channel(response["channel"]) + else: + raise Exception(f"Error getting channel: {response['error']}") + + # Otherwise try to find by channel_name + if channel_name: + # If channel name doesn't start with #, add it + if not channel_name.startswith("#"): + channel_name = f"#{channel_name}" + + # Get all channels and filter by name + response = client.conversations_list() + if response["ok"]: + for channel in response["channels"]: + if channel["name"] == channel_name.lstrip("#"): + return self._format_channel(channel) + raise ValueError(f"Channel {channel_name} not found") + else: + raise Exception(f"Error getting channels: {response['error']}") + + except Exception as e: + raise Exception(f"Error getting channel information: {str(e)}") + + def _format_channel(self, channel: Dict[str, Any]) -> SlackChannel: + """Format the channel data into a SlackChannel model. + + Args: + channel: The raw channel data from the Slack API + + Returns: + A formatted SlackChannel object + """ + return SlackChannel( + id=channel["id"], + name=channel["name"], + is_private=channel.get("is_private", False), + created=channel.get("created", 0), + creator=channel.get("creator", ""), + is_archived=channel.get("is_archived", False), + members=channel.get("members", []), + ) diff --git a/intentkit/skills/slack/get_message.py b/intentkit/skills/slack/get_message.py new file mode 100644 index 00000000..d98f6a7b --- /dev/null +++ b/intentkit/skills/slack/get_message.py @@ -0,0 +1,135 @@ +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.slack.base import SlackBaseTool, SlackMessage + + +class SlackGetMessageSchema(BaseModel): + """Input schema for SlackGetMessage.""" + + channel_id: str = Field( + description="The ID of the channel containing the message", + ) + ts: Optional[str] = Field( + None, + description="The timestamp of a specific message to retrieve. 
If not provided, returns recent messages.", + ) + thread_ts: Optional[str] = Field( + None, + description="If provided, retrieve messages from this thread instead of the channel.", + ) + limit: Optional[int] = Field( + 10, + description="The maximum number of messages to return (1-100, default 10).", + ) + + +class SlackGetMessage(SlackBaseTool): + """Tool for getting messages from a Slack channel or thread.""" + + name: str = "slack_get_message" + description: str = "Get messages from a Slack channel or thread" + args_schema: Type[BaseModel] = SlackGetMessageSchema + + async def _arun( + self, + channel_id: str, + ts: Optional[str] = None, + thread_ts: Optional[str] = None, + limit: int = 10, + **kwargs, + ) -> Dict[str, Any]: + """Run the tool to get Slack messages. + + Args: + channel_id: The ID of the channel to get messages from + ts: The timestamp of a specific message to retrieve + thread_ts: If provided, retrieve messages from this thread + limit: Maximum number of messages to return (1-100) + + Returns: + A dictionary containing the requested messages + + Raises: + Exception: If an error occurs getting the messages + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + client = self.get_client(skill_config.get("slack_bot_token")) + + try: + # Ensure limit is within bounds + if limit < 1: + limit = 1 + elif limit > 100: + limit = 100 + + # Get a specific message by timestamp + if ts and not thread_ts: + response = client.conversations_history( + channel=channel_id, latest=ts, limit=1, inclusive=True + ) + if response["ok"] and response["messages"]: + return { + "messages": [ + self._format_message(response["messages"][0], channel_id) + ] + } + else: + raise Exception(f"Message with timestamp {ts} not found") + + # Get messages from a thread + elif thread_ts: + response = client.conversations_replies( + channel=channel_id, ts=thread_ts, limit=limit + ) + if response["ok"]: + return { + "messages": [ + self._format_message(msg, channel_id) + for msg in response["messages"] + ], + "has_more": response.get("has_more", False), + } + else: + raise Exception( + f"Error getting thread messages: {response.get('error')}" + ) + + # Get channel history + else: + response = client.conversations_history(channel=channel_id, limit=limit) + if response["ok"]: + return { + "messages": [ + self._format_message(msg, channel_id) + for msg in response["messages"] + ], + "has_more": response.get("has_more", False), + } + else: + raise Exception( + f"Error getting channel messages: {response.get('error')}" + ) + + except Exception as e: + raise Exception(f"Error getting messages: {str(e)}") + + def _format_message(self, message: Dict[str, Any], channel_id: str) -> SlackMessage: + """Format the message data into a SlackMessage model. 
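+
+        Example (editor's sketch; values are hypothetical):
+            msg = self._format_message(
+                {"ts": "1700000000.000100", "text": "hello", "user": "U123"},
+                channel_id="C0123456789",
+            )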
+
+        Args:
+            message: The raw message data from the Slack API
+            channel_id: The channel ID the message belongs to
+
+        Returns:
+            A formatted SlackMessage object
+        """
+        return SlackMessage(
+            ts=message["ts"],
+            text=message["text"],
+            user=message.get("user", ""),
+            channel=channel_id,
+            thread_ts=message.get("thread_ts"),
+        )
diff --git a/intentkit/skills/slack/schedule_message.py b/intentkit/skills/slack/schedule_message.py
new file mode 100644
index 00000000..55f4692f
--- /dev/null
+++ b/intentkit/skills/slack/schedule_message.py
@@ -0,0 +1,91 @@
+from datetime import datetime
+from typing import Any, Dict, Optional, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.skills.slack.base import SlackBaseTool
+
+
+class SlackScheduleMessageSchema(BaseModel):
+    """Input schema for SlackScheduleMessage."""
+
+    channel_id: str = Field(
+        description="The ID of the channel to send the scheduled message to",
+    )
+    text: str = Field(
+        description="The text content of the message to schedule",
+    )
+    post_at: str = Field(
+        description="The time to send the message in ISO format (e.g., '2023-12-25T10:00:00Z')",
+    )
+    thread_ts: Optional[str] = Field(
+        None,
+        description="The timestamp of the thread to reply to, if sending a thread reply",
+    )
+
+
+class SlackScheduleMessage(SlackBaseTool):
+    """Tool for scheduling messages to be sent to a Slack channel or thread."""
+
+    name: str = "slack_schedule_message"
+    description: str = "Schedule a message to be sent to a Slack channel or thread at a specific time. If you need the current time, use the common_current_time skill."
+    args_schema: Type[BaseModel] = SlackScheduleMessageSchema
+
+    async def _arun(
+        self,
+        channel_id: str,
+        text: str,
+        post_at: str,
+        thread_ts: Optional[str] = None,
+        **kwargs,
+    ) -> Dict[str, Any]:
+        """Run the tool to schedule a Slack message.
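+
+        Example (editor's sketch; IDs and timestamp are hypothetical):
+            result = await skill._arun(
+                channel_id="C0123456789",
+                text="Reminder: standup in ten minutes",
+                post_at="2025-01-01T09:00:00Z",
+            )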
+ + Args: + channel_id: The ID of the channel to send the message to + text: The text content of the message to schedule + post_at: The time to send the message in ISO format + thread_ts: The timestamp of the thread to reply to, if sending a thread reply + + Returns: + Information about the scheduled message + + Raises: + Exception: If an error occurs scheduling the message + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + client = self.get_client(skill_config.get("slack_bot_token")) + + try: + # Convert ISO datetime string to Unix timestamp + post_datetime = datetime.fromisoformat(post_at.replace("Z", "+00:00")) + post_time_unix = int(post_datetime.timestamp()) + + # Prepare message parameters + message_params = { + "channel": channel_id, + "text": text, + "post_at": post_time_unix, + } + + # Add thread_ts if replying to a thread + if thread_ts: + message_params["thread_ts"] = thread_ts + + # Schedule the message + response = client.chat_scheduleMessage(**message_params) + + if response["ok"]: + return { + "channel": channel_id, + "scheduled_message_id": response["scheduled_message_id"], + "post_at": post_at, + "text": text, + "thread_ts": thread_ts, + } + else: + raise Exception(f"Error scheduling message: {response.get('error')}") + + except Exception as e: + raise Exception(f"Error scheduling message: {str(e)}") diff --git a/intentkit/skills/slack/schema.json b/intentkit/skills/slack/schema.json new file mode 100644 index 00000000..0425c21e --- /dev/null +++ b/intentkit/skills/slack/schema.json @@ -0,0 +1,135 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Slack", + "description": "Integration with Slack API enabling workspace communication including channel management, message retrieval, and posting capabilities for team collaboration", + "x-icon": "https://ai.service.crestal.dev/skills/slack/slack.jpg", + "x-tags": [ + "Social" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "get_channel": { + "type": "string", + "title": "Get Channel", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State of the get_channel skill", + "default": "disabled" + }, + "get_message": { + "type": "string", + "title": "Get Message", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State of the get_message skill", + "default": "disabled" + }, + "schedule_message": { + "type": "string", + "title": "Schedule Message", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State of the schedule_message skill", + "default": "disabled" + }, + "send_message": { + "type": "string", + "title": "Send Message", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "State of the send_message skill", + "default": "disabled" + } + }, + "description": "States for each Slack skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key for the Slack
API service", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "slack_bot_token": { + "type": "string", + "title": "Slack Bot Token", + "x-link": "[Get your API key](https://api.slack.com/)", + "x-sensitive": true, + "description": "Slack bot token for API access" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "slack_bot_token" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/slack/send_message.py b/intentkit/skills/slack/send_message.py new file mode 100644 index 00000000..922fb5e9 --- /dev/null +++ b/intentkit/skills/slack/send_message.py @@ -0,0 +1,80 @@ +from typing import Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.slack.base import SlackBaseTool, SlackMessage + + +class SlackSendMessageSchema(BaseModel): + """Input schema for SlackSendMessage.""" + + channel_id: str = Field( + description="The ID of the channel to send the message to", + ) + text: str = Field( + description="The text content of the message to send", + ) + thread_ts: Optional[str] = Field( + None, + description="The timestamp of the thread to reply to, if sending a thread reply", + ) + + +class SlackSendMessage(SlackBaseTool): + """Tool for sending messages to a Slack channel or thread.""" + + name: str = "slack_send_message" + description: str = "Send a message to a Slack channel or thread" + args_schema: Type[BaseModel] = SlackSendMessageSchema + + async def _arun( + self, + channel_id: str, + text: str, + thread_ts: Optional[str] = None, + **kwargs, + ) -> SlackMessage: + """Run the tool to send a Slack message. 
+ + Args: + channel_id: The ID of the channel to send the message to + text: The text content of the message to send + thread_ts: The timestamp of the thread to reply to, if sending a thread reply + + Returns: + Information about the sent message + + Raises: + Exception: If an error occurs sending the message + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + client = self.get_client(skill_config.get("slack_bot_token")) + + try: + # Prepare message parameters + message_params = { + "channel": channel_id, + "text": text, + } + + # Add thread_ts if replying to a thread + if thread_ts: + message_params["thread_ts"] = thread_ts + + # Send the message + response = client.chat_postMessage(**message_params) + + if response["ok"]: + return SlackMessage( + ts=response["ts"], + text=text, + user=response["message"]["user"], + channel=channel_id, + thread_ts=thread_ts, + ) + else: + raise Exception(f"Error sending message: {response.get('error')}") + + except Exception as e: + raise Exception(f"Error sending message: {str(e)}") diff --git a/intentkit/skills/slack/slack.jpg b/intentkit/skills/slack/slack.jpg new file mode 100644 index 00000000..92808633 Binary files /dev/null and b/intentkit/skills/slack/slack.jpg differ diff --git a/intentkit/skills/supabase/__init__.py b/intentkit/skills/supabase/__init__.py new file mode 100644 index 00000000..d706b137 --- /dev/null +++ b/intentkit/skills/supabase/__init__.py @@ -0,0 +1,117 @@ +"""Supabase skills.""" + +import logging +from typing import NotRequired, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.supabase.base import SupabaseBaseTool +from intentkit.skills.supabase.delete_data import SupabaseDeleteData +from intentkit.skills.supabase.fetch_data import SupabaseFetchData +from intentkit.skills.supabase.insert_data import SupabaseInsertData +from intentkit.skills.supabase.invoke_function import SupabaseInvokeFunction +from intentkit.skills.supabase.update_data import SupabaseUpdateData +from intentkit.skills.supabase.upsert_data import SupabaseUpsertData + +# Cache skills at the system level, because they are stateless +_cache: dict[str, SupabaseBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + fetch_data: SkillState + insert_data: SkillState + update_data: SkillState + upsert_data: SkillState + delete_data: SkillState + invoke_function: SkillState + + +class Config(SkillConfig): + """Configuration for Supabase skills.""" + + states: SkillStates + supabase_url: str + supabase_key: str + public_write_tables: NotRequired[str] + public_key: NotRequired[str] + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[SupabaseBaseTool]: + """Get all Supabase skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_supabase_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_supabase_skill( + name: str, + store: SkillStoreABC, +) -> SupabaseBaseTool: + """Get a Supabase skill by name. 
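+
+    Example (illustrative):
+
+        skill = get_supabase_skill("fetch_data", store)
+        # Repeated lookups return the same cached, stateless instance:
+        assert skill is get_supabase_skill("fetch_data", store)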
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Supabase skill + """ + if name == "fetch_data": + if name not in _cache: + _cache[name] = SupabaseFetchData( + skill_store=store, + ) + return _cache[name] + elif name == "insert_data": + if name not in _cache: + _cache[name] = SupabaseInsertData( + skill_store=store, + ) + return _cache[name] + elif name == "update_data": + if name not in _cache: + _cache[name] = SupabaseUpdateData( + skill_store=store, + ) + return _cache[name] + elif name == "upsert_data": + if name not in _cache: + _cache[name] = SupabaseUpsertData( + skill_store=store, + ) + return _cache[name] + elif name == "delete_data": + if name not in _cache: + _cache[name] = SupabaseDeleteData( + skill_store=store, + ) + return _cache[name] + elif name == "invoke_function": + if name not in _cache: + _cache[name] = SupabaseInvokeFunction( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown Supabase skill: {name}") + return None diff --git a/intentkit/skills/supabase/base.py b/intentkit/skills/supabase/base.py new file mode 100644 index 00000000..9508be5a --- /dev/null +++ b/intentkit/skills/supabase/base.py @@ -0,0 +1,83 @@ +from typing import Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.graph import AgentContext +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class SupabaseBaseTool(IntentKitSkill): + """Base class for Supabase tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "supabase" + + def get_supabase_config(self, context: AgentContext) -> tuple[str, str]: + """Get Supabase URL and key from config. + + Args: + config: The agent configuration + context: The skill context containing configuration and mode info + + Returns: + Tuple of (supabase_url, supabase_key) + + Raises: + ValueError: If required config is missing + """ + config = context.agent.skill_config(self.category) + supabase_url = config.get("supabase_url") + + # Use public_key for public operations if available, otherwise fall back to supabase_key + if context.is_private: + supabase_key = config.get("supabase_key") + else: + # Try public_key first, fall back to supabase_key if public_key doesn't exist + supabase_key = config.get("public_key") or config.get("supabase_key") + + if not supabase_url: + raise ValueError("supabase_url is required in config") + if not supabase_key: + raise ValueError("supabase_key is required in config") + + return supabase_url, supabase_key + + def validate_table_access(self, table: str, context: AgentContext) -> None: + """Validate if the table can be accessed for write operations in public mode. 
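+
+        Example (illustrative; assumes public_write_tables is set to "posts,comments"):
+
+            self.validate_table_access("posts", context)  # allowed
+            self.validate_table_access("users", context)  # raises ToolException
+                                                          # when not context.is_private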
+ + Args: + table: The table name to validate + context: The skill context containing configuration and mode info + + Raises: + ToolException: If table access is not allowed in public mode + """ + # If in private mode (owner mode), no restrictions apply + if context.is_private: + return + + config = context.agent.skill_config(self.category) + + # In public mode, check if table is in allowed list + public_write_tables = config.get("public_write_tables", "") + if not public_write_tables: + return + + allowed_tables = [ + t.strip() for t in public_write_tables.split(",") if t.strip() + ] + if table not in allowed_tables: + raise ToolException( + f"Table '{table}' is not allowed for public write operations. " + f"Allowed tables: {', '.join(allowed_tables)}" + ) diff --git a/intentkit/skills/supabase/delete_data.py b/intentkit/skills/supabase/delete_data.py new file mode 100644 index 00000000..a86041f7 --- /dev/null +++ b/intentkit/skills/supabase/delete_data.py @@ -0,0 +1,100 @@ +import logging +from typing import Any, Dict, Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field +from supabase import Client, create_client + +from intentkit.skills.supabase.base import SupabaseBaseTool + +NAME = "supabase_delete_data" +PROMPT = "Delete data from a Supabase table based on filtering conditions." + +logger = logging.getLogger(__name__) + + +class SupabaseDeleteDataInput(BaseModel): + """Input for SupabaseDeleteData tool.""" + + table: str = Field(description="The name of the table to delete data from") + filters: Dict[str, Any] = Field( + description="Dictionary of filters to identify which records to delete (e.g., {'id': 123})" + ) + returning: str = Field( + default="*", + description="Columns to return from deleted records (default: '*' for all)", + ) + + +class SupabaseDeleteData(SupabaseBaseTool): + """Tool for deleting data from Supabase tables. + + This tool allows deleting records from Supabase tables based on filter conditions. 
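+
+    Example input (illustrative values):
+
+        {"table": "sessions", "filters": {"expires_at": {"lt": "2024-01-01"}}}
+
+    A nested dict applies the named operator (lt, gte, like, ...); a plain
+    value such as {"id": 123} is treated as an equality filter.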
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = SupabaseDeleteDataInput + + async def _arun( + self, + table: str, + filters: Dict[str, Any], + returning: str = "*", + **kwargs, + ): + try: + context = self.get_context() + + # Validate table access for public mode + self.validate_table_access(table, context) + + supabase_url, supabase_key = self.get_supabase_config(context) + + # Create Supabase client + supabase: Client = create_client(supabase_url, supabase_key) + + # Start building the delete query + query = supabase.table(table).delete() + + # Apply filters to identify which records to delete + for column, value in filters.items(): + if isinstance(value, dict): + # Handle complex filters like {'gte': 18} + for operator, filter_value in value.items(): + if operator == "eq": + query = query.eq(column, filter_value) + elif operator == "neq": + query = query.neq(column, filter_value) + elif operator == "gt": + query = query.gt(column, filter_value) + elif operator == "gte": + query = query.gte(column, filter_value) + elif operator == "lt": + query = query.lt(column, filter_value) + elif operator == "lte": + query = query.lte(column, filter_value) + elif operator == "like": + query = query.like(column, filter_value) + elif operator == "ilike": + query = query.ilike(column, filter_value) + elif operator == "in": + query = query.in_(column, filter_value) + else: + logger.warning(f"Unknown filter operator: {operator}") + else: + # Simple equality filter + query = query.eq(column, value) + + # Execute the delete + response = query.execute() + + return { + "success": True, + "data": response.data, + "count": len(response.data) if response.data else 0, + } + + except Exception as e: + logger.error(f"Error deleting data from Supabase: {str(e)}") + raise ToolException(f"Failed to delete data from table '{table}': {str(e)}") diff --git a/intentkit/skills/supabase/fetch_data.py b/intentkit/skills/supabase/fetch_data.py new file mode 100644 index 00000000..9fe77d36 --- /dev/null +++ b/intentkit/skills/supabase/fetch_data.py @@ -0,0 +1,118 @@ +import logging +from typing import Any, Dict, Optional, Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field +from supabase import Client, create_client + +from intentkit.skills.supabase.base import SupabaseBaseTool + +NAME = "supabase_fetch_data" +PROMPT = "Fetch data from a Supabase table with optional filtering, ordering, and pagination." + +logger = logging.getLogger(__name__) + + +class SupabaseFetchDataInput(BaseModel): + """Input for SupabaseFetchData tool.""" + + table: str = Field(description="The name of the table to fetch data from") + columns: Optional[str] = Field( + default="*", + description="Comma-separated list of columns to select (default: '*' for all)", + ) + filters: Optional[Dict[str, Any]] = Field( + default=None, + description="Dictionary of filters to apply (e.g., {'column': 'value', 'age': {'gte': 18}})", + ) + order_by: Optional[str] = Field(default=None, description="Column to order by") + ascending: bool = Field( + default=True, description="Whether to order in ascending order (default: True)" + ) + limit: Optional[int] = Field( + default=None, description="Maximum number of records to return" + ) + offset: Optional[int] = Field( + default=None, description="Number of records to skip for pagination" + ) + + +class SupabaseFetchData(SupabaseBaseTool): + """Tool for fetching data from Supabase tables. 
+ + This tool allows querying Supabase tables with filtering, ordering, and pagination. + """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = SupabaseFetchDataInput + + async def _arun( + self, + table: str, + columns: Optional[str] = "*", + filters: Optional[Dict[str, Any]] = None, + order_by: Optional[str] = None, + ascending: bool = True, + limit: Optional[int] = None, + offset: Optional[int] = None, + **kwargs, + ): + try: + context = self.get_context() + supabase_url, supabase_key = self.get_supabase_config(context) + + # Create Supabase client + supabase: Client = create_client(supabase_url, supabase_key) + + # Start building the query + query = supabase.table(table).select(columns) + + # Apply filters if provided + if filters: + for column, value in filters.items(): + if isinstance(value, dict): + # Handle complex filters like {'gte': 18} + for operator, filter_value in value.items(): + if operator == "eq": + query = query.eq(column, filter_value) + elif operator == "neq": + query = query.neq(column, filter_value) + elif operator == "gt": + query = query.gt(column, filter_value) + elif operator == "gte": + query = query.gte(column, filter_value) + elif operator == "lt": + query = query.lt(column, filter_value) + elif operator == "lte": + query = query.lte(column, filter_value) + elif operator == "like": + query = query.like(column, filter_value) + elif operator == "ilike": + query = query.ilike(column, filter_value) + elif operator == "in": + query = query.in_(column, filter_value) + else: + logger.warning(f"Unknown filter operator: {operator}") + else: + # Simple equality filter + query = query.eq(column, value) + + # Apply ordering if provided + if order_by: + query = query.order(order_by, desc=not ascending) + + # Apply pagination + if limit: + query = query.limit(limit) + if offset: + query = query.offset(offset) + + # Execute the query + response = query.execute() + + return {"success": True, "data": response.data, "count": len(response.data)} + + except Exception as e: + logger.error(f"Error fetching data from Supabase: {str(e)}") + raise ToolException(f"Failed to fetch data from table '{table}': {str(e)}") diff --git a/intentkit/skills/supabase/insert_data.py b/intentkit/skills/supabase/insert_data.py new file mode 100644 index 00000000..962bd792 --- /dev/null +++ b/intentkit/skills/supabase/insert_data.py @@ -0,0 +1,68 @@ +import logging +from typing import Any, Dict, List, Type, Union + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field +from supabase import Client, create_client + +from intentkit.skills.supabase.base import SupabaseBaseTool + +NAME = "supabase_insert_data" +PROMPT = "Insert new data into a Supabase table." + +logger = logging.getLogger(__name__) + + +class SupabaseInsertDataInput(BaseModel): + """Input for SupabaseInsertData tool.""" + + table: str = Field(description="The name of the table to insert data into") + data: Union[Dict[str, Any], List[Dict[str, Any]]] = Field( + description="The data to insert. Can be a single object or a list of objects" + ) + returning: str = Field( + default="*", + description="Columns to return after insertion (default: '*' for all)", + ) + + +class SupabaseInsertData(SupabaseBaseTool): + """Tool for inserting data into Supabase tables. + + This tool allows inserting single or multiple records into Supabase tables. 
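+
+    Example input (illustrative values): a single object,
+
+        {"table": "users", "data": {"name": "Ada", "email": "ada@example.com"}}
+
+    or a list of objects under "data" for a batch insert in one call.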
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = SupabaseInsertDataInput + + async def _arun( + self, + table: str, + data: Union[Dict[str, Any], List[Dict[str, Any]]], + returning: str = "*", + **kwargs, + ): + try: + context = self.get_context() + + # Validate table access for public mode + self.validate_table_access(table, context) + + supabase_url, supabase_key = self.get_supabase_config(context) + + # Create Supabase client + supabase: Client = create_client(supabase_url, supabase_key) + + # Insert data + response = supabase.table(table).insert(data).execute() + + return { + "success": True, + "data": response.data, + "count": len(response.data) if response.data else 0, + } + + except Exception as e: + logger.error(f"Error inserting data into Supabase: {str(e)}") + raise ToolException(f"Failed to insert data into table '{table}': {str(e)}") diff --git a/intentkit/skills/supabase/invoke_function.py b/intentkit/skills/supabase/invoke_function.py new file mode 100644 index 00000000..4c0b2fc8 --- /dev/null +++ b/intentkit/skills/supabase/invoke_function.py @@ -0,0 +1,72 @@ +import logging +from typing import Any, Dict, Optional, Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field +from supabase import Client, create_client + +from intentkit.skills.supabase.base import SupabaseBaseTool + +NAME = "supabase_invoke_function" +PROMPT = "Invoke a Supabase Edge Function with optional parameters." + +logger = logging.getLogger(__name__) + + +class SupabaseInvokeFunctionInput(BaseModel): + """Input for SupabaseInvokeFunction tool.""" + + function_name: str = Field(description="The name of the Edge Function to invoke") + parameters: Optional[Dict[str, Any]] = Field( + default=None, description="Optional parameters to pass to the function" + ) + headers: Optional[Dict[str, str]] = Field( + default=None, description="Optional headers to include in the request" + ) + + +class SupabaseInvokeFunction(SupabaseBaseTool): + """Tool for invoking Supabase Edge Functions. + + This tool allows calling Supabase Edge Functions with optional parameters and headers. 
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = SupabaseInvokeFunctionInput + + async def _arun( + self, + function_name: str, + parameters: Optional[Dict[str, Any]] = None, + headers: Optional[Dict[str, str]] = None, + **kwargs, + ): + try: + context = self.get_context() + supabase_url, supabase_key = self.get_supabase_config(context) + + # Create Supabase client + supabase: Client = create_client(supabase_url, supabase_key) + + # Prepare function invocation parameters + invoke_options = {} + if parameters: + invoke_options["json"] = parameters + if headers: + invoke_options["headers"] = headers + + # Invoke the Edge Function + response = supabase.functions.invoke(function_name, invoke_options) + + return { + "success": True, + "data": response.json() if hasattr(response, "json") else response, + "status_code": getattr(response, "status_code", None), + } + + except Exception as e: + logger.error(f"Error invoking Supabase Edge Function: {str(e)}") + raise ToolException( + f"Failed to invoke Edge Function '{function_name}': {str(e)}" + ) diff --git a/intentkit/skills/supabase/schema.json b/intentkit/skills/supabase/schema.json new file mode 100644 index 00000000..e33e5fc8 --- /dev/null +++ b/intentkit/skills/supabase/schema.json @@ -0,0 +1,176 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Supabase", + "description": "Integration with Supabase backend-as-a-service platform enabling database operations and Edge Function invocations", + "x-icon": "https://ai.service.crestal.dev/skills/supabase/supabase.svg", + "x-tags": [ + "Database", + "Backend" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "fetch_data": { + "type": "string", + "title": "Fetch Data", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetch data from Supabase tables with filtering, ordering, and pagination support", + "default": "disabled" + }, + "insert_data": { + "type": "string", + "title": "Insert Data", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Insert new records into Supabase tables", + "default": "disabled" + }, + "update_data": { + "type": "string", + "title": "Update Data", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Update existing records in Supabase tables based on filter conditions", + "default": "disabled" + }, + "upsert_data": { + "type": "string", + "title": "Upsert Data", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Insert or update records in Supabase tables based on conflict resolution", + "default": "disabled" + }, + "delete_data": { + "type": "string", + "title": "Delete Data", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Delete records from Supabase tables based on filter conditions", + "default": "disabled" + }, + "invoke_function": { + "type": "string", + "title": "Invoke Edge 
Function", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Invoke Supabase Edge Functions with optional parameters and headers", + "default": "disabled" + } + }, + "description": "States for each Supabase skill" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + }, + "supabase_url": { + "type": "string", + "title": "Supabase URL", + "description": "Your Supabase project URL (e.g., https://your-project.supabase.co). You can find it in Project Settings -> Data API", + "x-link": "[Create Your DB](https://supabase.com/)", + "format": "uri" + }, + "supabase_key": { + "type": "string", + "title": "Supabase API Key", + "description": "Your Supabase project's API key. You can find it in Project Settings -> API Keys", + "x-sensitive": true, + "format": "password" + }, + "public_key": { + "type": "string", + "title": "Public API Key", + "description": "You can add a key with more restrictive permissions for public skills. If not provided, supabase_key will be used.", + "x-sensitive": true, + "format": "password" + }, + "public_write_tables": { + "type": "string", + "title": "Public Write Tables", + "description": "Add tables separated by commas. When insert, update, upsert, or delete operations are enabled for public use, only tables from this list can be used. This list does not restrict the skills executed by the owner or in autonomous chat. You can use either this option or public_key to secure your public access." + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "supabase_url", + "supabase_key" + ] + } +} \ No newline at end of file diff --git a/intentkit/skills/supabase/supabase.svg b/intentkit/skills/supabase/supabase.svg new file mode 100644 index 00000000..ad802ac1 --- /dev/null +++ b/intentkit/skills/supabase/supabase.svg @@ -0,0 +1,15 @@ + + + + + + + + + + + + + + + diff --git a/intentkit/skills/supabase/update_data.py b/intentkit/skills/supabase/update_data.py new file mode 100644 index 00000000..01dd345d --- /dev/null +++ b/intentkit/skills/supabase/update_data.py @@ -0,0 +1,103 @@ +import logging +from typing import Any, Dict, Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field +from supabase import Client, create_client + +from intentkit.skills.supabase.base import SupabaseBaseTool + +NAME = "supabase_update_data" +PROMPT = "Update existing data in a Supabase table with filtering conditions." + +logger = logging.getLogger(__name__) + + +class SupabaseUpdateDataInput(BaseModel): + """Input for SupabaseUpdateData tool.""" + + table: str = Field(description="The name of the table to update data in") + data: Dict[str, Any] = Field( + description="The data to update (key-value pairs of columns and new values)" + ) + filters: Dict[str, Any] = Field( + description="Dictionary of filters to identify which records to update (e.g., {'id': 123})" + ) + returning: str = Field( + default="*", description="Columns to return after update (default: '*' for all)" + ) + + +class SupabaseUpdateData(SupabaseBaseTool): + """Tool for updating data in Supabase tables. + + This tool allows updating records in Supabase tables based on filter conditions. 
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = SupabaseUpdateDataInput + + async def _arun( + self, + table: str, + data: Dict[str, Any], + filters: Dict[str, Any], + returning: str = "*", + **kwargs, + ): + try: + context = self.get_context() + + # Validate table access for public mode + self.validate_table_access(table, context) + + supabase_url, supabase_key = self.get_supabase_config(context) + + # Create Supabase client + supabase: Client = create_client(supabase_url, supabase_key) + + # Start building the update query + query = supabase.table(table).update(data) + + # Apply filters to identify which records to update + for column, value in filters.items(): + if isinstance(value, dict): + # Handle complex filters like {'gte': 18} + for operator, filter_value in value.items(): + if operator == "eq": + query = query.eq(column, filter_value) + elif operator == "neq": + query = query.neq(column, filter_value) + elif operator == "gt": + query = query.gt(column, filter_value) + elif operator == "gte": + query = query.gte(column, filter_value) + elif operator == "lt": + query = query.lt(column, filter_value) + elif operator == "lte": + query = query.lte(column, filter_value) + elif operator == "like": + query = query.like(column, filter_value) + elif operator == "ilike": + query = query.ilike(column, filter_value) + elif operator == "in": + query = query.in_(column, filter_value) + else: + logger.warning(f"Unknown filter operator: {operator}") + else: + # Simple equality filter + query = query.eq(column, value) + + # Execute the update + response = query.execute() + + return { + "success": True, + "data": response.data, + "count": len(response.data) if response.data else 0, + } + + except Exception as e: + logger.error(f"Error updating data in Supabase: {str(e)}") + raise ToolException(f"Failed to update data in table '{table}': {str(e)}") diff --git a/intentkit/skills/supabase/upsert_data.py b/intentkit/skills/supabase/upsert_data.py new file mode 100644 index 00000000..c18232ac --- /dev/null +++ b/intentkit/skills/supabase/upsert_data.py @@ -0,0 +1,75 @@ +import logging +from typing import Any, Dict, List, Type, Union + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field +from supabase import Client, create_client + +from intentkit.skills.supabase.base import SupabaseBaseTool + +NAME = "supabase_upsert_data" +PROMPT = ( + "Upsert (insert or update) data in a Supabase table based on conflict resolution." +) + +logger = logging.getLogger(__name__) + + +class SupabaseUpsertDataInput(BaseModel): + """Input for SupabaseUpsertData tool.""" + + table: str = Field(description="The name of the table to upsert data into") + data: Union[Dict[str, Any], List[Dict[str, Any]]] = Field( + description="The data to upsert. Can be a single object or a list of objects" + ) + on_conflict: str = Field( + description="The column(s) to use for conflict resolution (e.g., 'id' or 'email,username')" + ) + returning: str = Field( + default="*", description="Columns to return after upsert (default: '*' for all)" + ) + + +class SupabaseUpsertData(SupabaseBaseTool): + """Tool for upserting data in Supabase tables. + + This tool allows inserting new records or updating existing ones based on conflict resolution. 
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = SupabaseUpsertDataInput + + async def _arun( + self, + table: str, + data: Union[Dict[str, Any], List[Dict[str, Any]]], + on_conflict: str, + returning: str = "*", + **kwargs, + ): + try: + context = self.get_context() + + # Validate table access for public mode + self.validate_table_access(table, context) + + supabase_url, supabase_key = self.get_supabase_config(context) + + # Create Supabase client + supabase: Client = create_client(supabase_url, supabase_key) + + # Upsert data + response = ( + supabase.table(table).upsert(data, on_conflict=on_conflict).execute() + ) + + return { + "success": True, + "data": response.data, + "count": len(response.data) if response.data else 0, + } + + except Exception as e: + logger.error(f"Error upserting data in Supabase: {str(e)}") + raise ToolException(f"Failed to upsert data in table '{table}': {str(e)}") diff --git a/intentkit/skills/system/__init__.py b/intentkit/skills/system/__init__.py new file mode 100644 index 00000000..98cb1432 --- /dev/null +++ b/intentkit/skills/system/__init__.py @@ -0,0 +1,122 @@ +"""System skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillOwnerState +from intentkit.skills.system.add_autonomous_task import AddAutonomousTask +from intentkit.skills.system.base import SystemBaseTool +from intentkit.skills.system.delete_autonomous_task import DeleteAutonomousTask +from intentkit.skills.system.edit_autonomous_task import EditAutonomousTask +from intentkit.skills.system.list_autonomous_tasks import ListAutonomousTasks +from intentkit.skills.system.read_agent_api_key import ReadAgentApiKey +from intentkit.skills.system.regenerate_agent_api_key import RegenerateAgentApiKey + +# Cache skills at the system level, because they are stateless +_cache: dict[str, SystemBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + read_agent_api_key: SkillOwnerState + regenerate_agent_api_key: SkillOwnerState + list_autonomous_tasks: SkillOwnerState + add_autonomous_task: SkillOwnerState + delete_autonomous_task: SkillOwnerState + edit_autonomous_task: SkillOwnerState + + +class Config(SkillConfig): + """Configuration for system skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[SystemBaseTool]: + """Get all system skills. + + Args: + config: The configuration for system skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of system skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_system_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_system_skill( + name: str, + store: SkillStoreABC, +) -> SystemBaseTool: + """Get a system skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested system skill + """ + if name == "read_agent_api_key": + if name not in _cache: + _cache[name] = ReadAgentApiKey( + skill_store=store, + ) + return _cache[name] + elif name == "regenerate_agent_api_key": + if name not in _cache: + _cache[name] = RegenerateAgentApiKey( + skill_store=store, + ) + return _cache[name] + elif name == "list_autonomous_tasks": + if name not in _cache: + _cache[name] = ListAutonomousTasks( + skill_store=store, + ) + return _cache[name] + elif name == "add_autonomous_task": + if name not in _cache: + _cache[name] = AddAutonomousTask( + skill_store=store, + ) + return _cache[name] + elif name == "delete_autonomous_task": + if name not in _cache: + _cache[name] = DeleteAutonomousTask( + skill_store=store, + ) + return _cache[name] + elif name == "edit_autonomous_task": + if name not in _cache: + _cache[name] = EditAutonomousTask( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown system skill: {name}") + return None diff --git a/intentkit/skills/system/add_autonomous_task.py b/intentkit/skills/system/add_autonomous_task.py new file mode 100644 index 00000000..e4d9cd14 --- /dev/null +++ b/intentkit/skills/system/add_autonomous_task.py @@ -0,0 +1,91 @@ +from typing import Optional + +from pydantic import BaseModel, Field + +from intentkit.models.agent import AgentAutonomous +from intentkit.skills.system.base import SystemBaseTool + + +class AddAutonomousTaskInput(BaseModel): + """Input model for add_autonomous_task skill.""" + + name: Optional[str] = Field( + default=None, + description="Display name of the autonomous task configuration", + max_length=50, + ) + description: Optional[str] = Field( + default=None, + description="Description of the autonomous task configuration", + max_length=200, + ) + minutes: Optional[int] = Field( + default=None, + description="Interval in minutes between operations, mutually exclusive with cron", + ) + cron: Optional[str] = Field( + default=None, + description="Cron expression for scheduling operations, mutually exclusive with minutes", + ) + prompt: str = Field(description="Special prompt used during autonomous operation") + + +class AddAutonomousTaskOutput(BaseModel): + """Output model for add_autonomous_task skill.""" + + task: AgentAutonomous = Field( + description="The created autonomous task configuration" + ) + + +class AddAutonomousTask(SystemBaseTool): + """Skill to add a new autonomous task to an agent.""" + + name: str = "system_add_autonomous_task" + description: str = ( + "Add a new autonomous task configuration to the agent. " + "Allows setting up scheduled operations with custom prompts and intervals. " + "The minutes and cron fields are mutually exclusive. But you must provide one of them. " + "If user want to add a condition task, you can add a 5 minutes task to check the condition. " + "If the user does not explicitly state that the condition task should be executed continuously, " + "then add in the task prompt that it will delete itself after successful execution. " + ) + args_schema = AddAutonomousTaskInput + + async def _arun( + self, + name: Optional[str] = None, + description: Optional[str] = None, + minutes: Optional[int] = None, + cron: Optional[str] = None, + prompt: str = "", + **kwargs, + ) -> AddAutonomousTaskOutput: + """Add an autonomous task to the agent. 
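+
+        Example (illustrative; `tool` is an instance of this skill):
+
+            result = await tool._arun(
+                name="Price check",
+                minutes=5,
+                prompt="Check the BTC price and alert me on moves over 5%.",
+            )
+            # Exactly one of minutes/cron must be provided; the returned
+            # result.task carries the ID used by the edit/delete task skills.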
+ + Args: + name: Display name of the task + description: Description of the task + minutes: Interval in minutes (mutually exclusive with cron) + cron: Cron expression (mutually exclusive with minutes) + prompt: Special prompt for autonomous operation + config: Runtime configuration containing agent context + + Returns: + AddAutonomousTaskOutput: The created task + """ + context = self.get_context() + agent_id = context.agent_id + + task = AgentAutonomous( + name=name, + description=description, + minutes=minutes, + cron=cron, + prompt=prompt, + enabled=True, + ) + + created_task = await self.skill_store.add_autonomous_task(agent_id, task) + + return AddAutonomousTaskOutput(task=created_task) diff --git a/intentkit/skills/system/base.py b/intentkit/skills/system/base.py new file mode 100644 index 00000000..908befc1 --- /dev/null +++ b/intentkit/skills/system/base.py @@ -0,0 +1,22 @@ +import secrets + +from intentkit.skills.base import IntentKitSkill + + +class SystemBaseTool(IntentKitSkill): + """Base class for system-related skills.""" + + @property + def category(self) -> str: + """Return the skill category.""" + return "system" + + def _generate_api_key(self) -> str: + """Generate a new API key using secure random bytes.""" + # Generate 32 random bytes and convert to hex string + return f"sk-{secrets.token_hex(32)}" + + def _generate_public_api_key(self) -> str: + """Generate a new public API key using secure random bytes.""" + # Generate 32 random bytes and convert to hex string + return f"pk-{secrets.token_hex(32)}" diff --git a/intentkit/skills/system/delete_autonomous_task.py b/intentkit/skills/system/delete_autonomous_task.py new file mode 100644 index 00000000..bdfa84e6 --- /dev/null +++ b/intentkit/skills/system/delete_autonomous_task.py @@ -0,0 +1,54 @@ +from pydantic import BaseModel, Field + +from intentkit.skills.system.base import SystemBaseTool + + +class DeleteAutonomousTaskInput(BaseModel): + """Input model for delete_autonomous_task skill.""" + + task_id: str = Field( + description="The unique identifier of the autonomous task to delete" + ) + + +class DeleteAutonomousTaskOutput(BaseModel): + """Output model for delete_autonomous_task skill.""" + + success: bool = Field( + description="Whether the task was successfully deleted", default=True + ) + message: str = Field(description="Confirmation message about the deletion") + + +class DeleteAutonomousTask(SystemBaseTool): + """Skill to delete an autonomous task from an agent.""" + + name: str = "system_delete_autonomous_task" + description: str = ( + "Delete an autonomous task configuration from the agent. " + "Requires the task ID to identify which task to remove." + ) + args_schema = DeleteAutonomousTaskInput + + async def _arun( + self, + task_id: str, + **kwargs, + ) -> DeleteAutonomousTaskOutput: + """Delete an autonomous task from the agent. 
+ + Args: + task_id: The ID of the task to delete + config: Runtime configuration containing agent context + + Returns: + DeleteAutonomousTaskOutput: Confirmation of deletion + """ + context = self.get_context() + agent_id = context.agent_id + + await self.skill_store.delete_autonomous_task(agent_id, task_id) + + return DeleteAutonomousTaskOutput( + success=True, message=f"Successfully deleted autonomous task {task_id}" + ) diff --git a/intentkit/skills/system/edit_autonomous_task.py b/intentkit/skills/system/edit_autonomous_task.py new file mode 100644 index 00000000..197b3863 --- /dev/null +++ b/intentkit/skills/system/edit_autonomous_task.py @@ -0,0 +1,114 @@ +from typing import Optional + +from pydantic import BaseModel, Field + +from intentkit.models.agent import AgentAutonomous +from intentkit.skills.system.base import SystemBaseTool + + +class EditAutonomousTaskInput(BaseModel): + """Input model for edit_autonomous_task skill.""" + + task_id: str = Field( + description="The unique identifier of the autonomous task to edit" + ) + name: Optional[str] = Field( + default=None, + description="Display name of the autonomous task configuration", + max_length=50, + ) + description: Optional[str] = Field( + default=None, + description="Description of the autonomous task configuration", + max_length=200, + ) + minutes: Optional[int] = Field( + default=None, + description="Interval in minutes between operations, mutually exclusive with cron", + ) + cron: Optional[str] = Field( + default=None, + description="Cron expression for scheduling operations, mutually exclusive with minutes", + ) + prompt: Optional[str] = Field( + default=None, description="Special prompt used during autonomous operation" + ) + enabled: Optional[bool] = Field( + default=None, description="Whether the autonomous task is enabled" + ) + + +class EditAutonomousTaskOutput(BaseModel): + """Output model for edit_autonomous_task skill.""" + + task: AgentAutonomous = Field( + description="The updated autonomous task configuration" + ) + + +class EditAutonomousTask(SystemBaseTool): + """Skill to edit an existing autonomous task for an agent.""" + + name: str = "system_edit_autonomous_task" + description: str = ( + "Edit an existing autonomous task configuration for the agent. " + "Allows updating the name, description, schedule (minutes or cron), prompt, and enabled status. " + "Only provided fields will be updated; omitted fields will keep their current values. " + "The minutes and cron fields are mutually exclusive. Do not provide both of them. " + ) + args_schema = EditAutonomousTaskInput + + async def _arun( + self, + task_id: str, + name: Optional[str] = None, + description: Optional[str] = None, + minutes: Optional[int] = None, + cron: Optional[str] = None, + prompt: Optional[str] = None, + enabled: Optional[bool] = None, + **kwargs, + ) -> EditAutonomousTaskOutput: + """Edit an autonomous task for the agent. 
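+
+        Example (illustrative; the task ID is a placeholder):
+
+            result = await tool._arun(task_id="task_abc123", cron="0 9 * * 1")
+            # Switching to a cron schedule clears any previous minutes value;
+            # omitted fields keep their current values.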
+ + Args: + task_id: ID of the task to edit + name: Display name of the task + description: Description of the task + minutes: Interval in minutes (mutually exclusive with cron) + cron: Cron expression (mutually exclusive with minutes) + prompt: Special prompt for autonomous operation + enabled: Whether the task is enabled + config: Runtime configuration containing agent context + + Returns: + EditAutonomousTaskOutput: The updated task + """ + context = self.get_context() + agent_id = context.agent_id + + if minutes is not None and cron is not None: + raise ValueError("minutes and cron are mutually exclusive") + + # Build the updates dictionary with only provided fields + task_updates = {} + if name is not None: + task_updates["name"] = name + if description is not None: + task_updates["description"] = description + if minutes is not None: + task_updates["minutes"] = minutes + task_updates["cron"] = None + if cron is not None: + task_updates["cron"] = cron + task_updates["minutes"] = None + if prompt is not None: + task_updates["prompt"] = prompt + if enabled is not None: + task_updates["enabled"] = enabled + + updated_task = await self.skill_store.update_autonomous_task( + agent_id, task_id, task_updates + ) + + return EditAutonomousTaskOutput(task=updated_task) diff --git a/intentkit/skills/system/list_autonomous_tasks.py b/intentkit/skills/system/list_autonomous_tasks.py new file mode 100644 index 00000000..13016559 --- /dev/null +++ b/intentkit/skills/system/list_autonomous_tasks.py @@ -0,0 +1,50 @@ +from typing import List + +from pydantic import BaseModel, Field + +from intentkit.models.agent import AgentAutonomous +from intentkit.skills.system.base import SystemBaseTool + + +class ListAutonomousTasksInput(BaseModel): + """Input model for list_autonomous_tasks skill.""" + + pass + + +class ListAutonomousTasksOutput(BaseModel): + """Output model for list_autonomous_tasks skill.""" + + tasks: List[AgentAutonomous] = Field( + description="List of autonomous task configurations for the agent" + ) + + +class ListAutonomousTasks(SystemBaseTool): + """Skill to list all autonomous tasks for an agent.""" + + name: str = "system_list_autonomous_tasks" + description: str = ( + "List all autonomous task configurations for the agent. " + "Returns details about each task including scheduling, prompts, and status." + ) + args_schema = ListAutonomousTasksInput + + async def _arun( + self, + **kwargs, + ) -> ListAutonomousTasksOutput: + """List autonomous tasks for the agent. 
+ + Args: + config: Runtime configuration containing agent context + + Returns: + ListAutonomousTasksOutput: List of autonomous tasks + """ + context = self.get_context() + agent_id = context.agent_id + + tasks = await self.skill_store.list_autonomous_tasks(agent_id) + + return ListAutonomousTasksOutput(tasks=tasks) diff --git a/intentkit/skills/system/read_agent_api_key.py b/intentkit/skills/system/read_agent_api_key.py new file mode 100644 index 00000000..8500b664 --- /dev/null +++ b/intentkit/skills/system/read_agent_api_key.py @@ -0,0 +1,83 @@ +from pydantic import BaseModel, Field + +from intentkit.skills.system.base import SystemBaseTool + + +class ReadAgentApiKeyInput(BaseModel): + """Input model for read_agent_api_key skill.""" + + pass + + +class ReadAgentApiKeyOutput(BaseModel): + """Output model for read_agent_api_key skill.""" + + api_key: str = Field(description="The private API key for the agent (sk-)") + api_key_public: str = Field(description="The public API key for the agent (pk-)") + is_new: bool = Field(description="Whether new API keys were generated") + open_api_base_url: str = Field(description="The base URL for the API") + api_endpoint: str = Field(description="The full API endpoint URL") + + +class ReadAgentApiKey(SystemBaseTool): + """Skill to retrieve or generate an API key for the agent.""" + + name: str = "system_read_agent_api_key" + description: str = ( + "Retrieve the API keys for the agent. " + "Returns both private (sk-) and public (pk-) API keys. " + "Private API key can access all skills (public and owner-only). " + "Public API key can only access public skills. " + "Make sure to tell the user the base URL and endpoint. " + "Tell user in OpenAI sdk or Desktop client like Cherry Studio, input the base URL and API key. " + "Always use markdown code block to wrap the API keys, base URL, and endpoint. 
" + "Tell user to check more doc in https://github.com/crestalnetwork/intentkit/blob/main/docs/agent_api.md " + ) + args_schema = ReadAgentApiKeyInput + + async def _arun(self, **kwargs) -> ReadAgentApiKeyOutput: + """Retrieve or generate an API key for the agent.""" + # Get context from runnable config to access agent.id + context = self.get_context() + agent_id = context.agent_id + + # Get agent data from skill store + agent_data = await self.skill_store.get_agent_data(agent_id) + + # Get API base URL from system config + open_api_base_url = self.skill_store.get_system_config("open_api_base_url") + api_endpoint = f"{open_api_base_url}/v1/chat/completions" + + # Check if API keys exist + if agent_data.api_key and agent_data.api_key_public: + return ReadAgentApiKeyOutput( + api_key=agent_data.api_key, + api_key_public=agent_data.api_key_public, + is_new=False, + open_api_base_url=open_api_base_url, + api_endpoint=api_endpoint, + ) + + # Generate new API keys if any are missing + new_api_key = agent_data.api_key or self._generate_api_key() + new_public_api_key = ( + agent_data.api_key_public or self._generate_public_api_key() + ) + + # Save the API keys to agent data + update_data = {} + if not agent_data.api_key: + update_data["api_key"] = new_api_key + if not agent_data.api_key_public: + update_data["api_key_public"] = new_public_api_key + + if update_data: + await self.skill_store.set_agent_data(agent_id, update_data) + + return ReadAgentApiKeyOutput( + api_key=new_api_key, + api_key_public=new_public_api_key, + is_new=bool(update_data), + open_api_base_url=open_api_base_url, + api_endpoint=api_endpoint, + ) diff --git a/intentkit/skills/system/regenerate_agent_api_key.py b/intentkit/skills/system/regenerate_agent_api_key.py new file mode 100644 index 00000000..4be1c70b --- /dev/null +++ b/intentkit/skills/system/regenerate_agent_api_key.py @@ -0,0 +1,71 @@ +from pydantic import BaseModel, Field + +from intentkit.skills.system.base import SystemBaseTool + + +class RegenerateAgentApiKeyInput(BaseModel): + """Input model for regenerate_agent_api_key skill.""" + + pass + + +class RegenerateAgentApiKeyOutput(BaseModel): + """Output model for regenerate_agent_api_key skill.""" + + api_key: str = Field(description="The new private API key for the agent (sk-)") + api_key_public: str = Field( + description="The new public API key for the agent (pk-)" + ) + previous_key_existed: bool = Field(description="Whether previous API keys existed") + open_api_base_url: str = Field(description="The base URL for the API") + api_endpoint: str = Field(description="The full API endpoint URL") + + +class RegenerateAgentApiKey(SystemBaseTool): + """Skill to regenerate and reset the API key for the agent.""" + + name: str = "system_regenerate_agent_api_key" + description: str = ( + "Generate new API keys for the agent, revoke any existing keys. " + "Generates both private (sk-) and public (pk-) API keys. " + "Private API key can access all skills (public and owner-only). " + "Public API key can only access public skills. " + "Make sure to tell the user the base URL and endpoint. " + "Tell user in OpenAI sdk or Desktop client like Cherry Studio, input the base URL and API key. " + "Always use markdown code block to wrap the API keys, base URL, and endpoint. 
" + "Tell user to check more doc in https://github.com/crestalnetwork/intentkit/blob/main/docs/agent_api.md " + ) + args_schema = RegenerateAgentApiKeyInput + + async def _arun(self, **kwargs) -> RegenerateAgentApiKeyOutput: + """Generate and set a new API key for the agent.""" + # Get context from runnable config to access agent.id + context = self.get_context() + agent_id = context.agent_id + + # Get agent data from skill store + agent_data = await self.skill_store.get_agent_data(agent_id) + + # Get API base URL from system config + open_api_base_url = self.skill_store.get_system_config("open_api_base_url") + api_endpoint = f"{open_api_base_url}/v1/chat/completions" + + # Check if previous API keys existed + previous_key_existed = bool(agent_data.api_key or agent_data.api_key_public) + + # Generate new API keys + new_api_key = self._generate_api_key() + new_public_api_key = self._generate_public_api_key() + + # Save the new API keys to agent data (overwrites existing) + await self.skill_store.set_agent_data( + agent_id, {"api_key": new_api_key, "api_key_public": new_public_api_key} + ) + + return RegenerateAgentApiKeyOutput( + api_key=new_api_key, + api_key_public=new_public_api_key, + previous_key_existed=previous_key_existed, + open_api_base_url=open_api_base_url, + api_endpoint=api_endpoint, + ) diff --git a/intentkit/skills/system/schema.json b/intentkit/skills/system/schema.json new file mode 100644 index 00000000..66dd59b1 --- /dev/null +++ b/intentkit/skills/system/schema.json @@ -0,0 +1,109 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "System", + "description": "System management and configuration skills for agent operations including API key management", + "x-icon": "https://ai.service.crestal.dev/skills/system/system.svg", + "x-tags": [ + "System", + "Management", + "Configuration" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "read_agent_api_key": { + "type": "string", + "title": "Read Agent API Key", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Retrieve the API key for the agent. 
If no API key exists, generates and sets a new one.", + "default": "disabled" + }, + "regenerate_agent_api_key": { + "type": "string", + "title": "Regenerate Agent API Key", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Generate a new API key for the agent, replacing any existing key.", + "default": "disabled" + }, + "list_autonomous_tasks": { + "type": "string", + "title": "List Autonomous Tasks", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "List all autonomous task configurations for the agent.", + "default": "disabled" + }, + "add_autonomous_task": { + "type": "string", + "title": "Add Autonomous Task", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Add a new autonomous task configuration to the agent.", + "default": "disabled" + }, + "delete_autonomous_task": { + "type": "string", + "title": "Delete Autonomous Task", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Delete an autonomous task configuration from the agent.", + "default": "disabled" + }, + "edit_autonomous_task": { + "type": "string", + "title": "Edit Autonomous Task", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Edit an existing autonomous task configuration for the agent.", + "default": "disabled" + } + } + } + } +} \ No newline at end of file diff --git a/intentkit/skills/system/system.svg b/intentkit/skills/system/system.svg new file mode 100644 index 00000000..c9c13e0a --- /dev/null +++ b/intentkit/skills/system/system.svg @@ -0,0 +1,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/intentkit/skills/tavily/README.md b/intentkit/skills/tavily/README.md new file mode 100644 index 00000000..138e223e --- /dev/null +++ b/intentkit/skills/tavily/README.md @@ -0,0 +1,86 @@ +# Tavily Skills + +This skill package enables agents to search the web and extract content from web pages using the [Tavily](https://tavily.com/) API. + +## Overview + +The Tavily skills allow agents to: +- Search the internet for current information +- Retrieve relevant search results with snippets and URLs +- Extract full content from specific web pages +- Find answers to questions that may not be in the agent's training data +- Access real-time information and news + +## Available Skills + +### 1. Tavily Search +Allows agents to search the web for current information and retrieve relevant results. + +### 2. Tavily Extract +Allows agents to extract full content from specific URLs, including text and optionally images. 
+ +## Configuration + +To enable these skills, add the following to your agent configuration: + +```yaml +skills: + tavily: + enabled: true + api_key: "your-tavily-api-key" + states: + tavily_search: public # or "private" or "disabled" + tavily_extract: public # or "private" or "disabled" +``` + +### Configuration Options + +- `enabled`: Whether the skills are enabled (true/false) +- `api_key`: Your Tavily API key +- `states.tavily_search`: The state of the Tavily search skill + - `public`: Available to agent owner and all users + - `private`: Available only to the agent owner + - `disabled`: Not available to anyone +- `states.tavily_extract`: The state of the Tavily extract skill + - `public`: Available to agent owner and all users + - `private`: Available only to the agent owner + - `disabled`: Not available to anyone + +## Usage Examples + +### Tavily Search + +The agent will automatically use Tavily search when: +- A user asks for current information or news +- The agent needs to verify facts or find up-to-date information +- A query seeks information that may not be in the agent's training data + +**Example Interaction:** + +**User**: "What's the current price of Bitcoin?" + +**Agent**: *Uses Tavily search to find current cryptocurrency prices* + +### Tavily Extract + +The agent will automatically use Tavily extract when: +- A user wants to extract or scrape content from a specific URL +- The agent needs to analyze the full content of a web page +- A query requires detailed information from a particular website + +**Example Interaction:** + +**User**: "Can you extract the content from https://en.wikipedia.org/wiki/Artificial_intelligence" + +**Agent**: *Uses Tavily extract to retrieve the full content from the Wikipedia page* + +## API Requirements + +These skills require a valid Tavily API key. You can sign up for one at [tavily.com](https://tavily.com/). + +## Limitations + +- Search results are limited to a maximum of 10 items per query +- Extract functionality may not work on all websites due to access restrictions +- The quality of results depends on the Tavily API +- Rate limits may apply based on your Tavily API plan \ No newline at end of file diff --git a/intentkit/skills/tavily/__init__.py b/intentkit/skills/tavily/__init__.py new file mode 100644 index 00000000..4554c3dc --- /dev/null +++ b/intentkit/skills/tavily/__init__.py @@ -0,0 +1,91 @@ +"""Tavily search skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.tavily.base import TavilyBaseTool +from intentkit.skills.tavily.tavily_extract import TavilyExtract +from intentkit.skills.tavily.tavily_search import TavilySearch + +# Cache skills at the system level, because they are stateless +_cache: dict[str, TavilyBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + tavily_search: SkillState + tavily_extract: SkillState + + +class Config(SkillConfig): + """Configuration for Tavily search skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[TavilyBaseTool]: + """Get all Tavily search skills. + + Args: + config: The configuration for Tavily search skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Tavily search skills. 
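+
+    Example (illustrative only; the key and states are placeholders):
+        >>> config = {"enabled": True, "api_key": "tvly-...", "states": {
+        ...     "tavily_search": "public", "tavily_extract": "disabled"}}
+        >>> skills = await get_skills(config, is_private=False, store=store)
+        >>> [s.name for s in skills]
+        ['tavily_search']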
+ """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_tavily_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_tavily_skill( + name: str, + store: SkillStoreABC, +) -> TavilyBaseTool: + """Get a Tavily search skill by name. + + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Tavily search skill + """ + if name == "tavily_search": + if name not in _cache: + _cache[name] = TavilySearch( + skill_store=store, + ) + return _cache[name] + elif name == "tavily_extract": + if name not in _cache: + _cache[name] = TavilyExtract( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown Tavily skill: {name}") + return None diff --git a/intentkit/skills/tavily/base.py b/intentkit/skills/tavily/base.py new file mode 100644 index 00000000..4ae735c8 --- /dev/null +++ b/intentkit/skills/tavily/base.py @@ -0,0 +1,36 @@ +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class TavilyBaseTool(IntentKitSkill): + """Base class for Tavily search tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + return self.skill_store.get_system_config("tavily_api_key") + # for backward compatibility, may only have api_key in skill_config + elif skill_config.get("api_key"): + return skill_config.get("api_key") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}, or no api_key in config" + ) + + @property + def category(self) -> str: + return "tavily" diff --git a/intentkit/skills/tavily/schema.json b/intentkit/skills/tavily/schema.json new file mode 100644 index 00000000..7c7ad5ad --- /dev/null +++ b/intentkit/skills/tavily/schema.json @@ -0,0 +1,119 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "Tavily Search and Extract", + "description": "Web search and content extraction capabilities using Tavily", + "x-icon": "https://ai.service.crestal.dev/skills/tavily/tavily.jpg", + "x-tags": [ + "Internet", + "Search", + "Information", + "Content Extraction" + ], + "x-nft-requirement": 1, + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "tavily_search": { + "type": "string", + "title": "Tavily Search", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Search the web for real-time information and recent content using Tavily", + "default": "private" + }, + 
"tavily_extract": { + "type": "string", + "title": "Tavily Extract", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Extract full content from web pages using Tavily Extract API", + "default": "private" + } + }, + "description": "States for each Tavily skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Tavily API Key", + "description": "API key for Tavily services", + "x-link": "[Get your API key](https://tavily.com/)", + "x-sensitive": true + }, + "rate_limit_number": { + "type": "integer", + "title": "Rate Limit Number", + "description": "Number of requests allowed per time window, only valid if api_key is set" + }, + "rate_limit_minutes": { + "type": "integer", + "title": "Rate Limit Minutes", + "description": "Time window in minutes for rate limiting, only valid if api_key is set" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/tavily/tavily.jpg b/intentkit/skills/tavily/tavily.jpg new file mode 100644 index 00000000..3672db84 Binary files /dev/null and b/intentkit/skills/tavily/tavily.jpg differ diff --git a/intentkit/skills/tavily/tavily_extract.py b/intentkit/skills/tavily/tavily_extract.py new file mode 100644 index 00000000..14335117 --- /dev/null +++ b/intentkit/skills/tavily/tavily_extract.py @@ -0,0 +1,146 @@ +import logging +from typing import Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.tavily.base import TavilyBaseTool + +logger = logging.getLogger(__name__) + + +class TavilyExtractInput(BaseModel): + """Input for Tavily extract tool.""" + + urls: str = Field( + description="The URL to extract content from.", + ) + include_images: bool = Field( + description="Include a list of images extracted from the URLs in the response.", + default=False, + ) + extract_depth: str = Field( + description="The depth of the extraction process. 'advanced' retrieves more data including tables and embedded content with higher success but may increase latency. 'basic' costs 1 credit per 5 successful URL extractions, while 'advanced' costs 2 credits per 5 successful URL extractions.", + default="basic", + ) + + +class TavilyExtract(TavilyBaseTool): + """Tool for extracting web page content using Tavily. + + This tool uses Tavily's extract API to retrieve content from specified URLs. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "tavily_extract" + description: str = ( + "Extract web page content from a specified URL using Tavily Extract. " + "This tool is useful when you need to get the full text content from a webpage. " + "You must call this tool whenever the user asks to extract or scrape content from a specific URL." 
+ ) + args_schema: Type[BaseModel] = TavilyExtractInput + + async def _arun( + self, + urls: str, + include_images: bool = False, + extract_depth: str = "basic", + **kwargs, + ) -> str: + """Implementation of the Tavily extract tool. + + Args: + urls: The URL to extract content from. + include_images: Whether to include image URLs in the results. + extract_depth: The depth of the extraction process ('basic' or 'advanced'). + config: The configuration for the tool call. + + Returns: + str: Formatted extraction results with content from the URL. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + logger.debug( + f"tavily_extract.py: Running web extraction with context {context}" + ) + + if skill_config.get("api_key_provider") == "agent_owner": + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + + # Get the API key from the agent's configuration + api_key = self.get_api_key() + if not api_key: + return "Error: No Tavily API key provided in the configuration." + + # Validate extract_depth + if extract_depth not in ["basic", "advanced"]: + extract_depth = "basic" + logger.warning( + "tavily_extract.py: Invalid extract_depth provided. Using default 'basic'." + ) + + # Call Tavily extract API + try: + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.post( + "https://api.tavily.com/extract", + headers={"Authorization": f"Bearer {api_key}"}, + json={ + "urls": urls, + "include_images": include_images, + "extract_depth": extract_depth, + }, + ) + + if response.status_code != 200: + logger.error( + f"tavily_extract.py: Error from Tavily API: {response.status_code} - {response.text}" + ) + return f"Error extracting web page content: {response.status_code} - {response.text}" + + data = response.json() + results = data.get("results", []) + + if not results: + return f"No content could be extracted from URL: '{urls}'" + + # Format the results + formatted_results = f"Web page content extracted from: '{urls}'\n\n" + + for i, result in enumerate(results, 1): + url = result.get("url", "No URL") + raw_content = result.get("raw_content", "No content available") + + # Truncate the content if it's too long (over 2000 characters) + if len(raw_content) > 2000: + raw_content = raw_content[:2000] + "...[content truncated]" + + formatted_results += f"{i}. Content from {url}:\n\n" + formatted_results += f"{raw_content}\n\n" + + # Add images if available and requested + if include_images and "images" in result and result["images"]: + formatted_results += "Images:\n" + for j, image_url in enumerate(result["images"], 1): + formatted_results += f" {j}. {image_url}\n" + formatted_results += "\n" + + return formatted_results.strip() + + except Exception as e: + logger.error( + f"tavily_extract.py: Error extracting web page content: {e}", + exc_info=True, + ) + return "An error occurred while extracting web page content. Please try again later." 
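As an editorial aside (not part of the diff): the search skill added next sends its key in the request body rather than a Bearer header, unlike the extract call above. A minimal standalone sketch of that request, with a placeholder key:

```python
import asyncio

import httpx


async def search_demo() -> None:
    # Mirrors the request body built in tavily_search.py below;
    # note the api_key travels in the JSON payload, not a header.
    async with httpx.AsyncClient(timeout=30.0) as client:
        resp = await client.post(
            "https://api.tavily.com/search",
            json={
                "api_key": "tvly-your-key",  # placeholder, not a real key
                "query": "current bitcoin price",
                "max_results": 5,
                "include_images": False,
                "include_raw_content": False,
            },
        )
        resp.raise_for_status()
        for i, item in enumerate(resp.json().get("results", []), 1):
            print(f"{i}. {item.get('title')}\n   {item.get('url')}")


asyncio.run(search_demo())
```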
diff --git a/intentkit/skills/tavily/tavily_search.py b/intentkit/skills/tavily/tavily_search.py new file mode 100644 index 00000000..555ed35d --- /dev/null +++ b/intentkit/skills/tavily/tavily_search.py @@ -0,0 +1,141 @@ +import logging +from typing import Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.skills.tavily.base import TavilyBaseTool + +logger = logging.getLogger(__name__) + + +class TavilySearchInput(BaseModel): + """Input for Tavily search tool.""" + + query: str = Field( + description="The search query to look up on the web.", + ) + max_results: int = Field( + description="Maximum number of search results to return (1-10).", + default=5, + ge=1, + le=10, + ) + include_images: bool = Field( + description="Whether to include image URLs in the results.", + default=False, + ) + include_raw_content: bool = Field( + description="Whether to include raw HTML content in the results.", + default=False, + ) + + +class TavilySearch(TavilyBaseTool): + """Tool for searching the web using Tavily. + + This tool uses Tavily's search API to search the web and return relevant results. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = "tavily_search" + description: str = ( + "Search the web for current information on a topic. Use this tool when you need to find" + " up-to-date information, facts, news, or any content available online.\n" + "You must call this tool whenever the user asks for information that may not be in your training data," + " requires current data, or when you're unsure about facts." + ) + args_schema: Type[BaseModel] = TavilySearchInput + + async def _arun( + self, + query: str, + max_results: int = 5, + include_images: bool = False, + include_raw_content: bool = False, + **kwargs, + ) -> str: + """Implementation of the Tavily search tool. + + Args: + query: The search query to look up. + max_results: Maximum number of search results to return (1-10). + include_images: Whether to include image URLs in the results. + include_raw_content: Whether to include raw HTML content in the results. + + + Returns: + str: Formatted search results with titles, snippets, and URLs. + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + logger.debug(f"tavily.py: Running web search with context {context}") + + if skill_config.get("api_key_provider") == "agent_owner": + if skill_config.get("rate_limit_number") and skill_config.get( + "rate_limit_minutes" + ): + await self.user_rate_limit_by_category( + context.user_id, + skill_config["rate_limit_number"], + skill_config["rate_limit_minutes"], + ) + + # Get the API key from the agent's configuration + if skill_config.get("api_key_provider") == "agent_owner": + api_key = skill_config.get("api_key") + else: + api_key = self.skill_store.get_system_config("tavily_api_key") + if not api_key: + return "Error: No Tavily API key provided in the configuration." 
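+
+        # Note: unlike TavilyExtract, which uses self.get_api_key() from the
+        # base class, this skill resolves the key inline; both paths honor
+        # the api_key_provider setting.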
+ + # Limit max_results to a reasonable range + max_results = max(1, min(max_results, 10)) + + # Call Tavily search API + try: + async with httpx.AsyncClient(timeout=30.0) as client: + response = await client.post( + "https://api.tavily.com/search", + json={ + "api_key": api_key, + "query": query, + "max_results": max_results, + "include_images": include_images, + "include_raw_content": include_raw_content, + }, + ) + + if response.status_code != 200: + logger.error( + f"tavily.py: Error from Tavily API: {response.status_code} - {response.text}" + ) + return f"Error searching the web: {response.status_code} - {response.text}" + + data = response.json() + results = data.get("results", []) + + if not results: + return f"No results found for query: '{query}'" + + # Format the results + formatted_results = f"Web search results for: '{query}'\n\n" + + for i, result in enumerate(results, 1): + title = result.get("title", "No title") + content = result.get("content", "No content") + url = result.get("url", "No URL") + + formatted_results += f"{i}. {title}\n" + formatted_results += f"{content}\n" + formatted_results += f"Source: {url}\n\n" + + return formatted_results.strip() + + except Exception as e: + logger.error(f"tavily.py: Error searching web: {e}", exc_info=True) + return "An error occurred while searching the web. Please try again later." diff --git a/intentkit/skills/token/README.md b/intentkit/skills/token/README.md new file mode 100644 index 00000000..4ce91658 --- /dev/null +++ b/intentkit/skills/token/README.md @@ -0,0 +1,89 @@ +# Token Skills + +The Token skills provide blockchain token analytics capabilities powered by Moralis. These skills allow you to search, analyze, and track token information across multiple blockchains. + +## Available Skills + +| Skill | Description | Endpoint | Example Prompts | +|-------|-------------|----------|----------------| +| `token_price` | Get token price and information | `GET /erc20/:address/price` | "What's the current price of PEPE token?" "Get the price of USDT on Ethereum." | +| `token_erc20_transfers` | Get ERC20 token transfers for a wallet | `GET /:address/erc20/transfers` | "Show me all the USDT transfers for my wallet." "What are the recent token transactions for 0x123?" | +| `token_search` * | Search for tokens by name, symbol, or address | `GET /tokens/search` | "Find tokens with 'pepe' in the name." "Search for tokens with high market cap on Ethereum." | +| `token_analytics` | Get detailed analytics for a token | `GET /tokens/:address/analytics` | "Show me analytics for the PEPE token." "What are the buy/sell volumes for USDT in the last 24 hours?" | + +\* Premium Endpoint: To use the `token_search` API, you will need an API key associated with a Moralis account on the Business plan or a custom Enterprise plan. + +## Configuration + +The token skills require a Moralis API key to function. You can configure this in your agent's configuration file: + +```yaml +skills: + token: + api_key: "your_moralis_api_key_here" + states: + token_price: "public" + token_erc20_transfers: "public" + token_search: "public" + token_analytics: "public" +``` + +## Responses + +All token skills return structured data from the Moralis API. 
Here are the typical response formats: + +### Token Price + +```json +{ + "tokenName": "Pepe", + "tokenSymbol": "PEPE", + "tokenLogo": "https://cdn.moralis.io/eth/0x6982508145454ce325ddbe47a25d4ec3d2311933.png", + "tokenDecimals": "18", + "usdPrice": 0.000012302426023896, + "usdPriceFormatted": "0.000012302426023896", + "24hrPercentChange": "-3.7369101031758394", + "exchangeName": "Uniswap v3", + "tokenAddress": "0x6982508145454ce325ddbe47a25d4ec3d2311933" +} +``` + +### Token Analytics + +```json +{ + "tokenAddress": "0x6982508145454ce325ddbe47a25d4ec3d2311933", + "totalBuyVolume": { + "5m": "", + "1h": 43678.642005116264, + "6h": 129974.13379912674, + "24h": 4583254.969119737 + }, + "totalSellVolume": { + "5m": 147.69184595604904, + "1h": 393.0296489666009, + "6h": 257421.35479601548, + "24h": 4735908.689740969 + }, + "totalBuyers": { + "5m": "", + "1h": 33, + "6h": 115, + "24h": 547 + }, + "totalSellers": { + "5m": 1, + "1h": 2, + "6h": 78, + "24h": 587 + } +} +``` + +## Usage Tips + +- For the best performance, always specify the chain parameter when querying across multiple blockchains. +- When using the token_search endpoint, be aware of your Moralis plan limitations. +- The token analytics skill provides valuable trading data that can be used for token analysis and market assessment. + +For more detailed information on each endpoint, refer to the [Moralis API documentation](https://docs.moralis.io/web3-data-api/evm). \ No newline at end of file diff --git a/intentkit/skills/token/__init__.py b/intentkit/skills/token/__init__.py new file mode 100644 index 00000000..6d8188f0 --- /dev/null +++ b/intentkit/skills/token/__init__.py @@ -0,0 +1,107 @@ +"""Token skills for blockchain token analysis.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.token.base import TokenBaseTool +from intentkit.skills.token.erc20_transfers import ERC20Transfers +from intentkit.skills.token.token_analytics import TokenAnalytics +from intentkit.skills.token.token_price import TokenPrice +from intentkit.skills.token.token_search import TokenSearch + +# Cache skills at the system level, because they are stateless +_cache: dict[str, TokenBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + """State configurations for Token skills.""" + + token_price: SkillState + token_erc20_transfers: SkillState + token_search: SkillState + token_analytics: SkillState + + +class Config(SkillConfig): + """Configuration for Token blockchain analysis skills.""" + + states: SkillStates + api_key: str + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[TokenBaseTool]: + """Get all Token blockchain analysis skills. + + Args: + config: The configuration for Token skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of Token blockchain analysis skills. 
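+
+    Example (hypothetical config; private skills are filtered out for
+    non-owner callers):
+        >>> cfg = {"enabled": True, "api_key": "...", "states": {
+        ...     "token_price": "public", "token_search": "private"}}
+        >>> [s.name for s in await get_skills(cfg, is_private=False, store=store)]
+        ['token_price']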
+ """ + if "states" not in config: + logger.error("No 'states' field in config") + return [] + + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_token_skill(name, store) + if skill: + result.append(skill) + + return result + + +def get_token_skill( + name: str, + store: SkillStoreABC, +) -> TokenBaseTool: + """Get a Token blockchain analysis skill by name. + + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Token blockchain analysis skill + """ + if name in _cache: + return _cache[name] + + skill = None + if name == "token_price": + skill = TokenPrice(skill_store=store) + elif name == "token_erc20_transfers": + skill = ERC20Transfers(skill_store=store) + elif name == "token_search": + skill = TokenSearch(skill_store=store) + elif name == "token_analytics": + skill = TokenAnalytics(skill_store=store) + else: + logger.warning(f"Unknown Token skill: {name}") + return None + + if skill: + _cache[name] = skill + + return skill diff --git a/intentkit/skills/token/base.py b/intentkit/skills/token/base.py new file mode 100644 index 00000000..bdb47f6b --- /dev/null +++ b/intentkit/skills/token/base.py @@ -0,0 +1,114 @@ +"""Base class for token-related skills.""" + +import logging +from typing import Any, Dict + +import aiohttp + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.skills.token.constants import MORALIS_API_BASE_URL + +logger = logging.getLogger(__name__) + + +class TokenBaseTool(IntentKitSkill): + """Base class for all token-related skills. + + This base class provides common functionality for token API interactions, + including making HTTP requests to the Moralis API. + """ + + def __init__(self, skill_store: SkillStoreABC = None): + """Initialize the token tool with a skill store.""" + super().__init__(skill_store=skill_store) + + @property + def category(self) -> str: + return "token" + + def get_api_key(self) -> str: + """Get API key from agent config or system config. + + Returns: + The API key to use for API requests + """ + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + if skill_config.get("api_key_provider") == "agent_owner": + return skill_config.get("api_key") + return self.skill_store.get_system_config("moralis_api_key") + + def _prepare_params(self, params: Dict[str, Any]) -> Dict[str, Any]: + """Convert boolean values to lowercase strings for API compatibility. + + Args: + params: Dictionary with query parameters that may contain boolean values + + Returns: + Dictionary with boolean values converted to lowercase strings + """ + if not params: + return params + + result = {} + for key, value in params.items(): + if isinstance(value, bool): + result[key] = str(value).lower() + else: + result[key] = value + return result + + async def _make_request( + self, + method: str, + endpoint: str, + api_key: str, + params: Dict[str, Any] = None, + data: Dict[str, Any] = None, + ) -> Dict[str, Any]: + """Make a request to the Moralis API. + + Args: + method: HTTP method (GET, POST, etc.) 
+ endpoint: API endpoint (without base URL) + api_key: Moralis API key + params: Query parameters + data: Request body data for POST requests + + Returns: + Response data as dictionary + """ + url = f"{MORALIS_API_BASE_URL}{endpoint}" + + if not api_key: + logger.error("API key is missing") + return {"error": "API key is missing"} + + headers = {"accept": "application/json", "X-API-Key": api_key} + processed_params = self._prepare_params(params) if params else None + + try: + async with aiohttp.ClientSession() as session: + async with session.request( + method=method, + url=url, + headers=headers, + params=processed_params, + json=data, + ) as response: + if response.status >= 400: + error_text = await response.text() + logger.error(f"API error {response.status}: {error_text}") + return { + "error": f"API error: {response.status}", + "details": error_text, + } + + return await response.json() + except aiohttp.ClientError as e: + logger.error(f"HTTP error making request: {str(e)}") + return {"error": f"HTTP error: {str(e)}"} + except Exception as e: + logger.error(f"Unexpected error making request: {str(e)}") + return {"error": f"Unexpected error: {str(e)}"} diff --git a/intentkit/skills/token/constants.py b/intentkit/skills/token/constants.py new file mode 100644 index 00000000..8050409f --- /dev/null +++ b/intentkit/skills/token/constants.py @@ -0,0 +1,9 @@ +"""Constants for the token skills module.""" + +# Base URLs +MORALIS_API_BASE_URL = "https://deep-index.moralis.io/api/v2.2" + +# Default parameters +DEFAULT_CHAIN = "eth" +DEFAULT_LIMIT = 100 +DEFAULT_ORDER = "DESC" diff --git a/intentkit/skills/token/erc20_transfers.py b/intentkit/skills/token/erc20_transfers.py new file mode 100644 index 00000000..003721a4 --- /dev/null +++ b/intentkit/skills/token/erc20_transfers.py @@ -0,0 +1,143 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.token.base import TokenBaseTool +from intentkit.skills.token.constants import DEFAULT_CHAIN, DEFAULT_LIMIT, DEFAULT_ORDER + +logger = logging.getLogger(__name__) + + +class ERC20TransfersInput(BaseModel): + """Input for ERC20 transfers tool.""" + + address: str = Field( + description="The address of the wallet to get ERC20 token transfers for." 
+ ) + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + contract_addresses: Optional[List[str]] = Field( + description="List of contract addresses of transfers to filter by.", + default=None, + ) + from_block: Optional[int] = Field( + description="The minimum block number from which to get the transactions.", + default=None, + ) + to_block: Optional[int] = Field( + description="The maximum block number from which to get the transactions.", + default=None, + ) + from_date: Optional[str] = Field( + description="The start date from which to get the transactions (any format accepted by momentjs).", + default=None, + ) + to_date: Optional[str] = Field( + description="Get the transactions up to this date (any format accepted by momentjs).", + default=None, + ) + limit: Optional[int] = Field( + description="The desired page size of the result.", + default=DEFAULT_LIMIT, + ) + order: Optional[str] = Field( + description="The order of the result, in ascending (ASC) or descending (DESC).", + default=DEFAULT_ORDER, + ) + cursor: Optional[str] = Field( + description="The cursor returned in the previous response (for pagination).", + default=None, + ) + + +class ERC20Transfers(TokenBaseTool): + """Tool for retrieving ERC20 token transfers by wallet using Moralis. + + This tool uses Moralis' API to fetch ERC20 token transactions ordered by + block number in descending order for a specific wallet address. + """ + + name: str = "token_erc20_transfers" + description: str = ( + "Get ERC20 token transactions for a wallet address, ordered by block number. " + "Returns transaction details, token information, and wallet interactions." + ) + args_schema: Type[BaseModel] = ERC20TransfersInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + contract_addresses: Optional[List[str]] = None, + from_block: Optional[int] = None, + to_block: Optional[int] = None, + from_date: Optional[str] = None, + to_date: Optional[str] = None, + limit: Optional[int] = DEFAULT_LIMIT, + order: Optional[str] = DEFAULT_ORDER, + cursor: Optional[str] = None, + **kwargs, + ) -> Dict[str, Any]: + """Fetch ERC20 token transfers for a wallet from Moralis. + + Args: + address: The wallet address + chain: The blockchain to query + contract_addresses: List of contract addresses to filter by + from_block: Minimum block number + to_block: Maximum block number + from_date: Start date for transfers + to_date: End date for transfers + limit: Number of results per page + order: Order of results (ASC/DESC) + cursor: Pagination cursor + config: The configuration for the tool call + + Returns: + Dict containing ERC20 transfer data + """ + context = self.get_context() + if context is None: + logger.error("Context is None, cannot retrieve API key") + return { + "error": "Cannot retrieve API key. Please check agent configuration." 
+ } + + # Get the API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No Moralis API key available") + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = {"chain": chain, "limit": limit, "order": order} + + # Add optional parameters if they exist + if contract_addresses: + params["contract_addresses"] = contract_addresses + if from_block: + params["from_block"] = from_block + if to_block: + params["to_block"] = to_block + if from_date: + params["from_date"] = from_date + if to_date: + params["to_date"] = to_date + if cursor: + params["cursor"] = cursor + + # Call Moralis API + try: + endpoint = f"/{address}/erc20/transfers" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error(f"Error fetching ERC20 transfers: {e}") + return { + "error": f"An error occurred while fetching ERC20 transfers: {str(e)}. Please try again later." + } diff --git a/intentkit/skills/token/moralis.png b/intentkit/skills/token/moralis.png new file mode 100644 index 00000000..04f36ca7 Binary files /dev/null and b/intentkit/skills/token/moralis.png differ diff --git a/intentkit/skills/token/schema.json b/intentkit/skills/token/schema.json new file mode 100644 index 00000000..80b3be4e --- /dev/null +++ b/intentkit/skills/token/schema.json @@ -0,0 +1,141 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Token Skills", + "description": "Token analysis skills powered by Moralis API", + "type": "object", + "x-icon": "https://ai.service.crestal.dev/skills/portfolio/moralis.png", + "x-tags": [ + "Blockchain", + "Web3", + "Crypto", + "Token", + "DeFi" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": true + }, + "states": { + "type": "object", + "properties": { + "token_price": { + "type": "string", + "title": "ERC20 Token Price", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get the token price denominated in the blockchain's native token and USD", + "default": "public" + }, + "token_erc20_transfers": { + "type": "string", + "title": "ERC20 Token Transfers", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get ERC20 token transactions ordered by block number", + "default": "public" + }, + "token_search": { + "type": "string", + "title": "Token Search (Premium)", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Search for tokens based on contract address, token name or token symbol. Premium endpoint available as an add-on. 
Requires a Moralis Business plan or Enterprise plan.", + "default": "public" + }, + "token_analytics": { + "type": "string", + "title": "Token Analytics", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Get analytics for a token by token address", + "default": "public" + } + }, + "description": "States for each token analysis skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Moralis API Key", + "description": "API key for Moralis API service", + "x-link": "[Get your API key](https://moralis.io/)", + "x-sensitive": true + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/token/token_analytics.py b/intentkit/skills/token/token_analytics.py new file mode 100644 index 00000000..e98d9bd0 --- /dev/null +++ b/intentkit/skills/token/token_analytics.py @@ -0,0 +1,79 @@ +import logging +from typing import Any, Dict, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.token.base import TokenBaseTool +from intentkit.skills.token.constants import DEFAULT_CHAIN + +logger = logging.getLogger(__name__) + + +class TokenAnalyticsInput(BaseModel): + """Input for token analytics tool.""" + + address: str = Field(description="The token address to get analytics for.") + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + + +class TokenAnalytics(TokenBaseTool): + """Tool for retrieving token analytics using Moralis. + + This tool uses Moralis' API to fetch analytics for a token by token address, + including trading volume, buyer/seller data, and liquidity information. + """ + + name: str = "token_analytics" + description: str = ( + "Get analytics for a token by token address. " + "Returns trading volumes, number of buyers/sellers, and liquidity information over various time periods." + ) + args_schema: Type[BaseModel] = TokenAnalyticsInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + **kwargs, + ) -> Dict[str, Any]: + """Fetch token analytics from Moralis. + + Args: + address: The token address + chain: The blockchain to query + config: The configuration for the tool call + + Returns: + Dict containing token analytics data + """ + context = self.get_context() + if context is None: + logger.error("Context is None, cannot retrieve API key") + return { + "error": "Cannot retrieve API key. Please check agent configuration." 
+ } + + # Get the API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No Moralis API key available") + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = {"chain": chain} + + # Call Moralis API + try: + endpoint = f"/tokens/{address}/analytics" + return await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + except Exception as e: + logger.error(f"Error fetching token analytics: {e}") + return { + "error": f"An error occurred while fetching token analytics: {str(e)}. Please try again later." + } diff --git a/intentkit/skills/token/token_price.py b/intentkit/skills/token/token_price.py new file mode 100644 index 00000000..44cda548 --- /dev/null +++ b/intentkit/skills/token/token_price.py @@ -0,0 +1,130 @@ +import logging +from typing import Any, Dict, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.token.base import TokenBaseTool +from intentkit.skills.token.constants import DEFAULT_CHAIN + +logger = logging.getLogger(__name__) + + +class TokenPriceInput(BaseModel): + """Input for token price tool.""" + + address: str = Field( + description="The address of the token contract to get price for." + ) + chain: str = Field( + description="The chain to query (e.g., 'eth', 'bsc', 'polygon').", + default=DEFAULT_CHAIN, + ) + include: Optional[str] = Field( + description="If the result should contain the 24hr percent change (use 'percent_change').", + default=None, + ) + exchange: Optional[str] = Field( + description="The factory name or address of the token exchange.", + default=None, + ) + to_block: Optional[int] = Field( + description="The block number from which the token price should be checked.", + default=None, + ) + max_token_inactivity: Optional[int] = Field( + description="Exclude tokens inactive for more than the given amount of days.", + default=None, + ) + min_pair_side_liquidity_usd: Optional[int] = Field( + description="Exclude tokens with liquidity less than the specified amount in USD.", + default=None, + ) + + +class TokenPrice(TokenBaseTool): + """Tool for retrieving ERC20 token prices using Moralis. + + This tool uses Moralis' API to fetch the token price denominated in the blockchain's native token + and USD for a given token contract address. + """ + + name: str = "token_price" + description: str = ( + "Get the token price denominated in the blockchain's native token and USD for a given token contract address. " + "Returns price, token information and exchange data." + ) + args_schema: Type[BaseModel] = TokenPriceInput + + async def _arun( + self, + address: str, + chain: str = DEFAULT_CHAIN, + include: Optional[str] = None, + exchange: Optional[str] = None, + to_block: Optional[int] = None, + max_token_inactivity: Optional[int] = None, + min_pair_side_liquidity_usd: Optional[int] = None, + **kwargs, + ) -> Dict[str, Any]: + """Fetch token price from Moralis. 
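+
+    Only the optional filters that are actually provided are forwarded as
+    Moralis query parameters; boolean values are lowercased for the API by
+    _prepare_params in TokenBaseTool.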
+ + Args: + address: The token contract address + chain: The blockchain to query + include: Include 24hr percent change + exchange: The token exchange factory name or address + to_block: Block number to check price from + max_token_inactivity: Max days of inactivity to exclude tokens + min_pair_side_liquidity_usd: Min liquidity in USD to include + config: The configuration for the tool call + + Returns: + Dict containing token price data + """ + # Extract context from config + context = self.get_context() + + if context is None: + logger.error("Context is None, cannot retrieve API key") + return { + "error": "Cannot retrieve API key. Please check agent configuration." + } + + # Get the API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No Moralis API key available") + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = {"chain": chain} + + # Add optional parameters if they exist + if include: + params["include"] = include + if exchange: + params["exchange"] = exchange + if to_block: + params["to_block"] = to_block + if max_token_inactivity: + params["max_token_inactivity"] = max_token_inactivity + if min_pair_side_liquidity_usd: + params["min_pair_side_liquidity_usd"] = min_pair_side_liquidity_usd + + # Call Moralis API + try: + endpoint = f"/erc20/{address}/price" + response = await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + + if "error" in response: + logger.error(f"API returned error: {response.get('error')}") + + return response + except Exception as e: + logger.error(f"Error fetching token price: {e}") + return { + "error": f"An error occurred while fetching token price: {str(e)}. Please try again later." + } diff --git a/intentkit/skills/token/token_search.py b/intentkit/skills/token/token_search.py new file mode 100644 index 00000000..687bb053 --- /dev/null +++ b/intentkit/skills/token/token_search.py @@ -0,0 +1,119 @@ +import logging +from typing import Any, Dict, List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.skills.token.base import TokenBaseTool + +logger = logging.getLogger(__name__) + + +class TokenSearchInput(BaseModel): + """Input for token search tool.""" + + query: str = Field( + description="Search query - can be token address, token name or token symbol." + ) + chains: Optional[List[str]] = Field( + description="The chain(s) to query (e.g., 'eth', 'bsc', 'polygon').", + default=None, + ) + limit: Optional[int] = Field( + description="The desired page size of the result.", + default=None, + ) + is_verified_contract: Optional[bool] = Field( + description="Whether the contract is verified.", + default=None, + ) + + +class TokenSearch(TokenBaseTool): + """Tool for searching tokens using Moralis. + + This tool uses Moralis' premium API to search for tokens based on contract address, + token name or token symbol. + + NOTE: To use this API, you will need an API key associated with a Moralis account + on the Business plan or a custom Enterprise plan. + """ + + name: str = "token_search" + description: str = ( + "Search for tokens based on contract address, token name or token symbol. " + "Returns token information including price, market cap, and security information. " + "NOTE: This is a premium endpoint that requires a Moralis Business plan." 
+ ) + args_schema: Type[BaseModel] = TokenSearchInput + + async def _arun( + self, + query: str, + chains: Optional[List[str]] = None, + limit: Optional[int] = None, + is_verified_contract: Optional[bool] = None, + **kwargs, + ) -> Dict[str, Any]: + """Search for tokens using Moralis. + + Args: + query: Search query (address, name, or symbol) + chains: The blockchains to query + limit: Number of results + is_verified_contract: Filter for verified contracts + config: The configuration for the tool call + + Returns: + Dict containing token search results + """ + # Extract context from config + context = self.get_context() + if context is None: + logger.error("Context is None, cannot retrieve API key") + return { + "error": "Cannot retrieve API key. Please check agent configuration." + } + + # Get the API key + api_key = self.get_api_key() + + if not api_key: + logger.error("No Moralis API key available") + return {"error": "No Moralis API key provided in the configuration."} + + # Build query parameters + params = {"query": query} + + # Add optional parameters if they exist + if chains: + params["chains"] = ",".join(chains) + if limit: + params["limit"] = limit + if is_verified_contract is not None: + params["isVerifiedContract"] = is_verified_contract + + # Call Moralis API + try: + endpoint = "/tokens/search" + result = await self._make_request( + method="GET", endpoint=endpoint, api_key=api_key, params=params + ) + + # Add premium notice if there's an error that might be related to plan limits + if "error" in result and "403" in str(result.get("error", "")): + logger.error("Received 403 error - likely a plan limitation") + result["notice"] = ( + "This API requires a Moralis Business plan or Enterprise plan. " + "Please ensure your API key is associated with the appropriate plan." + ) + + return result + except Exception as e: + logger.error(f"Error searching for tokens: {e}") + return { + "error": f"An error occurred while searching for tokens: {str(e)}. Please try again later.", + "notice": ( + "This API requires a Moralis Business plan or Enterprise plan. " + "Please ensure your API key is associated with the appropriate plan." 
+ ), + } diff --git a/intentkit/skills/twitter/__init__.py b/intentkit/skills/twitter/__init__.py new file mode 100644 index 00000000..7d405c4b --- /dev/null +++ b/intentkit/skills/twitter/__init__.py @@ -0,0 +1,146 @@ +"""Twitter skills.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.clients import TwitterClientConfig +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.twitter.base import TwitterBaseTool +from intentkit.skills.twitter.follow_user import TwitterFollowUser +from intentkit.skills.twitter.get_mentions import TwitterGetMentions +from intentkit.skills.twitter.get_timeline import TwitterGetTimeline +from intentkit.skills.twitter.get_user_by_username import TwitterGetUserByUsername +from intentkit.skills.twitter.get_user_tweets import TwitterGetUserTweets +from intentkit.skills.twitter.like_tweet import TwitterLikeTweet +from intentkit.skills.twitter.post_tweet import TwitterPostTweet +from intentkit.skills.twitter.reply_tweet import TwitterReplyTweet +from intentkit.skills.twitter.retweet import TwitterRetweet +from intentkit.skills.twitter.search_tweets import TwitterSearchTweets + +# we cache skills in system level, because they are stateless +_cache: dict[str, TwitterBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + get_mentions: SkillState + post_tweet: SkillState + reply_tweet: SkillState + get_timeline: SkillState + get_user_by_username: SkillState + get_user_tweets: SkillState + follow_user: SkillState + like_tweet: SkillState + retweet: SkillState + search_tweets: SkillState + + +class Config(SkillConfig, TwitterClientConfig): + """Configuration for Twitter skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[TwitterBaseTool]: + """Get all Twitter skills.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_twitter_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_twitter_skill( + name: str, + store: SkillStoreABC, +) -> TwitterBaseTool: + """Get a Twitter skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested Twitter skill + """ + if name == "get_mentions": + if name not in _cache: + _cache[name] = TwitterGetMentions( + skill_store=store, + ) + return _cache[name] + elif name == "post_tweet": + if name not in _cache: + _cache[name] = TwitterPostTweet( + skill_store=store, + ) + return _cache[name] + elif name == "reply_tweet": + if name not in _cache: + _cache[name] = TwitterReplyTweet( + skill_store=store, + ) + return _cache[name] + elif name == "get_timeline": + if name not in _cache: + _cache[name] = TwitterGetTimeline( + skill_store=store, + ) + return _cache[name] + elif name == "follow_user": + if name not in _cache: + _cache[name] = TwitterFollowUser( + skill_store=store, + ) + return _cache[name] + elif name == "like_tweet": + if name not in _cache: + _cache[name] = TwitterLikeTweet( + skill_store=store, + ) + return _cache[name] + elif name == "retweet": + if name not in _cache: + _cache[name] = TwitterRetweet( + skill_store=store, + ) + return _cache[name] + elif name == "search_tweets": + if name not in _cache: + _cache[name] = TwitterSearchTweets( + skill_store=store, + ) + return _cache[name] + elif name == "get_user_by_username": + if name not in _cache: + _cache[name] = TwitterGetUserByUsername( + skill_store=store, + ) + return _cache[name] + elif name == "get_user_tweets": + if name not in _cache: + _cache[name] = TwitterGetUserTweets( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown Twitter skill: {name}") + return None diff --git a/intentkit/skills/twitter/base.py b/intentkit/skills/twitter/base.py new file mode 100644 index 00000000..9e9d7ab8 --- /dev/null +++ b/intentkit/skills/twitter/base.py @@ -0,0 +1,109 @@ +from datetime import datetime, timedelta, timezone +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.utils.error import RateLimitExceeded + + +class TwitterBaseTool(IntentKitSkill): + """Base class for Twitter tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> dict: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "platform": + # Return platform keys (these need to be added to config.py) + return { + "consumer_key": self.skill_store.get_system_config( + "twitter_consumer_key" + ), + "consumer_secret": self.skill_store.get_system_config( + "twitter_consumer_secret" + ), + "access_token": self.skill_store.get_system_config( + "twitter_access_token" + ), + "access_token_secret": self.skill_store.get_system_config( + "twitter_access_token_secret" + ), + } + # for backward compatibility or agent_owner provider + elif api_key_provider == "agent_owner": + required_keys = [ + "consumer_key", + "consumer_secret", + "access_token", + "access_token_secret", + ] + api_keys = {} + for key in required_keys: + if skill_config.get(key): + api_keys[key] = skill_config.get(key) + else: + raise ToolException( + f"Missing required {key} in agent_owner configuration" + ) + return 
api_keys + else: + raise ToolException(f"Invalid API key provider: {api_key_provider}") + + @property + def category(self) -> str: + return "twitter" + + async def check_rate_limit( + self, agent_id: str, max_requests: int = 1, interval: int = 15 + ) -> None: + """Check if the rate limit has been exceeded. + + Args: + agent_id: The ID of the agent. + max_requests: Maximum number of requests allowed within the rate limit window. + interval: Time interval in minutes for the rate limit window. + + Raises: + RateLimitExceeded: If the rate limit has been exceeded. + """ + rate_limit = await self.skill_store.get_agent_skill_data( + agent_id, self.name, "rate_limit" + ) + + current_time = datetime.now(tz=timezone.utc) + + if ( + rate_limit + and rate_limit.get("reset_time") + and rate_limit["count"] is not None + and datetime.fromisoformat(rate_limit["reset_time"]) > current_time + ): + if rate_limit["count"] >= max_requests: + raise RateLimitExceeded("Rate limit exceeded") + + rate_limit["count"] += 1 + await self.skill_store.save_agent_skill_data( + agent_id, self.name, "rate_limit", rate_limit + ) + + return + + # If no rate limit exists or it has expired, create a new one + new_rate_limit = { + "count": 1, + "reset_time": (current_time + timedelta(minutes=interval)).isoformat(), + } + await self.skill_store.save_agent_skill_data( + agent_id, self.name, "rate_limit", new_rate_limit + ) + return diff --git a/intentkit/skills/twitter/follow_user.py b/intentkit/skills/twitter/follow_user.py new file mode 100644 index 00000000..5e85ea4a --- /dev/null +++ b/intentkit/skills/twitter/follow_user.py @@ -0,0 +1,69 @@ +import logging +from typing import Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client +from intentkit.skills.twitter.base import TwitterBaseTool + +NAME = "twitter_follow_user" +PROMPT = ( + "Follow a Twitter user, if you don't know the user ID, " + "use twitter_get_user_by_username tool to get it." +) +logger = logging.getLogger(__name__) + + +class TwitterFollowUserInput(BaseModel): + """Input for TwitterFollowUser tool.""" + + user_id: str = Field(description="The ID of the user to follow") + + +class TwitterFollowUser(TwitterBaseTool): + """Tool for following a Twitter user. + + This tool uses the Twitter API v2 to follow a user on Twitter. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
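+
+    Example (illustrative flow; the username and ID are placeholders):
+        1. twitter_get_user_by_username(username="example_user") -> id "12345"
+        2. twitter_follow_user(user_id="12345") -> follow request issued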
+    """
+
+    name: str = NAME
+    description: str = PROMPT
+    args_schema: Type[BaseModel] = TwitterFollowUserInput
+
+    async def _arun(self, user_id: str, **kwargs) -> dict:
+        context = self.get_context()
+        try:
+            skill_config = context.agent.skill_config(self.category)
+            twitter = get_twitter_client(
+                agent_id=context.agent_id,
+                skill_store=self.skill_store,
+                config=skill_config,
+            )
+            client = await twitter.get_client()
+
+            # Check rate limit only when not using OAuth
+            if not twitter.use_key:
+                await self.check_rate_limit(
+                    context.agent_id, max_requests=5, interval=15
+                )
+
+            # Follow the user using tweepy client
+            response = await client.follow_user(
+                target_user_id=user_id, user_auth=twitter.use_key
+            )
+
+            if "data" in response and response["data"].get("following"):
+                return response
+            else:
+                logger.error(f"Error following user: {str(response)}")
+                raise ToolException("Failed to follow user")
+
+        except Exception as e:
+            logger.error("Error following user: %s", str(e))
+            raise type(e)(f"[agent:{context.agent_id}]: {e}") from e
diff --git a/intentkit/skills/twitter/get_mentions.py b/intentkit/skills/twitter/get_mentions.py
new file mode 100644
index 00000000..13d3cb7d
--- /dev/null
+++ b/intentkit/skills/twitter/get_mentions.py
@@ -0,0 +1,124 @@
+import logging
+from datetime import datetime, timedelta, timezone
+from typing import Type
+
+from langchain_core.tools import ToolException
+from pydantic import BaseModel
+
+from intentkit.clients.twitter import Tweet, get_twitter_client
+
+from .base import TwitterBaseTool
+
+NAME = "twitter_get_mentions"
+PROMPT = (
+    "Get tweets that mention you, the result is a json object containing a list of tweets. "
+    'If the result is `{"meta": {"result_count": 0}}`, it means there are no new mentions; don\'t retry this tool.'
+)
+
+logger = logging.getLogger(__name__)
+
+
+class TwitterGetMentionsInput(BaseModel):
+    """Input for TwitterGetMentions tool."""
+
+    pass
+
+
+class TwitterGetMentions(TwitterBaseTool):
+    """Tool for getting mentions from Twitter.
+
+    This tool uses the Twitter API v2 to retrieve mentions (tweets in which the authenticated
+    user is mentioned) from Twitter.
+
+    Attributes:
+        name: The name of the tool.
+        description: A description of what the tool does.
+        args_schema: The schema for the tool's input arguments.
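+
+    Note:
+        The newest mention ID is persisted in agent skill data (key "last",
+        field "since_id"), so subsequent calls only fetch newer mentions; an
+        empty result looks like {"meta": {"result_count": 0}}.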
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterGetMentionsInput + + async def _arun(self, **kwargs) -> dict: + context = self.get_context() + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + logger.debug(f"Use Key: {twitter.use_key}") + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, + max_requests=1, + interval=15, + ) + + # get since id from store + last = await self.skill_store.get_agent_skill_data( + context.agent_id, self.name, "last" + ) + last = last or {} + max_results = 10 + since_id = last.get("since_id") + if since_id: + max_results = 30 + + # Always get mentions for the last day + start_time = datetime.now(tz=timezone.utc) - timedelta(days=1) + + user_id = twitter.self_id + if not user_id: + raise ToolException("Failed to get Twitter user ID.") + + mentions = await client.get_users_mentions( + user_auth=twitter.use_key, + id=user_id, + max_results=max_results, + since_id=since_id, + start_time=start_time, + expansions=[ + "referenced_tweets.id", + "referenced_tweets.id.attachments.media_keys", + "referenced_tweets.id.author_id", + "attachments.media_keys", + "author_id", + ], + tweet_fields=[ + "created_at", + "author_id", + "text", + "referenced_tweets", + "attachments", + ], + user_fields=[ + "username", + "name", + "profile_image_url", + "description", + "public_metrics", + "location", + "connection_status", + ], + media_fields=["url", "type", "width", "height"], + ) + + # Update since_id in store + if mentions.get("meta") and mentions["meta"].get("newest_id"): + last["since_id"] = mentions["meta"].get("newest_id") + await self.skill_store.save_agent_skill_data( + context.agent_id, self.name, "last", last + ) + + return mentions + + except Exception as e: + logger.error(f"[agent:{context.agent_id}]: {e}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/get_timeline.py b/intentkit/skills/twitter/get_timeline.py new file mode 100644 index 00000000..befa157c --- /dev/null +++ b/intentkit/skills/twitter/get_timeline.py @@ -0,0 +1,112 @@ +import logging +from typing import Type + +from pydantic import BaseModel + +from intentkit.clients import get_twitter_client + +from .base import TwitterBaseTool + +logger = logging.getLogger(__name__) + +NAME = "twitter_get_timeline" +PROMPT = ( + "Get tweets from your timeline. The result is a json object containing a list of tweets. " + 'If the result is `{"meta": {"result_count": 0}}`, there are no new tweets; don\'t retry this tool.' +) + + +class TwitterGetTimelineInput(BaseModel): + """Input for TwitterGetTimeline tool.""" + + +class TwitterGetTimeline(TwitterBaseTool): + """Tool for getting the user's timeline from Twitter. + + This tool uses the Twitter API v2 to retrieve tweets from the authenticated user's + timeline. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments.
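+ + Example (illustrative; shows how the persisted since_id dedupes repeat calls): + >>> await tool._arun() # first run returns up to 10 recent tweets + >>> await tool._arun() # later runs only return tweets newer than the stored since_id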
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterGetTimelineInput + + async def _arun(self, **kwargs): + context = self.get_context() + try: + # Ensure max_results is an integer + max_results = 10 + + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=1, interval=15 + ) + + # get since id from store + last = await self.skill_store.get_agent_skill_data( + context.agent_id, self.name, "last" + ) + last = last or {} + since_id = last.get("since_id") + + user_id = twitter.self_id + if not user_id: + raise ValueError("Failed to get Twitter user ID.") + + timeline = await client.get_home_timeline( + user_auth=twitter.use_key, + max_results=max_results, + since_id=since_id, + exclude=["replies"], + expansions=[ + "referenced_tweets.id", + "referenced_tweets.id.attachments.media_keys", + "referenced_tweets.id.author_id", + "attachments.media_keys", + "author_id", + ], + tweet_fields=[ + "created_at", + "author_id", + "text", + "referenced_tweets", + "attachments", + ], + user_fields=[ + "username", + "name", + "profile_image_url", + "description", + "public_metrics", + "location", + "connection_status", + ], + media_fields=["url", "type", "width", "height"], + ) + + # Update the since_id in store for the next request + if timeline.get("meta") and timeline["meta"].get("newest_id"): + last["since_id"] = timeline["meta"]["newest_id"] + await self.skill_store.save_agent_skill_data( + context.agent_id, self.name, "last", last + ) + + return timeline + + except Exception as e: + logger.error("Error getting timeline: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/get_user_by_username.py b/intentkit/skills/twitter/get_user_by_username.py new file mode 100644 index 00000000..24a190dc --- /dev/null +++ b/intentkit/skills/twitter/get_user_by_username.py @@ -0,0 +1,84 @@ +import logging +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client + +from .base import TwitterBaseTool + +logger = logging.getLogger(__name__) + +NAME = "twitter_get_user_by_username" +PROMPT = ( + "Get a Twitter user's information by their username." + "Returns detailed user information as a json object." +) + + +class TwitterGetUserByUsernameInput(BaseModel): + """Input for TwitterGetUserByUsername tool.""" + + username: str = Field(description="The Twitter username to lookup") + + +class TwitterGetUserByUsername(TwitterBaseTool): + """Tool for getting a Twitter user by their username. + + This tool uses the Twitter API v2 to retrieve user information by username. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
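+ + Example (illustrative sketch; the username and returned fields are placeholders): + >>> await tool._arun(username="TwitterDev") + {"data": {"id": "2244994945", "name": "Developers", "username": "TwitterDev"}}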
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterGetUserByUsernameInput + + async def _arun(self, username: str, **kwargs): + context = self.get_context() + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=5, interval=60 * 24 + ) + + user_data = await client.get_user( + username=username, + user_auth=twitter.use_key, + user_fields=[ + "created_at", + "description", + "entities", + "connection_status", + "id", + "location", + "name", + "pinned_tweet_id", + "profile_image_url", + "protected", + "public_metrics", + "url", + "username", + "verified", + "verified_type", + "withheld", + ], + ) + + return user_data + + except Exception as e: + logger.error(f"Error getting user by username: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/get_user_tweets.py b/intentkit/skills/twitter/get_user_tweets.py new file mode 100644 index 00000000..7a151a10 --- /dev/null +++ b/intentkit/skills/twitter/get_user_tweets.py @@ -0,0 +1,123 @@ +import logging +from typing import List, Optional, Type + +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client + +from .base import TwitterBaseTool + +logger = logging.getLogger(__name__) + +NAME = "twitter_get_user_tweets" +PROMPT = ( + "Get tweets from a specific Twitter user by their user ID. " + "The result is a json object containing a list of tweets." + 'If the result is `{"meta": {"result_count": 0}}`, means no tweets found, don\'t retry this tool.' +) + + +class TwitterGetUserTweetsInput(BaseModel): + """Input for TwitterGetUserTweets tool.""" + + user_id: str = Field(description="The Twitter user ID to fetch tweets from") + exclude: Optional[List[str]] = Field( + default=["replies", "retweets"], + description="Types of tweets to exclude (e.g., 'replies', 'retweets')", + ) + + +class TwitterGetUserTweets(TwitterBaseTool): + """Tool for getting tweets from a specific Twitter user. + + This tool uses the Twitter API v2 to retrieve tweets from a specific user + by their user ID. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
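+ + Example (illustrative; the user ID is a placeholder): + >>> await tool._arun(user_id="2244994945", exclude=["replies", "retweets"]) + {"data": [...], "meta": {"result_count": 10, "newest_id": "17..."}}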
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterGetUserTweetsInput + + async def _arun(self, **kwargs): + context = self.get_context() + try: + user_id = kwargs.get("user_id") + if not user_id: + raise ValueError("User ID is required") + + # Hardcode max_results to 10 + max_results = 10 + + # Get exclude parameter with default + exclude = kwargs.get("exclude", ["replies", "retweets"]) + + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=1, interval=15 + ) + + # get since id from store + last = await self.skill_store.get_agent_skill_data( + context.agent_id, self.name, user_id + ) + last = last or {} + since_id = last.get("since_id") + + tweets = await client.get_users_tweets( + user_auth=twitter.use_key, + id=user_id, + max_results=max_results, + since_id=since_id, + exclude=exclude, + expansions=[ + "referenced_tweets.id", + "referenced_tweets.id.attachments.media_keys", + "referenced_tweets.id.author_id", + "attachments.media_keys", + "author_id", + ], + tweet_fields=[ + "created_at", + "author_id", + "text", + "referenced_tweets", + "attachments", + ], + user_fields=[ + "username", + "name", + "profile_image_url", + "description", + "public_metrics", + "location", + "connection_status", + ], + media_fields=["url", "type", "width", "height"], + ) + + # Update the since_id in store for the next request + if tweets.get("meta") and tweets["meta"].get("newest_id"): + last["since_id"] = tweets["meta"]["newest_id"] + await self.skill_store.save_agent_skill_data( + context.agent_id, self.name, user_id, last + ) + + return tweets + + except Exception as e: + logger.error("Error getting user tweets: %s", str(e)) + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/like_tweet.py b/intentkit/skills/twitter/like_tweet.py new file mode 100644 index 00000000..34775d20 --- /dev/null +++ b/intentkit/skills/twitter/like_tweet.py @@ -0,0 +1,65 @@ +import logging +from typing import Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client +from intentkit.skills.twitter.base import TwitterBaseTool + +NAME = "twitter_like_tweet" +PROMPT = "Like a tweet on Twitter" + +logger = logging.getLogger(__name__) + + +class TwitterLikeTweetInput(BaseModel): + """Input for TwitterLikeTweet tool.""" + + tweet_id: str = Field(description="The ID of the tweet to like") + + +class TwitterLikeTweet(TwitterBaseTool): + """Tool for liking tweets on Twitter. + + This tool uses the Twitter API v2 to like tweets on Twitter. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
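+ + Example (illustrative, not from the source; the tweet ID is a placeholder): + >>> await tool._arun(tweet_id="1460323737035677698") + {"data": {"liked": True}}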
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterLikeTweetInput + + async def _arun(self, tweet_id: str, **kwargs): + context = self.get_context() + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=100, interval=1440 + ) + + # Like the tweet using tweepy client + response = await client.like(tweet_id=tweet_id, user_auth=twitter.use_key) + + if "data" in response and "liked" in response["data"]: + return response + else: + logger.error(f"Error liking tweet: {str(response)}") + raise ToolException("Failed to like tweet.") + + except Exception as e: + logger.error(f"Error liking tweet: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/post_tweet.py b/intentkit/skills/twitter/post_tweet.py new file mode 100644 index 00000000..5497e26e --- /dev/null +++ b/intentkit/skills/twitter/post_tweet.py @@ -0,0 +1,103 @@ +import logging +from typing import Optional, Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client +from intentkit.skills.twitter.base import TwitterBaseTool + +NAME = "twitter_post_tweet" +PROMPT = ( + "Post a new tweet to Twitter. If you want to post image, " + "you must provide image url in parameters, do not add image link in text." +) + +logger = logging.getLogger(__name__) + + +class TwitterPostTweetInput(BaseModel): + """Input for TwitterPostTweet tool.""" + + text: str = Field( + description="Tweet text (280 chars for regular users, 25,000 bytes for verified)", + max_length=25000, + ) + image: Optional[str] = Field( + default=None, description="Optional URL of an image to attach to the tweet" + ) + + +class TwitterPostTweet(TwitterBaseTool): + """Tool for posting tweets to Twitter. + + This tool uses the Twitter API v2 to post new tweets to Twitter. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterPostTweetInput + + async def _arun( + self, + text: str, + image: Optional[str] = None, + **kwargs, + ): + context = self.get_context() + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=24, interval=1440 + ) + + media_ids = [] + image_warning = "" + + # Handle image upload if provided + if image: + # Validate image URL - must be from system's S3 CDN + aws_s3_cdn_url = self.skill_store.get_system_config("aws_s3_cdn_url") + if aws_s3_cdn_url and image.startswith(aws_s3_cdn_url): + # Use the TwitterClient method to upload the image + media_ids = await twitter.upload_media(context.agent_id, image) + else: + # Image is not from system's S3 CDN, skip upload but warn + image_warning = "Warning: The provided image URL is not from the system's S3 CDN and has been ignored. 
" + logger.warning( + f"Image URL validation failed for agent {context.agent_id}: {image}" + ) + + # Post tweet using tweepy client + tweet_params = {"text": text, "user_auth": twitter.use_key} + if media_ids: + tweet_params["media_ids"] = media_ids + + response = await client.create_tweet(**tweet_params) + if "data" in response and "id" in response["data"]: + # Return response with warning if image was ignored + result = ( + f"{image_warning}Tweet posted successfully. Response: {response}" + ) + return result + else: + logger.error(f"Error posting tweet: {str(response)}") + raise ToolException("Failed to post tweet.") + + except Exception as e: + logger.error(f"Error posting tweet: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/reply_tweet.py b/intentkit/skills/twitter/reply_tweet.py new file mode 100644 index 00000000..a3411649 --- /dev/null +++ b/intentkit/skills/twitter/reply_tweet.py @@ -0,0 +1,109 @@ +import logging +from typing import Optional, Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client +from intentkit.skills.twitter.base import TwitterBaseTool + +NAME = "twitter_reply_tweet" +PROMPT = ( + "Reply to an existing tweet on Twitter. Do not reply to your own tweet. " + "If you want to post image, you must provide image url in parameters, do not add image link in text." +) + +logger = logging.getLogger(__name__) + + +class TwitterReplyTweetInput(BaseModel): + """Input for TwitterReplyTweet tool.""" + + tweet_id: str = Field(description="The ID of the tweet to reply to") + text: str = Field( + description="Tweet text (280 chars for regular users, 25,000 bytes for verified)", + max_length=25000, + ) + image: Optional[str] = Field( + default=None, description="Optional URL of an image to attach to the reply" + ) + + +class TwitterReplyTweet(TwitterBaseTool): + """Tool for replying to tweets on Twitter. + + This tool uses the Twitter API v2 to post reply tweets to existing tweets. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterReplyTweetInput + + async def _arun( + self, + tweet_id: str, + text: str, + image: Optional[str] = None, + **kwargs, + ): + context = self.get_context() + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=48, interval=1440 + ) + + media_ids = [] + image_warning = "" + + # Handle image upload if provided + if image: + # Validate image URL - must be from system's S3 CDN + aws_s3_cdn_url = self.skill_store.get_system_config("aws_s3_cdn_url") + if aws_s3_cdn_url and image.startswith(aws_s3_cdn_url): + # Use the TwitterClient method to upload the image + media_ids = await twitter.upload_media(context.agent_id, image) + else: + # Image is not from system's S3 CDN, skip upload but warn + image_warning = "Warning: The provided image URL is not from the system's S3 CDN and has been ignored. 
" + logger.warning( + f"Image URL validation failed for agent {context.agent_id}: {image}" + ) + + # Post reply tweet using tweepy client + tweet_params = { + "text": text, + "user_auth": twitter.use_key, + "in_reply_to_tweet_id": tweet_id, + } + + if media_ids: + tweet_params["media_ids"] = media_ids + + response = await client.create_tweet(**tweet_params) + + if "data" in response and "id" in response["data"]: + # Return response with warning if image was ignored + result = f"{image_warning}Reply tweet posted successfully. Response: {response}" + return result + else: + logger.error(f"Error replying to tweet: {str(response)}") + raise ToolException("Failed to post reply tweet.") + + except Exception as e: + logger.error(f"Error replying to tweet: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/retweet.py b/intentkit/skills/twitter/retweet.py new file mode 100644 index 00000000..ac44a699 --- /dev/null +++ b/intentkit/skills/twitter/retweet.py @@ -0,0 +1,76 @@ +import logging +from typing import Type + +from langchain_core.tools import ToolException +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client +from intentkit.skills.twitter.base import TwitterBaseTool + +NAME = "twitter_retweet" +PROMPT = "Retweet a tweet on Twitter" + +logger = logging.getLogger(__name__) + + +class TwitterRetweetInput(BaseModel): + """Input for TwitterRetweet tool.""" + + tweet_id: str = Field(description="The ID of the tweet to retweet") + + +class TwitterRetweet(TwitterBaseTool): + """Tool for retweeting tweets on Twitter. + + This tool uses the Twitter API v2 to retweet tweets on Twitter. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. 
+ """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterRetweetInput + + async def _arun(self, tweet_id: str, **kwargs): + context = self.get_context() + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=5, interval=15 + ) + + # Get authenticated user's ID + user_id = twitter.self_id + if not user_id: + raise ValueError("Failed to get authenticated user ID.") + + # Retweet the tweet using tweepy client + response = await client.retweet( + tweet_id=tweet_id, user_auth=twitter.use_key + ) + + if ( + "data" in response + and "retweeted" in response["data"] + and response["data"]["retweeted"] + ): + return response + else: + logger.error(f"Error retweeting: {str(response)}") + raise ToolException("Failed to retweet.") + + except Exception as e: + logger.error(f"Error retweeting: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/schema.json b/intentkit/skills/twitter/schema.json new file mode 100644 index 00000000..bcd308ed --- /dev/null +++ b/intentkit/skills/twitter/schema.json @@ -0,0 +1,258 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "X", + "description": "Integration with X API enabling social media interactions including retrieving posts, mentions, user information, and posting content with media support", + "x-icon": "https://ai.service.crestal.dev/skills/twitter/twitter.png", + "x-tags": [ + "Social" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "get_mentions": { + "type": "string", + "title": "Get Mentions", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves posts that mention the authenticated user from the past 24 hours.", + "default": "disabled" + }, + "post_tweet": { + "type": "string", + "title": "Create a Post", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Publishes posts with media attachments, and content moderation checks", + "default": "disabled" + }, + "reply_tweet": { + "type": "string", + "title": "Reply to a Post", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Constructs contextual replies to posts with mention handling and conversation threading", + "default": "disabled" + }, + "get_timeline": { + "type": "string", + "title": "Get Timeline", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Fetches user's home timeline with recent posts.", + "default": "disabled" + }, + "follow_user": { + "type": "string", + "title": "Follow User", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Initiates 
following of X accounts with rate limit handling and anti-spam safeguards", + "default": "disabled" + }, + "like_tweet": { + "type": "string", + "title": "Like a Post", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Likes a post", + "default": "disabled" + }, + "retweet": { + "type": "string", + "title": "Repost a Post", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Shares posts with attribution tracking and duplicate prevention mechanisms", + "default": "disabled" + }, + "search_tweets": { + "type": "string", + "title": "Search Posts", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Executes advanced X searches with keyword filters, date ranges, and engagement thresholds", + "default": "disabled" + }, + "get_user_by_username": { + "type": "string", + "title": "Get User by Username", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves user information by username.", + "default": "disabled" + }, + "get_user_tweets": { + "type": "string", + "title": "Get User Tweets", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Retrieves tweets from a specific user by their user ID.", + "default": "disabled" + } + }, + "description": "States for each X skill" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key", + "enum": [ + "platform", + "agent_owner" + ], + "x-enum-title": [ + "Nation Hosted", + "Owner Provided" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "consumer_key": { + "type": "string", + "title": "X API consumer key", + "description": "X API consumer key", + "x-link": "[Get your API key](https://developer.x.com/)", + "x-sensitive": true, + "maxLength": 100 + }, + "consumer_secret": { + "type": "string", + "title": "X API consumer secret", + "description": "X API consumer secret", + "x-sensitive": true, + "maxLength": 100 + }, + "access_token": { + "type": "string", + "title": "X API access token", + "description": "X API access token", + "x-sensitive": true, + "maxLength": 100 + }, + "access_token_secret": { + "type": "string", + "title": "X API access token secret", + "description": "X API access token secret", + "x-sensitive": true, + "maxLength": 100 + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "consumer_key", + "consumer_secret", + "access_token", + "access_token_secret" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/twitter/search_tweets.py b/intentkit/skills/twitter/search_tweets.py new file mode 100644 index 00000000..2518f4be --- /dev/null +++ b/intentkit/skills/twitter/search_tweets.py @@ -0,0 +1,115 @@ +import datetime +import logging +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.clients import get_twitter_client + +from .base 
import TwitterBaseTool + +logger = logging.getLogger(__name__) + +NAME = "twitter_search_tweets" +PROMPT = "Search for recent tweets on Twitter using a query keyword." + + +class TwitterSearchTweetsInput(BaseModel): + """Input for TwitterSearchTweets tool.""" + + query: str = Field(description="The search query to find tweets") + + +class TwitterSearchTweets(TwitterBaseTool): + """Tool for searching recent tweets on Twitter. + + This tool uses the Twitter API v2 to search for recent tweets based on a query. + + Attributes: + name: The name of the tool. + description: A description of what the tool does. + args_schema: The schema for the tool's input arguments. + """ + + name: str = NAME + description: str = PROMPT + args_schema: Type[BaseModel] = TwitterSearchTweetsInput + + async def _arun(self, query: str, **kwargs): + context = self.get_context() + max_results = 10 + try: + skill_config = context.agent.skill_config(self.category) + twitter = get_twitter_client( + agent_id=context.agent_id, + skill_store=self.skill_store, + config=skill_config, + ) + client = await twitter.get_client() + + # Check rate limit only when not using OAuth + if not twitter.use_key: + await self.check_rate_limit( + context.agent_id, max_requests=1, interval=15 + ) + + # Get since_id from store to avoid duplicate results + last = await self.skill_store.get_agent_skill_data( + context.agent_id, self.name, query + ) + last = last or {} + since_id = last.get("since_id") + + # Reset since_id if the saved timestamp is over 6 days old + if since_id and last.get("timestamp"): + try: + saved_time = datetime.datetime.fromisoformat(last["timestamp"]) + if (datetime.datetime.now() - saved_time).days > 6: + since_id = None + except (ValueError, TypeError): + since_id = None + + tweets = await client.search_recent_tweets( + query=query, + user_auth=twitter.use_key, + since_id=since_id, + max_results=max_results, + expansions=[ + "referenced_tweets.id", + "referenced_tweets.id.attachments.media_keys", + "referenced_tweets.id.author_id", + "attachments.media_keys", + "author_id", + ], + tweet_fields=[ + "created_at", + "author_id", + "text", + "referenced_tweets", + "attachments", + ], + user_fields=[ + "username", + "name", + "profile_image_url", + "description", + "public_metrics", + "location", + "connection_status", + ], + media_fields=["url", "type", "width", "height"], + ) + + # Update the since_id in store for the next request + if tweets.get("meta") and tweets.get("meta").get("newest_id"): + last["since_id"] = tweets["meta"]["newest_id"] + last["timestamp"] = datetime.datetime.now().isoformat() + await self.skill_store.save_agent_skill_data( + context.agent_id, self.name, query, last + ) + + return tweets + + except Exception as e: + logger.error(f"Error searching tweets: {str(e)}") + raise type(e)(f"[agent:{context.agent_id}]: {e}") from e diff --git a/intentkit/skills/twitter/twitter.png b/intentkit/skills/twitter/twitter.png new file mode 100644 index 00000000..4db767fc Binary files /dev/null and b/intentkit/skills/twitter/twitter.png differ diff --git a/intentkit/skills/unrealspeech/__init__.py b/intentkit/skills/unrealspeech/__init__.py new file mode 100644 index 00000000..a5f1960a --- /dev/null +++ b/intentkit/skills/unrealspeech/__init__.py @@ -0,0 +1,55 @@ +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.unrealspeech.base import UnrealSpeechBaseTool +from 
intentkit.skills.unrealspeech.text_to_speech import TextToSpeech + +# Cache skills at the system level, because they are stateless +_cache: dict[str, UnrealSpeechBaseTool] = {} + + +class SkillStates(TypedDict): + text_to_speech: SkillState + + +class Config(SkillConfig): + """Configuration for UnrealSpeech skills.""" + + states: SkillStates + api_key: str = "" # Optional API key + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[UnrealSpeechBaseTool]: + """Get all UnrealSpeech tools.""" + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + return [get_unrealspeech_skill(name, store) for name in available_skills] + + +def get_unrealspeech_skill( + name: str, + store: SkillStoreABC, +) -> UnrealSpeechBaseTool: + """Get an UnrealSpeech skill by name.""" + if name == "text_to_speech": + if name not in _cache: + _cache[name] = TextToSpeech( + skill_store=store, + ) + return _cache[name] + else: + raise ValueError(f"Unknown UnrealSpeech skill: {name}") diff --git a/intentkit/skills/unrealspeech/base.py b/intentkit/skills/unrealspeech/base.py new file mode 100644 index 00000000..6e7d4eb4 --- /dev/null +++ b/intentkit/skills/unrealspeech/base.py @@ -0,0 +1,37 @@ +from typing import Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class UnrealSpeechBaseTool(IntentKitSkill): + """Base class for UnrealSpeech text-to-speech tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + def get_api_key(self) -> str: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "agent_owner": + api_key = skill_config.get("api_key") + if api_key: + return api_key + else: + raise ToolException("No api_key found in agent_owner configuration") + else: + raise ToolException( + f"Invalid API key provider: {api_key_provider}. Only 'agent_owner' is supported for UnrealSpeech." 
+ ) + + @property + def category(self) -> str: + return "unrealspeech" diff --git a/intentkit/skills/unrealspeech/schema.json b/intentkit/skills/unrealspeech/schema.json new file mode 100644 index 00000000..e5cfad7b --- /dev/null +++ b/intentkit/skills/unrealspeech/schema.json @@ -0,0 +1,100 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "UnrealSpeech", + "description": "Convert text to natural-sounding speech with various voices and customization options", + "x-icon": "https://ai.service.crestal.dev/skills/unrealspeech/unrealspeech.jpg", + "x-tags": [ + "Audio", + "Speech", + "Text-to-Speech", + "Voice" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": false + }, + "states": { + "type": "object", + "properties": { + "text_to_speech": { + "type": "string", + "title": "Text to Speech", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Convert text to natural-sounding speech with various voices and customization options", + "default": "disabled" + } + }, + "description": "States for each UnrealSpeech skill (disabled, public, or private)" + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key for the UnrealSpeech API service", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "if": { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "UnrealSpeech API Key", + "description": "API key for UnrealSpeech service", + "x-link": "[Get your API key](https://unrealspeech.com/)", + "x-sensitive": true + }, + "rate_limit_number": { + "type": "integer", + "title": "Rate Limit Number", + "description": "Number of requests allowed per time window, only valid if api_key is set" + }, + "rate_limit_minutes": { + "type": "integer", + "title": "Rate Limit Minutes", + "description": "Time window in minutes for rate limiting, only valid if api_key is set" + } + }, + "if": { + "properties": { + "enabled": { + "const": true + } + } + }, + "then": { + "required": [ + "api_key" + ] + } + }, + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/unrealspeech/text_to_speech.py b/intentkit/skills/unrealspeech/text_to_speech.py new file mode 100644 index 00000000..6e8d6adb --- /dev/null +++ b/intentkit/skills/unrealspeech/text_to_speech.py @@ -0,0 +1,156 @@ +import logging +import os +from typing import Any, Dict, Literal, Optional, Type + +import httpx +from langchain_core.callbacks.manager import CallbackManagerForToolRun +from pydantic import BaseModel, Field + +from intentkit.skills.unrealspeech.base import UnrealSpeechBaseTool + +logger = logging.getLogger(__name__) + + +class TextToSpeechInput(BaseModel): + """Input for TextToSpeech tool.""" + + text: str = Field(description="The text to convert to speech. Required.") + + voice_id: str = Field( + description="The voice ID to use for speech synthesis. Options include: 'af_bella', 'af_sarah', 'af_nicole', 'af_sky', 'am_adam', 'am_michael', 'bf_emma', 'bf_isabella', 'bm_george', 'bm_lewis'.", + default="af_sarah", + ) + + bitrate: str = Field(
+ description="The audio bitrate. Higher values provide better quality but larger file sizes. Options: '64k', '96k', '128k', '192k', '256k', '320k'.", + default="192k", + ) + + speed: float = Field( + description="The speech speed adjustment. Range: -1.0 (slower) to 1.0 (faster), with 0.0 being the normal speed.", + default=0.0, + ) + + timestamp_type: Optional[Literal["word", "sentence"]] = Field( + description="The type of timestamps to include in the response. 'word' for word-level timestamps, 'sentence' for sentence-level, or None for no timestamps.", + default="word", + ) + + +class TextToSpeech(UnrealSpeechBaseTool): + """Tool for converting text to speech using UnrealSpeech's API. + + This tool converts text to natural-sounding speech in various voices. + It can generate speech with different voices, speeds, and qualities. + The response includes URLs to the audio file and optional word-level timestamps. + """ + + name: str = "text_to_speech" + description: str = ( + "Converts text to natural-sounding speech using UnrealSpeech.\n" + "Use this tool when you need to generate spoken audio from text.\n" + "Returns URLs to the generated audio file and word-level timestamps.\n" + "Provides various voice options and speech customization parameters." + ) + args_schema: Type[BaseModel] = TextToSpeechInput + + def get_env_var(self, env_var_name: str) -> Optional[str]: + """Helper method to get environment variables.""" + return os.environ.get(env_var_name) + + async def _arun( + self, + text: str, + voice_id: str = "af_sarah", + bitrate: str = "192k", + speed: float = 0.0, + timestamp_type: Optional[Literal["word", "sentence"]] = "word", + config: Optional[Any] = None, + run_manager: Optional[CallbackManagerForToolRun] = None, + **kwargs, + ) -> Dict[str, Any]: + """Run the tool to convert text to speech.""" + + # Resolve the API key via the base class helper, which reads the + # agent_owner skill config and raises ToolException when no key is set. + api_key = self.get_api_key() + + # Clean up and validate input + if not text: + return {"success": False, "error": "Text cannot be empty."} + + # Validate bitrate + valid_bitrates = ["64k", "96k", "128k", "192k", "256k", "320k"] + if bitrate not in valid_bitrates: + logger.warning(f"Invalid bitrate '{bitrate}'. Using default '192k'.") + bitrate = "192k" + + # Validate speed + if not -1.0 <= speed <= 1.0: + logger.warning( + f"Speed value {speed} is outside valid range (-1.0 to 1.0). Clamping to valid range."
+ ) + speed = max(-1.0, min(1.0, speed)) + + try: + # For longer text, use the /speech endpoint for better handling + endpoint = "https://api.v8.unrealspeech.com/speech" + + # Prepare the request payload + payload = { + "Text": text, + "VoiceId": voice_id, + "Bitrate": bitrate, + "Speed": str(speed), + "Pitch": "1", + "OutputFormat": "uri", + } + + # Add timestamp type if specified + if timestamp_type: + payload["TimestampType"] = timestamp_type + + # Send the request to UnrealSpeech API + async with httpx.AsyncClient(timeout=60.0) as client: + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + + response = await client.post(endpoint, json=payload, headers=headers) + + # Check response status + if response.status_code != 200: + logger.error(f"UnrealSpeech API error: {response.text}") + return { + "success": False, + "error": f"API error: {response.status_code} - {response.text}", + } + + # Parse response + result = response.json() + + # Format the response + return { + "success": True, + "task_id": result.get("TaskId"), + "audio_url": result.get("OutputUri"), + "timestamps_url": result.get("TimestampsUri") + if timestamp_type + else None, + "status": result.get("TaskStatus"), + "voice_id": result.get("VoiceId"), + "character_count": result.get("RequestCharacters"), + "word_count": result.get("RequestCharacters", 0) + // 5, # Rough estimate + "duration_seconds": result.get("RequestCharacters", 0) + // 15, # Rough estimate (15 chars/sec) + "created_at": result.get("CreationTime"), + } + + except Exception as e: + logger.error(f"Failed to generate speech: {e}", exc_info=True) + return {"success": False, "error": f"Failed to generate speech: {str(e)}"} diff --git a/intentkit/skills/unrealspeech/unrealspeech.jpg b/intentkit/skills/unrealspeech/unrealspeech.jpg new file mode 100644 index 00000000..a08872f5 Binary files /dev/null and b/intentkit/skills/unrealspeech/unrealspeech.jpg differ diff --git a/intentkit/skills/venice_audio/__init__.py b/intentkit/skills/venice_audio/__init__.py new file mode 100644 index 00000000..a0bee0ed --- /dev/null +++ b/intentkit/skills/venice_audio/__init__.py @@ -0,0 +1,106 @@ +import logging +from typing import List, Literal, Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillState +from intentkit.skills.venice_audio.base import VeniceAudioBaseTool +from intentkit.skills.venice_audio.venice_audio import VeniceAudioTool + +logger = logging.getLogger(__name__) + + +_cache: dict[str, VeniceAudioBaseTool] = {} + +_SKILL_NAME_TO_CLASS_MAP = { + "text_to_speech": VeniceAudioTool, + # Add new mappings here: "skill_name": SkillClassName +} + + +class SkillStates(TypedDict): + text_to_speech: SkillState + + +class Config(SkillConfig): + enabled: bool + voice_model: Literal["af_heart", "bm_lewis", "custom"] + states: SkillStates # type: ignore + api_key_provider: Optional[Literal["agent_owner"]] + + # conditionally required + api_key: Optional[str] + voice_model_custom: Optional[list[str]] + + # optional + rate_limit_number: Optional[int] + rate_limit_minutes: Optional[int] + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, # Allow for extra arguments if the loader passes them +) -> list[VeniceAudioBaseTool]: + """ + Factory function to create and return Venice Audio skill tools. + + Args: + config: The configuration dictionary for the Venice Audio skill. + skill_store: The skill store instance. 
+ agent_id: The ID of the agent requesting the skills. + + Returns: + A list of VeniceAudioBaseTool instances for the Venice Audio skill. + """ + # Check if the entire category is disabled first + if not config.get("enabled", False): + return [] + + available_skills: List[VeniceAudioBaseTool] = [] + skill_states = config.get("states", {}) + + # Iterate through all known skills defined in the map + for skill_name in _SKILL_NAME_TO_CLASS_MAP: + state = skill_states.get( + skill_name, "disabled" + ) # Default to disabled if not in config + + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + # If enabled, get the skill instance using the factory function + skill_instance = get_venice_audio_skill(skill_name, store) + if skill_instance: + available_skills.append(skill_instance) + else: + # This case should ideally not happen if the map is correct + logger.warning(f"Could not instantiate known skill: {skill_name}") + + return available_skills + + +def get_venice_audio_skill( + name: str, + store: SkillStoreABC, +) -> Optional[VeniceAudioBaseTool]: + """ + Factory function to get a cached Venice Audio skill instance by name. + + Args: + name: The name of voice model. + store: The skill store, passed to the skill constructor. + + Returns: + The requested Venice Audio skill instance, or None if the name is unknown. + """ + + # Return from cache immediately if already exists + if name in _cache: + return _cache[name] + + # Cache and return the newly created instance + _cache[name] = VeniceAudioTool( + skill_store=store, + ) + return _cache[name] diff --git a/intentkit/skills/venice_audio/base.py b/intentkit/skills/venice_audio/base.py new file mode 100644 index 00000000..f370c254 --- /dev/null +++ b/intentkit/skills/venice_audio/base.py @@ -0,0 +1,121 @@ +import logging +from typing import Dict, List, Optional, Tuple, Type + +from langchain.tools.base import ToolException +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + +logger = logging.getLogger(__name__) + + +class VeniceAudioBaseTool(IntentKitSkill): + """Base class for Venice Audio tools.""" + + name: str = Field(default="venice_base_tool", description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] # type: ignore + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "venice_audio" + + def validate_voice_model( + self, context, voice_model: str + ) -> Tuple[bool, Optional[Dict[str, object]]]: + config = context.config + + selected_model = config.get("voice_model") + custom_models = config.get("voice_model_custom", []) + + allowed_voice_models: List[str] = [] + + if selected_model == "custom": + allowed_voice_models = custom_models or [] + else: + allowed_voice_models = [selected_model] if selected_model else [] + + if voice_model not in allowed_voice_models: + return False, { + "error": f'"{voice_model}" is not allowed', + "allowed": allowed_voice_models, + "suggestion": "please try again with allowed voice model", + } + + return True, None + + def get_api_key(self) -> str: + """ + Retrieves the Venice AI API key based on the api_key_provider setting. + + Returns: + The API key if found. + + Raises: + ToolException: If the API key is not found or provider is invalid. 
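+ + Resolution order (a sketch of the logic below; key values are placeholders): + - api_key_provider == "agent_owner": returns skill_config["api_key"] + - api_key_provider == "platform": returns system config "venice_api_key" + - anything else: raises ToolException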
+ """ + try: + context = self.get_context() + skill_config = context.agent.skill_config(self.category) + api_key_provider = skill_config.get("api_key_provider") + if api_key_provider == "agent_owner": + agent_api_key = skill_config.get("api_key") + if agent_api_key: + logger.debug( + f"Using agent-specific Venice API key for skill {self.name} in category {self.category}" + ) + return agent_api_key + raise ToolException( + f"No agent-owned Venice API key found for skill '{self.name}' in category '{self.category}'." + ) + + elif api_key_provider == "platform": + system_api_key = self.skill_store.get_system_config("venice_api_key") + if system_api_key: + logger.debug( + f"Using system Venice API key for skill {self.name} in category {self.category}" + ) + return system_api_key + raise ToolException( + f"No platform-hosted Venice API key found for skill '{self.name}' in category '{self.category}'." + ) + + else: + raise ToolException( + f"Invalid API key provider '{api_key_provider}' for skill '{self.name}'" + ) + + except Exception as e: + raise ToolException(f"Failed to retrieve Venice API key: {str(e)}") from e + + async def apply_rate_limit(self, context) -> None: + """ + Applies rate limiting ONLY if specified in the agent's config ('skill_config'). + Checks for 'rate_limit_number' and 'rate_limit_minutes'. + If not configured, NO rate limiting is applied. + Raises ConnectionAbortedError if the configured limit is exceeded. + """ + skill_config = context.agent.skill_config(self.category) + user_id = context.agent.id + + # Get agent-specific limits safely + limit_num = skill_config.get("rate_limit_number") + limit_min = skill_config.get("rate_limit_minutes") + + # Apply limit ONLY if both values are present and valid (truthy check handles None and 0) + if limit_num and limit_min: + limit_source = "Agent" + logger.debug( + f"Applying {limit_source} rate limit ({limit_num}/{limit_min} min) for user {user_id} on {self.name}" + ) + if user_id: + await self.user_rate_limit_by_category(user_id, limit_num, limit_min) + else: + # No valid agent configuration found, so do nothing. + logger.debug( + f"No agent rate limits configured for category '{self.category}'. Skipping rate limit for user {user_id}." + ) diff --git a/intentkit/skills/venice_audio/input.py b/intentkit/skills/venice_audio/input.py new file mode 100644 index 00000000..2c87e419 --- /dev/null +++ b/intentkit/skills/venice_audio/input.py @@ -0,0 +1,41 @@ +from typing import Literal, Optional + +from pydantic import BaseModel, Field + +# Define the allowed format literals based on the API documentation +AllowedAudioFormat = Literal["mp3", "opus", "aac", "flac", "wav", "pcm"] + + +class VeniceAudioInput(BaseModel): + """ + Input schema for Venice AI Text-to-Speech (/audio/speech endpoint). + Defines parameters controllable by the user when invoking the tool. + """ + + input: str = Field( + ..., # Ellipsis (...) indicates this field is required + description="The text to generate audio for. Maximum length is 4096 characters.", + min_length=1, # As per API docs: Required string length: 1 + max_length=4096, # As per API docs: The maximum length is 4096 characters. + ) + + voice_model: str = Field( + description="voice model to used to generate voice from text_to_speech tool." + ) + + speed: Optional[float] = Field( + default=1.0, # As per API docs: default: 1 (using float for consistency) + description="The speed of the generated audio. 1.0 is normal speed. 
Allowed range: 0.25 to 4.0.", + ge=0.25, # As per API docs: Required range: 0.25 <= x + le=4.0, # As per API docs: Required range: x <= 4 + ) + + response_format: Optional[AllowedAudioFormat] = Field( + default="mp3", # As per API docs: default: mp3 + description="The desired audio format for the output file.", + ) + + # --- Note on other API parameters --- + # 'model': Currently hardcoded to 'tts-kokoro' in VeniceAudioBaseTool._arun. Could be added here if needed. + # 'voice': Handled by the 'voice_model' attribute of the specific VeniceAudioBaseTool instance. Not typically set via input schema. + # 'streaming': Currently hardcoded to False in VeniceAudioBaseTool._arun. Could be added here if streaming support is implemented. diff --git a/intentkit/skills/venice_audio/schema.json b/intentkit/skills/venice_audio/schema.json new file mode 100644 index 00000000..cda8d667 --- /dev/null +++ b/intentkit/skills/venice_audio/schema.json @@ -0,0 +1,152 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Venice Audio Skills", + "x-icon": "https://ai.service.crestal.dev/skills/venice_audio/venice_audio.jpg", + "description": "Configuration for the Venice Audio skill.", + "type": "object", + "x-tags": [ + "AI", + "Audio", + "Text to Speech" + ], + "properties": { + "enabled": { + "type": "boolean", + "title": "Enabled", + "description": "Enable or disable the Venice Audio skill.", + "default": false + }, + "voice_model": { + "type": "string", + "title": "Voice Model", + "x-link": "[Listen Voice Example](https://huggingface.co/spaces/hexgrad/Kokoro-TTS)", + "enum": [ + "af_heart", + "bm_lewis", + "custom" + ], + "x-enum-title": [ + "af_heart (default female)", + "bm_lewis (default male)", + "Custom" + ], + "description": "Text to speech tool", + "default": "af_heart" + }, + "states": { + "type": "object", + "title": "Skill States", + "description": "Enable/disable specific voice models. Only enable one if you want a consistent characteristic for your agent. 
See docs for voice details and quality grades.", + "properties": { + "text_to_speech": { + "type": "string", + "title": "Text to Speech", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Text to speech tool", + "default": "disabled" + } + } + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Provider of the API key", + "enum": [ + "agent_owner" + ], + "x-enum-title": [ + "Owner Provided" + ], + "default": "agent_owner" + } + }, + "required": [ + "states", + "enabled" + ], + "allOf": [ + { + "if": { + "properties": { + "voice_model": { + "const": "custom" + } + } + }, + "then": { + "properties": { + "voice_model_custom": { + "type": "array", + "items": { + "type": "string" + }, + "title": "Voice Model (Custom)", + "x-link": "[Supported Voice Model](https://docs.venice.ai/api-reference/endpoint/audio/speech#body-voice)", + "description": "You can add one or more custom voice models.", + "default": [ + "af_heart", + "bm_lewis" + ] + } + }, + "required": [ + "voice_model_custom" + ] + } + }, + { + "if": { + "allOf": [ + { + "properties": { + "enabled": { + "const": true + } + } + }, + { + "properties": { + "api_key_provider": { + "const": "agent_owner" + } + } + } + ] + }, + "then": { + "properties": { + "api_key": { + "type": "string", + "title": "Venice API Key", + "x-link": "[Get your API key](https://venice.ai/)", + "x-sensitive": true, + "description": "API Key for authenticating with the Venice AI API." + }, + "rate_limit_number": { + "type": "integer", + "title": "Rate Limit Number", + "description": "Number of requests allowed per time window." + }, + "rate_limit_minutes": { + "type": "integer", + "title": "Rate Limit Minutes", + "description": "Time window in minutes for rate limiting." + } + }, + "required": [ + "api_key" + ] + } + } + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/venice_audio/venice_audio.py b/intentkit/skills/venice_audio/venice_audio.py new file mode 100644 index 00000000..8e397878 --- /dev/null +++ b/intentkit/skills/venice_audio/venice_audio.py @@ -0,0 +1,238 @@ +import hashlib +import json +import logging +from typing import Any, Dict, Optional, Type + +import httpx +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.venice_audio.base import VeniceAudioBaseTool +from intentkit.skills.venice_audio.input import AllowedAudioFormat, VeniceAudioInput +from intentkit.utils.s3 import FileType, store_file_bytes + +logger = logging.getLogger(__name__) + +base_url = "https://api.venice.ai" + + +class VeniceAudioTool(VeniceAudioBaseTool): + """ + Tool for generating audio using the Venice AI Text-to-Speech API (/audio/speech). + It requires a specific 'voice_model' to be configured for the instance. + Handles API calls, rate limiting, storage, and returns results or API errors as dictionaries. + + On successful audio generation, returns a dictionary with audio details. + On Venice API error (non-200 status), returns a dictionary containing + the error details from the API response instead of raising an exception. + """ + + name: str = "venice_audio_text_to_speech" + description: str = ( + "Converts text to speech using a configured Venice AI voice model. " + "Requires input text. 
Optional parameters include speed (0.25-4.0, default 1.0) " + "and audio format (mp3, opus, aac, flac, wav, pcm, default mp3)." + ) + args_schema: Type[BaseModel] = VeniceAudioInput + skill_store: SkillStoreABC = Field( + description="The skill store instance for accessing system/agent configurations and persisting data." + ) + + async def _arun( + self, + input: str, + voice_model: str, + speed: Optional[float] = 1.0, + response_format: Optional[AllowedAudioFormat] = "mp3", + **kwargs, # type: ignore + ) -> Dict[str, Any]: + """ + Generates audio using the configured voice model via Venice AI TTS /audio/speech endpoint. + Stores the resulting audio using store_file_bytes. + Returns a dictionary containing audio details on success, or API error details on failure. + """ + context = self.get_context() + final_response_format = response_format if response_format else "mp3" + tts_model_id = "tts-kokoro" # API model used + + try: + # --- Setup Checks --- + api_key = self.get_api_key() + + _, error_info = self.validate_voice_model(context, voice_model) + if error_info: + return error_info + + if not api_key: + message = ( + f"Venice AI API key configuration missing for skill '{self.name}'." + ) + details = f"API key not found for category '{self.category}'. Please configure it." + logger.error(message) + return { + "error": True, + "error_type": "ConfigurationError", + "message": message, + "details": details, + "voice_model": voice_model, + "requested_format": final_response_format, + } + + if not voice_model: + message = ( + f"Instance of {self.name} was created without a 'voice_model'." + ) + details = "Voice model must be specified for this tool instance." + logger.error(message) + return { + "error": True, + "error_type": "ConfigurationError", + "message": message, + "details": details, + "voice_model": voice_model, + "requested_format": final_response_format, + } + + await self.apply_rate_limit(context) + + # --- Prepare API Call --- + payload: Dict[str, Any] = { + "model": tts_model_id, + "input": input, + "voice": voice_model, + "response_format": final_response_format, + "speed": speed if speed is not None else 1.0, + "streaming": False, + } + + payload = {k: v for k, v in payload.items() if v is not None} + + logger.debug( + f"Venice Audio API Call: Voice='{voice_model}', Format='{final_response_format}', Payload='{payload}'" + ) + + headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + } + api_url = f"{base_url}/api/v1/audio/speech" + + # --- Execute API Call --- + async with httpx.AsyncClient(timeout=120.0) as client: + response = await client.post(api_url, json=payload, headers=headers) + logger.debug( + f"Venice Audio API Response: Voice='{voice_model}', Format='{final_response_format}', Status={response.status_code}" + ) + + content_type_header = str( + response.headers.get("content-type", "") + ).lower() + + # --- Handle API Success or Error from Response Body --- + if response.status_code == 200 and content_type_header.startswith( + "audio/" + ): + audio_bytes = response.content + if not audio_bytes: + message = ( + "API returned success status but response body was empty." + ) + logger.warning( + f"Venice Audio API (Voice: {voice_model}) returned 200 OK but empty audio content." 
+                        )
+                        return {
+                            "error": True,
+                            "error_type": "NoContentError",
+                            "message": message,
+                            "status_code": response.status_code,
+                            "voice_model": voice_model,
+                            "requested_format": final_response_format,
+                        }
+
+                    # --- Store Audio ---
+                    file_extension = final_response_format
+                    audio_hash = hashlib.sha256(audio_bytes).hexdigest()
+                    key = f"{self.category}/{voice_model}/{audio_hash}.{file_extension}"
+
+                    size_limit = 1024 * 1024 * 20  # 20 MB size limit
+                    stored_url = await store_file_bytes(
+                        file_bytes=audio_bytes,
+                        key=key,
+                        file_type=FileType.AUDIO,
+                        size_limit_bytes=size_limit,
+                    )
+
+                    if not stored_url:
+                        message = "Failed to store audio: S3 storage is not configured."
+                        logger.error(
+                            f"Failed to store audio (Voice: {voice_model}): S3 storage is not configured."
+                        )
+                        return {
+                            "error": True,
+                            "error_type": "StorageConfigurationError",
+                            "message": message,
+                            "voice_model": voice_model,
+                            "requested_format": final_response_format,
+                        }
+
+                    logger.info(
+                        f"Venice TTS success: Voice='{voice_model}', Format='{final_response_format}', Stored='{stored_url}'"
+                    )
+                    # --- Return Success Dictionary ---
+                    return {
+                        "audio_url": stored_url,
+                        "audio_bytes_sha256": audio_hash,
+                        "content_type": content_type_header,
+                        "voice_model": voice_model,
+                        "tts_engine": tts_model_id,
+                        "speed": speed if speed is not None else 1.0,
+                        "response_format": final_response_format,
+                        "input_text_length": len(input),
+                        "error": False,
+                        "status_code": response.status_code,
+                    }
+                else:
+                    # Non-200 API response or non-audio content
+                    error_details: Any = f"Raw error response text: {response.text}"
+                    try:
+                        parsed_details = response.json()
+                        error_details = parsed_details
+                    except json.JSONDecodeError:
+                        pass  # Keep raw text if JSON parsing fails
+
+                    message = "Venice Audio API returned a non-success status or unexpected content type."
+                    logger.error(
+                        f"Venice Audio API Error: Voice='{voice_model}', Format='{final_response_format}', Status={response.status_code}, Details: {error_details}"
+                    )
+                    return {
+                        "error": True,
+                        "error_type": "APIError",
+                        "message": message,
+                        "status_code": response.status_code,
+                        "details": error_details,
+                        "voice_model": voice_model,
+                        "requested_format": final_response_format,
+                    }
+
+        except Exception as e:
+            # Global exception handling for any uncaught error
+            error_type = type(e).__name__  # Class name of the exception (e.g., 'TimeoutException', 'ToolException')
+            message = f"An unexpected error occurred during audio generation for voice {voice_model}."
+ details = str(e) # The string representation of the exception + + # Log the error with full traceback for debugging + logger.error( + f"Venice Audio Tool Global Error ({error_type}): {message} | Details: {details}", + exc_info=True, + ) + + return { + "error": True, + "error_type": error_type, # e.g., "TimeoutException", "ToolException", "ClientError", "ValueError" + "message": message, + "details": details, + "voice_model": voice_model, + "requested_format": final_response_format, + } diff --git a/intentkit/skills/venice_audio/venice_logo.jpg b/intentkit/skills/venice_audio/venice_logo.jpg new file mode 100644 index 00000000..43be8d8d Binary files /dev/null and b/intentkit/skills/venice_audio/venice_logo.jpg differ diff --git a/intentkit/skills/venice_image/README.md b/intentkit/skills/venice_image/README.md new file mode 100644 index 00000000..7e35b8d9 --- /dev/null +++ b/intentkit/skills/venice_image/README.md @@ -0,0 +1,119 @@ +# Venice Image Skill Suite + +Venice Image is a comprehensive skill suite for intelligent agents, enabling state-of-the-art AI image generation, enhancement, upscaling, and vision analysis using the [Venice AI API](https://venice.ai/). This suite offers a modular interface: each sub-tool covers a focused aspect of visual intelligence, while sharing unified configuration and error handling. + +--- + +## Features + +### 1. **Image Generation** +Prompt-based creation of new artworks or photorealistic images, with support for multiple leading AI models, extensive style presets, and negative prompting. Models include: +- **Fluently XL** (realism, professional art) +- **Flux Dev** (innovative research, art workflows) +- **Lustify SDXL** (photorealistic, NSFW/SFW) +- **Pony Realism** (anime/character detail, Danbooru tags) +- **Venice SD35 / Stable Diffusion 3.5** (Stability AI, creative design) + +### 2. **Image Enhancement** +Stylize or refine *existing* images without changing their resolution—ideal for artistic edits, restoration, or visual polishing. + +### 3. **Image Upscaling** +Increase resolution by 2x or 4x while preserving essential details (with optional noise/replication settings). Great for preparing web images for print or HD use. + +### 4. **Image Vision** +Obtain highly detailed, context-rich textual descriptions of images—useful for content understanding, accessibility, indexing, or cognitive agents. + +--- + +## How It Works + +- Tools call the Venice API via secure network requests, automatically handling authentication, rate limiting, and error management. +- Any generated or processed images are transparently stored in an object store (S3 or compatible), with returned URLs ready for user consumption. +- Unified logging and troubleshooting: every tool shares a robust diagnostic backbone for consistent developer experience. + +--- + +## Setup and Configuration + +All skills require a **Venice API key** for operation. + +### Required Configuration +- `enabled` *(bool)*: Enable or disable the overall skill suite. +- `api_key` *(string, sensitive)*: Your [Venice AI API key](https://venice.ai/). +- `states`: Enable/disable and set visibility for each sub-tool (public/private/disabled). + +### Advanced Options +- `safe_mode` *(bool, default: true)*: If true, blurs images classified as adult/NSFW. +- `hide_watermark` *(bool, default: true)*: Request images without a Venice watermark (subject to Venice policy). +- `embed_exif_metadata` *(bool, default: false)*: Whether to embed prompt/config info in EXIF metadata. 
+- `negative_prompt` *(string)*: Default negative prompt, e.g. `(worst quality: 1.4), bad quality, nsfw`. +- `rate_limit_number` / `rate_limit_minutes`: (optional) Set a max request rate per agent. + +For per-tool configuration, refer to the `states` section in [schema.json](schema.json): +- Each tool (e.g. `image_generation_flux_dev`, `image_enhance`, etc.) can be set to `"public"` (all users), `"private"` (agent owner only), or `"disabled"` (hidden). + +#### Example (YAML/JSON-like) +```json +{ + "enabled": true, + "api_key": "", + "safe_mode": true, + "states": { + "image_vision": "public", + "image_enhance": "private", + "image_upscale": "disabled", + "image_generation_flux_dev": "public" + } +} +``` + +--- + +## Usage Patterns + +Each sub-tool has its own standardized input: +- URL-based tools (`image_enhance`, `image_upscale`, `image_vision`) require a web-accessible image URL. +- Generation tools require a *prompt* and offer flexible parameters (size, style, negative prompt, etc). + +Errors and troubleshooting info are always returned in a structured dictionary, with clear separation of success and error fields. + +--- + +## Output and Storage + +- All generated/processed images are written to S3-compatible storage using a SHA256-based unique key. +- Returned URLs are agent-accessible and stable. +- For Vision and non-binary results, the output is returned inline as a dictionary. + +--- + +## Security, License & Compliance + +- Your Venice API key is required and kept confidential per config practices. +- Generated images and tool usage are subject to [Venice AI Terms of Service](https://venice.ai/) and the terms of the respective models (e.g. Stability AI, Black Forest Labs). +- Agents should implement their own access and moderation layers; Safe Mode and watermarking are best-effort. + +--- + +## Included Sub-Tools + +_(For detailed docs, see the respective sub-tool README entries.)_ + +- image_generation_fluently_xl +- image_generation_flux_dev +- image_generation_flux_dev_uncensored +- image_generation_lustify_sdxl +- image_generation_pony_realism +- image_generation_venice_sd35 +- image_generation_stable_diffusion_3_5 +- image_enhance +- image_upscale +- image_vision + +--- + +## Contributing & Support + +For issues, bugfixes, or requests, please open a GitHub issue or contact the maintainers. This suite is regularly updated as Venice AI evolves. 
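+
+---
+
+## Quick Start (Sketch)
+
+A minimal sketch of loading the suite programmatically via `get_skills` from `intentkit/skills/venice_image/__init__.py`, assuming your runtime already provides a `SkillStoreABC` implementation (the `store` below is that assumption):
+
+```python
+from intentkit.skills.venice_image import get_skills
+
+config = {
+    "enabled": True,
+    "api_key": "<venice-api-key>",
+    "states": {"image_vision": "public", "image_upscale": "private"},
+}
+
+# Inside an async context; `store` is your runtime's SkillStoreABC implementation.
+skills = await get_skills(config, is_private=True, store=store)
+print([s.name for s in skills])
+```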
+ +--- diff --git a/intentkit/skills/venice_image/__init__.py b/intentkit/skills/venice_image/__init__.py new file mode 100644 index 00000000..f0900a45 --- /dev/null +++ b/intentkit/skills/venice_image/__init__.py @@ -0,0 +1,154 @@ +import logging +from typing import NotRequired, Optional, TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import ( + SkillConfig, + SkillState, +) + +# Import the base tool and all specific model skill classes +from intentkit.skills.venice_image.base import VeniceImageBaseTool +from intentkit.skills.venice_image.image_enhance.image_enhance import ImageEnhance +from intentkit.skills.venice_image.image_generation.image_generation_fluently_xl import ( + ImageGenerationFluentlyXL, +) +from intentkit.skills.venice_image.image_generation.image_generation_flux_dev import ( + ImageGenerationFluxDev, +) +from intentkit.skills.venice_image.image_generation.image_generation_flux_dev_uncensored import ( + ImageGenerationFluxDevUncensored, +) +from intentkit.skills.venice_image.image_generation.image_generation_lustify_sdxl import ( + ImageGenerationLustifySDXL, +) +from intentkit.skills.venice_image.image_generation.image_generation_pony_realism import ( + ImageGenerationPonyRealism, +) +from intentkit.skills.venice_image.image_generation.image_generation_stable_diffusion_3_5 import ( + ImageGenerationStableDiffusion35, +) +from intentkit.skills.venice_image.image_generation.image_generation_venice_sd35 import ( + ImageGenerationVeniceSD35, +) +from intentkit.skills.venice_image.image_upscale.image_upscale import ImageUpscale +from intentkit.skills.venice_image.image_vision.image_vision import ImageVision + +# Cache skills at the system level, because they are stateless and only depend on the store +_cache: dict[str, VeniceImageBaseTool] = {} + +logger = logging.getLogger(__name__) + + +# Define the expected structure for the 'states' dictionary in the config +class SkillStates(TypedDict): + image_upscale: SkillState + image_enhance: SkillState + image_vision: SkillState + image_generation_flux_dev: SkillState + image_generation_flux_dev_uncensored: SkillState + image_generation_venice_sd35: SkillState + image_generation_fluently_xl: SkillState + image_generation_lustify_sdxl: SkillState + image_generation_pony_realism: SkillState + image_generation_stable_diffusion_3_5: SkillState + # Add new skill names here if more models are added + + +# Define the overall configuration structure for the venice_image category +class Config(SkillConfig): + """Configuration for Venice Image skills.""" + + enabled: bool # Keep standard enabled flag + states: SkillStates + api_key_provider: str = "agent_owner" + api_key: NotRequired[Optional[str]] # Explicitly Optional + safe_mode: NotRequired[bool] # Defaults handled in base or usage + hide_watermark: NotRequired[bool] # Defaults handled in base or usage + negative_prompt: NotRequired[str] # Defaults handled in base or usage + rate_limit_number: NotRequired[Optional[int]] # Explicitly Optional + rate_limit_minutes: NotRequired[Optional[int]] # Explicitly Optional + + +_SKILL_NAME_TO_CLASS_MAP: dict[str, type[VeniceImageBaseTool]] = { + "image_upscale": ImageUpscale, + "image_enhance": ImageEnhance, + "image_vision": ImageVision, + "image_generation_flux_dev": ImageGenerationFluxDev, + "image_generation_flux_dev_uncensored": ImageGenerationFluxDevUncensored, + "image_generation_venice_sd35": ImageGenerationVeniceSD35, + "image_generation_fluently_xl": ImageGenerationFluentlyXL, + 
"image_generation_lustify_sdxl": ImageGenerationLustifySDXL, + "image_generation_pony_realism": ImageGenerationPonyRealism, + "image_generation_stable_diffusion_3_5": ImageGenerationStableDiffusion35, +} + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, # Allow for extra arguments if the loader passes them +) -> list[VeniceImageBaseTool]: + """Get all enabled Venice Image skills based on configuration and privacy level. + + Args: + config: The configuration for Venice Image skills. + is_private: Whether the context is private (e.g., agent owner). + store: The skill store for persisting data and accessing system config. + + Returns: + A list of instantiated and enabled Venice Image skill objects. + """ + # Check if the entire category is disabled first + if not config.get("enabled", False): + return [] + + available_skills: list[VeniceImageBaseTool] = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_venice_image_skill(name, store, config) + if skill: + result.append(skill) + return result + + +def get_venice_image_skill( + name: str, + store: SkillStoreABC, + config: "Config", +) -> Optional[VeniceImageBaseTool]: + """ + Factory function to get a cached Venice Image skill instance by name. + + Args: + name: The name of the skill to get (must match keys in _SKILL_NAME_TO_CLASS_MAP). + store: The skill store, passed to the skill constructor. + + Returns: + The requested Venice Image skill instance, or None if the name is unknown. + """ + + # Return from cache immediately if already exists + if name in _cache: + return _cache[name] + + skill_class = _SKILL_NAME_TO_CLASS_MAP.get(name) + if not skill_class: + logger.warning(f"Unknown Venice skill: {name}") + return None + + # Cache and return the newly created instance + _cache[name] = skill_class( + skill_store=store, + ) + return _cache[name] diff --git a/intentkit/skills/venice_image/api.py b/intentkit/skills/venice_image/api.py new file mode 100644 index 00000000..6ab6fd58 --- /dev/null +++ b/intentkit/skills/venice_image/api.py @@ -0,0 +1,138 @@ +""" +This module encapsulates all interactions with the Venice AI API. +It provides a function, make_venice_api_request, to make POST requests +to the API and handles the responses, including error handling, +content type checking, and image storage via S3. This separation +of concerns keeps the core skill logic cleaner and easier to maintain. +""" + +import hashlib +import logging +from typing import Any, Dict, Optional, Tuple + +import httpx + +from intentkit.utils.s3 import store_image_bytes + +logger = logging.getLogger(__name__) + + +async def make_venice_api_request( + api_key: str, + path: str, + payload: Dict[str, Any], + category: str, + tool_name: str, +) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]: + """ + Makes a POST request to the Venice AI API, handling all aspects + of the API interaction. This includes: + + 1. Constructing the API URL using a base URL and the provided path. + 2. Adding the required authorization header with the provided API key. + 3. Sending the POST request with the given payload. + 4. Handling potential connection and HTTP errors. + 5. 
Calling the internal _handle_response function to process the API's
+       response, which might be JSON or an image.
+
+    Args:
+        api_key: The Venice AI API key for authentication.
+        path: The API endpoint path (e.g., "/api/v1/image/generate"). Should *not* start with the base URL.
+        payload: The data to send in the request body (as JSON).
+        category: The category of the skill making the request (e.g., "venice_image"). Used for S3 storage and logging purposes.
+        tool_name: The name of the tool or skill making the request (e.g., "image_generation"). Used for S3 storage and logging purposes.
+
+    Returns:
+        A tuple: (success_data, error_data).
+        - success_data: A dictionary containing the parsed JSON response from the API if the request was successful
+          (or a dictionary containing the S3 URL if the response is an image).
+        - error_data: A dictionary containing information about any errors that occurred,
+          or None if the request was successful. The dictionary includes an 'error' key.
+    """
+
+    venice_base_url = "https://api.venice.ai"  # Venice AI API base URL
+
+    if not path.startswith("/"):
+        path = "/" + path
+
+    api_url = f"{venice_base_url}{path}"
+
+    headers = {
+        "Authorization": f"Bearer {api_key}",
+        "Content-Type": "application/json",
+        "Accept": "image/*, application/json",
+    }
+
+    logger.info(
+        f"[{category}/{tool_name}] Sending request to {api_url} with payload: {payload}"
+    )
+
+    try:
+        async with httpx.AsyncClient(timeout=180.0) as client:
+            response = await client.post(api_url, json=payload, headers=headers)
+            return await _handle_response(response, category, tool_name)
+
+    except httpx.RequestError as e:
+        error_msg = f"Connection error: {e}"
+        logger.error(f"[{category}/{tool_name}] {error_msg}")
+        return {}, {"success": False, "error": error_msg}
+    except Exception as e:
+        error_msg = f"Unexpected error: {e}"
+        logger.error(f"[{category}/{tool_name}] {error_msg}", exc_info=True)
+        return {}, {"success": False, "error": error_msg}
+
+
+async def _handle_response(
+    response: httpx.Response, category: str, tool_name: str
+) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
+    """
+    Handles the API response, differentiating between JSON and image responses.
+
+    If the response is an image (based on the 'content-type' header),
+    it stores the image in S3 and returns the S3 URL.
+    If the response is JSON, it parses the JSON and returns it.
+    If any errors occur, it returns an error dictionary.
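+
+    Returns:
+        A tuple (success_data, error_data), mirroring make_venice_api_request:
+        on success, error_data is None; on failure, success_data is an empty dict.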
+ """ + + content_type = str(response.headers.get("content-type", "")).lower() + + if response.status_code == 200 and content_type.startswith("image/"): + try: + upscaled_image_bytes = response.content + image_hash = hashlib.sha256(upscaled_image_bytes).hexdigest() + file_extension = content_type.split("/")[-1].split("+")[0] or "png" + + key = f"{category}/{tool_name}/{image_hash}.{file_extension}" + + logger.info(f"[{category}/{tool_name}] Storing image with key: {key}") + + stored_url = await store_image_bytes( + upscaled_image_bytes, key, content_type=content_type + ) + + return {"success": True, "result": stored_url}, None + + except Exception as e: + error_msg = f"Error processing image response: {e}" + logger.error(f"[{category}/{tool_name}] {error_msg}", exc_info=True) + return {}, {"success": False, "error": error_msg} + + elif response.status_code == 200: + try: + logger.info(f"[{category}/{tool_name}] Received successful JSON response.") + return response.json(), None + except Exception as json_err: + error_msg = f"Failed to parse JSON response: {json_err} - {response.text}" + logger.error(f"[{category}/{tool_name}] {error_msg}") + return {}, {"success": False, "error": error_msg} + + else: + try: + error_data = response.json() + error_msg = f"API returned error: {error_data.get('message', error_data.get('detail', response.text))}" + logger.error(f"[{category}/{tool_name}] {error_msg}") + return {}, {"success": False, "error": error_msg} + except Exception: + error_msg = f"API returned status code {response.status_code} with text: {response.text}" + logger.error(f"[{category}/{tool_name}] {error_msg}") + return {}, {"success": False, "error": error_msg} diff --git a/intentkit/skills/venice_image/base.py b/intentkit/skills/venice_image/base.py new file mode 100644 index 00000000..a63546db --- /dev/null +++ b/intentkit/skills/venice_image/base.py @@ -0,0 +1,192 @@ +import logging +from typing import Any, Dict, Optional, Tuple + +from langchain.tools.base import ToolException +from pydantic import Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill +from intentkit.skills.venice_image.api import ( + make_venice_api_request, +) +from intentkit.skills.venice_image.config import VeniceImageConfig + +logger = logging.getLogger(__name__) + +venice_base_url = "https://api.venice.ai" # Common base URL for all Venice endpoints + + +class VeniceImageBaseTool(IntentKitSkill): + """ + Base class for all Venice AI image-related skills. + + This class provides common functionality for interacting with the + Venice AI API, including: + + - Retrieving the API key (from agent or system configuration). + - Applying rate limits to prevent overuse of the API. + - A standardized `post` method for making API requests. + + Subclasses should inherit from this class and implement their specific + API interactions (e.g., image generation, upscaling, inpainting) + by defining their own `_arun` methods and setting appropriate `name` + and `description` attributes. + """ + + @property + def category(self) -> str: + """ + Returns the category of this skill, used for configuration and logging. + """ + return "venice_image" + + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data and configs." + ) + + def getSkillConfig(self, context) -> VeniceImageConfig: + """ + Creates a VeniceImageConfig instance from a dictionary of configuration values. + + Args: + config: A dictionary containing configuration settings. 
+ + Returns: + A VeniceImageConfig object. + """ + + skill_config = context.agent.skill_config(self.category) + return VeniceImageConfig( + api_key_provider=skill_config.get("api_key_provider", "agent_owner"), + safe_mode=skill_config.get("safe_mode", True), + hide_watermark=skill_config.get("hide_watermark", True), + embed_exif_metadata=skill_config.get("embed_exif_metadata", False), + negative_prompt=skill_config.get( + "negative_prompt", "(worst quality: 1.4), bad quality, nsfw" + ), + rate_limit_number=skill_config.get("rate_limit_number"), + rate_limit_minutes=skill_config.get("rate_limit_minutes"), + ) + + def get_api_key(self) -> str: + """ + Retrieves the Venice AI API key based on the api_key_provider setting. + + Returns: + The API key if found. + + Raises: + ToolException: If the API key is not found or provider is invalid. + """ + try: + context = self.get_context() + skillConfig = self.getSkillConfig(context=context) + if skillConfig.api_key_provider == "agent_owner": + skill_config = context.agent.skill_config(self.category) + agent_api_key = skill_config.get("api_key") + if agent_api_key: + logger.debug( + f"Using agent-specific Venice API key for skill {self.name} in category {self.category}" + ) + return agent_api_key + raise ToolException( + f"No agent-owned Venice API key found for skill '{self.name}' in category '{self.category}'." + ) + + elif skillConfig.api_key_provider == "platform": + system_api_key = self.skill_store.get_system_config("venice_api_key") + if system_api_key: + logger.debug( + f"Using system Venice API key for skill {self.name} in category {self.category}" + ) + return system_api_key + raise ToolException( + f"No platform-hosted Venice API key found for skill '{self.name}' in category '{self.category}'." + ) + + else: + raise ToolException( + f"Invalid API key provider '{skillConfig.api_key_provider}' for skill '{self.name}'" + ) + + except Exception as e: + raise ToolException(f"Failed to retrieve Venice API key: {str(e)}") from e + + async def apply_venice_rate_limit(self, context) -> None: + """ + Applies rate limiting to prevent exceeding the Venice AI API's rate limits. + + Rate limits are applied based on the api_key_provider setting: + - 'agent_owner': uses agent-specific configuration. + - 'platform': uses system-wide configuration. 
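+
+        If neither the agent config nor the system config defines both a
+        rate-limit count and window, no limit is applied.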
+ """ + try: + # Get user_id from the agent context (venice_image only supports agent_owner) + skillConfig = self.getSkillConfig(context=context) + + if skillConfig.api_key_provider == "agent_owner": + limit_num = skillConfig.rate_limit_number + limit_min = skillConfig.rate_limit_minutes + + if limit_num and limit_min: + # For agent_owner, use agent.id as user_id for rate limiting + user_id = context.agent.id + logger.debug( + f"Applying Agent rate limit ({limit_num}/{limit_min} min) for user {user_id} on {self.name}" + ) + await self.user_rate_limit_by_category( + user_id, limit_num, limit_min + ) + + elif skillConfig.api_key_provider == "platform": + system_limit_num = self.skill_store.get_system_config( + f"{self.category}_rate_limit_number" + ) + system_limit_min = self.skill_store.get_system_config( + f"{self.category}_rate_limit_minutes" + ) + + if system_limit_num and system_limit_min: + # For platform, use agent.id as user_id for rate limiting + user_id = context.agent.id + logger.debug( + f"Applying System rate limit ({system_limit_num}/{system_limit_min} min) for user {user_id} on {self.name}" + ) + await self.user_rate_limit_by_category( + user_id, system_limit_num, system_limit_min + ) + # do nothing if no rate limit is + return None + + except Exception as e: + raise ToolException(f"Failed to apply Venice rate limit: {str(e)}") from e + + async def post( + self, path: str, payload: Dict[str, Any], context + ) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]: + """ + Makes a POST request to the Venice AI API using the `make_venice_api_request` + function from the `skills.venice_image.api` module. + + This method handles the following: + + 1. Retrieving the API key using `get_api_key`. + 2. Constructing the request payload. + 3. Calling `make_venice_api_request` to make the actual API call. + 4. Returning the results from `make_venice_api_request`. + + Args: + path: The API endpoint path (e.g., "/api/v1/image/generate"). + payload: The request payload as a dictionary. + context: The SkillContext for accessing API keys and configs. + + Returns: + A tuple: (success_data, error_data). + - If successful, success contains the JSON response from the API. + - If an error occurs, success is an empty dictionary, and error contains error details. + """ + api_key = self.get_api_key() + + return await make_venice_api_request( + api_key, path, payload, self.category, self.name + ) diff --git a/intentkit/skills/venice_image/config.py b/intentkit/skills/venice_image/config.py new file mode 100644 index 00000000..8bbe6ef1 --- /dev/null +++ b/intentkit/skills/venice_image/config.py @@ -0,0 +1,35 @@ +from typing import Optional + +from pydantic import BaseModel, Field + + +class VeniceImageConfig(BaseModel): + """Skill Config for Venice Image.""" + + api_key_provider: str = Field( + default="agent_owner", + description="Provider of the API Key, could be agent_owner or platform", + ) + safe_mode: bool = Field( + default=True, + description="Whether to use safe mode. If enabled, this will blur images that are classified as having adult content", + ) + hide_watermark: bool = Field( + default=True, + description="Whether to hide the Venice watermark. Venice may ignore this parameter for certain generated content.", + ) + embed_exif_metadata: bool = Field( + default=False, description="Whether to embed EXIF metadata in the image." 
+ ) + negative_prompt: str = Field( + default="(worst quality: 1.4), bad quality, nsfw", + description="The default negative prompt used when no other prompt is provided.", + ) + rate_limit_number: Optional[int] = Field( + default=None, + description="Maximum number of allowed calls within the specified time window.", + ) + rate_limit_minutes: Optional[int] = Field( + default=None, + description="Duration of the time window (in minutes) for rate limiting.", + ) diff --git a/intentkit/skills/venice_image/image_enhance/README.md b/intentkit/skills/venice_image/image_enhance/README.md new file mode 100644 index 00000000..f517836b --- /dev/null +++ b/intentkit/skills/venice_image/image_enhance/README.md @@ -0,0 +1,119 @@ +# image_enhance + +**Image Enhance** enables you to improve, stylize, or refine an existing image using the Venice AI enhancer. Unlike upscaling, this tool keeps the original image size but substantially upgrades its visual quality, style, or texture—ideal for creative, restoration, or polishing use-cases. + +--- + +## What does it do? + +- Accepts a publicly accessible image URL. +- Uses a provided prompt to guide the desired enhancement—e.g., style, artistic direction, or quality upgrades (such as “gold accents”, “vivid color”, “oil painting”, or “gentle watercolor”). +- Supports adjustment of the intensity of enhancement and how much original detail is preserved (creativity & replication). +- Returns a new image (matching original dimensions) with enhanced appearance and style. + +Typical uses: +- Sharpen and clarify blurry images. +- Instantly “re-theme” a photo or artwork (color, material, style transfer). +- Polish images for social, ecommerce, professional, or creative projects. + +--- + +## Input Parameters + +| Field | Type | Description | Required | Default | +|-------------------|---------------|------------------------------------------------------------------------------------------------------------------|----------|---------| +| image_url | str (HttpUrl) | Publicly accessible URL of the image to enhance | Yes | | +| enhancePrompt | str | **Describes the desired enhancement, style, or theme.** Concise, descriptive terms work best. | Yes | | +| replication | float | How much of the original image structure, lines, and noise are retained (0.1–1.0). | No | 0.35 | +| enhanceCreativity | float | How far the AI can diverge from the original (0 = subtle, 1 = max stylization/new image). | No | 0.5 | + +**Prompt Examples (for `enhancePrompt`):** +- `"marble, gold veins, high contrast"` +- `"vaporwave color palette, cyberpunk lighting"` +- `"oil painting, impasto brushwork"` +- `"smooth skin, brighten shadows, cinematic look"` + +Example input: +```json +{ + "image_url": "https://img.site/old-photo.png", + "enhancePrompt": "soft watercolor, pastel tones, gentle light", + "replication": 0.25, + "enhanceCreativity": 0.7 +} +``` + +--- + +## Output + +On success, returns: +```json +{ + "success": true, + "result": "https://s3.storage.example/venice_image/image_enhance/ab12cd...png" +} +``` + +On error: +```json +{ + "success": false, + "error": "Failed to fetch or validate image from URL: ...", + "result": null +} +``` + +--- + +## Typical Use Cases + +- **Commerce/Product Images**: Instantly polish web photos for catalogs or listings. +- **Restoration**: Revive faded or dated artwork/photos for social, framing, or print. +- **Style Transfer**: Make a photo look like “stained glass”, “anime cel”, or “movie still”. 
+- **Social & Art Creation**: Quickly freshen up images for sharing with a unique twist. + +--- + +## Advanced Notes + +- **Replication**: + - Lower (`~0.1`): AI smooths out noise/details, crisper/cleaner look. + - Higher (`~0.9`): Retain original grit, preserve realistic features, more subtle change. +- **EnhanceCreativity**: + - Lower (`0.0`): Only very minor tweaks. + - Higher (`1.0`): Might look like a fully new artwork in the target style. +- **Image must be accessible and in a supported format**; conversion to PNG is automatic if needed. +- **Original resolution is kept**; for larger output, use `image_upscale` after enhancement. + +--- + +## Limitations + +- Does not increase resolution—use in conjunction with upscaling for large deliverables. +- Not a restoration-of-lost-content tool: Real degradation or loss isn’t recoverable, though apparent fidelity can be improved. +- The style quality depends on the provided enhancement prompt and the source image clarity. + +--- + +## Example Usage (Python-esque pseudocode) + +```python +result = await agent.send_tool( + "image_enhance", + { + "image_url": "https://cdn.site/photo.jpg", + "enhancePrompt": "marble, gold details, glowing edges", + "enhanceCreativity": 0.9 + } +) +enhanced_url = result["result"] +``` + +--- + +## Attribution & Compliance + +Use of this tool is subject to [Venice AI terms of service](https://venice.ai/) and applicable copyright law for input images. + +--- \ No newline at end of file diff --git a/intentkit/skills/venice_image/image_enhance/__init__.py b/intentkit/skills/venice_image/image_enhance/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/venice_image/image_enhance/image_enhance.py b/intentkit/skills/venice_image/image_enhance/image_enhance.py new file mode 100644 index 00000000..4c6427f1 --- /dev/null +++ b/intentkit/skills/venice_image/image_enhance/image_enhance.py @@ -0,0 +1,78 @@ +import logging +from typing import Optional + +from pydantic import HttpUrl + +from intentkit.skills.base import ToolException +from intentkit.skills.venice_image.image_enhance.image_enhance_base import ( + VeniceImageEnhanceBaseTool, +) +from intentkit.skills.venice_image.utils import fetch_image_as_base64 + +logger = logging.getLogger(__name__) + + +class ImageEnhance(VeniceImageEnhanceBaseTool): + """ + Enhances an existing image provided via URL using the Venice AI enhancer (not upscaling). + Useful for improving visual quality, adding style, or refining image features. + """ + + name: str = "venice_image_enhance" + description: str = ( + "Enhances an existing image from a URL using Venice AI.\n" + "Provide the public URL of the image to enhance.\n" + "Specify enhancement creativity level and a required prompt for style.\n" + "Returns the URL of the enhanced image." + ) + + async def _arun( + self, + image_url: HttpUrl, + enhancePrompt: str, + replication: Optional[float] = 0.35, + enhanceCreativity: Optional[float] = 0.5, + **kwargs, + ) -> dict: + """ + Applies AI enhancement to an image without changing its size. 
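+        Sends the base64-encoded image to /api/v1/image/upscale with scale=1 and
+        enhance=True, so the output keeps the original dimensions while the
+        required prompt guides the enhancement style.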
+ """ + + try: + context = self.get_context() + + await self.apply_venice_rate_limit(context) + + image_base64 = await fetch_image_as_base64(image_url) + if not image_base64: + error_msg = f"Failed to fetch or validate image from URL: {image_url}" + logger.error(error_msg) + raise ToolException( + str({"success": False, "error": error_msg, "result": None}) + ) + + payload = { + "image": image_base64, + "scale": 1, + "enhance": True, + "replication": replication, + "enhanceCreativity": enhanceCreativity, + "enhancePrompt": enhancePrompt, + } + + result, error = await self.post("api/v1/image/upscale", payload, context) + if error: + raise ToolException(f"Venice Image Enhance API error: {error}") + return result + except ToolException as e: + raise e + except Exception as e: + logger.error(f"Error in {self.name}: {str(e)}") + raise ToolException( + str( + { + "success": False, + "error": f"An unexpected error occurred: {str(e)}", + } + ) + ) diff --git a/intentkit/skills/venice_image/image_enhance/image_enhance_base.py b/intentkit/skills/venice_image/image_enhance/image_enhance_base.py new file mode 100644 index 00000000..8ee34592 --- /dev/null +++ b/intentkit/skills/venice_image/image_enhance/image_enhance_base.py @@ -0,0 +1,23 @@ +from typing import Type + +from pydantic import BaseModel, Field + +# Import the generic base and shared input +from intentkit.skills.venice_image.base import VeniceImageBaseTool +from intentkit.skills.venice_image.image_enhance.image_enhance_input import ( + VeniceImageEnhanceInput, +) + + +class VeniceImageEnhanceBaseTool(VeniceImageBaseTool): + """ + Base class for Venice AI *Image Enchanching* tools. + Inherits from VeniceAIBaseTool and handles specifics of the + /image/upscale endpoint + """ + + args_schema: Type[BaseModel] = VeniceImageEnhanceInput + name: str = Field(description="The unique name of the image Enchanching tool.") + description: str = Field( + description="A description of what the image Enchanching tool does." + ) diff --git a/intentkit/skills/venice_image/image_enhance/image_enhance_input.py b/intentkit/skills/venice_image/image_enhance/image_enhance_input.py new file mode 100644 index 00000000..6a11bc0c --- /dev/null +++ b/intentkit/skills/venice_image/image_enhance/image_enhance_input.py @@ -0,0 +1,40 @@ +from typing import Optional + +from pydantic import BaseModel, Field + + +class VeniceImageEnhanceInput(BaseModel): + """Input for Venice Image Enhance tool (scale=1, enhance=True).""" + + image_url: str = Field( + description="The URL of the image to enhance. Must be a publicly accessible URL." + ) + + enhancePrompt: str = Field( + ..., + max_length=1500, + description=( + "Required prompt describing the desired enhancement style. " + "Best used with short descriptors like 'gold', 'marble', or 'angry, menacing'." + ), + ) + + replication: Optional[float] = Field( + default=0.35, + ge=0.1, + le=1.0, + description=( + "How strongly lines and noise in the base image are preserved. " + "Higher values retain more noise and detail but are less smooth." + ), + ) + + enhanceCreativity: float = Field( + default=0.5, + ge=0.0, + le=1.0, + description=( + "How much the enhancement AI is allowed to change the image. " + "0 = minimal change, 1 = generate a new image entirely." 
+ ), + ) diff --git a/intentkit/skills/venice_image/image_generation/README.md b/intentkit/skills/venice_image/image_generation/README.md new file mode 100644 index 00000000..c00dd3fc --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/README.md @@ -0,0 +1,144 @@ +# Venice Image Generation Tools + +Venice Image Generation provides flexible, prompt-based image creation using multiple state-of-the-art AI models via the Venice AI API. These tools support a broad spectrum of styles, subject matter, and output formats, making it ideal for artists, designers, marketers, research, and personal creativity. + +--- + +## Overview + +- **Purpose:** Synthesize original images from natural-language prompts. +- **Supported Models:** Choose from several world-class models, each suited for different tasks: + - **Fluently XL:** Professional realism, lighting, artistic work. + - **Flux Dev:** Artistic research, innovative and creative workflows. + - **Flux Dev Uncensored:** For unrestricted, uncensored generation (including NSFW). + - **Lustify SDXL:** Photorealistic, NSFW/SFW versatility. + - **Pony Realism:** High-detail anime/character design (great with Danbooru tags). + - **Venice SD35/Stable Diffusion 3.5:** Artistic, illustrative, or design content, powered by Stability AI. +- **Unified Interface:** Each model is its own sub-tool, but all support the same core set of options. + +--- + +## Input Parameters + +| Field | Type | Description | Required | Default | +|------------------|---------------------------------|--------------------------------------------------------------------------------------------------------|----------|---------------| +| prompt | string | Main description of the image to generate. Use detailed, specific language for best results. | Yes | | +| model_id | string (see below) | AI model to use. Each sub-tool sets its own model_id internally. | N/A | (hardcoded) | +| width | int (max 2048) | Output image width (pixels). Must be multiple of 8 or 16 depending on model. | No | 1024 | +| height | int (max 2048) | Output image height (pixels). | No | 1024 | +| format | "png" \| "jpeg" \| "webp" | Output image format. | No | png | +| style_preset | string (enumerated) | Optional visual preset (e.g., "Photographic", "Anime", "Abstract", etc.). See full list below. | No | Photographic | +| negative_prompt | string | Exclude these elements or concepts from the image (e.g. “nsfw, low quality”). | No | suite default | +| seed | int | Control randomness. Reuse a value for repeatability. | No | random | +| cfg_scale | float (e.g. 1–20) | Prompt fidelity – higher = closer adherence to prompt, lower = more variety. | No | 7.5 | +| return_binary | bool | Always `False`. Output is a hosted URL, not inline binary. | N/A | False | +| safe_mode | bool | If enabled, applies content filtering / blurring for NSFW. | Inherited | true | +| embed_exif_metadata | bool | If enabled, embeds prompt info in output EXIF metadata. | Inherited | false | +| hide_watermark | bool | Hide the Venice watermark, where possible. | Inherited | true | + +#### Example Prompt: +> "In the style of a Renaissance oil painting, a fierce orange tabby cat with a crown, surrounded by lush velvet drapery and golden sunlight." + +#### Style Presets +An extensive list is included, for example: +- "Photographic" +- "Anime" +- "Cinematic" +- "Digital Art" +- "Abstract" +- "Cubist" +- ...and over 30 more. See documentation or schema for the full list. 
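+
+A quick way to inspect the full preset list programmatically (a sketch, assuming the package is importable):
+
+```python
+from intentkit.skills.venice_image.image_generation.image_generation_input import (
+    STYLE_PRESETS,
+)
+
+print(f"{len(STYLE_PRESETS)} presets available")
+print(STYLE_PRESETS[:5])  # ['3D Model', 'Analog Film', 'Anime', 'Cinematic', 'Comic Book']
+```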
+ +#### Example Input: +```json +{ + "prompt": "A highly detailed portrait of a robot playing chess, cinematic lighting, photoreal 4k", + "width": 1536, + "height": 1024, + "format": "jpeg", + "style_preset": "Cinematic", + "cfg_scale": 10, + "negative_prompt": "text, watermark, blurry", + "seed": 424242 +} +``` + +--- + +## Output + +The tool returns a dict that includes: + +- `success`: true/false +- `image_url`: The URL to the generated image (stored in S3 or similar object storage) +- Additional metadata (generation params, seed, etc.) + +Example: +```json +{ + "success": true, + "image_url": "https://s3.my-storage.net/venice_image/fluently-xl/abc123f....png", + "seed": 424242, + "generation_time_s": 22.4 +} +``` + +On error: +```json +{ + "success": false, + "error": "API returned error: prompt too long", + "result": null +} +``` + +--- + +## Advanced Capabilities + +- **Inpainting**: Modify regions of an existing image with precise mask and text controls (see schema for input structure). +- **EXIF Embedding**: If enabled, the tool can embed the prompt/config info in the output file’s EXIF metadata. + +--- + +## Use Cases + +- **Art & Design:** Instantly create drafts, mood boards, or finished art for any assignment +- **Marketing/Content:** Rapid visual ideation for blog posts, social media, ads, covers, etc. +- **Ideation/Research:** Visualize concepts, inventions, or speculative scenarios quickly +- **Education:** Generate visual teaching content on demand +- **Character/Concept Design:** Leverage anime/artistic models for avatars, OC creation, comics + +--- + +## Limitations + +- Results are only as good as your prompt and model choice. +- NSFW filtering varies by model; check the tool’s description and enable `safe_mode` for safety. +- Some style/subject combinations may not be supported by a given model. +- Stable Diffusion/Flux Dev variants may have license restrictions—review Venice API and model TOS. + +--- + +## Example Usage (Pseudo-code) + +```python +result = await agent.send_tool( + "image_generation_fluently_xl", + { + "prompt": "A futuristic cityscape at sunset, neon lights, flying cars, cinematic", + "style_preset": "Cinematic", + "width": 1280, + "height": 704 + } +) +url = result["image_url"] +``` + +--- + +## Compliance & Attribution + +You must respect [Venice AI terms of service](https://venice.ai/) and the terms and licenses of the selected model. + +--- diff --git a/intentkit/skills/venice_image/image_generation/__init__.py b/intentkit/skills/venice_image/image_generation/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/venice_image/image_generation/image_generation_base.py b/intentkit/skills/venice_image/image_generation/image_generation_base.py new file mode 100644 index 00000000..e6569efd --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_base.py @@ -0,0 +1,115 @@ +import base64 +import hashlib +import logging +from typing import Any, Dict, Literal, Optional, Type + +from pydantic import BaseModel, Field + +# Import the generic base +from intentkit.skills.base import ToolException +from intentkit.skills.venice_image.base import VeniceImageBaseTool +from intentkit.skills.venice_image.image_generation.image_generation_input import ( + VeniceImageGenerationInput, +) +from intentkit.utils.s3 import store_image_bytes + +logger = logging.getLogger(__name__) + + +class VeniceImageGenerationBaseTool(VeniceImageBaseTool): + """ + Base class for Venice AI *Image Generation* tools. 
Inherits from VeniceImageBaseTool and handles specifics of the
+    /image/generate endpoint.
+    """
+
+    # --- Attributes specific to Image Generation ---
+    args_schema: Type[BaseModel] = VeniceImageGenerationInput
+
+    # --- Attributes Subclasses MUST Define ---
+    name: str = Field(description="The unique name of the image generation tool/model.")
+    description: str = Field(
+        description="A description of what the image generation tool/model does."
+    )
+    model_id: str = Field(
+        description="The specific model ID used in the Venice Image API call."
+    )
+
+    async def _arun(
+        self,
+        prompt: str,
+        seed: Optional[int] = None,
+        negative_prompt: Optional[str] = None,
+        width: Optional[int] = 1024,
+        height: Optional[int] = 1024,
+        format: Literal["png", "jpeg", "webp"] = "png",
+        cfg_scale: Optional[float] = 7.5,
+        style_preset: Optional[str] = "Photographic",
+        **kwargs,
+    ) -> Dict[str, Any]:
+        try:
+            context = self.get_context()
+            skillConfig = self.getSkillConfig(context)
+            await self.apply_venice_rate_limit(context)
+
+            final_negative_prompt = negative_prompt or skillConfig.negative_prompt
+
+            payload = {
+                "model": self.model_id,
+                "prompt": prompt,
+                "width": width,
+                "height": height,
+                "seed": seed,
+                "format": format,
+                "steps": 30,
+                "safe_mode": skillConfig.safe_mode,
+                "hide_watermark": skillConfig.hide_watermark,
+                "embed_exif_metadata": skillConfig.embed_exif_metadata,
+                "cfg_scale": cfg_scale or 7.5,
+                "style_preset": style_preset,
+                "negative_prompt": final_negative_prompt,
+                "return_binary": False,
+            }
+
+            # Strip out None values
+            payload = {k: v for k, v in payload.items() if v is not None}
+
+            result, error = await self.post("/api/v1/image/generate", payload, context)
+
+            if error:
+                raise ToolException(f"Venice Image Generation API error: {error}")
+
+            base64_image_string = (result.get("images") or [None])[0]
+            if not base64_image_string:
+                raise ToolException("No image data found in Venice Image API response.")
+
+            try:
+                image_bytes = base64.b64decode(base64_image_string)
+            except Exception as decode_error:
+                raise ToolException("Invalid base64 image data.") from decode_error
+
+            response_format = (
+                result.get("request", {}).get("data", {}).get("format", format)
+            )
+            file_extension = response_format or format
+            content_type = f"image/{file_extension}"
+
+            image_hash = hashlib.sha256(image_bytes).hexdigest()
+            key = f"{self.category}/{self.model_id}/{image_hash}.{file_extension}"
+
+            stored_url = await store_image_bytes(
+                image_bytes, key, content_type=content_type
+            )
+
+            # Cleanup & enrich the response
+            result.pop("images", None)
+            result["image_url"] = stored_url
+            result["image_bytes_sha256"] = image_hash
+
+            return result
+        except ToolException as e:
+            raise e
+        except Exception as e:
+            raise ToolException(
+                "An unexpected error occurred during the image generation process."
+            ) from e
diff --git a/intentkit/skills/venice_image/image_generation/image_generation_fluently_xl.py b/intentkit/skills/venice_image/image_generation/image_generation_fluently_xl.py
new file mode 100644
index 00000000..a9018bc8
--- /dev/null
+++ b/intentkit/skills/venice_image/image_generation/image_generation_fluently_xl.py
@@ -0,0 +1,26 @@
+from intentkit.skills.venice_image.image_generation.image_generation_base import (
+    VeniceImageGenerationBaseTool,
+)
+from intentkit.skills.venice_image.image_generation.image_generation_input import (
+    STYLE_PRESETS,
+)
+
+
+class ImageGenerationFluentlyXL(VeniceImageGenerationBaseTool):
+    """
+    Tool for generating images using the Fluently-XL model via Venice AI.
+    Known for aesthetics, lighting, realism, and correct anatomy.
+    """
+
+    # --- Model Specific Configuration ---
+    name: str = "venice_image_generation_fluently_xl"
+    description: str = (
+        "Generate images using the Fluently-XL model (via Venice AI).\n"
+        "Aims for improved aesthetics, lighting, realism, and anatomy. Good for professional-quality images.\n"
+        "Provide a text prompt describing the image (up to 1500 chars).\n"
+        f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n"
+        "Supports dimensions up to 2048x2048 (multiple of 8)."
+    )
+    model_id: str = "fluently-xl"
+
+    # args_schema and _arun are inherited from VeniceImageGenerationBaseTool
diff --git a/intentkit/skills/venice_image/image_generation/image_generation_flux_dev.py b/intentkit/skills/venice_image/image_generation/image_generation_flux_dev.py
new file mode 100644
index 00000000..44c7ed85
--- /dev/null
+++ b/intentkit/skills/venice_image/image_generation/image_generation_flux_dev.py
@@ -0,0 +1,27 @@
+from intentkit.skills.venice_image.image_generation.image_generation_base import (
+    VeniceImageGenerationBaseTool,
+)
+from intentkit.skills.venice_image.image_generation.image_generation_input import (
+    STYLE_PRESETS,
+)
+
+
+class ImageGenerationFluxDev(VeniceImageGenerationBaseTool):
+    """
+    Tool for generating images using Venice AI's Flux Dev model.
+    Developed by Black Forest Labs, this is a 12 billion parameter rectified flow transformer.
+    """
+
+    # --- Model Specific Configuration ---
+    name: str = "venice_image_generation_flux_dev"
+    description: str = (
+        "Generate images using Venice AI's Flux Dev model (by Black Forest Labs).\n"
+        "This 12B parameter model is good for research and innovative art workflows.\n"
+        "Provide a text prompt describing the image (up to 2048 chars).\n"
+        f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n"
+        "Supports dimensions up to 2048x2048 (multiple of 8).\n"
+        "Use must comply with the FLUX.1 [dev] Non-Commercial License."
+ ) + model_id: str = "flux-dev" + + # args_schema and _arun are inherited from VeniceImageGenerationBaseTool diff --git a/intentkit/skills/venice_image/image_generation/image_generation_flux_dev_uncensored.py b/intentkit/skills/venice_image/image_generation/image_generation_flux_dev_uncensored.py new file mode 100644 index 00000000..6e7ae2f4 --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_flux_dev_uncensored.py @@ -0,0 +1,26 @@ +from intentkit.skills.venice_image.image_generation.image_generation_base import ( + VeniceImageGenerationBaseTool, +) +from intentkit.skills.venice_image.image_generation.image_generation_input import ( + STYLE_PRESETS, +) + + +class ImageGenerationFluxDevUncensored(VeniceImageGenerationBaseTool): + """ + Tool for generating images using Venice AI's Flux Dev Uncensored model. + An uncensored version of the flux-dev model for unrestricted generation. + """ + + # --- Model Specific Configuration --- + name: str = "venice_image_generation_flux_dev_uncensored" + description: str = ( + "Generate images using Venice AI's Flux Dev Uncensored model.\n" + "This is an uncensored version of flux-dev, suitable for unrestricted content including NSFW.\n" + "Provide a text prompt describing the image (up to 2048 chars).\n" + f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n" + "Supports dimensions up to 2048x2048 (multiple of 8)." + ) + model_id: str = "flux-dev-uncensored" + + # args_schema and _arun are inherited from VeniceImageGenerationBaseTool diff --git a/intentkit/skills/venice_image/image_generation/image_generation_input.py b/intentkit/skills/venice_image/image_generation/image_generation_input.py new file mode 100644 index 00000000..6b8f7433 --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_input.py @@ -0,0 +1,158 @@ +from typing import Literal, Optional + +from pydantic import BaseModel, Field, HttpUrl + +STYLE_PRESETS = [ + "3D Model", + "Analog Film", + "Anime", + "Cinematic", + "Comic Book", + "Craft Clay", + "Digital Art", + "Enhance", + "Fantasy Art", + "Isometric Style", + "Line Art", + "Lowpoly", + "Neon Punk", + "Origami", + "Photographic", + "Pixel Art", + "Texture", + "Advertising", + "Food Photography", + "Real Estate", + "Abstract", + "Cubist", + "Graffiti", + "Hyperrealism", + "Impressionist", + "Pointillism", + "Pop Art", + "Psychedelic", + "Renaissance", + "Steampunk", + "Surrealist", + "Typography", + "Watercolor", + "Fighting Game", + "GTA", + "Super Mario", + "Minecraft", + "Pokemon", + "Retro Arcade", + "Retro Game", + "RPG Fantasy Game", + "Strategy Game", + "Street Fighter", + "Legend of Zelda", + "Architectural", + "Disco", + "Dreamscape", + "Dystopian", + "Fairy Tale", + "Gothic", + "Grunge", + "Horror", + "Minimalist", + "Monochrome", + "Nautical", + "Space", + "Stained Glass", + "Techwear Fashion", + "Tribal", + "Zentangle", + "Collage", + "Flat Papercut", + "Kirigami", + "Paper Mache", + "Paper Quilling", + "Papercut Collage", + "Papercut Shadow Box", + "Stacked Papercut", + "Thick Layered Papercut", + "Alien", + "Film Noir", + "HDR", + "Long Exposure", + "Neon Noir", + "Silhouette", + "Tilt-Shift", +] + +STYLE_PRESETS_DESCRIPTION = ( + "Optional style preset to apply. Available options: " + + ", ".join([f"'{s}'" for s in STYLE_PRESETS]) + + ". Defaults to 'Photographic'." 
+) + + +class InpaintMask(BaseModel): + image_prompt: str = Field( + ..., + description="A text prompt describing the original input image that an image model would use to produce a similar/identical image, including the changed features the user will be inpainting.", + ) + inferred_object: str = Field( + ..., description="The content being added via inpainting." + ) + object_target: str = Field( + ..., description="Element(s) in the original image to be inpainted over." + ) + + +class Inpaint(BaseModel): + image_url: HttpUrl = Field( + ..., + description="Image target to inpaint", + ) + strength: int = Field( + ..., ge=0, le=100, description="Strength of the inpainting (0-100).", example=50 + ) + mask: InpaintMask = Field(..., description="Mask settings for inpainting.") + + +class VeniceImageGenerationInput(BaseModel): + """Model representing input parameters for Venice Image Generation.""" + + prompt: str = Field( + description="The main text prompt describing what should be included in the generated image." + ) + seed: Optional[int] = Field( + default=None, + description="Random seed value to control image generation randomness. " + "Use the same seed to reproduce identical results. If not set, a random seed will be used.", + ) + negative_prompt: Optional[str] = Field( + default=None, + description="Text describing what should be excluded from the generated image. " + "If not provided, the default agent configuration will be used.", + ) + width: Optional[int] = Field( + default=1024, + le=2048, + description="Width of the generated image in pixels. Maximum allowed is 2048. Default is 1024.", + ) + height: Optional[int] = Field( + default=1024, + le=2048, + description="Height of the generated image in pixels. Maximum allowed is 2048. Default is 1024.", + ) + format: Literal["png", "jpeg", "webp"] = Field( + default="png", + description="Output image format. Options are 'png', 'jpeg', or 'webp'. Defaults to 'png'.", + ) + cfg_scale: Optional[float] = Field( + default=7.5, + description="Classifier-Free Guidance (CFG) scale controls how closely the image follows the prompt. " + "Higher values (1-20) result in more adherence. Default is 7.5.", + ) + style_preset: Optional[str] = Field( + default="Photographic", description=STYLE_PRESETS_DESCRIPTION + ) + inpainting: Optional[Inpaint] = Field( + default=None, + description="Optional inpainting operation that allows modification of specific objects within an image. " + "Requires an original image url, a strength value (0-100), and detailed mask instructions " + "to define which part of the image should be edited and what should replace it.", + ) diff --git a/intentkit/skills/venice_image/image_generation/image_generation_lustify_sdxl.py b/intentkit/skills/venice_image/image_generation/image_generation_lustify_sdxl.py new file mode 100644 index 00000000..49e96acd --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_lustify_sdxl.py @@ -0,0 +1,26 @@ +from intentkit.skills.venice_image.image_generation.image_generation_base import ( + VeniceImageGenerationBaseTool, +) +from intentkit.skills.venice_image.image_generation.image_generation_input import ( + STYLE_PRESETS, +) + + +class ImageGenerationLustifySDXL(VeniceImageGenerationBaseTool): + """ + Tool for generating images using the Lustify SDXL model via Venice AI. + A photorealistic SDXL checkpoint primarily focused on NSFW content, but can do SFW. 
+ """ + + # --- Model Specific Configuration --- + name: str = "venice_image_generation_lustify_sdxl" + description: str = ( + "Generate images using the Lustify SDXL model (via Venice AI).\n" + "A photorealistic SDXL model focused on NSFW scenes, but can generate SFW images (objects, animals, fantasy).\n" + "Provide a text prompt describing the image (up to 1500 chars).\n" + f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n" + "Supports dimensions up to 2048x2048 (multiple of 8)." + ) + model_id: str = "lustify-sdxl" + + # args_schema and _arun are inherited from VeniceImageGenerationBaseTool diff --git a/intentkit/skills/venice_image/image_generation/image_generation_pony_realism.py b/intentkit/skills/venice_image/image_generation/image_generation_pony_realism.py new file mode 100644 index 00000000..3c9b3992 --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_pony_realism.py @@ -0,0 +1,26 @@ +from intentkit.skills.venice_image.image_generation.image_generation_base import ( + VeniceImageGenerationBaseTool, +) +from intentkit.skills.venice_image.image_generation.image_generation_input import ( + STYLE_PRESETS, +) + + +class ImageGenerationPonyRealism(VeniceImageGenerationBaseTool): + """ + Tool for generating images using the Pony Realism model via Venice AI. + Focused on high-detail, realistic images, especially anime/character designs. Uses Danbooru tags. + """ + + # --- Model Specific Configuration --- + name: str = "venice_image_generation_pony_realism" + description: str = ( + "Generate images using the Pony Realism model (via Venice AI).\n" + "Creates high-detail, realistic images, good for anime/character designs. Benefits from Danbooru tags (e.g., 'score_9', 'female'/'male').\n" + "Provide a text prompt describing the image (up to 1500 chars).\n" + f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n" + "Supports dimensions up to 2048x2048 (multiple of 8). Marked as 'most_uncensored'." + ) + model_id: str = "pony-realism" + + # args_schema and _arun are inherited from VeniceImageGenerationBaseTool diff --git a/intentkit/skills/venice_image/image_generation/image_generation_stable_diffusion_3_5.py b/intentkit/skills/venice_image/image_generation/image_generation_stable_diffusion_3_5.py new file mode 100644 index 00000000..23662c85 --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_stable_diffusion_3_5.py @@ -0,0 +1,28 @@ +from intentkit.skills.venice_image.image_generation.image_generation_base import ( + VeniceImageGenerationBaseTool, +) +from intentkit.skills.venice_image.image_generation.image_generation_input import ( + STYLE_PRESETS, +) + + +class ImageGenerationStableDiffusion35(VeniceImageGenerationBaseTool): + """ + Tool for generating images using Venice AI's interface to Stable Diffusion 3.5 Large (alternative ID). + Developed by Stability AI, using MMDiT architecture. Good for art and design. + """ + + # --- Model Specific Configuration --- + name: str = "venice_image_generation_stable_diffusion_3_5" # Different skill name + description: str = ( + "Generate images using Stability AI's Stable Diffusion 3.5 Large model (alternative ID via Venice AI).\n" + "Ideal for artworks, design processes, and educational use. 
Not for factual representations.\n" + "Provide a text prompt describing the image (up to 1500 chars).\n" + f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n" + "Supports dimensions up to 2048x2048 (multiple of 16).\n" + "Use must comply with Stability AI's Acceptable Use Policy." + ) + # Use the specific ID provided by Venice + model_id: str = "stable-diffusion-3.5" # Different model ID + + # args_schema and _arun are inherited from VeniceImageGenerationBaseTool diff --git a/intentkit/skills/venice_image/image_generation/image_generation_venice_sd35.py b/intentkit/skills/venice_image/image_generation/image_generation_venice_sd35.py new file mode 100644 index 00000000..a9513c93 --- /dev/null +++ b/intentkit/skills/venice_image/image_generation/image_generation_venice_sd35.py @@ -0,0 +1,28 @@ +from intentkit.skills.venice_image.image_generation.image_generation_base import ( + VeniceImageGenerationBaseTool, +) +from intentkit.skills.venice_image.image_generation.image_generation_input import ( + STYLE_PRESETS, +) + + +class ImageGenerationVeniceSD35(VeniceImageGenerationBaseTool): + """ + Tool for generating images using Venice AI's interface to Stable Diffusion 3.5 Large. + Developed by Stability AI, using MMDiT architecture. Good for art and design. + """ + + # --- Model Specific Configuration --- + name: str = "venice_image_generation_venice_sd35" + description: str = ( + "Generate images using Stability AI's Stable Diffusion 3.5 Large model (via Venice AI).\n" + "Ideal for artworks, design processes, and educational use. Not for factual representations.\n" + "Provide a text prompt describing the image (up to 1500 chars).\n" + f"Optionally specify a style preset from the list: {', '.join(STYLE_PRESETS)}.\n" + "Supports dimensions up to 2048x2048 (multiple of 16).\n" + "Use must comply with Stability AI's Acceptable Use Policy." + ) + # Use the specific ID provided by Venice, assuming it matches the name + model_id: str = "venice-sd35" + + # args_schema and _arun are inherited from VeniceImageGenerationBaseTool diff --git a/intentkit/skills/venice_image/image_upscale/README.md b/intentkit/skills/venice_image/image_upscale/README.md new file mode 100644 index 00000000..c1b622bc --- /dev/null +++ b/intentkit/skills/venice_image/image_upscale/README.md @@ -0,0 +1,111 @@ +# image_upscale + +**Image Upscale** is a sub-tool of the Venice Image suite. It uses Venice AI’s powerful super-resolution models to increase the size and clarity of images by 2x or 4x, making low-resolution images suitable for HD displays, print, or content enhancement. This is not just simple pixel stretching—it uses AI to intelligently recreate additional detail, texture, and smoothness. + +--- + +## What does it do? + +Given any publicly accessible image URL, the tool fetches the image, applies deep-learning upscaling (super-resolution), and returns a new image URL to the upscaled output. Users can choose between 2x or 4x upscaling depending on needs, and can optionally control how much "realism"/texture is preserved from the original. 
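+
+Under the hood, the skill (see `image_upscale.py` later in this diff) base64-encodes the fetched image and POSTs it together with `scale` and `replication` to Venice's upscale endpoint. A minimal sketch of that flow, assuming the public `https://api.venice.ai` base URL, a `VENICE_API_KEY` environment variable, and bearer auth (the real skill routes through its shared client and rate limiter):
+
+```python
+import base64
+import os
+
+import httpx
+
+
+async def upscale_sketch(
+    image_url: str, scale: int = 2, replication: float = 0.35
+) -> httpx.Response:
+    """Illustrative only: mirrors the payload built in image_upscale.py."""
+    async with httpx.AsyncClient(timeout=90) as client:
+        image = await client.get(image_url, follow_redirects=True)
+        image.raise_for_status()
+        payload = {
+            "image": base64.b64encode(image.content).decode("utf-8"),
+            "scale": scale,
+            "replication": replication,
+        }
+        # Endpoint path matches the skill's call; base URL and auth header are assumptions.
+        return await client.post(
+            "https://api.venice.ai/api/v1/image/upscale",
+            json=payload,
+            headers={"Authorization": f"Bearer {os.environ['VENICE_API_KEY']}"},
+        )
+```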
+
+Key benefits:
+- Consistent color, sharpness, and clarity at higher resolutions
+- AI removes pixelation and can reduce compression artifacts
+- Optional "replication" factor lets you tune how much of the original’s noise/detail is restored
+
+---
+
+## Input
+
+| Field | Type | Description | Required | Default |
+|--------------|------------------|------------------------------------------------------------------------------------------------------|----------|---------|
+| image_url | HttpUrl | Public URL to the image you want to upscale. | Yes | |
+| scale | Literal[2, 4] | The scaling factor (2 for 2x, 4 for 4x enlargement). | No | 2 |
+| replication | float (0.1–1.0) | How much to preserve edges, texture, and noise from the original (higher = more detail, less smoothing). | No | 0.35 |
+
+Example:
+```json
+{
+  "image_url": "https://example.com/photo.jpg",
+  "scale": 4,
+  "replication": 0.5
+}
+```
+
+---
+
+## Output
+
+On success, returns a result dictionary containing at least:
+- `success`: true
+- `result`: URL for the upscaled image (typically hosted on S3 or compatible object storage)
+- Additional metadata as needed
+
+Example:
+```json
+{
+  "success": true,
+  "result": "https://s3.storage.example/venice_image/image_upscale/1a2b3c....png"
+}
+```
+
+On error:
+```json
+{
+  "success": false,
+  "error": "Failed to fetch or validate image from URL: ...",
+  "result": null
+}
+```
+
+---
+
+## Typical Use Cases
+
+- **Photo Restoration** – Upscale old, small web images for print or display
+- **Content Creation** – Create HD assets from AI-generated or web-sourced images
+- **Design/Prototyping** – Improve source assets for posters, presentations, or large canvas
+- **Archival** – Enhance legacy digital art or research scans
+
+---
+
+## Advanced Notes
+
+- Works for all common raster formats (JPG, PNG, WEBP). Unsupported types are auto-converted to PNG.
+- "Replication" factor explanation:
+  - **Low values (e.g., 0.1–0.25):** smoother, less noise, more “plastic” look (good for AI/clean results)
+  - **High values (e.g., 0.7–1.0):** preserves original photo noise/texture, less smoothing (good for art/photo upscaling)
+- Original aspect ratio is always preserved.
+
+---
+
+## Limitations
+
+- Does not add content—only increases fidelity of existing features.
+- Output detail is limited by source image quality and AI model limits.
+- NSFW images will be blurred if safe mode is enabled.
+
+---
+
+## Example Usage (Pseudo-code)
+
+```python
+result = await agent.send_tool(
+    "image_upscale",
+    {
+        "image_url": "https://somehost.com/image.png",
+        "scale": 2,
+        "replication": 0.4
+    }
+)
+upscaled_url = result["result"]
+```
+
+---
+
+## Compliance & Attribution
+
+- You must have rights to use the supplied image.
+- Follows [Venice AI terms of service](https://venice.ai/).
+ +--- \ No newline at end of file diff --git a/intentkit/skills/venice_image/image_upscale/__init__.py b/intentkit/skills/venice_image/image_upscale/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/venice_image/image_upscale/image_upscale.py b/intentkit/skills/venice_image/image_upscale/image_upscale.py new file mode 100644 index 00000000..cc68b59e --- /dev/null +++ b/intentkit/skills/venice_image/image_upscale/image_upscale.py @@ -0,0 +1,88 @@ +import logging +from typing import Literal, Optional + +from pydantic import HttpUrl + +from intentkit.skills.base import ToolException +from intentkit.skills.venice_image.image_upscale.image_upscale_base import ( + VeniceImageUpscaleBaseTool, +) +from intentkit.skills.venice_image.utils import fetch_image_as_base64 + +logger = logging.getLogger(__name__) + + +class ImageUpscale(VeniceImageUpscaleBaseTool): + """ + Upscales an existing image provided via URL by a factor of 2 or 4 using the Venice AI API. + Ideal for enhancing the resolution of previously generated or existing images. + """ + + # --- Tool Specific Configuration --- + name: str = "venice_image_upscale" + description: str = ( + "Upscales an existing image from a URL using Venice AI.\n" + "Provide the public URL of the image to upscale.\n" + "Specify the desired scale factor: 2 (for 2x upscale) or 4 (for 4x upscale).\n" + "Returns the URL of the upscaled image." + ) + + # No model_id needed for the generic upscale endpoint currently + async def _arun( + self, + image_url: HttpUrl, + scale: Literal[2, 4], + replication: Optional[float] = 0.35, + **kwargs, + ) -> dict: + """ + Asynchronously upscales an image from the provided URL using the Venice AI API. + + Args: + image_url (HttpUrl): The public URL of the image to upscale. + scale (Literal[2, 4]): The scale factor for upscaling (2x or 4x). + replication (Optional[float]): The replication factor for the upscale process, defaults to 0.35. + config (RunnableConfig, optional): Configuration for the runnable, if any. + **kwargs: Additional keyword arguments. + + Returns: + dict: The API response containing the URL of the upscaled image. + + Raises: + ToolException: If the image cannot be fetched, validated, or upscaled, or if an API error occurs. 
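+
+        Example (illustrative; assumes `tool` is an already-constructed
+        instance of this skill):
+            result = await tool._arun(
+                image_url="https://example.com/photo.jpg", scale=2
+            )
+            upscaled_url = result["result"]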
+        """
+
+        try:
+            context = self.get_context()
+
+            await self.apply_venice_rate_limit(context)
+
+            image_base64 = await fetch_image_as_base64(image_url)
+            if not image_base64:
+                error_msg = f"Failed to fetch or validate image from URL: {image_url}"
+                logger.error(error_msg)
+                raise ToolException(
+                    str({"success": False, "error": error_msg, "result": None})
+                )
+
+            payload = {
+                "image": image_base64,
+                "scale": scale,
+                "replication": replication,
+            }
+            result, error = await self.post("api/v1/image/upscale", payload, context)
+            if error:
+                raise ToolException(f"Venice Image Upscale API error: {error}")
+            return result
+        except ToolException:
+            raise
+        except Exception as e:
+            logger.error(f"Error in {self.name}: {str(e)}")
+            raise ToolException(
+                str(
+                    {
+                        "success": False,
+                        "error": f"An unexpected error occurred: {str(e)}",
+                    }
+                )
+            ) diff --git a/intentkit/skills/venice_image/image_upscale/image_upscale_base.py b/intentkit/skills/venice_image/image_upscale/image_upscale_base.py new file mode 100644 index 00000000..903b6ed5 --- /dev/null +++ b/intentkit/skills/venice_image/image_upscale/image_upscale_base.py @@ -0,0 +1,23 @@ +from typing import Type
+
+from pydantic import BaseModel, Field
+
+# Import the generic base and shared input
+from intentkit.skills.venice_image.base import VeniceImageBaseTool
+from intentkit.skills.venice_image.image_upscale.image_upscale_input import (
+    VeniceImageUpscaleInput,
+)
+
+
+class VeniceImageUpscaleBaseTool(VeniceImageBaseTool):
+    """
+    Base class for Venice AI *Image Upscaling* tools.
+    Inherits from VeniceImageBaseTool and handles specifics of the
+    /image/upscale endpoint.
+    """
+
+    args_schema: Type[BaseModel] = VeniceImageUpscaleInput
+    name: str = Field(description="The unique name of the image upscaling tool.")
+    description: str = Field(
+        description="A description of what the image upscaling tool does."
+    ) diff --git a/intentkit/skills/venice_image/image_upscale/image_upscale_input.py b/intentkit/skills/venice_image/image_upscale/image_upscale_input.py new file mode 100644 index 00000000..4179a00e --- /dev/null +++ b/intentkit/skills/venice_image/image_upscale/image_upscale_input.py @@ -0,0 +1,22 @@ +from typing import Literal, Optional
+
+from pydantic import BaseModel, Field, HttpUrl
+
+
+class VeniceImageUpscaleInput(BaseModel):
+    """Input for the Image Upscale tool."""
+
+    image_url: HttpUrl = Field(
+        description="The URL of the image to upscale. Must be a publicly accessible URL.",
+    )
+    replication: Optional[float] = Field(
+        default=0.35,
+        ge=0.1,
+        le=1,
+        description=(
+            'How strongly lines and noise in the base image are preserved. Higher values are noisier but less plastic/AI "generated"/hallucinated. '
+            "Required range: 0.1 <= x <= 1"
+        ),
+    )
+    scale: Literal[2, 4] = Field(
+        default=2,
+        description="The factor by which to upscale the image (either 2 or 4). Defaults to 2.",
+    ) diff --git a/intentkit/skills/venice_image/image_vision/README.md b/intentkit/skills/venice_image/image_vision/README.md new file mode 100644 index 00000000..e6cbd546 --- /dev/null +++ b/intentkit/skills/venice_image/image_vision/README.md @@ -0,0 +1,112 @@ +# image_vision
+
+**Image Vision** is a sub-tool in the Venice Image suite that provides highly detailed, comprehensive, AI-generated textual descriptions of images. It is designed for analyzing and summarizing the visual content of any image accessible via URL.
+
+---
+
+## What does it do?
+ +This tool uses Venice AI’s latest visual-language model (`qwen-2.5-vl`) to “see” an image as a human or curator would. It returns a paragraph-length, multi-faceted, exhaustive description covering: + +- All visible objects and their properties (colors, shapes, count, arrangement) +- Scene composition: spatial arrangement, relationships, perspective +- Surface textures, materials, lighting, color palette +- Contextual, stylistic, or artistic features (e.g., “art deco style,” “digital illustration”) +- Mood, visual storytelling elements, or any notable anomalies +- Additional inferred details where possible + +This tool is ideal for accessibility, archiving, content discovery, search, and cognitive AI workflows. + +--- + +## Input + +| Field | Type | Description | Required | +|--------------|----------|---------------------------------------------------------------------|----------| +| image_url | HttpUrl | Publicly accessible URL to the target image. | Yes | + +Example: +```json +{ + "image_url": "https://example.com/some_picture.jpg" +} +``` + +--- + +## Example Output + +A typical result will be a dictionary with the generated description under a relevant key (the raw API response may vary based on Venice formats): + +```json +{ + "success": true, + "result": "A vibrant, high-resolution digital illustration depicting a Venetian canal at midday. The scene features pastel-hued buildings on either side of the canal with ornate balconies and open shuttered windows. Gondolas and small boats glide over the calm, reflective water, casting rippling shadows. The sky is clear and blue, with sunlight streaming across the facades, creating sharp contrasts and lively reflections. Crowds of tourists are visible on the far bank, while colorful banners and flowerpots accent the architecture. The composition is balanced, with attention to perspective and depth, and the general mood is lively and picturesque." +} +``` + +In case of errors (invalid URL, fetch issues, inappropriate filetype, etc.), a descriptive error message is returned: + +```json +{ + "success": false, + "error": "Failed to fetch or validate image from URL: https://example.com/broken.jpg", + "result": null +} +``` + +--- + +## Typical Use Cases + +- **Accessibility:** Generate alt-text for visually impaired users. +- **AI Agents:** Understand and react to visual content in workflow automations. +- **Search & Tagging:** Automatically caption and index photo libraries. +- **Content Moderation:** Pre-screen or context-check image uploads. +- **Educational Tools:** Explain or transcribe visual materials for students. + +--- + +## Advanced Notes + +- The tool only works with image URLs that are publicly accessible and in a common web format (JPG, PNG, etc). +- Image URLs are validated and, where necessary, format-normalized using Pillow. +- The system never stores or caches the image, but will download it temporarily for analysis. + +**Model Details** +Venice AI leverages licensed large vision-language models; this tool currently uses `qwen-2.5-vl`, known for dense, multi-aspect, human-like image explanations. + +--- + +## Configuration Options + +- No special options; inherits API key, safe mode, and base logging from the main suite configuration. + +--- + +## Limitations + +- May not detect “hidden” content, steganographic messages, or small details lost in low-res images. +- Will describe as best possible—if the image is blank, corrupted, or unrelated, a best-effort bland description may be given. 
+- Not a content moderator—use with your own safety checks if required. + +--- + +## Example Usage (Pseudo-code) +```python +result = await agent.send_tool( + "image_vision", + { + "image_url": "https://mycdn.com/image.jpg" + } +) +desc = result["result"] +``` + +--- + +## Attribution/Compliance + +All usage subject to [Venice AI terms of service](https://venice.ai/). Do not use for unlawful or privacy-invading data mining. + +--- diff --git a/intentkit/skills/venice_image/image_vision/__init__.py b/intentkit/skills/venice_image/image_vision/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/intentkit/skills/venice_image/image_vision/image_vision.py b/intentkit/skills/venice_image/image_vision/image_vision.py new file mode 100644 index 00000000..82dcde35 --- /dev/null +++ b/intentkit/skills/venice_image/image_vision/image_vision.py @@ -0,0 +1,98 @@ +import logging +from typing import Any, Type + +from pydantic import BaseModel, HttpUrl + +from intentkit.skills.base import ToolException +from intentkit.skills.venice_image.image_vision.image_vision_base import ( + VeniceImageVisionBaseTool, +) +from intentkit.skills.venice_image.image_vision.image_vision_input import ( + VeniceImageVision, +) +from intentkit.skills.venice_image.utils import fetch_image_as_base64 + +logger = logging.getLogger(__name__) + + +class ImageVision(VeniceImageVisionBaseTool): + """ + Describes an image provided via URL using the Venice AI API. + Ideal for understanding the content of an existing image. + """ + + name: str = "venice_image_vision" + description: str = ( + "Describes an image from a URL using Venice AI.\n" + "Provide the public URL of the image to describe.\n" + "Returns a descriptive text of the image." + ) + args_schema: Type[BaseModel] = VeniceImageVision + # No model_id needed for the generic vision endpoint currently + + async def _arun( + self, + image_url: HttpUrl, + **kwargs, + ) -> dict[str, Any]: + try: + context = self.get_context() + + await self.apply_venice_rate_limit(context) + + image_base64 = await fetch_image_as_base64(image_url) + if not image_base64: + error_msg = f"Failed to fetch or validate image from URL: {image_url}" + logger.error(error_msg) + return {"success": False, "error": error_msg, "result": None} + + payload = { + "model": "qwen-2.5-vl", + "messages": [ + { + "role": "system", + "content": [ + { + "type": "text", + "text": ( + "You are an AI model that provides detailed descriptions of images. " + "When given an image, you must respond with a description that is as comprehensive and detailed as possible. " + "Focus on identifying all objects, colors, textures, and any other relevant features present in the image. " + "Provide a thorough and exhaustive account of what is visible in the image." + ), + } + ], + }, + { + "role": "user", + "content": [ + { + "type": "text", + "text": ( + "Provide an extremely detailed description of the image, focusing on every discernible aspect. " + "Include information about objects, colors, textures, lighting conditions, artistic style (if applicable), " + "composition, and any other relevant details that would allow someone to accurately understand and potentially " + "recreate the image. Be as thorough and comprehensive as possible." 
+                                ),
+                            },
+                            {"type": "image_url", "image_url": {"url": str(image_url)}},
+                        ],
+                    },
+                ],
+            }
+
+            result, error = await self.post("api/v1/chat/completions", payload, context)
+            if error:
+                raise ToolException(f"Venice Image Vision API error: {error}")
+            return result
+        except ToolException as e:
+            return {
+                "success": False,
+                "error": str(e),
+            }
+        except Exception as e:
+            logger.error(f"Error in {self.name}: {str(e)}")
+            return {
+                "success": False,
+                "error": f"An unexpected error occurred: {str(e)}",
+            } diff --git a/intentkit/skills/venice_image/image_vision/image_vision_base.py b/intentkit/skills/venice_image/image_vision/image_vision_base.py new file mode 100644 index 00000000..68696462 --- /dev/null +++ b/intentkit/skills/venice_image/image_vision/image_vision_base.py @@ -0,0 +1,17 @@ +from pydantic import Field
+
+# Import the generic base and shared input
+from intentkit.skills.venice_image.base import VeniceImageBaseTool
+
+
+class VeniceImageVisionBaseTool(VeniceImageBaseTool):
+    """
+    Base class for Venice AI *Image Vision* tools.
+    Inherits from VeniceImageBaseTool and handles specifics of the
+    /chat/completions endpoint.
+    """
+
+    name: str = Field(description="The unique name of the image vision tool.")
+    description: str = Field(
+        description="A description of what the image vision tool does."
+    ) diff --git a/intentkit/skills/venice_image/image_vision/image_vision_input.py b/intentkit/skills/venice_image/image_vision/image_vision_input.py new file mode 100644 index 00000000..72ba8e39 --- /dev/null +++ b/intentkit/skills/venice_image/image_vision/image_vision_input.py @@ -0,0 +1,9 @@ +from pydantic import BaseModel, Field, HttpUrl
+
+
+class VeniceImageVision(BaseModel):
+    """Input for the Image Vision tool."""
+
+    image_url: HttpUrl = Field(
+        description="The URL of the image to be described by the Vision model. Must be a publicly accessible URL.",
+    ) diff --git a/intentkit/skills/venice_image/schema.json b/intentkit/skills/venice_image/schema.json new file mode 100644 index 00000000..1ecbf7f9 --- /dev/null +++ b/intentkit/skills/venice_image/schema.json @@ -0,0 +1,267 @@ +{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "title": "Venice Image",
+  "description": "Skills for generating images using the Venice AI API.",
+  "x-icon": "https://ai.service.crestal.dev/skills/venice_image/venice_image.jpg",
+  "x-tags": [
+    "AI",
+    "Image Generation"
+  ],
+  "properties": {
+    "enabled": {
+      "type": "boolean",
+      "title": "Enabled",
+      "description": "Whether this skill category is enabled",
+      "default": false
+    },
+    "states": {
+      "type": "object",
+      "title": "Skill States",
+      "description": "States for each Venice Image skill (disabled, public, or private)",
+      "properties": {
+        "image_vision": {
+          "type": "string",
+          "title": "Image Vision",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Describes an image provided via URL using the Venice AI API. 
Ideal for understanding the content of an existing image",
+          "default": "public"
+        },
+        "image_enchance": {
+          "type": "string",
+          "title": "Image Enhance",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Tool for **enhancing** (modifying specific areas of) an existing image using a selected image model via Venice AI",
+          "default": "public"
+        },
+        "image_upscale": {
+          "type": "string",
+          "title": "Image Upscale",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Upscale an existing image by 2x or 4x using Venice AI.",
+          "default": "disabled"
+        },
+        "image_generation_flux_dev": {
+          "type": "string",
+          "title": "Image Generation (Flux-Dev)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using Venice AI's Flux Dev model (research, art workflows).",
+          "default": "public"
+        },
+        "image_generation_flux_dev_uncensored": {
+          "type": "string",
+          "title": "Image Generation (Flux-Dev-Uncensored)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using the uncensored Flux Dev model (unrestricted, NSFW).",
+          "default": "disabled"
+        },
+        "image_generation_venice_sd35": {
+          "type": "string",
+          "title": "Image Generation (Venice SD3.5)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using Stability AI's SD 3.5 Large (art, design).",
+          "default": "disabled"
+        },
+        "image_generation_fluently_xl": {
+          "type": "string",
+          "title": "Image Generation (Fluently-XL)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using Fluently-XL (aesthetics, lighting, realism).",
+          "default": "disabled"
+        },
+        "image_generation_lustify_sdxl": {
+          "type": "string",
+          "title": "Image Generation (Lustify-SDXL)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using Lustify SDXL (photorealistic, focus on NSFW).",
+          "default": "disabled"
+        },
+        "image_generation_pony_realism": {
+          "type": "string",
+          "title": "Image Generation (Pony-Realism)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using Pony Realism (high-detail, realistic, anime/characters).",
+          "default": "disabled"
+        },
+        "image_generation_stable_diffusion_3_5": {
+          "type": "string",
+          "title": "Image Generation (Stable Diffusion 3.5 - Alt ID)",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Generate images using Stability AI's SD 3.5 Large (alternative API ID).",
+          "default": "disabled"
+        }
+      }
+    },
+    "safe_mode": {
+      "type": "boolean",
+      "title": "Safe Mode",
+      "description": "Whether to use safe mode. 
If enabled, this will blur images that are classified as having adult content",
+      "default": true
+    },
+    "embed_exif_metadata": {
+      "type": "boolean",
+      "title": "Embed Exif Metadata",
+      "description": "Embed prompt generation information into the image's EXIF metadata",
+      "default": false
+    },
+    "hide_watermark": {
+      "type": "boolean",
+      "title": "Hide Watermark",
+      "description": "Whether to hide the Venice watermark. Venice may ignore this parameter for certain generated content.",
+      "default": true
+    },
+    "negative_prompt": {
+      "type": "string",
+      "title": "Default Negative Prompt",
+      "description": "Default negative prompt to use if none is provided in the skill call.",
+      "default": "(worst quality: 1.4), bad quality, nsfw"
+    },
+    "api_key_provider": {
+      "type": "string",
+      "title": "API Key Provider",
+      "description": "Provider of the API key for the Venice AI API service",
+      "enum": [
+        "agent_owner"
+      ],
+      "x-enum-title": [
+        "Owner Provided"
+      ],
+      "default": "agent_owner"
+    }
+  },
+  "required": [
+    "states",
+    "enabled"
+  ],
+  "if": {
+    "properties": {
+      "api_key_provider": {
+        "const": "agent_owner"
+      }
+    }
+  },
+  "then": {
+    "properties": {
+      "api_key": {
+        "type": "string",
+        "title": "Venice API Key",
+        "x-link": "[Get your API key](https://venice.ai/)",
+        "x-sensitive": true,
+        "description": "API key for Venice AI services. Required when this skill category is enabled."
+      },
+      "rate_limit_number": {
+        "type": "integer",
+        "title": "Rate Limit Number",
+        "description": "Number of requests allowed per time window. Only applies if using an agent-specific API key."
+      },
+      "rate_limit_minutes": {
+        "type": "integer",
+        "title": "Rate Limit Minutes",
+        "description": "Time window in minutes for rate limiting. Only applies if using an agent-specific API key."
+      }
+    },
+    "if": {
+      "properties": {
+        "enabled": {
+          "const": true
+        }
+      }
+    },
+    "then": {
+      "required": [
+        "api_key"
+      ]
+    }
+  },
+  "additionalProperties": true
+} \ No newline at end of file diff --git a/intentkit/skills/venice_image/utils.py b/intentkit/skills/venice_image/utils.py new file mode 100644 index 00000000..fe2f066e --- /dev/null +++ b/intentkit/skills/venice_image/utils.py @@ -0,0 +1,78 @@ +import base64
+import io
+import logging
+from typing import Optional
+
+import filetype
+import httpx
+from PIL import Image
+from pydantic import HttpUrl
+
+from intentkit.skills.base import ToolException
+
+logger = logging.getLogger(__name__)
+
+
+async def fetch_image_as_bytes(image_url: HttpUrl) -> bytes:
+    """Fetches image bytes from a given URL. Converts unsupported formats to PNG using Pillow.
+
+    Raises:
+        ToolException: If fetching or converting the image fails.
+    """
+    try:
+        async with httpx.AsyncClient(timeout=90) as client:
+            response = await client.get(str(image_url), follow_redirects=True)
+            response.raise_for_status()
+
+            original_bytes = response.content
+
+            # Guess file type from content
+            kind = filetype.guess(original_bytes)
+            detected_ext = kind.extension if kind else None
+            detected_mime = kind.mime if kind else "unknown"
+
+            if not detected_ext or not detected_mime.startswith("image/"):
+                msg = f"URL {image_url} did not return a recognizable image format. 
Detected: {detected_mime}"
+                logger.error(msg)
+                raise ToolException(msg)
+
+            if detected_ext in ("jpg", "jpeg", "png"):
+                return original_bytes
+
+            # Convert unsupported image to PNG
+            try:
+                img = Image.open(io.BytesIO(original_bytes)).convert("RGBA")
+                with io.BytesIO() as output:
+                    img.save(output, format="PNG")
+                    logger.info(
+                        f"Converted unsupported image type '{detected_ext}' to PNG."
+                    )
+                    return output.getvalue()
+            except Exception as e:
+                msg = f"Failed to convert image ({detected_ext}) to PNG: {e}"
+                logger.error(msg, exc_info=True)
+                raise ToolException(msg) from e
+
+    except httpx.HTTPStatusError as e:
+        msg = f"HTTP error fetching image {image_url}: Status {e.response.status_code}"
+        logger.error(msg)
+        raise ToolException(msg) from e
+    except httpx.RequestError as e:
+        msg = f"Network error fetching image {image_url}: {e}"
+        logger.error(msg)
+        raise ToolException(msg) from e
+    except Exception as e:
+        msg = f"Unexpected error fetching image {image_url}: {e}"
+        logger.error(msg, exc_info=True)
+        raise ToolException(msg) from e
+
+
+async def fetch_image_as_base64(image_url: HttpUrl) -> Optional[str]:
+    """Fetches an image from the URL and returns it as a Base64-encoded string.
+
+    Returns None if the image cannot be fetched or converted, so callers can
+    report a friendly error instead of propagating an exception.
+    """
+    try:
+        image_bytes = await fetch_image_as_bytes(image_url)
+    except ToolException:
+        # fetch_image_as_bytes raises on failure rather than returning None,
+        # so catch here to honor the Optional[str] contract expected by callers.
+        return None
+
+    # Convert image bytes to a Base64-encoded string
+    return base64.b64encode(image_bytes).decode("utf-8") diff --git a/intentkit/skills/venice_image/venice_image.jpg b/intentkit/skills/venice_image/venice_image.jpg new file mode 100644 index 00000000..43be8d8d Binary files /dev/null and b/intentkit/skills/venice_image/venice_image.jpg differ diff --git a/intentkit/skills/web_scraper/README.md b/intentkit/skills/web_scraper/README.md new file mode 100644 index 00000000..53a4b266 --- /dev/null +++ b/intentkit/skills/web_scraper/README.md @@ -0,0 +1,113 @@ +# Web Scraper & Content Indexing Skills
+
+Intelligent web scraping and content indexing using LangChain's WebBaseLoader with vector search capabilities.
+
+## Skills
+
+### 🔍 `scrape_and_index`
+Scrape content from URLs and index it into a searchable vector store with configurable chunking and persistent storage.
+
+### 🔎 `query_indexed_content`
+Search indexed content using semantic similarity to answer questions and retrieve relevant information.
+
+### 🌐 `website_indexer`
+Index entire websites by discovering and scraping all pages using sitemaps. Automatically finds sitemaps from robots.txt, extracts all URLs, and comprehensively indexes website content.
+
+### 📄 `document_indexer`
+Import and index document content directly to the vector database. Perfect for adding content from Google Docs, Notion pages, PDFs, or any other document sources by copy-pasting.
+
+## Key Features
+
+- **Multi-URL Support**: Scrape up to 10 URLs simultaneously
+- **Sitemap Discovery**: Automatic sitemap detection from robots.txt with common patterns
+- **Direct Text Input**: Add content directly without web scraping
+- **Smart Chunking**: Configurable text splitting (100-4000 chars) with overlap
+- **Vector Search**: FAISS + OpenAI embeddings for semantic retrieval
+- **Agent Storage**: Persistent, per-agent content indexing
+- **Content Filtering**: Include/exclude URL patterns for targeted scraping
+- **Tagging System**: Organize content with custom tags
+- **Rate Limiting**: Respectful scraping (0.1-10 req/sec)
+
+## Testing Examples
+
+### 1.
Basic Scraping & Indexing + +**Agent Prompt:** +``` +Please scrape and index this URL: https://docs.crestal.network/introduction +``` + +**Expected Response:** +- Confirmation of successful scraping +- Number of URLs processed and chunks created +- Storage confirmation message + +### 2. Custom Chunking + +**Agent Prompt:** +``` +Scrape and index https://docs.crestal.network/introduction with chunk size 500 and overlap 100. +``` + +### 3. Complete Website Indexing + +**Agent Prompt:** +``` +Index the entire documentation site at https://docs.crestal.network using its sitemap. Include only pages with '/docs/' and '/guides/' in the URL, exclude '/admin/' pages, and limit to 50 URLs. +``` + +### 4. Document Content Import + +**Agent Prompt:** +``` +I'm going to paste some content from my Google Doc. Please add it to the knowledge base: + +Title: "Meeting Notes - Q4 Strategy" +Source: "Google Docs" +Tags: "meeting, strategy, q4, planning" + +[Paste your document content here...] +``` + +### 5. Content Querying + +**Agent Prompt (after indexing):** +``` +Based on the indexed documentation, what are the main items in it? +``` + + +## Testing Workflow + +1. **Setup**: Configure the skill in your agent +2. **Index Content**: Use `scrape_and_index` with test URLs +3. **Query Content**: Use `query_indexed_content` with questions +4. **Verify**: Check responses include source attribution and relevant content + +## API Testing + +```bash +# Test scraping via API +curl -X POST "http://localhost:8000/agents/your-agent-id/chat" \ + -H "Content-Type: application/json" \ + -d '{ + "message": "Scrape and index https://docs.crestal.network/introduction" + }' + +# Test querying via API +curl -X POST "http://localhost:8000/agents/your-agent-id/chat" \ + -H "Content-Type: application/json" \ + -d '{ + "message": "What information did you find?" 
+ }' +``` + +## Dependencies + +Required packages (add to `pyproject.toml` if missing): +- `langchain-community` - WebBaseLoader and document processing +- `langchain-openai` - Embeddings +- `langchain-text-splitters` - Document chunking +- `faiss-cpu` - Vector storage +- `beautifulsoup4` - HTML parsing +- `httpx` - Async HTTP client for sitemap discovery \ No newline at end of file diff --git a/intentkit/skills/web_scraper/__init__.py b/intentkit/skills/web_scraper/__init__.py new file mode 100644 index 00000000..e7556eee --- /dev/null +++ b/intentkit/skills/web_scraper/__init__.py @@ -0,0 +1,108 @@ +"""Web scraper skills for content indexing and retrieval.""" + +import logging +from typing import TypedDict + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import SkillConfig, SkillOwnerState, SkillState +from intentkit.skills.web_scraper.base import WebScraperBaseTool +from intentkit.skills.web_scraper.document_indexer import DocumentIndexer +from intentkit.skills.web_scraper.scrape_and_index import ( + QueryIndexedContent, + ScrapeAndIndex, +) +from intentkit.skills.web_scraper.website_indexer import WebsiteIndexer + +# Cache skills at the system level, because they are stateless +_cache: dict[str, WebScraperBaseTool] = {} + +logger = logging.getLogger(__name__) + + +class SkillStates(TypedDict): + scrape_and_index: SkillOwnerState + query_indexed_content: SkillState + website_indexer: SkillOwnerState + document_indexer: SkillOwnerState + + +class Config(SkillConfig): + """Configuration for web scraper skills.""" + + states: SkillStates + + +async def get_skills( + config: "Config", + is_private: bool, + store: SkillStoreABC, + **_, +) -> list[WebScraperBaseTool]: + """Get all web scraper skills. + + Args: + config: The configuration for web scraper skills. + is_private: Whether to include private skills. + store: The skill store for persisting data. + + Returns: + A list of web scraper skills. + """ + available_skills = [] + + # Include skills based on their state + for skill_name, state in config["states"].items(): + if state == "disabled": + continue + elif state == "public" or (state == "private" and is_private): + available_skills.append(skill_name) + + # Get each skill using the cached getter + result = [] + for name in available_skills: + skill = get_web_scraper_skill(name, store) + if skill: + result.append(skill) + return result + + +def get_web_scraper_skill( + name: str, + store: SkillStoreABC, +) -> WebScraperBaseTool: + """Get a web scraper skill by name. 
+ + Args: + name: The name of the skill to get + store: The skill store for persisting data + + Returns: + The requested web scraper skill + """ + if name == "scrape_and_index": + if name not in _cache: + _cache[name] = ScrapeAndIndex( + skill_store=store, + ) + return _cache[name] + elif name == "query_indexed_content": + if name not in _cache: + _cache[name] = QueryIndexedContent( + skill_store=store, + ) + return _cache[name] + elif name == "website_indexer": + if name not in _cache: + _cache[name] = WebsiteIndexer( + skill_store=store, + ) + return _cache[name] + elif name == "document_indexer": + if name not in _cache: + _cache[name] = DocumentIndexer( + skill_store=store, + ) + return _cache[name] + else: + logger.warning(f"Unknown web scraper skill: {name}") + return None diff --git a/intentkit/skills/web_scraper/base.py b/intentkit/skills/web_scraper/base.py new file mode 100644 index 00000000..6d82a7e8 --- /dev/null +++ b/intentkit/skills/web_scraper/base.py @@ -0,0 +1,21 @@ +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.abstracts.skill import SkillStoreABC +from intentkit.skills.base import IntentKitSkill + + +class WebScraperBaseTool(IntentKitSkill): + """Base class for web scraper tools.""" + + name: str = Field(description="The name of the tool") + description: str = Field(description="A description of what the tool does") + args_schema: Type[BaseModel] + skill_store: SkillStoreABC = Field( + description="The skill store for persisting data" + ) + + @property + def category(self) -> str: + return "web_scraper" diff --git a/intentkit/skills/web_scraper/document_indexer.py b/intentkit/skills/web_scraper/document_indexer.py new file mode 100644 index 00000000..8d1345d6 --- /dev/null +++ b/intentkit/skills/web_scraper/document_indexer.py @@ -0,0 +1,141 @@ +import logging +from typing import Type + +from pydantic import BaseModel, Field + +from intentkit.skills.web_scraper.base import WebScraperBaseTool +from intentkit.skills.web_scraper.utils import ( + DocumentProcessor, + MetadataManager, + ResponseFormatter, + VectorStoreManager, + index_documents, +) + +logger = logging.getLogger(__name__) + + +class DocumentIndexerInput(BaseModel): + """Input for DocumentIndexer tool.""" + + text_content: str = Field( + description="The text content to add to the vector database. Can be content from Google Docs, Notion, or any other text source", + min_length=10, + max_length=100000, + ) + title: str = Field( + description="Title or name for this text content (will be used as metadata)", + max_length=200, + ) + source: str = Field( + description="Source of the text content (e.g., 'Google Doc', 'Notion Page', 'Manual Entry')", + default="Manual Entry", + max_length=100, + ) + chunk_size: int = Field( + description="Size of text chunks for indexing (default: 1000)", + default=1000, + ge=100, + le=4000, + ) + chunk_overlap: int = Field( + description="Overlap between chunks (default: 200)", + default=200, + ge=0, + le=1000, + ) + tags: str = Field( + description="Optional tags for categorizing the content (comma-separated)", + default="", + max_length=500, + ) + + +class DocumentIndexer(WebScraperBaseTool): + """Tool for importing and indexing document content to the vector database. + + This tool allows users to copy and paste document content from various sources + (like Google Docs, Notion, PDFs, etc.) and index it directly into the vector store + for later querying and retrieval. 
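+
+    Illustrative flow (names match this module): pasted text becomes a
+    Document via DocumentProcessor.create_document, is chunked and merged
+    into the agent's FAISS index by index_documents, and can then be
+    retrieved with the query_indexed_content skill.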
+    """
+
+    name: str = "web_scraper_document_indexer"
+    description: str = (
+        "Import and index document content directly to the vector database. "
+        "Perfect for adding content from Google Docs, Notion pages, PDFs, or any other document sources. "
+        "The indexed content can then be queried using the query_indexed_content tool."
+    )
+    args_schema: Type[BaseModel] = DocumentIndexerInput
+
+    async def _arun(
+        self,
+        text_content: str,
+        title: str,
+        source: str = "Manual Entry",
+        chunk_size: int = 1000,
+        chunk_overlap: int = 200,
+        tags: str = "",
+        **kwargs,
+    ) -> str:
+        """Add text content to the vector database."""
+        # Agent context is required; fail fast if it is missing
+        context = self.get_context()
+        if not context or not context.agent_id:
+            raise ValueError("Agent ID is required but not found in configuration")
+
+        agent_id = context.agent_id
+
+        logger.info(f"[{agent_id}] Starting document indexing for title: '{title}'")
+
+        # Validate content
+        if not DocumentProcessor.validate_content(text_content):
+            logger.error(f"[{agent_id}] Content validation failed - too short")
+            return "Error: Text content is too short. Please provide at least 10 characters of content."
+
+        # Create document with metadata
+        document = DocumentProcessor.create_document(
+            text_content,
+            title,
+            source,
+            tags,
+            extra_metadata={"source_type": "document_indexer"},
+        )
+
+        logger.info(
+            f"[{agent_id}] Document created, length: {len(document.page_content)} chars"
+        )
+
+        # Index the document
+        total_chunks, was_merged = await index_documents(
+            [document], agent_id, self.skill_store, chunk_size, chunk_overlap
+        )
+
+        # Get current storage size for response
+        vs_manager = VectorStoreManager(self.skill_store)
+        current_size = await vs_manager.get_content_size(agent_id)
+
+        # Update metadata
+        metadata_manager = MetadataManager(self.skill_store)
+        new_metadata = metadata_manager.create_document_metadata(
+            title, source, tags, [document], len(text_content)
+        )
+        await metadata_manager.update_metadata(agent_id, new_metadata)
+
+        logger.info(f"[{agent_id}] Metadata updated successfully")
+
+        # Format response
+        response = ResponseFormatter.format_indexing_response(
+            "indexed",
+            f"Document: {title}",
+            total_chunks,
+            chunk_size,
+            chunk_overlap,
+            was_merged,
+            current_size_bytes=current_size,
+        )
+
+        logger.info(f"[{agent_id}] Document indexing completed successfully")
+        return response diff --git a/intentkit/skills/web_scraper/langchain.png b/intentkit/skills/web_scraper/langchain.png new file mode 100644 index 00000000..1df8572b Binary files /dev/null and b/intentkit/skills/web_scraper/langchain.png differ diff --git a/intentkit/skills/web_scraper/schema.json b/intentkit/skills/web_scraper/schema.json new file mode 100644 index 00000000..d2d25f5f --- /dev/null +++ b/intentkit/skills/web_scraper/schema.json @@ -0,0 +1,143 @@ +{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "title": "Web Scraper & Content Indexing",
+  "description": "Scrape web content and index it for intelligent querying and retrieval",
+  "x-icon": "https://ai.service.crestal.dev/skills/web_scraper/langchain.png",
+  "x-tags": [
+    "Web Scraping",
+    "Content Indexing",
+    "Vector Search",
+    "LangChain",
+    "Document Retrieval"
+  ],
+  "properties": {
+    "enabled": {
+      "type": "boolean",
+      "title": "Enabled",
+      "description": "Whether this skill is enabled",
+      "default": false
+    },
+    "states": {
+      "type": "object",
+      "properties": {
"scrape_and_index": { + "type": "string", + "title": "Scrape & Index Content", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Scrape content from web URLs and index it into a searchable vector store for later retrieval. Supports multiple URLs, customizable chunking, and persistent storage.", + "default": "private" + }, + "query_indexed_content": { + "type": "string", + "title": "Query Indexed Content", + "enum": [ + "disabled", + "public", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner + All Users", + "Agent Owner Only" + ], + "description": "Search and retrieve relevant information from previously indexed web content using semantic similarity. Perfect for answering questions based on scraped documents.", + "default": "private" + }, + "website_indexer": { + "type": "string", + "title": "Complete Website Indexer", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Index entire websites by discovering and scraping all pages using sitemaps. Automatically finds sitemaps from robots.txt, extracts all URLs, and comprehensively indexes website content.", + "default": "private" + }, + "document_indexer": { + "type": "string", + "title": "Document Content Indexer", + "enum": [ + "disabled", + "private" + ], + "x-enum-title": [ + "Disabled", + "Agent Owner Only" + ], + "description": "Import and index document content directly to the vector database. Perfect for adding content from Google Docs, Notion pages, PDFs, or any other document sources by copy-pasting.", + "default": "private" + } + }, + "description": "Configure the availability of each web scraper skill (disabled, public, or private)" + }, + "max_urls_per_request": { + "type": "integer", + "title": "Max URLs per Request", + "description": "Maximum number of URLs that can be scraped in a single request", + "default": 10, + "minimum": 1, + "maximum": 20 + }, + "default_chunk_size": { + "type": "integer", + "title": "Default Chunk Size", + "description": "Default size of text chunks for document indexing (characters)", + "default": 1000, + "minimum": 100, + "maximum": 4000 + }, + "default_chunk_overlap": { + "type": "integer", + "title": "Default Chunk Overlap", + "description": "Default overlap between chunks to maintain context (characters)", + "default": 200, + "minimum": 0, + "maximum": 1000 + }, + "requests_per_second": { + "type": "number", + "title": "Requests per Second", + "description": "Rate limit for web scraping to be respectful to target servers", + "default": 2, + "minimum": 0.1, + "maximum": 10 + }, + "request_timeout": { + "type": "integer", + "title": "Request Timeout", + "description": "Timeout for web requests in seconds", + "default": 30, + "minimum": 5, + "maximum": 120 + }, + "api_key_provider": { + "type": "string", + "title": "API Key Provider", + "description": "Who provides the API key for embeddings", + "enum": [ + "platform" + ], + "x-enum-title": [ + "Platform Hosted" + ], + "default": "platform" + } + }, + "required": [ + "states", + "enabled" + ], + "additionalProperties": true +} \ No newline at end of file diff --git a/intentkit/skills/web_scraper/scrape_and_index.py b/intentkit/skills/web_scraper/scrape_and_index.py new file mode 100644 index 00000000..c0e7b8e5 --- /dev/null +++ b/intentkit/skills/web_scraper/scrape_and_index.py @@ -0,0 +1,259 @@ +import logging +from typing import List, Type + +from pydantic import BaseModel, Field + +from 
intentkit.skills.web_scraper.base import WebScraperBaseTool
+from intentkit.skills.web_scraper.utils import (
+    DEFAULT_CHUNK_OVERLAP,
+    DEFAULT_CHUNK_SIZE,
+    MetadataManager,
+    ResponseFormatter,
+    VectorStoreManager,
+    scrape_and_index_urls,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class ScrapeAndIndexInput(BaseModel):
+    """Input for ScrapeAndIndex tool."""
+
+    urls: List[str] = Field(
+        description="List of URLs to scrape and index. Each URL should be a valid web address starting with http:// or https://",
+        min_items=1,
+        max_items=25,
+    )
+    chunk_size: int = Field(
+        description="Size of text chunks for indexing (default: 1000)",
+        default=DEFAULT_CHUNK_SIZE,
+        ge=100,
+        le=4000,
+    )
+    chunk_overlap: int = Field(
+        description="Overlap between chunks (default: 200)",
+        default=DEFAULT_CHUNK_OVERLAP,
+        ge=0,
+        le=1000,
+    )
+
+
+class QueryIndexInput(BaseModel):
+    """Input for QueryIndex tool."""
+
+    query: str = Field(
+        description="Question or query to search in the indexed content",
+        min_length=1,
+        max_length=500,
+    )
+    max_results: int = Field(
+        description="Maximum number of relevant documents to return (default: 4)",
+        default=4,
+        ge=1,
+        le=10,
+    )
+
+
+class ScrapeAndIndex(WebScraperBaseTool):
+    """Tool for scraping web content and indexing it into a searchable vector store.
+
+    This tool can scrape multiple URLs, process the content into chunks,
+    and store it in a vector database for later retrieval and question answering.
+    """
+
+    name: str = "web_scraper_scrape_and_index"
+    description: str = (
+        "Scrape content from one or more web URLs and index them into a vector store for later querying.\n"
+        "Use this tool to collect and index web content that you want to reference later.\n"
+        "The indexed content can then be queried using the query_indexed_content tool."
+    )
+    args_schema: Type[BaseModel] = ScrapeAndIndexInput
+
+    async def _arun(
+        self,
+        urls: List[str],
+        chunk_size: int = DEFAULT_CHUNK_SIZE,
+        chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
+        **kwargs,
+    ) -> str:
+        """Scrape URLs and index content into vector store."""
+        try:
+            # Agent context is required; fail fast if it is missing
+            context = self.get_context()
+            if not context or not context.agent_id:
+                raise ValueError("Agent ID is required but not found in configuration")
+
+            agent_id = context.agent_id
+
+            logger.info(
+                f"[{agent_id}] Starting scrape and index operation with {len(urls)} URLs"
+            )
+
+            # Use the utility function to scrape and index URLs
+            total_chunks, was_merged, valid_urls = await scrape_and_index_urls(
+                urls, agent_id, self.skill_store, chunk_size, chunk_overlap
+            )
+
+            logger.info(
+                f"[{agent_id}] Scraping completed: {total_chunks} chunks indexed, merged: {was_merged}"
+            )
+
+            if not valid_urls:
+                logger.error(f"[{agent_id}] No valid URLs provided")
+                return "Error: No valid URLs provided. URLs must start with http:// or https://"
+
+            if total_chunks == 0:
+                logger.error(f"[{agent_id}] No content extracted from URLs")
+                return "Error: No content could be extracted from the provided URLs."
+
+            # Get current storage size for response
+            vs_manager = VectorStoreManager(self.skill_store)
+            current_size = await vs_manager.get_content_size(agent_id)
+            size_limit_reached = len(valid_urls) < len(urls)
+
+            # Update metadata
+            metadata_manager = MetadataManager(self.skill_store)
+            new_metadata = metadata_manager.create_url_metadata(
+                valid_urls, [], "scrape_and_index"
+            )
+            await metadata_manager.update_metadata(agent_id, new_metadata)
+
+            logger.info(f"[{agent_id}] Metadata updated successfully")
+
+            # Format response
+            response = ResponseFormatter.format_indexing_response(
+                "scraped and indexed",
+                valid_urls,
+                total_chunks,
+                chunk_size,
+                chunk_overlap,
+                was_merged,
+                current_size_bytes=current_size,
+                size_limit_reached=size_limit_reached,
+                total_requested_urls=len(urls),
+            )
+
+            logger.info(
+                f"[{agent_id}] Scrape and index operation completed successfully"
+            )
+            return response
+
+        except Exception as e:
+            # Extract agent_id for error logging if possible
+            agent_id = "UNKNOWN"
+            try:
+                # TODO: Fix config reference
+                context = self.get_context()
+                if context and context.agent_id:
+                    agent_id = context.agent_id
+            except Exception:
+                pass
+
+            logger.error(f"[{agent_id}] Error in ScrapeAndIndex: {e}", exc_info=True)
+            raise type(e)(f"[agent:{agent_id}]: {e}") from e
+
+
+class QueryIndexedContent(WebScraperBaseTool):
+    """Tool for querying previously indexed web content.
+
+    This tool searches through content that was previously scraped and indexed
+    using the scrape_and_index tool to answer questions or find relevant information.
+    """
+
+    name: str = "web_scraper_query_indexed_content"
+    description: str = (
+        "Query previously indexed web content to find relevant information and answer questions.\n"
+        "Use this tool to search through content that was previously scraped and indexed.\n"
+        "This tool can help answer questions based on the indexed web content."
+    )
+    args_schema: Type[BaseModel] = QueryIndexInput
+
+    async def _arun(
+        self,
+        query: str,
+        max_results: int = 4,
+        **kwargs,
+    ) -> str:
+        """Query the indexed content."""
+        try:
+            # Agent context is required; fail fast if it is missing
+            context = self.get_context()
+            if not context or not context.agent_id:
+                raise ValueError("Agent ID is required but not found in configuration")
+
+            agent_id = context.agent_id
+
+            logger.info(f"[{agent_id}] Starting query operation: '{query}'")
+
+            # Retrieve vector store
+            vector_store_key = f"vector_store_{agent_id}"
+
+            logger.info(f"[{agent_id}] Looking for vector store: {vector_store_key}")
+
+            stored_data = await self.skill_store.get_agent_skill_data(
+                agent_id, "web_scraper", vector_store_key
+            )
+
+            if not stored_data:
+                logger.warning(f"[{agent_id}] No vector store found")
+                return "No indexed content found. Please use the scrape_and_index tool first to scrape and index some web content before querying."
+
+            if "faiss_files" not in stored_data:
+                logger.warning(f"[{agent_id}] Invalid stored data structure")
+                return "No indexed content found. Please use the scrape_and_index tool first to scrape and index some web content before querying."
+ + # Create embeddings and decode vector store + logger.info(f"[{agent_id}] Decoding vector store") + vs_manager = VectorStoreManager(self.skill_store) + embeddings = vs_manager.create_embeddings() + vector_store = vs_manager.decode_vector_store( + stored_data["faiss_files"], embeddings + ) + + logger.info( + f"[{agent_id}] Vector store loaded, index count: {vector_store.index.ntotal}" + ) + + # Perform similarity search + docs = vector_store.similarity_search(query, k=max_results) + logger.info(f"[{agent_id}] Found {len(docs)} similar documents") + + if not docs: + logger.info(f"[{agent_id}] No relevant documents found for query") + return f"No relevant information found for your query: '{query}'. The indexed content may not contain information related to your search." + + # Format results + results = [] + for i, doc in enumerate(docs, 1): + content = doc.page_content.strip() + source = doc.metadata.get("source", "Unknown") + results.append(f"**Source {i}:** {source}\n{content}") + + response = "\n\n".join(results) + logger.info( + f"[{agent_id}] Query completed successfully, returning {len(response)} chars" + ) + + return response + + except Exception as e: + # Extract agent_id for error logging if possible + agent_id = "UNKNOWN" + try: + # TODO: Fix config reference + context = self.get_context() + if context and context.agent_id: + agent_id = context.agent_id + except Exception: + pass + + logger.error( + f"[{agent_id}] Error in QueryIndexedContent: {e}", exc_info=True + ) + raise type(e)(f"[agent:{agent_id}]: {e}") from e diff --git a/intentkit/skills/web_scraper/utils.py b/intentkit/skills/web_scraper/utils.py new file mode 100644 index 00000000..a221042e --- /dev/null +++ b/intentkit/skills/web_scraper/utils.py @@ -0,0 +1,684 @@ +""" +Utility functions for web scraper skills. + +This module contains common functionality used across all web scraper skills +to reduce code duplication and improve maintainability. 
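+
+Main pieces: VectorStoreManager (FAISS encode/decode and persistence),
+MetadataManager, DocumentProcessor, ResponseFormatter, and the
+scrape_and_index_urls / index_documents entry points used by the skills.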
+""" + +import asyncio +import base64 +import logging +import os +import tempfile +from typing import Dict, List, Optional, Tuple + +from langchain_community.vectorstores import FAISS +from langchain_core.documents import Document +from langchain_openai import OpenAIEmbeddings +from langchain_text_splitters import RecursiveCharacterTextSplitter + +from intentkit.abstracts.skill import SkillStoreABC + +logger = logging.getLogger(__name__) + +# Constants +DEFAULT_CHUNK_SIZE = 1000 +DEFAULT_CHUNK_OVERLAP = 200 +DEFAULT_REQUEST_TIMEOUT = 30 +DEFAULT_REQUESTS_PER_SECOND = 2 +MAX_CONTENT_SIZE_MB = 10 # 10 MB limit +MAX_CONTENT_SIZE_BYTES = MAX_CONTENT_SIZE_MB * 1024 * 1024 + +# HTTP Headers to bypass Cloudflare and other bot protection +DEFAULT_HEADERS = { + "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", + "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8", + "Accept-Language": "en-US,en;q=0.9", + "Accept-Encoding": "gzip, deflate, br", + "DNT": "1", + "Connection": "keep-alive", + "Upgrade-Insecure-Requests": "1", + "Sec-Fetch-Dest": "document", + "Sec-Fetch-Mode": "navigate", + "Sec-Fetch-Site": "none", + "Sec-Fetch-User": "?1", + "Cache-Control": "max-age=0", +} + +# Alternative headers for fallback when primary headers fail +FALLBACK_HEADERS = { + "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", + "Accept": "*/*", + "Accept-Language": "en-US,en;q=0.5", + "Accept-Encoding": "gzip, deflate", + "Connection": "keep-alive", +} + +# Storage keys +VECTOR_STORE_KEY_PREFIX = "vector_store" +METADATA_KEY_PREFIX = "indexed_urls" + + +class VectorStoreManager: + """Manages vector store operations including creation, saving, loading, and merging.""" + + def __init__(self, skill_store: SkillStoreABC): + self.skill_store = skill_store + + def create_embeddings(self) -> OpenAIEmbeddings: + """Create OpenAI embeddings using system API key.""" + api_key = self.skill_store.get_system_config("openai_api_key") + return OpenAIEmbeddings(api_key=api_key) + + def get_storage_keys(self, agent_id: str) -> Tuple[str, str]: + """Get storage keys for vector store and metadata.""" + vector_store_key = f"{VECTOR_STORE_KEY_PREFIX}_{agent_id}" + metadata_key = f"{METADATA_KEY_PREFIX}_{agent_id}" + return vector_store_key, metadata_key + + def encode_vector_store(self, vector_store: FAISS) -> Dict[str, str]: + """Encode FAISS vector store to base64 for storage.""" + with tempfile.TemporaryDirectory() as temp_dir: + vector_store.save_local(temp_dir) + + encoded_files = {} + for filename in os.listdir(temp_dir): + file_path = os.path.join(temp_dir, filename) + if os.path.isfile(file_path): + with open(file_path, "rb") as f: + encoded_files[filename] = base64.b64encode(f.read()).decode( + "utf-8" + ) + + return encoded_files + + def decode_vector_store( + self, encoded_files: Dict[str, str], embeddings: OpenAIEmbeddings + ) -> FAISS: + """Decode base64 files back to FAISS vector store.""" + with tempfile.TemporaryDirectory() as temp_dir: + # Decode and write files + for filename, encoded_content in encoded_files.items(): + file_path = os.path.join(temp_dir, filename) + with open(file_path, "wb") as f: + f.write(base64.b64decode(encoded_content)) + + # Load vector store + return FAISS.load_local( + temp_dir, + embeddings, + allow_dangerous_deserialization=True, + ) + + async def get_existing_vector_store(self, 
agent_id: str) -> Optional[Dict]: + """Get existing vector store data if it exists.""" + vector_store_key, _ = self.get_storage_keys(agent_id) + return await self.skill_store.get_agent_skill_data( + agent_id, "web_scraper", vector_store_key + ) + + async def merge_with_existing( + self, + new_documents: List[Document], + agent_id: str, + chunk_size: int = DEFAULT_CHUNK_SIZE, + chunk_overlap: int = DEFAULT_CHUNK_OVERLAP, + ) -> Tuple[FAISS, bool]: + """ + Merge new documents with existing vector store or create new one. + + Returns: + Tuple of (vector_store, was_merged) + """ + embeddings = self.create_embeddings() + existing_data = await self.get_existing_vector_store(agent_id) + + if existing_data and "faiss_files" in existing_data: + try: + logger.info(f"[{agent_id}] Merging content with existing vector store") + + # Create new vector store from new documents + new_vector_store = FAISS.from_documents(new_documents, embeddings) + + # Load existing vector store + existing_vector_store = self.decode_vector_store( + existing_data["faiss_files"], embeddings + ) + + # Merge stores + existing_vector_store.merge_from(new_vector_store) + return existing_vector_store, True + + except Exception as e: + logger.warning( + f"[{agent_id}] Merge failed, creating new vector store: {e}" + ) + logger.info(f"[{agent_id}] Creating new vector store") + + # Create new vector store + logger.info(f"[{agent_id}] Creating new vector store") + vector_store = FAISS.from_documents(new_documents, embeddings) + return vector_store, False + + async def save_vector_store( + self, + vector_store: FAISS, + agent_id: str, + chunk_size: int = DEFAULT_CHUNK_SIZE, + chunk_overlap: int = DEFAULT_CHUNK_OVERLAP, + ) -> None: + """Save vector store to agent skill data.""" + vector_store_key, _ = self.get_storage_keys(agent_id) + + logger.info(f"[{agent_id}] Saving vector store") + + # Encode vector store + encoded_files = self.encode_vector_store(vector_store) + + # Prepare data for storage + storage_data = { + "faiss_files": encoded_files, + "chunk_size": chunk_size, + "chunk_overlap": chunk_overlap, + } + + try: + # Save to storage + await self.skill_store.save_agent_skill_data( + agent_id=agent_id, + skill="web_scraper", + key=vector_store_key, + data=storage_data, + ) + + logger.info(f"[{agent_id}] Successfully saved vector store") + + except Exception as e: + logger.error(f"[{agent_id}] Failed to save vector store: {e}") + raise + + async def load_vector_store(self, agent_id: str) -> Optional[FAISS]: + """Load vector store for an agent.""" + stored_data = await self.get_existing_vector_store(agent_id) + + if not stored_data or "faiss_files" not in stored_data: + return None + + try: + embeddings = self.create_embeddings() + return self.decode_vector_store(stored_data["faiss_files"], embeddings) + except Exception as e: + logger.error(f"Error loading vector store for agent {agent_id}: {e}") + return None + + async def get_content_size(self, agent_id: str) -> int: + """Get the current content size in bytes for an agent.""" + stored_data = await self.get_existing_vector_store(agent_id) + if not stored_data: + return 0 + + # Calculate size from stored FAISS files + total_size = 0 + if "faiss_files" in stored_data: + for encoded_content in stored_data["faiss_files"].values(): + # Base64 encoded content size (approximate original size) + total_size += len(base64.b64decode(encoded_content)) + + return total_size + + def format_size(self, size_bytes: int) -> str: + """Format size in bytes to human readable format.""" + if size_bytes 
< 1024: + return f"{size_bytes} B" + elif size_bytes < 1024 * 1024: + return f"{size_bytes / 1024:.1f} KB" + else: + return f"{size_bytes / (1024 * 1024):.1f} MB" + + +class DocumentProcessor: + """Handles document processing operations.""" + + @staticmethod + def create_chunks( + documents: List[Document], + chunk_size: int = DEFAULT_CHUNK_SIZE, + chunk_overlap: int = DEFAULT_CHUNK_OVERLAP, + ) -> List[Document]: + """Split documents into chunks.""" + text_splitter = RecursiveCharacterTextSplitter( + chunk_size=chunk_size, + chunk_overlap=chunk_overlap, + length_function=len, + ) + return text_splitter.split_documents(documents) + + @staticmethod + def clean_text(text: str) -> str: + """Clean and normalize text content.""" + lines = text.split("\n") + cleaned_lines = [] + + for line in lines: + cleaned_line = line.strip() + if cleaned_line: + cleaned_lines.append(cleaned_line) + + cleaned_text = "\n".join(cleaned_lines) + + # Remove excessive consecutive newlines + while "\n\n\n" in cleaned_text: + cleaned_text = cleaned_text.replace("\n\n\n", "\n\n") + + return cleaned_text.strip() + + @staticmethod + def validate_content(content: str, min_length: int = 10) -> bool: + """Validate content meets minimum requirements.""" + return len(content.strip()) >= min_length + + @staticmethod + def create_document( + content: str, + title: str, + source: str, + tags: str = "", + extra_metadata: Optional[Dict] = None, + ) -> Document: + """Create a Document with standardized metadata.""" + cleaned_content = DocumentProcessor.clean_text(content) + + # Parse tags + tag_list = ( + [tag.strip() for tag in tags.split(",") if tag.strip()] if tags else [] + ) + + metadata = { + "title": title, + "source": source, + "source_type": "manual", + "tags": tag_list, + "length": len(cleaned_content), + "indexed_at": str(asyncio.get_event_loop().time()), + } + + # Add extra metadata if provided + if extra_metadata: + metadata.update(extra_metadata) + + return Document(page_content=cleaned_content, metadata=metadata) + + +class MetadataManager: + """Manages metadata for indexed content.""" + + def __init__(self, skill_store: SkillStoreABC): + self.skill_store = skill_store + + async def get_existing_metadata(self, agent_id: str) -> Dict: + """Get existing metadata for an agent.""" + vs_manager = VectorStoreManager(self.skill_store) + _, metadata_key = vs_manager.get_storage_keys(agent_id) + return ( + await self.skill_store.get_agent_skill_data( + agent_id, "web_scraper", metadata_key + ) + or {} + ) + + def create_url_metadata( + self, + urls: List[str], + split_docs: List[Document], + source_type: str = "web_scraper", + extra_fields: Optional[Dict] = None, + ) -> Dict: + """Create metadata for a list of URLs.""" + metadata = {} + current_time = str(asyncio.get_event_loop().time()) + + for url in urls: + url_metadata = { + "indexed_at": current_time, + "chunks": len( + [doc for doc in split_docs if doc.metadata.get("source") == url] + ), + "source_type": source_type, + } + + if extra_fields: + url_metadata.update(extra_fields) + + metadata[url] = url_metadata + + return metadata + + def create_document_metadata( + self, + title: str, + source: str, + tags: str, + split_docs: List[Document], + document_length: int, + ) -> Dict: + """Create metadata for a document.""" + # Generate unique key + key = f"document_{title.lower().replace(' ', '_')}" + + return { + key: { + "title": title, + "source": source, + "source_type": "document_indexer", + "tags": [tag.strip() for tag in tags.split(",") if tag.strip()] + if tags + 
else [], + "indexed_at": str(asyncio.get_event_loop().time()), + "chunks": len(split_docs), + "length": document_length, + } + } + + async def update_metadata(self, agent_id: str, new_metadata: Dict) -> None: + """Update metadata for an agent.""" + vs_manager = VectorStoreManager(self.skill_store) + _, metadata_key = vs_manager.get_storage_keys(agent_id) + + # Get existing metadata + existing_metadata = await self.get_existing_metadata(agent_id) + + # Update with new metadata + existing_metadata.update(new_metadata) + + # Save updated metadata + await self.skill_store.save_agent_skill_data( + agent_id=agent_id, + skill="web_scraper", + key=metadata_key, + data=existing_metadata, + ) + + +class ResponseFormatter: + """Formats consistent responses for web scraper skills.""" + + @staticmethod + def format_indexing_response( + operation_type: str, + urls_or_content: List[str] | str, + total_chunks: int, + chunk_size: int, + chunk_overlap: int, + was_merged: bool, + extra_info: Optional[Dict] = None, + current_size_bytes: int = 0, + size_limit_reached: bool = False, + total_requested_urls: int = 0, + ) -> str: + """Format a consistent response for indexing operations.""" + + # Handle both URL lists and single content + if isinstance(urls_or_content, list): + urls = urls_or_content + processed_count = len(urls) + + if size_limit_reached and total_requested_urls > 0: + content_summary = f"Processed {processed_count} of {total_requested_urls} URLs (size limit reached)" + else: + content_summary = ( + f"Successfully {operation_type} {processed_count} URLs" + ) + + if len(urls) <= 5: + url_list = "\n".join([f"- {url}" for url in urls]) + else: + displayed_urls = urls[:5] + remaining_count = len(urls) - 5 + url_list = "\n".join([f"- {url}" for url in displayed_urls]) + url_list += f"\n... and {remaining_count} more" + else: + content_summary = f"Successfully {operation_type} content" + url_list = "" + + # Build response + response_parts = [content_summary] + + if url_list: + response_parts.append(url_list) + + response_parts.extend( + [ + f"Total chunks created: {total_chunks}", + f"Chunk size: {chunk_size} characters", + f"Chunk overlap: {chunk_overlap} characters", + f"Vector store: {'merged with existing content' if was_merged else 'created new index'}", + ] + ) + + # Add size information + if current_size_bytes > 0: + vs_manager = VectorStoreManager(None) # Just for formatting + formatted_size = vs_manager.format_size(current_size_bytes) + max_size = vs_manager.format_size(MAX_CONTENT_SIZE_BYTES) + response_parts.append( + f"Current storage size: {formatted_size} / {max_size}" + ) + + if size_limit_reached: + response_parts.append("Size limit reached - some URLs were not processed") + + if extra_info: + for key, value in extra_info.items(): + response_parts.append(f"{key}: {value}") + + response_parts.append( + "All content has been indexed and can be queried using the query_indexed_content tool." + ) + + return "\n".join(response_parts) + + +async def scrape_and_index_urls( + urls: List[str], + agent_id: str, + skill_store: SkillStoreABC, + chunk_size: int = DEFAULT_CHUNK_SIZE, + chunk_overlap: int = DEFAULT_CHUNK_OVERLAP, + requests_per_second: int = DEFAULT_REQUESTS_PER_SECOND, +) -> Tuple[int, bool, List[str]]: + """ + Scrape URLs and index their content into vector store with size limits. 
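+    URLs are processed one at a time so the MAX_CONTENT_SIZE_BYTES cap can
+    stop the run early; each URL is fetched with DEFAULT_HEADERS, retried with
+    FALLBACK_HEADERS on failure, then chunked and indexed via index_documents.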
+ + Args: + urls: List of URLs to scrape + agent_id: Agent identifier for storage + skill_store: Skill store instance + chunk_size: Size of text chunks + chunk_overlap: Overlap between chunks + requests_per_second: Rate limiting for requests + + Returns: + Tuple of (total_chunks, was_merged, valid_urls) + """ + from urllib.parse import urlparse + + from langchain_community.document_loaders import WebBaseLoader + + # Validate URLs + valid_urls = [] + for url in urls: + try: + parsed = urlparse(url) + if parsed.scheme in ["http", "https"] and parsed.netloc: + valid_urls.append(url) + else: + logger.warning(f"Invalid URL format: {url}") + except Exception as e: + logger.warning(f"Error parsing URL {url}: {e}") + + if not valid_urls: + return 0, False, [] + + # Check existing content size + vs_manager = VectorStoreManager(skill_store) + current_size = await vs_manager.get_content_size(agent_id) + + logger.info( + f"[{agent_id}] Current storage size: {vs_manager.format_size(current_size)}" + ) + + if current_size >= MAX_CONTENT_SIZE_BYTES: + logger.warning( + f"[{agent_id}] Storage limit already reached: {vs_manager.format_size(current_size)}" + ) + return 0, False, [] + + # Process URLs one by one with size checking + processed_urls = [] + total_chunks = 0 + was_merged = False + size_limit_reached = False + + for i, url in enumerate(valid_urls): + if current_size >= MAX_CONTENT_SIZE_BYTES: + size_limit_reached = True + logger.warning(f"[{agent_id}] Size limit reached after processing {i} URLs") + break + + try: + logger.info(f"[{agent_id}] Processing URL {i + 1}/{len(valid_urls)}: {url}") + + # Load single URL with enhanced headers + loader = WebBaseLoader( + web_paths=[url], + requests_per_second=requests_per_second, + ) + + # Configure loader with enhanced headers to bypass bot protection + loader.requests_kwargs = { + "verify": True, + "timeout": DEFAULT_REQUEST_TIMEOUT, + "headers": DEFAULT_HEADERS, + } + + # Scrape the URL with retry logic + documents = None + try: + documents = await asyncio.to_thread(loader.load) + except Exception as primary_error: + # If primary headers fail, try fallback headers + logger.warning( + f"[{agent_id}] Primary headers failed for {url}, trying fallback: {primary_error}" + ) + + loader.requests_kwargs["headers"] = FALLBACK_HEADERS + try: + documents = await asyncio.to_thread(loader.load) + logger.info(f"[{agent_id}] Fallback headers succeeded for {url}") + except Exception as fallback_error: + logger.error( + f"[{agent_id}] Both header sets failed for {url}: {fallback_error}" + ) + raise fallback_error + + if not documents: + logger.warning(f"[{agent_id}] No content extracted from {url}") + continue + + # Check content size before processing + content_size = sum( + len(doc.page_content.encode("utf-8")) for doc in documents + ) + + if current_size + content_size > MAX_CONTENT_SIZE_BYTES: + logger.warning( + f"[{agent_id}] Adding {url} would exceed size limit. Skipping." 
+ ) + size_limit_reached = True + break + + # Process and index this URL's content + chunks, merged = await index_documents( + documents, agent_id, skill_store, chunk_size, chunk_overlap + ) + + if chunks > 0: + processed_urls.append(url) + total_chunks += chunks + was_merged = merged or was_merged + current_size += content_size + + logger.info( + f"[{agent_id}] Processed {url}: {chunks} chunks, current size: {vs_manager.format_size(current_size)}" + ) + + # Add delay for rate limiting + if i < len(valid_urls) - 1: # Don't delay after the last URL + await asyncio.sleep(1.0 / requests_per_second) + + except Exception as e: + logger.error(f"[{agent_id}] Error processing {url}: {e}") + continue + + # Log final results + if size_limit_reached: + logger.warning( + f"[{agent_id}] Size limit reached. Processed {len(processed_urls)}/{len(valid_urls)} URLs" + ) + else: + logger.info( + f"[{agent_id}] Successfully processed all {len(processed_urls)} URLs" + ) + + return total_chunks, was_merged, processed_urls + + +# Convenience function that combines all operations +async def index_documents( + documents: List[Document], + agent_id: str, + skill_store: SkillStoreABC, + chunk_size: int = DEFAULT_CHUNK_SIZE, + chunk_overlap: int = DEFAULT_CHUNK_OVERLAP, +) -> Tuple[int, bool]: + """ + Complete document indexing workflow. + + Returns: + Tuple of (total_chunks, was_merged) + """ + # Process documents + split_docs = DocumentProcessor.create_chunks(documents, chunk_size, chunk_overlap) + + if not split_docs: + raise ValueError("No content could be processed into chunks") + + # Handle vector store + vs_manager = VectorStoreManager(skill_store) + vector_store, was_merged = await vs_manager.merge_with_existing( + split_docs, agent_id, chunk_size, chunk_overlap + ) + + # Save vector store + await vs_manager.save_vector_store( + vector_store, agent_id, chunk_size, chunk_overlap + ) + + return len(split_docs), was_merged + + +# Error handling decorator +def handle_skill_errors(operation_name: str): + """Decorator for consistent error handling in skills.""" + + def decorator(func): + async def wrapper(*args, **kwargs): + try: + return await func(*args, **kwargs) + except Exception as e: + logger.error(f"Error in {operation_name}: {e}") + return f"Error {operation_name}: {str(e)}" + + return wrapper + + return decorator diff --git a/intentkit/skills/web_scraper/website_indexer.py b/intentkit/skills/web_scraper/website_indexer.py new file mode 100644 index 00000000..05de9eca --- /dev/null +++ b/intentkit/skills/web_scraper/website_indexer.py @@ -0,0 +1,454 @@ +import logging +from typing import List, Type +from urllib.parse import urljoin, urlparse + +import httpx +import openai +from pydantic import BaseModel, Field + +from intentkit.skills.web_scraper.base import WebScraperBaseTool +from intentkit.skills.web_scraper.utils import ( + DEFAULT_CHUNK_OVERLAP, + DEFAULT_CHUNK_SIZE, + MetadataManager, + ResponseFormatter, + VectorStoreManager, + scrape_and_index_urls, +) + +logger = logging.getLogger(__name__) + + +class WebsiteIndexerInput(BaseModel): + """Input for WebsiteIndexer tool.""" + + base_url: str = Field( + description="Base URL of the website to index (e.g., https://example.com). 
The tool will discover sitemaps and extract all URLs", + min_length=1, + ) + max_urls: int = Field( + description="Maximum number of URLs to scrape from the sitemap (default: 50)", + default=50, + ge=1, + le=200, + ) + chunk_size: int = Field( + description="Size of text chunks for indexing (default: 1000)", + default=DEFAULT_CHUNK_SIZE, + ge=100, + le=4000, + ) + chunk_overlap: int = Field( + description="Overlap between chunks (default: 200)", + default=DEFAULT_CHUNK_OVERLAP, + ge=0, + le=1000, + ) + include_patterns: List[str] = Field( + description="URL patterns to include (e.g., ['/blog/', '/docs/']). If empty, all URLs are included", + default=[], + ) + exclude_patterns: List[str] = Field( + description="URL patterns to exclude (e.g., ['/admin/', '/private/'])", + default=[], + ) + + +class WebsiteIndexer(WebScraperBaseTool): + """Tool for discovering and indexing entire websites using AI-powered sitemap analysis. + + This tool discovers sitemaps from robots.txt, extracts URLs from sitemap XML using GPT-4o-mini for + robust parsing of various sitemap formats, and then delegates to the proven scrape_and_index tool + for reliable content indexing. + """ + + name: str = "web_scraper_website_indexer" + description: str = ( + "Index an entire website by discovering sitemaps and extracting URLs efficiently. " + "This tool finds sitemaps from robots.txt, parses the XML content to extract URLs, " + "and then uses the reliable scrape_and_index functionality for content indexing." + ) + args_schema: Type[BaseModel] = WebsiteIndexerInput + + def _normalize_url(self, url: str) -> str: + """Normalize URL by ensuring it has a proper scheme.""" + if not url.startswith(("http://", "https://")): + return f"https://{url}" + return url + + async def _get_robots_txt(self, base_url: str) -> str: + """Fetch robots.txt content.""" + robots_url = urljoin(base_url, "/robots.txt") + + # Import headers from utils + from intentkit.skills.web_scraper.utils import DEFAULT_HEADERS, FALLBACK_HEADERS + + # Try with primary headers first + async with httpx.AsyncClient(timeout=30, headers=DEFAULT_HEADERS) as client: + try: + response = await client.get(robots_url) + if response.status_code == 200: + return response.text + except Exception as e: + logger.warning( + f"Primary headers failed for robots.txt from {robots_url}: {e}" + ) + + # Try with fallback headers + async with httpx.AsyncClient(timeout=30, headers=FALLBACK_HEADERS) as client: + try: + response = await client.get(robots_url) + if response.status_code == 200: + return response.text + except Exception as e: + logger.warning(f"Could not fetch robots.txt from {robots_url}: {e}") + return "" + + def _extract_sitemaps_from_robots( + self, robots_content: str, base_url: str + ) -> List[str]: + """Extract sitemap URLs from robots.txt content.""" + sitemaps = [] + + for line in robots_content.split("\n"): + line = line.strip() + if line.lower().startswith("sitemap:"): + sitemap_url = line.split(":", 1)[1].strip() + # Make relative URLs absolute + if sitemap_url.startswith("/"): + sitemap_url = urljoin(base_url, sitemap_url) + sitemaps.append(sitemap_url) + + return sitemaps + + def _get_common_sitemap_patterns(self, base_url: str) -> List[str]: + """Generate common sitemap URL patterns.""" + return [ + urljoin(base_url, "/sitemap.xml"), + urljoin(base_url, "/sitemap_index.xml"), + urljoin(base_url, "/sitemaps/sitemap.xml"), + urljoin(base_url, "/sitemap/sitemap.xml"), + urljoin(base_url, "/wp-sitemap.xml"), # WordPress + ] + + async def 
_fetch_sitemap_content(self, sitemap_url: str) -> str:
+        """Fetch sitemap XML content."""
+        # Import headers from utils
+        from intentkit.skills.web_scraper.utils import DEFAULT_HEADERS, FALLBACK_HEADERS
+
+        # Try with primary headers first
+        async with httpx.AsyncClient(timeout=30, headers=DEFAULT_HEADERS) as client:
+            try:
+                response = await client.get(sitemap_url)
+                if response.status_code == 200:
+                    return response.text
+            except Exception as e:
+                logger.warning(
+                    f"Primary headers failed for sitemap from {sitemap_url}: {e}"
+                )
+
+        # Try with fallback headers
+        async with httpx.AsyncClient(timeout=30, headers=FALLBACK_HEADERS) as client:
+            try:
+                response = await client.get(sitemap_url)
+                if response.status_code == 200:
+                    return response.text
+            except Exception as e:
+                logger.warning(f"Could not fetch sitemap from {sitemap_url}: {e}")
+        return ""
+
+    async def _get_all_sitemap_content(self, base_url: str) -> tuple[str, List[str]]:
+        """Get all sitemap content for AI analysis."""
+        all_content = []
+        found_sitemaps = []
+        processed_sitemaps = set()
+
+        # First, try to get sitemaps from robots.txt
+        robots_content = await self._get_robots_txt(base_url)
+        sitemap_urls = self._extract_sitemaps_from_robots(robots_content, base_url)
+
+        # If no sitemaps found in robots.txt, try common patterns
+        if not sitemap_urls:
+            sitemap_urls = self._get_common_sitemap_patterns(base_url)
+
+        logger.info(f"Checking {len(sitemap_urls)} potential sitemap URLs...")
+
+        # Process each sitemap URL
+        sitemaps_to_process = sitemap_urls[:]
+
+        while sitemaps_to_process:
+            sitemap_url = sitemaps_to_process.pop(0)
+
+            if sitemap_url in processed_sitemaps:
+                continue
+
+            processed_sitemaps.add(sitemap_url)
+
+            xml_content = await self._fetch_sitemap_content(sitemap_url)
+            if not xml_content:
+                continue
+
+            found_sitemaps.append(sitemap_url)
+            all_content.append(f"\n{xml_content}\n")
+
+            # Check if this contains references to other sitemaps (sitemap index)
+            if "<sitemapindex" in xml_content.lower() and "</sitemapindex>" in xml_content.lower():
+                # This might be a sitemap index - we'll let AI handle parsing it
+                pass
+
+        combined_xml = "\n".join(all_content) if all_content else ""
+        return combined_xml, found_sitemaps
+
+    def _create_ai_extraction_prompt(
+        self, sitemap_xml: str, include_patterns: List[str], exclude_patterns: List[str]
+    ) -> str:
+        """Create a prompt for AI to extract URLs from sitemap XML."""
+        filter_instructions = ""
+        if include_patterns:
+            filter_instructions += f"\n- INCLUDE only URLs containing these patterns: {', '.join(include_patterns)}"
+        if exclude_patterns:
+            filter_instructions += f"\n- EXCLUDE URLs containing these patterns: {', '.join(exclude_patterns)}"
+
+        return f"""Analyze this sitemap XML and extract all valid webpage URLs.
+
+SITEMAP XML CONTENT:
+{sitemap_xml}
+
+INSTRUCTIONS:
+- Extract only URLs from <loc> tags that point to actual web pages
+- Handle both standard sitemap format and sitemap index format
+- Ignore any URLs ending in .xml, .rss, .atom (these are feeds/sitemaps, not pages)
+- Skip any sitemap index entries that point to other sitemaps
+- Handle text-based sitemaps (simple URL lists)
+- Return only unique, valid HTTP/HTTPS URLs
+- Format as a simple list, one URL per line{filter_instructions}
+
+Extract the URLs now:"""
+
+    def _parse_ai_response(self, ai_response: str) -> List[str]:
+        """Parse AI response to extract clean URLs."""
+        urls = []
+
+        for line in ai_response.strip().split("\n"):
+            line = line.strip()
+            # Remove any markdown formatting, bullets, numbering
+            line = line.lstrip("- •*123456789. ")
+
+            # Check if it looks like a URL
+            if line.startswith(("http://", "https://")):
+                # Basic validation
+                try:
+                    parsed = urlparse(line)
+                    if parsed.netloc and not line.endswith((".xml", ".rss", ".atom")):
+                        urls.append(line)
+                except Exception:
+                    continue
+
+        return list(set(urls))  # Remove duplicates
+
+    async def _call_ai_model(self, prompt: str, context) -> str:
+        """Call OpenAI GPT-4o-mini to extract URLs from sitemap content."""
+        try:
+            # Get OpenAI API key using the standard pattern
+            from intentkit.skills.openai.base import OpenAIBaseTool
+
+            temp_tool = OpenAIBaseTool(skill_store=self.skill_store)
+            api_key = temp_tool.get_api_key()
+
+            # Initialize OpenAI client
+            client = openai.AsyncOpenAI(api_key=api_key)
+
+            # Call the API
+            response = await client.chat.completions.create(
+                model="gpt-4o-mini",
+                messages=[
+                    {
+                        "role": "system",
+                        "content": "You are an expert at parsing XML sitemaps and extracting webpage URLs. Always return only clean, valid URLs, one per line.",
+                    },
+                    {"role": "user", "content": prompt},
+                ],
+                max_tokens=2000,
+                temperature=0.1,
+            )
+
+            return response.choices[0].message.content.strip()
+
+        except Exception as e:
+            logger.error(f"Error calling OpenAI API: {e}")
+            raise
+
+    async def _arun(
+        self,
+        base_url: str,
+        max_urls: int = 50,
+        chunk_size: int = DEFAULT_CHUNK_SIZE,
+        chunk_overlap: int = DEFAULT_CHUNK_OVERLAP,
+        include_patterns: List[str] = None,
+        exclude_patterns: List[str] = None,
+        **kwargs,
+    ) -> str:
+        """Discover website sitemaps, extract URLs with AI, and delegate to scrape_and_index."""
+        try:
+            # Normalize inputs
+            base_url = self._normalize_url(base_url)
+            include_patterns = include_patterns or []
+            exclude_patterns = exclude_patterns or []
+
+            # Validate base URL
+            parsed_url = urlparse(base_url)
+            if not parsed_url.netloc:
+                return "Error: Invalid base URL provided. Please provide a valid URL (e.g., https://example.com)"
+
+            # Get agent context - raise an error if it is not available
+            # TODO: Fix config reference
+            context = self.get_context()
+            if not context or not context.agent_id:
+                raise ValueError("Agent ID is required but not found in configuration")
+
+            agent_id = context.agent_id
+
+            logger.info(f"[{agent_id}] Discovering sitemaps for {base_url}...")
+
+            # Get all sitemap content
+            sitemap_xml, found_sitemaps = await self._get_all_sitemap_content(base_url)
+
+            if not sitemap_xml:
+                logger.error(
+                    f"[{agent_id}] No accessible sitemaps found for {base_url}"
+                )
+                return f"Error: No accessible sitemaps found for {base_url}. The website might not have sitemaps or they might be inaccessible."
+
+            logger.info(
+                f"[{agent_id}] Found {len(found_sitemaps)} sitemap(s). 
Extracting URLs with AI..." + ) + + try: + # Use AI to extract URLs from sitemap + prompt = self._create_ai_extraction_prompt( + sitemap_xml, include_patterns, exclude_patterns + ) + ai_response = await self._call_ai_model(prompt, context) + all_urls = self._parse_ai_response(ai_response) + + logger.info( + f"[{agent_id}] AI extracted {len(all_urls)} URLs from sitemap" + ) + + except Exception as e: + logger.error( + f"[{agent_id}] AI extraction failed: {e}, falling back to regex" + ) + # Fallback to simple regex if AI fails + import re + + url_pattern = r"(https?://[^<]+)" + all_urls = re.findall(url_pattern, sitemap_xml) + + # Basic filtering for fallback + filtered_urls = [] + for url in all_urls: + # Skip XML files (sitemaps) + if url.endswith((".xml", ".rss", ".atom")): + continue + + # Apply exclude patterns + if exclude_patterns and any( + pattern in url for pattern in exclude_patterns + ): + continue + + # Apply include patterns + if include_patterns: + if any(pattern in url for pattern in include_patterns): + filtered_urls.append(url) + else: + filtered_urls.append(url) + + all_urls = filtered_urls + logger.info( + f"[{agent_id}] Regex fallback extracted {len(all_urls)} URLs from sitemap" + ) + + # Remove duplicates and limit + unique_urls = list(set(all_urls))[:max_urls] + + if not unique_urls: + logger.error( + f"[{agent_id}] No valid URLs found in sitemaps after filtering" + ) + return f"Error: No valid URLs found in sitemaps after filtering. Found sitemaps: {', '.join(found_sitemaps)}" + + logger.info( + f"[{agent_id}] Extracted {len(unique_urls)} URLs from sitemaps. Scraping and indexing..." + ) + + # Use the utility function to scrape and index URLs directly + total_chunks, was_merged, valid_urls = await scrape_and_index_urls( + unique_urls, agent_id, self.skill_store, chunk_size, chunk_overlap + ) + + if total_chunks == 0: + logger.error( + f"[{agent_id}] No content could be extracted from discovered URLs" + ) + return f"Error: No content could be extracted from the discovered URLs. 
Found sitemaps: {', '.join(found_sitemaps)}" + + # Get current storage size for response + vs_manager = VectorStoreManager(self.skill_store) + current_size = await vs_manager.get_content_size(agent_id) + size_limit_reached = len(valid_urls) < len(unique_urls) + + # Update metadata + metadata_manager = MetadataManager(self.skill_store) + new_metadata = metadata_manager.create_url_metadata( + valid_urls, [], "website_indexer" + ) + await metadata_manager.update_metadata(agent_id, new_metadata) + + logger.info(f"[{agent_id}] Website indexing completed successfully") + + # Format the indexing result + result = ResponseFormatter.format_indexing_response( + "scraped and indexed", + valid_urls, + total_chunks, + chunk_size, + chunk_overlap, + was_merged, + current_size_bytes=current_size, + size_limit_reached=size_limit_reached, + total_requested_urls=len(unique_urls), + ) + + # Enhance the response with sitemap discovery info + enhanced_result = ( + f"WEBSITE INDEXING COMPLETE\n" + f"Base URL: {base_url}\n" + f"Sitemaps discovered: {len(found_sitemaps)}\n" + f"URLs extracted: {len(unique_urls)}\n" + f"URLs successfully indexed: {len(valid_urls)}\n" + f"Include patterns: {', '.join(include_patterns) if include_patterns else 'None (all URLs)'}\n" + f"Exclude patterns: {', '.join(exclude_patterns) if exclude_patterns else 'None'}\n\n" + f"DISCOVERED SITEMAPS:\n" + f"{chr(10).join(['- ' + sitemap for sitemap in found_sitemaps])}\n\n" + f"INDEXING RESULTS:\n{result}" + ) + + return enhanced_result + + except Exception as e: + # Extract agent_id for error logging if possible + agent_id = "UNKNOWN" + try: + # TODO: Fix config reference + context = self.get_context() + if context and context.agent_id: + agent_id = context.agent_id + except Exception: + pass + + logger.error(f"[{agent_id}] Error in WebsiteIndexer: {e}", exc_info=True) + raise type(e)(f"[agent:{agent_id}]: {e}") from e diff --git a/intentkit/skills/xmtp/README.md b/intentkit/skills/xmtp/README.md new file mode 100644 index 00000000..b4a6b9ec --- /dev/null +++ b/intentkit/skills/xmtp/README.md @@ -0,0 +1,110 @@ +# XMTP Skills + +This skill category provides capabilities for creating XMTP protocol transaction requests that can be sent to users for signing. 
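+
+For orientation, here is a minimal sketch of the calling convention (the
+addresses and the `store` variable are placeholders; the class and parameters
+are documented below):
+
+```python
+# Skills in this category return (content_message, attachments) rather than a
+# plain string, because response_format is "content_and_artifact".
+transfer = XmtpTransfer(skill_store=store)  # store: a SkillStoreABC instance
+message, attachments = await transfer._arun(  # (inside an async function)
+    from_address="0x...",         # agent's EVM wallet (placeholder)
+    to_address="0x...",           # recipient (placeholder)
+    amount="0.1",                 # human-readable amount
+    currency="ETH",
+    token_contract_address=None,  # None for native ETH
+)
+# attachments[0]["json"] holds the wallet_sendCalls payload shown below.
+```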
+
+## Features
+
+- **xmtp_transfer**: Create ETH or ERC20 token transfer transactions using the XMTP protocol (the examples below use Base mainnet)
+
+## Key Innovations
+
+This skill category uses a new response format mechanism:
+
+- `response_format = "content_and_artifact"` in the base class
+- Skills return a tuple `(content_message, List[ChatMessageAttachment])` instead of just a string
+- The `content_message` is sent to the user as conversational text
+- The `ChatMessageAttachment` list contains XMTP transaction data with type "xmtp"
+
+## Requirements
+
+- Agent must be configured for a supported mainnet, e.g. Base (`network_id: "base-mainnet"`)
+- Agent must have an EVM wallet address configured
+- Supported networks: Ethereum, Base, Arbitrum, and Optimism mainnet (Base Chain ID: 8453 in the examples below)
+
+## Supported Transfer Types
+
+### ETH Transfers
+- Direct ETH transfers using transaction value
+- No token contract address required
+
+### ERC20 Token Transfers
+- Supports any ERC20 token on the configured network
+- Uses `transfer(address,uint256)` function call
+- Requires token contract address
+- Automatically encodes function call data
+
+## Transaction Format
+
+### ETH Transfer Example
+```json
+{
+  "version": "1.0",
+  "from": "0x...",
+  "chainId": "0x2105",
+  "calls": [{
+    "to": "0x...",
+    "value": "0x16345785d8a0000",
+    "data": "0x",
+    "metadata": {
+      "description": "Send 0.1 ETH to address",
+      "transactionType": "transfer",
+      "currency": "ETH",
+      "amount": 100000000000000000,
+      "decimals": 18,
+      "toAddress": "0x..."
+    }
+  }]
+}
+```
+
+### ERC20 Transfer Example
+```json
+{
+  "version": "1.0",
+  "from": "0x...",
+  "chainId": "0x2105",
+  "calls": [{
+    "to": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913",
+    "value": "0x0",
+    "data": "0xa9059cbb000000000000000000000000...",
+    "metadata": {
+      "description": "Send 100 USDC to address",
+      "transactionType": "erc20_transfer",
+      "currency": "USDC",
+      "amount": 100000000,
+      "decimals": 6,
+      "toAddress": "0x...",
+      "tokenContract": "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913"
+    }
+  }]
+}
+```
+
+## Parameters
+
+- `from_address`: Sender's wallet address (must match agent's EVM wallet)
+- `to_address`: Recipient's wallet address
+- `amount`: Amount to transfer (in human-readable format, e.g., "1.5" for 1.5 tokens)
+- `currency`: Currency symbol ("ETH", "USDC", "DAI", etc.)
+- `token_contract_address`: Token contract address (optional, leave empty for ETH)
+
+Note: token decimals are not a parameter; 18 is used for ETH and ERC20 decimals are read from the token contract.
+
+## Usage
+
+```python
+# ETH Transfer:
+# from_address: agent's EVM wallet
+# to_address: recipient address
+# amount: "0.1"
+# currency: "ETH"
+# token_contract_address: None
+
+# USDC Transfer:
+# from_address: agent's EVM wallet
+# to_address: recipient address
+# amount: "100"
+# currency: "USDC"
+# token_contract_address: "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913"
+```
\ No newline at end of file
diff --git a/intentkit/skills/xmtp/__init__.py b/intentkit/skills/xmtp/__init__.py
new file mode 100644
index 00000000..27e205e9
--- /dev/null
+++ b/intentkit/skills/xmtp/__init__.py
@@ -0,0 +1,98 @@
+"""XMTP skills."""
+
+import logging
+from typing import TypedDict
+
+from intentkit.abstracts.skill import SkillStoreABC
+from intentkit.skills.base import SkillConfig, SkillState
+from intentkit.skills.xmtp.base import XmtpBaseTool
+from intentkit.skills.xmtp.price import XmtpGetSwapPrice
+from intentkit.skills.xmtp.swap import XmtpSwap
+from intentkit.skills.xmtp.transfer import XmtpTransfer
+
+# Cache skills at the module level, because they are stateless
+_cache: dict[str, XmtpBaseTool] = {}
+
+logger = logging.getLogger(__name__)
+
+
+class SkillStates(TypedDict):
+    xmtp_transfer: SkillState
+    xmtp_swap: SkillState
+    xmtp_get_swap_price: SkillState
+
+
+class Config(SkillConfig):
+    """Configuration for XMTP skills."""
+
+    states: SkillStates
+
+
+async def get_skills(
+    config: "Config",
+    is_private: bool,
+    store: SkillStoreABC,
+    **_,
+) -> list[XmtpBaseTool]:
+    """Get all XMTP skills.
+
+    Args:
+        config: The configuration for XMTP skills.
+        is_private: Whether to include private skills.
+        store: The skill store for persisting data.
+
+    Returns:
+        A list of XMTP skills.
+    """
+    available_skills = []
+
+    # Include skills based on their state
+    for skill_name, state in config["states"].items():
+        if state == "disabled":
+            continue
+        elif state == "public" or (state == "private" and is_private):
+            available_skills.append(skill_name)
+
+    # Get each skill using the cached getter
+    result = []
+    for name in available_skills:
+        skill = get_xmtp_skill(name, store)
+        if skill:
+            result.append(skill)
+    return result
+
+
+def get_xmtp_skill(
+    name: str,
+    store: SkillStoreABC,
+) -> XmtpBaseTool | None:
+    """Get an XMTP skill by name. 
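+    Instances are cached in the module-level _cache, since the skills are
+    stateless.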
+
+    Args:
+        name: The name of the skill to get
+        store: The skill store for persisting data
+
+    Returns:
+        The requested XMTP skill, or None if the name is unknown
+    """
+    if name == "xmtp_transfer":
+        if name not in _cache:
+            _cache[name] = XmtpTransfer(
+                skill_store=store,
+            )
+        return _cache[name]
+    elif name == "xmtp_swap":
+        if name not in _cache:
+            _cache[name] = XmtpSwap(
+                skill_store=store,
+            )
+        return _cache[name]
+    elif name == "xmtp_get_swap_price":
+        if name not in _cache:
+            _cache[name] = XmtpGetSwapPrice(
+                skill_store=store,
+            )
+        return _cache[name]
+    else:
+        logger.warning(f"Unknown XMTP skill: {name}")
+        return None
diff --git a/intentkit/skills/xmtp/base.py b/intentkit/skills/xmtp/base.py
new file mode 100644
index 00000000..747708f1
--- /dev/null
+++ b/intentkit/skills/xmtp/base.py
@@ -0,0 +1,74 @@
+from typing import Dict, Literal
+
+from intentkit.skills.base import IntentKitSkill
+
+
+class XmtpBaseTool(IntentKitSkill):
+    """Base class for XMTP-related skills."""
+
+    # Set response format to content_and_artifact for returning tuple
+    response_format: Literal["content", "content_and_artifact"] = "content_and_artifact"
+
+    # ChainId mapping for XMTP wallet_sendCalls (mainnet only)
+    CHAIN_ID_HEX_BY_NETWORK: Dict[str, str] = {
+        "ethereum-mainnet": "0x1",  # 1
+        "base-mainnet": "0x2105",  # 8453
+        "arbitrum-mainnet": "0xA4B1",  # 42161
+        "optimism-mainnet": "0xA",  # 10
+    }
+
+    # CDP network mapping for swap quote API (mainnet only)
+    NETWORK_FOR_CDP_MAPPING: Dict[str, str] = {
+        "ethereum-mainnet": "ethereum",
+        "base-mainnet": "base",
+        "arbitrum-mainnet": "arbitrum",
+        "optimism-mainnet": "optimism",
+    }
+
+    @property
+    def category(self) -> str:
+        """Return the skill category."""
+        return "xmtp"
+
+    def validate_network_and_get_chain_id(
+        self, network_id: str, skill_name: str
+    ) -> str:
+        """Validate network and return chain ID hex.
+
+        Args:
+            network_id: The network ID to validate
+            skill_name: The name of the skill for error messages
+
+        Returns:
+            The hex chain ID for the network
+
+        Raises:
+            ValueError: If the network is not supported
+        """
+        if network_id not in self.CHAIN_ID_HEX_BY_NETWORK:
+            supported_networks = ", ".join(self.CHAIN_ID_HEX_BY_NETWORK.keys())
+            raise ValueError(
+                f"XMTP {skill_name} supports the following networks: {supported_networks}. "
+                f"Current agent network: {network_id}"
+            )
+        return self.CHAIN_ID_HEX_BY_NETWORK[network_id]
+
+    def get_cdp_network(self, network_id: str) -> str:
+        """Get CDP network name for the given network ID.
+
+        Args:
+            network_id: The network ID
+
+        Returns:
+            The CDP network name
+
+        Raises:
+            ValueError: If the network is not supported for CDP
+        """
+        if network_id not in self.NETWORK_FOR_CDP_MAPPING:
+            supported_networks = ", ".join(self.NETWORK_FOR_CDP_MAPPING.keys())
+            raise ValueError(
+                f"CDP swap does not support network: {network_id}. "
+                f"Supported networks: {supported_networks}"
+            )
+        return self.NETWORK_FOR_CDP_MAPPING[network_id]
diff --git a/intentkit/skills/xmtp/price.py b/intentkit/skills/xmtp/price.py
new file mode 100644
index 00000000..7600ce6e
--- /dev/null
+++ b/intentkit/skills/xmtp/price.py
@@ -0,0 +1,76 @@
+from typing import Literal, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.clients.cdp import get_origin_cdp_client
+from intentkit.skills.xmtp.base import XmtpBaseTool
+
+
+class SwapPriceInput(BaseModel):
+    """Input for querying swap price via CDP."""
+
+    from_token: str = Field(description="The contract address to swap from")
+    to_token: str = Field(description="The contract address to swap to")
+    from_amount: str = Field(description="Input amount in smallest units (as string)")
+    from_address: str = Field(
+        description="The address where the from_token balance is located"
+    )
+
+
+class XmtpGetSwapPrice(XmtpBaseTool):
+    """Skill for fetching indicative swap price using CDP SDK."""
+
+    name: str = "xmtp_get_swap_price"
+    description: str = "Get an indicative swap price/quote for token pair and amount on Ethereum, Base, Arbitrum, and Optimism mainnet networks using CDP."
+    response_format: Literal["content", "content_and_artifact"] = "content"
+    args_schema: Type[BaseModel] = SwapPriceInput
+
+    async def _arun(
+        self,
+        from_token: str,
+        to_token: str,
+        from_amount: str,
+        from_address: str,
+    ) -> str:
+        context = self.get_context()
+        agent = context.agent
+
+        # Only support mainnet networks for price and swap
+        supported_networks = [
+            "ethereum-mainnet",
+            "base-mainnet",
+            "arbitrum-mainnet",
+            "optimism-mainnet",
+        ]
+        if agent.network_id not in supported_networks:
+            raise ValueError(
+                f"Swap price only supported on {', '.join(supported_networks)}. Current: {agent.network_id}"
+            )
+
+        network_for_cdp = self.get_cdp_network(agent.network_id)
+
+        cdp_client = get_origin_cdp_client(self.skill_store)
+        # Note: Don't use async with context manager as get_origin_cdp_client returns a managed global client
+        price = await cdp_client.evm.get_swap_price(
+            from_token=from_token,
+            to_token=to_token,
+            from_amount=str(from_amount),
+            network=network_for_cdp,
+            taker=from_address,
+        )
+
+        # Try to format a readable message from typical fields
+        try:
+            amount_out = getattr(price, "to_amount", None) or (
+                price.get("to_amount") if isinstance(price, dict) else None
+            )
+            route = getattr(price, "route", None) or (
+                price.get("route") if isinstance(price, dict) else None
+            )
+            route_str = f" via {route}" if route else ""
+            if amount_out:
+                return f"Estimated output: {amount_out} units of {to_token}{route_str} on {agent.network_id}."
+        except Exception:
+            pass
+
+        return f"Swap price result (raw): {price}"
diff --git a/intentkit/skills/xmtp/schema.json b/intentkit/skills/xmtp/schema.json
new file mode 100644
index 00000000..4a7d1e6a
--- /dev/null
+++ b/intentkit/skills/xmtp/schema.json
@@ -0,0 +1,75 @@
+{
+  "$schema": "http://json-schema.org/draft-07/schema#",
+  "type": "object",
+  "title": "XMTP",
+  "description": "Use this skill only if you want to make an XMTP Agent. XMTP protocol skills for creating blockchain transaction requests that can be sent to users for signing",
+  "x-icon": "https://ai.service.crestal.dev/skills/xmtp/xmtp.png",
+  "x-tags": [
+    "XMTP",
+    "Blockchain",
+    "Transactions",
+    "Web3",
+    "Base"
+  ],
+  "properties": {
+    "enabled": {
+      "type": "boolean",
+      "title": "Enabled",
+      "description": "Whether this skill is enabled",
+      "default": false
+    },
+    "states": {
+      "type": "object",
+      "properties": {
+        "xmtp_transfer": {
+          "type": "string",
+          "title": "XMTP Transfer",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Create XMTP transaction requests for transferring ETH or ERC20 tokens on the supported mainnet networks (Ethereum, Base, Arbitrum, Optimism). Supports both native ETH transfers and ERC20 token transfers. Generates wallet_sendCalls transaction data that users can sign.",
+          "default": "disabled"
+        },
+        "xmtp_swap": {
+          "type": "string",
+          "title": "XMTP Swap",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Create XMTP transaction requests for swapping tokens using CDP swap quote. Returns a wallet_sendCalls payload that can include an optional approval call and the swap call. Supports ethereum-mainnet, base-mainnet, arbitrum-mainnet, and optimism-mainnet.",
+          "default": "disabled"
+        },
+        "xmtp_get_swap_price": {
+          "type": "string",
+          "title": "XMTP Get Swap Price",
+          "enum": [
+            "disabled",
+            "public",
+            "private"
+          ],
+          "x-enum-title": [
+            "Disabled",
+            "Agent Owner + All Users",
+            "Agent Owner Only"
+          ],
+          "description": "Get an indicative swap price/quote for token pair and amount on Ethereum, Base, Arbitrum, and Optimism mainnet networks using CDP. Provides estimated output amounts for token swaps without creating transactions.",
+          "default": "disabled"
+        }
+      }
+    }
+  }
+}
\ No newline at end of file
diff --git a/intentkit/skills/xmtp/swap.py b/intentkit/skills/xmtp/swap.py
new file mode 100644
index 00000000..0d5baaed
--- /dev/null
+++ b/intentkit/skills/xmtp/swap.py
@@ -0,0 +1,212 @@
+from typing import List, Tuple, Type
+
+from pydantic import BaseModel, Field
+
+from intentkit.clients.cdp import get_origin_cdp_client
+from intentkit.models.chat import ChatMessageAttachment, ChatMessageAttachmentType
+from intentkit.skills.xmtp.base import XmtpBaseTool
+
+
+class SwapInput(BaseModel):
+    """Input for XMTP swap skill.
+
+    This creates an unsigned swap transaction attachment using CDP swap quote
+    that a user can review and sign via XMTP wallet_sendCalls.
+    """
+
+    from_address: str = Field(description="The sender address for the swap")
+    from_token: str = Field(
+        description="The contract address of the token to swap from"
+    )
+    to_token: str = Field(description="The contract address of the token to swap to")
+    from_amount: str = Field(
+        description="The input amount in the smallest unit of from_token (as string)"
+    )
+    slippage_bps: int = Field(
+        default=100,
+        description="Maximum slippage in basis points (100 = 1%). Defaults to 100.",
+    )
+
+
+class XmtpSwap(XmtpBaseTool):
+    """Skill for creating XMTP swap transactions using CDP swap quote.
+
+    Generates a wallet_sendCalls transaction request to perform a token swap.
+    May include an ERC20 approval call followed by the router swap call.
+    Supports the Ethereum, Base, Arbitrum, and Optimism mainnet networks.
+    """
+
+    name: str = "xmtp_swap"
+    description: str = (
+        "Create an XMTP transaction request for swapping tokens using CDP swap quote. 
" + "Returns a wallet_sendCalls payload that can include an optional approval call and the swap call. " + "Supports Ethereum, Base, Arbitrum, and Optimism mainnet networks." + ) + args_schema: Type[BaseModel] = SwapInput + + async def _arun( + self, + from_address: str, + from_token: str, + to_token: str, + from_amount: str, + slippage_bps: int = 100, + ) -> Tuple[str, List[ChatMessageAttachment]]: + # Input validation + if ( + not from_address + or not from_address.startswith("0x") + or len(from_address) != 42 + ): + raise ValueError("from_address must be a valid Ethereum address") + + if not from_token or not from_token.startswith("0x") or len(from_token) != 42: + raise ValueError("from_token must be a valid token contract address") + + if not to_token or not to_token.startswith("0x") or len(to_token) != 42: + raise ValueError("to_token must be a valid token contract address") + + if from_token.lower() == to_token.lower(): + raise ValueError("from_token and to_token cannot be the same") + + try: + amount_int = int(from_amount) + if amount_int <= 0: + raise ValueError("from_amount must be a positive integer") + except ValueError as e: + raise ValueError(f"from_amount must be a valid positive integer: {e}") + + if ( + not isinstance(slippage_bps, int) + or slippage_bps < 0 + or slippage_bps > 10000 + ): + raise ValueError("slippage_bps must be between 0 and 10000 (0% to 100%)") + + # Resolve agent context and target network + context = self.get_context() + agent = context.agent + + # Only support mainnet networks for swap + supported_networks = [ + "ethereum-mainnet", + "base-mainnet", + "arbitrum-mainnet", + "optimism-mainnet", + ] + if agent.network_id not in supported_networks: + raise ValueError( + f"Swap only supported on {', '.join(supported_networks)}. 
Current: {agent.network_id}"
+            )
+
+        # Validate network and get chain ID
+        chain_id_hex = self.validate_network_and_get_chain_id(agent.network_id, "swap")
+
+        # Get CDP network name
+        # Reference: CDP SDK examples for swap quote and price
+        # https://github.com/coinbase/cdp-sdk/blob/main/examples/python/evm/swaps/create_swap_quote.py
+        network_for_cdp = self.get_cdp_network(agent.network_id)
+
+        # Get CDP client from global origin helper (server-side credentials)
+        cdp_client = get_origin_cdp_client(self.skill_store)
+
+        # Call CDP to create swap quote and extract call datas
+        # Be permissive with response shape across SDK versions
+        try:
+            # Attempt the canonical method per CDP SDK examples
+            # create_swap_quote(from_token, to_token, from_amount, network, taker, slippage_bps, signer_address)
+            # Note: Don't use async with context manager as get_origin_cdp_client returns a managed global client
+            quote = await cdp_client.evm.create_swap_quote(
+                from_token=from_token,
+                to_token=to_token,
+                from_amount=str(from_amount),
+                network=network_for_cdp,
+                taker=from_address,
+                slippage_bps=slippage_bps,
+                signer_address=from_address,
+            )
+        except Exception as e:  # pragma: no cover - defensive
+            raise ValueError(f"Failed to create swap quote via CDP: {e!s}")
+
+        # Extract transaction data from QuoteSwapResult
+        # CDP returns a single transaction object with all necessary data
+        calls: list[dict] = []
+
+        # Validate that we have the required fields from CDP
+        if not hasattr(quote, "to") or not hasattr(quote, "data"):
+            raise ValueError(
+                "CDP swap quote missing required transaction fields (to, data)"
+            )
+
+        # Format value field - ensure it's a hex string
+        value_hex = "0x0"
+        if hasattr(quote, "value") and quote.value:
+            if isinstance(quote.value, str) and quote.value.startswith("0x"):
+                value_hex = quote.value
+            else:
+                value_hex = hex(int(quote.value)) if quote.value != "0" else "0x0"
+
+        # Format data field - ensure it has 0x prefix
+        data_hex = quote.data if quote.data.startswith("0x") else f"0x{quote.data}"
+
+        # Get expected output amount for metadata
+        to_amount = getattr(quote, "to_amount", None) or "unknown"
+        min_to_amount = getattr(quote, "min_to_amount", None) or "unknown"
+
+        # Create the swap call following XMTP wallet_sendCalls format
+        swap_call = {
+            "to": quote.to,
+            "value": value_hex,
+            "data": data_hex,
+            "metadata": {
+                "description": f"Swap {from_amount} units of {from_token} for {to_token} (expected: {to_amount}, min: {min_to_amount})",
+                "transactionType": "swap",
+                "currency": from_token,
+                "amount": int(from_amount),
+                "toAddress": quote.to,
+                "fromToken": from_token,
+                "toToken": to_token,
+                "expectedOutput": to_amount,
+                "minimumOutput": min_to_amount,
+                "slippageBps": slippage_bps,
+                "network": agent.network_id,
+            },
+        }
+
+        calls.append(swap_call)
+
+        # Note: CDP's create_swap_quote already includes any necessary approvals
+        # in the single transaction if needed, or handles them via Permit2 signatures
+
+        # Build XMTP wallet_sendCalls payload
+        wallet_send_calls = {
+            "version": "1.0",
+            "from": from_address,
+            "chainId": chain_id_hex,
+            "calls": calls,
+        }
+
+        # Attachment for chat
+        attachment: ChatMessageAttachment = {
+            "type": ChatMessageAttachmentType.XMTP,
+            "url": None,
+            "json": wallet_send_calls,
+        }
+
+        # Human-friendly message with more details
+        expected_output = getattr(quote, "to_amount", "unknown")
+        min_output = getattr(quote, "min_to_amount", "unknown")
+
+        content_message = (
+            f"🔄 Swap transaction ready!\n\n"
+            f"**Details:**\n"
+            f"• From: {from_amount} units of {from_token}\n"
+            f"• To: {to_token}\n"
+            f"• Expected output: {expected_output} units\n"
+            f"• Minimum output: {min_output} units\n"
+            f"• Network: {agent.network_id}\n"
+            f"• Slippage: {slippage_bps / 100:.1f}%\n\n"
+            f"Please review the transaction details and sign to execute the swap."
+        )
+
+        return content_message, [attachment]
diff --git a/intentkit/skills/xmtp/transfer.py b/intentkit/skills/xmtp/transfer.py
new file mode 100644
index 00000000..23dbac98
--- /dev/null
+++ b/intentkit/skills/xmtp/transfer.py
@@ -0,0 +1,207 @@
+from decimal import Decimal
+from typing import List, Optional, Tuple, Type
+
+from pydantic import BaseModel, Field
+from web3.exceptions import ContractLogicError
+
+from intentkit.models.chat import ChatMessageAttachment, ChatMessageAttachmentType
+from intentkit.skills.xmtp.base import XmtpBaseTool
+
+
+class TransferInput(BaseModel):
+    """Input for XMTP transfer skill."""
+
+    from_address: str = Field(description="The sender address for the transfer")
+    to_address: str = Field(description="The recipient address for the transfer")
+    amount: str = Field(
+        description="The amount to transfer in human-readable format (e.g., '1.5' for 1.5 ETH, '100' for 100 USDC). Do NOT multiply by token decimals."
+    )
+    currency: str = Field(description="Currency symbol (e.g., 'ETH', 'USDC', 'NATION')")
+    token_contract_address: Optional[str] = Field(
+        default=None,
+        description="Token contract address for ERC20 transfers. Leave empty for ETH transfers.",
+    )
+
+
+class XmtpTransfer(XmtpBaseTool):
+    """Skill for creating XMTP transfer transactions."""
+
+    name: str = "xmtp_transfer"
+    description: str = """Create an XMTP transaction request for transferring ETH or ERC20 tokens.
+    This skill generates a wallet_sendCalls transaction request according to XMTP protocol
+    that can be sent to users for signing.
+    Supports the Ethereum, Base, Arbitrum, and Optimism mainnet networks.
+    """
+    args_schema: Type[BaseModel] = TransferInput
+
+    async def _arun(
+        self,
+        from_address: str,
+        to_address: str,
+        amount: str,
+        currency: str,
+        token_contract_address: Optional[str],
+    ) -> Tuple[str, List[ChatMessageAttachment]]:
+        """Create an XMTP transfer transaction request.
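+        For ERC20 transfers, the token contract's symbol() and decimals() are
+        read on-chain to validate the currency and scale the amount before the
+        transfer(address,uint256) call data is encoded.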
+
+        Args:
+            from_address: The sender address
+            to_address: The recipient address
+            amount: Amount to transfer
+            currency: Currency symbol
+            token_contract_address: Token contract address (None for ETH)
+
+        Returns:
+            Tuple of (content_message, list_of_attachments)
+        """
+        # Get context and check network
+        context = self.get_context()
+        agent = context.agent
+
+        # Validate network and get chain ID
+        chain_id_hex = self.validate_network_and_get_chain_id(
+            agent.network_id, "transfer"
+        )
+
+        # Validate token contract and get decimals
+        if token_contract_address:
+            # Validate ERC20 contract and get token info
+            web3 = self.web3_client()
+
+            # ERC20 ABI for symbol() and decimals() functions
+            erc20_abi = [
+                {
+                    "constant": True,
+                    "inputs": [],
+                    "name": "symbol",
+                    "outputs": [{"name": "", "type": "string"}],
+                    "type": "function",
+                },
+                {
+                    "constant": True,
+                    "inputs": [],
+                    "name": "decimals",
+                    "outputs": [{"name": "", "type": "uint8"}],
+                    "type": "function",
+                },
+            ]
+
+            try:
+                # Create contract instance
+                contract = web3.eth.contract(
+                    address=web3.to_checksum_address(token_contract_address),
+                    abi=erc20_abi,
+                )
+
+                # Get token symbol and decimals
+                token_symbol = contract.functions.symbol().call()
+                decimals = contract.functions.decimals().call()
+
+                # Validate symbol matches currency (case insensitive)
+                if token_symbol.upper() != currency.upper():
+                    raise ValueError(
+                        f"Token symbol mismatch: contract symbol is '{token_symbol}', "
+                        f"but currency parameter is '{currency}'"
+                    )
+
+            except ContractLogicError:
+                raise ValueError(
+                    f"Invalid ERC20 contract address: {token_contract_address}. "
+                    "The address does not point to a valid ERC20 token contract."
+                )
+            except Exception as e:
+                raise ValueError(
+                    f"Failed to validate ERC20 contract {token_contract_address}: {str(e)}"
+                )
+        else:
+            # For ETH transfers, use 18 decimals
+            decimals = 18
+            # Validate currency is ETH for native transfers
+            if currency.upper() != "ETH":
+                raise ValueError(
+                    f"For native transfers, currency must be 'ETH', got '{currency}'"
+                )
+
+        # Calculate amount in smallest unit (wei for ETH, token units for ERC20).
+        # Decimal avoids float rounding errors for amounts like "1.1".
+        amount_int = int(Decimal(amount) * (10**decimals))
+
+        if token_contract_address:
+            # ERC20 Token Transfer
+            transaction_to = token_contract_address
+            transaction_value = "0x0"  # No ETH value for token transfers
+
+            # Create ERC20 transfer function call data
+            # Function signature: transfer(address,uint256)
+            # Method ID: First 4 bytes of keccak256("transfer(address,uint256)")
+            method_id = "0xa9059cbb"  # transfer(address,uint256) method ID
+
+            # Encode to_address (32 bytes, left-padded)
+            to_address_clean = to_address.replace("0x", "")
+            to_address_padded = to_address_clean.zfill(64)
+
+            # Encode amount (32 bytes, left-padded)
+            amount_hex = hex(amount_int)[2:]  # Remove 0x prefix
+            amount_padded = amount_hex.zfill(64)
+
+            # Combine method ID + padded address + padded amount
+            call_data = method_id + to_address_padded + amount_padded
+
+            description = f"Send {amount} {currency} to {to_address}"
+            metadata = {
+                "description": description,
+                "transactionType": "erc20_transfer",
+                "currency": currency,
+                "amount": amount_int,
+                "decimals": decimals,
+                "toAddress": to_address,
+                "tokenContract": token_contract_address,
+            }
+        else:
+            # ETH Transfer
+            transaction_to = to_address
+            transaction_value = hex(amount_int)
+            call_data = "0x"  # No call data for simple ETH transfer
+
+            description = f"Send {amount} {currency} to {to_address}"
+            metadata = {
+                "description": description,
+                "transactionType": "transfer",
+                "currency": currency,
+                "amount": amount_int,
+                "decimals": decimals,
+                "toAddress": to_address,
+            }
+
+        # Create XMTP wallet_sendCalls transaction request
+        wallet_send_calls = {
+            "version": "1.0",
+            "from": from_address,
+            "chainId": chain_id_hex,
+            "calls": [
+                {
+                    "to": transaction_to,
+                    "value": transaction_value,
+                    "data": call_data,
+                    "metadata": metadata,
+                }
+            ],
+        }
+
+        # Create ChatMessageAttachment
+        attachment: ChatMessageAttachment = {
+            "type": ChatMessageAttachmentType.XMTP,
+            "url": None,
+            "json": wallet_send_calls,
+        }
+
+        # Create user message
+        content_message = (
+            f"💸 Transfer transaction ready!\n\n"
+            f"**Details:**\n"
+            f"• Amount: {amount} {currency}\n"
+            f"• To: {to_address}\n"
+            f"• Network: {agent.network_id}\n"
+            f"• Type: {'ERC20 Token' if token_contract_address else 'Native ETH'}\n\n"
+            f"Please review the transaction details and sign to execute the transfer."
+        )
+
+        return content_message, [attachment]
diff --git a/intentkit/skills/xmtp/xmtp.png b/intentkit/skills/xmtp/xmtp.png
new file mode 100644
index 00000000..26cac21c
Binary files /dev/null and b/intentkit/skills/xmtp/xmtp.png differ
diff --git a/intentkit/utils/__init__.py b/intentkit/utils/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/intentkit/utils/__init__.py
@@ -0,0 +1 @@
+
diff --git a/intentkit/utils/chain.py b/intentkit/utils/chain.py
new file mode 100644
index 00000000..46380a84
--- /dev/null
+++ b/intentkit/utils/chain.py
@@ -0,0 +1,436 @@
+from abc import ABC, abstractmethod
+from enum import IntEnum, StrEnum
+
+import httpx
+
+
+class Chain(StrEnum):
+    """
+    Enum of supported blockchain chains, using QuickNode's naming conventions.
+
+    This list is based on common chain names used by QuickNode, but it's essential
+    to consult the official QuickNode documentation for the most accurate and
+    up-to-date list of supported chains and their exact names. Chain names can
+    sometimes be slightly different from what you might expect.
+    """
+
+    # EVM Chains
+    Ethereum = "eth"  # Or "ethereum"
+    Avalanche = "avax"  # Or "avalanche"
+    Binance = "bsc"  # BNB Smart Chain
+    Polygon = "matic"  # Or "polygon"
+    Gnosis = "gnosis"  # Or "xdai"
+    Celo = "celo"
+    Fantom = "fantom"
+    Moonbeam = "moonbeam"
+    Aurora = "aurora"
+    Arbitrum = "arbitrum"
+    Optimism = "optimism"
+    Linea = "linea"
+    ZkSync = "zksync"
+
+    # Base
+    Base = "base"
+
+    # Cosmos Ecosystem
+    CosmosHub = "cosmos"  # Or "cosmos-hub"
+    Osmosis = "osmosis"
+    Juno = "juno"
+    Evmos = "evmos"
+    Kava = "kava"
+    Persistence = "persistence"
+    Secret = "secret"
+    Stargaze = "stargaze"
+    Terra = "terra"  # Or "terra-classic"
+    Axelar = "axelar"
+
+    # Solana
+    Solana = "sol"  # Or "solana"
+
+    # Other Chains
+    Sonic = "sonic"
+    Bera = "bera"
+    Near = "near"
+    Frontera = "frontera"
+
+
+class Network(StrEnum):
+    """
+    Enum of well-known blockchain network names, based on QuickNode API.
+
+    This list is not exhaustive and might not be completely up-to-date.
+    Always consult the official QuickNode documentation for the most accurate
+    and current list of supported networks. Network names can sometimes
+    be slightly different from what you might expect.
+ """ + + # Ethereum Mainnet and Testnets + EthereumMainnet = "ethereum-mainnet" + EthereumGoerli = "ethereum-goerli" # Goerli Testnet (deprecated, Sepolia preferred) + EthereumSepolia = "ethereum-sepolia" + + # Layer 2s on Ethereum + ArbitrumMainnet = "arbitrum-mainnet" + OptimismMainnet = "optimism-mainnet" # Or just "optimism" + LineaMainnet = "linea-mainnet" + ZkSyncMainnet = "zksync-mainnet" # zkSync Era + + # Other EVM Chains + AvalancheMainnet = "avalanche-mainnet" + BinanceMainnet = "bsc" # BNB Smart Chain (BSC) + PolygonMainnet = "matic" # Or "polygon-mainnet" + GnosisMainnet = "xdai" # Or "gnosis" + CeloMainnet = "celo-mainnet" + FantomMainnet = "fantom-mainnet" + MoonbeamMainnet = "moonbeam-mainnet" + AuroraMainnet = "aurora-mainnet" + + # Base + BaseMainnet = "base-mainnet" + BaseSepolia = "base-sepolia" + + # Cosmos Ecosystem (These can be tricky and may need updates) + CosmosHubMainnet = "cosmos-hub-mainnet" # Or just "cosmos" + OsmosisMainnet = "osmosis-mainnet" # Or just "osmosis" + JunoMainnet = "juno-mainnet" # Or just "juno" + + # Solana (Note: Solana uses cluster names, not typical network names) + SolanaMainnet = "solana-mainnet" # Or "solana" + + # Other Chains + SonicMainnet = "sonic-mainnet" + BeraMainnet = "bera-mainnet" + NearMainnet = "near-mainnet" # Or just "near" + KavaMainnet = "kava-mainnet" # Or just "kava" + EvmosMainnet = "evmos-mainnet" # Or just "evmos" + PersistenceMainnet = "persistence-mainnet" # Or just "persistence" + SecretMainnet = "secret-mainnet" # Or just "secret" + StargazeMainnet = "stargaze-mainnet" # Or just "stargaze" + TerraMainnet = "terra-mainnet" # Or "terra-classic" + AxelarMainnet = "axelar-mainnet" # Or just "axelar" + FronteraMainnet = "frontera-mainnet" + + +class NetworkId(IntEnum): + """ + Enum of well-known blockchain network IDs. + + This list is not exhaustive and might not be completely up-to-date. + Always consult the official documentation for the specific blockchain + you are working with for the most accurate and current chain ID. + """ + + # Ethereum Mainnet and Testnets + EthereumMainnet = 1 + EthereumGoerli = 5 # Goerli Testnet (deprecated, Sepolia is preferred) + EthereumSepolia = 11155111 + + # Layer 2s on Ethereum + ArbitrumMainnet = 42161 + OptimismMainnet = 10 + LineaMainnet = 59144 + ZkSyncMainnet = 324 # zkSync Era + + # Other EVM Chains + AvalancheMainnet = 43114 + BinanceMainnet = 56 # BNB Smart Chain (BSC) + PolygonMainnet = 137 + GnosisMainnet = 100 # xDai Chain + CeloMainnet = 42220 + FantomMainnet = 250 + MoonbeamMainnet = 1284 + AuroraMainnet = 1313161554 + + # Base + BaseMainnet = 8453 + BaseSepolia = 84532 + + # Other Chains + SonicMainnet = 146 + BeraMainnet = 80094 + + +# Mapping of Network enum members to their corresponding NetworkId enum members. +# This dictionary facilitates efficient lookup of network IDs given a network name. +# Note: SolanaMainnet is intentionally excluded as it does not have a numeric chain ID. +# Always refer to the official documentation for the most up-to-date mappings. 
+network_to_id: dict[Network, NetworkId] = { + Network.ArbitrumMainnet: NetworkId.ArbitrumMainnet, + Network.AvalancheMainnet: NetworkId.AvalancheMainnet, + Network.BaseMainnet: NetworkId.BaseMainnet, + Network.BaseSepolia: NetworkId.BaseSepolia, + Network.BeraMainnet: NetworkId.BeraMainnet, + Network.BinanceMainnet: NetworkId.BinanceMainnet, + Network.EthereumMainnet: NetworkId.EthereumMainnet, + Network.EthereumSepolia: NetworkId.EthereumSepolia, + Network.GnosisMainnet: NetworkId.GnosisMainnet, + Network.LineaMainnet: NetworkId.LineaMainnet, + Network.OptimismMainnet: NetworkId.OptimismMainnet, + Network.PolygonMainnet: NetworkId.PolygonMainnet, + Network.SonicMainnet: NetworkId.SonicMainnet, + Network.ZkSyncMainnet: NetworkId.ZkSyncMainnet, +} + +# Mapping of NetworkId enum members (chain IDs) to their corresponding +# Network enum members (network names). This dictionary allows for reverse +# lookup, enabling retrieval of the network name given a chain ID. +# Note: Solana is not included here as it does not use a standard numeric +# chain ID. Always consult official documentation for the most +# up-to-date mappings. +id_to_network: dict[NetworkId, Network] = { + NetworkId.ArbitrumMainnet: Network.ArbitrumMainnet, + NetworkId.AvalancheMainnet: Network.AvalancheMainnet, + NetworkId.BaseMainnet: Network.BaseMainnet, + NetworkId.BaseSepolia: Network.BaseSepolia, + NetworkId.BeraMainnet: Network.BeraMainnet, + NetworkId.BinanceMainnet: Network.BinanceMainnet, + NetworkId.EthereumMainnet: Network.EthereumMainnet, + NetworkId.EthereumSepolia: Network.EthereumSepolia, + NetworkId.GnosisMainnet: Network.GnosisMainnet, + NetworkId.LineaMainnet: Network.LineaMainnet, + NetworkId.OptimismMainnet: Network.OptimismMainnet, + NetworkId.PolygonMainnet: Network.PolygonMainnet, + NetworkId.SonicMainnet: Network.SonicMainnet, + NetworkId.ZkSyncMainnet: Network.ZkSyncMainnet, +} + + +class ChainConfig: + """ + Configuration class for a specific blockchain chain. + + This class encapsulates all the necessary information to interact with a + particular blockchain, including the chain type, network, RPC URLs, and ENS URL. + """ + + def __init__( + self, + chain: Chain, + network: Network, + rpc_url: str, + ens_url: str | None, + wss_url: str | None, + ): + """ + Initializes a ChainConfig object. + + Args: + chain: The Chain enum member representing the blockchain type (e.g., Ethereum, Solana). + network: The Network enum member representing the specific network (e.g., EthereumMainnet). + rpc_url: The URL for the RPC endpoint of the blockchain. + ens_url: The URL for the ENS (Ethereum Name Service) endpoint (can be None if not applicable). + wss_url: The URL for the WebSocket endpoint of the blockchain (can be None if not applicable). + """ + + self._chain = chain + self._network = network + self._rpc_url = rpc_url + self._ens_url = ens_url + self._wss_url = wss_url + + @property + def chain(self) -> Chain: + """ + Returns the Chain enum member. + """ + return self._chain + + @property + def network(self) -> Network: + """ + Returns the Network enum member. + """ + return self._network + + @property + def network_id(self) -> int | None: + """ + Returns the network ID (chain ID) for the configured network, or None if not applicable. + Uses the global network_to_id mapping to retrieve the ID. + """ + return network_to_id.get(self._network) + + @property + def rpc_url(self) -> str: + """ + Returns the RPC URL. + """ + return self._rpc_url + + @property + def ens_url(self) -> str | None: + """ + Returns the ENS URL, or None if not applicable. + """ + return self._ens_url + + @property + def wss_url(self) -> str | None: + """ + Returns the WebSocket URL, or None if not applicable. + """ + return self._wss_url
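For orientation, a quick sketch of how ChainConfig and the mappings above fit together (the RPC URL is a placeholder):

    cfg = ChainConfig(
        chain=Chain.Base,
        network=Network.BaseMainnet,
        rpc_url="https://example.base-mainnet.quiknode.pro/KEY/",  # placeholder
        ens_url=None,
        wss_url=None,
    )
    assert cfg.network_id == NetworkId.BaseMainnet  # 8453, via network_to_id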
+ + +class ChainProvider(ABC): + """ + Abstract base class for providing blockchain chain configurations. + + This class defines the interface for classes responsible for managing and + providing access to `ChainConfig` objects. Subclasses *must* implement the + `init_chain_configs` method to populate the available chain configurations. + """ + + def __init__(self): + """ + Initializes the ChainProvider. + + Sets up an empty dictionary `chain_configs` to store the configurations. + """ + self.chain_configs: dict[Network, ChainConfig] = {} + + def get_chain_config(self, network: Network) -> ChainConfig: + """ + Retrieves the chain configuration for a specific network. + + Args: + network: The `Network` enum member representing the desired network. + + Returns: + The `ChainConfig` object associated with the given network. + + Raises: + Exception: If no chain configuration is found for the specified network. + """ + chain_config = self.chain_configs.get(network) + if not chain_config: + raise Exception(f"chain config for network {network} not found") + return chain_config + + def get_chain_config_by_id(self, network_id: NetworkId) -> ChainConfig: + """ + Retrieves the chain configuration by network ID. + + This method first looks up the `Network` enum member associated with the + provided `NetworkId` and then uses `get_chain_config` to retrieve the + configuration. + + Args: + network_id: The `NetworkId` enum member representing the desired network ID. + + Returns: + The `ChainConfig` object associated with the network ID. + + Raises: + Exception: If no network is found for the given ID or if the + chain configuration is not found for the resolved network. + """ + network = id_to_network.get(network_id) + if not network: + raise Exception(f"network with id {network_id} not found") + return self.get_chain_config(network) + + @abstractmethod + def init_chain_configs( + self, limit: int = 100, offset: int = 0 + ) -> dict[Network, ChainConfig]: + """ + Initializes the chain configurations. + + This *abstract* method *must* be implemented by subclasses. It is + responsible for populating the `chain_configs` dictionary with + `ChainConfig` objects, typically using credentials supplied to the + concrete provider's constructor. + + Args: + limit: The maximum number of entries to retrieve (default: 100). + offset: The number of entries to skip (default: 0). + + Returns: + A dictionary mapping `Network` enum members to `ChainConfig` objects. + """ + raise NotImplementedError
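Since subclasses only need to populate chain_configs, a minimal in-memory implementation (hypothetical, e.g. for tests; not part of this change) could look like:

    class StaticChainProvider(ChainProvider):
        """Serve a fixed config for a local node."""

        def init_chain_configs(
            self, limit: int = 100, offset: int = 0
        ) -> dict[Network, ChainConfig]:
            # limit/offset are ignored; there is only one static entry
            self.chain_configs[Network.BaseSepolia] = ChainConfig(
                Chain.Base,
                Network.BaseSepolia,
                "http://localhost:8545",  # placeholder local RPC
                None,
                None,
            )
            return self.chain_configs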
+ + +class QuicknodeChainProvider(ChainProvider): + """ + A concrete implementation of `ChainProvider` for QuickNode. + + This class retrieves chain configuration data from the QuickNode API and + populates the `chain_configs` dictionary. + """ + + def __init__(self, api_key): + """ + Initializes the QuicknodeChainProvider. + + Args: + api_key: Your QuickNode API key. + """ + super().__init__() + self.api_key = api_key + + def init_chain_configs( + self, limit: int = 100, offset: int = 0 + ) -> dict[Network, ChainConfig]: + """ + Initializes chain configurations by fetching data from the QuickNode API. + + This method retrieves a list of QuickNode endpoints using the provided + API key and populates the `chain_configs` dictionary with `ChainConfig` + objects. + + Args: + limit: The maximum number of endpoints to retrieve (default: 100). + offset: The number of endpoints to skip (default: 0). + + Returns: + A dictionary mapping `Network` enum members to `ChainConfig` objects. + + Raises: + Exception: If an error occurs during the API request or processing + the response. More specific exception types are used + for HTTP errors and request errors. + """ + url = "https://api.quicknode.com/v0/endpoints" + headers = { + "Accept": "application/json", + "x-api-key": self.api_key, + } + params = { + "limit": limit, + "offset": offset, + } + + with httpx.Client(timeout=30) as client: # Set a timeout for the request + try: + response = client.get(url, headers=headers, params=params) + response.raise_for_status() # Raise HTTPError for bad responses (4xx or 5xx) + json_dict = response.json() + + for item in json_dict["data"]: + # Assuming 'item' contains 'chain', 'network', 'http_url', 'wss_url' + # and that these values can be used to construct the ChainConfig object + chain = Chain(item["chain"]) + network = Network(item["network"]) + + # Key by the Network enum member so that lookups in + # get_chain_config, which use enum members, succeed + self.chain_configs[network] = ChainConfig( + chain, + network, + item["http_url"], + item[ + "http_url" + ], # ens_url is the same as http_url in this case. + item["wss_url"], + ) + + except httpx.HTTPStatusError as http_err: + raise Exception(f"Quicknode API HTTP Error: {http_err}") from http_err + except httpx.RequestError as req_err: + raise Exception(f"Quicknode API Request Error: {req_err}") from req_err + except ( + KeyError, + TypeError, + ) as e: # Handle potential data issues in the API response + raise Exception( + f"Error processing QuickNode API response: {e}. Check the API response format." + ) + except Exception as e: + raise Exception(f"Quicknode API: An unexpected error occurred: {e}") from e
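Putting the QuickNode provider to work, the intended call flow is roughly as follows (the API key and printed values are placeholders):

    provider = QuicknodeChainProvider(api_key="qn_...")  # placeholder key
    provider.init_chain_configs(limit=50)
    cfg = provider.get_chain_config(Network.BaseMainnet)
    print(cfg.rpc_url, cfg.network_id)  # endpoint URL and 8453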
diff --git a/intentkit/utils/error.py b/intentkit/utils/error.py new file mode 100644 index 00000000..3d33675d --- /dev/null +++ b/intentkit/utils/error.py @@ -0,0 +1,144 @@ +import logging +from typing import Optional, Sequence + +from fastapi.exceptions import RequestValidationError +from fastapi.utils import is_body_allowed_for_status_code +from langchain_core.tools.base import ToolException +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import JSONResponse, Response +from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY + +logger = logging.getLogger(__name__) + +# error messages in agent system message response + + +class RateLimitExceeded(Exception): + """Rate limit exceeded""" + + def __init__(self, message: Optional[str] = "Rate limit exceeded"): + self.message = message + super().__init__(self.message) + + +class IntentKitAPIError(Exception): + def __init__(self, status_code: int, key: str, message: str): + self.key = key + self.message = message + self.status_code = status_code + + def __str__(self): + return f"{self.key}: {self.message}" + + def __repr__(self): + return f"IntentKitAPIError({self.key}, {self.message}, {self.status_code})" + + +async def intentkit_api_error_handler( + request: Request, exc: IntentKitAPIError +) -> Response: + if exc.status_code >= 500: + logger.error(f"Internal Server Error for request {request.url}: {str(exc)}") + else: + logger.info(f"Bad Request for request {request.url}: {str(exc)}") + return JSONResponse( + {"error": exc.key, "msg": exc.message}, + status_code=exc.status_code, + ) + + +async def intentkit_other_error_handler(request: Request, exc: Exception) -> Response: + logger.error(f"Internal Server Error for request {request.url}: {str(exc)}") + return JSONResponse( + {"error": "ServerError", "msg": "Internal Server Error"}, + status_code=500, + ) + + +async def http_exception_handler(request: Request, exc: HTTPException) -> Response: + headers = getattr(exc, "headers", None) + if not is_body_allowed_for_status_code(exc.status_code): + return Response(status_code=exc.status_code, headers=headers) + if exc.status_code >= 500: + logger.error(f"Internal Server Error for request {request.url}: {str(exc)}") + return JSONResponse( + {"error": "ServerError", "msg": "Internal Server Error"}, + status_code=exc.status_code, + headers=headers, + ) + logger.info(f"Bad Request for request {request.url}: {str(exc)}") + return JSONResponse( + {"error": "BadRequest", "msg": str(exc.detail)}, + status_code=exc.status_code, + headers=headers, + ) + + +def format_validation_errors(errors: Sequence) -> str: + """Format validation errors into a more readable string.""" + formatted_errors = [] + + for error in errors: + loc = error.get("loc", []) + msg = error.get("msg", "") + error_type = error.get("type", "") + + # Build field path + field_path = " -> ".join(str(part) for part in loc if part != "body") + + # Format the error message with type information + if field_path: + if error_type: + formatted_error = f"Field '{field_path}' ({error_type}): {msg}" + else: + formatted_error = f"Field '{field_path}': {msg}" + else: + formatted_error = msg + + formatted_errors.append(formatted_error) + + return "; ".join(formatted_errors) + + +async def request_validation_exception_handler( + request: Request, exc: RequestValidationError +) -> JSONResponse: + formatted_msg = format_validation_errors(exc.errors()) + return JSONResponse( + status_code=HTTP_422_UNPROCESSABLE_ENTITY, + content={"error": "ValidationError", "msg": formatted_msg}, + ) + + +class IntentKitLookUpError(LookupError): + """Custom lookup error for IntentKit.""" + + pass + + +class AgentError(Exception): + """Custom exception for agent-related errors.""" + + def __init__(self, agent_id: str, message: str | None = None): + self.agent_id = agent_id + if message is None: + message = f"Agent error occurred for agent_id: {agent_id}" + super().__init__(message) + + def __str__(self): + return f"AgentError(agent_id={self.agent_id}): {super().__str__()}" + + +class SkillError(ToolException): + """Custom exception for skill-related errors.""" + + def __init__(self, agent_id: str, skill_name: str, message: str | None = None): + self.agent_id = agent_id + self.skill_name = skill_name + if message is None: + message = f"Skill error occurred for agent_id: {agent_id}, skill_name: {skill_name}" + super().__init__(message) + + def __str__(self): + return f"SkillError(agent_id={self.agent_id}, skill_name={self.skill_name}): {super().__str__()}"
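These handlers are meant to be registered on the FastAPI application; a sketch of the wiring (the app object and where registration happens are assumptions, using the handlers and exception classes defined in this module):

    from fastapi import FastAPI

    app = FastAPI()
    app.add_exception_handler(IntentKitAPIError, intentkit_api_error_handler)
    app.add_exception_handler(HTTPException, http_exception_handler)
    app.add_exception_handler(RequestValidationError, request_validation_exception_handler)
    app.add_exception_handler(Exception, intentkit_other_error_handler)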
diff --git a/intentkit/utils/logging.py b/intentkit/utils/logging.py new file mode 100644 index 00000000..7543018a --- /dev/null +++ b/intentkit/utils/logging.py @@ -0,0 +1,70 @@ +""" +Logging configuration module +""" + +import json +import logging +from typing import Callable, Optional + + +class JsonFormatter(logging.Formatter): + def __init__( + self, filter_func: Optional[Callable[[logging.LogRecord], bool]] = None + ): + super().__init__() + self.filter_func = filter_func + + def format(self, record): + if self.filter_func and not self.filter_func(record): + return "" + + log_obj = { + "timestamp": self.formatTime(record), + "name": record.name, + "level": record.levelname, + "message": record.getMessage(), + } + # Include any extra attributes passed via the logging `extra` kwarg + extra = record.__dict__.get("extra") + if extra: + log_obj.update(extra) + if record.exc_info: + log_obj["exc_info"] = self.formatException(record.exc_info) + return json.dumps(log_obj) + + +def setup_logging(env: str, debug: bool = False): + """ + Setup global logging configuration. + + Args: + env: Environment name ('local', 'prod', etc.); currently unused + debug: Debug mode flag + """ + + if debug: + # Set up logging configuration for local/debug + logging.basicConfig( + level=logging.DEBUG, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", + handlers=[logging.StreamHandler()], + ) + # logging.getLogger("openai._base_client").setLevel(logging.INFO) + # logging.getLogger("httpcore.http11").setLevel(logging.INFO) + # logging.getLogger("sqlalchemy.engine").setLevel(logging.DEBUG) + else: + # For non-local environments, use JSON format + handler = logging.StreamHandler() + handler.setFormatter(JsonFormatter()) + logging.basicConfig(level=logging.INFO, handlers=[handler]) + logging.getLogger("sqlalchemy.engine").setLevel(logging.WARNING) + # uvicorn access log + uvicorn_access = logging.getLogger("uvicorn.access") + uvicorn_access.handlers = [] # Remove default handlers + handler = logging.StreamHandler() + handler.setFormatter(JsonFormatter()) + uvicorn_access.addHandler(handler) + uvicorn_access.setLevel(logging.WARNING) + # telegram access log + logging.getLogger("aiohttp.access").setLevel(logging.WARNING) diff --git a/intentkit/utils/random.py b/intentkit/utils/random.py new file mode 100644 index 00000000..9d230d89 --- /dev/null +++ b/intentkit/utils/random.py @@ -0,0 +1,16 @@ +import random +import string + + +def generate_tx_confirm_string(length: int) -> str: + """ + Generates a random string of the specified length for the transaction reference. + + Args: + length: The desired length of the random string. + + Returns: + A random string of the specified length. + """ + letters_and_digits = string.ascii_letters + string.digits + return "tx-" + "".join(random.choice(letters_and_digits) for _ in range(length)) diff --git a/intentkit/utils/s3.py b/intentkit/utils/s3.py new file mode 100644 index 00000000..32270c81 --- /dev/null +++ b/intentkit/utils/s3.py @@ -0,0 +1,267 @@ +""" +S3 utility module for storing and retrieving images from AWS S3. +""" + +import logging +from enum import Enum +from io import BytesIO +from typing import Optional + +import boto3 +import filetype +import httpx +from botocore.exceptions import ClientError +from mypy_boto3_s3.client import S3Client + +logger = logging.getLogger(__name__) + +# Global variables for S3 configuration +_bucket: Optional[str] = None +_client: Optional[S3Client] = None +_prefix: Optional[str] = None +_cdn_url: Optional[str] = None + + +def init_s3(bucket: str, cdn_url: str, env: str) -> None: + """ + Initialize S3 configuration. + + Args: + bucket: S3 bucket name + cdn_url: CDN URL for the S3 bucket + env: Environment name for the prefix + + Raises: + ValueError: If bucket or cdn_url is empty + """ + global _bucket, _client, _prefix, _cdn_url + + if not bucket: + raise ValueError("S3 bucket name cannot be empty") + if not cdn_url: + raise ValueError("S3 CDN URL cannot be empty") + + _bucket = bucket + _cdn_url = cdn_url + _prefix = f"{env}/intentkit/" + _client = boto3.client("s3") + + logger.info(f"S3 initialized with bucket: {bucket}, prefix: {_prefix}") + + +async def store_image(url: str, key: str) -> str: + """ + Store an image from a URL to S3 asynchronously.
+ + Args: + url: Source URL of the image + key: Key to store the image under (without prefix) + + Returns: + str: The CDN URL of the stored image, or the original URL if S3 is not initialized + + Raises: + ClientError: If the upload fails + httpx.HTTPError: If the download fails + """ + if not _client or not _bucket or not _prefix or not _cdn_url: + # If S3 is not initialized, log and return the original URL + logger.info("S3 not initialized. Returning original URL.") + return url + + try: + # Download the image from the URL asynchronously + async with httpx.AsyncClient() as client: + response = await client.get(url, follow_redirects=True) + response.raise_for_status() + + # Prepare the S3 key with prefix + prefixed_key = f"{_prefix}{key}" + + # Use BytesIO to create a file-like object that implements read + file_obj = BytesIO(response.content) + + # Determine the correct content type + content_type = response.headers.get("Content-Type", "") + if content_type == "binary/octet-stream" or not content_type: + # Try to detect the image type from the content + kind = filetype.guess(response.content) + if kind and kind.mime.startswith("image/"): + content_type = kind.mime + else: + # Default to JPEG if detection fails + content_type = "image/jpeg" + + # Upload to S3 + _client.upload_fileobj( + file_obj, + _bucket, + prefixed_key, + ExtraArgs={"ContentType": content_type, "ContentDisposition": "inline"}, + ) + + # Return the CDN URL + cdn_url = f"{_cdn_url}/{prefixed_key}" + logger.info(f"Image uploaded successfully to {cdn_url}") + return cdn_url + + except httpx.HTTPError as e: + logger.error(f"Failed to download image from URL {url}: {str(e)}") + raise + except ClientError as e: + logger.error(f"Failed to upload image to S3: {str(e)}") + raise + + +async def store_image_bytes( + image_bytes: bytes, key: str, content_type: Optional[str] = None +) -> str: + """ + Store raw image bytes to S3. + + Args: + image_bytes: Raw bytes of the image to store + key: Key to store the image under (without prefix) + content_type: Content type of the image. If None, will attempt to detect it. + + Returns: + str: The CDN URL of the stored image, or an empty string if S3 is not initialized + + Raises: + ClientError: If the upload fails + ValueError: If S3 is not initialized or image_bytes is empty + """ + if not _client or not _bucket or not _prefix or not _cdn_url: + # If S3 is not initialized, log and return empty string + logger.info("S3 not initialized. 
Cannot store image bytes.") + return "" + + if not image_bytes: + raise ValueError("Image bytes cannot be empty") + + try: + # Prepare the S3 key with prefix + prefixed_key = f"{_prefix}{key}" + + # Use BytesIO to create a file-like object that implements read + file_obj = BytesIO(image_bytes) + + # Determine the correct content type if not provided + if not content_type: + # Try to detect the image type from the content + kind = filetype.guess(image_bytes) + if kind and kind.mime.startswith("image/"): + content_type = kind.mime + else: + # Default to JPEG if detection fails + content_type = "image/jpeg" + + logger.info("Uploading image to S3") + # Upload to S3 + _client.upload_fileobj( + file_obj, + _bucket, + prefixed_key, + ExtraArgs={"ContentType": content_type, "ContentDisposition": "inline"}, + ) + + # Return the CDN URL + cdn_url = f"{_cdn_url}/{prefixed_key}" + logger.info(f"Image uploaded successfully to {cdn_url}") + return cdn_url + + except ClientError as e: + logger.error(f"Failed to upload image bytes to S3: {str(e)}") + raise
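Typical usage of the byte-upload path, assuming S3 was initialized at application startup (bucket, CDN host, and key are placeholders):

    init_s3(bucket="my-bucket", cdn_url="https://cdn.example.com", env="local")

    async def save_avatar(png_bytes: bytes) -> str:
        # Stored as local/intentkit/avatars/agent-1.png; returns the CDN URL
        return await store_image_bytes(
            png_bytes, "avatars/agent-1.png", content_type="image/png"
        )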
+ + +class FileType(str, Enum): + IMAGE = "image" + VIDEO = "video" + AUDIO = "audio" + PDF = "pdf" + + +async def store_file_bytes( + file_bytes: bytes, + key: str, + file_type: FileType, + size_limit_bytes: Optional[int] = None, +) -> str: + """ + Store raw file bytes (image, video, audio, pdf) to S3. + + Args: + file_bytes: Raw bytes of the file to store + key: Key to store the file under (without prefix) + file_type: Type of the file (image, video, audio, pdf) + size_limit_bytes: Optional size limit in bytes + + Returns: + str: The CDN URL of the stored file, or an empty string if S3 is not initialized + + Raises: + ClientError: If the upload fails + ValueError: If S3 is not initialized, file_bytes is empty, or file exceeds size limit + """ + if not _client or not _bucket or not _prefix or not _cdn_url: + logger.info("S3 not initialized. Cannot store file bytes.") + return "" + if not file_bytes: + raise ValueError("File bytes cannot be empty") + + if size_limit_bytes is not None and len(file_bytes) > size_limit_bytes: + raise ValueError( + f"File size exceeds the allowed limit of {size_limit_bytes} bytes" + ) + + try: + # Prepare the S3 key with prefix + prefixed_key = f"{_prefix}{key}" + + # Use BytesIO to create a file-like object that implements read + file_obj = BytesIO(file_bytes) + + # Determine content type based on file_type + content_type = "" + if file_type == FileType.IMAGE: + kind = filetype.guess(file_bytes) + if kind and kind.mime.startswith("image/"): + content_type = kind.mime + else: + content_type = "image/jpeg" + elif file_type == FileType.VIDEO: + kind = filetype.guess(file_bytes) + if kind and kind.mime.startswith("video/"): + content_type = kind.mime + else: + content_type = "video/mp4" + elif file_type == FileType.AUDIO: + kind = filetype.guess(file_bytes) + if kind and kind.mime.startswith("audio/"): + content_type = kind.mime + else: + content_type = "audio/mpeg" + elif file_type == FileType.PDF: + content_type = "application/pdf" + else: + raise ValueError(f"Unsupported file type: {file_type}") + + logger.info(f"Uploading {file_type} to S3 with content type {content_type}") + + # Upload to S3 + _client.upload_fileobj( + file_obj, + _bucket, + prefixed_key, + ExtraArgs={"ContentType": content_type, "ContentDisposition": "inline"}, + ) + + # Return the CDN URL + cdn_url = f"{_cdn_url}/{prefixed_key}" + logger.info(f"{file_type} uploaded successfully to {cdn_url}") + return cdn_url + + except ClientError as e: + logger.error(f"Failed to upload {file_type} bytes to S3: {str(e)}") + raise
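And the generic file path, here capping a PDF upload at 5 MB (the variable names and sizes are illustrative):

    url = await store_file_bytes(
        pdf_bytes,  # raw bytes of a user-supplied PDF
        "docs/report.pdf",
        FileType.PDF,
        size_limit_bytes=5 * 1024 * 1024,
    )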
diff --git a/intentkit/utils/slack_alert.py b/intentkit/utils/slack_alert.py new file mode 100644 index 00000000..c854cfbd --- /dev/null +++ b/intentkit/utils/slack_alert.py @@ -0,0 +1,79 @@ +""" +Slack notification module for sending messages to Slack channels. +""" + +import logging +from typing import Optional + +from slack_sdk import WebClient +from slack_sdk.errors import SlackApiError + +logger = logging.getLogger(__name__) + +# Global variables for Slack configuration +_slack_token: Optional[str] = None +_slack_channel: Optional[str] = None +_slack_client: Optional[WebClient] = None + + +def init_slack(token: str, channel: str) -> None: + """ + Initialize Slack configuration. + + Args: + token: Slack bot token + channel: Default Slack channel ID or name + + Raises: + ValueError: If token or channel is empty + """ + if not token: + raise ValueError("Slack token cannot be empty") + if not channel: + raise ValueError("Slack channel cannot be empty") + + global _slack_token, _slack_channel, _slack_client + _slack_token = token + _slack_channel = channel + _slack_client = WebClient(token=token) + + +def send_slack_message( + message: str, + blocks: Optional[list] = None, + attachments: Optional[list] = None, + thread_ts: Optional[str] = None, + channel: Optional[str] = None, +): + """ + Send a message to a Slack channel. + + Args: + message: The message text to send + blocks: Optional blocks for rich message formatting (see Slack Block Kit) + attachments: Optional attachments for the message + thread_ts: Optional thread timestamp to reply to a thread + channel: Optional channel override. If not provided, uses the default channel + + Note: + If Slack is not initialized, the message is written to the local log + instead of being sent. SlackApiError failures are caught and logged + rather than re-raised. + """ + if not _slack_client or not _slack_channel: + # Write the input message to the log and return + logger.info("Slack not initialized") + logger.info(message) + if blocks: + logger.info(blocks) + if attachments: + logger.info(attachments) + return + + try: + response = _slack_client.chat_postMessage( + channel=channel or _slack_channel, + text=message, + blocks=blocks, + attachments=attachments, + thread_ts=thread_ts, + ) + logger.info(f"Message sent successfully to channel {channel or _slack_channel}") + return response + except SlackApiError as e: + logger.error(f"Failed to send Slack message: {str(e)}") diff --git a/intentkit/utils/tx.py b/intentkit/utils/tx.py new file mode 100644 index 00000000..536362c9 --- /dev/null +++ b/intentkit/utils/tx.py @@ -0,0 +1,37 @@ +from pydantic import BaseModel, Field +from web3 import Web3 + + +class EvmContractWrapper: + def __init__(self, rpc_url: str, abi: list[dict], tx_data: dict): + w3 = Web3(Web3.HTTPProvider(rpc_url)) + contract = w3.eth.contract(abi=abi) + + self.evm_tx = EvmTx(**tx_data) + self.fn, self.fn_args = contract.decode_function_input(self.evm_tx.data) + + for name, arg in self.fn_args.items(): + if isinstance(arg, bytes): + self.fn_args[name] = arg.hex() # Convert bytes to hexadecimal string + elif isinstance(arg, list) and all(isinstance(item, bytes) for item in arg): + self.fn_args[name] = [ + item.hex() for item in arg + ] # Convert list of bytes to list of hex strings + + @property + def fn_and_args(self): + return self.fn, self.fn_args + + @property + def dst_addr(self): + return self.evm_tx.to + + +class EvmTx(BaseModel): + data: str = Field(None, description="Data of the transaction.") + to: str = Field(None, description="Address of the receiver of the transaction.") + from_: str = Field(None, alias="from", description="Address of the sender of the transaction.") + value: str = Field(None, description="Amount of token to send.") + gas: int | None = Field(None, description="Gas amount.") + gasPrice: int | None = Field(None, description="Gas Price.") + nonce: int | None = Field(None, description="Nonce of transaction.") diff --git a/intentkit/uv.lock b/intentkit/uv.lock new file mode 100644 index 00000000..42b51121 --- /dev/null +++ b/intentkit/uv.lock @@ -0,0 +1,3375 @@ +version = 1 +revision = 3 +requires-python = ">=3.12" +resolution-markers = [ + "python_full_version >= '3.13'", + "python_full_version < '3.13'", +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.11.16" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache"
}, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/d9/1c4721d143e14af753f2bf5e3b681883e1f24b592c0482df6fa6e33597fa/aiohttp-3.11.16.tar.gz", hash = "sha256:16f8a2c9538c14a557b4d309ed4d0a7c60f0253e8ed7b6c9a2859a7582f8b1b8", size = 7676826, upload-time = "2025-04-02T02:17:44.74Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/38/100d01cbc60553743baf0fba658cb125f8ad674a8a771f765cdc155a890d/aiohttp-3.11.16-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:911a6e91d08bb2c72938bc17f0a2d97864c531536b7832abee6429d5296e5b27", size = 704881, upload-time = "2025-04-02T02:16:09.26Z" }, + { url = "https://files.pythonhosted.org/packages/21/ed/b4102bb6245e36591209e29f03fe87e7956e54cb604ee12e20f7eb47f994/aiohttp-3.11.16-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac13b71761e49d5f9e4d05d33683bbafef753e876e8e5a7ef26e937dd766713", size = 464564, upload-time = "2025-04-02T02:16:10.781Z" }, + { url = "https://files.pythonhosted.org/packages/3b/e1/a9ab6c47b62ecee080eeb33acd5352b40ecad08fb2d0779bcc6739271745/aiohttp-3.11.16-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fd36c119c5d6551bce374fcb5c19269638f8d09862445f85a5a48596fd59f4bb", size = 456548, upload-time = "2025-04-02T02:16:12.764Z" }, + { url = "https://files.pythonhosted.org/packages/80/ad/216c6f71bdff2becce6c8776f0aa32cb0fa5d83008d13b49c3208d2e4016/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d489d9778522fbd0f8d6a5c6e48e3514f11be81cb0a5954bdda06f7e1594b321", size = 1691749, upload-time = "2025-04-02T02:16:14.304Z" }, + { url = "https://files.pythonhosted.org/packages/bd/ea/7df7bcd3f4e734301605f686ffc87993f2d51b7acb6bcc9b980af223f297/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69a2cbd61788d26f8f1e626e188044834f37f6ae3f937bd9f08b65fc9d7e514e", size = 1736874, upload-time = "2025-04-02T02:16:16.538Z" }, + { url = "https://files.pythonhosted.org/packages/51/41/c7724b9c87a29b7cfd1202ec6446bae8524a751473d25e2ff438bc9a02bf/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd464ba806e27ee24a91362ba3621bfc39dbbb8b79f2e1340201615197370f7c", size = 1786885, upload-time = "2025-04-02T02:16:18.268Z" }, + { url = "https://files.pythonhosted.org/packages/86/b3/f61f8492fa6569fa87927ad35a40c159408862f7e8e70deaaead349e2fba/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce63ae04719513dd2651202352a2beb9f67f55cb8490c40f056cea3c5c355ce", size = 1698059, upload-time = "2025-04-02T02:16:20.234Z" }, + { url = "https://files.pythonhosted.org/packages/ce/be/7097cf860a9ce8bbb0e8960704e12869e111abcd3fbd245153373079ccec/aiohttp-3.11.16-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09b00dd520d88eac9d1768439a59ab3d145065c91a8fab97f900d1b5f802895e", size = 1626527, upload-time = "2025-04-02T02:16:22.092Z" }, + { url = "https://files.pythonhosted.org/packages/1d/1d/aaa841c340e8c143a8d53a1f644c2a2961c58cfa26e7b398d6bf75cf5d23/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7f6428fee52d2bcf96a8aa7b62095b190ee341ab0e6b1bcf50c615d7966fd45b", size = 1644036, upload-time = "2025-04-02T02:16:23.707Z" }, + { url = "https://files.pythonhosted.org/packages/2c/88/59d870f76e9345e2b149f158074e78db457985c2b4da713038d9da3020a8/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_armv7l.whl", hash = 
"sha256:13ceac2c5cdcc3f64b9015710221ddf81c900c5febc505dbd8f810e770011540", size = 1685270, upload-time = "2025-04-02T02:16:25.874Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b1/c6686948d4c79c3745595efc469a9f8a43cab3c7efc0b5991be65d9e8cb8/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fadbb8f1d4140825069db3fedbbb843290fd5f5bc0a5dbd7eaf81d91bf1b003b", size = 1650852, upload-time = "2025-04-02T02:16:27.556Z" }, + { url = "https://files.pythonhosted.org/packages/fe/94/3e42a6916fd3441721941e0f1b8438e1ce2a4c49af0e28e0d3c950c9b3c9/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6a792ce34b999fbe04a7a71a90c74f10c57ae4c51f65461a411faa70e154154e", size = 1704481, upload-time = "2025-04-02T02:16:29.573Z" }, + { url = "https://files.pythonhosted.org/packages/b1/6d/6ab5854ff59b27075c7a8c610597d2b6c38945f9a1284ee8758bc3720ff6/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f4065145bf69de124accdd17ea5f4dc770da0a6a6e440c53f6e0a8c27b3e635c", size = 1735370, upload-time = "2025-04-02T02:16:31.191Z" }, + { url = "https://files.pythonhosted.org/packages/73/2a/08a68eec3c99a6659067d271d7553e4d490a0828d588e1daa3970dc2b771/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa73e8c2656a3653ae6c307b3f4e878a21f87859a9afab228280ddccd7369d71", size = 1697619, upload-time = "2025-04-02T02:16:32.873Z" }, + { url = "https://files.pythonhosted.org/packages/61/d5/fea8dbbfb0cd68fbb56f0ae913270a79422d9a41da442a624febf72d2aaf/aiohttp-3.11.16-cp312-cp312-win32.whl", hash = "sha256:f244b8e541f414664889e2c87cac11a07b918cb4b540c36f7ada7bfa76571ea2", size = 411710, upload-time = "2025-04-02T02:16:34.525Z" }, + { url = "https://files.pythonhosted.org/packages/33/fb/41cde15fbe51365024550bf77b95a4fc84ef41365705c946da0421f0e1e0/aiohttp-3.11.16-cp312-cp312-win_amd64.whl", hash = "sha256:23a15727fbfccab973343b6d1b7181bfb0b4aa7ae280f36fd2f90f5476805682", size = 438012, upload-time = "2025-04-02T02:16:36.103Z" }, + { url = "https://files.pythonhosted.org/packages/52/52/7c712b2d9fb4d5e5fd6d12f9ab76e52baddfee71e3c8203ca7a7559d7f51/aiohttp-3.11.16-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a3814760a1a700f3cfd2f977249f1032301d0a12c92aba74605cfa6ce9f78489", size = 698005, upload-time = "2025-04-02T02:16:37.923Z" }, + { url = "https://files.pythonhosted.org/packages/51/3e/61057814f7247666d43ac538abcd6335b022869ade2602dab9bf33f607d2/aiohttp-3.11.16-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9b751a6306f330801665ae69270a8a3993654a85569b3469662efaad6cf5cc50", size = 461106, upload-time = "2025-04-02T02:16:39.961Z" }, + { url = "https://files.pythonhosted.org/packages/4f/85/6b79fb0ea6e913d596d5b949edc2402b20803f51b1a59e1bbc5bb7ba7569/aiohttp-3.11.16-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ad497f38a0d6c329cb621774788583ee12321863cd4bd9feee1effd60f2ad133", size = 453394, upload-time = "2025-04-02T02:16:41.562Z" }, + { url = "https://files.pythonhosted.org/packages/4b/04/e1bb3fcfbd2c26753932c759593a32299aff8625eaa0bf8ff7d9c0c34a36/aiohttp-3.11.16-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca37057625693d097543bd88076ceebeb248291df9d6ca8481349efc0b05dcd0", size = 1666643, upload-time = "2025-04-02T02:16:43.62Z" }, + { url = "https://files.pythonhosted.org/packages/0e/27/97bc0fdd1f439b8f060beb3ba8fb47b908dc170280090801158381ad7942/aiohttp-3.11.16-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a5abcbba9f4b463a45c8ca8b7720891200658f6f46894f79517e6cd11f3405ca", 
size = 1721948, upload-time = "2025-04-02T02:16:45.617Z" }, + { url = "https://files.pythonhosted.org/packages/2c/4f/bc4c5119e75c05ef15c5670ef1563bbe25d4ed4893b76c57b0184d815e8b/aiohttp-3.11.16-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f420bfe862fb357a6d76f2065447ef6f484bc489292ac91e29bc65d2d7a2c84d", size = 1774454, upload-time = "2025-04-02T02:16:48.562Z" }, + { url = "https://files.pythonhosted.org/packages/73/5b/54b42b2150bb26fdf795464aa55ceb1a49c85f84e98e6896d211eabc6670/aiohttp-3.11.16-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58ede86453a6cf2d6ce40ef0ca15481677a66950e73b0a788917916f7e35a0bb", size = 1677785, upload-time = "2025-04-02T02:16:50.367Z" }, + { url = "https://files.pythonhosted.org/packages/10/ee/a0fe68916d3f82eae199b8535624cf07a9c0a0958c7a76e56dd21140487a/aiohttp-3.11.16-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6fdec0213244c39973674ca2a7f5435bf74369e7d4e104d6c7473c81c9bcc8c4", size = 1608456, upload-time = "2025-04-02T02:16:52.158Z" }, + { url = "https://files.pythonhosted.org/packages/8b/48/83afd779242b7cf7e1ceed2ff624a86d3221e17798061cf9a79e0b246077/aiohttp-3.11.16-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:72b1b03fb4655c1960403c131740755ec19c5898c82abd3961c364c2afd59fe7", size = 1622424, upload-time = "2025-04-02T02:16:54.386Z" }, + { url = "https://files.pythonhosted.org/packages/6f/27/452f1d5fca1f516f9f731539b7f5faa9e9d3bf8a3a6c3cd7c4b031f20cbd/aiohttp-3.11.16-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:780df0d837276276226a1ff803f8d0fa5f8996c479aeef52eb040179f3156cbd", size = 1660943, upload-time = "2025-04-02T02:16:56.887Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e1/5c7d63143b8d00c83b958b9e78e7048c4a69903c760c1e329bf02bac57a1/aiohttp-3.11.16-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ecdb8173e6c7aa09eee342ac62e193e6904923bd232e76b4157ac0bfa670609f", size = 1622797, upload-time = "2025-04-02T02:16:58.676Z" }, + { url = "https://files.pythonhosted.org/packages/46/9e/2ac29cca2746ee8e449e73cd2fcb3d454467393ec03a269d50e49af743f1/aiohttp-3.11.16-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a6db7458ab89c7d80bc1f4e930cc9df6edee2200127cfa6f6e080cf619eddfbd", size = 1687162, upload-time = "2025-04-02T02:17:01.076Z" }, + { url = "https://files.pythonhosted.org/packages/ad/6b/eaa6768e02edebaf37d77f4ffb74dd55f5cbcbb6a0dbf798ccec7b0ac23b/aiohttp-3.11.16-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:2540ddc83cc724b13d1838026f6a5ad178510953302a49e6d647f6e1de82bc34", size = 1718518, upload-time = "2025-04-02T02:17:03.388Z" }, + { url = "https://files.pythonhosted.org/packages/e5/18/dda87cbad29472a51fa058d6d8257dfce168289adaeb358b86bd93af3b20/aiohttp-3.11.16-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:3b4e6db8dc4879015b9955778cfb9881897339c8fab7b3676f8433f849425913", size = 1675254, upload-time = "2025-04-02T02:17:05.579Z" }, + { url = "https://files.pythonhosted.org/packages/32/d9/d2fb08c614df401d92c12fcbc60e6e879608d5e8909ef75c5ad8d4ad8aa7/aiohttp-3.11.16-cp313-cp313-win32.whl", hash = "sha256:493910ceb2764f792db4dc6e8e4b375dae1b08f72e18e8f10f18b34ca17d0979", size = 410698, upload-time = "2025-04-02T02:17:07.499Z" }, + { url = "https://files.pythonhosted.org/packages/ce/ed/853e36d5a33c24544cfa46585895547de152dfef0b5c79fa675f6e4b7b87/aiohttp-3.11.16-cp313-cp313-win_amd64.whl", hash = "sha256:42864e70a248f5f6a49fdaf417d9bc62d6e4d8ee9695b24c5916cb4bb666c802", size = 436395, 
upload-time = "2025-04-02T02:17:09.566Z" }, +] + +[[package]] +name = "aiohttp-retry" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/61/ebda4d8e3d8cfa1fd3db0fb428db2dd7461d5742cea35178277ad180b033/aiohttp_retry-2.9.1.tar.gz", hash = "sha256:8eb75e904ed4ee5c2ec242fefe85bf04240f685391c4879d8f541d6028ff01f1", size = 13608, upload-time = "2024-11-06T10:44:54.574Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/99/84ba7273339d0f3dfa57901b846489d2e5c2cd731470167757f1935fffbd/aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54", size = 9981, upload-time = "2024-11-06T10:44:52.917Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "aiosqlite" +version = "0.21.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/13/7d/8bca2bf9a247c2c5dfeec1d7a5f40db6518f88d314b8bca9da29670d2671/aiosqlite-0.21.0.tar.gz", hash = "sha256:131bb8056daa3bc875608c631c678cda73922a2d4ba8aec373b19f18c17e7aa3", size = 13454, upload-time = "2025-02-03T07:30:16.235Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/10/6c25ed6de94c49f88a91fa5018cb4c0f3625f31d5be9f771ebe5cc7cd506/aiosqlite-0.21.0-py3-none-any.whl", hash = "sha256:2549cf4057f95f53dcba16f2b64e8e2791d7e1adedb13197dd8ed77bb226d7d0", size = 15792, upload-time = "2025-02-03T07:30:13.6Z" }, +] + +[[package]] +name = "allora-sdk" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "annotated-types" }, + { name = "cachetools" }, + { name = "certifi" }, + { name = "chardet" }, + { name = "charset-normalizer" }, + { name = "colorama" }, + { name = "distlib" }, + { name = "filelock" }, + { name = "idna" }, + { name = "packaging" }, + { name = "platformdirs" }, + { name = "pluggy" }, + { name = "pydantic" }, + { name = "pydantic-core" }, + { name = "pyproject-api" }, + { name = "requests" }, + { name = "tox" }, + { name = "typing-extensions" }, + { name = "urllib3" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/96/90/46df3515a79bcad5733633f38a7a8d6c7826c48c3e48fa89e2d081bc70f9/allora_sdk-0.2.3.tar.gz", hash = "sha256:d976c17816566114f45327cc843d9a996d807e48d1697c3cbd4355095c5a04c6", size = 6273, upload-time = "2025-04-14T11:35:53.406Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/65/73e1ecfcc802b6178377c2156190dfc84639d9784299119e1b4ca4ca1c68/allora_sdk-0.2.3-py3-none-any.whl", hash = 
"sha256:ca71c39f7f6410dbb9bc7ab6569eb91a8515490d0c6cc54f9512b7af35ab214e", size = 5008, upload-time = "2025-04-14T11:35:52.01Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anthropic" +version = "0.64.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/4f/f2b880cba1a76f3acc7d5eb2ae217632eac1b8cef5ed3027493545c59eba/anthropic-0.64.0.tar.gz", hash = "sha256:3d496c91a63dff64f451b3e8e4b238a9640bf87b0c11d0b74ddc372ba5a3fe58", size = 427893, upload-time = "2025-08-13T17:09:49.915Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/b2/2d268bcd5d6441df9dc0ebebc67107657edb8b0150d3fda1a5b81d1bec45/anthropic-0.64.0-py3-none-any.whl", hash = "sha256:6f5f7d913a6a95eb7f8e1bda4e75f76670e8acd8d4cd965e02e2a256b0429dd1", size = 297244, upload-time = "2025-08-13T17:09:47.908Z" }, +] + +[[package]] +name = "anyio" +version = "4.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" }, +] + +[[package]] +name = "asn1crypto" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/cf/d547feed25b5244fcb9392e288ff9fdc3280b10260362fc45d37a798a6ee/asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c", size = 121080, upload-time = "2022-03-15T14:46:52.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7f/09065fd9e27da0eda08b4d6897f1c13535066174cc023af248fc2a8d5e5a/asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67", size = 105045, upload-time = "2022-03-15T14:46:51.055Z" }, +] + +[[package]] +name = "async-lru" +version = "2.0.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/4d/71ec4d3939dc755264f680f6c2b4906423a304c3d18e96853f0a595dfe97/async_lru-2.0.5.tar.gz", 
hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb", size = 10380, upload-time = "2025-03-16T17:25:36.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/49/d10027df9fce941cb8184e78a02857af36360d33e1721df81c5ed2179a1a/async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943", size = 6069, upload-time = "2025-03-16T17:25:35.422Z" }, +] + +[[package]] +name = "asyncio" +version = "4.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/71/ea/26c489a11f7ca862d5705db67683a7361ce11c23a7b98fc6c2deaeccede2/asyncio-4.0.0.tar.gz", hash = "sha256:570cd9e50db83bc1629152d4d0b7558d6451bb1bfd5dfc2e935d96fc2f40329b", size = 5371, upload-time = "2025-08-05T02:51:46.605Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/57/64/eff2564783bd650ca25e15938d1c5b459cda997574a510f7de69688cb0b4/asyncio-4.0.0-py3-none-any.whl", hash = "sha256:c1eddb0659231837046809e68103969b2bef8b0400d59cfa6363f6b5ed8cc88b", size = 5555, upload-time = "2025-08-05T02:51:45.767Z" }, +] + +[[package]] +name = "asyncpg" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/4c/7c991e080e106d854809030d8584e15b2e996e26f16aee6d757e387bc17d/asyncpg-0.30.0.tar.gz", hash = "sha256:c551e9928ab6707602f44811817f82ba3c446e018bfe1d3abecc8ba5f3eac851", size = 957746, upload-time = "2024-10-20T00:30:41.127Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/64/9d3e887bb7b01535fdbc45fbd5f0a8447539833b97ee69ecdbb7a79d0cb4/asyncpg-0.30.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c902a60b52e506d38d7e80e0dd5399f657220f24635fee368117b8b5fce1142e", size = 673162, upload-time = "2024-10-20T00:29:41.88Z" }, + { url = "https://files.pythonhosted.org/packages/6e/eb/8b236663f06984f212a087b3e849731f917ab80f84450e943900e8ca4052/asyncpg-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aca1548e43bbb9f0f627a04666fedaca23db0a31a84136ad1f868cb15deb6e3a", size = 637025, upload-time = "2024-10-20T00:29:43.352Z" }, + { url = "https://files.pythonhosted.org/packages/cc/57/2dc240bb263d58786cfaa60920779af6e8d32da63ab9ffc09f8312bd7a14/asyncpg-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c2a2ef565400234a633da0eafdce27e843836256d40705d83ab7ec42074efb3", size = 3496243, upload-time = "2024-10-20T00:29:44.922Z" }, + { url = "https://files.pythonhosted.org/packages/f4/40/0ae9d061d278b10713ea9021ef6b703ec44698fe32178715a501ac696c6b/asyncpg-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1292b84ee06ac8a2ad8e51c7475aa309245874b61333d97411aab835c4a2f737", size = 3575059, upload-time = "2024-10-20T00:29:46.891Z" }, + { url = "https://files.pythonhosted.org/packages/c3/75/d6b895a35a2c6506952247640178e5f768eeb28b2e20299b6a6f1d743ba0/asyncpg-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0f5712350388d0cd0615caec629ad53c81e506b1abaaf8d14c93f54b35e3595a", size = 3473596, upload-time = "2024-10-20T00:29:49.201Z" }, + { url = "https://files.pythonhosted.org/packages/c8/e7/3693392d3e168ab0aebb2d361431375bd22ffc7b4a586a0fc060d519fae7/asyncpg-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:db9891e2d76e6f425746c5d2da01921e9a16b5a71a1c905b13f30e12a257c4af", size = 3641632, upload-time = "2024-10-20T00:29:50.768Z" }, + { url = 
"https://files.pythonhosted.org/packages/32/ea/15670cea95745bba3f0352341db55f506a820b21c619ee66b7d12ea7867d/asyncpg-0.30.0-cp312-cp312-win32.whl", hash = "sha256:68d71a1be3d83d0570049cd1654a9bdfe506e794ecc98ad0873304a9f35e411e", size = 560186, upload-time = "2024-10-20T00:29:52.394Z" }, + { url = "https://files.pythonhosted.org/packages/7e/6b/fe1fad5cee79ca5f5c27aed7bd95baee529c1bf8a387435c8ba4fe53d5c1/asyncpg-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a0292c6af5c500523949155ec17b7fe01a00ace33b68a476d6b5059f9630305", size = 621064, upload-time = "2024-10-20T00:29:53.757Z" }, + { url = "https://files.pythonhosted.org/packages/3a/22/e20602e1218dc07692acf70d5b902be820168d6282e69ef0d3cb920dc36f/asyncpg-0.30.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:05b185ebb8083c8568ea8a40e896d5f7af4b8554b64d7719c0eaa1eb5a5c3a70", size = 670373, upload-time = "2024-10-20T00:29:55.165Z" }, + { url = "https://files.pythonhosted.org/packages/3d/b3/0cf269a9d647852a95c06eb00b815d0b95a4eb4b55aa2d6ba680971733b9/asyncpg-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c47806b1a8cbb0a0db896f4cd34d89942effe353a5035c62734ab13b9f938da3", size = 634745, upload-time = "2024-10-20T00:29:57.14Z" }, + { url = "https://files.pythonhosted.org/packages/8e/6d/a4f31bf358ce8491d2a31bfe0d7bcf25269e80481e49de4d8616c4295a34/asyncpg-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b6fde867a74e8c76c71e2f64f80c64c0f3163e687f1763cfaf21633ec24ec33", size = 3512103, upload-time = "2024-10-20T00:29:58.499Z" }, + { url = "https://files.pythonhosted.org/packages/96/19/139227a6e67f407b9c386cb594d9628c6c78c9024f26df87c912fabd4368/asyncpg-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46973045b567972128a27d40001124fbc821c87a6cade040cfcd4fa8a30bcdc4", size = 3592471, upload-time = "2024-10-20T00:30:00.354Z" }, + { url = "https://files.pythonhosted.org/packages/67/e4/ab3ca38f628f53f0fd28d3ff20edff1c975dd1cb22482e0061916b4b9a74/asyncpg-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9110df111cabc2ed81aad2f35394a00cadf4f2e0635603db6ebbd0fc896f46a4", size = 3496253, upload-time = "2024-10-20T00:30:02.794Z" }, + { url = "https://files.pythonhosted.org/packages/ef/5f/0bf65511d4eeac3a1f41c54034a492515a707c6edbc642174ae79034d3ba/asyncpg-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04ff0785ae7eed6cc138e73fc67b8e51d54ee7a3ce9b63666ce55a0bf095f7ba", size = 3662720, upload-time = "2024-10-20T00:30:04.501Z" }, + { url = "https://files.pythonhosted.org/packages/e7/31/1513d5a6412b98052c3ed9158d783b1e09d0910f51fbe0e05f56cc370bc4/asyncpg-0.30.0-cp313-cp313-win32.whl", hash = "sha256:ae374585f51c2b444510cdf3595b97ece4f233fde739aa14b50e0d64e8a7a590", size = 560404, upload-time = "2024-10-20T00:30:06.537Z" }, + { url = "https://files.pythonhosted.org/packages/c8/a4/cec76b3389c4c5ff66301cd100fe88c318563ec8a520e0b2e792b5b84972/asyncpg-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:f59b430b8e27557c3fb9869222559f7417ced18688375825f8f12302c34e915e", size = 621623, upload-time = "2024-10-20T00:30:09.024Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "aws-secretsmanager-caching" +version = "1.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/d3/e6bb9f29da0081b9d00490a43612a47dbc4996e195d7e57e013124166b73/aws_secretsmanager_caching-1.1.3.tar.gz", hash = "sha256:f6d6ec9d43e0dbe4f6d5debdf36b4cb691d15a967b358b2575f5d91974a6c0ff", size = 27267, upload-time = "2024-06-20T21:14:50.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/2d/f6ffed80e0299b14c5b945d208ba892f2a13270dff7d88f439890b4fd315/aws_secretsmanager_caching-1.1.3-py3-none-any.whl", hash = "sha256:5dd8588520335ca5cc7f5ae5948e5e85f2f5b58c1341bda0db4acf6399806f78", size = 18427, upload-time = "2024-06-20T21:14:41.638Z" }, +] + +[[package]] +name = "base58" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/45/8ae61209bb9015f516102fa559a2914178da1d5868428bd86a1b4421141d/base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c", size = 6528, upload-time = "2021-10-30T22:12:17.858Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/45/ec96b29162a402fc4c1c5512d114d7b3787b9d1c2ec241d9568b4816ee23/base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2", size = 5621, upload-time = "2021-10-30T22:12:16.658Z" }, +] + +[[package]] +name = "bcl" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8c/78/57a3b26ac13312ed5901f1089f0351dfd958d19e96242d557e25c1498a95/bcl-2.3.1.tar.gz", hash = "sha256:2a10f1e4fde1c146594fe835f29c9c9753a9f1c449617578c1473d6371da9853", size = 16823, upload-time = "2022-10-04T01:56:50.961Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/93/f712cab57d0424ff65b380e22cb286b35b8bc0ba7997926dc18c8600f451/bcl-2.3.1-cp310-abi3-macosx_10_10_universal2.whl", hash = "sha256:cf59d66d4dd653b43b197ad5fc140a131db7f842c192d9836f5a6fe2bee9019e", size = 525696, upload-time = "2022-10-04T01:56:15.925Z" }, + { url = "https://files.pythonhosted.org/packages/1a/a7/984bdb769c5ad2549fafc9365b0f6156fbeeec7df524eb064e65b164f8d0/bcl-2.3.1-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7696201b8111e877d21c1afd5a376f27975688658fa9001278f15e9fa3da2e0", size = 740158, upload-time = "2022-10-04T01:56:18.596Z" }, + { url = "https://files.pythonhosted.org/packages/36/e3/c860ae7aa62ddacf0ff4e1d2c9741f0d2ab65fec00e3890e8ac0f5463629/bcl-2.3.1-cp310-abi3-win32.whl", hash = "sha256:28f55e08e929309eacf09118b29ffb4d110ce3702eef18e98b8b413d0dfb1bf9", size = 88671, upload-time = "2022-10-04T01:56:20.644Z" }, + { url = "https://files.pythonhosted.org/packages/30/2e/a78ec72cfc2d6f438bd2978e81e05e708953434db8614a9f4f20bb7fa606/bcl-2.3.1-cp310-abi3-win_amd64.whl", hash = "sha256:f65e9f347b76964d91294964559da05cdcefb1f0bdfe90b6173892de3598a810", size = 96393, upload-time = "2022-10-04T01:56:22.475Z" }, + { url = 
"https://files.pythonhosted.org/packages/25/f0/63337a824e34d0a3f48f2739d902c9c7d30524d4fc23ad73a3dcdad82e05/bcl-2.3.1-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:edb8277faee90121a248d26b308f4f007da1faedfd98d246841fb0f108e47db2", size = 315551, upload-time = "2022-10-04T01:56:24.025Z" }, + { url = "https://files.pythonhosted.org/packages/00/1a/20ea61d352d5804df96baf8ca70401b17db8d748a81d4225f223f2580022/bcl-2.3.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99aff16e0da7a3b678c6cba9be24760eda75c068cba2b85604cf41818e2ba732", size = 740123, upload-time = "2022-10-04T01:56:26.995Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a8/2714e3f7d5643f487b0ecd49b21fa8db2d9572901baa49a6e0457a3b0c19/bcl-2.3.1-cp37-abi3-win32.whl", hash = "sha256:17d2e7dbe852c4447a7a2ff179dc466a3b8809ad1f151c4625ef7feff167fcaf", size = 88674, upload-time = "2022-10-04T01:56:28.518Z" }, + { url = "https://files.pythonhosted.org/packages/26/69/6fab32cd6888887ed9113b806854ac696a76cf77febdacc6c5d4271cba8e/bcl-2.3.1-cp37-abi3-win_amd64.whl", hash = "sha256:fb778e77653735ac0bd2376636cba27ad972e0888227d4b40f49ea7ca5bceefa", size = 96395, upload-time = "2022-10-04T01:56:29.948Z" }, + { url = "https://files.pythonhosted.org/packages/ab/7a/06d9297f9805da15775615bb9229b38eb28f1e113cdd05d0e7bbcc3429e4/bcl-2.3.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:f6d551e139fa1544f7c822be57b0a8da2dff791c7ffa152bf371e3a8712b8b62", size = 315576, upload-time = "2022-10-04T01:56:32.63Z" }, + { url = "https://files.pythonhosted.org/packages/7b/15/c244b97a2ffb839fc763cbd2ce65b9290c166e279aa9fc05f046e8feb372/bcl-2.3.1-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:447835deb112f75f89cca34e34957a36e355a102a37a7b41e83e5502b11fc10a", size = 740435, upload-time = "2022-10-04T01:56:35.392Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ff/25eaaf928078fc266d5f4cd485206acaec43c6a9311cf809114833bc24c4/bcl-2.3.1-cp38-abi3-win32.whl", hash = "sha256:1d8e0a25921ee705840219ed3c78e1d2e9d0d73cb2007c2708af57489bd6ce57", size = 88675, upload-time = "2022-10-04T01:56:36.943Z" }, + { url = "https://files.pythonhosted.org/packages/85/e3/a0e02b0da403503015c2196e812c8d3781ffcd94426ce5baf7f4bbfa8533/bcl-2.3.1-cp38-abi3-win_amd64.whl", hash = "sha256:a7312d21f5e8960b121fadbd950659bc58745282c1c2415e13150590d2bb271e", size = 96399, upload-time = "2022-10-04T01:56:38.555Z" }, + { url = "https://files.pythonhosted.org/packages/08/ad/a46220911bd7795f9aec10b195e1828b2e48c2015ef7e088447cba5e9089/bcl-2.3.1-cp39-abi3-macosx_10_10_universal2.whl", hash = "sha256:bb695832cb555bb0e3dee985871e6cfc2d5314fb69bbf62297f81ba645e99257", size = 525703, upload-time = "2022-10-04T01:56:40.722Z" }, + { url = "https://files.pythonhosted.org/packages/d8/3a/e8395071a89a7199363990968d438b77c55d55cce556327c98d5ce7975d1/bcl-2.3.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:0922349eb5ffd19418f46c40469d132c6e0aea0e47fec48a69bec5191ee56bec", size = 315583, upload-time = "2022-10-04T01:56:42.88Z" }, + { url = "https://files.pythonhosted.org/packages/b5/f9/2be5d88275d3d7e79cdbc8d52659b02b752d44f2bf90addb987d1fb96752/bcl-2.3.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97117d57cf90679dd1b28f1039fa2090f5561d3c1ee4fe4e78d1b0680cc39b8d", size = 740137, upload-time = "2022-10-04T01:56:46.148Z" }, + { url = "https://files.pythonhosted.org/packages/7f/94/a3613caee8ca933902831343cc1040bcf3bb736cc9f38b2b4a7766292585/bcl-2.3.1-cp39-abi3-win32.whl", hash = 
"sha256:a5823f1b655a37259a06aa348bbc2e7a38d39d0e1683ea0596b888b7ef56d378", size = 88675, upload-time = "2022-10-04T01:56:47.459Z" }, + { url = "https://files.pythonhosted.org/packages/9e/45/302d6712a8ff733a259446a7d24ff3c868715103032f50eef0d93ba70221/bcl-2.3.1-cp39-abi3-win_amd64.whl", hash = "sha256:52cf26c4ecd76e806c6576c4848633ff44ebfff528fca63ad0e52085b6ba5aa9", size = 96394, upload-time = "2022-10-04T01:56:48.909Z" }, +] + +[[package]] +name = "bcrypt" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/5d/6d7433e0f3cd46ce0b43cd65e1db465ea024dbb8216fb2404e919c2ad77b/bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18", size = 25697, upload-time = "2025-02-28T01:24:09.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/2c/3d44e853d1fe969d229bd58d39ae6902b3d924af0e2b5a60d17d4b809ded/bcrypt-4.3.0-cp313-cp313t-macosx_10_12_universal2.whl", hash = "sha256:f01e060f14b6b57bbb72fc5b4a83ac21c443c9a2ee708e04a10e9192f90a6281", size = 483719, upload-time = "2025-02-28T01:22:34.539Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e2/58ff6e2a22eca2e2cff5370ae56dba29d70b1ea6fc08ee9115c3ae367795/bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5eeac541cefd0bb887a371ef73c62c3cd78535e4887b310626036a7c0a817bb", size = 272001, upload-time = "2025-02-28T01:22:38.078Z" }, + { url = "https://files.pythonhosted.org/packages/37/1f/c55ed8dbe994b1d088309e366749633c9eb90d139af3c0a50c102ba68a1a/bcrypt-4.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:59e1aa0e2cd871b08ca146ed08445038f42ff75968c7ae50d2fdd7860ade2180", size = 277451, upload-time = "2025-02-28T01:22:40.787Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1c/794feb2ecf22fe73dcfb697ea7057f632061faceb7dcf0f155f3443b4d79/bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:0042b2e342e9ae3d2ed22727c1262f76cc4f345683b5c1715f0250cf4277294f", size = 272792, upload-time = "2025-02-28T01:22:43.144Z" }, + { url = "https://files.pythonhosted.org/packages/13/b7/0b289506a3f3598c2ae2bdfa0ea66969812ed200264e3f61df77753eee6d/bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74a8d21a09f5e025a9a23e7c0fd2c7fe8e7503e4d356c0a2c1486ba010619f09", size = 289752, upload-time = "2025-02-28T01:22:45.56Z" }, + { url = "https://files.pythonhosted.org/packages/dc/24/d0fb023788afe9e83cc118895a9f6c57e1044e7e1672f045e46733421fe6/bcrypt-4.3.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:0142b2cb84a009f8452c8c5a33ace5e3dfec4159e7735f5afe9a4d50a8ea722d", size = 277762, upload-time = "2025-02-28T01:22:47.023Z" }, + { url = "https://files.pythonhosted.org/packages/e4/38/cde58089492e55ac4ef6c49fea7027600c84fd23f7520c62118c03b4625e/bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_aarch64.whl", hash = "sha256:12fa6ce40cde3f0b899729dbd7d5e8811cb892d31b6f7d0334a1f37748b789fd", size = 272384, upload-time = "2025-02-28T01:22:49.221Z" }, + { url = "https://files.pythonhosted.org/packages/de/6a/d5026520843490cfc8135d03012a413e4532a400e471e6188b01b2de853f/bcrypt-4.3.0-cp313-cp313t-manylinux_2_34_x86_64.whl", hash = "sha256:5bd3cca1f2aa5dbcf39e2aa13dd094ea181f48959e1071265de49cc2b82525af", size = 277329, upload-time = "2025-02-28T01:22:51.603Z" }, + { url = 
"https://files.pythonhosted.org/packages/b3/a3/4fc5255e60486466c389e28c12579d2829b28a527360e9430b4041df4cf9/bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_aarch64.whl", hash = "sha256:335a420cfd63fc5bc27308e929bee231c15c85cc4c496610ffb17923abf7f231", size = 305241, upload-time = "2025-02-28T01:22:53.283Z" }, + { url = "https://files.pythonhosted.org/packages/c7/15/2b37bc07d6ce27cc94e5b10fd5058900eb8fb11642300e932c8c82e25c4a/bcrypt-4.3.0-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:0e30e5e67aed0187a1764911af023043b4542e70a7461ad20e837e94d23e1d6c", size = 309617, upload-time = "2025-02-28T01:22:55.461Z" }, + { url = "https://files.pythonhosted.org/packages/5f/1f/99f65edb09e6c935232ba0430c8c13bb98cb3194b6d636e61d93fe60ac59/bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b8d62290ebefd49ee0b3ce7500f5dbdcf13b81402c05f6dafab9a1e1b27212f", size = 335751, upload-time = "2025-02-28T01:22:57.81Z" }, + { url = "https://files.pythonhosted.org/packages/00/1b/b324030c706711c99769988fcb694b3cb23f247ad39a7823a78e361bdbb8/bcrypt-4.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:2ef6630e0ec01376f59a006dc72918b1bf436c3b571b80fa1968d775fa02fe7d", size = 355965, upload-time = "2025-02-28T01:22:59.181Z" }, + { url = "https://files.pythonhosted.org/packages/aa/dd/20372a0579dd915dfc3b1cd4943b3bca431866fcb1dfdfd7518c3caddea6/bcrypt-4.3.0-cp313-cp313t-win32.whl", hash = "sha256:7a4be4cbf241afee43f1c3969b9103a41b40bcb3a3f467ab19f891d9bc4642e4", size = 155316, upload-time = "2025-02-28T01:23:00.763Z" }, + { url = "https://files.pythonhosted.org/packages/6d/52/45d969fcff6b5577c2bf17098dc36269b4c02197d551371c023130c0f890/bcrypt-4.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c1949bf259a388863ced887c7861da1df681cb2388645766c89fdfd9004c669", size = 147752, upload-time = "2025-02-28T01:23:02.908Z" }, + { url = "https://files.pythonhosted.org/packages/11/22/5ada0b9af72b60cbc4c9a399fdde4af0feaa609d27eb0adc61607997a3fa/bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d", size = 498019, upload-time = "2025-02-28T01:23:05.838Z" }, + { url = "https://files.pythonhosted.org/packages/b8/8c/252a1edc598dc1ce57905be173328eda073083826955ee3c97c7ff5ba584/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b", size = 279174, upload-time = "2025-02-28T01:23:07.274Z" }, + { url = "https://files.pythonhosted.org/packages/29/5b/4547d5c49b85f0337c13929f2ccbe08b7283069eea3550a457914fc078aa/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e", size = 283870, upload-time = "2025-02-28T01:23:09.151Z" }, + { url = "https://files.pythonhosted.org/packages/be/21/7dbaf3fa1745cb63f776bb046e481fbababd7d344c5324eab47f5ca92dd2/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59", size = 279601, upload-time = "2025-02-28T01:23:11.461Z" }, + { url = "https://files.pythonhosted.org/packages/6d/64/e042fc8262e971347d9230d9abbe70d68b0a549acd8611c83cebd3eaec67/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753", size = 297660, upload-time = "2025-02-28T01:23:12.989Z" }, + { url = 
"https://files.pythonhosted.org/packages/50/b8/6294eb84a3fef3b67c69b4470fcdd5326676806bf2519cda79331ab3c3a9/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761", size = 284083, upload-time = "2025-02-28T01:23:14.5Z" }, + { url = "https://files.pythonhosted.org/packages/62/e6/baff635a4f2c42e8788fe1b1633911c38551ecca9a749d1052d296329da6/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb", size = 279237, upload-time = "2025-02-28T01:23:16.686Z" }, + { url = "https://files.pythonhosted.org/packages/39/48/46f623f1b0c7dc2e5de0b8af5e6f5ac4cc26408ac33f3d424e5ad8da4a90/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d", size = 283737, upload-time = "2025-02-28T01:23:18.897Z" }, + { url = "https://files.pythonhosted.org/packages/49/8b/70671c3ce9c0fca4a6cc3cc6ccbaa7e948875a2e62cbd146e04a4011899c/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f", size = 312741, upload-time = "2025-02-28T01:23:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/27/fb/910d3a1caa2d249b6040a5caf9f9866c52114d51523ac2fb47578a27faee/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732", size = 316472, upload-time = "2025-02-28T01:23:23.183Z" }, + { url = "https://files.pythonhosted.org/packages/dc/cf/7cf3a05b66ce466cfb575dbbda39718d45a609daa78500f57fa9f36fa3c0/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef", size = 343606, upload-time = "2025-02-28T01:23:25.361Z" }, + { url = "https://files.pythonhosted.org/packages/e3/b8/e970ecc6d7e355c0d892b7f733480f4aa8509f99b33e71550242cf0b7e63/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304", size = 362867, upload-time = "2025-02-28T01:23:26.875Z" }, + { url = "https://files.pythonhosted.org/packages/a9/97/8d3118efd8354c555a3422d544163f40d9f236be5b96c714086463f11699/bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51", size = 160589, upload-time = "2025-02-28T01:23:28.381Z" }, + { url = "https://files.pythonhosted.org/packages/29/07/416f0b99f7f3997c69815365babbc2e8754181a4b1899d921b3c7d5b6f12/bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62", size = 152794, upload-time = "2025-02-28T01:23:30.187Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c1/3fa0e9e4e0bfd3fd77eb8b52ec198fd6e1fd7e9402052e43f23483f956dd/bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3", size = 498969, upload-time = "2025-02-28T01:23:31.945Z" }, + { url = "https://files.pythonhosted.org/packages/ce/d4/755ce19b6743394787fbd7dff6bf271b27ee9b5912a97242e3caf125885b/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24", size = 279158, upload-time = "2025-02-28T01:23:34.161Z" }, + { url = 
"https://files.pythonhosted.org/packages/9b/5d/805ef1a749c965c46b28285dfb5cd272a7ed9fa971f970435a5133250182/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef", size = 284285, upload-time = "2025-02-28T01:23:35.765Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/698580547a4a4988e415721b71eb45e80c879f0fb04a62da131f45987b96/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b", size = 279583, upload-time = "2025-02-28T01:23:38.021Z" }, + { url = "https://files.pythonhosted.org/packages/f2/87/62e1e426418204db520f955ffd06f1efd389feca893dad7095bf35612eec/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676", size = 297896, upload-time = "2025-02-28T01:23:39.575Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c6/8fedca4c2ada1b6e889c52d2943b2f968d3427e5d65f595620ec4c06fa2f/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1", size = 284492, upload-time = "2025-02-28T01:23:40.901Z" }, + { url = "https://files.pythonhosted.org/packages/4d/4d/c43332dcaaddb7710a8ff5269fcccba97ed3c85987ddaa808db084267b9a/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe", size = 279213, upload-time = "2025-02-28T01:23:42.653Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/1e36379e169a7df3a14a1c160a49b7b918600a6008de43ff20d479e6f4b5/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0", size = 284162, upload-time = "2025-02-28T01:23:43.964Z" }, + { url = "https://files.pythonhosted.org/packages/1c/0a/644b2731194b0d7646f3210dc4d80c7fee3ecb3a1f791a6e0ae6bb8684e3/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f", size = 312856, upload-time = "2025-02-28T01:23:46.011Z" }, + { url = "https://files.pythonhosted.org/packages/dc/62/2a871837c0bb6ab0c9a88bf54de0fc021a6a08832d4ea313ed92a669d437/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23", size = 316726, upload-time = "2025-02-28T01:23:47.575Z" }, + { url = "https://files.pythonhosted.org/packages/0c/a1/9898ea3faac0b156d457fd73a3cb9c2855c6fd063e44b8522925cdd8ce46/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe", size = 343664, upload-time = "2025-02-28T01:23:49.059Z" }, + { url = "https://files.pythonhosted.org/packages/40/f2/71b4ed65ce38982ecdda0ff20c3ad1b15e71949c78b2c053df53629ce940/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505", size = 363128, upload-time = "2025-02-28T01:23:50.399Z" }, + { url = "https://files.pythonhosted.org/packages/11/99/12f6a58eca6dea4be992d6c681b7ec9410a1d9f5cf368c61437e31daa879/bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a", size = 160598, upload-time = "2025-02-28T01:23:51.775Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/cf/45fb5261ece3e6b9817d3d82b2f343a505fd58674a92577923bc500bd1aa/bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b", size = 152799, upload-time = "2025-02-28T01:23:53.139Z" }, +] + +[[package]] +name = "bip32" +version = "4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coincurve" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/37/b69968b1b6eaea1fedb8efdb1862d86e92b6f68e182f39c764f894984db5/bip32-4.0.tar.gz", hash = "sha256:8035588f252f569bb414bc60df151ae431fc1c6789a19488a32890532ef3a2fc", size = 21662, upload-time = "2024-09-07T12:40:26.388Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/bd/dcf1650776a241c10a2bc6826b6e23ff63bf55373bb053b716c69c463758/bip32-4.0-py3-none-any.whl", hash = "sha256:9728b38336129c00e1f870bbb3e328c9632d51c1bddeef4011fd3115cb3aeff9", size = 12898, upload-time = "2024-09-07T12:40:25.358Z" }, +] + +[[package]] +name = "bitarray" +version = "3.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1b/76/b08705dbfabc4169eab93bba4a10b0ad60940f48cc8a62ff16e2a05f0452/bitarray-3.6.1.tar.gz", hash = "sha256:4255bff37b01562b8e6adcf9db256029765985b0790c5ff76bbe1837edcd53ea", size = 148620, upload-time = "2025-08-12T09:52:35.677Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/80/2a8514df92257d54cded7733aebeade6b594d551c0fb16746d4564fb1303/bitarray-3.6.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a236fc1e87a70adb588b37b09b18add71224279d28140d9ee847778e1f3f5a1a", size = 145778, upload-time = "2025-08-12T09:49:54.401Z" }, + { url = "https://files.pythonhosted.org/packages/c0/f8/a14a3deecb3580da1ae208ea32567a6581ebf6944c93c6c7f381fda08060/bitarray-3.6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d5cf59d8f1ee8332f60e7352464209db1de909ae960d3b1f9d76897e484aa4ed", size = 142496, upload-time = "2025-08-12T09:49:55.772Z" }, + { url = "https://files.pythonhosted.org/packages/21/50/ae0e1cb8c1633372ad493b9a11bc0c66108c219e6bea5519d5119e28ec3e/bitarray-3.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38ce28427eea22bafcef073768d7e14d14233ced3eea8505ee13b92fb3723bce", size = 328326, upload-time = "2025-08-12T09:49:56.848Z" }, + { url = "https://files.pythonhosted.org/packages/f7/8b/4e196ea39ef05affc1591d09097c14d750b6a5b226973bdf4bfe761b08b3/bitarray-3.6.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e92011003d87e224e101533a98ede388bb40de0ec65978c6d0bb0d98f949f1b8", size = 346514, upload-time = "2025-08-12T09:49:57.977Z" }, + { url = "https://files.pythonhosted.org/packages/31/78/23c7c3fead7e7de36c4c7ed1ed3db105d6e293775cda6943b43e24e54fe3/bitarray-3.6.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f378316f45ffcec4ed429cf2ef446c8a3d7afe29e5020eb51ed789e443f4359f", size = 339657, upload-time = "2025-08-12T09:49:59.276Z" }, + { url = "https://files.pythonhosted.org/packages/24/44/f811e87fc5d937955502b5e5124e2a81315d577e9ff200ab568c8cd0bde6/bitarray-3.6.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b12c11894d991dfaa415229329452e8be2b230e06fba2aff27110158e2f0dafd", size = 331357, upload-time = "2025-08-12T09:50:00.464Z" }, + { url = 
"https://files.pythonhosted.org/packages/b4/08/e8250cba930c59c37786795b5cad2bef9772f7d9b6f68d21a474500b7e22/bitarray-3.6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f53d6a0ac86d67b6760530196963ea0598588c1a9b155f7e137d9b6a1befd27", size = 319921, upload-time = "2025-08-12T09:50:01.65Z" }, + { url = "https://files.pythonhosted.org/packages/38/1f/bfedc526e3c512663062402ab2dcc4993eb73292aadac8c2cdaf06425135/bitarray-3.6.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4b17029647fd990ce6fd3f1fb253ff47bfc27df8255bea99b5e381b2030b6d54", size = 321237, upload-time = "2025-08-12T09:50:02.866Z" }, + { url = "https://files.pythonhosted.org/packages/15/bb/e813631a61d54c6d6662e6adfc2ab42596413b46c05d607ad66f2cd0c7e3/bitarray-3.6.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:36cd656877eb3d215ecbb575743d05c521911514985b2a0999a23bb504a8ae64", size = 317839, upload-time = "2025-08-12T09:50:04.138Z" }, + { url = "https://files.pythonhosted.org/packages/64/5d/548a375c81d3b366cc76ee611cbebb267ef6e5e33cc58461628e68ff65d8/bitarray-3.6.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:ac29a0cda5ea50c78ff20d06d8c5b8402147448a9dde2b118ecea2b4cec490ec", size = 345106, upload-time = "2025-08-12T09:50:05.744Z" }, + { url = "https://files.pythonhosted.org/packages/6d/7d/7ffeab0566d798a6a4652b0fe16126611446863ede12873309e23b3e1978/bitarray-3.6.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8dceb8d43fe51b8643766152736ec0f32f0a6a5b6e2e6742f1165cbe5799102e", size = 346469, upload-time = "2025-08-12T09:50:07.248Z" }, + { url = "https://files.pythonhosted.org/packages/92/47/f5fb907c9c8ea9c0376792152e7c511ae95a0140f54562f6a49c66a30094/bitarray-3.6.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:137bfb9c00c172c16ddabe8615a4746e745789cfedb0e7c5b25236a20ccf051c", size = 327378, upload-time = "2025-08-12T09:50:08.512Z" }, + { url = "https://files.pythonhosted.org/packages/f3/fb/3203d199f9cba9595227afefc614c5390b1babb9d73df0df78d50b88f053/bitarray-3.6.1-cp312-cp312-win32.whl", hash = "sha256:aba6043eb44b68055145c5ae2062f976c02ec0b04ff688ee5b43deda8185b708", size = 138827, upload-time = "2025-08-12T09:50:09.745Z" }, + { url = "https://files.pythonhosted.org/packages/60/95/cd3e4a4e783ff8d0f54ae3bc8fc26e077333d2eebe917f6ee7886c9004e8/bitarray-3.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:64a3a8c79468bd5283907f2e60651c857f0dab3dc671943bcf5ec2d15e2f8177", size = 145639, upload-time = "2025-08-12T09:50:10.857Z" }, + { url = "https://files.pythonhosted.org/packages/42/91/4ec54801707f8c205a0f80ada04c8a3be982fc974e4f76405ac250b3021c/bitarray-3.6.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:639fc29267348a78b259fb24c471b7e3322c85f1eb95712bac852ab2a56e742a", size = 145768, upload-time = "2025-08-12T09:50:12.606Z" }, + { url = "https://files.pythonhosted.org/packages/01/13/d79d0db69933b826235168619c13ee3ec1ca6fba63e6833616230675b10e/bitarray-3.6.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:357f6c07cb3116a2d4a917fd4f54727f1b71102f5e316c2d4c9fe26ec3dfd8e9", size = 142488, upload-time = "2025-08-12T09:50:15.836Z" }, + { url = "https://files.pythonhosted.org/packages/f2/9f/8e11d5082f062ca530de0d2e04a5b5a63be0dd08ac34aa85055cf179a878/bitarray-3.6.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:384eee34bdb4ea52517421cac778d4a881652cd7a5a052bd0939adbca9a6b7d6", size = 328276, upload-time = "2025-08-12T09:50:16.962Z" }, + { url = 
"https://files.pythonhosted.org/packages/4f/a0/176423287161375c018ac90231425a1134e8c7133d41a82d1d1ace10c86b/bitarray-3.6.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:25cc576ac013b33e69b8123dd7eca78af80100f1a10174e5a30e174ad31f715a", size = 346324, upload-time = "2025-08-12T09:50:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/88/3c/e13f8a434875b6c1636bd58a41191310411cbff0b5d28848795eb4119e43/bitarray-3.6.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84951ce773cbe69e4e15ae27bf6617f08ef2415ad37e1114a2d7979e210bf9e5", size = 339595, upload-time = "2025-08-12T09:50:20.385Z" }, + { url = "https://files.pythonhosted.org/packages/c9/a4/118035f807d9396a4b9b339f5ac3db896a162c2e6b0fe8750fc1dcf700e4/bitarray-3.6.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c277f6a1cbfdedcec704add6ff2ace89b56a29c1955f2441a7fd2fbde410f791", size = 331233, upload-time = "2025-08-12T09:50:22.096Z" }, + { url = "https://files.pythonhosted.org/packages/b8/8e/27e500b61a02c74173bb62760a079c1968b0e87394577197bc43f7380006/bitarray-3.6.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8d8f78bf8bebac391867a7637008ffa68f4870d9ee8154c836745ff237cfa98", size = 319819, upload-time = "2025-08-12T09:50:23.756Z" }, + { url = "https://files.pythonhosted.org/packages/d4/bb/b33eb1dd2577d956dd23e58b387b3d740b0083899c0575cea82302705804/bitarray-3.6.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b263a460b4ea6a7ebbe450593928651fa81fa3a1426732ff6ea52bb4b210d6c", size = 321232, upload-time = "2025-08-12T09:50:24.985Z" }, + { url = "https://files.pythonhosted.org/packages/cf/ab/843caf3a536be1034a357423067b244d23dee9d6bcab7fb8e20744bed0e2/bitarray-3.6.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2a2f08fa831bafb42903231c23df5c597d2cc47fabb2532659180a54386dc49a", size = 317863, upload-time = "2025-08-12T09:50:26.25Z" }, + { url = "https://files.pythonhosted.org/packages/83/01/11f9dc356bb33155a756a24d90d8a2b74bf8df52fe7bbc805fe06f72ae21/bitarray-3.6.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:43d2244aa721dc92713a72695d43dd2a75b920338c9f34da50133d05e23c9ff3", size = 345115, upload-time = "2025-08-12T09:50:27.768Z" }, + { url = "https://files.pythonhosted.org/packages/7a/fe/6e0ae1bbfad42bc2b106cd1687fd6b4b190a69252c46b0274caf490a9381/bitarray-3.6.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:298a0f4442edb98695040009e9d411f221b33cc8d3b749a00d9813a69c047fc1", size = 346439, upload-time = "2025-08-12T09:50:29.273Z" }, + { url = "https://files.pythonhosted.org/packages/93/6a/f85a26763c272e65b3f4a42f7ed1623d07f7b1237d4d5a14caf90e0b7286/bitarray-3.6.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7f16d704c40bd3db661005819daa818c4f5f82823d29491d6744dfc7ff3d6b4a", size = 327316, upload-time = "2025-08-12T09:50:30.874Z" }, + { url = "https://files.pythonhosted.org/packages/b1/29/c3886af0e5ccface7dfada61feb87d3e42967cf88d040331dbcb8b3d437f/bitarray-3.6.1-cp313-cp313-win32.whl", hash = "sha256:a2120ce67c0d0047564c5af1afdd7d03688c2d7109e21ce699742366934c658f", size = 138842, upload-time = "2025-08-12T09:50:32.135Z" }, + { url = "https://files.pythonhosted.org/packages/4b/2b/9257924827af9c59c71644fd7021dd18e431e4c6f07faed9e92b247a010e/bitarray-3.6.1-cp313-cp313-win_amd64.whl", hash = "sha256:e3361f1c9e537925284d1f37447a3aad5f5dc1c741cec2ebc078250232d939af", size = 145641, upload-time = "2025-08-12T09:50:33.248Z" }, +] + +[[package]] +name = 
"boto3" +version = "1.40.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/63/b263070ba4a2815de633d71dd4c5c04c9eb7000d33c510036c9557692324/boto3-1.40.9.tar.gz", hash = "sha256:af3f77a548b3dd7db5046609598a28a9ad5d062437b1783da9b526cc67c38b79", size = 111953, upload-time = "2025-08-13T19:20:32.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/6d/79fad38fcd7e1fc6961061b46cc87706c5c946088bc4620abf0d0aa49420/boto3-1.40.9-py3-none-any.whl", hash = "sha256:516f5e3f7552b2a7ca4d2c89b338fb4684998c676b11b906e2ab694c91716ba6", size = 140061, upload-time = "2025-08-13T19:20:30.652Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/f3/7bf4913b4b61416c014cfee38211d071f75894cca37f7234519c4d8676d1/botocore-1.40.9.tar.gz", hash = "sha256:f4a9c6ed08e8637138e1b5534f89d38c02650974b6458a07690493130e295f68", size = 14325768, upload-time = "2025-08-13T19:20:22.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/e9/367e81e114deb92a6e0d5740f0bff4548af710be318af65265b9aad72237/botocore-1.40.9-py3-none-any.whl", hash = "sha256:d4960a39aab9658bcd0272490003001cb4a8d12b89bb297ccef994ee023fb638", size = 13990592, upload-time = "2025-08-13T19:20:16.942Z" }, +] + +[[package]] +name = "cachetools" +version = "5.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/38/a0f315319737ecf45b4319a8cd1f3a908e29d9277b46942263292115eee7/cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a", size = 27661, upload-time = "2024-08-18T20:28:44.639Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/07/14f8ad37f2d12a5ce41206c21820d8cb6561b728e51fad4530dff0552a67/cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292", size = 9524, upload-time = "2024-08-18T20:28:43.404Z" }, +] + +[[package]] +name = "cdp-sdk" +version = "1.29.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "aiohttp-retry" }, + { name = "base58" }, + { name = "cryptography" }, + { name = "nest-asyncio" }, + { name = "pydantic" }, + { name = "pyjwt" }, + { name = "python-dateutil" }, + { name = "solana" }, + { name = "solders" }, + { name = "urllib3" }, + { name = "web3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/59/e2/eafe02b5051b997327222c2b2e03e41d03472daeaee7417b3fe3d3380fda/cdp_sdk-1.29.1.tar.gz", hash = "sha256:2e96eb859c047f599cae11c457f5945975adca50bfeccdedd1c2242dd46cf318", size = 292050, upload-time = "2025-08-09T14:38:55.722Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/5e/2dfcc0fdac9522929d889a68b753b177790335984e6a5612f72b637bc5e6/cdp_sdk-1.29.1-py3-none-any.whl", hash = "sha256:d3922348e51c12901991db6e687deac66ea316a2520afabef39388b6ca25e203", size = 701937, upload-time = "2025-08-09T14:38:54.021Z" }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010, upload-time = "2024-12-14T13:52:38.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927, upload-time = "2024-12-14T13:52:36.114Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f8/dd6c246b148639254dad4d6803eb6a54e8c85c6e11ec9df2cffa87571dbe/cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e", size = 182989, upload-time = "2024-09-04T20:44:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f1/672d303ddf17c24fc83afd712316fda78dc6fce1cd53011b839483e1ecc8/cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2", size = 178802, upload-time = "2024-09-04T20:44:30.289Z" }, + { url = "https://files.pythonhosted.org/packages/0e/2d/eab2e858a91fdff70533cab61dcff4a1f55ec60425832ddfdc9cd36bc8af/cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3", size = 454792, upload-time = "2024-09-04T20:44:32.01Z" }, + { url = "https://files.pythonhosted.org/packages/75/b2/fbaec7c4455c604e29388d55599b99ebcc250a60050610fadde58932b7ee/cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683", size = 478893, upload-time = "2024-09-04T20:44:33.606Z" }, + { url = "https://files.pythonhosted.org/packages/4f/b7/6e4a2162178bf1935c336d4da8a9352cccab4d3a5d7914065490f08c0690/cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5", size = 485810, upload-time = "2024-09-04T20:44:35.191Z" }, + { url = "https://files.pythonhosted.org/packages/c7/8a/1d0e4a9c26e54746dc08c2c6c037889124d4f59dffd853a659fa545f1b40/cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4", size = 471200, upload-time = "2024-09-04T20:44:36.743Z" }, + { url = "https://files.pythonhosted.org/packages/26/9f/1aab65a6c0db35f43c4d1b4f580e8df53914310afc10ae0397d29d697af4/cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd", size = 479447, upload-time = "2024-09-04T20:44:38.492Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e4/fb8b3dd8dc0e98edf1135ff067ae070bb32ef9d509d6cb0f538cd6f7483f/cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = 
"sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed", size = 484358, upload-time = "2024-09-04T20:44:40.046Z" }, + { url = "https://files.pythonhosted.org/packages/f1/47/d7145bf2dc04684935d57d67dff9d6d795b2ba2796806bb109864be3a151/cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9", size = 488469, upload-time = "2024-09-04T20:44:41.616Z" }, + { url = "https://files.pythonhosted.org/packages/bf/ee/f94057fa6426481d663b88637a9a10e859e492c73d0384514a17d78ee205/cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d", size = 172475, upload-time = "2024-09-04T20:44:43.733Z" }, + { url = "https://files.pythonhosted.org/packages/7c/fc/6a8cb64e5f0324877d503c854da15d76c1e50eb722e320b15345c4d0c6de/cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a", size = 182009, upload-time = "2024-09-04T20:44:45.309Z" }, +] + +[[package]] +name = "chardet" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188, upload-time = "2024-12-24T18:12:35.43Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105, upload-time = "2024-12-24T18:10:38.83Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404, upload-time = "2024-12-24T18:10:44.272Z" }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423, upload-time = "2024-12-24T18:10:45.492Z" }, + { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184, upload-time = "2024-12-24T18:10:47.898Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268, upload-time = "2024-12-24T18:10:50.589Z" }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601, upload-time = "2024-12-24T18:10:52.541Z" }, + { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098, upload-time = "2024-12-24T18:10:53.789Z" }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520, upload-time = "2024-12-24T18:10:55.048Z" }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852, upload-time = "2024-12-24T18:10:57.647Z" }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488, upload-time = "2024-12-24T18:10:59.43Z" }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192, upload-time = "2024-12-24T18:11:00.676Z" }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550, upload-time = "2024-12-24T18:11:01.952Z" }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785, upload-time = "2024-12-24T18:11:03.142Z" }, + { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698, upload-time = "2024-12-24T18:11:05.834Z" }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162, upload-time = "2024-12-24T18:11:07.064Z" }, + { url = "https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263, upload-time = "2024-12-24T18:11:08.374Z" }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966, upload-time = "2024-12-24T18:11:09.831Z" }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992, upload-time = "2024-12-24T18:11:12.03Z" }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162, upload-time = "2024-12-24T18:11:13.372Z" }, + { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972, upload-time = "2024-12-24T18:11:14.628Z" }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095, upload-time = "2024-12-24T18:11:17.672Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668, upload-time = "2024-12-24T18:11:18.989Z" }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073, upload-time = "2024-12-24T18:11:21.507Z" }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732, upload-time = "2024-12-24T18:11:22.774Z" }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391, upload-time = "2024-12-24T18:11:24.139Z" }, + { url = 
"https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702, upload-time = "2024-12-24T18:11:26.535Z" }, + { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767, upload-time = "2024-12-24T18:12:32.852Z" }, +] + +[[package]] +name = "ckzg" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/55/df/f6db8e83bd4594c1ea685cd37fb81d5399e55765aae16d1a8a9502598f4e/ckzg-2.1.1.tar.gz", hash = "sha256:d6b306b7ec93a24e4346aa53d07f7f75053bc0afc7398e35fa649e5f9d48fcc4", size = 1120500, upload-time = "2025-03-31T21:24:12.324Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/a1/9c07513dd0ea01e5db727e67bd2660f3b300a4511281cdb8d5e04afa1cfd/ckzg-2.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c60e8903344ce98ce036f0fabacce952abb714cad4607198b2f0961c28b8aa72", size = 116421, upload-time = "2025-03-31T21:22:46.434Z" }, + { url = "https://files.pythonhosted.org/packages/27/04/b69a0dfbb2722a14c98a52973f276679151ec56a14178cb48e6f2e1697bc/ckzg-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4299149dd72448e5a8d2d1cc6cc7472c92fc9d9f00b1377f5b017c089d9cd92", size = 100216, upload-time = "2025-03-31T21:22:47.633Z" }, + { url = "https://files.pythonhosted.org/packages/2e/24/9cc850d0b8ead395ad5064de67c7c91adacaf31b6b35292ab53fbd93270b/ckzg-2.1.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:025dd31ffdcc799f3ff842570a2a6683b6c5b01567da0109c0c05d11768729c4", size = 175764, upload-time = "2025-03-31T21:22:48.768Z" }, + { url = "https://files.pythonhosted.org/packages/c0/c1/eb13ba399082a98b932f10b230ec08e6456051c0ce3886b3f6d8548d11ab/ckzg-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b42ab8385c273f40a693657c09d2bba40cb4f4666141e263906ba2e519e80bd", size = 161885, upload-time = "2025-03-31T21:22:50.05Z" }, + { url = "https://files.pythonhosted.org/packages/57/c7/58baa64199781950c5a8c6139a46e1acff0f057a36e56769817400eb87fb/ckzg-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be3890fc1543f4fcfc0063e4baf5c036eb14bcf736dabdc6171ab017e0f1671", size = 170757, upload-time = "2025-03-31T21:22:51.282Z" }, + { url = "https://files.pythonhosted.org/packages/65/bd/4b8e1c70972c98829371b7004dc750a45268c5d3442d602e1b62f13ca867/ckzg-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b754210ded172968b201e2d7252573af6bf52d6ad127ddd13d0b9a45a51dae7b", size = 173761, upload-time = "2025-03-31T21:22:52.6Z" }, + { url = "https://files.pythonhosted.org/packages/1f/32/c3fd1002f97ba3e0c5b1d9ab2c8fb7a6f475fa9b80ed9c4fa55975501a54/ckzg-2.1.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b2f8fda87865897a269c4e951e3826c2e814427a6cdfed6731cccfe548f12b36", size = 188666, upload-time = "2025-03-31T21:22:53.47Z" }, + { url = "https://files.pythonhosted.org/packages/e2/d9/91cf5a8169ee60c9397c975163cbca34432571f94facec5f8c0086bb47d8/ckzg-2.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:98e70b5923d77c7359432490145e9d1ab0bf873eb5de56ec53f4a551d7eaec79", size = 183652, upload-time = 
"2025-03-31T21:22:54.351Z" }, + { url = "https://files.pythonhosted.org/packages/25/d4/8c9f6b852f99926862344b29f0c59681916ccfec2ac60a85952a369e0bca/ckzg-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:42af7bde4ca45469cd93a96c3d15d69d51d40e7f0d30e3a20711ebd639465fcb", size = 98816, upload-time = "2025-03-31T21:22:55.23Z" }, + { url = "https://files.pythonhosted.org/packages/b7/9a/fa698b12e97452d11dd314e0335aae759725284ef6e1c1665aed56b1cd3e/ckzg-2.1.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7e4edfdaf87825ff43b9885fabfdea408737a714f4ce5467100d9d1d0a03b673", size = 116426, upload-time = "2025-03-31T21:22:56.108Z" }, + { url = "https://files.pythonhosted.org/packages/a1/a6/8cccd308bd11b49b40eecad6900b5769da117951cac33e880dd25e851ef7/ckzg-2.1.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:815fd2a87d6d6c57d669fda30c150bc9bf387d47e67d84535aa42b909fdc28ea", size = 100219, upload-time = "2025-03-31T21:22:56.982Z" }, + { url = "https://files.pythonhosted.org/packages/30/0e/63573d816c1292b9a4d70eb6a7366b3593d29a977794039e926805a76ca0/ckzg-2.1.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c32466e809b1ab3ff01d3b0bb0b9912f61dcf72957885615595f75e3f7cc10e5", size = 175725, upload-time = "2025-03-31T21:22:58.213Z" }, + { url = "https://files.pythonhosted.org/packages/86/f6/a279609516695ad3fb8b201098c669ba3b2844cbf4fa0d83a0f02b9bb29b/ckzg-2.1.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f11b73ccf37b12993f39a7dbace159c6d580aacacde6ee17282848476550ddbc", size = 161835, upload-time = "2025-03-31T21:22:59.448Z" }, + { url = "https://files.pythonhosted.org/packages/39/e4/8cf7aef7dc05a777cb221e94046f947c6fe5317159a8dae2cd7090d52ef2/ckzg-2.1.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de3b9433a1f2604bd9ac1646d3c83ad84a850d454d3ac589fe8e70c94b38a6b0", size = 170759, upload-time = "2025-03-31T21:23:01.022Z" }, + { url = "https://files.pythonhosted.org/packages/0b/17/b34e3c08eb36bc67e338b114f289b2595e581b8bdc09a8f12299a1db5d2f/ckzg-2.1.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b7d7e1b5ea06234558cd95c483666fd785a629b720a7f1622b3cbffebdc62033", size = 173787, upload-time = "2025-03-31T21:23:01.974Z" }, + { url = "https://files.pythonhosted.org/packages/2e/f0/aff87c3ed80713453cb6c84fe6fbb7582d86a7a5e4460fda2a497d47f489/ckzg-2.1.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9f5556e6675866040cc4335907be6c537051e7f668da289fa660fdd8a30c9ddb", size = 188722, upload-time = "2025-03-31T21:23:02.966Z" }, + { url = "https://files.pythonhosted.org/packages/44/d9/1f08bfb8fd1cbb8c7513e7ad3fb76bbb5c3fb446238c1eba582276e4d905/ckzg-2.1.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:55b2ba30c5c9daac0c55f1aac851f1b7bf1f7aa0028c2db4440e963dd5b866d6", size = 183686, upload-time = "2025-03-31T21:23:03.905Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ff/434f6d2893cbdfad00c20d17e9a52d426ca042f5e980d5c3db96bc6b6e15/ckzg-2.1.1-cp313-cp313-win_amd64.whl", hash = "sha256:10d201601fc8f28c0e8cec3406676797024dd374c367bbeec5a7a9eac9147237", size = 98817, upload-time = "2025-03-31T21:23:05.2Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = 
"sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "coinbase-agentkit" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "allora-sdk" }, + { name = "cdp-sdk" }, + { name = "ecdsa" }, + { name = "jsonschema" }, + { name = "nilql" }, + { name = "paramiko" }, + { name = "pydantic" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "python-dotenv" }, + { name = "requests" }, + { name = "web3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/e1/6a21afddfffe91b53b6865bae9e6a910b4d595eb05feab25e59a382e6242/coinbase_agentkit-0.6.0.tar.gz", hash = "sha256:b0700af4a3a736254dc1f308d3673e238817067fdee91b5e5d44c9e7fda7052f", size = 102816, upload-time = "2025-05-30T19:26:08.424Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/35/76010c76941a8a0f47e1d49ffc0cd1ec0eac5a6746eca9bb7a59c388ccb9/coinbase_agentkit-0.6.0-py3-none-any.whl", hash = "sha256:9847a4037accd0de25743fd8b941ca4666715454db0146a809030f9b80d31535", size = 163009, upload-time = "2025-05-30T19:26:06.141Z" }, +] + +[[package]] +name = "coinbase-agentkit-langchain" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coinbase-agentkit" }, + { name = "langchain" }, + { name = "nest-asyncio" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/11/07/c3d2cc1015e34aab038775f5a599251431f5d9c087026bd59ee2606ba555/coinbase_agentkit_langchain-0.5.0.tar.gz", hash = "sha256:52a76976da0cf09673b5f0aa7b5bec8bd34316b0a63e798a3e9fb9dc36b85259", size = 2954, upload-time = "2025-05-30T19:54:42.699Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/62/ebdaf7dd4fe7278f8c17d8f4b6e5e4c39c91769aee8e6d8aa12263fb443d/coinbase_agentkit_langchain-0.5.0-py3-none-any.whl", hash = "sha256:356d0e839f2c13bb550473737ac98ecb6edc3a88697f7b9bacf898a7833a00ac", size = 2625, upload-time = "2025-05-30T19:54:41.423Z" }, +] + +[[package]] +name = "coincurve" +version = "20.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asn1crypto" }, + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d9/4c/9e5e51e6c12cec6444c86697992f9c6ccffa19f84d042ff939c8b89206ff/coincurve-20.0.0.tar.gz", hash = "sha256:872419e404300302e938849b6b92a196fabdad651060b559dc310e52f8392829", size = 122865, upload-time = "2024-06-02T18:15:50.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/24/e1bf259dd57186fbdc7cec51909db320884162cfad5ec72cbaa63573ff9d/coincurve-20.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4df4416a6c0370d777aa725a25b14b04e45aa228da1251c258ff91444643f688", size = 1255671, upload-time = "2024-06-02T18:14:57.863Z" }, + { url = "https://files.pythonhosted.org/packages/0a/c5/1817f87d1cd5ff50d8537fe60fb96f66b76dd02da885d970952e6189a801/coincurve-20.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1ccc3e4db55abf3fc0e604a187fdb05f0702bc5952e503d9a75f4ae6eeb4cb3a", size = 1255565, upload-time = "2024-06-02T18:14:59.128Z" }, + { url = 
"https://files.pythonhosted.org/packages/90/9f/35e15f993717ed1dcc4c26d9771f073a1054af26808a0f421783bb4cd7e0/coincurve-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac8335b1658a2ef5b3eb66d52647742fe8c6f413ad5b9d5310d7ea6d8060d40f", size = 1191953, upload-time = "2024-06-02T18:15:01.047Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3d/6a9bc32e69b738b5e05f5027bace1da6722352a4a447e495d3c03a601d99/coincurve-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ac025e485a0229fd5394e0bf6b4a75f8a4f6cee0dcf6f0b01a2ef05c5210ff", size = 1194425, upload-time = "2024-06-02T18:15:02.919Z" }, + { url = "https://files.pythonhosted.org/packages/1a/a6/15424973dc47fc7c87e3c0f8859f6f1b1032582ee9f1b85fdd5d1e33d630/coincurve-20.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e46e3f1c21b3330857bcb1a3a5b942f645c8bce912a8a2b252216f34acfe4195", size = 1204678, upload-time = "2024-06-02T18:15:04.308Z" }, + { url = "https://files.pythonhosted.org/packages/6a/e7/71ddb4d66c11c4ad13e729362f8852e048ae452eba3dfcf57751842bb292/coincurve-20.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:df9ff9b17a1d27271bf476cf3fa92df4c151663b11a55d8cea838b8f88d83624", size = 1215395, upload-time = "2024-06-02T18:15:05.701Z" }, + { url = "https://files.pythonhosted.org/packages/b9/7d/03e0a19cfff1d86f5d019afc69cfbff02caada701ed5a4a50abc63d4261c/coincurve-20.0.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4155759f071375699282e03b3d95fb473ee05c022641c077533e0d906311e57a", size = 1204552, upload-time = "2024-06-02T18:15:07.107Z" }, + { url = "https://files.pythonhosted.org/packages/07/cd/e9bd4ca7d931653a35c74194da04191a9aecc54b8f48a554cd538dc810e4/coincurve-20.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0530b9dd02fc6f6c2916716974b79bdab874227f560c422801ade290e3fc5013", size = 1209392, upload-time = "2024-06-02T18:15:08.663Z" }, + { url = "https://files.pythonhosted.org/packages/99/54/260053f14f74b99b645084231e1c76994134ded49407a3bba23a8ffc0ff6/coincurve-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:eacf9c0ce8739c84549a89c083b1f3526c8780b84517ee75d6b43d276e55f8a0", size = 1198932, upload-time = "2024-06-02T18:15:10.786Z" }, + { url = "https://files.pythonhosted.org/packages/b4/b5/c465e09345dd38b9415f5d47ae7683b3f461db02fcc03e699b6b5687ab2b/coincurve-20.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:52a67bfddbd6224dfa42085c88ad176559801b57d6a8bd30d92ee040de88b7b3", size = 1193324, upload-time = "2024-06-02T18:15:12.511Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "construct" +version = "2.10.68" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/e0/b7/a4a032e94bcfdff481f2e6fecd472794d9da09f474a2185ed33b2c7cad64/construct-2.10.68.tar.gz", hash = "sha256:7b2a3fd8e5f597a5aa1d614c3bd516fa065db01704c72a1efaaeec6ef23d8b45", size = 57856, upload-time = "2022-02-21T23:09:15.1Z" } + +[[package]] +name = "construct-typing" +version = "0.6.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "construct" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/13/c609e60a687252813aa4b69f989f42754ccd5e217717216fc852eefedfd7/construct-typing-0.6.2.tar.gz", hash = "sha256:948e998cfc003681dc34f2d071c3a688cf35b805cbe107febbc488ef967ccba1", size = 22029, upload-time = "2023-08-03T07:31:06.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/0b/ab3ce2b27dd74b6a6703065bd304ea8211ff4de3b1c304446ed95234177b/construct_typing-0.6.2-py3-none-any.whl", hash = "sha256:ebea6989ac622d0c4eb457092cef0c7bfbcfa110bd018670fea7064d0bc09e47", size = 23298, upload-time = "2023-08-03T07:31:04.545Z" }, +] + +[[package]] +name = "cron-validator" +version = "1.0.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "pytz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/55/d3d8e7acad9dc3f54133df0972c79d38f2c6fc4be41f281a396c3afe4411/cron-validator-1.0.8.tar.gz", hash = "sha256:dd485257adb6f590b3e9433f641440c801d307015259c1ee3eb6e21c964c8026", size = 9657, upload-time = "2023-06-19T10:09:48.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/0c/d6bf9d572fb2ce3404fe37b794cf10dd44566368f7084c71b6028d3818ff/cron_validator-1.0.8-py3-none-any.whl", hash = "sha256:6477fcc3d60bfbd1ec00a708f0b8b5136c1fef8140c10effea1f45b79d778653", size = 7830, upload-time = "2023-06-19T10:09:44.881Z" }, +] + +[[package]] +name = "cryptography" +version = "45.0.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/0d/d13399c94234ee8f3df384819dc67e0c5ce215fb751d567a55a1f4b028c7/cryptography-45.0.6.tar.gz", hash = "sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719", size = 744949, upload-time = "2025-08-05T23:59:27.93Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/29/2793d178d0eda1ca4a09a7c4e09a5185e75738cc6d526433e8663b460ea6/cryptography-45.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74", size = 7042702, upload-time = "2025-08-05T23:58:23.464Z" }, + { url = "https://files.pythonhosted.org/packages/b3/b6/cabd07410f222f32c8d55486c464f432808abaa1f12af9afcbe8f2f19030/cryptography-45.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f", size = 4206483, upload-time = "2025-08-05T23:58:27.132Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9e/f9c7d36a38b1cfeb1cc74849aabe9bf817990f7603ff6eb485e0d70e0b27/cryptography-45.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf", size = 4429679, upload-time = "2025-08-05T23:58:29.152Z" }, + { url = "https://files.pythonhosted.org/packages/9c/2a/4434c17eb32ef30b254b9e8b9830cee4e516f08b47fdd291c5b1255b8101/cryptography-45.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", 
hash = "sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5", size = 4210553, upload-time = "2025-08-05T23:58:30.596Z" }, + { url = "https://files.pythonhosted.org/packages/ef/1d/09a5df8e0c4b7970f5d1f3aff1b640df6d4be28a64cae970d56c6cf1c772/cryptography-45.0.6-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2", size = 3894499, upload-time = "2025-08-05T23:58:32.03Z" }, + { url = "https://files.pythonhosted.org/packages/79/62/120842ab20d9150a9d3a6bdc07fe2870384e82f5266d41c53b08a3a96b34/cryptography-45.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08", size = 4458484, upload-time = "2025-08-05T23:58:33.526Z" }, + { url = "https://files.pythonhosted.org/packages/fd/80/1bc3634d45ddfed0871bfba52cf8f1ad724761662a0c792b97a951fb1b30/cryptography-45.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402", size = 4210281, upload-time = "2025-08-05T23:58:35.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/fe/ffb12c2d83d0ee625f124880a1f023b5878f79da92e64c37962bbbe35f3f/cryptography-45.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42", size = 4456890, upload-time = "2025-08-05T23:58:36.923Z" }, + { url = "https://files.pythonhosted.org/packages/8c/8e/b3f3fe0dc82c77a0deb5f493b23311e09193f2268b77196ec0f7a36e3f3e/cryptography-45.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05", size = 4333247, upload-time = "2025-08-05T23:58:38.781Z" }, + { url = "https://files.pythonhosted.org/packages/b3/a6/c3ef2ab9e334da27a1d7b56af4a2417d77e7806b2e0f90d6267ce120d2e4/cryptography-45.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453", size = 4565045, upload-time = "2025-08-05T23:58:40.415Z" }, + { url = "https://files.pythonhosted.org/packages/31/c3/77722446b13fa71dddd820a5faab4ce6db49e7e0bf8312ef4192a3f78e2f/cryptography-45.0.6-cp311-abi3-win32.whl", hash = "sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159", size = 2928923, upload-time = "2025-08-05T23:58:41.919Z" }, + { url = "https://files.pythonhosted.org/packages/38/63/a025c3225188a811b82932a4dcc8457a26c3729d81578ccecbcce2cb784e/cryptography-45.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec", size = 3403805, upload-time = "2025-08-05T23:58:43.792Z" }, + { url = "https://files.pythonhosted.org/packages/5b/af/bcfbea93a30809f126d51c074ee0fac5bd9d57d068edf56c2a73abedbea4/cryptography-45.0.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0", size = 7020111, upload-time = "2025-08-05T23:58:45.316Z" }, + { url = "https://files.pythonhosted.org/packages/98/c6/ea5173689e014f1a8470899cd5beeb358e22bb3cf5a876060f9d1ca78af4/cryptography-45.0.6-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394", size = 4198169, upload-time = "2025-08-05T23:58:47.121Z" }, + { url = "https://files.pythonhosted.org/packages/ba/73/b12995edc0c7e2311ffb57ebd3b351f6b268fed37d93bfc6f9856e01c473/cryptography-45.0.6-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9", size = 4421273, upload-time = "2025-08-05T23:58:48.557Z" }, + { url = "https://files.pythonhosted.org/packages/f7/6e/286894f6f71926bc0da67408c853dd9ba953f662dcb70993a59fd499f111/cryptography-45.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3", size = 4199211, upload-time = "2025-08-05T23:58:50.139Z" }, + { url = "https://files.pythonhosted.org/packages/de/34/a7f55e39b9623c5cb571d77a6a90387fe557908ffc44f6872f26ca8ae270/cryptography-45.0.6-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3", size = 3883732, upload-time = "2025-08-05T23:58:52.253Z" }, + { url = "https://files.pythonhosted.org/packages/f9/b9/c6d32edbcba0cd9f5df90f29ed46a65c4631c4fbe11187feb9169c6ff506/cryptography-45.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301", size = 4450655, upload-time = "2025-08-05T23:58:53.848Z" }, + { url = "https://files.pythonhosted.org/packages/77/2d/09b097adfdee0227cfd4c699b3375a842080f065bab9014248933497c3f9/cryptography-45.0.6-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5", size = 4198956, upload-time = "2025-08-05T23:58:55.209Z" }, + { url = "https://files.pythonhosted.org/packages/55/66/061ec6689207d54effdff535bbdf85cc380d32dd5377173085812565cf38/cryptography-45.0.6-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016", size = 4449859, upload-time = "2025-08-05T23:58:56.639Z" }, + { url = "https://files.pythonhosted.org/packages/41/ff/e7d5a2ad2d035e5a2af116e1a3adb4d8fcd0be92a18032917a089c6e5028/cryptography-45.0.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3", size = 4320254, upload-time = "2025-08-05T23:58:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/82/27/092d311af22095d288f4db89fcaebadfb2f28944f3d790a4cf51fe5ddaeb/cryptography-45.0.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9", size = 4554815, upload-time = "2025-08-05T23:59:00.283Z" }, + { url = "https://files.pythonhosted.org/packages/7e/01/aa2f4940262d588a8fdf4edabe4cda45854d00ebc6eaac12568b3a491a16/cryptography-45.0.6-cp37-abi3-win32.whl", hash = "sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02", size = 2912147, upload-time = "2025-08-05T23:59:01.716Z" }, + { url = "https://files.pythonhosted.org/packages/0a/bc/16e0276078c2de3ceef6b5a34b965f4436215efac45313df90d55f0ba2d2/cryptography-45.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b", size = 3390459, upload-time = "2025-08-05T23:59:03.358Z" }, +] + +[[package]] +name = "cytoolz" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "toolz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/f9/3243eed3a6545c2a33a21f74f655e3fcb5d2192613cd3db81a93369eb339/cytoolz-1.0.1.tar.gz", hash = "sha256:89cc3161b89e1bb3ed7636f74ed2e55984fd35516904fc878cae216e42b2c7d6", size = 626652, upload-time = "2024-12-13T05:47:36.672Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d8/e8/218098344ed2cb5f8441fade9b2428e435e7073962374a9c71e59ac141a7/cytoolz-1.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fcb8f7d0d65db1269022e7e0428471edee8c937bc288ebdcb72f13eaa67c2fe4", size = 414121, upload-time = "2024-12-13T05:45:26.588Z" }, + { url = "https://files.pythonhosted.org/packages/de/27/4d729a5653718109262b758fec1a959aa9facb74c15460d9074dc76d6635/cytoolz-1.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:207d4e4b445e087e65556196ff472ff134370d9a275d591724142e255f384662", size = 390904, upload-time = "2024-12-13T05:45:27.718Z" }, + { url = "https://files.pythonhosted.org/packages/72/c0/cbabfa788bab9c6038953bf9478adaec06e88903a726946ea7c88092f5c4/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21cdf6bac6fd843f3b20280a66fd8df20dea4c58eb7214a2cd8957ec176f0bb3", size = 2090734, upload-time = "2024-12-13T05:45:30.515Z" }, + { url = "https://files.pythonhosted.org/packages/c3/66/369262c60f9423c2da82a60864a259c852f1aa122aced4acd2c679af58c0/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a55ec098036c0dea9f3bdc021f8acd9d105a945227d0811589f0573f21c9ce1", size = 2155933, upload-time = "2024-12-13T05:45:32.721Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/ee55186802f8d24b5fbf9a11405ccd1203b30eded07cc17750618219b94e/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a13ab79ff4ce202e03ab646a2134696988b554b6dc4b71451e948403db1331d8", size = 2171903, upload-time = "2024-12-13T05:45:34.205Z" }, + { url = "https://files.pythonhosted.org/packages/a1/96/bd1a9f3396e9b7f618db8cd08d15630769ce3c8b7d0534f92cd639c977ae/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e2d944799026e1ff08a83241f1027a2d9276c41f7a74224cd98b7df6e03957d", size = 2125270, upload-time = "2024-12-13T05:45:36.982Z" }, + { url = "https://files.pythonhosted.org/packages/28/48/2a3762873091c88a69e161111cfbc6c222ff145d57ff011a642b169f04f1/cytoolz-1.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88ba85834cd523b91fdf10325e1e6d71c798de36ea9bdc187ca7bd146420de6f", size = 1973967, upload-time = "2024-12-13T05:45:39.505Z" }, + { url = "https://files.pythonhosted.org/packages/e4/50/500bd69774bdc49a4d78ec8779eb6ac7c1a9d706bfd91cf2a1dba604373a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a750b1af7e8bf6727f588940b690d69e25dc47cce5ce467925a76561317eaf7", size = 2021695, upload-time = "2024-12-13T05:45:40.911Z" }, + { url = "https://files.pythonhosted.org/packages/e4/4e/ba5a0ce34869495eb50653de8d676847490cf13a2cac1760fc4d313e78de/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44a71870f7eae31d263d08b87da7c2bf1176f78892ed8bdade2c2850478cb126", size = 2010177, upload-time = "2024-12-13T05:45:42.48Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/615c630b3089a13adb15351d958d227430cf624f03b1dd39eb52c34c1f59/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c8231b9abbd8e368e036f4cc2e16902c9482d4cf9e02a6147ed0e9a3cd4a9ab0", size = 2154321, upload-time = "2024-12-13T05:45:43.979Z" }, + { url = "https://files.pythonhosted.org/packages/7f/0f/fe1aa2d931e3b35ecc05215bd75da945ea7346095b3b6f6027164e602d5a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:aa87599ccc755de5a096a4d6c34984de6cd9dc928a0c5eaa7607457317aeaf9b", size = 2188374, upload-time = 
"2024-12-13T05:45:46.783Z" }, + { url = "https://files.pythonhosted.org/packages/de/fa/fd363d97a641b6d0e2fd1d5c35b8fd41d9ccaeb4df56302f53bf23a58e3a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:67cd16537df51baabde3baa770ab7b8d16839c4d21219d5b96ac59fb012ebd2d", size = 2077911, upload-time = "2024-12-13T05:45:48.219Z" }, + { url = "https://files.pythonhosted.org/packages/d9/68/0a22946b98ae5201b54ccb4e651295285c0fb79406022b6ee8b2f791940c/cytoolz-1.0.1-cp312-cp312-win32.whl", hash = "sha256:fb988c333f05ee30ad4693fe4da55d95ec0bb05775d2b60191236493ea2e01f9", size = 321903, upload-time = "2024-12-13T05:45:50.3Z" }, + { url = "https://files.pythonhosted.org/packages/62/1a/f3903197956055032f8cb297342e2dff07e50f83991aebfe5b4c4fcb55e4/cytoolz-1.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:8f89c48d8e5aec55ffd566a8ec858706d70ed0c6a50228eca30986bfa5b4da8b", size = 364490, upload-time = "2024-12-13T05:45:51.494Z" }, + { url = "https://files.pythonhosted.org/packages/aa/2e/a9f069db0107749e9e72baf6c21abe3f006841a3bcfdc9b8420e22ef31eb/cytoolz-1.0.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:6944bb93b287032a4c5ca6879b69bcd07df46f3079cf8393958cf0b0454f50c0", size = 407365, upload-time = "2024-12-13T05:45:52.803Z" }, + { url = "https://files.pythonhosted.org/packages/a9/9b/5e87dd0e31f54c778b4f9f34cc14c1162d3096c8d746b0f8be97d70dd73c/cytoolz-1.0.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e027260fd2fc5cb041277158ac294fc13dca640714527219f702fb459a59823a", size = 385233, upload-time = "2024-12-13T05:45:53.994Z" }, + { url = "https://files.pythonhosted.org/packages/63/00/2fd32b16284cdb97cfe092822179bc0c3bcdd5e927dd39f986169a517642/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88662c0e07250d26f5af9bc95911e6137e124a5c1ec2ce4a5d74de96718ab242", size = 2062903, upload-time = "2024-12-13T05:45:55.202Z" }, + { url = "https://files.pythonhosted.org/packages/85/39/b3cbb5a9847ba59584a263772ad4f8ca2dbfd2a0e11efd09211d1219804c/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:309dffa78b0961b4c0cf55674b828fbbc793cf2d816277a5c8293c0c16155296", size = 2139517, upload-time = "2024-12-13T05:45:56.804Z" }, + { url = "https://files.pythonhosted.org/packages/ea/39/bfcab4a46d50c467e36fe704f19d8904efead417787806ee210327f68390/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:edb34246e6eb40343c5860fc51b24937698e4fa1ee415917a73ad772a9a1746b", size = 2154849, upload-time = "2024-12-13T05:45:58.814Z" }, + { url = "https://files.pythonhosted.org/packages/fd/42/3bc6ee61b0aa47e1cb40819adc1a456d7efa809f0dea9faddacb43fdde8f/cytoolz-1.0.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a54da7a8e4348a18d45d4d5bc84af6c716d7f131113a4f1cc45569d37edff1b", size = 2102302, upload-time = "2024-12-13T05:46:00.181Z" }, + { url = "https://files.pythonhosted.org/packages/00/66/3f636c6ddea7b18026b90a8c238af472e423b86e427b11df02213689b012/cytoolz-1.0.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:241c679c3b1913c0f7259cf1d9639bed5084c86d0051641d537a0980548aa266", size = 1960872, upload-time = "2024-12-13T05:46:01.612Z" }, + { url = "https://files.pythonhosted.org/packages/40/36/cb3b7cdd651007b69f9c48e9d104cec7cb8dc53afa1d6a720e5ad08022fa/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5bfc860251a8f280ac79696fc3343cfc3a7c30b94199e0240b6c9e5b6b01a2a5", size = 2014430, upload-time = 
"2024-12-13T05:46:03.022Z" }, + { url = "https://files.pythonhosted.org/packages/88/3f/2e9bd2a16cfd269808922147551dcb2d8b68ba54a2c4deca2fa6a6cd0d5f/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:c8edd1547014050c1bdad3ff85d25c82bd1c2a3c96830c6181521eb78b9a42b3", size = 2003127, upload-time = "2024-12-13T05:46:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/c4/7d/08604ff940aa784df8343c387fdf2489b948b714a6afb587775ae94da912/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b349bf6162e8de215403d7f35f8a9b4b1853dc2a48e6e1a609a5b1a16868b296", size = 2142369, upload-time = "2024-12-13T05:46:06.004Z" }, + { url = "https://files.pythonhosted.org/packages/d2/c6/39919a0645bdbdf720e97cae107f959ea9d1267fbc3b0d94fc6e1d12ac8f/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:1b18b35256219b6c3dd0fa037741b85d0bea39c552eab0775816e85a52834140", size = 2180427, upload-time = "2024-12-13T05:46:07.526Z" }, + { url = "https://files.pythonhosted.org/packages/d8/03/dbb9d47556ee54337e7e0ac209d17ceff2d2a197c34de08005abc7a7449b/cytoolz-1.0.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:738b2350f340ff8af883eb301054eb724997f795d20d90daec7911c389d61581", size = 2069785, upload-time = "2024-12-13T05:46:10.122Z" }, + { url = "https://files.pythonhosted.org/packages/ea/f8/11bb7b8947002231faae3ec2342df5896afbc19eb783a332cce6d219ff79/cytoolz-1.0.1-cp313-cp313-win32.whl", hash = "sha256:9cbd9c103df54fcca42be55ef40e7baea624ac30ee0b8bf1149f21146d1078d9", size = 320685, upload-time = "2024-12-13T05:46:11.553Z" }, + { url = "https://files.pythonhosted.org/packages/40/eb/dde173cf2357084ca9423950be1f2f11ab11d65d8bd30165bfb8fd4213e9/cytoolz-1.0.1-cp313-cp313-win_amd64.whl", hash = "sha256:90e577e08d3a4308186d9e1ec06876d4756b1e8164b92971c69739ea17e15297", size = 362898, upload-time = "2024-12-13T05:46:12.771Z" }, +] + +[[package]] +name = "dataclasses-json" +version = "0.6.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "marshmallow" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227, upload-time = "2024-06-09T16:20:19.103Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686, upload-time = "2024-06-09T16:20:16.715Z" }, +] + +[[package]] +name = "deprecation" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/d3/8ae2869247df154b64c1884d7346d412fed0c49df84db635aab2d1c40e62/deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff", size = 173788, upload-time = "2020-04-20T14:23:38.738Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/c3/253a89ee03fc9b9682f1541728eb66db7db22148cd94f89ab22528cd1e1b/deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a", size = 11178, upload-time = "2020-04-20T14:23:36.581Z" }, +] + +[[package]] +name = "distlib" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +sdist 
= { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923, upload-time = "2024-10-09T18:35:47.551Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973, upload-time = "2024-10-09T18:35:44.272Z" }, +] + +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, +] + +[[package]] +name = "dydantic" +version = "0.0.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/c5/2d097e5a4816b15186c1ae06c5cfe3c332e69a0f3556dc6cee2d370acf2a/dydantic-0.0.8.tar.gz", hash = "sha256:14a31d4cdfce314ce3e69e8f8c7c46cbc26ce3ce4485de0832260386c612942f", size = 8115, upload-time = "2025-01-29T20:36:13.771Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/7c/a1b120141a300853d82291faf0ba1a95133fa390e4b7d773647b69c8c0f4/dydantic-0.0.8-py3-none-any.whl", hash = "sha256:cd0a991f523bd8632699872f1c0c4278415dd04783e36adec5428defa0afb721", size = 8637, upload-time = "2025-01-29T20:36:12.217Z" }, +] + +[[package]] +name = "ecdsa" +version = "0.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/1f/924e3caae75f471eae4b26bd13b698f6af2c44279f67af317439c2f4c46a/ecdsa-0.19.1.tar.gz", hash = "sha256:478cba7b62555866fcb3bb3fe985e06decbdb68ef55713c4e5ab98c57d508e61", size = 201793, upload-time = "2025-03-13T11:52:43.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/a3/460c57f094a4a165c84a1341c373b0a4f5ec6ac244b998d5021aade89b77/ecdsa-0.19.1-py2.py3-none-any.whl", hash = "sha256:30638e27cf77b7e15c4c4cc1973720149e1033827cfd00661ca5c8cc0cdb24c3", size = 150607, upload-time = "2025-03-13T11:52:41.757Z" }, +] + +[[package]] +name = "egcd" +version = "2.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/f5/c0c0808f8a3f8a4af605b48a241b16a634ceddd41b5e3ee05ae2fd9e1e42/egcd-2.0.2.tar.gz", hash = "sha256:3b05b0feb67549f8f76c97afed36c53252c0d7cb9a65bf4e6ca8b99110fb77f2", size = 6952, upload-time = "2024-12-31T21:05:21.984Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/e7/9d984faee490e50a495b50d0a87c42fe661252f9513157776d8cb2724445/egcd-2.0.2-py3-none-any.whl", hash = "sha256:2f0576a651b4aa9e9c4640bba078f9741d1624f386b55cb5363a79ae4b564bd2", size = 7187, upload-time = "2024-12-31T21:05:19.098Z" }, +] + +[[package]] +name = "epyxid" +version = "0.3.3" +source = { registry = "https://pypi.org/simple" } 
+sdist = { url = "https://files.pythonhosted.org/packages/a4/6e/0c7c674117ca089890eb311d17b29035dd2226978f29288b8bcbece9ace7/epyxid-0.3.3.tar.gz", hash = "sha256:3fbb54b96b5c1fdc1cb2484c992e450beaeb21a299ba5fbb6fcf8a2b04ee4249", size = 10579, upload-time = "2025-01-13T08:24:51.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/44/53bd1ef40092013712359c4a8e1f74df321076f889ef3d59271fa2c094cf/epyxid-0.3.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:2dbd91b3e31d6a722cb621f62300f6e75c1f67b56582a16d52193ca589e66adb", size = 264092, upload-time = "2025-01-13T08:24:47.174Z" }, + { url = "https://files.pythonhosted.org/packages/b8/fd/6a0df28c23af1343e79abdfcbdd79084a8e98279f1405e09c035def88d27/epyxid-0.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87394b46a9b6a70693afd2b2448bdcea6bb4e47462d3658f177c80e64ee4307b", size = 256315, upload-time = "2025-01-13T08:24:39.447Z" }, + { url = "https://files.pythonhosted.org/packages/a1/68/7c4e83ba6d2c8588b70ea10a9ffaf22763a9d14d80dfa54fa86a26e34a7b/epyxid-0.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af1be68bb3ca25cd0e4f2c3fd9d42dc652d0e5dc5f80ac26bb4fb3481fc0924d", size = 280706, upload-time = "2025-01-13T08:23:18.882Z" }, + { url = "https://files.pythonhosted.org/packages/5c/0a/d401924e611a06d3bc72432669eaa6d8062e1321ac29896db713b2768bb6/epyxid-0.3.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2a83f783324e2fc31639de40b253dc3dd6cbe1477ac5f46e3c3a0194da6665", size = 295278, upload-time = "2025-01-13T08:23:33.028Z" }, + { url = "https://files.pythonhosted.org/packages/dd/f0/6020ef13af523f52f2911b14784168bfd82caddc88731996da9f92977bc9/epyxid-0.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8eebaf3cd173f4b57749081dab32b3aa7fba1ae5de455559ab143a6dd84571f", size = 312316, upload-time = "2025-01-13T08:23:48.777Z" }, + { url = "https://files.pythonhosted.org/packages/40/40/c8dbcf13c48dd330748a63d9524f876da6c0fdb07d3b324ba2665f9744ef/epyxid-0.3.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:103ba7f38c376e4534582a24a6a71ac169c7d936d44b1ff9a73a30f72cc39919", size = 361536, upload-time = "2025-01-13T08:24:03.133Z" }, + { url = "https://files.pythonhosted.org/packages/b1/e9/b40b9b02e9a41b749403f46d96aac4d9038b65820950ba405ebb212d6e72/epyxid-0.3.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d8c7044b57eb62f09eebcc33607282189df6b2312f87f8f1c654204c27c6c60", size = 285577, upload-time = "2025-01-13T08:24:29.292Z" }, + { url = "https://files.pythonhosted.org/packages/f8/06/a51158d6aad4196a1dea8996553a94c20509b27c14891efa02f74c898d23/epyxid-0.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02dc9fc6bbfea1b6f2e38e2436df66be91034ef609d53f77049714ad6f027072", size = 299404, upload-time = "2025-01-13T08:24:18.594Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/8a0584c630fc5480b3f824bcce66afc6e61663731fd18b0be7c0bfaf40c2/epyxid-0.3.3-cp312-cp312-win32.whl", hash = "sha256:923dd761bd4615113a318c01fec02d364f574b59a45ca49ad706a50db8883161", size = 163283, upload-time = "2025-01-13T08:25:06.163Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e9/dcb5cc97a049d2648324b8faa33df01c9f78938f682713fef19f4618679f/epyxid-0.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:47faf9e3acc2733155c42ff878ad054645f856c1a1f34a63c85ed91446ebfdf4", size = 169123, upload-time = "2025-01-13T08:24:54.817Z" }, + { url = 
"https://files.pythonhosted.org/packages/bb/2b/d0fd787face4e624670ebb439dbcce478a0735f9b1d16a9c3454127a6c59/epyxid-0.3.3-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1b67a002d22745decfbcc28c5bb101a5652b655d1532f5971cbdeb0935a54bbb", size = 264091, upload-time = "2025-01-13T08:24:48.232Z" }, + { url = "https://files.pythonhosted.org/packages/9c/c4/206d893a8641cc286c9e757867e4a13e44a2197e9fa5d4f76f29e246189c/epyxid-0.3.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d131d9999265555f6ce73142ea98dc42a1dc0d732e1c4b2e0ae798fb14739dac", size = 256307, upload-time = "2025-01-13T08:24:40.496Z" }, + { url = "https://files.pythonhosted.org/packages/d0/a7/7c3349be50da748c66a3e355dd9646613c4e07b42ea3affdf71799e6786a/epyxid-0.3.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fa6758ac94a4178db4e600b29e37c5eecd0ec31ffd85f1c035b868bf198a7b7", size = 280705, upload-time = "2025-01-13T08:23:21.198Z" }, + { url = "https://files.pythonhosted.org/packages/38/27/2cb9dd00cd5db6eecd2847a985d980788ea937682070f9eb9165a2e09d8e/epyxid-0.3.3-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:85604c081efd99d7e9efaea74d71d1247c72368e5551df3bb819877a44dff91e", size = 295277, upload-time = "2025-01-13T08:23:34.259Z" }, + { url = "https://files.pythonhosted.org/packages/34/7a/bb1461aaddf0b2091659f0588b92a036d3e4bc053af152e18fa6a82a33e4/epyxid-0.3.3-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9e1964633d17888f318db05f5434207559c463a300c6900782f567c0586e809d", size = 312317, upload-time = "2025-01-13T08:23:49.86Z" }, + { url = "https://files.pythonhosted.org/packages/e8/aa/de279e81e56c125bebf8bc52e559b8411e05cd8f764ea44b5146170120f3/epyxid-0.3.3-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c4c7e157b2ba95002ed1a9e2ae72d6d7565156bc1bbc58a31bb9b0433372e5", size = 361535, upload-time = "2025-01-13T08:24:04.241Z" }, + { url = "https://files.pythonhosted.org/packages/52/dc/93a55850fa98b69f4361fc4b124b4d2f49352cf928c8f3982cb1acaf9405/epyxid-0.3.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:29ec69cb69d45ef643d3da0188b021291c19c6d242a17273f6d96e8a9bbd74ab", size = 285576, upload-time = "2025-01-13T08:24:30.413Z" }, + { url = "https://files.pythonhosted.org/packages/2a/2b/f5b9f9847d26dc3e47a12798d4c0678c54f82ab453e15b197265f7da442e/epyxid-0.3.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ee02dff3574e77d5c1cc631bcb2e051b78fa207f0db209542bdc390f2ae83940", size = 299404, upload-time = "2025-01-13T08:24:19.682Z" }, + { url = "https://files.pythonhosted.org/packages/08/6f/774202cc066cd0cdfc9b6b2e3d06f5859736d0022b2c1deffe4742c00cd3/epyxid-0.3.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90e3fb69966e7f7242e5310ca06c908e2e4f7424ac517553c9534f8f7cbba792", size = 279728, upload-time = "2025-01-13T08:23:22.346Z" }, + { url = "https://files.pythonhosted.org/packages/1c/0d/f0b0a1ff539d4a74686012451fcdb9eb14a1374357b558194d03fca36ede/epyxid-0.3.3-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9734a4044e3731bfb42566ffc95ec96d175c161e8bd4823671c2912066104ba7", size = 294781, upload-time = "2025-01-13T08:23:35.343Z" }, + { url = "https://files.pythonhosted.org/packages/a5/9c/2edf3d9eb2e79628872241a394dc61df2f35fe2137b58b6292bf4e1577f3/epyxid-0.3.3-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bd575164d250a94c1e5708a16cc2bedc447f05d5cf4792183b3936fdde7296e", size = 
311572, upload-time = "2025-01-13T08:23:52.218Z" }, + { url = "https://files.pythonhosted.org/packages/0a/cd/b85c0af374ebc1825b20e23e36fd57e5279bc9d055a07619205286df00cb/epyxid-0.3.3-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4ec5c8b0fb12200826c0a2cd934ea1928d81a87bacac82f32780aa11a64973f7", size = 364267, upload-time = "2025-01-13T08:24:06.883Z" }, +] + +[[package]] +name = "eth-abi" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-typing" }, + { name = "eth-utils" }, + { name = "parsimonious" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/71/d9e1380bd77fd22f98b534699af564f189b56d539cc2b9dab908d4e4c242/eth_abi-5.2.0.tar.gz", hash = "sha256:178703fa98c07d8eecd5ae569e7e8d159e493ebb6eeb534a8fe973fbc4e40ef0", size = 49797, upload-time = "2025-01-14T16:29:34.629Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/b4/2f3982c4cbcbf5eeb6aec62df1533c0e63c653b3021ff338d44944405676/eth_abi-5.2.0-py3-none-any.whl", hash = "sha256:17abe47560ad753f18054f5b3089fcb588f3e3a092136a416b6c1502cb7e8877", size = 28511, upload-time = "2025-01-14T16:29:31.862Z" }, +] + +[[package]] +name = "eth-account" +version = "0.13.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bitarray" }, + { name = "ckzg" }, + { name = "eth-abi" }, + { name = "eth-keyfile" }, + { name = "eth-keys" }, + { name = "eth-rlp" }, + { name = "eth-utils" }, + { name = "hexbytes" }, + { name = "pydantic" }, + { name = "rlp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/cf/20f76a29be97339c969fd765f1237154286a565a1d61be98e76bb7af946a/eth_account-0.13.7.tar.gz", hash = "sha256:5853ecbcbb22e65411176f121f5f24b8afeeaf13492359d254b16d8b18c77a46", size = 935998, upload-time = "2025-04-21T21:11:21.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/46/18/088fb250018cbe665bc2111974301b2d59f294a565aff7564c4df6878da2/eth_account-0.13.7-py3-none-any.whl", hash = "sha256:39727de8c94d004ff61d10da7587509c04d2dc7eac71e04830135300bdfc6d24", size = 587452, upload-time = "2025-04-21T21:11:18.346Z" }, +] + +[[package]] +name = "eth-hash" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/38/577b7bc9380ef9dff0f1dffefe0c9a1ded2385e7a06c306fd95afb6f9451/eth_hash-0.7.1.tar.gz", hash = "sha256:d2411a403a0b0a62e8247b4117932d900ffb4c8c64b15f92620547ca5ce46be5", size = 12227, upload-time = "2025-01-13T21:29:21.765Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/db/f8775490669d28aca24871c67dd56b3e72105cb3bcae9a4ec65dd70859b3/eth_hash-0.7.1-py3-none-any.whl", hash = "sha256:0fb1add2adf99ef28883fd6228eb447ef519ea72933535ad1a0b28c6f65f868a", size = 8028, upload-time = "2025-01-13T21:29:19.365Z" }, +] + +[package.optional-dependencies] +pycryptodome = [ + { name = "pycryptodome" }, +] + +[[package]] +name = "eth-keyfile" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-keys" }, + { name = "eth-utils" }, + { name = "pycryptodome" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/35/66/dd823b1537befefbbff602e2ada88f1477c5b40ec3731e3d9bc676c5f716/eth_keyfile-0.8.1.tar.gz", hash = "sha256:9708bc31f386b52cca0969238ff35b1ac72bd7a7186f2a84b86110d3c973bec1", size = 12267, upload-time = "2024-04-23T20:28:53.862Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/88/fc/48a586175f847dd9e05e5b8994d2fe8336098781ec2e9836a2ad94280281/eth_keyfile-0.8.1-py3-none-any.whl", hash = "sha256:65387378b82fe7e86d7cb9f8d98e6d639142661b2f6f490629da09fddbef6d64", size = 7510, upload-time = "2024-04-23T20:28:51.063Z" }, +] + +[[package]] +name = "eth-keys" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-typing" }, + { name = "eth-utils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/11/1ed831c50bd74f57829aa06e58bd82a809c37e070ee501c953b9ac1f1552/eth_keys-0.7.0.tar.gz", hash = "sha256:79d24fd876201df67741de3e3fefb3f4dbcbb6ace66e47e6fe662851a4547814", size = 30166, upload-time = "2025-04-07T17:40:21.697Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/25/0ae00f2b0095e559d61ad3dc32171bd5a29dfd95ab04b4edd641f7c75f72/eth_keys-0.7.0-py3-none-any.whl", hash = "sha256:b0cdda8ffe8e5ba69c7c5ca33f153828edcace844f67aabd4542d7de38b159cf", size = 20656, upload-time = "2025-04-07T17:40:20.441Z" }, +] + +[[package]] +name = "eth-rlp" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-utils" }, + { name = "hexbytes" }, + { name = "rlp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7f/ea/ad39d001fa9fed07fad66edb00af701e29b48be0ed44a3bcf58cb3adf130/eth_rlp-2.2.0.tar.gz", hash = "sha256:5e4b2eb1b8213e303d6a232dfe35ab8c29e2d3051b86e8d359def80cd21db83d", size = 7720, upload-time = "2025-02-04T21:51:08.134Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/3b/57efe2bc2df0980680d57c01a36516cd3171d2319ceb30e675de19fc2cc5/eth_rlp-2.2.0-py3-none-any.whl", hash = "sha256:5692d595a741fbaef1203db6a2fedffbd2506d31455a6ad378c8449ee5985c47", size = 4446, upload-time = "2025-02-04T21:51:05.823Z" }, +] + +[[package]] +name = "eth-typing" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/54/62aa24b9cc708f06316167ee71c362779c8ed21fc8234a5cd94a8f53b623/eth_typing-5.2.1.tar.gz", hash = "sha256:7557300dbf02a93c70fa44af352b5c4a58f94e997a0fd6797fb7d1c29d9538ee", size = 21806, upload-time = "2025-04-14T20:39:28.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/72/c370bbe4c53da7bf998d3523f5a0f38867654923a82192df88d0705013d3/eth_typing-5.2.1-py3-none-any.whl", hash = "sha256:b0c2812ff978267563b80e9d701f487dd926f1d376d674f3b535cfe28b665d3d", size = 19163, upload-time = "2025-04-14T20:39:26.571Z" }, +] + +[[package]] +name = "eth-utils" +version = "5.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cytoolz", marker = "implementation_name == 'cpython'" }, + { name = "eth-hash" }, + { name = "eth-typing" }, + { name = "pydantic" }, + { name = "toolz", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0d/49/bee95f16d2ef068097afeeffbd6c67738107001ee57ad7bcdd4fc4d3c6a7/eth_utils-5.3.0.tar.gz", hash = "sha256:1f096867ac6be895f456fa3acb26e9573ae66e753abad9208f316d24d6178156", size = 123753, upload-time = "2025-04-14T19:35:56.431Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c4/c6/0417a92e6a3fc9b85f5a8380d9f9d43b69ba836a90e45f79f9ae74d41e53/eth_utils-5.3.0-py3-none-any.whl", hash = "sha256:ac184883ab299d923428bbe25dae5e356979a3993e0ef695a864db0a20bc262d", size = 102531, upload-time = "2025-04-14T19:35:55.176Z" }, +] + 
+[[package]] +name = "fastapi" +version = "0.116.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" }, +] + +[[package]] +name = "filelock" +version = "3.16.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037, upload-time = "2024-09-17T19:02:01.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163, upload-time = "2024-09-17T19:02:00.268Z" }, +] + +[[package]] +name = "filetype" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/29/745f7d30d47fe0f251d3ad3dc2978a23141917661998763bebb6da007eb1/filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb", size = 998020, upload-time = "2022-11-02T17:34:04.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = "sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" }, + { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" }, + { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = 
"2025-06-09T23:00:44.793Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" }, + { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" }, + { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" }, + { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" }, + { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" }, + { url = "https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" }, + { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" }, + { url = "https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" }, + { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" }, + { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" }, + { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" }, + { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" }, + { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" }, + { url = "https://files.pythonhosted.org/packages/24/90/6b2cebdabdbd50367273c20ff6b57a3dfa89bd0762de02c3a1eb42cb6462/frozenlist-1.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ee80eeda5e2a4e660651370ebffd1286542b67e268aa1ac8d6dbe973120ef7ee", size = 79791, upload-time = "2025-06-09T23:01:09.368Z" }, + { url = "https://files.pythonhosted.org/packages/83/2e/5b70b6a3325363293fe5fc3ae74cdcbc3e996c2a11dde2fd9f1fb0776d19/frozenlist-1.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:d1a81c85417b914139e3a9b995d4a1c84559afc839a93cf2cb7f15e6e5f6ed2d", size = 47165, upload-time = "2025-06-09T23:01:10.653Z" }, + { url = "https://files.pythonhosted.org/packages/f4/25/a0895c99270ca6966110f4ad98e87e5662eab416a17e7fd53c364bf8b954/frozenlist-1.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cbb65198a9132ebc334f237d7b0df163e4de83fb4f2bdfe46c1e654bdb0c5d43", size = 45881, upload-time = "2025-06-09T23:01:12.296Z" }, + { url = "https://files.pythonhosted.org/packages/19/7c/71bb0bbe0832793c601fff68cd0cf6143753d0c667f9aec93d3c323f4b55/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dab46c723eeb2c255a64f9dc05b8dd601fde66d6b19cdb82b2e09cc6ff8d8b5d", size = 232409, upload-time = "2025-06-09T23:01:13.641Z" }, + { url = "https://files.pythonhosted.org/packages/c0/45/ed2798718910fe6eb3ba574082aaceff4528e6323f9a8570be0f7028d8e9/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6aeac207a759d0dedd2e40745575ae32ab30926ff4fa49b1635def65806fddee", size = 225132, upload-time = "2025-06-09T23:01:15.264Z" }, + { url = "https://files.pythonhosted.org/packages/ba/e2/8417ae0f8eacb1d071d4950f32f229aa6bf68ab69aab797b72a07ea68d4f/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bd8c4e58ad14b4fa7802b8be49d47993182fdd4023393899632c88fd8cd994eb", size = 237638, upload-time = "2025-06-09T23:01:16.752Z" }, + { url = "https://files.pythonhosted.org/packages/f8/b7/2ace5450ce85f2af05a871b8c8719b341294775a0a6c5585d5e6170f2ce7/frozenlist-1.7.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04fb24d104f425da3540ed83cbfc31388a586a7696142004c577fa61c6298c3f", size = 233539, upload-time = "2025-06-09T23:01:18.202Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/b9/6989292c5539553dba63f3c83dc4598186ab2888f67c0dc1d917e6887db6/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a5c505156368e4ea6b53b5ac23c92d7edc864537ff911d2fb24c140bb175e60", size = 215646, upload-time = "2025-06-09T23:01:19.649Z" }, + { url = "https://files.pythonhosted.org/packages/72/31/bc8c5c99c7818293458fe745dab4fd5730ff49697ccc82b554eb69f16a24/frozenlist-1.7.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bd7eb96a675f18aa5c553eb7ddc24a43c8c18f22e1f9925528128c052cdbe00", size = 232233, upload-time = "2025-06-09T23:01:21.175Z" }, + { url = "https://files.pythonhosted.org/packages/59/52/460db4d7ba0811b9ccb85af996019f5d70831f2f5f255f7cc61f86199795/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:05579bf020096fe05a764f1f84cd104a12f78eaab68842d036772dc6d4870b4b", size = 227996, upload-time = "2025-06-09T23:01:23.098Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/f4b39e904c03927b7ecf891804fd3b4df3db29b9e487c6418e37988d6e9d/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:376b6222d114e97eeec13d46c486facd41d4f43bab626b7c3f6a8b4e81a5192c", size = 242280, upload-time = "2025-06-09T23:01:24.808Z" }, + { url = "https://files.pythonhosted.org/packages/b8/33/3f8d6ced42f162d743e3517781566b8481322be321b486d9d262adf70bfb/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0aa7e176ebe115379b5b1c95b4096fb1c17cce0847402e227e712c27bdb5a949", size = 217717, upload-time = "2025-06-09T23:01:26.28Z" }, + { url = "https://files.pythonhosted.org/packages/3e/e8/ad683e75da6ccef50d0ab0c2b2324b32f84fc88ceee778ed79b8e2d2fe2e/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3fbba20e662b9c2130dc771e332a99eff5da078b2b2648153a40669a6d0e36ca", size = 236644, upload-time = "2025-06-09T23:01:27.887Z" }, + { url = "https://files.pythonhosted.org/packages/b2/14/8d19ccdd3799310722195a72ac94ddc677541fb4bef4091d8e7775752360/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f4410a0a601d349dd406b5713fec59b4cee7e71678d5b17edda7f4655a940b", size = 238879, upload-time = "2025-06-09T23:01:29.524Z" }, + { url = "https://files.pythonhosted.org/packages/ce/13/c12bf657494c2fd1079a48b2db49fa4196325909249a52d8f09bc9123fd7/frozenlist-1.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e2cdfaaec6a2f9327bf43c933c0319a7c429058e8537c508964a133dffee412e", size = 232502, upload-time = "2025-06-09T23:01:31.287Z" }, + { url = "https://files.pythonhosted.org/packages/d7/8b/e7f9dfde869825489382bc0d512c15e96d3964180c9499efcec72e85db7e/frozenlist-1.7.0-cp313-cp313-win32.whl", hash = "sha256:5fc4df05a6591c7768459caba1b342d9ec23fa16195e744939ba5914596ae3e1", size = 39169, upload-time = "2025-06-09T23:01:35.503Z" }, + { url = "https://files.pythonhosted.org/packages/35/89/a487a98d94205d85745080a37860ff5744b9820a2c9acbcdd9440bfddf98/frozenlist-1.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:52109052b9791a3e6b5d1b65f4b909703984b770694d3eb64fad124c835d7cba", size = 43219, upload-time = "2025-06-09T23:01:36.784Z" }, + { url = "https://files.pythonhosted.org/packages/56/d5/5c4cf2319a49eddd9dd7145e66c4866bdc6f3dbc67ca3d59685149c11e0d/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:a6f86e4193bb0e235ef6ce3dde5cbabed887e0b11f516ce8a0f4d3b33078ec2d", size = 84345, upload-time = "2025-06-09T23:01:38.295Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/7d/ec2c1e1dc16b85bc9d526009961953df9cec8481b6886debb36ec9107799/frozenlist-1.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:82d664628865abeb32d90ae497fb93df398a69bb3434463d172b80fc25b0dd7d", size = 48880, upload-time = "2025-06-09T23:01:39.887Z" }, + { url = "https://files.pythonhosted.org/packages/69/86/f9596807b03de126e11e7d42ac91e3d0b19a6599c714a1989a4e85eeefc4/frozenlist-1.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:912a7e8375a1c9a68325a902f3953191b7b292aa3c3fb0d71a216221deca460b", size = 48498, upload-time = "2025-06-09T23:01:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/5e/cb/df6de220f5036001005f2d726b789b2c0b65f2363b104bbc16f5be8084f8/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9537c2777167488d539bc5de2ad262efc44388230e5118868e172dd4a552b146", size = 292296, upload-time = "2025-06-09T23:01:42.685Z" }, + { url = "https://files.pythonhosted.org/packages/83/1f/de84c642f17c8f851a2905cee2dae401e5e0daca9b5ef121e120e19aa825/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:f34560fb1b4c3e30ba35fa9a13894ba39e5acfc5f60f57d8accde65f46cc5e74", size = 273103, upload-time = "2025-06-09T23:01:44.166Z" }, + { url = "https://files.pythonhosted.org/packages/88/3c/c840bfa474ba3fa13c772b93070893c6e9d5c0350885760376cbe3b6c1b3/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:acd03d224b0175f5a850edc104ac19040d35419eddad04e7cf2d5986d98427f1", size = 292869, upload-time = "2025-06-09T23:01:45.681Z" }, + { url = "https://files.pythonhosted.org/packages/a6/1c/3efa6e7d5a39a1d5ef0abeb51c48fb657765794a46cf124e5aca2c7a592c/frozenlist-1.7.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2038310bc582f3d6a09b3816ab01737d60bf7b1ec70f5356b09e84fb7408ab1", size = 291467, upload-time = "2025-06-09T23:01:47.234Z" }, + { url = "https://files.pythonhosted.org/packages/4f/00/d5c5e09d4922c395e2f2f6b79b9a20dab4b67daaf78ab92e7729341f61f6/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b8c05e4c8e5f36e5e088caa1bf78a687528f83c043706640a92cb76cd6999384", size = 266028, upload-time = "2025-06-09T23:01:48.819Z" }, + { url = "https://files.pythonhosted.org/packages/4e/27/72765be905619dfde25a7f33813ac0341eb6b076abede17a2e3fbfade0cb/frozenlist-1.7.0-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:765bb588c86e47d0b68f23c1bee323d4b703218037765dcf3f25c838c6fecceb", size = 284294, upload-time = "2025-06-09T23:01:50.394Z" }, + { url = "https://files.pythonhosted.org/packages/88/67/c94103a23001b17808eb7dd1200c156bb69fb68e63fcf0693dde4cd6228c/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:32dc2e08c67d86d0969714dd484fd60ff08ff81d1a1e40a77dd34a387e6ebc0c", size = 281898, upload-time = "2025-06-09T23:01:52.234Z" }, + { url = "https://files.pythonhosted.org/packages/42/34/a3e2c00c00f9e2a9db5653bca3fec306349e71aff14ae45ecc6d0951dd24/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:c0303e597eb5a5321b4de9c68e9845ac8f290d2ab3f3e2c864437d3c5a30cd65", size = 290465, upload-time = "2025-06-09T23:01:53.788Z" }, + { url = "https://files.pythonhosted.org/packages/bb/73/f89b7fbce8b0b0c095d82b008afd0590f71ccb3dee6eee41791cf8cd25fd/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = 
"sha256:a47f2abb4e29b3a8d0b530f7c3598badc6b134562b1a5caee867f7c62fee51e3", size = 266385, upload-time = "2025-06-09T23:01:55.769Z" }, + { url = "https://files.pythonhosted.org/packages/cd/45/e365fdb554159462ca12df54bc59bfa7a9a273ecc21e99e72e597564d1ae/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:3d688126c242a6fabbd92e02633414d40f50bb6002fa4cf995a1d18051525657", size = 288771, upload-time = "2025-06-09T23:01:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/00/11/47b6117002a0e904f004d70ec5194fe9144f117c33c851e3d51c765962d0/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:4e7e9652b3d367c7bd449a727dc79d5043f48b88d0cbfd4f9f1060cf2b414104", size = 288206, upload-time = "2025-06-09T23:01:58.936Z" }, + { url = "https://files.pythonhosted.org/packages/40/37/5f9f3c3fd7f7746082ec67bcdc204db72dad081f4f83a503d33220a92973/frozenlist-1.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1a85e345b4c43db8b842cab1feb41be5cc0b10a1830e6295b69d7310f99becaf", size = 282620, upload-time = "2025-06-09T23:02:00.493Z" }, + { url = "https://files.pythonhosted.org/packages/0b/31/8fbc5af2d183bff20f21aa743b4088eac4445d2bb1cdece449ae80e4e2d1/frozenlist-1.7.0-cp313-cp313t-win32.whl", hash = "sha256:3a14027124ddb70dfcee5148979998066897e79f89f64b13328595c4bdf77c81", size = 43059, upload-time = "2025-06-09T23:02:02.072Z" }, + { url = "https://files.pythonhosted.org/packages/bb/ed/41956f52105b8dbc26e457c5705340c67c8cc2b79f394b79bffc09d0e938/frozenlist-1.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3bf8010d71d4507775f658e9823210b7427be36625b387221642725b515dcf3e", size = 47516, upload-time = "2025-06-09T23:02:03.779Z" }, + { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, +] + +[[package]] +name = "gotrue" +version = "2.12.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "pydantic" }, + { name = "pyjwt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/6c/fe920e91959bd211325860332be5898b6b53d6ccd873c053fc5cc829020c/gotrue-2.12.4.tar.gz", hash = "sha256:35d2e58e066486321f4dff0033b30a53d057c7f436c15287122fa0cb833029b1", size = 34817, upload-time = "2025-08-08T15:55:49.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/2f/0e68d566d9339b8d320399d755704e17a94a123cf22f124b4ab2f686bcc3/gotrue-2.12.4-py3-none-any.whl", hash = "sha256:cf36dfcebc1da63b8d1e7b93eb1a35dfee3dcb1e1376833c256464010eb5fcd6", size = 42783, upload-time = "2025-08-08T15:55:48.289Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/b8/704d753a5a45507a7aab61f18db9509302ed3d0a27ac7e0359ec2905b1a6/greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d", size = 188260, upload-time = "2025-08-07T13:24:33.51Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" }, + { url = 
"https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" }, + { url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" }, + { url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" }, + { url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, + { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, + { url = "https://files.pythonhosted.org/packages/49/e8/58c7f85958bda41dafea50497cbd59738c5c43dbbea5ee83d651234398f4/greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31", size = 272814, upload-time = "2025-08-07T13:15:50.011Z" }, + { url = "https://files.pythonhosted.org/packages/62/dd/b9f59862e9e257a16e4e610480cfffd29e3fae018a68c2332090b53aac3d/greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945", size = 641073, upload-time = "2025-08-07T13:42:57.23Z" }, + { url = "https://files.pythonhosted.org/packages/f7/0b/bc13f787394920b23073ca3b6c4a7a21396301ed75a655bcb47196b50e6e/greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc", size = 655191, upload-time 
= "2025-08-07T13:45:29.752Z" }, + { url = "https://files.pythonhosted.org/packages/f2/d6/6adde57d1345a8d0f14d31e4ab9c23cfe8e2cd39c3baf7674b4b0338d266/greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a", size = 649516, upload-time = "2025-08-07T13:53:16.314Z" }, + { url = "https://files.pythonhosted.org/packages/7f/3b/3a3328a788d4a473889a2d403199932be55b1b0060f4ddd96ee7cdfcad10/greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504", size = 652169, upload-time = "2025-08-07T13:18:32.861Z" }, + { url = "https://files.pythonhosted.org/packages/ee/43/3cecdc0349359e1a527cbf2e3e28e5f8f06d3343aaf82ca13437a9aa290f/greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671", size = 610497, upload-time = "2025-08-07T13:18:31.636Z" }, + { url = "https://files.pythonhosted.org/packages/b8/19/06b6cf5d604e2c382a6f31cafafd6f33d5dea706f4db7bdab184bad2b21d/greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b", size = 1121662, upload-time = "2025-08-07T13:42:41.117Z" }, + { url = "https://files.pythonhosted.org/packages/a2/15/0d5e4e1a66fab130d98168fe984c509249c833c1a3c16806b90f253ce7b9/greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae", size = 1149210, upload-time = "2025-08-07T13:18:24.072Z" }, + { url = "https://files.pythonhosted.org/packages/0b/55/2321e43595e6801e105fcfdee02b34c0f996eb71e6ddffca6b10b7e1d771/greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b", size = 299685, upload-time = "2025-08-07T13:24:38.824Z" }, + { url = "https://files.pythonhosted.org/packages/22/5c/85273fd7cc388285632b0498dbbab97596e04b154933dfe0f3e68156c68c/greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0", size = 273586, upload-time = "2025-08-07T13:16:08.004Z" }, + { url = "https://files.pythonhosted.org/packages/d1/75/10aeeaa3da9332c2e761e4c50d4c3556c21113ee3f0afa2cf5769946f7a3/greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f", size = 686346, upload-time = "2025-08-07T13:42:59.944Z" }, + { url = "https://files.pythonhosted.org/packages/c0/aa/687d6b12ffb505a4447567d1f3abea23bd20e73a5bed63871178e0831b7a/greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5", size = 699218, upload-time = "2025-08-07T13:45:30.969Z" }, + { url = "https://files.pythonhosted.org/packages/dc/8b/29aae55436521f1d6f8ff4e12fb676f3400de7fcf27fccd1d4d17fd8fecd/greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1", size = 694659, upload-time = "2025-08-07T13:53:17.759Z" }, + { url = "https://files.pythonhosted.org/packages/92/2e/ea25914b1ebfde93b6fc4ff46d6864564fba59024e928bdc7de475affc25/greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735", size = 695355, upload-time = "2025-08-07T13:18:34.517Z" }, + { url = "https://files.pythonhosted.org/packages/72/60/fc56c62046ec17f6b0d3060564562c64c862948c9d4bc8aa807cf5bd74f4/greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337", size = 657512, upload-time = "2025-08-07T13:18:33.969Z" }, + { url = "https://files.pythonhosted.org/packages/e3/a5/6ddab2b4c112be95601c13428db1d8b6608a8b6039816f2ba09c346c08fc/greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01", size = 303425, upload-time = "2025-08-07T13:32:27.59Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "h2" +version = "4.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "hpack" }, + { name = "hyperframe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1b/38/d7f80fd13e6582fb8e0df8c9a653dcc02b03ca34f4d72f34869298c5baf8/h2-4.2.0.tar.gz", hash = "sha256:c8a52129695e88b1a0578d8d2cc6842bbd79128ac685463b887ee278126ad01f", size = 2150682, upload-time = "2025-02-02T07:43:51.815Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/9e/984486f2d0a0bd2b024bf4bc1c62688fcafa9e61991f041fb0e2def4a982/h2-4.2.0-py3-none-any.whl", hash = "sha256:479a53ad425bb29af087f3458a61d30780bc818e4ebcf01f0b536ba916462ed0", size = 60957, upload-time = "2025-02-01T11:02:26.481Z" }, +] + +[[package]] +name = "hexbytes" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/87/adf4635b4b8c050283d74e6db9a81496063229c9263e6acc1903ab79fbec/hexbytes-1.3.1.tar.gz", hash = "sha256:a657eebebdfe27254336f98d8af6e2236f3f83aed164b87466b6cf6c5f5a4765", size = 8633, upload-time = "2025-05-14T16:45:17.5Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/e0/3b31492b1c89da3c5a846680517871455b30c54738486fc57ac79a5761bd/hexbytes-1.3.1-py3-none-any.whl", hash = "sha256:da01ff24a1a9a2b1881c4b85f0e9f9b0f51b526b379ffa23832ae7899d29c2c7", size = 5074, upload-time = "2025-05-14T16:45:16.179Z" }, +] + +[[package]] +name = "hpack" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, 
upload-time = "2025-01-22T21:44:56.92Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[package.optional-dependencies] +http2 = [ + { name = "h2" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6e/fa/66bd985dd0b7c109a3bcb89272ee0bfb7e2b4d06309ad7b38ff866734b2a/httpx_sse-0.4.1.tar.gz", hash = "sha256:8f44d34414bc7b21bf3602713005c5df4917884f76072479b21f68befa4ea26e", size = 12998, upload-time = "2025-06-24T13:21:05.71Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/0a/6269e3473b09aed2dab8aa1a600c70f31f00ae1349bee30658f7e358a159/httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37", size = 8054, upload-time = "2025-06-24T13:21:04.772Z" }, +] + +[[package]] +name = "hyperframe" +version = "6.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "intentkit" +version = "0.0.1" +source = { editable = "." } +dependencies = [ + { name = "aiohttp" }, + { name = "aiosqlite" }, + { name = "asyncio" }, + { name = "asyncpg" }, + { name = "aws-secretsmanager-caching" }, + { name = "bip32" }, + { name = "boto3" }, + { name = "botocore" }, + { name = "cdp-sdk" }, + { name = "coinbase-agentkit" }, + { name = "coinbase-agentkit-langchain" }, + { name = "cron-validator" }, + { name = "epyxid" }, + { name = "eth-keys" }, + { name = "eth-utils" }, + { name = "fastapi" }, + { name = "filetype" }, + { name = "httpx" }, + { name = "jsonref" }, + { name = "langchain" }, + { name = "langchain-community" }, + { name = "langchain-core" }, + { name = "langchain-deepseek" }, + { name = "langchain-mcp-adapters" }, + { name = "langchain-openai" }, + { name = "langchain-text-splitters" }, + { name = "langchain-xai" }, + { name = "langgraph" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-checkpoint-postgres" }, + { name = "langgraph-prebuilt" }, + { name = "langmem" }, + { name = "mypy-boto3-s3" }, + { name = "openai" }, + { name = "pillow" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "pytz" }, + { name = "pyyaml" }, + { name = "redis" }, + { name = "requests" }, + { name = "requests-oauthlib" }, + { name = "slack-sdk" }, + { name = "sqlalchemy", extra = ["asyncio"] }, + { name = "starlette" }, + { name = "supabase" }, + { name = "tenacity" }, + { name = "tweepy", extra = ["async"] }, + { name = "uvicorn" }, + { name = "web3" }, +] + +[package.dev-dependencies] +dev = [ + { name = "jsonschema" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiohttp", specifier = ">=3.11.16" }, + { name = "aiosqlite", specifier = ">=0.21.0" }, + { name = "asyncio", specifier = ">=4.0.0" }, + { name = "asyncpg", specifier = ">=0.30.0" }, + { name = "aws-secretsmanager-caching", specifier = ">=1.1.3" }, + { name = "bip32", specifier = ">=2.0.0" }, + { name = "boto3", specifier = ">=1.37.23,<2.0.0" }, + { name = "botocore", specifier = ">=1.35.97" }, + { name = "cdp-sdk", specifier = ">=1.22.0" }, + { name = "coinbase-agentkit", specifier = ">=0.6.0,<0.7.0" }, + { name = "coinbase-agentkit-langchain", specifier = ">=0.5.0" }, + { name = "cron-validator", specifier = ">=1.0.8,<2.0.0" }, + { name = "epyxid", specifier = ">=0.3.3" }, + { name = "eth-keys", specifier = ">=0.4.0" }, + { name = "eth-utils", specifier = ">=2.1.0" 
}, + { name = "fastapi", specifier = ">=0.115.8" }, + { name = "filetype", specifier = ">=1.2.0,<2.0.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "jsonref", specifier = ">=1.1.0" }, + { name = "langchain", specifier = ">=0.3.25,<0.4.0" }, + { name = "langchain-community", specifier = ">=0.3.19" }, + { name = "langchain-core", specifier = ">=0.3.43" }, + { name = "langchain-deepseek", specifier = ">=0.1.4" }, + { name = "langchain-mcp-adapters", specifier = ">=0.0.11" }, + { name = "langchain-openai", specifier = ">=0.3.8" }, + { name = "langchain-text-splitters", specifier = ">=0.3.8" }, + { name = "langchain-xai", specifier = ">=0.2.1" }, + { name = "langgraph", specifier = ">=0.6.1,<0.7.0" }, + { name = "langgraph-checkpoint", specifier = ">=2.0.18" }, + { name = "langgraph-checkpoint-postgres", specifier = ">=2.0.16,<2.0.23" }, + { name = "langgraph-prebuilt", specifier = ">=0.6.1,<0.7.0" }, + { name = "langmem", specifier = ">=0.0.27" }, + { name = "mypy-boto3-s3", specifier = ">=1.37.24,<2.0.0" }, + { name = "openai", specifier = ">=1.59.6" }, + { name = "pillow", specifier = ">=11.1.0,<12.0.0" }, + { name = "psycopg", specifier = ">=3.2.9" }, + { name = "psycopg-pool", specifier = ">=3.2.4" }, + { name = "pydantic", specifier = ">=2.10.6,<2.11.0" }, + { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "pytz", specifier = ">=2025.1" }, + { name = "pyyaml", specifier = ">=6.0.2" }, + { name = "redis", specifier = ">=5.2.1,<7.0.0" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "requests-oauthlib", specifier = ">=2.0.0" }, + { name = "slack-sdk", specifier = ">=3.34.0" }, + { name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.37" }, + { name = "starlette", specifier = ">=0.47.1" }, + { name = "supabase", specifier = ">=2.16.0" }, + { name = "tenacity", specifier = ">=9.1.2" }, + { name = "tweepy", extras = ["async"], specifier = ">=4.15.0" }, + { name = "uvicorn", specifier = ">=0.34.0,<1.0.0" }, + { name = "web3", specifier = ">=7.10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "jsonschema", specifier = ">=4.21.1,<5" }, + { name = "pytest", specifier = ">=7.0.0" }, + { name = "pytest-asyncio", specifier = ">=0.21.0" }, + { name = "ruff", specifier = ">=0.11.9,<0.12" }, +] + +[[package]] +name = "jiter" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/9d/ae7ddb4b8ab3fb1b51faf4deb36cb48a4fbbd7cb36bad6a5fca4741306f7/jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500", size = 162759, upload-time = "2025-05-18T19:04:59.73Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b5/348b3313c58f5fbfb2194eb4d07e46a35748ba6e5b3b3046143f3040bafa/jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b", size = 312262, upload-time = "2025-05-18T19:03:44.637Z" }, + { url = "https://files.pythonhosted.org/packages/9c/4a/6a2397096162b21645162825f058d1709a02965606e537e3304b02742e9b/jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744", size = 320124, upload-time = "2025-05-18T19:03:46.341Z" }, + { url = "https://files.pythonhosted.org/packages/2a/85/1ce02cade7516b726dd88f59a4ee46914bf79d1676d1228ef2002ed2f1c9/jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2", size = 345330, upload-time = "2025-05-18T19:03:47.596Z" }, + { url = "https://files.pythonhosted.org/packages/75/d0/bb6b4f209a77190ce10ea8d7e50bf3725fc16d3372d0a9f11985a2b23eff/jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026", size = 369670, upload-time = "2025-05-18T19:03:49.334Z" }, + { url = "https://files.pythonhosted.org/packages/a0/f5/a61787da9b8847a601e6827fbc42ecb12be2c925ced3252c8ffcb56afcaf/jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c", size = 489057, upload-time = "2025-05-18T19:03:50.66Z" }, + { url = "https://files.pythonhosted.org/packages/12/e4/6f906272810a7b21406c760a53aadbe52e99ee070fc5c0cb191e316de30b/jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959", size = 389372, upload-time = "2025-05-18T19:03:51.98Z" }, + { url = "https://files.pythonhosted.org/packages/e2/ba/77013b0b8ba904bf3762f11e0129b8928bff7f978a81838dfcc958ad5728/jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a", size = 352038, upload-time = "2025-05-18T19:03:53.703Z" }, + { url = "https://files.pythonhosted.org/packages/67/27/c62568e3ccb03368dbcc44a1ef3a423cb86778a4389e995125d3d1aaa0a4/jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95", size = 391538, upload-time = "2025-05-18T19:03:55.046Z" }, + { url = "https://files.pythonhosted.org/packages/c0/72/0d6b7e31fc17a8fdce76164884edef0698ba556b8eb0af9546ae1a06b91d/jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea", size = 523557, upload-time = "2025-05-18T19:03:56.386Z" }, + { url = "https://files.pythonhosted.org/packages/2f/09/bc1661fbbcbeb6244bd2904ff3a06f340aa77a2b94e5a7373fd165960ea3/jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b", size = 514202, upload-time = "2025-05-18T19:03:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/1b/84/5a5d5400e9d4d54b8004c9673bbe4403928a00d28529ff35b19e9d176b19/jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01", size = 211781, upload-time = "2025-05-18T19:03:59.025Z" }, + { url = "https://files.pythonhosted.org/packages/9b/52/7ec47455e26f2d6e5f2ea4951a0652c06e5b995c291f723973ae9e724a65/jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49", size = 206176, upload-time = "2025-05-18T19:04:00.305Z" }, + { url = "https://files.pythonhosted.org/packages/2e/b0/279597e7a270e8d22623fea6c5d4eeac328e7d95c236ed51a2b884c54f70/jiter-0.10.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e0588107ec8e11b6f5ef0e0d656fb2803ac6cf94a96b2b9fc675c0e3ab5e8644", size = 311617, upload-time = "2025-05-18T19:04:02.078Z" }, + { url = "https://files.pythonhosted.org/packages/91/e3/0916334936f356d605f54cc164af4060e3e7094364add445a3bc79335d46/jiter-0.10.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:cafc4628b616dc32530c20ee53d71589816cf385dd9449633e910d596b1f5c8a", size = 318947, upload-time = "2025-05-18T19:04:03.347Z" }, + { url = "https://files.pythonhosted.org/packages/6a/8e/fd94e8c02d0e94539b7d669a7ebbd2776e51f329bb2c84d4385e8063a2ad/jiter-0.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:520ef6d981172693786a49ff5b09eda72a42e539f14788124a07530f785c3ad6", size = 344618, upload-time = "2025-05-18T19:04:04.709Z" }, + { url = "https://files.pythonhosted.org/packages/6f/b0/f9f0a2ec42c6e9c2e61c327824687f1e2415b767e1089c1d9135f43816bd/jiter-0.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:554dedfd05937f8fc45d17ebdf298fe7e0c77458232bcb73d9fbbf4c6455f5b3", size = 368829, upload-time = "2025-05-18T19:04:06.912Z" }, + { url = "https://files.pythonhosted.org/packages/e8/57/5bbcd5331910595ad53b9fd0c610392ac68692176f05ae48d6ce5c852967/jiter-0.10.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bc299da7789deacf95f64052d97f75c16d4fc8c4c214a22bf8d859a4288a1c2", size = 491034, upload-time = "2025-05-18T19:04:08.222Z" }, + { url = "https://files.pythonhosted.org/packages/9b/be/c393df00e6e6e9e623a73551774449f2f23b6ec6a502a3297aeeece2c65a/jiter-0.10.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5161e201172de298a8a1baad95eb85db4fb90e902353b1f6a41d64ea64644e25", size = 388529, upload-time = "2025-05-18T19:04:09.566Z" }, + { url = "https://files.pythonhosted.org/packages/42/3e/df2235c54d365434c7f150b986a6e35f41ebdc2f95acea3036d99613025d/jiter-0.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2227db6ba93cb3e2bf67c87e594adde0609f146344e8207e8730364db27041", size = 350671, upload-time = "2025-05-18T19:04:10.98Z" }, + { url = "https://files.pythonhosted.org/packages/c6/77/71b0b24cbcc28f55ab4dbfe029f9a5b73aeadaba677843fc6dc9ed2b1d0a/jiter-0.10.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:15acb267ea5e2c64515574b06a8bf393fbfee6a50eb1673614aa45f4613c0cca", size = 390864, upload-time = "2025-05-18T19:04:12.722Z" }, + { url = "https://files.pythonhosted.org/packages/6a/d3/ef774b6969b9b6178e1d1e7a89a3bd37d241f3d3ec5f8deb37bbd203714a/jiter-0.10.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:901b92f2e2947dc6dfcb52fd624453862e16665ea909a08398dde19c0731b7f4", size = 522989, upload-time = "2025-05-18T19:04:14.261Z" }, + { url = "https://files.pythonhosted.org/packages/0c/41/9becdb1d8dd5d854142f45a9d71949ed7e87a8e312b0bede2de849388cb9/jiter-0.10.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d0cb9a125d5a3ec971a094a845eadde2db0de85b33c9f13eb94a0c63d463879e", size = 513495, upload-time = "2025-05-18T19:04:15.603Z" }, + { url = "https://files.pythonhosted.org/packages/9c/36/3468e5a18238bdedae7c4d19461265b5e9b8e288d3f86cd89d00cbb48686/jiter-0.10.0-cp313-cp313-win32.whl", hash = "sha256:48a403277ad1ee208fb930bdf91745e4d2d6e47253eedc96e2559d1e6527006d", size = 211289, upload-time = "2025-05-18T19:04:17.541Z" }, + { url = "https://files.pythonhosted.org/packages/7e/07/1c96b623128bcb913706e294adb5f768fb7baf8db5e1338ce7b4ee8c78ef/jiter-0.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:75f9eb72ecb640619c29bf714e78c9c46c9c4eaafd644bf78577ede459f330d4", size = 205074, upload-time = "2025-05-18T19:04:19.21Z" }, + { url = "https://files.pythonhosted.org/packages/54/46/caa2c1342655f57d8f0f2519774c6d67132205909c65e9aa8255e1d7b4f4/jiter-0.10.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = 
"sha256:28ed2a4c05a1f32ef0e1d24c2611330219fed727dae01789f4a335617634b1ca", size = 318225, upload-time = "2025-05-18T19:04:20.583Z" }, + { url = "https://files.pythonhosted.org/packages/43/84/c7d44c75767e18946219ba2d703a5a32ab37b0bc21886a97bc6062e4da42/jiter-0.10.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14a4c418b1ec86a195f1ca69da8b23e8926c752b685af665ce30777233dfe070", size = 350235, upload-time = "2025-05-18T19:04:22.363Z" }, + { url = "https://files.pythonhosted.org/packages/01/16/f5a0135ccd968b480daad0e6ab34b0c7c5ba3bc447e5088152696140dcb3/jiter-0.10.0-cp313-cp313t-win_amd64.whl", hash = "sha256:d7bfed2fe1fe0e4dda6ef682cee888ba444b21e7a6553e03252e4feb6cf0adca", size = 207278, upload-time = "2025-05-18T19:04:23.627Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9b/1d646da42c3de6c2188fdaa15bce8ecb22b635904fc68be025e21249ba44/jiter-0.10.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:5e9251a5e83fab8d87799d3e1a46cb4b7f2919b895c6f4483629ed2446f66522", size = 310866, upload-time = "2025-05-18T19:04:24.891Z" }, + { url = "https://files.pythonhosted.org/packages/ad/0e/26538b158e8a7c7987e94e7aeb2999e2e82b1f9d2e1f6e9874ddf71ebda0/jiter-0.10.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:023aa0204126fe5b87ccbcd75c8a0d0261b9abdbbf46d55e7ae9f8e22424eeb8", size = 318772, upload-time = "2025-05-18T19:04:26.161Z" }, + { url = "https://files.pythonhosted.org/packages/7b/fb/d302893151caa1c2636d6574d213e4b34e31fd077af6050a9c5cbb42f6fb/jiter-0.10.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c189c4f1779c05f75fc17c0c1267594ed918996a231593a21a5ca5438445216", size = 344534, upload-time = "2025-05-18T19:04:27.495Z" }, + { url = "https://files.pythonhosted.org/packages/01/d8/5780b64a149d74e347c5128d82176eb1e3241b1391ac07935693466d6219/jiter-0.10.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:15720084d90d1098ca0229352607cd68256c76991f6b374af96f36920eae13c4", size = 369087, upload-time = "2025-05-18T19:04:28.896Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5b/f235a1437445160e777544f3ade57544daf96ba7e96c1a5b24a6f7ac7004/jiter-0.10.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e4f2fb68e5f1cfee30e2b2a09549a00683e0fde4c6a2ab88c94072fc33cb7426", size = 490694, upload-time = "2025-05-18T19:04:30.183Z" }, + { url = "https://files.pythonhosted.org/packages/85/a9/9c3d4617caa2ff89cf61b41e83820c27ebb3f7b5fae8a72901e8cd6ff9be/jiter-0.10.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ce541693355fc6da424c08b7edf39a2895f58d6ea17d92cc2b168d20907dee12", size = 388992, upload-time = "2025-05-18T19:04:32.028Z" }, + { url = "https://files.pythonhosted.org/packages/68/b1/344fd14049ba5c94526540af7eb661871f9c54d5f5601ff41a959b9a0bbd/jiter-0.10.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31c50c40272e189d50006ad5c73883caabb73d4e9748a688b216e85a9a9ca3b9", size = 351723, upload-time = "2025-05-18T19:04:33.467Z" }, + { url = "https://files.pythonhosted.org/packages/41/89/4c0e345041186f82a31aee7b9d4219a910df672b9fef26f129f0cda07a29/jiter-0.10.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fa3402a2ff9815960e0372a47b75c76979d74402448509ccd49a275fa983ef8a", size = 392215, upload-time = "2025-05-18T19:04:34.827Z" }, + { url = "https://files.pythonhosted.org/packages/55/58/ee607863e18d3f895feb802154a2177d7e823a7103f000df182e0f718b38/jiter-0.10.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash 
= "sha256:1956f934dca32d7bb647ea21d06d93ca40868b505c228556d3373cbd255ce853", size = 522762, upload-time = "2025-05-18T19:04:36.19Z" }, + { url = "https://files.pythonhosted.org/packages/15/d0/9123fb41825490d16929e73c212de9a42913d68324a8ce3c8476cae7ac9d/jiter-0.10.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:fcedb049bdfc555e261d6f65a6abe1d5ad68825b7202ccb9692636c70fcced86", size = 513427, upload-time = "2025-05-18T19:04:37.544Z" }, + { url = "https://files.pythonhosted.org/packages/d8/b3/2bd02071c5a2430d0b70403a34411fc519c2f227da7b03da9ba6a956f931/jiter-0.10.0-cp314-cp314-win32.whl", hash = "sha256:ac509f7eccca54b2a29daeb516fb95b6f0bd0d0d8084efaf8ed5dfc7b9f0b357", size = 210127, upload-time = "2025-05-18T19:04:38.837Z" }, + { url = "https://files.pythonhosted.org/packages/03/0c/5fe86614ea050c3ecd728ab4035534387cd41e7c1855ef6c031f1ca93e3f/jiter-0.10.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5ed975b83a2b8639356151cef5c0d597c68376fc4922b45d0eb384ac058cfa00", size = 318527, upload-time = "2025-05-18T19:04:40.612Z" }, + { url = "https://files.pythonhosted.org/packages/b3/4a/4175a563579e884192ba6e81725fc0448b042024419be8d83aa8a80a3f44/jiter-0.10.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3aa96f2abba33dc77f79b4cf791840230375f9534e5fac927ccceb58c5e604a5", size = 354213, upload-time = "2025-05-18T19:04:41.894Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "jsonalias" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/45/ee7e17002cb7f3264f755ff6a1a72c55d1830e07808d643167d2a2277c4f/jsonalias-0.1.1.tar.gz", hash = "sha256:64f04d935397d579fc94509e1fcb6212f2d081235d9d6395bd10baedf760a769", size = 1095, upload-time = "2022-10-28T22:57:56.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/ed/05aebce69f78c104feff2ffcdd5a6f9d668a208aba3a8bf56e3750809fd8/jsonalias-0.1.1-py3-none-any.whl", hash = "sha256:a56d2888e6397812c606156504e861e8ec00e188005af149f003c787db3d3f18", size = 1312, upload-time = "2022-10-28T22:57:54.763Z" }, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpointer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" }, +] + +[[package]] 
+name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "jsonref" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/0d/c1f3277e90ccdb50d33ed5ba1ec5b3f0a242ed8c1b1a85d3afeb68464dca/jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552", size = 8814, upload-time = "2023-01-16T16:10:04.455Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/ec/e1db9922bceb168197a558a2b8c03a7963f1afe93517ddd3cf99f202f996/jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9", size = 9425, upload-time = "2023-01-16T16:10:02.255Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d5/00/a297a868e9d0784450faa7365c2172a7d6110c763e30ba861867c32ae6a9/jsonschema-4.25.0.tar.gz", hash = "sha256:e63acf5c11762c0e6672ffb61482bdf57f0876684d8d249c0fe2d730d48bc55f", size = 356830, upload-time = "2025-07-18T15:39:45.11Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/54/c86cd8e011fe98803d7e382fd67c0df5ceab8d2b7ad8c5a81524f791551c/jsonschema-4.25.0-py3-none-any.whl", hash = "sha256:24c2e8da302de79c8b9382fee3e76b355e44d2a4364bb207159ce10b517bd716", size = 89184, upload-time = "2025-07-18T15:39:42.956Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/ce/46fbd9c8119cfc3581ee5643ea49464d168028cfb5caff5fc0596d0cf914/jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608", size = 15513, upload-time = "2025-04-23T12:34:07.418Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" }, +] + +[[package]] +name = "lagrange" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/07/9d/4b6470fd6769b0943fbda9b30e2068bb8d9940be2977b1e80a184d527fa6/lagrange-3.0.1.tar.gz", hash = "sha256:272f352a676679ee318b0b302054f667f23afb73d10063cd3926c612527e09f1", size = 6894, upload-time = "2025-01-01T01:33:14.999Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3e/d8/f1c3ff60a8b3e114cfb3e9eed75140d2a3e1e766791cfe2f210a5c736d61/lagrange-3.0.1-py3-none-any.whl", hash = "sha256:d473913d901f0c257456c505e4a94450f2e4a2f147460a68ad0cfb9ea33a6d0a", size = 6905, upload-time = "2025-01-01T01:33:11.031Z" }, +] + +[[package]] +name = "langchain" +version = "0.3.27" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langchain-text-splitters" }, + { name = "langsmith" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/f6/f4f7f3a56626fe07e2bb330feb61254dbdf06c506e6b59a536a337da51cf/langchain-0.3.27.tar.gz", hash = "sha256:aa6f1e6274ff055d0fd36254176770f356ed0a8994297d1df47df341953cec62", size = 10233809, upload-time = "2025-07-24T14:42:32.959Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/d5/4861816a95b2f6993f1360cfb605aacb015506ee2090433a71de9cca8477/langchain-0.3.27-py3-none-any.whl", hash = "sha256:7b20c4f338826acb148d885b20a73a16e410ede9ee4f19bb02011852d5f98798", size = 1018194, upload-time = "2025-07-24T14:42:30.23Z" }, +] + +[[package]] +name = "langchain-anthropic" +version = "0.3.18" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anthropic" }, + { name = "langchain-core" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/2c/7da4fc946ee9b6b1fc629c165a15413f73705be9cac2e477736a22eeaae0/langchain_anthropic-0.3.18.tar.gz", hash = "sha256:f18970ae58fc4d79c8431dd67f8ab777de5e6d2f92285c8c9af1999cd126fb0a", size = 276719, upload-time = "2025-07-28T21:14:25.964Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/e2/980619565df5045cec91eb978019a4ea66247ccd1f06589fb2a5f576acc5/langchain_anthropic-0.3.18-py3-none-any.whl", hash = "sha256:1be6ece317f08b3d780671fd4425b1dd05fd291a751e3debe3d4704bcf785082", size = 29817, upload-time = "2025-07-28T21:14:24.556Z" }, +] + +[[package]] +name = "langchain-community" +version = "0.3.27" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "dataclasses-json" }, + { name = "httpx-sse" }, + { name = "langchain" }, + { name = "langchain-core" }, + { name = "langsmith" }, + { name = "numpy" }, + { name = "pydantic-settings" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/76/200494f6de488217a196c4369e665d26b94c8c3642d46e2fd62f9daf0a3a/langchain_community-0.3.27.tar.gz", hash = "sha256:e1037c3b9da0c6d10bf06e838b034eb741e016515c79ef8f3f16e53ead33d882", size = 33237737, upload-time = "2025-07-02T18:47:02.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/bc/f8c7dae8321d37ed39ac9d7896617c4203248240a4835b136e3724b3bb62/langchain_community-0.3.27-py3-none-any.whl", hash = "sha256:581f97b795f9633da738ea95da9cb78f8879b538090c9b7a68c0aed49c828f0d", size = 2530442, upload-time = "2025-07-02T18:47:00.246Z" }, +] + +[[package]] +name = "langchain-core" +version = "0.3.74" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpatch" }, + { name = "langsmith" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "tenacity" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/f1/c6/5d755a0f1f4857abbe5ea6f5907ed0e2b5df52bf4dde0a0fd768290e3084/langchain_core-0.3.74.tar.gz", hash = "sha256:ff604441aeade942fbcc0a3860a592daba7671345230c2078ba2eb5f82b6ba76", size = 569553, upload-time = "2025-08-07T20:47:05.094Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/26/545283681ac0379d31c7ad0bac5f195e1982092d76c65ca048db9e3cec0e/langchain_core-0.3.74-py3-none-any.whl", hash = "sha256:088338b5bc2f6a66892f9afc777992c24ee3188f41cbc603d09181e34a228ce7", size = 443453, upload-time = "2025-08-07T20:47:03.853Z" }, +] + +[[package]] +name = "langchain-deepseek" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langchain-openai" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/64/15fe061165574b3ba80011d96cb3f428f9e6f5631cd76058b028333023db/langchain_deepseek-0.1.4.tar.gz", hash = "sha256:dc105138aee4fce03badd0521e69d5508b37f5c087d92b3e8481ffb8f9563d33", size = 8937, upload-time = "2025-07-22T17:37:42.798Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/d3/0da6f3f5548e1a771d0500e17da723996f8ba1398bcef7198a2ecbca6e1b/langchain_deepseek-0.1.4-py3-none-any.whl", hash = "sha256:9ce3dbfc7a40f221657ffe31e8623ea6e397f6c90de2a58d38204ac63e8f41ff", size = 7440, upload-time = "2025-07-22T17:37:41.618Z" }, +] + +[[package]] +name = "langchain-mcp-adapters" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "mcp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/5f/f6d2b724f7100bd53e4413a72dd7b77a61e5284549c9de63ba043e63d163/langchain_mcp_adapters-0.1.7.tar.gz", hash = "sha256:b5d0ab520211d8c12cfc4df83fd6335f8197a3557ee7ca4f14e3380846610535", size = 20023, upload-time = "2025-06-05T20:18:39.425Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/05/b6cf2f4651c9429b374c8837117ecc0619bfb8b6e106ce8390f2c932a293/langchain_mcp_adapters-0.1.7-py3-none-any.whl", hash = "sha256:6b3ded5f51b311e67cefa87b500f776c454caf2269d2eae4b2338ecec19a9258", size = 13318, upload-time = "2025-06-05T20:18:38.477Z" }, +] + +[[package]] +name = "langchain-openai" +version = "0.3.30" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "openai" }, + { name = "tiktoken" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/21/6b2024cdd907812d33d31d42c05baa6a3fc6b341d76f7a982730b6985501/langchain_openai-0.3.30.tar.gz", hash = "sha256:90df37509b2dcf5e057f491326fcbf78cf2a71caff5103a5a7de560320171842", size = 766426, upload-time = "2025-08-12T17:05:55.587Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/23/36/cd370071243ae321c22bfafbf75fef1601dd22d0baeeedb71835954ed0ad/langchain_openai-0.3.30-py3-none-any.whl", hash = "sha256:280f1f31004393228e3f75ff8353b1aae86bbc282abc7890a05beb5f43b89923", size = 74362, upload-time = "2025-08-12T17:05:54.415Z" }, +] + +[[package]] +name = "langchain-text-splitters" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/52/d43ad77acae169210cc476cbc1e4ab37a701017c950211a11ab500fe7d7e/langchain_text_splitters-0.3.9.tar.gz", hash = "sha256:7cd1e5a3aaf609979583eeca2eb34177622570b8fa8f586a605c6b1c34e7ebdb", size = 45260, upload-time = "2025-07-24T14:38:45.14Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e2/52/7638394b88bc15083fd2c3752a843784d9d2d110d68fed6437c8607fb749/langchain_text_splitters-0.3.9-py3-none-any.whl", hash = "sha256:cee0bb816211584ea79cc79927317c358543f40404bcfdd69e69ba3ccde54401", size = 33314, upload-time = "2025-07-24T14:38:43.953Z" }, +] + +[[package]] +name = "langchain-xai" +version = "0.2.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "langchain-core" }, + { name = "langchain-openai" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/6c/f491dc55c7f91fe8196b59b6111788ee62b2d4efd629230788caa08e0fd6/langchain_xai-0.2.5.tar.gz", hash = "sha256:e94b17d4928aaa26998acf7cff537bb6b3c72468177e00ed7de7747dc9b2ecdc", size = 11086, upload-time = "2025-07-22T17:22:21.238Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/50/fc6ae91c9b40ca8259d501c0754d91136ac98e2502c67b4a55b78a426fba/langchain_xai-0.2.5-py3-none-any.whl", hash = "sha256:8d351393eddc0ad024a82a1120b815e6e3ccfe83390cbc65a2fce81786584795", size = 9297, upload-time = "2025-07-22T17:22:20.173Z" }, +] + +[[package]] +name = "langgraph" +version = "0.6.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-prebuilt" }, + { name = "langgraph-sdk" }, + { name = "pydantic" }, + { name = "xxhash" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/ea/ce85312450e98a01d0974da404fa7572f2a90ec01013f8d2dfd94b714b67/langgraph-0.6.5.tar.gz", hash = "sha256:59639927997457fe04f802b39f0e7179cedf8db1cf85f33db764de02ae23c2f0", size = 455213, upload-time = "2025-08-13T23:42:35.063Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/09/93774886995cccb9110280887f7b489e5b951bbc928f078a9788254290be/langgraph-0.6.5-py3-none-any.whl", hash = "sha256:042b2ee7af6f308659520eea5ba6def50f2d109475691666045850d0661b1082", size = 153154, upload-time = "2025-08-13T23:42:33.263Z" }, +] + +[[package]] +name = "langgraph-checkpoint" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "ormsgpack" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/73/3e/d00eb2b56c3846a0cabd2e5aa71c17a95f882d4f799a6ffe96a19b55eba9/langgraph_checkpoint-2.1.1.tar.gz", hash = "sha256:72038c0f9e22260cb9bff1f3ebe5eb06d940b7ee5c1e4765019269d4f21cf92d", size = 136256, upload-time = "2025-07-17T13:07:52.411Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/dd/64686797b0927fb18b290044be12ae9d4df01670dce6bb2498d5ab65cb24/langgraph_checkpoint-2.1.1-py3-none-any.whl", hash = "sha256:5a779134fd28134a9a83d078be4450bbf0e0c79fdf5e992549658899e6fc5ea7", size = 43925, upload-time = "2025-07-17T13:07:51.023Z" }, +] + +[[package]] +name = "langgraph-checkpoint-postgres" +version = "2.0.22" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langgraph-checkpoint" }, + { name = "orjson" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/10/bfab9d031c0eeff9785e28ebcb79107b0a4c57ad3e0f21935679935f77ee/langgraph_checkpoint_postgres-2.0.22.tar.gz", hash = "sha256:4b58346f9d7d44994fc8141310bbd3429fe0e17a18c4a606bf3d7ff673325391", size = 118024, upload-time = "2025-07-10T22:45:24.941Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2b/26/9594505e5698f40e9a76d66c06f412c48a119c3ca41493c432d66f7e1e44/langgraph_checkpoint_postgres-2.0.22-py3-none-any.whl", hash = "sha256:81623697050ea755abd3cab936e60ae0203c0c492675b16d4d4608da8b586bd5", size = 40339, upload-time = "2025-07-10T22:45:23.737Z" }, +] + +[[package]] +name = "langgraph-prebuilt" +version = "0.6.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/21/9b198d11732101ee8cdf30af98d0b4f11254c768de15173e57f5260fd14b/langgraph_prebuilt-0.6.4.tar.gz", hash = "sha256:e9e53b906ee5df46541d1dc5303239e815d3ec551e52bb03dd6463acc79ec28f", size = 125695, upload-time = "2025-08-07T18:17:57.333Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/7f/973b0d9729d9693d6e5b4bc5f3ae41138d194cb7b16b0ed230020beeb13a/langgraph_prebuilt-0.6.4-py3-none-any.whl", hash = "sha256:819f31d88b84cb2729ff1b79db2d51e9506b8fb7aaacfc0d359d4fe16e717344", size = 28025, upload-time = "2025-08-07T18:17:56.493Z" }, +] + +[[package]] +name = "langgraph-sdk" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/3e/3dc45dc7682c9940db9edaf8773d2e157397c5bd6881f6806808afd8731e/langgraph_sdk-0.2.0.tar.gz", hash = "sha256:cd8b5f6595e5571be5cbffd04cf936978ab8f5d1005517c99715947ef871e246", size = 72510, upload-time = "2025-07-22T17:31:06.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/03/a8ab0e8ea74be6058cb48bb1d85485b5c65d6ea183e3ee1aa8ca1ac73b3e/langgraph_sdk-0.2.0-py3-none-any.whl", hash = "sha256:150722264f225c4d47bbe7394676be102fdbf04c4400a0dd1bd41a70c6430cc7", size = 50569, upload-time = "2025-07-22T17:31:04.582Z" }, +] + +[[package]] +name = "langmem" +version = "0.0.29" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain" }, + { name = "langchain-anthropic" }, + { name = "langchain-core" }, + { name = "langchain-openai" }, + { name = "langgraph" }, + { name = "langgraph-checkpoint" }, + { name = "langsmith" }, + { name = "trustcall" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ec/75/a58f56a1f635003919f1c5c356a4247d8136d9183b63b9f52599aa7a8710/langmem-0.0.29.tar.gz", hash = "sha256:9a4a7bfcbde87f02494caf6add55c0cdd49c5a1a6396e19fe12a56ba6fb96267", size = 206315, upload-time = "2025-07-28T19:55:33.437Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/6a/ea17974afc18dbf278bbfaaa1331e3dfef979cf42bfae1dc695b5e4ea750/langmem-0.0.29-py3-none-any.whl", hash = "sha256:3e0b56d3e4077e96dab45616e2800c9550bf61c1e1eee4c119ec704518037d8c", size = 67127, upload-time = "2025-07-28T19:55:32.279Z" }, +] + +[[package]] +name = "langsmith" +version = "0.4.14" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "requests-toolbelt" }, + { name = "zstandard" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/b0/1def3c6d12eb5e412213e39f1ba4ac64a47ec3102cf42a3a1ff86af1402d/langsmith-0.4.14.tar.gz", hash = "sha256:4d29c7a9c85b20ba813ab9c855407bccdf5eb4f397f512ffa89959b2a2cb83ed", size = 921872, upload-time = "2025-08-12T20:39:43.704Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/9e/08/3f0fb3e2f7cc6fd91c4d06d7abc6607425a66973bee79d04018bac41dd4f/langsmith-0.4.14-py3-none-any.whl", hash = "sha256:b6d070ac425196947d2a98126fb0e35f3b8c001a2e6e5b7049dd1c56f0767d0b", size = 373249, upload-time = "2025-08-12T20:39:41.992Z" }, +] + +[[package]] +name = "marshmallow" +version = "3.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825, upload-time = "2025-02-03T15:32:25.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878, upload-time = "2025-02-03T15:32:22.295Z" }, +] + +[[package]] +name = "mcp" +version = "1.12.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/88/f6cb7e7c260cd4b4ce375f2b1614b33ce401f63af0f49f7141a2e9bf0a45/mcp-1.12.4.tar.gz", hash = "sha256:0765585e9a3a5916a3c3ab8659330e493adc7bd8b2ca6120c2d7a0c43e034ca5", size = 431148, upload-time = "2025-08-07T20:31:18.082Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ad/68/316cbc54b7163fa22571dcf42c9cc46562aae0a021b974e0a8141e897200/mcp-1.12.4-py3-none-any.whl", hash = "sha256:7aa884648969fab8e78b89399d59a683202972e12e6bc9a1c88ce7eda7743789", size = 160145, upload-time = "2025-08-07T20:31:15.69Z" }, +] + +[[package]] +name = "multidict" +version = "6.6.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/69/7f/0652e6ed47ab288e3756ea9c0df8b14950781184d4bd7883f4d87dd41245/multidict-6.6.4.tar.gz", hash = "sha256:d2d4e4787672911b48350df02ed3fa3fffdc2f2e8ca06dd6afdf34189b76a9dd", size = 101843, upload-time = "2025-08-11T12:08:48.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/f6/512ffd8fd8b37fb2680e5ac35d788f1d71bbaf37789d21a820bdc441e565/multidict-6.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0ffb87be160942d56d7b87b0fdf098e81ed565add09eaa1294268c7f3caac4c8", size = 76516, upload-time = "2025-08-11T12:06:53.393Z" }, + { url = "https://files.pythonhosted.org/packages/99/58/45c3e75deb8855c36bd66cc1658007589662ba584dbf423d01df478dd1c5/multidict-6.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d191de6cbab2aff5de6c5723101705fd044b3e4c7cfd587a1929b5028b9714b3", size = 45394, upload-time = "2025-08-11T12:06:54.555Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/e8c4472a93a26e4507c0b8e1f0762c0d8a32de1328ef72fd704ef9cc5447/multidict-6.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38a0956dd92d918ad5feff3db8fcb4a5eb7dba114da917e1a88475619781b57b", size = 43591, upload-time = "2025-08-11T12:06:55.672Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/51/edf414f4df058574a7265034d04c935aa84a89e79ce90fcf4df211f47b16/multidict-6.6.4-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6865f6d3b7900ae020b495d599fcf3765653bc927951c1abb959017f81ae8287", size = 237215, upload-time = "2025-08-11T12:06:57.213Z" }, + { url = "https://files.pythonhosted.org/packages/c8/45/8b3d6dbad8cf3252553cc41abea09ad527b33ce47a5e199072620b296902/multidict-6.6.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a2088c126b6f72db6c9212ad827d0ba088c01d951cee25e758c450da732c138", size = 258299, upload-time = "2025-08-11T12:06:58.946Z" }, + { url = "https://files.pythonhosted.org/packages/3c/e8/8ca2e9a9f5a435fc6db40438a55730a4bf4956b554e487fa1b9ae920f825/multidict-6.6.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0f37bed7319b848097085d7d48116f545985db988e2256b2e6f00563a3416ee6", size = 242357, upload-time = "2025-08-11T12:07:00.301Z" }, + { url = "https://files.pythonhosted.org/packages/0f/84/80c77c99df05a75c28490b2af8f7cba2a12621186e0a8b0865d8e745c104/multidict-6.6.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:01368e3c94032ba6ca0b78e7ccb099643466cf24f8dc8eefcfdc0571d56e58f9", size = 268369, upload-time = "2025-08-11T12:07:01.638Z" }, + { url = "https://files.pythonhosted.org/packages/0d/e9/920bfa46c27b05fb3e1ad85121fd49f441492dca2449c5bcfe42e4565d8a/multidict-6.6.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fe323540c255db0bffee79ad7f048c909f2ab0edb87a597e1c17da6a54e493c", size = 269341, upload-time = "2025-08-11T12:07:02.943Z" }, + { url = "https://files.pythonhosted.org/packages/af/65/753a2d8b05daf496f4a9c367fe844e90a1b2cac78e2be2c844200d10cc4c/multidict-6.6.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8eb3025f17b0a4c3cd08cda49acf312a19ad6e8a4edd9dbd591e6506d999402", size = 256100, upload-time = "2025-08-11T12:07:04.564Z" }, + { url = "https://files.pythonhosted.org/packages/09/54/655be13ae324212bf0bc15d665a4e34844f34c206f78801be42f7a0a8aaa/multidict-6.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bbc14f0365534d35a06970d6a83478b249752e922d662dc24d489af1aa0d1be7", size = 253584, upload-time = "2025-08-11T12:07:05.914Z" }, + { url = "https://files.pythonhosted.org/packages/5c/74/ab2039ecc05264b5cec73eb018ce417af3ebb384ae9c0e9ed42cb33f8151/multidict-6.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:75aa52fba2d96bf972e85451b99d8e19cc37ce26fd016f6d4aa60da9ab2b005f", size = 251018, upload-time = "2025-08-11T12:07:08.301Z" }, + { url = "https://files.pythonhosted.org/packages/af/0a/ccbb244ac848e56c6427f2392741c06302bbfba49c0042f1eb3c5b606497/multidict-6.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fefd4a815e362d4f011919d97d7b4a1e566f1dde83dc4ad8cfb5b41de1df68d", size = 251477, upload-time = "2025-08-11T12:07:10.248Z" }, + { url = "https://files.pythonhosted.org/packages/0e/b0/0ed49bba775b135937f52fe13922bc64a7eaf0a3ead84a36e8e4e446e096/multidict-6.6.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:db9801fe021f59a5b375ab778973127ca0ac52429a26e2fd86aa9508f4d26eb7", size = 263575, upload-time = "2025-08-11T12:07:11.928Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/d9/7fb85a85e14de2e44dfb6a24f03c41e2af8697a6df83daddb0e9b7569f73/multidict-6.6.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a650629970fa21ac1fb06ba25dabfc5b8a2054fcbf6ae97c758aa956b8dba802", size = 259649, upload-time = "2025-08-11T12:07:13.244Z" }, + { url = "https://files.pythonhosted.org/packages/03/9e/b3a459bcf9b6e74fa461a5222a10ff9b544cb1cd52fd482fb1b75ecda2a2/multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:452ff5da78d4720d7516a3a2abd804957532dd69296cb77319c193e3ffb87e24", size = 251505, upload-time = "2025-08-11T12:07:14.57Z" }, + { url = "https://files.pythonhosted.org/packages/86/a2/8022f78f041dfe6d71e364001a5cf987c30edfc83c8a5fb7a3f0974cff39/multidict-6.6.4-cp312-cp312-win32.whl", hash = "sha256:8c2fcb12136530ed19572bbba61b407f655e3953ba669b96a35036a11a485793", size = 41888, upload-time = "2025-08-11T12:07:15.904Z" }, + { url = "https://files.pythonhosted.org/packages/c7/eb/d88b1780d43a56db2cba24289fa744a9d216c1a8546a0dc3956563fd53ea/multidict-6.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:047d9425860a8c9544fed1b9584f0c8bcd31bcde9568b047c5e567a1025ecd6e", size = 46072, upload-time = "2025-08-11T12:07:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/9f/16/b929320bf5750e2d9d4931835a4c638a19d2494a5b519caaaa7492ebe105/multidict-6.6.4-cp312-cp312-win_arm64.whl", hash = "sha256:14754eb72feaa1e8ae528468f24250dd997b8e2188c3d2f593f9eba259e4b364", size = 43222, upload-time = "2025-08-11T12:07:18.328Z" }, + { url = "https://files.pythonhosted.org/packages/3a/5d/e1db626f64f60008320aab00fbe4f23fc3300d75892a3381275b3d284580/multidict-6.6.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:f46a6e8597f9bd71b31cc708195d42b634c8527fecbcf93febf1052cacc1f16e", size = 75848, upload-time = "2025-08-11T12:07:19.912Z" }, + { url = "https://files.pythonhosted.org/packages/4c/aa/8b6f548d839b6c13887253af4e29c939af22a18591bfb5d0ee6f1931dae8/multidict-6.6.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:22e38b2bc176c5eb9c0a0e379f9d188ae4cd8b28c0f53b52bce7ab0a9e534657", size = 45060, upload-time = "2025-08-11T12:07:21.163Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c6/f5e97e5d99a729bc2aa58eb3ebfa9f1e56a9b517cc38c60537c81834a73f/multidict-6.6.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5df8afd26f162da59e218ac0eefaa01b01b2e6cd606cffa46608f699539246da", size = 43269, upload-time = "2025-08-11T12:07:22.392Z" }, + { url = "https://files.pythonhosted.org/packages/dc/31/d54eb0c62516776f36fe67f84a732f97e0b0e12f98d5685bebcc6d396910/multidict-6.6.4-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:49517449b58d043023720aa58e62b2f74ce9b28f740a0b5d33971149553d72aa", size = 237158, upload-time = "2025-08-11T12:07:23.636Z" }, + { url = "https://files.pythonhosted.org/packages/c4/1c/8a10c1c25b23156e63b12165a929d8eb49a6ed769fdbefb06e6f07c1e50d/multidict-6.6.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ae9408439537c5afdca05edd128a63f56a62680f4b3c234301055d7a2000220f", size = 257076, upload-time = "2025-08-11T12:07:25.049Z" }, + { url = "https://files.pythonhosted.org/packages/ad/86/90e20b5771d6805a119e483fd3d1e8393e745a11511aebca41f0da38c3e2/multidict-6.6.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:87a32d20759dc52a9e850fe1061b6e41ab28e2998d44168a8a341b99ded1dba0", size = 240694, upload-time = "2025-08-11T12:07:26.458Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/49/484d3e6b535bc0555b52a0a26ba86e4d8d03fd5587d4936dc59ba7583221/multidict-6.6.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:52e3c8d43cdfff587ceedce9deb25e6ae77daba560b626e97a56ddcad3756879", size = 266350, upload-time = "2025-08-11T12:07:27.94Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b4/aa4c5c379b11895083d50021e229e90c408d7d875471cb3abf721e4670d6/multidict-6.6.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ad8850921d3a8d8ff6fbef790e773cecfc260bbfa0566998980d3fa8f520bc4a", size = 267250, upload-time = "2025-08-11T12:07:29.303Z" }, + { url = "https://files.pythonhosted.org/packages/80/e5/5e22c5bf96a64bdd43518b1834c6d95a4922cc2066b7d8e467dae9b6cee6/multidict-6.6.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:497a2954adc25c08daff36f795077f63ad33e13f19bfff7736e72c785391534f", size = 254900, upload-time = "2025-08-11T12:07:30.764Z" }, + { url = "https://files.pythonhosted.org/packages/17/38/58b27fed927c07035abc02befacab42491e7388ca105e087e6e0215ead64/multidict-6.6.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:024ce601f92d780ca1617ad4be5ac15b501cc2414970ffa2bb2bbc2bd5a68fa5", size = 252355, upload-time = "2025-08-11T12:07:32.205Z" }, + { url = "https://files.pythonhosted.org/packages/d0/a1/dad75d23a90c29c02b5d6f3d7c10ab36c3197613be5d07ec49c7791e186c/multidict-6.6.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a693fc5ed9bdd1c9e898013e0da4dcc640de7963a371c0bd458e50e046bf6438", size = 250061, upload-time = "2025-08-11T12:07:33.623Z" }, + { url = "https://files.pythonhosted.org/packages/b8/1a/ac2216b61c7f116edab6dc3378cca6c70dc019c9a457ff0d754067c58b20/multidict-6.6.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:190766dac95aab54cae5b152a56520fd99298f32a1266d66d27fdd1b5ac00f4e", size = 249675, upload-time = "2025-08-11T12:07:34.958Z" }, + { url = "https://files.pythonhosted.org/packages/d4/79/1916af833b800d13883e452e8e0977c065c4ee3ab7a26941fbfdebc11895/multidict-6.6.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:34d8f2a5ffdceab9dcd97c7a016deb2308531d5f0fced2bb0c9e1df45b3363d7", size = 261247, upload-time = "2025-08-11T12:07:36.588Z" }, + { url = "https://files.pythonhosted.org/packages/c5/65/d1f84fe08ac44a5fc7391cbc20a7cedc433ea616b266284413fd86062f8c/multidict-6.6.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:59e8d40ab1f5a8597abcef00d04845155a5693b5da00d2c93dbe88f2050f2812", size = 257960, upload-time = "2025-08-11T12:07:39.735Z" }, + { url = "https://files.pythonhosted.org/packages/13/b5/29ec78057d377b195ac2c5248c773703a6b602e132a763e20ec0457e7440/multidict-6.6.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:467fe64138cfac771f0e949b938c2e1ada2b5af22f39692aa9258715e9ea613a", size = 250078, upload-time = "2025-08-11T12:07:41.525Z" }, + { url = "https://files.pythonhosted.org/packages/c4/0e/7e79d38f70a872cae32e29b0d77024bef7834b0afb406ddae6558d9e2414/multidict-6.6.4-cp313-cp313-win32.whl", hash = "sha256:14616a30fe6d0a48d0a48d1a633ab3b8bec4cf293aac65f32ed116f620adfd69", size = 41708, upload-time = "2025-08-11T12:07:43.405Z" }, + { url = "https://files.pythonhosted.org/packages/9d/34/746696dffff742e97cd6a23da953e55d0ea51fa601fa2ff387b3edcfaa2c/multidict-6.6.4-cp313-cp313-win_amd64.whl", hash = "sha256:40cd05eaeb39e2bc8939451f033e57feaa2ac99e07dbca8afe2be450a4a3b6cf", size = 45912, upload-time = "2025-08-11T12:07:45.082Z" }, + { url 
= "https://files.pythonhosted.org/packages/c7/87/3bac136181e271e29170d8d71929cdeddeb77f3e8b6a0c08da3a8e9da114/multidict-6.6.4-cp313-cp313-win_arm64.whl", hash = "sha256:f6eb37d511bfae9e13e82cb4d1af36b91150466f24d9b2b8a9785816deb16605", size = 43076, upload-time = "2025-08-11T12:07:46.746Z" }, + { url = "https://files.pythonhosted.org/packages/64/94/0a8e63e36c049b571c9ae41ee301ada29c3fee9643d9c2548d7d558a1d99/multidict-6.6.4-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:6c84378acd4f37d1b507dfa0d459b449e2321b3ba5f2338f9b085cf7a7ba95eb", size = 82812, upload-time = "2025-08-11T12:07:48.402Z" }, + { url = "https://files.pythonhosted.org/packages/25/1a/be8e369dfcd260d2070a67e65dd3990dd635cbd735b98da31e00ea84cd4e/multidict-6.6.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:0e0558693063c75f3d952abf645c78f3c5dfdd825a41d8c4d8156fc0b0da6e7e", size = 48313, upload-time = "2025-08-11T12:07:49.679Z" }, + { url = "https://files.pythonhosted.org/packages/26/5a/dd4ade298674b2f9a7b06a32c94ffbc0497354df8285f27317c66433ce3b/multidict-6.6.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3f8e2384cb83ebd23fd07e9eada8ba64afc4c759cd94817433ab8c81ee4b403f", size = 46777, upload-time = "2025-08-11T12:07:51.318Z" }, + { url = "https://files.pythonhosted.org/packages/89/db/98aa28bc7e071bfba611ac2ae803c24e96dd3a452b4118c587d3d872c64c/multidict-6.6.4-cp313-cp313t-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:f996b87b420995a9174b2a7c1a8daf7db4750be6848b03eb5e639674f7963773", size = 229321, upload-time = "2025-08-11T12:07:52.965Z" }, + { url = "https://files.pythonhosted.org/packages/c7/bc/01ddda2a73dd9d167bd85d0e8ef4293836a8f82b786c63fb1a429bc3e678/multidict-6.6.4-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc356250cffd6e78416cf5b40dc6a74f1edf3be8e834cf8862d9ed5265cf9b0e", size = 249954, upload-time = "2025-08-11T12:07:54.423Z" }, + { url = "https://files.pythonhosted.org/packages/06/78/6b7c0f020f9aa0acf66d0ab4eb9f08375bac9a50ff5e3edb1c4ccd59eafc/multidict-6.6.4-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dadf95aa862714ea468a49ad1e09fe00fcc9ec67d122f6596a8d40caf6cec7d0", size = 228612, upload-time = "2025-08-11T12:07:55.914Z" }, + { url = "https://files.pythonhosted.org/packages/00/44/3faa416f89b2d5d76e9d447296a81521e1c832ad6e40b92f990697b43192/multidict-6.6.4-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:7dd57515bebffd8ebd714d101d4c434063322e4fe24042e90ced41f18b6d3395", size = 257528, upload-time = "2025-08-11T12:07:57.371Z" }, + { url = "https://files.pythonhosted.org/packages/05/5f/77c03b89af0fcb16f018f668207768191fb9dcfb5e3361a5e706a11db2c9/multidict-6.6.4-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:967af5f238ebc2eb1da4e77af5492219fbd9b4b812347da39a7b5f5c72c0fa45", size = 256329, upload-time = "2025-08-11T12:07:58.844Z" }, + { url = "https://files.pythonhosted.org/packages/cf/e9/ed750a2a9afb4f8dc6f13dc5b67b514832101b95714f1211cd42e0aafc26/multidict-6.6.4-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2a4c6875c37aae9794308ec43e3530e4aa0d36579ce38d89979bbf89582002bb", size = 247928, upload-time = "2025-08-11T12:08:01.037Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/b5/e0571bc13cda277db7e6e8a532791d4403dacc9850006cb66d2556e649c0/multidict-6.6.4-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:7f683a551e92bdb7fac545b9c6f9fa2aebdeefa61d607510b3533286fcab67f5", size = 245228, upload-time = "2025-08-11T12:08:02.96Z" }, + { url = "https://files.pythonhosted.org/packages/f3/a3/69a84b0eccb9824491f06368f5b86e72e4af54c3067c37c39099b6687109/multidict-6.6.4-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:3ba5aaf600edaf2a868a391779f7a85d93bed147854925f34edd24cc70a3e141", size = 235869, upload-time = "2025-08-11T12:08:04.746Z" }, + { url = "https://files.pythonhosted.org/packages/a9/9d/28802e8f9121a6a0804fa009debf4e753d0a59969ea9f70be5f5fdfcb18f/multidict-6.6.4-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:580b643b7fd2c295d83cad90d78419081f53fd532d1f1eb67ceb7060f61cff0d", size = 243446, upload-time = "2025-08-11T12:08:06.332Z" }, + { url = "https://files.pythonhosted.org/packages/38/ea/6c98add069b4878c1d66428a5f5149ddb6d32b1f9836a826ac764b9940be/multidict-6.6.4-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:37b7187197da6af3ee0b044dbc9625afd0c885f2800815b228a0e70f9a7f473d", size = 252299, upload-time = "2025-08-11T12:08:07.931Z" }, + { url = "https://files.pythonhosted.org/packages/3a/09/8fe02d204473e14c0af3affd50af9078839dfca1742f025cca765435d6b4/multidict-6.6.4-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:e1b93790ed0bc26feb72e2f08299691ceb6da5e9e14a0d13cc74f1869af327a0", size = 246926, upload-time = "2025-08-11T12:08:09.467Z" }, + { url = "https://files.pythonhosted.org/packages/37/3d/7b1e10d774a6df5175ecd3c92bff069e77bed9ec2a927fdd4ff5fe182f67/multidict-6.6.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a506a77ddee1efcca81ecbeae27ade3e09cdf21a8ae854d766c2bb4f14053f92", size = 243383, upload-time = "2025-08-11T12:08:10.981Z" }, + { url = "https://files.pythonhosted.org/packages/50/b0/a6fae46071b645ae98786ab738447de1ef53742eaad949f27e960864bb49/multidict-6.6.4-cp313-cp313t-win32.whl", hash = "sha256:f93b2b2279883d1d0a9e1bd01f312d6fc315c5e4c1f09e112e4736e2f650bc4e", size = 47775, upload-time = "2025-08-11T12:08:12.439Z" }, + { url = "https://files.pythonhosted.org/packages/b2/0a/2436550b1520091af0600dff547913cb2d66fbac27a8c33bc1b1bccd8d98/multidict-6.6.4-cp313-cp313t-win_amd64.whl", hash = "sha256:6d46a180acdf6e87cc41dc15d8f5c2986e1e8739dc25dbb7dac826731ef381a4", size = 53100, upload-time = "2025-08-11T12:08:13.823Z" }, + { url = "https://files.pythonhosted.org/packages/97/ea/43ac51faff934086db9c072a94d327d71b7d8b40cd5dcb47311330929ef0/multidict-6.6.4-cp313-cp313t-win_arm64.whl", hash = "sha256:756989334015e3335d087a27331659820d53ba432befdef6a718398b0a8493ad", size = 45501, upload-time = "2025-08-11T12:08:15.173Z" }, + { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" }, +] + +[[package]] +name = "mypy-boto3-s3" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/06/d7/b2100702d2f200fdb3468e419c729790bd8543ee0af6f6d63d8dfdab4e28/mypy_boto3_s3-1.40.0.tar.gz", hash = "sha256:99a4a27f04d62fe0b31032f274f2e19889fa66424413617a9416873c48567f1d", size = 75924, upload-time = "2025-07-31T19:50:01.979Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/43/4f/4d32cd202d8c8c7e11e44dd288f66b8985e6ee4402b9a0891b7b94ff6cc6/mypy_boto3_s3-1.40.0-py3-none-any.whl", hash = "sha256:5736b7780d57a156312d8d136462c207671d0236b0355704b5754496bb712bc8", size = 82710, upload-time = "2025-07-31T19:49:59.713Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "nilql" +version = "0.0.0a13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcl" }, + { name = "lagrange" }, + { name = "pailliers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3f/67/59f4b6ceac7c7719e4830a8f017d7d289b48da20dc2791f55ba3d2968bbb/nilql-0.0.0a13.tar.gz", hash = "sha256:cb0011ff704001c83209a3185d89a8b0a16cec8089551469b5c19d52659dd359", size = 19329, upload-time = "2025-04-12T05:08:18.383Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/b0/8b8641ddd3d40aaee0fccb3a167d7ee31f0021c93c93c0a19ddc50942d9a/nilql-0.0.0a13-py3-none-any.whl", hash = "sha256:f369a0530340ef890d9916d748147c0c54eb8a1b9f79dd6f3124c546ec39d9f3", size = 13826, upload-time = "2025-04-12T05:08:17.043Z" }, +] + +[[package]] +name = "numpy" +version = "2.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/37/7d/3fec4199c5ffb892bed55cff901e4f39a58c81df9c44c280499e92cad264/numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48", size = 20489306, upload-time = "2025-07-24T21:32:07.553Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/6d/745dd1c1c5c284d17725e5c802ca4d45cfc6803519d777f087b71c9f4069/numpy-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b", size = 20956420, upload-time = "2025-07-24T20:28:18.002Z" }, + { url = "https://files.pythonhosted.org/packages/bc/96/e7b533ea5740641dd62b07a790af5d9d8fec36000b8e2d0472bd7574105f/numpy-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f", size = 14184660, upload-time = "2025-07-24T20:28:39.522Z" }, + { url = 
"https://files.pythonhosted.org/packages/2b/53/102c6122db45a62aa20d1b18c9986f67e6b97e0d6fbc1ae13e3e4c84430c/numpy-2.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0", size = 5113382, upload-time = "2025-07-24T20:28:48.544Z" }, + { url = "https://files.pythonhosted.org/packages/2b/21/376257efcbf63e624250717e82b4fae93d60178f09eb03ed766dbb48ec9c/numpy-2.3.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b", size = 6647258, upload-time = "2025-07-24T20:28:59.104Z" }, + { url = "https://files.pythonhosted.org/packages/91/ba/f4ebf257f08affa464fe6036e13f2bf9d4642a40228781dc1235da81be9f/numpy-2.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370", size = 14281409, upload-time = "2025-07-24T20:40:30.298Z" }, + { url = "https://files.pythonhosted.org/packages/59/ef/f96536f1df42c668cbacb727a8c6da7afc9c05ece6d558927fb1722693e1/numpy-2.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73", size = 16641317, upload-time = "2025-07-24T20:40:56.625Z" }, + { url = "https://files.pythonhosted.org/packages/f6/a7/af813a7b4f9a42f498dde8a4c6fcbff8100eed00182cc91dbaf095645f38/numpy-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc", size = 16056262, upload-time = "2025-07-24T20:41:20.797Z" }, + { url = "https://files.pythonhosted.org/packages/8b/5d/41c4ef8404caaa7f05ed1cfb06afe16a25895260eacbd29b4d84dff2920b/numpy-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be", size = 18579342, upload-time = "2025-07-24T20:41:50.753Z" }, + { url = "https://files.pythonhosted.org/packages/a1/4f/9950e44c5a11636f4a3af6e825ec23003475cc9a466edb7a759ed3ea63bd/numpy-2.3.2-cp312-cp312-win32.whl", hash = "sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036", size = 6320610, upload-time = "2025-07-24T20:42:01.551Z" }, + { url = "https://files.pythonhosted.org/packages/7c/2f/244643a5ce54a94f0a9a2ab578189c061e4a87c002e037b0829dd77293b6/numpy-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f", size = 12786292, upload-time = "2025-07-24T20:42:20.738Z" }, + { url = "https://files.pythonhosted.org/packages/54/cd/7b5f49d5d78db7badab22d8323c1b6ae458fbf86c4fdfa194ab3cd4eb39b/numpy-2.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07", size = 10194071, upload-time = "2025-07-24T20:42:36.657Z" }, + { url = "https://files.pythonhosted.org/packages/1c/c0/c6bb172c916b00700ed3bf71cb56175fd1f7dbecebf8353545d0b5519f6c/numpy-2.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:c8d9727f5316a256425892b043736d63e89ed15bbfe6556c5ff4d9d4448ff3b3", size = 20949074, upload-time = "2025-07-24T20:43:07.813Z" }, + { url = "https://files.pythonhosted.org/packages/20/4e/c116466d22acaf4573e58421c956c6076dc526e24a6be0903219775d862e/numpy-2.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:efc81393f25f14d11c9d161e46e6ee348637c0a1e8a54bf9dedc472a3fae993b", size = 14177311, upload-time = "2025-07-24T20:43:29.335Z" }, + { url = 
"https://files.pythonhosted.org/packages/78/45/d4698c182895af189c463fc91d70805d455a227261d950e4e0f1310c2550/numpy-2.3.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:dd937f088a2df683cbb79dda9a772b62a3e5a8a7e76690612c2737f38c6ef1b6", size = 5106022, upload-time = "2025-07-24T20:43:37.999Z" }, + { url = "https://files.pythonhosted.org/packages/9f/76/3e6880fef4420179309dba72a8c11f6166c431cf6dee54c577af8906f914/numpy-2.3.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:11e58218c0c46c80509186e460d79fbdc9ca1eb8d8aee39d8f2dc768eb781089", size = 6640135, upload-time = "2025-07-24T20:43:49.28Z" }, + { url = "https://files.pythonhosted.org/packages/34/fa/87ff7f25b3c4ce9085a62554460b7db686fef1e0207e8977795c7b7d7ba1/numpy-2.3.2-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5ad4ebcb683a1f99f4f392cc522ee20a18b2bb12a2c1c42c3d48d5a1adc9d3d2", size = 14278147, upload-time = "2025-07-24T20:44:10.328Z" }, + { url = "https://files.pythonhosted.org/packages/1d/0f/571b2c7a3833ae419fe69ff7b479a78d313581785203cc70a8db90121b9a/numpy-2.3.2-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:938065908d1d869c7d75d8ec45f735a034771c6ea07088867f713d1cd3bbbe4f", size = 16635989, upload-time = "2025-07-24T20:44:34.88Z" }, + { url = "https://files.pythonhosted.org/packages/24/5a/84ae8dca9c9a4c592fe11340b36a86ffa9fd3e40513198daf8a97839345c/numpy-2.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:66459dccc65d8ec98cc7df61307b64bf9e08101f9598755d42d8ae65d9a7a6ee", size = 16053052, upload-time = "2025-07-24T20:44:58.872Z" }, + { url = "https://files.pythonhosted.org/packages/57/7c/e5725d99a9133b9813fcf148d3f858df98511686e853169dbaf63aec6097/numpy-2.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a7af9ed2aa9ec5950daf05bb11abc4076a108bd3c7db9aa7251d5f107079b6a6", size = 18577955, upload-time = "2025-07-24T20:45:26.714Z" }, + { url = "https://files.pythonhosted.org/packages/ae/11/7c546fcf42145f29b71e4d6f429e96d8d68e5a7ba1830b2e68d7418f0bbd/numpy-2.3.2-cp313-cp313-win32.whl", hash = "sha256:906a30249315f9c8e17b085cc5f87d3f369b35fedd0051d4a84686967bdbbd0b", size = 6311843, upload-time = "2025-07-24T20:49:24.444Z" }, + { url = "https://files.pythonhosted.org/packages/aa/6f/a428fd1cb7ed39b4280d057720fed5121b0d7754fd2a9768640160f5517b/numpy-2.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:c63d95dc9d67b676e9108fe0d2182987ccb0f11933c1e8959f42fa0da8d4fa56", size = 12782876, upload-time = "2025-07-24T20:49:43.227Z" }, + { url = "https://files.pythonhosted.org/packages/65/85/4ea455c9040a12595fb6c43f2c217257c7b52dd0ba332c6a6c1d28b289fe/numpy-2.3.2-cp313-cp313-win_arm64.whl", hash = "sha256:b05a89f2fb84d21235f93de47129dd4f11c16f64c87c33f5e284e6a3a54e43f2", size = 10192786, upload-time = "2025-07-24T20:49:59.443Z" }, + { url = "https://files.pythonhosted.org/packages/80/23/8278f40282d10c3f258ec3ff1b103d4994bcad78b0cba9208317f6bb73da/numpy-2.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4e6ecfeddfa83b02318f4d84acf15fbdbf9ded18e46989a15a8b6995dfbf85ab", size = 21047395, upload-time = "2025-07-24T20:45:58.821Z" }, + { url = "https://files.pythonhosted.org/packages/1f/2d/624f2ce4a5df52628b4ccd16a4f9437b37c35f4f8a50d00e962aae6efd7a/numpy-2.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:508b0eada3eded10a3b55725b40806a4b855961040180028f52580c4729916a2", size = 14300374, upload-time = "2025-07-24T20:46:20.207Z" }, + { url = 
"https://files.pythonhosted.org/packages/f6/62/ff1e512cdbb829b80a6bd08318a58698867bca0ca2499d101b4af063ee97/numpy-2.3.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:754d6755d9a7588bdc6ac47dc4ee97867271b17cee39cb87aef079574366db0a", size = 5228864, upload-time = "2025-07-24T20:46:30.58Z" }, + { url = "https://files.pythonhosted.org/packages/7d/8e/74bc18078fff03192d4032cfa99d5a5ca937807136d6f5790ce07ca53515/numpy-2.3.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:a9f66e7d2b2d7712410d3bc5684149040ef5f19856f20277cd17ea83e5006286", size = 6737533, upload-time = "2025-07-24T20:46:46.111Z" }, + { url = "https://files.pythonhosted.org/packages/19/ea/0731efe2c9073ccca5698ef6a8c3667c4cf4eea53fcdcd0b50140aba03bc/numpy-2.3.2-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:de6ea4e5a65d5a90c7d286ddff2b87f3f4ad61faa3db8dabe936b34c2275b6f8", size = 14352007, upload-time = "2025-07-24T20:47:07.1Z" }, + { url = "https://files.pythonhosted.org/packages/cf/90/36be0865f16dfed20f4bc7f75235b963d5939707d4b591f086777412ff7b/numpy-2.3.2-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3ef07ec8cbc8fc9e369c8dcd52019510c12da4de81367d8b20bc692aa07573a", size = 16701914, upload-time = "2025-07-24T20:47:32.459Z" }, + { url = "https://files.pythonhosted.org/packages/94/30/06cd055e24cb6c38e5989a9e747042b4e723535758e6153f11afea88c01b/numpy-2.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:27c9f90e7481275c7800dc9c24b7cc40ace3fdb970ae4d21eaff983a32f70c91", size = 16132708, upload-time = "2025-07-24T20:47:58.129Z" }, + { url = "https://files.pythonhosted.org/packages/9a/14/ecede608ea73e58267fd7cb78f42341b3b37ba576e778a1a06baffbe585c/numpy-2.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:07b62978075b67eee4065b166d000d457c82a1efe726cce608b9db9dd66a73a5", size = 18651678, upload-time = "2025-07-24T20:48:25.402Z" }, + { url = "https://files.pythonhosted.org/packages/40/f3/2fe6066b8d07c3685509bc24d56386534c008b462a488b7f503ba82b8923/numpy-2.3.2-cp313-cp313t-win32.whl", hash = "sha256:c771cfac34a4f2c0de8e8c97312d07d64fd8f8ed45bc9f5726a7e947270152b5", size = 6441832, upload-time = "2025-07-24T20:48:37.181Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ba/0937d66d05204d8f28630c9c60bc3eda68824abde4cf756c4d6aad03b0c6/numpy-2.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:72dbebb2dcc8305c431b2836bcc66af967df91be793d63a24e3d9b741374c450", size = 12927049, upload-time = "2025-07-24T20:48:56.24Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ed/13542dd59c104d5e654dfa2ac282c199ba64846a74c2c4bcdbc3a0f75df1/numpy-2.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:72c6df2267e926a6d5286b0a6d556ebe49eae261062059317837fda12ddf0c1a", size = 10262935, upload-time = "2025-07-24T20:49:13.136Z" }, + { url = "https://files.pythonhosted.org/packages/c9/7c/7659048aaf498f7611b783e000c7268fcc4dcf0ce21cd10aad7b2e8f9591/numpy-2.3.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:448a66d052d0cf14ce9865d159bfc403282c9bc7bb2a31b03cc18b651eca8b1a", size = 20950906, upload-time = "2025-07-24T20:50:30.346Z" }, + { url = "https://files.pythonhosted.org/packages/80/db/984bea9d4ddf7112a04cfdfb22b1050af5757864cfffe8e09e44b7f11a10/numpy-2.3.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:546aaf78e81b4081b2eba1d105c3b34064783027a06b3ab20b6eba21fb64132b", size = 14185607, upload-time = "2025-07-24T20:50:51.923Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/76/b3d6f414f4eca568f469ac112a3b510938d892bc5a6c190cb883af080b77/numpy-2.3.2-cp314-cp314-macosx_14_0_arm64.whl", hash = "sha256:87c930d52f45df092f7578889711a0768094debf73cfcde105e2d66954358125", size = 5114110, upload-time = "2025-07-24T20:51:01.041Z" }, + { url = "https://files.pythonhosted.org/packages/9e/d2/6f5e6826abd6bca52392ed88fe44a4b52aacb60567ac3bc86c67834c3a56/numpy-2.3.2-cp314-cp314-macosx_14_0_x86_64.whl", hash = "sha256:8dc082ea901a62edb8f59713c6a7e28a85daddcb67454c839de57656478f5b19", size = 6642050, upload-time = "2025-07-24T20:51:11.64Z" }, + { url = "https://files.pythonhosted.org/packages/c4/43/f12b2ade99199e39c73ad182f103f9d9791f48d885c600c8e05927865baf/numpy-2.3.2-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:af58de8745f7fa9ca1c0c7c943616c6fe28e75d0c81f5c295810e3c83b5be92f", size = 14296292, upload-time = "2025-07-24T20:51:33.488Z" }, + { url = "https://files.pythonhosted.org/packages/5d/f9/77c07d94bf110a916b17210fac38680ed8734c236bfed9982fd8524a7b47/numpy-2.3.2-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed5527c4cf10f16c6d0b6bee1f89958bccb0ad2522c8cadc2efd318bcd545f5", size = 16638913, upload-time = "2025-07-24T20:51:58.517Z" }, + { url = "https://files.pythonhosted.org/packages/9b/d1/9d9f2c8ea399cc05cfff8a7437453bd4e7d894373a93cdc46361bbb49a7d/numpy-2.3.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:095737ed986e00393ec18ec0b21b47c22889ae4b0cd2d5e88342e08b01141f58", size = 16071180, upload-time = "2025-07-24T20:52:22.827Z" }, + { url = "https://files.pythonhosted.org/packages/4c/41/82e2c68aff2a0c9bf315e47d61951099fed65d8cb2c8d9dc388cb87e947e/numpy-2.3.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:b5e40e80299607f597e1a8a247ff8d71d79c5b52baa11cc1cce30aa92d2da6e0", size = 18576809, upload-time = "2025-07-24T20:52:51.015Z" }, + { url = "https://files.pythonhosted.org/packages/14/14/4b4fd3efb0837ed252d0f583c5c35a75121038a8c4e065f2c259be06d2d8/numpy-2.3.2-cp314-cp314-win32.whl", hash = "sha256:7d6e390423cc1f76e1b8108c9b6889d20a7a1f59d9a60cac4a050fa734d6c1e2", size = 6366410, upload-time = "2025-07-24T20:56:44.949Z" }, + { url = "https://files.pythonhosted.org/packages/11/9e/b4c24a6b8467b61aced5c8dc7dcfce23621baa2e17f661edb2444a418040/numpy-2.3.2-cp314-cp314-win_amd64.whl", hash = "sha256:b9d0878b21e3918d76d2209c924ebb272340da1fb51abc00f986c258cd5e957b", size = 12918821, upload-time = "2025-07-24T20:57:06.479Z" }, + { url = "https://files.pythonhosted.org/packages/0e/0f/0dc44007c70b1007c1cef86b06986a3812dd7106d8f946c09cfa75782556/numpy-2.3.2-cp314-cp314-win_arm64.whl", hash = "sha256:2738534837c6a1d0c39340a190177d7d66fdf432894f469728da901f8f6dc910", size = 10477303, upload-time = "2025-07-24T20:57:22.879Z" }, + { url = "https://files.pythonhosted.org/packages/8b/3e/075752b79140b78ddfc9c0a1634d234cfdbc6f9bbbfa6b7504e445ad7d19/numpy-2.3.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:4d002ecf7c9b53240be3bb69d80f86ddbd34078bae04d87be81c1f58466f264e", size = 21047524, upload-time = "2025-07-24T20:53:22.086Z" }, + { url = "https://files.pythonhosted.org/packages/fe/6d/60e8247564a72426570d0e0ea1151b95ce5bd2f1597bb878a18d32aec855/numpy-2.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:293b2192c6bcce487dbc6326de5853787f870aeb6c43f8f9c6496db5b1781e45", size = 14300519, upload-time = "2025-07-24T20:53:44.053Z" }, + { url = 
"https://files.pythonhosted.org/packages/4d/73/d8326c442cd428d47a067070c3ac6cc3b651a6e53613a1668342a12d4479/numpy-2.3.2-cp314-cp314t-macosx_14_0_arm64.whl", hash = "sha256:0a4f2021a6da53a0d580d6ef5db29947025ae8b35b3250141805ea9a32bbe86b", size = 5228972, upload-time = "2025-07-24T20:53:53.81Z" }, + { url = "https://files.pythonhosted.org/packages/34/2e/e71b2d6dad075271e7079db776196829019b90ce3ece5c69639e4f6fdc44/numpy-2.3.2-cp314-cp314t-macosx_14_0_x86_64.whl", hash = "sha256:9c144440db4bf3bb6372d2c3e49834cc0ff7bb4c24975ab33e01199e645416f2", size = 6737439, upload-time = "2025-07-24T20:54:04.742Z" }, + { url = "https://files.pythonhosted.org/packages/15/b0/d004bcd56c2c5e0500ffc65385eb6d569ffd3363cb5e593ae742749b2daa/numpy-2.3.2-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f92d6c2a8535dc4fe4419562294ff957f83a16ebdec66df0805e473ffaad8bd0", size = 14352479, upload-time = "2025-07-24T20:54:25.819Z" }, + { url = "https://files.pythonhosted.org/packages/11/e3/285142fcff8721e0c99b51686426165059874c150ea9ab898e12a492e291/numpy-2.3.2-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cefc2219baa48e468e3db7e706305fcd0c095534a192a08f31e98d83a7d45fb0", size = 16702805, upload-time = "2025-07-24T20:54:50.814Z" }, + { url = "https://files.pythonhosted.org/packages/33/c3/33b56b0e47e604af2c7cd065edca892d180f5899599b76830652875249a3/numpy-2.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:76c3e9501ceb50b2ff3824c3589d5d1ab4ac857b0ee3f8f49629d0de55ecf7c2", size = 16133830, upload-time = "2025-07-24T20:55:17.306Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ae/7b1476a1f4d6a48bc669b8deb09939c56dd2a439db1ab03017844374fb67/numpy-2.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:122bf5ed9a0221b3419672493878ba4967121514b1d7d4656a7580cd11dddcbf", size = 18652665, upload-time = "2025-07-24T20:55:46.665Z" }, + { url = "https://files.pythonhosted.org/packages/14/ba/5b5c9978c4bb161034148ade2de9db44ec316fab89ce8c400db0e0c81f86/numpy-2.3.2-cp314-cp314t-win32.whl", hash = "sha256:6f1ae3dcb840edccc45af496f312528c15b1f79ac318169d094e85e4bb35fdf1", size = 6514777, upload-time = "2025-07-24T20:55:57.66Z" }, + { url = "https://files.pythonhosted.org/packages/eb/46/3dbaf0ae7c17cdc46b9f662c56da2054887b8d9e737c1476f335c83d33db/numpy-2.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:087ffc25890d89a43536f75c5fe8770922008758e8eeeef61733957041ed2f9b", size = 13111856, upload-time = "2025-07-24T20:56:17.318Z" }, + { url = "https://files.pythonhosted.org/packages/c1/9e/1652778bce745a67b5fe05adde60ed362d38eb17d919a540e813d30f6874/numpy-2.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:092aeb3449833ea9c0bf0089d70c29ae480685dd2377ec9cdbbb620257f84631", size = 10544226, upload-time = "2025-07-24T20:56:34.509Z" }, +] + +[[package]] +name = "oauthlib" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/5f/19930f824ffeb0ad4372da4812c50edbd1434f678c90c2733e1188edfc63/oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9", size = 185918, upload-time = "2025-06-19T22:48:08.269Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" }, +] + +[[package]] +name = "openai" +version 
= "1.99.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/d2/ef89c6f3f36b13b06e271d3cc984ddd2f62508a0972c1cbcc8485a6644ff/openai-1.99.9.tar.gz", hash = "sha256:f2082d155b1ad22e83247c3de3958eb4255b20ccf4a1de2e6681b6957b554e92", size = 506992, upload-time = "2025-08-12T02:31:10.054Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/fb/df274ca10698ee77b07bff952f302ea627cc12dac6b85289485dd77db6de/openai-1.99.9-py3-none-any.whl", hash = "sha256:9dbcdb425553bae1ac5d947147bebbd630d91bbfc7788394d4c4f3a35682ab3a", size = 786816, upload-time = "2025-08-12T02:31:08.34Z" }, +] + +[[package]] +name = "orjson" +version = "3.11.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/1d/5e0ae38788bdf0721326695e65fdf41405ed535f633eb0df0f06f57552fa/orjson-3.11.2.tar.gz", hash = "sha256:91bdcf5e69a8fd8e8bdb3de32b31ff01d2bd60c1e8d5fe7d5afabdcf19920309", size = 5470739, upload-time = "2025-08-12T15:12:28.626Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/02/46054ebe7996a8adee9640dcad7d39d76c2000dc0377efa38e55dc5cbf78/orjson-3.11.2-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:901d80d349d8452162b3aa1afb82cec5bee79a10550660bc21311cc61a4c5486", size = 226528, upload-time = "2025-08-12T15:11:03.317Z" }, + { url = "https://files.pythonhosted.org/packages/e2/c6/6b6f0b4d8aea1137436546b990f71be2cd8bd870aa2f5aa14dba0fcc95dc/orjson-3.11.2-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:cf3bd3967a360e87ee14ed82cb258b7f18c710dacf3822fb0042a14313a673a1", size = 115931, upload-time = "2025-08-12T15:11:04.759Z" }, + { url = "https://files.pythonhosted.org/packages/ae/05/4205cc97c30e82a293dd0d149b1a89b138ebe76afeca66fc129fa2aa4e6a/orjson-3.11.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:26693dde66910078229a943e80eeb99fdce6cd2c26277dc80ead9f3ab97d2131", size = 111382, upload-time = "2025-08-12T15:11:06.468Z" }, + { url = "https://files.pythonhosted.org/packages/50/c7/b8a951a93caa821f9272a7c917115d825ae2e4e8768f5ddf37968ec9de01/orjson-3.11.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4ad4c8acb50a28211c33fc7ef85ddf5cb18d4636a5205fd3fa2dce0411a0e30c", size = 116271, upload-time = "2025-08-12T15:11:07.845Z" }, + { url = "https://files.pythonhosted.org/packages/17/03/1006c7f8782d5327439e26d9b0ec66500ea7b679d4bbb6b891d2834ab3ee/orjson-3.11.2-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:994181e7f1725bb5f2d481d7d228738e0743b16bf319ca85c29369c65913df14", size = 119086, upload-time = "2025-08-12T15:11:09.329Z" }, + { url = "https://files.pythonhosted.org/packages/44/61/57d22bc31f36a93878a6f772aea76b2184102c6993dea897656a66d18c74/orjson-3.11.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dbb79a0476393c07656b69c8e763c3cc925fa8e1d9e9b7d1f626901bb5025448", size = 120724, upload-time = "2025-08-12T15:11:10.674Z" }, + { url = "https://files.pythonhosted.org/packages/78/a9/4550e96b4c490c83aea697d5347b8f7eb188152cd7b5a38001055ca5b379/orjson-3.11.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:191ed27a1dddb305083d8716af413d7219f40ec1d4c9b0e977453b4db0d6fb6c", size = 
123577, upload-time = "2025-08-12T15:11:12.015Z" }, + { url = "https://files.pythonhosted.org/packages/3a/86/09b8cb3ebd513d708ef0c92d36ac3eebda814c65c72137b0a82d6d688fc4/orjson-3.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0afb89f16f07220183fd00f5f297328ed0a68d8722ad1b0c8dcd95b12bc82804", size = 121195, upload-time = "2025-08-12T15:11:13.399Z" }, + { url = "https://files.pythonhosted.org/packages/37/68/7b40b39ac2c1c644d4644e706d0de6c9999764341cd85f2a9393cb387661/orjson-3.11.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ab6e6b4e93b1573a026b6ec16fca9541354dd58e514b62c558b58554ae04307", size = 119234, upload-time = "2025-08-12T15:11:15.134Z" }, + { url = "https://files.pythonhosted.org/packages/40/7c/bb6e7267cd80c19023d44d8cbc4ea4ed5429fcd4a7eb9950f50305697a28/orjson-3.11.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:9cb23527efb61fb75527df55d20ee47989c4ee34e01a9c98ee9ede232abf6219", size = 392250, upload-time = "2025-08-12T15:11:16.604Z" }, + { url = "https://files.pythonhosted.org/packages/64/f2/6730ace05583dbca7c1b406d59f4266e48cd0d360566e71482420fb849fc/orjson-3.11.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a4dd1268e4035af21b8a09e4adf2e61f87ee7bf63b86d7bb0a237ac03fad5b45", size = 134572, upload-time = "2025-08-12T15:11:18.205Z" }, + { url = "https://files.pythonhosted.org/packages/96/0f/7d3e03a30d5aac0432882b539a65b8c02cb6dd4221ddb893babf09c424cc/orjson-3.11.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff8b155b145eaf5a9d94d2c476fbe18d6021de93cf36c2ae2c8c5b775763f14e", size = 123869, upload-time = "2025-08-12T15:11:19.554Z" }, + { url = "https://files.pythonhosted.org/packages/45/80/1513265eba6d4a960f078f4b1d2bff94a571ab2d28c6f9835e03dfc65cc6/orjson-3.11.2-cp312-cp312-win32.whl", hash = "sha256:ae3bb10279d57872f9aba68c9931aa71ed3b295fa880f25e68da79e79453f46e", size = 124430, upload-time = "2025-08-12T15:11:20.914Z" }, + { url = "https://files.pythonhosted.org/packages/fb/61/eadf057b68a332351eeb3d89a4cc538d14f31cd8b5ec1b31a280426ccca2/orjson-3.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:d026e1967239ec11a2559b4146a61d13914504b396f74510a1c4d6b19dfd8732", size = 119598, upload-time = "2025-08-12T15:11:22.372Z" }, + { url = "https://files.pythonhosted.org/packages/6b/3f/7f4b783402143d965ab7e9a2fc116fdb887fe53bdce7d3523271cd106098/orjson-3.11.2-cp312-cp312-win_arm64.whl", hash = "sha256:59f8d5ad08602711af9589375be98477d70e1d102645430b5a7985fdbf613b36", size = 114052, upload-time = "2025-08-12T15:11:23.762Z" }, + { url = "https://files.pythonhosted.org/packages/c2/f3/0dd6b4750eb556ae4e2c6a9cb3e219ec642e9c6d95f8ebe5dc9020c67204/orjson-3.11.2-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:a079fdba7062ab396380eeedb589afb81dc6683f07f528a03b6f7aae420a0219", size = 226419, upload-time = "2025-08-12T15:11:25.517Z" }, + { url = "https://files.pythonhosted.org/packages/44/d5/e67f36277f78f2af8a4690e0c54da6b34169812f807fd1b4bfc4dbcf9558/orjson-3.11.2-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:6a5f62ebbc530bb8bb4b1ead103647b395ba523559149b91a6c545f7cd4110ad", size = 115803, upload-time = "2025-08-12T15:11:27.357Z" }, + { url = "https://files.pythonhosted.org/packages/24/37/ff8bc86e0dacc48f07c2b6e20852f230bf4435611bab65e3feae2b61f0ae/orjson-3.11.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7df6c7b8b0931feb3420b72838c3e2ba98c228f7aa60d461bc050cf4ca5f7b2", size = 111337, upload-time = "2025-08-12T15:11:28.805Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/25/37d4d3e8079ea9784ea1625029988e7f4594ce50d4738b0c1e2bf4a9e201/orjson-3.11.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6f59dfea7da1fced6e782bb3699718088b1036cb361f36c6e4dd843c5111aefe", size = 116222, upload-time = "2025-08-12T15:11:30.18Z" }, + { url = "https://files.pythonhosted.org/packages/b7/32/a63fd9c07fce3b4193dcc1afced5dd4b0f3a24e27556604e9482b32189c9/orjson-3.11.2-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edf49146520fef308c31aa4c45b9925fd9c7584645caca7c0c4217d7900214ae", size = 119020, upload-time = "2025-08-12T15:11:31.59Z" }, + { url = "https://files.pythonhosted.org/packages/b4/b6/400792b8adc3079a6b5d649264a3224d6342436d9fac9a0ed4abc9dc4596/orjson-3.11.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50995bbeb5d41a32ad15e023305807f561ac5dcd9bd41a12c8d8d1d2c83e44e6", size = 120721, upload-time = "2025-08-12T15:11:33.035Z" }, + { url = "https://files.pythonhosted.org/packages/40/f3/31ab8f8c699eb9e65af8907889a0b7fef74c1d2b23832719a35da7bb0c58/orjson-3.11.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cc42960515076eb639b705f105712b658c525863d89a1704d984b929b0577d1", size = 123574, upload-time = "2025-08-12T15:11:34.433Z" }, + { url = "https://files.pythonhosted.org/packages/bd/a6/ce4287c412dff81878f38d06d2c80845709c60012ca8daf861cb064b4574/orjson-3.11.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c56777cab2a7b2a8ea687fedafb84b3d7fdafae382165c31a2adf88634c432fa", size = 121225, upload-time = "2025-08-12T15:11:36.133Z" }, + { url = "https://files.pythonhosted.org/packages/69/b0/7a881b2aef4fed0287d2a4fbb029d01ed84fa52b4a68da82bdee5e50598e/orjson-3.11.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:07349e88025b9b5c783077bf7a9f401ffbfb07fd20e86ec6fc5b7432c28c2c5e", size = 119201, upload-time = "2025-08-12T15:11:37.642Z" }, + { url = "https://files.pythonhosted.org/packages/cf/98/a325726b37f7512ed6338e5e65035c3c6505f4e628b09a5daf0419f054ea/orjson-3.11.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:45841fbb79c96441a8c58aa29ffef570c5df9af91f0f7a9572e5505e12412f15", size = 392193, upload-time = "2025-08-12T15:11:39.153Z" }, + { url = "https://files.pythonhosted.org/packages/cb/4f/a7194f98b0ce1d28190e0c4caa6d091a3fc8d0107ad2209f75c8ba398984/orjson-3.11.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:13d8d8db6cd8d89d4d4e0f4161acbbb373a4d2a4929e862d1d2119de4aa324ac", size = 134548, upload-time = "2025-08-12T15:11:40.768Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5e/b84caa2986c3f472dc56343ddb0167797a708a8d5c3be043e1e2677b55df/orjson-3.11.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:51da1ee2178ed09c00d09c1b953e45846bbc16b6420965eb7a913ba209f606d8", size = 123798, upload-time = "2025-08-12T15:11:42.164Z" }, + { url = "https://files.pythonhosted.org/packages/9c/5b/e398449080ce6b4c8fcadad57e51fa16f65768e1b142ba90b23ac5d10801/orjson-3.11.2-cp313-cp313-win32.whl", hash = "sha256:51dc033df2e4a4c91c0ba4f43247de99b3cbf42ee7a42ee2b2b2f76c8b2f2cb5", size = 124402, upload-time = "2025-08-12T15:11:44.036Z" }, + { url = "https://files.pythonhosted.org/packages/b3/66/429e4608e124debfc4790bfc37131f6958e59510ba3b542d5fc163be8e5f/orjson-3.11.2-cp313-cp313-win_amd64.whl", hash = "sha256:29d91d74942b7436f29b5d1ed9bcfc3f6ef2d4f7c4997616509004679936650d", size = 119498, upload-time = "2025-08-12T15:11:45.864Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/04/f8b5f317cce7ad3580a9ad12d7e2df0714dfa8a83328ecddd367af802f5b/orjson-3.11.2-cp313-cp313-win_arm64.whl", hash = "sha256:4ca4fb5ac21cd1e48028d4f708b1bb13e39c42d45614befd2ead004a8bba8535", size = 114051, upload-time = "2025-08-12T15:11:47.555Z" }, + { url = "https://files.pythonhosted.org/packages/74/83/2c363022b26c3c25b3708051a19d12f3374739bb81323f05b284392080c0/orjson-3.11.2-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:3dcba7101ea6a8d4ef060746c0f2e7aa8e2453a1012083e1ecce9726d7554cb7", size = 226406, upload-time = "2025-08-12T15:11:49.445Z" }, + { url = "https://files.pythonhosted.org/packages/b0/a7/aa3c973de0b33fc93b4bd71691665ffdfeae589ea9d0625584ab10a7d0f5/orjson-3.11.2-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:15d17bdb76a142e1f55d91913e012e6e6769659daa6bfef3ef93f11083137e81", size = 115788, upload-time = "2025-08-12T15:11:50.992Z" }, + { url = "https://files.pythonhosted.org/packages/ef/f2/e45f233dfd09fdbb052ec46352363dca3906618e1a2b264959c18f809d0b/orjson-3.11.2-cp314-cp314-manylinux_2_34_aarch64.whl", hash = "sha256:53c9e81768c69d4b66b8876ec3c8e431c6e13477186d0db1089d82622bccd19f", size = 111318, upload-time = "2025-08-12T15:11:52.495Z" }, + { url = "https://files.pythonhosted.org/packages/3e/23/cf5a73c4da6987204cbbf93167f353ff0c5013f7c5e5ef845d4663a366da/orjson-3.11.2-cp314-cp314-manylinux_2_34_x86_64.whl", hash = "sha256:d4f13af59a7b84c1ca6b8a7ab70d608f61f7c44f9740cd42409e6ae7b6c8d8b7", size = 121231, upload-time = "2025-08-12T15:11:53.941Z" }, + { url = "https://files.pythonhosted.org/packages/40/1d/47468a398ae68a60cc21e599144e786e035bb12829cb587299ecebc088f1/orjson-3.11.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:bde64aa469b5ee46cc960ed241fae3721d6a8801dacb2ca3466547a2535951e4", size = 119204, upload-time = "2025-08-12T15:11:55.409Z" }, + { url = "https://files.pythonhosted.org/packages/4d/d9/f99433d89b288b5bc8836bffb32a643f805e673cf840ef8bab6e73ced0d1/orjson-3.11.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:b5ca86300aeb383c8fa759566aca065878d3d98c3389d769b43f0a2e84d52c5f", size = 392237, upload-time = "2025-08-12T15:11:57.18Z" }, + { url = "https://files.pythonhosted.org/packages/d4/dc/1b9d80d40cebef603325623405136a29fb7d08c877a728c0943dd066c29a/orjson-3.11.2-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:24e32a558ebed73a6a71c8f1cbc163a7dd5132da5270ff3d8eeb727f4b6d1bc7", size = 134578, upload-time = "2025-08-12T15:11:58.844Z" }, + { url = "https://files.pythonhosted.org/packages/45/b3/72e7a4c5b6485ef4e83ef6aba7f1dd041002bad3eb5d1d106ca5b0fc02c6/orjson-3.11.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e36319a5d15b97e4344110517450396845cc6789aed712b1fbf83c1bd95792f6", size = 123799, upload-time = "2025-08-12T15:12:00.352Z" }, + { url = "https://files.pythonhosted.org/packages/c8/3e/a3d76b392e7acf9b34dc277171aad85efd6accc75089bb35b4c614990ea9/orjson-3.11.2-cp314-cp314-win32.whl", hash = "sha256:40193ada63fab25e35703454d65b6afc71dbc65f20041cb46c6d91709141ef7f", size = 124461, upload-time = "2025-08-12T15:12:01.854Z" }, + { url = "https://files.pythonhosted.org/packages/fb/e3/75c6a596ff8df9e4a5894813ff56695f0a218e6ea99420b4a645c4f7795d/orjson-3.11.2-cp314-cp314-win_amd64.whl", hash = "sha256:7c8ac5f6b682d3494217085cf04dadae66efee45349ad4ee2a1da3c97e2305a8", size = 119494, upload-time = "2025-08-12T15:12:03.337Z" }, + { url = 
"https://files.pythonhosted.org/packages/5b/3d/9e74742fc261c5ca473c96bb3344d03995869e1dc6402772c60afb97736a/orjson-3.11.2-cp314-cp314-win_arm64.whl", hash = "sha256:21cf261e8e79284242e4cb1e5924df16ae28255184aafeff19be1405f6d33f67", size = 114046, upload-time = "2025-08-12T15:12:04.87Z" }, +] + +[[package]] +name = "ormsgpack" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/92/36/44eed5ef8ce93cded76a576780bab16425ce7876f10d3e2e6265e46c21ea/ormsgpack-1.10.0.tar.gz", hash = "sha256:7f7a27efd67ef22d7182ec3b7fa7e9d147c3ad9be2a24656b23c989077e08b16", size = 58629, upload-time = "2025-05-24T19:07:53.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/95/f3ab1a7638f6aa9362e87916bb96087fbbc5909db57e19f12ad127560e1e/ormsgpack-1.10.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:4e159d50cd4064d7540e2bc6a0ab66eab70b0cc40c618b485324ee17037527c0", size = 376806, upload-time = "2025-05-24T19:07:17.221Z" }, + { url = "https://files.pythonhosted.org/packages/6c/2b/42f559f13c0b0f647b09d749682851d47c1a7e48308c43612ae6833499c8/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb47c85f3a866e29279d801115b554af0fefc409e2ed8aa90aabfa77efe5cc6", size = 204433, upload-time = "2025-05-24T19:07:18.569Z" }, + { url = "https://files.pythonhosted.org/packages/45/42/1ca0cb4d8c80340a89a4af9e6d8951fb8ba0d076a899d2084eadf536f677/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c28249574934534c9bd5dce5485c52f21bcea0ee44d13ece3def6e3d2c3798b5", size = 215547, upload-time = "2025-05-24T19:07:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/0a/38/184a570d7c44c0260bc576d1daaac35b2bfd465a50a08189518505748b9a/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1957dcadbb16e6a981cd3f9caef9faf4c2df1125e2a1b702ee8236a55837ce07", size = 216746, upload-time = "2025-05-24T19:07:21.83Z" }, + { url = "https://files.pythonhosted.org/packages/69/2f/1aaffd08f6b7fdc2a57336a80bdfb8df24e6a65ada5aa769afecfcbc6cc6/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3b29412558c740bf6bac156727aa85ac67f9952cd6f071318f29ee72e1a76044", size = 384783, upload-time = "2025-05-24T19:07:23.674Z" }, + { url = "https://files.pythonhosted.org/packages/a9/63/3e53d6f43bb35e00c98f2b8ab2006d5138089ad254bc405614fbf0213502/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6933f350c2041ec189fe739f0ba7d6117c8772f5bc81f45b97697a84d03020dd", size = 479076, upload-time = "2025-05-24T19:07:25.047Z" }, + { url = "https://files.pythonhosted.org/packages/b8/19/fa1121b03b61402bb4d04e35d164e2320ef73dfb001b57748110319dd014/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a86de06d368fcc2e58b79dece527dc8ca831e0e8b9cec5d6e633d2777ec93d0", size = 390447, upload-time = "2025-05-24T19:07:26.568Z" }, + { url = "https://files.pythonhosted.org/packages/b0/0d/73143ecb94ac4a5dcba223402139240a75dee0cc6ba8a543788a5646407a/ormsgpack-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:35fa9f81e5b9a0dab42e09a73f7339ecffdb978d6dbf9deb2ecf1e9fc7808722", size = 121401, upload-time = "2025-05-24T19:07:28.308Z" }, + { url = "https://files.pythonhosted.org/packages/61/f8/ec5f4e03268d0097545efaab2893aa63f171cf2959cb0ea678a5690e16a1/ormsgpack-1.10.0-cp313-cp313-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = 
"sha256:8d816d45175a878993b7372bd5408e0f3ec5a40f48e2d5b9d8f1cc5d31b61f1f", size = 376806, upload-time = "2025-05-24T19:07:29.555Z" }, + { url = "https://files.pythonhosted.org/packages/c1/19/b3c53284aad1e90d4d7ed8c881a373d218e16675b8b38e3569d5b40cc9b8/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a90345ccb058de0f35262893751c603b6376b05f02be2b6f6b7e05d9dd6d5643", size = 204433, upload-time = "2025-05-24T19:07:30.977Z" }, + { url = "https://files.pythonhosted.org/packages/09/0b/845c258f59df974a20a536c06cace593698491defdd3d026a8a5f9b6e745/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:144b5e88f1999433e54db9d637bae6fe21e935888be4e3ac3daecd8260bd454e", size = 215549, upload-time = "2025-05-24T19:07:32.345Z" }, + { url = "https://files.pythonhosted.org/packages/61/56/57fce8fb34ca6c9543c026ebebf08344c64dbb7b6643d6ddd5355d37e724/ormsgpack-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2190b352509d012915921cca76267db136cd026ddee42f1b0d9624613cc7058c", size = 216747, upload-time = "2025-05-24T19:07:34.075Z" }, + { url = "https://files.pythonhosted.org/packages/b8/3f/655b5f6a2475c8d209f5348cfbaaf73ce26237b92d79ef2ad439407dd0fa/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:86fd9c1737eaba43d3bb2730add9c9e8b5fbed85282433705dd1b1e88ea7e6fb", size = 384785, upload-time = "2025-05-24T19:07:35.83Z" }, + { url = "https://files.pythonhosted.org/packages/4b/94/687a0ad8afd17e4bce1892145d6a1111e58987ddb176810d02a1f3f18686/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:33afe143a7b61ad21bb60109a86bb4e87fec70ef35db76b89c65b17e32da7935", size = 479076, upload-time = "2025-05-24T19:07:37.533Z" }, + { url = "https://files.pythonhosted.org/packages/c8/34/68925232e81e0e062a2f0ac678f62aa3b6f7009d6a759e19324dbbaebae7/ormsgpack-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f23d45080846a7b90feabec0d330a9cc1863dc956728412e4f7986c80ab3a668", size = 390446, upload-time = "2025-05-24T19:07:39.469Z" }, + { url = "https://files.pythonhosted.org/packages/12/ad/f4e1a36a6d1714afb7ffb74b3ababdcb96529cf4e7a216f9f7c8eda837b6/ormsgpack-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:534d18acb805c75e5fba09598bf40abe1851c853247e61dda0c01f772234da69", size = 121399, upload-time = "2025-05-24T19:07:40.854Z" }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, +] + +[[package]] +name = "pailliers" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "egcd" }, + { name = "rabinmiller" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b5/c2/578c08af348247c025179e9f22d4970549fd58635d3881a9ac86192b159b/pailliers-0.2.0.tar.gz", hash = "sha256:a1d3d7d840594f51073e531078b3da4dc5a7a527b410102a0f0fa65d6c222871", size = 8919, upload-time = 
"2025-01-01T23:18:57.343Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/0e/d793836d158ea15f7705e8ae705d73991f58e3eda0dde07e64bc423a4c12/pailliers-0.2.0-py3-none-any.whl", hash = "sha256:ad0ddc72be63f9b3c10200e23178fe527b566c4aa86659ab54a8faeb367ac7d6", size = 7404, upload-time = "2025-01-01T23:18:54.718Z" }, +] + +[[package]] +name = "paramiko" +version = "3.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcrypt" }, + { name = "cryptography" }, + { name = "pynacl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/15/ad6ce226e8138315f2451c2aeea985bf35ee910afb477bae7477dc3a8f3b/paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822", size = 1566110, upload-time = "2025-02-04T02:37:59.783Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/f8/c7bd0ef12954a81a1d3cea60a13946bd9a49a0036a5927770c461eade7ae/paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61", size = 227298, upload-time = "2025-02-04T02:37:57.672Z" }, +] + +[[package]] +name = "parsimonious" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/91/abdc50c4ef06fdf8d047f60ee777ca9b2a7885e1a9cea81343fbecda52d7/parsimonious-0.10.0.tar.gz", hash = "sha256:8281600da180ec8ae35427a4ab4f7b82bfec1e3d1e52f80cb60ea82b9512501c", size = 52172, upload-time = "2022-09-03T17:01:17.004Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/0f/c8b64d9b54ea631fcad4e9e3c8dbe8c11bb32a623be94f22974c88e71eaf/parsimonious-0.10.0-py3-none-any.whl", hash = "sha256:982ab435fabe86519b57f6b35610aa4e4e977e9f02a14353edf4bbc75369fc0f", size = 48427, upload-time = "2022-09-03T17:01:13.814Z" }, +] + +[[package]] +name = "pillow" +version = "11.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" }, + { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" }, + { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, 
upload-time = "2025-07-03T13:10:10.391Z" }, + { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" }, + { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" }, + { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" }, + { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" }, + { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" }, + { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" }, + { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" }, + { url = "https://files.pythonhosted.org/packages/1e/93/0952f2ed8db3a5a4c7a11f91965d6184ebc8cd7cbb7941a260d5f018cd2d/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphoneos.whl", hash = "sha256:1c627742b539bba4309df89171356fcb3cc5a9178355b2727d1b74a6cf155fbd", size = 2128328, upload-time = "2025-07-01T09:14:35.276Z" }, + { url = "https://files.pythonhosted.org/packages/4b/e8/100c3d114b1a0bf4042f27e0f87d2f25e857e838034e98ca98fe7b8c0a9c/pillow-11.3.0-cp313-cp313-ios_13_0_arm64_iphonesimulator.whl", hash = "sha256:30b7c02f3899d10f13d7a48163c8969e4e653f8b43416d23d13d1bbfdc93b9f8", size = 2170652, upload-time = "2025-07-01T09:14:37.203Z" }, + { url = "https://files.pythonhosted.org/packages/aa/86/3f758a28a6e381758545f7cdb4942e1cb79abd271bea932998fc0db93cb6/pillow-11.3.0-cp313-cp313-ios_13_0_x86_64_iphonesimulator.whl", hash = "sha256:7859a4cc7c9295f5838015d8cc0a9c215b77e43d07a25e460f35cf516df8626f", size = 2227443, upload-time = "2025-07-01T09:14:39.344Z" }, + { url = "https://files.pythonhosted.org/packages/01/f4/91d5b3ffa718df2f53b0dc109877993e511f4fd055d7e9508682e8aba092/pillow-11.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ec1ee50470b0d050984394423d96325b744d55c701a439d2bd66089bff963d3c", size = 5278474, upload-time = "2025-07-01T09:14:41.843Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/0e/37d7d3eca6c879fbd9dba21268427dffda1ab00d4eb05b32923d4fbe3b12/pillow-11.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7db51d222548ccfd274e4572fdbf3e810a5e66b00608862f947b163e613b67dd", size = 4686038, upload-time = "2025-07-01T09:14:44.008Z" }, + { url = "https://files.pythonhosted.org/packages/ff/b0/3426e5c7f6565e752d81221af9d3676fdbb4f352317ceafd42899aaf5d8a/pillow-11.3.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2d6fcc902a24ac74495df63faad1884282239265c6839a0a6416d33faedfae7e", size = 5864407, upload-time = "2025-07-03T13:10:15.628Z" }, + { url = "https://files.pythonhosted.org/packages/fc/c1/c6c423134229f2a221ee53f838d4be9d82bab86f7e2f8e75e47b6bf6cd77/pillow-11.3.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f0f5d8f4a08090c6d6d578351a2b91acf519a54986c055af27e7a93feae6d3f1", size = 7639094, upload-time = "2025-07-03T13:10:21.857Z" }, + { url = "https://files.pythonhosted.org/packages/ba/c9/09e6746630fe6372c67c648ff9deae52a2bc20897d51fa293571977ceb5d/pillow-11.3.0-cp313-cp313-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c37d8ba9411d6003bba9e518db0db0c58a680ab9fe5179f040b0463644bc9805", size = 5973503, upload-time = "2025-07-01T09:14:45.698Z" }, + { url = "https://files.pythonhosted.org/packages/d5/1c/a2a29649c0b1983d3ef57ee87a66487fdeb45132df66ab30dd37f7dbe162/pillow-11.3.0-cp313-cp313-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:13f87d581e71d9189ab21fe0efb5a23e9f28552d5be6979e84001d3b8505abe8", size = 6642574, upload-time = "2025-07-01T09:14:47.415Z" }, + { url = "https://files.pythonhosted.org/packages/36/de/d5cc31cc4b055b6c6fd990e3e7f0f8aaf36229a2698501bcb0cdf67c7146/pillow-11.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:023f6d2d11784a465f09fd09a34b150ea4672e85fb3d05931d89f373ab14abb2", size = 6084060, upload-time = "2025-07-01T09:14:49.636Z" }, + { url = "https://files.pythonhosted.org/packages/d5/ea/502d938cbaeec836ac28a9b730193716f0114c41325db428e6b280513f09/pillow-11.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:45dfc51ac5975b938e9809451c51734124e73b04d0f0ac621649821a63852e7b", size = 6721407, upload-time = "2025-07-01T09:14:51.962Z" }, + { url = "https://files.pythonhosted.org/packages/45/9c/9c5e2a73f125f6cbc59cc7087c8f2d649a7ae453f83bd0362ff7c9e2aee2/pillow-11.3.0-cp313-cp313-win32.whl", hash = "sha256:a4d336baed65d50d37b88ca5b60c0fa9d81e3a87d4a7930d3880d1624d5b31f3", size = 6273841, upload-time = "2025-07-01T09:14:54.142Z" }, + { url = "https://files.pythonhosted.org/packages/23/85/397c73524e0cd212067e0c969aa245b01d50183439550d24d9f55781b776/pillow-11.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:0bce5c4fd0921f99d2e858dc4d4d64193407e1b99478bc5cacecba2311abde51", size = 6978450, upload-time = "2025-07-01T09:14:56.436Z" }, + { url = "https://files.pythonhosted.org/packages/17/d2/622f4547f69cd173955194b78e4d19ca4935a1b0f03a302d655c9f6aae65/pillow-11.3.0-cp313-cp313-win_arm64.whl", hash = "sha256:1904e1264881f682f02b7f8167935cce37bc97db457f8e7849dc3a6a52b99580", size = 2423055, upload-time = "2025-07-01T09:14:58.072Z" }, + { url = "https://files.pythonhosted.org/packages/dd/80/a8a2ac21dda2e82480852978416cfacd439a4b490a501a288ecf4fe2532d/pillow-11.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4c834a3921375c48ee6b9624061076bc0a32a60b5532b322cc0ea64e639dd50e", size = 5281110, upload-time = "2025-07-01T09:14:59.79Z" }, + { url = 
"https://files.pythonhosted.org/packages/44/d6/b79754ca790f315918732e18f82a8146d33bcd7f4494380457ea89eb883d/pillow-11.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5e05688ccef30ea69b9317a9ead994b93975104a677a36a8ed8106be9260aa6d", size = 4689547, upload-time = "2025-07-01T09:15:01.648Z" }, + { url = "https://files.pythonhosted.org/packages/49/20/716b8717d331150cb00f7fdd78169c01e8e0c219732a78b0e59b6bdb2fd6/pillow-11.3.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1019b04af07fc0163e2810167918cb5add8d74674b6267616021ab558dc98ced", size = 5901554, upload-time = "2025-07-03T13:10:27.018Z" }, + { url = "https://files.pythonhosted.org/packages/74/cf/a9f3a2514a65bb071075063a96f0a5cf949c2f2fce683c15ccc83b1c1cab/pillow-11.3.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f944255db153ebb2b19c51fe85dd99ef0ce494123f21b9db4877ffdfc5590c7c", size = 7669132, upload-time = "2025-07-03T13:10:33.01Z" }, + { url = "https://files.pythonhosted.org/packages/98/3c/da78805cbdbee9cb43efe8261dd7cc0b4b93f2ac79b676c03159e9db2187/pillow-11.3.0-cp313-cp313t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f85acb69adf2aaee8b7da124efebbdb959a104db34d3a2cb0f3793dbae422a8", size = 6005001, upload-time = "2025-07-01T09:15:03.365Z" }, + { url = "https://files.pythonhosted.org/packages/6c/fa/ce044b91faecf30e635321351bba32bab5a7e034c60187fe9698191aef4f/pillow-11.3.0-cp313-cp313t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:05f6ecbeff5005399bb48d198f098a9b4b6bdf27b8487c7f38ca16eeb070cd59", size = 6668814, upload-time = "2025-07-01T09:15:05.655Z" }, + { url = "https://files.pythonhosted.org/packages/7b/51/90f9291406d09bf93686434f9183aba27b831c10c87746ff49f127ee80cb/pillow-11.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a7bc6e6fd0395bc052f16b1a8670859964dbd7003bd0af2ff08342eb6e442cfe", size = 6113124, upload-time = "2025-07-01T09:15:07.358Z" }, + { url = "https://files.pythonhosted.org/packages/cd/5a/6fec59b1dfb619234f7636d4157d11fb4e196caeee220232a8d2ec48488d/pillow-11.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:83e1b0161c9d148125083a35c1c5a89db5b7054834fd4387499e06552035236c", size = 6747186, upload-time = "2025-07-01T09:15:09.317Z" }, + { url = "https://files.pythonhosted.org/packages/49/6b/00187a044f98255225f172de653941e61da37104a9ea60e4f6887717e2b5/pillow-11.3.0-cp313-cp313t-win32.whl", hash = "sha256:2a3117c06b8fb646639dce83694f2f9eac405472713fcb1ae887469c0d4f6788", size = 6277546, upload-time = "2025-07-01T09:15:11.311Z" }, + { url = "https://files.pythonhosted.org/packages/e8/5c/6caaba7e261c0d75bab23be79f1d06b5ad2a2ae49f028ccec801b0e853d6/pillow-11.3.0-cp313-cp313t-win_amd64.whl", hash = "sha256:857844335c95bea93fb39e0fa2726b4d9d758850b34075a7e3ff4f4fa3aa3b31", size = 6985102, upload-time = "2025-07-01T09:15:13.164Z" }, + { url = "https://files.pythonhosted.org/packages/f3/7e/b623008460c09a0cb38263c93b828c666493caee2eb34ff67f778b87e58c/pillow-11.3.0-cp313-cp313t-win_arm64.whl", hash = "sha256:8797edc41f3e8536ae4b10897ee2f637235c94f27404cac7297f7b607dd0716e", size = 2424803, upload-time = "2025-07-01T09:15:15.695Z" }, + { url = "https://files.pythonhosted.org/packages/73/f4/04905af42837292ed86cb1b1dabe03dce1edc008ef14c473c5c7e1443c5d/pillow-11.3.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:d9da3df5f9ea2a89b81bb6087177fb1f4d1c7146d583a3fe5c672c0d94e55e12", size = 5278520, upload-time = "2025-07-01T09:15:17.429Z" }, + { url = 
"https://files.pythonhosted.org/packages/41/b0/33d79e377a336247df6348a54e6d2a2b85d644ca202555e3faa0cf811ecc/pillow-11.3.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0b275ff9b04df7b640c59ec5a3cb113eefd3795a8df80bac69646ef699c6981a", size = 4686116, upload-time = "2025-07-01T09:15:19.423Z" }, + { url = "https://files.pythonhosted.org/packages/49/2d/ed8bc0ab219ae8768f529597d9509d184fe8a6c4741a6864fea334d25f3f/pillow-11.3.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0743841cabd3dba6a83f38a92672cccbd69af56e3e91777b0ee7f4dba4385632", size = 5864597, upload-time = "2025-07-03T13:10:38.404Z" }, + { url = "https://files.pythonhosted.org/packages/b5/3d/b932bb4225c80b58dfadaca9d42d08d0b7064d2d1791b6a237f87f661834/pillow-11.3.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2465a69cf967b8b49ee1b96d76718cd98c4e925414ead59fdf75cf0fd07df673", size = 7638246, upload-time = "2025-07-03T13:10:44.987Z" }, + { url = "https://files.pythonhosted.org/packages/09/b5/0487044b7c096f1b48f0d7ad416472c02e0e4bf6919541b111efd3cae690/pillow-11.3.0-cp314-cp314-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41742638139424703b4d01665b807c6468e23e699e8e90cffefe291c5832b027", size = 5973336, upload-time = "2025-07-01T09:15:21.237Z" }, + { url = "https://files.pythonhosted.org/packages/a8/2d/524f9318f6cbfcc79fbc004801ea6b607ec3f843977652fdee4857a7568b/pillow-11.3.0-cp314-cp314-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93efb0b4de7e340d99057415c749175e24c8864302369e05914682ba642e5d77", size = 6642699, upload-time = "2025-07-01T09:15:23.186Z" }, + { url = "https://files.pythonhosted.org/packages/6f/d2/a9a4f280c6aefedce1e8f615baaa5474e0701d86dd6f1dede66726462bbd/pillow-11.3.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7966e38dcd0fa11ca390aed7c6f20454443581d758242023cf36fcb319b1a874", size = 6083789, upload-time = "2025-07-01T09:15:25.1Z" }, + { url = "https://files.pythonhosted.org/packages/fe/54/86b0cd9dbb683a9d5e960b66c7379e821a19be4ac5810e2e5a715c09a0c0/pillow-11.3.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:98a9afa7b9007c67ed84c57c9e0ad86a6000da96eaa638e4f8abe5b65ff83f0a", size = 6720386, upload-time = "2025-07-01T09:15:27.378Z" }, + { url = "https://files.pythonhosted.org/packages/e7/95/88efcaf384c3588e24259c4203b909cbe3e3c2d887af9e938c2022c9dd48/pillow-11.3.0-cp314-cp314-win32.whl", hash = "sha256:02a723e6bf909e7cea0dac1b0e0310be9d7650cd66222a5f1c571455c0a45214", size = 6370911, upload-time = "2025-07-01T09:15:29.294Z" }, + { url = "https://files.pythonhosted.org/packages/2e/cc/934e5820850ec5eb107e7b1a72dd278140731c669f396110ebc326f2a503/pillow-11.3.0-cp314-cp314-win_amd64.whl", hash = "sha256:a418486160228f64dd9e9efcd132679b7a02a5f22c982c78b6fc7dab3fefb635", size = 7117383, upload-time = "2025-07-01T09:15:31.128Z" }, + { url = "https://files.pythonhosted.org/packages/d6/e9/9c0a616a71da2a5d163aa37405e8aced9a906d574b4a214bede134e731bc/pillow-11.3.0-cp314-cp314-win_arm64.whl", hash = "sha256:155658efb5e044669c08896c0c44231c5e9abcaadbc5cd3648df2f7c0b96b9a6", size = 2511385, upload-time = "2025-07-01T09:15:33.328Z" }, + { url = "https://files.pythonhosted.org/packages/1a/33/c88376898aff369658b225262cd4f2659b13e8178e7534df9e6e1fa289f6/pillow-11.3.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:59a03cdf019efbfeeed910bf79c7c93255c3d54bc45898ac2a4140071b02b4ae", size = 5281129, upload-time = "2025-07-01T09:15:35.194Z" }, + { url = 
"https://files.pythonhosted.org/packages/1f/70/d376247fb36f1844b42910911c83a02d5544ebd2a8bad9efcc0f707ea774/pillow-11.3.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:f8a5827f84d973d8636e9dc5764af4f0cf2318d26744b3d902931701b0d46653", size = 4689580, upload-time = "2025-07-01T09:15:37.114Z" }, + { url = "https://files.pythonhosted.org/packages/eb/1c/537e930496149fbac69efd2fc4329035bbe2e5475b4165439e3be9cb183b/pillow-11.3.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ee92f2fd10f4adc4b43d07ec5e779932b4eb3dbfbc34790ada5a6669bc095aa6", size = 5902860, upload-time = "2025-07-03T13:10:50.248Z" }, + { url = "https://files.pythonhosted.org/packages/bd/57/80f53264954dcefeebcf9dae6e3eb1daea1b488f0be8b8fef12f79a3eb10/pillow-11.3.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c96d333dcf42d01f47b37e0979b6bd73ec91eae18614864622d9b87bbd5bbf36", size = 7670694, upload-time = "2025-07-03T13:10:56.432Z" }, + { url = "https://files.pythonhosted.org/packages/70/ff/4727d3b71a8578b4587d9c276e90efad2d6fe0335fd76742a6da08132e8c/pillow-11.3.0-cp314-cp314t-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4c96f993ab8c98460cd0c001447bff6194403e8b1d7e149ade5f00594918128b", size = 6005888, upload-time = "2025-07-01T09:15:39.436Z" }, + { url = "https://files.pythonhosted.org/packages/05/ae/716592277934f85d3be51d7256f3636672d7b1abfafdc42cf3f8cbd4b4c8/pillow-11.3.0-cp314-cp314t-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41342b64afeba938edb034d122b2dda5db2139b9a4af999729ba8818e0056477", size = 6670330, upload-time = "2025-07-01T09:15:41.269Z" }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7fe6cddcc8827b01b1a9766f5fdeb7418680744f9082035bdbabecf1d57f/pillow-11.3.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:068d9c39a2d1b358eb9f245ce7ab1b5c3246c7c8c7d9ba58cfa5b43146c06e50", size = 6114089, upload-time = "2025-07-01T09:15:43.13Z" }, + { url = "https://files.pythonhosted.org/packages/8b/f5/06bfaa444c8e80f1a8e4bff98da9c83b37b5be3b1deaa43d27a0db37ef84/pillow-11.3.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a1bc6ba083b145187f648b667e05a2534ecc4b9f2784c2cbe3089e44868f2b9b", size = 6748206, upload-time = "2025-07-01T09:15:44.937Z" }, + { url = "https://files.pythonhosted.org/packages/f0/77/bc6f92a3e8e6e46c0ca78abfffec0037845800ea38c73483760362804c41/pillow-11.3.0-cp314-cp314t-win32.whl", hash = "sha256:118ca10c0d60b06d006be10a501fd6bbdfef559251ed31b794668ed569c87e12", size = 6377370, upload-time = "2025-07-01T09:15:46.673Z" }, + { url = "https://files.pythonhosted.org/packages/4a/82/3a721f7d69dca802befb8af08b7c79ebcab461007ce1c18bd91a5d5896f9/pillow-11.3.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8924748b688aa210d79883357d102cd64690e56b923a186f35a82cbc10f997db", size = 7121500, upload-time = "2025-07-01T09:15:48.512Z" }, + { url = "https://files.pythonhosted.org/packages/89/c7/5572fa4a3f45740eaab6ae86fcdf7195b55beac1371ac8c619d880cfe948/pillow-11.3.0-cp314-cp314t-win_arm64.whl", hash = "sha256:79ea0d14d3ebad43ec77ad5272e6ff9bba5b679ef73375ea760261207fa8e0aa", size = 2512835, upload-time = "2025-07-01T09:15:50.399Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302, 
upload-time = "2024-09-17T19:06:50.688Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439, upload-time = "2024-09-17T19:06:49.212Z" }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, +] + +[[package]] +name = "postgrest" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecation" }, + { name = "httpx", extra = ["http2"] }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6e/3e/1b50568e1f5db0bdced4a82c7887e37326585faef7ca43ead86849cb4861/postgrest-1.1.1.tar.gz", hash = "sha256:f3bb3e8c4602775c75c844a31f565f5f3dd584df4d36d683f0b67d01a86be322", size = 15431, upload-time = "2025-06-23T19:21:34.742Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/71/188a50ea64c17f73ff4df5196ec1553a8f1723421eb2d1069c73bab47d78/postgrest-1.1.1-py3-none-any.whl", hash = "sha256:98a6035ee1d14288484bfe36235942c5fb2d26af6d8120dfe3efbe007859251a", size = 22366, upload-time = "2025-06-23T19:21:33.637Z" }, +] + +[[package]] +name = "propcache" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" }, + { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" }, + { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" }, + { url 
= "https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" }, + { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" }, + { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" }, + { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" }, + { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" }, + { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" }, + { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" }, + { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" }, + { url 
= "https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" }, + { url = "https://files.pythonhosted.org/packages/dc/d1/8c747fafa558c603c4ca19d8e20b288aa0c7cda74e9402f50f31eb65267e/propcache-0.3.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ca592ed634a73ca002967458187109265e980422116c0a107cf93d81f95af945", size = 71286, upload-time = "2025-06-09T22:54:54.369Z" }, + { url = "https://files.pythonhosted.org/packages/61/99/d606cb7986b60d89c36de8a85d58764323b3a5ff07770a99d8e993b3fa73/propcache-0.3.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:9ecb0aad4020e275652ba3975740f241bd12a61f1a784df044cf7477a02bc252", size = 42425, upload-time = "2025-06-09T22:54:55.642Z" }, + { url = "https://files.pythonhosted.org/packages/8c/96/ef98f91bbb42b79e9bb82bdd348b255eb9d65f14dbbe3b1594644c4073f7/propcache-0.3.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7f08f1cc28bd2eade7a8a3d2954ccc673bb02062e3e7da09bc75d843386b342f", size = 41846, upload-time = "2025-06-09T22:54:57.246Z" }, + { url = "https://files.pythonhosted.org/packages/5b/ad/3f0f9a705fb630d175146cd7b1d2bf5555c9beaed54e94132b21aac098a6/propcache-0.3.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1a342c834734edb4be5ecb1e9fb48cb64b1e2320fccbd8c54bf8da8f2a84c33", size = 208871, upload-time = "2025-06-09T22:54:58.975Z" }, + { url = "https://files.pythonhosted.org/packages/3a/38/2085cda93d2c8b6ec3e92af2c89489a36a5886b712a34ab25de9fbca7992/propcache-0.3.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8a544caaae1ac73f1fecfae70ded3e93728831affebd017d53449e3ac052ac1e", size = 215720, upload-time = "2025-06-09T22:55:00.471Z" }, + { url = "https://files.pythonhosted.org/packages/61/c1/d72ea2dc83ac7f2c8e182786ab0fc2c7bd123a1ff9b7975bee671866fe5f/propcache-0.3.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310d11aa44635298397db47a3ebce7db99a4cc4b9bbdfcf6c98a60c8d5261cf1", size = 215203, upload-time = "2025-06-09T22:55:01.834Z" }, + { url = "https://files.pythonhosted.org/packages/af/81/b324c44ae60c56ef12007105f1460d5c304b0626ab0cc6b07c8f2a9aa0b8/propcache-0.3.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c1396592321ac83157ac03a2023aa6cc4a3cc3cfdecb71090054c09e5a7cce3", size = 206365, upload-time = "2025-06-09T22:55:03.199Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/88549128bb89e66d2aff242488f62869014ae092db63ccea53c1cc75a81d/propcache-0.3.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8cabf5b5902272565e78197edb682017d21cf3b550ba0460ee473753f28d23c1", size = 196016, upload-time = "2025-06-09T22:55:04.518Z" }, + { url = "https://files.pythonhosted.org/packages/b9/3f/3bdd14e737d145114a5eb83cb172903afba7242f67c5877f9909a20d948d/propcache-0.3.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0a2f2235ac46a7aa25bdeb03a9e7060f6ecbd213b1f9101c43b3090ffb971ef6", size = 205596, upload-time = "2025-06-09T22:55:05.942Z" }, + { url = "https://files.pythonhosted.org/packages/0f/ca/2f4aa819c357d3107c3763d7ef42c03980f9ed5c48c82e01e25945d437c1/propcache-0.3.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:92b69e12e34869a6970fd2f3da91669899994b47c98f5d430b781c26f1d9f387", size = 200977, upload-time = 
"2025-06-09T22:55:07.792Z" }, + { url = "https://files.pythonhosted.org/packages/cd/4a/e65276c7477533c59085251ae88505caf6831c0e85ff8b2e31ebcbb949b1/propcache-0.3.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:54e02207c79968ebbdffc169591009f4474dde3b4679e16634d34c9363ff56b4", size = 197220, upload-time = "2025-06-09T22:55:09.173Z" }, + { url = "https://files.pythonhosted.org/packages/7c/54/fc7152e517cf5578278b242396ce4d4b36795423988ef39bb8cd5bf274c8/propcache-0.3.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4adfb44cb588001f68c5466579d3f1157ca07f7504fc91ec87862e2b8e556b88", size = 210642, upload-time = "2025-06-09T22:55:10.62Z" }, + { url = "https://files.pythonhosted.org/packages/b9/80/abeb4a896d2767bf5f1ea7b92eb7be6a5330645bd7fb844049c0e4045d9d/propcache-0.3.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fd3e6019dc1261cd0291ee8919dd91fbab7b169bb76aeef6c716833a3f65d206", size = 212789, upload-time = "2025-06-09T22:55:12.029Z" }, + { url = "https://files.pythonhosted.org/packages/b3/db/ea12a49aa7b2b6d68a5da8293dcf50068d48d088100ac016ad92a6a780e6/propcache-0.3.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4c181cad81158d71c41a2bce88edce078458e2dd5ffee7eddd6b05da85079f43", size = 205880, upload-time = "2025-06-09T22:55:13.45Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e5/9076a0bbbfb65d1198007059c65639dfd56266cf8e477a9707e4b1999ff4/propcache-0.3.2-cp313-cp313-win32.whl", hash = "sha256:8a08154613f2249519e549de2330cf8e2071c2887309a7b07fb56098f5170a02", size = 37220, upload-time = "2025-06-09T22:55:15.284Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f5/b369e026b09a26cd77aa88d8fffd69141d2ae00a2abaaf5380d2603f4b7f/propcache-0.3.2-cp313-cp313-win_amd64.whl", hash = "sha256:e41671f1594fc4ab0a6dec1351864713cb3a279910ae8b58f884a88a0a632c05", size = 40678, upload-time = "2025-06-09T22:55:16.445Z" }, + { url = "https://files.pythonhosted.org/packages/a4/3a/6ece377b55544941a08d03581c7bc400a3c8cd3c2865900a68d5de79e21f/propcache-0.3.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:9a3cf035bbaf035f109987d9d55dc90e4b0e36e04bbbb95af3055ef17194057b", size = 76560, upload-time = "2025-06-09T22:55:17.598Z" }, + { url = "https://files.pythonhosted.org/packages/0c/da/64a2bb16418740fa634b0e9c3d29edff1db07f56d3546ca2d86ddf0305e1/propcache-0.3.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:156c03d07dc1323d8dacaa221fbe028c5c70d16709cdd63502778e6c3ccca1b0", size = 44676, upload-time = "2025-06-09T22:55:18.922Z" }, + { url = "https://files.pythonhosted.org/packages/36/7b/f025e06ea51cb72c52fb87e9b395cced02786610b60a3ed51da8af017170/propcache-0.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:74413c0ba02ba86f55cf60d18daab219f7e531620c15f1e23d95563f505efe7e", size = 44701, upload-time = "2025-06-09T22:55:20.106Z" }, + { url = "https://files.pythonhosted.org/packages/a4/00/faa1b1b7c3b74fc277f8642f32a4c72ba1d7b2de36d7cdfb676db7f4303e/propcache-0.3.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f066b437bb3fa39c58ff97ab2ca351db465157d68ed0440abecb21715eb24b28", size = 276934, upload-time = "2025-06-09T22:55:21.5Z" }, + { url = "https://files.pythonhosted.org/packages/74/ab/935beb6f1756e0476a4d5938ff44bf0d13a055fed880caf93859b4f1baf4/propcache-0.3.2-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1304b085c83067914721e7e9d9917d41ad87696bf70f0bc7dee450e9c71ad0a", size = 278316, upload-time = "2025-06-09T22:55:22.918Z" }, + { url = 
"https://files.pythonhosted.org/packages/f8/9d/994a5c1ce4389610838d1caec74bdf0e98b306c70314d46dbe4fcf21a3e2/propcache-0.3.2-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab50cef01b372763a13333b4e54021bdcb291fc9a8e2ccb9c2df98be51bcde6c", size = 282619, upload-time = "2025-06-09T22:55:24.651Z" }, + { url = "https://files.pythonhosted.org/packages/2b/00/a10afce3d1ed0287cef2e09506d3be9822513f2c1e96457ee369adb9a6cd/propcache-0.3.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fad3b2a085ec259ad2c2842666b2a0a49dea8463579c606426128925af1ed725", size = 265896, upload-time = "2025-06-09T22:55:26.049Z" }, + { url = "https://files.pythonhosted.org/packages/2e/a8/2aa6716ffa566ca57c749edb909ad27884680887d68517e4be41b02299f3/propcache-0.3.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:261fa020c1c14deafd54c76b014956e2f86991af198c51139faf41c4d5e83892", size = 252111, upload-time = "2025-06-09T22:55:27.381Z" }, + { url = "https://files.pythonhosted.org/packages/36/4f/345ca9183b85ac29c8694b0941f7484bf419c7f0fea2d1e386b4f7893eed/propcache-0.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:46d7f8aa79c927e5f987ee3a80205c987717d3659f035c85cf0c3680526bdb44", size = 268334, upload-time = "2025-06-09T22:55:28.747Z" }, + { url = "https://files.pythonhosted.org/packages/3e/ca/fcd54f78b59e3f97b3b9715501e3147f5340167733d27db423aa321e7148/propcache-0.3.2-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:6d8f3f0eebf73e3c0ff0e7853f68be638b4043c65a70517bb575eff54edd8dbe", size = 255026, upload-time = "2025-06-09T22:55:30.184Z" }, + { url = "https://files.pythonhosted.org/packages/8b/95/8e6a6bbbd78ac89c30c225210a5c687790e532ba4088afb8c0445b77ef37/propcache-0.3.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:03c89c1b14a5452cf15403e291c0ccd7751d5b9736ecb2c5bab977ad6c5bcd81", size = 250724, upload-time = "2025-06-09T22:55:31.646Z" }, + { url = "https://files.pythonhosted.org/packages/ee/b0/0dd03616142baba28e8b2d14ce5df6631b4673850a3d4f9c0f9dd714a404/propcache-0.3.2-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:0cc17efde71e12bbaad086d679ce575268d70bc123a5a71ea7ad76f70ba30bba", size = 268868, upload-time = "2025-06-09T22:55:33.209Z" }, + { url = "https://files.pythonhosted.org/packages/c5/98/2c12407a7e4fbacd94ddd32f3b1e3d5231e77c30ef7162b12a60e2dd5ce3/propcache-0.3.2-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:acdf05d00696bc0447e278bb53cb04ca72354e562cf88ea6f9107df8e7fd9770", size = 271322, upload-time = "2025-06-09T22:55:35.065Z" }, + { url = "https://files.pythonhosted.org/packages/35/91/9cb56efbb428b006bb85db28591e40b7736847b8331d43fe335acf95f6c8/propcache-0.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4445542398bd0b5d32df908031cb1b30d43ac848e20470a878b770ec2dcc6330", size = 265778, upload-time = "2025-06-09T22:55:36.45Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4c/b0fe775a2bdd01e176b14b574be679d84fc83958335790f7c9a686c1f468/propcache-0.3.2-cp313-cp313t-win32.whl", hash = "sha256:f86e5d7cd03afb3a1db8e9f9f6eff15794e79e791350ac48a8c924e6f439f394", size = 41175, upload-time = "2025-06-09T22:55:38.436Z" }, + { url = "https://files.pythonhosted.org/packages/a4/ff/47f08595e3d9b5e149c150f88d9714574f1a7cbd89fe2817158a952674bf/propcache-0.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:9704bedf6e7cbe3c65eca4379a9b53ee6a83749f047808cbb5044d40d7d72198", size = 44857, upload-time = "2025-06-09T22:55:39.687Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, +] + +[[package]] +name = "psycopg" +version = "3.2.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/4a/93a6ab570a8d1a4ad171a1f4256e205ce48d828781312c0bbaff36380ecb/psycopg-3.2.9.tar.gz", hash = "sha256:2fbb46fcd17bc81f993f28c47f1ebea38d66ae97cc2dbc3cad73b37cefbff700", size = 158122, upload-time = "2025-05-13T16:11:15.533Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/b0/a73c195a56eb6b92e937a5ca58521a5c3346fb233345adc80fd3e2f542e2/psycopg-3.2.9-py3-none-any.whl", hash = "sha256:01a8dadccdaac2123c916208c96e06631641c0566b22005493f09663c7a8d3b6", size = 202705, upload-time = "2025-05-13T16:06:26.584Z" }, +] + +[[package]] +name = "psycopg-pool" +version = "3.2.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/13/1e7850bb2c69a63267c3dbf37387d3f71a00fd0e2fa55c5db14d64ba1af4/psycopg_pool-3.2.6.tar.gz", hash = "sha256:0f92a7817719517212fbfe2fd58b8c35c1850cdd2a80d36b581ba2085d9148e5", size = 29770, upload-time = "2025-02-26T12:03:47.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/fd/4feb52a55c1a4bd748f2acaed1903ab54a723c47f6d0242780f4d97104d4/psycopg_pool-3.2.6-py3-none-any.whl", hash = "sha256:5887318a9f6af906d041a0b1dc1c60f8f0dda8340c2572b74e10907b51ed5da7", size = 38252, upload-time = "2025-02-26T12:03:45.073Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pycryptodome" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/a6/8452177684d5e906854776276ddd34eca30d1b1e15aa1ee9cefc289a33f5/pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef", size = 4921276, upload-time = "2025-05-17T17:21:45.242Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/5d/bdb09489b63cd34a976cc9e2a8d938114f7a53a74d3dd4f125ffa49dce82/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:0011f7f00cdb74879142011f95133274741778abba114ceca229adbf8e62c3e4", size = 2495152, upload-time = "2025-05-17T17:20:20.833Z" }, + { url = "https://files.pythonhosted.org/packages/a7/ce/7840250ed4cc0039c433cd41715536f926d6e86ce84e904068eb3244b6a6/pycryptodome-3.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:90460fc9e088ce095f9ee8356722d4f10f86e5be06e2354230a9880b9c549aae", size = 1639348, upload-time = "2025-05-17T17:20:23.171Z" }, + { url = "https://files.pythonhosted.org/packages/ee/f0/991da24c55c1f688d6a3b5a11940567353f74590734ee4a64294834ae472/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4764e64b269fc83b00f682c47443c2e6e85b18273712b98aa43bcb77f8570477", size = 2184033, upload-time = "2025-05-17T17:20:25.424Z" }, + { url = "https://files.pythonhosted.org/packages/54/16/0e11882deddf00f68b68dd4e8e442ddc30641f31afeb2bc25588124ac8de/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb8f24adb74984aa0e5d07a2368ad95276cf38051fe2dc6605cbcf482e04f2a7", size = 2270142, upload-time = "2025-05-17T17:20:27.808Z" }, + { url = "https://files.pythonhosted.org/packages/d5/fc/4347fea23a3f95ffb931f383ff28b3f7b1fe868739182cb76718c0da86a1/pycryptodome-3.23.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d97618c9c6684a97ef7637ba43bdf6663a2e2e77efe0f863cce97a76af396446", size = 2309384, upload-time = "2025-05-17T17:20:30.765Z" }, + { url = "https://files.pythonhosted.org/packages/6e/d9/c5261780b69ce66d8cfab25d2797bd6e82ba0241804694cd48be41add5eb/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9a53a4fe5cb075075d515797d6ce2f56772ea7e6a1e5e4b96cf78a14bac3d265", size = 2183237, upload-time = "2025-05-17T17:20:33.736Z" }, + { url = "https://files.pythonhosted.org/packages/5a/6f/3af2ffedd5cfa08c631f89452c6648c4d779e7772dfc388c77c920ca6bbf/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:763d1d74f56f031788e5d307029caef067febf890cd1f8bf61183ae142f1a77b", size = 2343898, upload-time = "2025-05-17T17:20:36.086Z" }, + { url = "https://files.pythonhosted.org/packages/9a/dc/9060d807039ee5de6e2f260f72f3d70ac213993a804f5e67e0a73a56dd2f/pycryptodome-3.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:954af0e2bd7cea83ce72243b14e4fb518b18f0c1649b576d114973e2073b273d", size = 2269197, upload-time = "2025-05-17T17:20:38.414Z" }, + { url = "https://files.pythonhosted.org/packages/f9/34/e6c8ca177cb29dcc4967fef73f5de445912f93bd0343c9c33c8e5bf8cde8/pycryptodome-3.23.0-cp313-cp313t-win32.whl", hash = "sha256:257bb3572c63ad8ba40b89f6fc9d63a2a628e9f9708d31ee26560925ebe0210a", size = 1768600, upload-time = "2025-05-17T17:20:40.688Z" }, + { url = "https://files.pythonhosted.org/packages/e4/1d/89756b8d7ff623ad0160f4539da571d1f594d21ee6d68be130a6eccb39a4/pycryptodome-3.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6501790c5b62a29fcb227bd6b62012181d886a767ce9ed03b303d1f22eb5c625", size = 1799740, upload-time = "2025-05-17T17:20:42.413Z" }, + { url = "https://files.pythonhosted.org/packages/5d/61/35a64f0feaea9fd07f0d91209e7be91726eb48c0f1bfc6720647194071e4/pycryptodome-3.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:9a77627a330ab23ca43b48b130e202582e91cc69619947840ea4d2d1be21eb39", size = 1703685, upload-time = "2025-05-17T17:20:44.388Z" }, + { url = "https://files.pythonhosted.org/packages/db/6c/a1f71542c969912bb0e106f64f60a56cc1f0fabecf9396f45accbe63fa68/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27", size = 2495627, upload-time = "2025-05-17T17:20:47.139Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/4e/a066527e079fc5002390c8acdd3aca431e6ea0a50ffd7201551175b47323/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843", size = 1640362, upload-time = "2025-05-17T17:20:50.392Z" }, + { url = "https://files.pythonhosted.org/packages/50/52/adaf4c8c100a8c49d2bd058e5b551f73dfd8cb89eb4911e25a0c469b6b4e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490", size = 2182625, upload-time = "2025-05-17T17:20:52.866Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e9/a09476d436d0ff1402ac3867d933c61805ec2326c6ea557aeeac3825604e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575", size = 2268954, upload-time = "2025-05-17T17:20:55.027Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c5/ffe6474e0c551d54cab931918127c46d70cab8f114e0c2b5a3c071c2f484/pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b", size = 2308534, upload-time = "2025-05-17T17:20:57.279Z" }, + { url = "https://files.pythonhosted.org/packages/18/28/e199677fc15ecf43010f2463fde4c1a53015d1fe95fb03bca2890836603a/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a", size = 2181853, upload-time = "2025-05-17T17:20:59.322Z" }, + { url = "https://files.pythonhosted.org/packages/ce/ea/4fdb09f2165ce1365c9eaefef36625583371ee514db58dc9b65d3a255c4c/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f", size = 2342465, upload-time = "2025-05-17T17:21:03.83Z" }, + { url = "https://files.pythonhosted.org/packages/22/82/6edc3fc42fe9284aead511394bac167693fb2b0e0395b28b8bedaa07ef04/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa", size = 2267414, upload-time = "2025-05-17T17:21:06.72Z" }, + { url = "https://files.pythonhosted.org/packages/59/fe/aae679b64363eb78326c7fdc9d06ec3de18bac68be4b612fc1fe8902693c/pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886", size = 1768484, upload-time = "2025-05-17T17:21:08.535Z" }, + { url = "https://files.pythonhosted.org/packages/54/2f/e97a1b8294db0daaa87012c24a7bb714147c7ade7656973fd6c736b484ff/pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2", size = 1799636, upload-time = "2025-05-17T17:21:10.393Z" }, + { url = "https://files.pythonhosted.org/packages/18/3d/f9441a0d798bf2b1e645adc3265e55706aead1255ccdad3856dbdcffec14/pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c", size = 1703675, upload-time = "2025-05-17T17:21:13.146Z" }, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681, upload-time = "2025-01-24T01:42:12.693Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696, upload-time = "2025-01-24T01:42:10.371Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443, upload-time = "2024-12-18T11:31:54.917Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127, upload-time = "2024-12-18T11:28:30.346Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340, upload-time = "2024-12-18T11:28:32.521Z" }, + { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900, upload-time = "2024-12-18T11:28:34.507Z" }, + { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177, upload-time = "2024-12-18T11:28:36.488Z" }, + { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046, upload-time = "2024-12-18T11:28:39.409Z" }, + { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386, upload-time = "2024-12-18T11:28:41.221Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060, upload-time = "2024-12-18T11:28:44.709Z" }, + { url = 
"https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870, upload-time = "2024-12-18T11:28:46.839Z" }, + { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822, upload-time = "2024-12-18T11:28:48.896Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364, upload-time = "2024-12-18T11:28:50.755Z" }, + { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303, upload-time = "2024-12-18T11:28:54.122Z" }, + { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064, upload-time = "2024-12-18T11:28:56.074Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046, upload-time = "2024-12-18T11:28:58.107Z" }, + { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092, upload-time = "2024-12-18T11:29:01.335Z" }, + { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709, upload-time = "2024-12-18T11:29:03.193Z" }, + { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273, upload-time = "2024-12-18T11:29:05.306Z" }, + { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027, upload-time = "2024-12-18T11:29:07.294Z" }, + { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888, upload-time = "2024-12-18T11:29:09.249Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738, upload-time = "2024-12-18T11:29:11.23Z" }, + { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138, upload-time = "2024-12-18T11:29:16.396Z" }, + { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025, upload-time = "2024-12-18T11:29:20.25Z" }, + { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633, upload-time = "2024-12-18T11:29:23.877Z" }, + { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404, upload-time = "2024-12-18T11:29:25.872Z" }, + { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130, upload-time = "2024-12-18T11:29:29.252Z" }, + { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946, upload-time = "2024-12-18T11:29:31.338Z" }, + { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387, upload-time = "2024-12-18T11:29:33.481Z" }, + { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453, upload-time = "2024-12-18T11:29:35.533Z" }, + { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186, upload-time = "2024-12-18T11:29:37.649Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/68/85/1ea668bbab3c50071ca613c6ab30047fb36ab0da1b92fa8f17bbc38fd36c/pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee", size = 172583, upload-time = "2025-06-24T13:26:46.841Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pynacl" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/22/27582568be639dfe22ddb3902225f91f2f17ceff88ce80e4db396c8986da/PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba", size = 3392854, upload-time = "2022-01-07T22:05:41.134Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/75/0b8ede18506041c0bf23ac4d8e2971b4161cd6ce630b177d0a08eb0d8857/PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1", size = 349920, upload-time = "2022-01-07T22:05:49.156Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/fddf10acd09637327a97ef89d2a9d621328850a72f1fdc8c08bdf72e385f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92", size = 601722, upload-time = "2022-01-07T22:05:50.989Z" }, + { url = "https://files.pythonhosted.org/packages/5d/70/87a065c37cca41a75f2ce113a5a2c2aa7533be648b184ade58971b5f7ccc/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394", size = 680087, upload-time = "2022-01-07T22:05:52.539Z" }, + { url = 
"https://files.pythonhosted.org/packages/ee/87/f1bb6a595f14a327e8285b9eb54d41fef76c585a0edef0a45f6fc95de125/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d", size = 856678, upload-time = "2022-01-07T22:05:54.251Z" }, + { url = "https://files.pythonhosted.org/packages/66/28/ca86676b69bf9f90e710571b67450508484388bfce09acf8a46f0b8c785f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858", size = 1133660, upload-time = "2022-01-07T22:05:56.056Z" }, + { url = "https://files.pythonhosted.org/packages/3d/85/c262db650e86812585e2bc59e497a8f59948a005325a11bbbc9ecd3fe26b/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b", size = 663824, upload-time = "2022-01-07T22:05:57.434Z" }, + { url = "https://files.pythonhosted.org/packages/fd/1a/cc308a884bd299b651f1633acb978e8596c71c33ca85e9dc9fa33a5399b9/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff", size = 1117912, upload-time = "2022-01-07T22:05:58.665Z" }, + { url = "https://files.pythonhosted.org/packages/25/2d/b7df6ddb0c2a33afdb358f8af6ea3b8c4d1196ca45497dd37a56f0c122be/PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543", size = 204624, upload-time = "2022-01-07T22:06:00.085Z" }, + { url = "https://files.pythonhosted.org/packages/5e/22/d3db169895faaf3e2eda892f005f433a62db2decbcfbc2f61e6517adfa87/PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93", size = 212141, upload-time = "2022-01-07T22:06:01.861Z" }, +] + +[[package]] +name = "pyproject-api" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/19/441e0624a8afedd15bbcce96df1b80479dd0ff0d965f5ce8fde4f2f6ffad/pyproject_api-1.8.0.tar.gz", hash = "sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496", size = 22340, upload-time = "2024-09-18T23:18:37.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/f4/3c4ddfcc0c19c217c6de513842d286de8021af2f2ab79bbb86c00342d778/pyproject_api-1.8.0-py3-none-any.whl", hash = "sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228", size = 13100, upload-time = "2024-09-18T23:18:35.927Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" }, +] + +[[package]] +name = 
"pytest-asyncio" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = "sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = 
"sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + +[[package]] +name = "pyunormalize" +version = "16.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/08/568036c725dac746ecb267bb749ef930fb7907454fe69fce83c8557287fb/pyunormalize-16.0.0.tar.gz", hash = "sha256:2e1dfbb4a118154ae26f70710426a52a364b926c9191f764601f5a8cb12761f7", size = 49968, upload-time = "2024-09-17T17:08:18.245Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/f9/9d86e56f716e0651194a5ad58be9c146fcaf1de6901ac6f3cd3affeeb74e/pyunormalize-16.0.0-py3-none-any.whl", hash = "sha256:c647d95e5d1e2ea9a2f448d1d95d8518348df24eab5c3fd32d2b5c3300a49152", size = 49173, upload-time = "2024-09-17T17:08:17.078Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, + { url = "https://files.pythonhosted.org/packages/a5/be/3fd5de0979fcb3994bfee0d65ed8ca9506a8a1260651b86174f6a86f52b3/pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d", size = 8705700, upload-time = "2025-07-14T20:13:26.471Z" }, + { url = "https://files.pythonhosted.org/packages/e3/28/e0a1909523c6890208295a29e05c2adb2126364e289826c0a8bc7297bd5c/pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d", size = 9494700, upload-time = "2025-07-14T20:13:28.243Z" }, + { url = "https://files.pythonhosted.org/packages/04/bf/90339ac0f55726dce7d794e6d79a18a91265bdf3aa70b6b9ca52f35e022a/pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a", size = 8709318, upload-time = "2025-07-14T20:13:30.348Z" }, + { url = "https://files.pythonhosted.org/packages/c9/31/097f2e132c4f16d99a22bfb777e0fd88bd8e1c634304e102f313af69ace5/pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee", size = 8840714, upload-time = "2025-07-14T20:13:32.449Z" }, + { url = "https://files.pythonhosted.org/packages/90/4b/07c77d8ba0e01349358082713400435347df8426208171ce297da32c313d/pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87", size = 9656800, upload-time = "2025-07-14T20:13:34.312Z" }, + { url = "https://files.pythonhosted.org/packages/c0/d2/21af5c535501a7233e734b8af901574572da66fcc254cb35d0609c9080dd/pywin32-311-cp314-cp314-win_arm64.whl", hash = 
"sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42", size = 8932540, upload-time = "2025-07-14T20:13:36.379Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, + { url = "https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309, upload-time = "2024-08-06T20:32:43.4Z" }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679, upload-time = "2024-08-06T20:32:44.801Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428, upload-time = "2024-08-06T20:32:46.432Z" }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361, upload-time = "2024-08-06T20:32:51.188Z" }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523, upload-time = "2024-08-06T20:32:53.019Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660, upload-time = "2024-08-06T20:32:54.708Z" }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597, upload-time = "2024-08-06T20:32:56.985Z" }, + { url = "https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527, upload-time = "2024-08-06T20:33:03.001Z" }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446, upload-time = "2024-08-06T20:33:04.33Z" }, +] + +[[package]] +name = "rabinmiller" +version = "0.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/c8/9a4bd1d823200b4fcbdc25584cf4e788f672cdf0d6622b66a8b49c3be925/rabinmiller-0.1.0.tar.gz", hash = "sha256:a9873aa6fdd0c26d5205d99e126fd94e6e1bb2aa966e167e136dfbfab0d0556d", size = 5159, upload-time = "2024-11-22T07:14:04.89Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/b0/68c2efd5f025b80316fce28e49ce25c5d0171aa17ce7f94a89c0a6544d2b/rabinmiller-0.1.0-py3-none-any.whl", hash = "sha256:3fec2d26fc210772ced965a8f0e2870e5582cadf255bc665ef3f4932752ada5f", size = 5309, upload-time = "2024-11-22T07:14:03.572Z" }, +] + +[[package]] +name = "realtime" +version = "2.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "python-dateutil" }, + { name = 
"typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/1e/c5f47928789cd5abb96e527929dea088213968f785983a231b3dfe08cc4f/realtime-2.4.2.tar.gz", hash = "sha256:760308d5310533f65a9098e0b482a518f6ad2f3c0f2723e83cf5856865bafc5d", size = 18802, upload-time = "2025-03-26T17:39:11.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/b7/1b7651f353e14543c60cdfe40e3ea4dea412cfb2e93ab6384e72be813f05/realtime-2.4.2-py3-none-any.whl", hash = "sha256:0cc1b4a097acf9c0bd3a2f1998170de47744574c606617285113ddb3021e54ca", size = 22025, upload-time = "2025-03-26T17:39:10.031Z" }, +] + +[[package]] +name = "redis" +version = "6.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/d6/e8b92798a5bd67d659d51a18170e91c16ac3b59738d91894651ee255ed49/redis-6.4.0.tar.gz", hash = "sha256:b01bc7282b8444e28ec36b261df5375183bb47a07eb9c603f284e89cbc5ef010", size = 4647399, upload-time = "2025-08-07T08:10:11.441Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/02/89e2ed7e85db6c93dfa9e8f691c5087df4e3551ab39081a4d7c6d1f90e05/redis-6.4.0-py3-none-any.whl", hash = "sha256:f0544fa9604264e9464cdf4814e7d4830f74b165d52f2a330a760a88dd248b7f", size = 279847, upload-time = "2025-08-07T08:10:09.84Z" }, +] + +[[package]] +name = "referencing" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" }, +] + +[[package]] +name = "regex" +version = "2025.7.34" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/de/e13fa6dc61d78b30ba47481f99933a3b49a57779d625c392d8036770a60d/regex-2025.7.34.tar.gz", hash = "sha256:9ead9765217afd04a86822dfcd4ed2747dfe426e887da413b15ff0ac2457e21a", size = 400714, upload-time = "2025-07-31T00:21:16.262Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ff/f0/31d62596c75a33f979317658e8d261574785c6cd8672c06741ce2e2e2070/regex-2025.7.34-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7f7211a746aced993bef487de69307a38c5ddd79257d7be83f7b202cb59ddb50", size = 485492, upload-time = "2025-07-31T00:19:35.57Z" }, + { url = "https://files.pythonhosted.org/packages/d8/16/b818d223f1c9758c3434be89aa1a01aae798e0e0df36c1f143d1963dd1ee/regex-2025.7.34-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fb31080f2bd0681484b275461b202b5ad182f52c9ec606052020fe13eb13a72f", size = 290000, upload-time = "2025-07-31T00:19:37.175Z" }, + { url = "https://files.pythonhosted.org/packages/cd/70/69506d53397b4bd6954061bae75677ad34deb7f6ca3ba199660d6f728ff5/regex-2025.7.34-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0200a5150c4cf61e407038f4b4d5cdad13e86345dac29ff9dab3d75d905cf130", size = 286072, upload-time = "2025-07-31T00:19:38.612Z" }, + { url = 
"https://files.pythonhosted.org/packages/b0/73/536a216d5f66084fb577bb0543b5cb7de3272eb70a157f0c3a542f1c2551/regex-2025.7.34-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:739a74970e736df0773788377969c9fea3876c2fc13d0563f98e5503e5185f46", size = 797341, upload-time = "2025-07-31T00:19:40.119Z" }, + { url = "https://files.pythonhosted.org/packages/26/af/733f8168449e56e8f404bb807ea7189f59507cbea1b67a7bbcd92f8bf844/regex-2025.7.34-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4fef81b2f7ea6a2029161ed6dea9ae13834c28eb5a95b8771828194a026621e4", size = 862556, upload-time = "2025-07-31T00:19:41.556Z" }, + { url = "https://files.pythonhosted.org/packages/19/dd/59c464d58c06c4f7d87de4ab1f590e430821345a40c5d345d449a636d15f/regex-2025.7.34-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ea74cf81fe61a7e9d77989050d0089a927ab758c29dac4e8e1b6c06fccf3ebf0", size = 910762, upload-time = "2025-07-31T00:19:43Z" }, + { url = "https://files.pythonhosted.org/packages/37/a8/b05ccf33ceca0815a1e253693b2c86544932ebcc0049c16b0fbdf18b688b/regex-2025.7.34-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e4636a7f3b65a5f340ed9ddf53585c42e3ff37101d383ed321bfe5660481744b", size = 801892, upload-time = "2025-07-31T00:19:44.645Z" }, + { url = "https://files.pythonhosted.org/packages/5f/9a/b993cb2e634cc22810afd1652dba0cae156c40d4864285ff486c73cd1996/regex-2025.7.34-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cef962d7834437fe8d3da6f9bfc6f93f20f218266dcefec0560ed7765f5fe01", size = 786551, upload-time = "2025-07-31T00:19:46.127Z" }, + { url = "https://files.pythonhosted.org/packages/2d/79/7849d67910a0de4e26834b5bb816e028e35473f3d7ae563552ea04f58ca2/regex-2025.7.34-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:cbe1698e5b80298dbce8df4d8d1182279fbdaf1044e864cbc9d53c20e4a2be77", size = 856457, upload-time = "2025-07-31T00:19:47.562Z" }, + { url = "https://files.pythonhosted.org/packages/91/c6/de516bc082524b27e45cb4f54e28bd800c01efb26d15646a65b87b13a91e/regex-2025.7.34-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:32b9f9bcf0f605eb094b08e8da72e44badabb63dde6b83bd530580b488d1c6da", size = 848902, upload-time = "2025-07-31T00:19:49.312Z" }, + { url = "https://files.pythonhosted.org/packages/7d/22/519ff8ba15f732db099b126f039586bd372da6cd4efb810d5d66a5daeda1/regex-2025.7.34-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:524c868ba527eab4e8744a9287809579f54ae8c62fbf07d62aacd89f6026b282", size = 788038, upload-time = "2025-07-31T00:19:50.794Z" }, + { url = "https://files.pythonhosted.org/packages/3f/7d/aabb467d8f57d8149895d133c88eb809a1a6a0fe262c1d508eb9dfabb6f9/regex-2025.7.34-cp312-cp312-win32.whl", hash = "sha256:d600e58ee6d036081c89696d2bdd55d507498a7180df2e19945c6642fac59588", size = 264417, upload-time = "2025-07-31T00:19:52.292Z" }, + { url = "https://files.pythonhosted.org/packages/3b/39/bd922b55a4fc5ad5c13753274e5b536f5b06ec8eb9747675668491c7ab7a/regex-2025.7.34-cp312-cp312-win_amd64.whl", hash = "sha256:9a9ab52a466a9b4b91564437b36417b76033e8778e5af8f36be835d8cb370d62", size = 275387, upload-time = "2025-07-31T00:19:53.593Z" }, + { url = "https://files.pythonhosted.org/packages/f7/3c/c61d2fdcecb754a40475a3d1ef9a000911d3e3fc75c096acf44b0dfb786a/regex-2025.7.34-cp312-cp312-win_arm64.whl", hash = "sha256:c83aec91af9c6fbf7c743274fd952272403ad9a9db05fe9bfc9df8d12b45f176", size = 268482, upload-time = 
"2025-07-31T00:19:55.183Z" }, + { url = "https://files.pythonhosted.org/packages/15/16/b709b2119975035169a25aa8e4940ca177b1a2e25e14f8d996d09130368e/regex-2025.7.34-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:c3c9740a77aeef3f5e3aaab92403946a8d34437db930a0280e7e81ddcada61f5", size = 485334, upload-time = "2025-07-31T00:19:56.58Z" }, + { url = "https://files.pythonhosted.org/packages/94/a6/c09136046be0595f0331bc58a0e5f89c2d324cf734e0b0ec53cf4b12a636/regex-2025.7.34-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:69ed3bc611540f2ea70a4080f853741ec698be556b1df404599f8724690edbcd", size = 289942, upload-time = "2025-07-31T00:19:57.943Z" }, + { url = "https://files.pythonhosted.org/packages/36/91/08fc0fd0f40bdfb0e0df4134ee37cfb16e66a1044ac56d36911fd01c69d2/regex-2025.7.34-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d03c6f9dcd562c56527c42b8530aad93193e0b3254a588be1f2ed378cdfdea1b", size = 285991, upload-time = "2025-07-31T00:19:59.837Z" }, + { url = "https://files.pythonhosted.org/packages/be/2f/99dc8f6f756606f0c214d14c7b6c17270b6bbe26d5c1f05cde9dbb1c551f/regex-2025.7.34-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6164b1d99dee1dfad33f301f174d8139d4368a9fb50bf0a3603b2eaf579963ad", size = 797415, upload-time = "2025-07-31T00:20:01.668Z" }, + { url = "https://files.pythonhosted.org/packages/62/cf/2fcdca1110495458ba4e95c52ce73b361cf1cafd8a53b5c31542cde9a15b/regex-2025.7.34-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1e4f4f62599b8142362f164ce776f19d79bdd21273e86920a7b604a4275b4f59", size = 862487, upload-time = "2025-07-31T00:20:03.142Z" }, + { url = "https://files.pythonhosted.org/packages/90/38/899105dd27fed394e3fae45607c1983e138273ec167e47882fc401f112b9/regex-2025.7.34-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:72a26dcc6a59c057b292f39d41465d8233a10fd69121fa24f8f43ec6294e5415", size = 910717, upload-time = "2025-07-31T00:20:04.727Z" }, + { url = "https://files.pythonhosted.org/packages/ee/f6/4716198dbd0bcc9c45625ac4c81a435d1c4d8ad662e8576dac06bab35b17/regex-2025.7.34-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d5273fddf7a3e602695c92716c420c377599ed3c853ea669c1fe26218867002f", size = 801943, upload-time = "2025-07-31T00:20:07.1Z" }, + { url = "https://files.pythonhosted.org/packages/40/5d/cff8896d27e4e3dd11dd72ac78797c7987eb50fe4debc2c0f2f1682eb06d/regex-2025.7.34-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c1844be23cd40135b3a5a4dd298e1e0c0cb36757364dd6cdc6025770363e06c1", size = 786664, upload-time = "2025-07-31T00:20:08.818Z" }, + { url = "https://files.pythonhosted.org/packages/10/29/758bf83cf7b4c34f07ac3423ea03cee3eb3176941641e4ccc05620f6c0b8/regex-2025.7.34-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:dde35e2afbbe2272f8abee3b9fe6772d9b5a07d82607b5788e8508974059925c", size = 856457, upload-time = "2025-07-31T00:20:10.328Z" }, + { url = "https://files.pythonhosted.org/packages/d7/30/c19d212b619963c5b460bfed0ea69a092c6a43cba52a973d46c27b3e2975/regex-2025.7.34-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:f3f6e8e7af516a7549412ce57613e859c3be27d55341a894aacaa11703a4c31a", size = 849008, upload-time = "2025-07-31T00:20:11.823Z" }, + { url = "https://files.pythonhosted.org/packages/9e/b8/3c35da3b12c87e3cc00010ef6c3a4ae787cff0bc381aa3d251def219969a/regex-2025.7.34-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:469142fb94a869beb25b5f18ea87646d21def10fbacb0bcb749224f3509476f0", size = 788101, upload-time = "2025-07-31T00:20:13.729Z" }, + { url = "https://files.pythonhosted.org/packages/47/80/2f46677c0b3c2b723b2c358d19f9346e714113865da0f5f736ca1a883bde/regex-2025.7.34-cp313-cp313-win32.whl", hash = "sha256:da7507d083ee33ccea1310447410c27ca11fb9ef18c95899ca57ff60a7e4d8f1", size = 264401, upload-time = "2025-07-31T00:20:15.233Z" }, + { url = "https://files.pythonhosted.org/packages/be/fa/917d64dd074682606a003cba33585c28138c77d848ef72fc77cbb1183849/regex-2025.7.34-cp313-cp313-win_amd64.whl", hash = "sha256:9d644de5520441e5f7e2db63aec2748948cc39ed4d7a87fd5db578ea4043d997", size = 275368, upload-time = "2025-07-31T00:20:16.711Z" }, + { url = "https://files.pythonhosted.org/packages/65/cd/f94383666704170a2154a5df7b16be28f0c27a266bffcd843e58bc84120f/regex-2025.7.34-cp313-cp313-win_arm64.whl", hash = "sha256:7bf1c5503a9f2cbd2f52d7e260acb3131b07b6273c470abb78568174fe6bde3f", size = 268482, upload-time = "2025-07-31T00:20:18.189Z" }, + { url = "https://files.pythonhosted.org/packages/ac/23/6376f3a23cf2f3c00514b1cdd8c990afb4dfbac3cb4a68b633c6b7e2e307/regex-2025.7.34-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:8283afe7042d8270cecf27cca558873168e771183d4d593e3c5fe5f12402212a", size = 485385, upload-time = "2025-07-31T00:20:19.692Z" }, + { url = "https://files.pythonhosted.org/packages/73/5b/6d4d3a0b4d312adbfd6d5694c8dddcf1396708976dd87e4d00af439d962b/regex-2025.7.34-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6c053f9647e3421dd2f5dff8172eb7b4eec129df9d1d2f7133a4386319b47435", size = 289788, upload-time = "2025-07-31T00:20:21.941Z" }, + { url = "https://files.pythonhosted.org/packages/92/71/5862ac9913746e5054d01cb9fb8125b3d0802c0706ef547cae1e7f4428fa/regex-2025.7.34-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:a16dd56bbcb7d10e62861c3cd000290ddff28ea142ffb5eb3470f183628011ac", size = 286136, upload-time = "2025-07-31T00:20:26.146Z" }, + { url = "https://files.pythonhosted.org/packages/27/df/5b505dc447eb71278eba10d5ec940769ca89c1af70f0468bfbcb98035dc2/regex-2025.7.34-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:69c593ff5a24c0d5c1112b0df9b09eae42b33c014bdca7022d6523b210b69f72", size = 797753, upload-time = "2025-07-31T00:20:27.919Z" }, + { url = "https://files.pythonhosted.org/packages/86/38/3e3dc953d13998fa047e9a2414b556201dbd7147034fbac129392363253b/regex-2025.7.34-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:98d0ce170fcde1a03b5df19c5650db22ab58af375aaa6ff07978a85c9f250f0e", size = 863263, upload-time = "2025-07-31T00:20:29.803Z" }, + { url = "https://files.pythonhosted.org/packages/68/e5/3ff66b29dde12f5b874dda2d9dec7245c2051f2528d8c2a797901497f140/regex-2025.7.34-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d72765a4bff8c43711d5b0f5b452991a9947853dfa471972169b3cc0ba1d0751", size = 910103, upload-time = "2025-07-31T00:20:31.313Z" }, + { url = "https://files.pythonhosted.org/packages/9e/fe/14176f2182125977fba3711adea73f472a11f3f9288c1317c59cd16ad5e6/regex-2025.7.34-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4494f8fd95a77eb434039ad8460e64d57baa0434f1395b7da44015bef650d0e4", size = 801709, upload-time = "2025-07-31T00:20:33.323Z" }, + { url = 
"https://files.pythonhosted.org/packages/5a/0d/80d4e66ed24f1ba876a9e8e31b709f9fd22d5c266bf5f3ab3c1afe683d7d/regex-2025.7.34-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4f42b522259c66e918a0121a12429b2abcf696c6f967fa37bdc7b72e61469f98", size = 786726, upload-time = "2025-07-31T00:20:35.252Z" }, + { url = "https://files.pythonhosted.org/packages/12/75/c3ebb30e04a56c046f5c85179dc173818551037daae2c0c940c7b19152cb/regex-2025.7.34-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:aaef1f056d96a0a5d53ad47d019d5b4c66fe4be2da87016e0d43b7242599ffc7", size = 857306, upload-time = "2025-07-31T00:20:37.12Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b2/a4dc5d8b14f90924f27f0ac4c4c4f5e195b723be98adecc884f6716614b6/regex-2025.7.34-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:656433e5b7dccc9bc0da6312da8eb897b81f5e560321ec413500e5367fcd5d47", size = 848494, upload-time = "2025-07-31T00:20:38.818Z" }, + { url = "https://files.pythonhosted.org/packages/0d/21/9ac6e07a4c5e8646a90b56b61f7e9dac11ae0747c857f91d3d2bc7c241d9/regex-2025.7.34-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e91eb2c62c39705e17b4d42d4b86c4e86c884c0d15d9c5a47d0835f8387add8e", size = 787850, upload-time = "2025-07-31T00:20:40.478Z" }, + { url = "https://files.pythonhosted.org/packages/be/6c/d51204e28e7bc54f9a03bb799b04730d7e54ff2718862b8d4e09e7110a6a/regex-2025.7.34-cp314-cp314-win32.whl", hash = "sha256:f978ddfb6216028c8f1d6b0f7ef779949498b64117fc35a939022f67f810bdcb", size = 269730, upload-time = "2025-07-31T00:20:42.253Z" }, + { url = "https://files.pythonhosted.org/packages/74/52/a7e92d02fa1fdef59d113098cb9f02c5d03289a0e9f9e5d4d6acccd10677/regex-2025.7.34-cp314-cp314-win_amd64.whl", hash = "sha256:4b7dc33b9b48fb37ead12ffc7bdb846ac72f99a80373c4da48f64b373a7abeae", size = 278640, upload-time = "2025-07-31T00:20:44.42Z" }, + { url = "https://files.pythonhosted.org/packages/d1/78/a815529b559b1771080faa90c3ab401730661f99d495ab0071649f139ebd/regex-2025.7.34-cp314-cp314-win_arm64.whl", hash = "sha256:4b8c4d39f451e64809912c82392933d80fe2e4a87eeef8859fcc5380d0173c64", size = 271757, upload-time = "2025-07-31T00:20:46.355Z" }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, +] + +[[package]] +name = "rlp" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-utils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1b/2d/439b0728a92964a04d9c88ea1ca9ebb128893fbbd5834faa31f987f2fd4c/rlp-4.1.0.tar.gz", hash = "sha256:be07564270a96f3e225e2c107db263de96b5bc1f27722d2855bd3459a08e95a9", size = 33429, upload-time = "2025-02-04T22:05:59.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/fb/e4c0ced9893b84ac95b7181d69a9786ce5879aeb3bbbcbba80a164f85d6a/rlp-4.1.0-py3-none-any.whl", hash = "sha256:8eca394c579bad34ee0b937aecb96a57052ff3716e19c7a578883e767bc5da6f", size = 19973, upload-time = "2025-02-04T22:05:57.05Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.27.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1e/d9/991a0dee12d9fc53ed027e26a26a64b151d77252ac477e22666b9688bc16/rpds_py-0.27.0.tar.gz", hash = "sha256:8b23cf252f180cda89220b378d917180f29d313cd6a07b2431c0d3b776aae86f", size = 27420, upload-time = "2025-08-07T08:26:39.624Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/17/e67309ca1ac993fa1888a0d9b2f5ccc1f67196ace32e76c9f8e1dbbbd50c/rpds_py-0.27.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:19c990fdf5acecbf0623e906ae2e09ce1c58947197f9bced6bbd7482662231c4", size = 362611, upload-time = "2025-08-07T08:23:44.773Z" }, + { url = "https://files.pythonhosted.org/packages/93/2e/28c2fb84aa7aa5d75933d1862d0f7de6198ea22dfd9a0cca06e8a4e7509e/rpds_py-0.27.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6c27a7054b5224710fcfb1a626ec3ff4f28bcb89b899148c72873b18210e446b", size = 347680, upload-time = "2025-08-07T08:23:46.014Z" }, + { url = "https://files.pythonhosted.org/packages/44/3e/9834b4c8f4f5fe936b479e623832468aa4bd6beb8d014fecaee9eac6cdb1/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:09965b314091829b378b60607022048953e25f0b396c2b70e7c4c81bcecf932e", size = 384600, upload-time = "2025-08-07T08:23:48Z" }, + { url = "https://files.pythonhosted.org/packages/19/78/744123c7b38865a965cd9e6f691fde7ef989a00a256fa8bf15b75240d12f/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:14f028eb47f59e9169bfdf9f7ceafd29dd64902141840633683d0bad5b04ff34", size = 400697, upload-time = "2025-08-07T08:23:49.407Z" }, + { url = 
"https://files.pythonhosted.org/packages/32/97/3c3d32fe7daee0a1f1a678b6d4dfb8c4dcf88197fa2441f9da7cb54a8466/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6168af0be75bba990a39f9431cdfae5f0ad501f4af32ae62e8856307200517b8", size = 517781, upload-time = "2025-08-07T08:23:50.557Z" }, + { url = "https://files.pythonhosted.org/packages/b2/be/28f0e3e733680aa13ecec1212fc0f585928a206292f14f89c0b8a684cad1/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ab47fe727c13c09d0e6f508e3a49e545008e23bf762a245b020391b621f5b726", size = 406449, upload-time = "2025-08-07T08:23:51.732Z" }, + { url = "https://files.pythonhosted.org/packages/95/ae/5d15c83e337c082d0367053baeb40bfba683f42459f6ebff63a2fd7e5518/rpds_py-0.27.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5fa01b3d5e3b7d97efab65bd3d88f164e289ec323a8c033c5c38e53ee25c007e", size = 386150, upload-time = "2025-08-07T08:23:52.822Z" }, + { url = "https://files.pythonhosted.org/packages/bf/65/944e95f95d5931112829e040912b25a77b2e7ed913ea5fe5746aa5c1ce75/rpds_py-0.27.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:6c135708e987f46053e0a1246a206f53717f9fadfba27174a9769ad4befba5c3", size = 406100, upload-time = "2025-08-07T08:23:54.339Z" }, + { url = "https://files.pythonhosted.org/packages/21/a4/1664b83fae02894533cd11dc0b9f91d673797c2185b7be0f7496107ed6c5/rpds_py-0.27.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fc327f4497b7087d06204235199daf208fd01c82d80465dc5efa4ec9df1c5b4e", size = 421345, upload-time = "2025-08-07T08:23:55.832Z" }, + { url = "https://files.pythonhosted.org/packages/7c/26/b7303941c2b0823bfb34c71378249f8beedce57301f400acb04bb345d025/rpds_py-0.27.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7e57906e38583a2cba67046a09c2637e23297618dc1f3caddbc493f2be97c93f", size = 561891, upload-time = "2025-08-07T08:23:56.951Z" }, + { url = "https://files.pythonhosted.org/packages/9b/c8/48623d64d4a5a028fa99576c768a6159db49ab907230edddc0b8468b998b/rpds_py-0.27.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f4f69d7a4300fbf91efb1fb4916421bd57804c01ab938ab50ac9c4aa2212f03", size = 591756, upload-time = "2025-08-07T08:23:58.146Z" }, + { url = "https://files.pythonhosted.org/packages/b3/51/18f62617e8e61cc66334c9fb44b1ad7baae3438662098efbc55fb3fda453/rpds_py-0.27.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b4c4fbbcff474e1e5f38be1bf04511c03d492d42eec0babda5d03af3b5589374", size = 557088, upload-time = "2025-08-07T08:23:59.6Z" }, + { url = "https://files.pythonhosted.org/packages/bd/4c/e84c3a276e2496a93d245516be6b49e20499aa8ca1c94d59fada0d79addc/rpds_py-0.27.0-cp312-cp312-win32.whl", hash = "sha256:27bac29bbbf39601b2aab474daf99dbc8e7176ca3389237a23944b17f8913d97", size = 221926, upload-time = "2025-08-07T08:24:00.695Z" }, + { url = "https://files.pythonhosted.org/packages/83/89/9d0fbcef64340db0605eb0a0044f258076f3ae0a3b108983b2c614d96212/rpds_py-0.27.0-cp312-cp312-win_amd64.whl", hash = "sha256:8a06aa1197ec0281eb1d7daf6073e199eb832fe591ffa329b88bae28f25f5fe5", size = 233235, upload-time = "2025-08-07T08:24:01.846Z" }, + { url = "https://files.pythonhosted.org/packages/c9/b0/e177aa9f39cbab060f96de4a09df77d494f0279604dc2f509263e21b05f9/rpds_py-0.27.0-cp312-cp312-win_arm64.whl", hash = "sha256:e14aab02258cb776a108107bd15f5b5e4a1bbaa61ef33b36693dfab6f89d54f9", size = 223315, upload-time = "2025-08-07T08:24:03.337Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/d2/dfdfd42565a923b9e5a29f93501664f5b984a802967d48d49200ad71be36/rpds_py-0.27.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:443d239d02d9ae55b74015234f2cd8eb09e59fbba30bf60baeb3123ad4c6d5ff", size = 362133, upload-time = "2025-08-07T08:24:04.508Z" }, + { url = "https://files.pythonhosted.org/packages/ac/4a/0a2e2460c4b66021d349ce9f6331df1d6c75d7eea90df9785d333a49df04/rpds_py-0.27.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b8a7acf04fda1f30f1007f3cc96d29d8cf0a53e626e4e1655fdf4eabc082d367", size = 347128, upload-time = "2025-08-07T08:24:05.695Z" }, + { url = "https://files.pythonhosted.org/packages/35/8d/7d1e4390dfe09d4213b3175a3f5a817514355cb3524593380733204f20b9/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9d0f92b78cfc3b74a42239fdd8c1266f4715b573204c234d2f9fc3fc7a24f185", size = 384027, upload-time = "2025-08-07T08:24:06.841Z" }, + { url = "https://files.pythonhosted.org/packages/c1/65/78499d1a62172891c8cd45de737b2a4b84a414b6ad8315ab3ac4945a5b61/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ce4ed8e0c7dbc5b19352b9c2c6131dd23b95fa8698b5cdd076307a33626b72dc", size = 399973, upload-time = "2025-08-07T08:24:08.143Z" }, + { url = "https://files.pythonhosted.org/packages/10/a1/1c67c1d8cc889107b19570bb01f75cf49852068e95e6aee80d22915406fc/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fde355b02934cc6b07200cc3b27ab0c15870a757d1a72fd401aa92e2ea3c6bfe", size = 515295, upload-time = "2025-08-07T08:24:09.711Z" }, + { url = "https://files.pythonhosted.org/packages/df/27/700ec88e748436b6c7c4a2262d66e80f8c21ab585d5e98c45e02f13f21c0/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13bbc4846ae4c993f07c93feb21a24d8ec637573d567a924b1001e81c8ae80f9", size = 406737, upload-time = "2025-08-07T08:24:11.182Z" }, + { url = "https://files.pythonhosted.org/packages/33/cc/6b0ee8f0ba3f2df2daac1beda17fde5cf10897a7d466f252bd184ef20162/rpds_py-0.27.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:be0744661afbc4099fef7f4e604e7f1ea1be1dd7284f357924af12a705cc7d5c", size = 385898, upload-time = "2025-08-07T08:24:12.798Z" }, + { url = "https://files.pythonhosted.org/packages/e8/7e/c927b37d7d33c0a0ebf249cc268dc2fcec52864c1b6309ecb960497f2285/rpds_py-0.27.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:069e0384a54f427bd65d7fda83b68a90606a3835901aaff42185fcd94f5a9295", size = 405785, upload-time = "2025-08-07T08:24:14.906Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d2/8ed50746d909dcf402af3fa58b83d5a590ed43e07251d6b08fad1a535ba6/rpds_py-0.27.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4bc262ace5a1a7dc3e2eac2fa97b8257ae795389f688b5adf22c5db1e2431c43", size = 419760, upload-time = "2025-08-07T08:24:16.129Z" }, + { url = "https://files.pythonhosted.org/packages/d3/60/2b2071aee781cb3bd49f94d5d35686990b925e9b9f3e3d149235a6f5d5c1/rpds_py-0.27.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2fe6e18e5c8581f0361b35ae575043c7029d0a92cb3429e6e596c2cdde251432", size = 561201, upload-time = "2025-08-07T08:24:17.645Z" }, + { url = "https://files.pythonhosted.org/packages/98/1f/27b67304272521aaea02be293fecedce13fa351a4e41cdb9290576fc6d81/rpds_py-0.27.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d93ebdb82363d2e7bec64eecdc3632b59e84bd270d74fe5be1659f7787052f9b", size = 591021, upload-time = "2025-08-07T08:24:18.999Z" 
}, + { url = "https://files.pythonhosted.org/packages/db/9b/a2fadf823164dd085b1f894be6443b0762a54a7af6f36e98e8fcda69ee50/rpds_py-0.27.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0954e3a92e1d62e83a54ea7b3fdc9efa5d61acef8488a8a3d31fdafbfb00460d", size = 556368, upload-time = "2025-08-07T08:24:20.54Z" }, + { url = "https://files.pythonhosted.org/packages/24/f3/6d135d46a129cda2e3e6d4c5e91e2cc26ea0428c6cf152763f3f10b6dd05/rpds_py-0.27.0-cp313-cp313-win32.whl", hash = "sha256:2cff9bdd6c7b906cc562a505c04a57d92e82d37200027e8d362518df427f96cd", size = 221236, upload-time = "2025-08-07T08:24:22.144Z" }, + { url = "https://files.pythonhosted.org/packages/c5/44/65d7494f5448ecc755b545d78b188440f81da98b50ea0447ab5ebfdf9bd6/rpds_py-0.27.0-cp313-cp313-win_amd64.whl", hash = "sha256:dc79d192fb76fc0c84f2c58672c17bbbc383fd26c3cdc29daae16ce3d927e8b2", size = 232634, upload-time = "2025-08-07T08:24:23.642Z" }, + { url = "https://files.pythonhosted.org/packages/70/d9/23852410fadab2abb611733933401de42a1964ce6600a3badae35fbd573e/rpds_py-0.27.0-cp313-cp313-win_arm64.whl", hash = "sha256:5b3a5c8089eed498a3af23ce87a80805ff98f6ef8f7bdb70bd1b7dae5105f6ac", size = 222783, upload-time = "2025-08-07T08:24:25.098Z" }, + { url = "https://files.pythonhosted.org/packages/15/75/03447917f78512b34463f4ef11066516067099a0c466545655503bed0c77/rpds_py-0.27.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:90fb790138c1a89a2e58c9282fe1089638401f2f3b8dddd758499041bc6e0774", size = 359154, upload-time = "2025-08-07T08:24:26.249Z" }, + { url = "https://files.pythonhosted.org/packages/6b/fc/4dac4fa756451f2122ddaf136e2c6aeb758dc6fdbe9ccc4bc95c98451d50/rpds_py-0.27.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:010c4843a3b92b54373e3d2291a7447d6c3fc29f591772cc2ea0e9f5c1da434b", size = 343909, upload-time = "2025-08-07T08:24:27.405Z" }, + { url = "https://files.pythonhosted.org/packages/7b/81/723c1ed8e6f57ed9d8c0c07578747a2d3d554aaefc1ab89f4e42cfeefa07/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c9ce7a9e967afc0a2af7caa0d15a3e9c1054815f73d6a8cb9225b61921b419bd", size = 379340, upload-time = "2025-08-07T08:24:28.714Z" }, + { url = "https://files.pythonhosted.org/packages/98/16/7e3740413de71818ce1997df82ba5f94bae9fff90c0a578c0e24658e6201/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aa0bf113d15e8abdfee92aa4db86761b709a09954083afcb5bf0f952d6065fdb", size = 391655, upload-time = "2025-08-07T08:24:30.223Z" }, + { url = "https://files.pythonhosted.org/packages/e0/63/2a9f510e124d80660f60ecce07953f3f2d5f0b96192c1365443859b9c87f/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eb91d252b35004a84670dfeafadb042528b19842a0080d8b53e5ec1128e8f433", size = 513017, upload-time = "2025-08-07T08:24:31.446Z" }, + { url = "https://files.pythonhosted.org/packages/2c/4e/cf6ff311d09776c53ea1b4f2e6700b9d43bb4e99551006817ade4bbd6f78/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:db8a6313dbac934193fc17fe7610f70cd8181c542a91382531bef5ed785e5615", size = 402058, upload-time = "2025-08-07T08:24:32.613Z" }, + { url = "https://files.pythonhosted.org/packages/88/11/5e36096d474cb10f2a2d68b22af60a3bc4164fd8db15078769a568d9d3ac/rpds_py-0.27.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ce96ab0bdfcef1b8c371ada2100767ace6804ea35aacce0aef3aeb4f3f499ca8", size = 383474, upload-time = "2025-08-07T08:24:33.767Z" }, + { url = 
"https://files.pythonhosted.org/packages/db/a2/3dff02805b06058760b5eaa6d8cb8db3eb3e46c9e452453ad5fc5b5ad9fe/rpds_py-0.27.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:7451ede3560086abe1aa27dcdcf55cd15c96b56f543fb12e5826eee6f721f858", size = 400067, upload-time = "2025-08-07T08:24:35.021Z" }, + { url = "https://files.pythonhosted.org/packages/67/87/eed7369b0b265518e21ea836456a4ed4a6744c8c12422ce05bce760bb3cf/rpds_py-0.27.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:32196b5a99821476537b3f7732432d64d93a58d680a52c5e12a190ee0135d8b5", size = 412085, upload-time = "2025-08-07T08:24:36.267Z" }, + { url = "https://files.pythonhosted.org/packages/8b/48/f50b2ab2fbb422fbb389fe296e70b7a6b5ea31b263ada5c61377e710a924/rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:a029be818059870664157194e46ce0e995082ac49926f1423c1f058534d2aaa9", size = 555928, upload-time = "2025-08-07T08:24:37.573Z" }, + { url = "https://files.pythonhosted.org/packages/98/41/b18eb51045d06887666c3560cd4bbb6819127b43d758f5adb82b5f56f7d1/rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3841f66c1ffdc6cebce8aed64e36db71466f1dc23c0d9a5592e2a782a3042c79", size = 585527, upload-time = "2025-08-07T08:24:39.391Z" }, + { url = "https://files.pythonhosted.org/packages/be/03/a3dd6470fc76499959b00ae56295b76b4bdf7c6ffc60d62006b1217567e1/rpds_py-0.27.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:42894616da0fc0dcb2ec08a77896c3f56e9cb2f4b66acd76fc8992c3557ceb1c", size = 554211, upload-time = "2025-08-07T08:24:40.6Z" }, + { url = "https://files.pythonhosted.org/packages/bf/d1/ee5fd1be395a07423ac4ca0bcc05280bf95db2b155d03adefeb47d5ebf7e/rpds_py-0.27.0-cp313-cp313t-win32.whl", hash = "sha256:b1fef1f13c842a39a03409e30ca0bf87b39a1e2a305a9924deadb75a43105d23", size = 216624, upload-time = "2025-08-07T08:24:42.204Z" }, + { url = "https://files.pythonhosted.org/packages/1c/94/4814c4c858833bf46706f87349c37ca45e154da7dbbec9ff09f1abeb08cc/rpds_py-0.27.0-cp313-cp313t-win_amd64.whl", hash = "sha256:183f5e221ba3e283cd36fdfbe311d95cd87699a083330b4f792543987167eff1", size = 230007, upload-time = "2025-08-07T08:24:43.329Z" }, + { url = "https://files.pythonhosted.org/packages/0e/a5/8fffe1c7dc7c055aa02df310f9fb71cfc693a4d5ccc5de2d3456ea5fb022/rpds_py-0.27.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:f3cd110e02c5bf17d8fb562f6c9df5c20e73029d587cf8602a2da6c5ef1e32cb", size = 362595, upload-time = "2025-08-07T08:24:44.478Z" }, + { url = "https://files.pythonhosted.org/packages/bc/c7/4e4253fd2d4bb0edbc0b0b10d9f280612ca4f0f990e3c04c599000fe7d71/rpds_py-0.27.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8d0e09cf4863c74106b5265c2c310f36146e2b445ff7b3018a56799f28f39f6f", size = 347252, upload-time = "2025-08-07T08:24:45.678Z" }, + { url = "https://files.pythonhosted.org/packages/f3/c8/3d1a954d30f0174dd6baf18b57c215da03cf7846a9d6e0143304e784cddc/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64f689ab822f9b5eb6dfc69893b4b9366db1d2420f7db1f6a2adf2a9ca15ad64", size = 384886, upload-time = "2025-08-07T08:24:46.86Z" }, + { url = "https://files.pythonhosted.org/packages/e0/52/3c5835f2df389832b28f9276dd5395b5a965cea34226e7c88c8fbec2093c/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e36c80c49853b3ffda7aa1831bf175c13356b210c73128c861f3aa93c3cc4015", size = 399716, upload-time = "2025-08-07T08:24:48.174Z" }, + { url = 
"https://files.pythonhosted.org/packages/40/73/176e46992461a1749686a2a441e24df51ff86b99c2d34bf39f2a5273b987/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6de6a7f622860af0146cb9ee148682ff4d0cea0b8fd3ad51ce4d40efb2f061d0", size = 517030, upload-time = "2025-08-07T08:24:49.52Z" }, + { url = "https://files.pythonhosted.org/packages/79/2a/7266c75840e8c6e70effeb0d38922a45720904f2cd695e68a0150e5407e2/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4045e2fc4b37ec4b48e8907a5819bdd3380708c139d7cc358f03a3653abedb89", size = 408448, upload-time = "2025-08-07T08:24:50.727Z" }, + { url = "https://files.pythonhosted.org/packages/e6/5f/a7efc572b8e235093dc6cf39f4dbc8a7f08e65fdbcec7ff4daeb3585eef1/rpds_py-0.27.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9da162b718b12c4219eeeeb68a5b7552fbc7aadedf2efee440f88b9c0e54b45d", size = 387320, upload-time = "2025-08-07T08:24:52.004Z" }, + { url = "https://files.pythonhosted.org/packages/a2/eb/9ff6bc92efe57cf5a2cb74dee20453ba444b6fdc85275d8c99e0d27239d1/rpds_py-0.27.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:0665be515767dc727ffa5f74bd2ef60b0ff85dad6bb8f50d91eaa6b5fb226f51", size = 407414, upload-time = "2025-08-07T08:24:53.664Z" }, + { url = "https://files.pythonhosted.org/packages/fb/bd/3b9b19b00d5c6e1bd0f418c229ab0f8d3b110ddf7ec5d9d689ef783d0268/rpds_py-0.27.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:203f581accef67300a942e49a37d74c12ceeef4514874c7cede21b012613ca2c", size = 420766, upload-time = "2025-08-07T08:24:55.917Z" }, + { url = "https://files.pythonhosted.org/packages/17/6b/521a7b1079ce16258c70805166e3ac6ec4ee2139d023fe07954dc9b2d568/rpds_py-0.27.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7873b65686a6471c0037139aa000d23fe94628e0daaa27b6e40607c90e3f5ec4", size = 562409, upload-time = "2025-08-07T08:24:57.17Z" }, + { url = "https://files.pythonhosted.org/packages/8b/bf/65db5bfb14ccc55e39de8419a659d05a2a9cd232f0a699a516bb0991da7b/rpds_py-0.27.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:249ab91ceaa6b41abc5f19513cb95b45c6f956f6b89f1fe3d99c81255a849f9e", size = 590793, upload-time = "2025-08-07T08:24:58.388Z" }, + { url = "https://files.pythonhosted.org/packages/db/b8/82d368b378325191ba7aae8f40f009b78057b598d4394d1f2cdabaf67b3f/rpds_py-0.27.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d2f184336bc1d6abfaaa1262ed42739c3789b1e3a65a29916a615307d22ffd2e", size = 558178, upload-time = "2025-08-07T08:24:59.756Z" }, + { url = "https://files.pythonhosted.org/packages/f6/ff/f270bddbfbc3812500f8131b1ebbd97afd014cd554b604a3f73f03133a36/rpds_py-0.27.0-cp314-cp314-win32.whl", hash = "sha256:d3c622c39f04d5751408f5b801ecb527e6e0a471b367f420a877f7a660d583f6", size = 222355, upload-time = "2025-08-07T08:25:01.027Z" }, + { url = "https://files.pythonhosted.org/packages/bf/20/fdab055b1460c02ed356a0e0b0a78c1dd32dc64e82a544f7b31c9ac643dc/rpds_py-0.27.0-cp314-cp314-win_amd64.whl", hash = "sha256:cf824aceaeffff029ccfba0da637d432ca71ab21f13e7f6f5179cd88ebc77a8a", size = 234007, upload-time = "2025-08-07T08:25:02.268Z" }, + { url = "https://files.pythonhosted.org/packages/4d/a8/694c060005421797a3be4943dab8347c76c2b429a9bef68fb2c87c9e70c7/rpds_py-0.27.0-cp314-cp314-win_arm64.whl", hash = "sha256:86aca1616922b40d8ac1b3073a1ead4255a2f13405e5700c01f7c8d29a03972d", size = 223527, upload-time = "2025-08-07T08:25:03.45Z" }, + { url = 
"https://files.pythonhosted.org/packages/1e/f9/77f4c90f79d2c5ca8ce6ec6a76cb4734ee247de6b3a4f337e289e1f00372/rpds_py-0.27.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:341d8acb6724c0c17bdf714319c393bb27f6d23d39bc74f94221b3e59fc31828", size = 359469, upload-time = "2025-08-07T08:25:04.648Z" }, + { url = "https://files.pythonhosted.org/packages/c0/22/b97878d2f1284286fef4172069e84b0b42b546ea7d053e5fb7adb9ac6494/rpds_py-0.27.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6b96b0b784fe5fd03beffff2b1533dc0d85e92bab8d1b2c24ef3a5dc8fac5669", size = 343960, upload-time = "2025-08-07T08:25:05.863Z" }, + { url = "https://files.pythonhosted.org/packages/b1/b0/dfd55b5bb480eda0578ae94ef256d3061d20b19a0f5e18c482f03e65464f/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0c431bfb91478d7cbe368d0a699978050d3b112d7f1d440a41e90faa325557fd", size = 380201, upload-time = "2025-08-07T08:25:07.513Z" }, + { url = "https://files.pythonhosted.org/packages/28/22/e1fa64e50d58ad2b2053077e3ec81a979147c43428de9e6de68ddf6aff4e/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:20e222a44ae9f507d0f2678ee3dd0c45ec1e930f6875d99b8459631c24058aec", size = 392111, upload-time = "2025-08-07T08:25:09.149Z" }, + { url = "https://files.pythonhosted.org/packages/49/f9/43ab7a43e97aedf6cea6af70fdcbe18abbbc41d4ae6cdec1bfc23bbad403/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:184f0d7b342967f6cda94a07d0e1fae177d11d0b8f17d73e06e36ac02889f303", size = 515863, upload-time = "2025-08-07T08:25:10.431Z" }, + { url = "https://files.pythonhosted.org/packages/38/9b/9bd59dcc636cd04d86a2d20ad967770bf348f5eb5922a8f29b547c074243/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a00c91104c173c9043bc46f7b30ee5e6d2f6b1149f11f545580f5d6fdff42c0b", size = 402398, upload-time = "2025-08-07T08:25:11.819Z" }, + { url = "https://files.pythonhosted.org/packages/71/bf/f099328c6c85667aba6b66fa5c35a8882db06dcd462ea214be72813a0dd2/rpds_py-0.27.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7a37dd208f0d658e0487522078b1ed68cd6bce20ef4b5a915d2809b9094b410", size = 384665, upload-time = "2025-08-07T08:25:13.194Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c5/9c1f03121ece6634818490bd3c8be2c82a70928a19de03467fb25a3ae2a8/rpds_py-0.27.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:92f3b3ec3e6008a1fe00b7c0946a170f161ac00645cde35e3c9a68c2475e8156", size = 400405, upload-time = "2025-08-07T08:25:14.417Z" }, + { url = "https://files.pythonhosted.org/packages/b5/b8/e25d54af3e63ac94f0c16d8fe143779fe71ff209445a0c00d0f6984b6b2c/rpds_py-0.27.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a1b3db5fae5cbce2131b7420a3f83553d4d89514c03d67804ced36161fe8b6b2", size = 413179, upload-time = "2025-08-07T08:25:15.664Z" }, + { url = "https://files.pythonhosted.org/packages/f9/d1/406b3316433fe49c3021546293a04bc33f1478e3ec7950215a7fce1a1208/rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5355527adaa713ab693cbce7c1e0ec71682f599f61b128cf19d07e5c13c9b1f1", size = 556895, upload-time = "2025-08-07T08:25:17.061Z" }, + { url = "https://files.pythonhosted.org/packages/5f/bc/3697c0c21fcb9a54d46ae3b735eb2365eea0c2be076b8f770f98e07998de/rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:fcc01c57ce6e70b728af02b2401c5bc853a9e14eb07deda30624374f0aebfe42", size = 585464, upload-time = 
"2025-08-07T08:25:18.406Z" }, + { url = "https://files.pythonhosted.org/packages/63/09/ee1bb5536f99f42c839b177d552f6114aa3142d82f49cef49261ed28dbe0/rpds_py-0.27.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:3001013dae10f806380ba739d40dee11db1ecb91684febb8406a87c2ded23dae", size = 555090, upload-time = "2025-08-07T08:25:20.461Z" }, + { url = "https://files.pythonhosted.org/packages/7d/2c/363eada9e89f7059199d3724135a86c47082cbf72790d6ba2f336d146ddb/rpds_py-0.27.0-cp314-cp314t-win32.whl", hash = "sha256:0f401c369186a5743694dd9fc08cba66cf70908757552e1f714bfc5219c655b5", size = 218001, upload-time = "2025-08-07T08:25:21.761Z" }, + { url = "https://files.pythonhosted.org/packages/e2/3f/d6c216ed5199c9ef79e2a33955601f454ed1e7420a93b89670133bca5ace/rpds_py-0.27.0-cp314-cp314t-win_amd64.whl", hash = "sha256:8a1dca5507fa1337f75dcd5070218b20bc68cf8844271c923c1b79dfcbc20391", size = 230993, upload-time = "2025-08-07T08:25:23.34Z" }, +] + +[[package]] +name = "ruff" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054, upload-time = "2025-06-05T21:00:15.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516, upload-time = "2025-06-05T20:59:32.944Z" }, + { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083, upload-time = "2025-06-05T20:59:37.03Z" }, + { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024, upload-time = "2025-06-05T20:59:39.741Z" }, + { url = "https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324, upload-time = "2025-06-05T20:59:42.185Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416, upload-time = "2025-06-05T20:59:44.319Z" }, + { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197, upload-time = "2025-06-05T20:59:46.935Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615, 
upload-time = "2025-06-05T20:59:49.534Z" }, + { url = "https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080, upload-time = "2025-06-05T20:59:51.654Z" }, + { url = "https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315, upload-time = "2025-06-05T20:59:54.469Z" }, + { url = "https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640, upload-time = "2025-06-05T20:59:56.986Z" }, + { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364, upload-time = "2025-06-05T20:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462, upload-time = "2025-06-05T21:00:01.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028, upload-time = "2025-06-05T21:00:04.06Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992, upload-time = "2025-06-05T21:00:06.249Z" }, + { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944, upload-time = "2025-06-05T21:00:08.459Z" }, + { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669, upload-time = "2025-06-05T21:00:11.147Z" }, + { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928, upload-time = "2025-06-05T21:00:13.758Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/05/d52bf1e65044b4e5e27d4e63e8d1579dbdec54fce685908ae09bc3720030/s3transfer-0.13.1.tar.gz", hash = 
"sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf", size = 150589, upload-time = "2025-07-18T19:22:42.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/4f/d073e09df851cfa251ef7840007d04db3293a0482ce607d2b993926089be/s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724", size = 85308, upload-time = "2025-07-18T19:22:40.947Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "slack-sdk" +version = "3.36.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/1e/bbf7fdd00306f097ddb839c23628b7e271128cc8f584b9cae8f704b3924e/slack_sdk-3.36.0.tar.gz", hash = "sha256:8586022bdbdf9f8f8d32f394540436c53b1e7c8da9d21e1eab4560ba70cfcffa", size = 233382, upload-time = "2025-07-09T20:58:22.838Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/9a/380d20856d9ea39fbc4d3bb66f076b0d72035ebe873eb05fc88ebee4125f/slack_sdk-3.36.0-py2.py3-none-any.whl", hash = "sha256:6c96887d7175fc1b0b2777b73bb65f39b5b8bee9bd8acfec071d64014f9e2d10", size = 293949, upload-time = "2025-07-09T20:58:21.233Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "solana" +version = "0.36.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "construct-typing" }, + { name = "httpx" }, + { name = "solders" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8c/e0/ce762b6763e3a0f8a5ccecbf695d65ef54b6f874ad5f58ce5cdcaba224f1/solana-0.36.9.tar.gz", hash = "sha256:f702f6177337c67a982909ef54ef3abce5e795b8cd93edb045bedfa4d13c20c5", size = 52722, upload-time = "2025-08-09T16:23:25.307Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/11/d5e5d02200ca85b615da39078806b377156b67b2093c8bc08a1b9c293070/solana-0.36.9-py3-none-any.whl", hash = "sha256:e05824f91f95abe5a687914976e8bc78986386156f2106108c696db998c3c542", size = 62882, upload-time = "2025-08-09T16:23:24.149Z" }, +] + +[[package]] +name = "solders" +version = "0.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonalias" }, + { name 
= "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/87/96/23ad2e43e2676b78834064fe051e3db3ce1899336ecd4797f92fcd06113a/solders-0.26.0.tar.gz", hash = "sha256:057533892d6fa432c1ce1e2f5e3428802964666c10b57d3d1bcaab86295f046c", size = 181123, upload-time = "2025-02-18T19:23:57.734Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/ce/58bbb4d2c696e770cdd37e5f6dc2891ef7610c0c085bf400f9c42dcff1ad/solders-0.26.0-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:9c1a0ef5daa1a05934af5fb6e7e32eab7c42cede406c80067fee006f461ffc4a", size = 24344472, upload-time = "2025-02-18T19:23:30.273Z" }, + { url = "https://files.pythonhosted.org/packages/5a/35/221cec0e5900c2202833e7e9110c3405a2d96ed25e110b247f88b8782e29/solders-0.26.0-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b964efbd7c0b38aef3bf4293ea5938517ae649b9a23e7cd147d889931775aab", size = 6674734, upload-time = "2025-02-18T19:23:35.15Z" }, + { url = "https://files.pythonhosted.org/packages/41/33/d17b7dbc92672351d59fc65cdb93b8924fc682deba09f6d96f25440187ae/solders-0.26.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e6a769c5298b887b7588edb171d93709a89302aef75913fe893d11c653739d", size = 13472961, upload-time = "2025-02-18T19:23:38.582Z" }, + { url = "https://files.pythonhosted.org/packages/bb/e7/533367d815ab000587ccc37d89e154132f63347f02dcaaac5df72bd851de/solders-0.26.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b3cc55b971ec6ed1b4466fa7e7e09eee9baba492b8cd9e3204e3e1a0c5a0c4aa", size = 6886198, upload-time = "2025-02-18T19:23:41.453Z" }, + { url = "https://files.pythonhosted.org/packages/52/e0/ab41ab3df5fdf3b0e55613be93a43c2fe58b15a6ea8ceca26d3fba02e3c6/solders-0.26.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e3973074c17265921c70246a17bcf80972c5b96a3e1ed7f5049101f11865092", size = 7319170, upload-time = "2025-02-18T19:23:43.758Z" }, + { url = "https://files.pythonhosted.org/packages/7d/34/5174ce592607e0ac020aff203217f2f113a55eec49af3db12945fea42d89/solders-0.26.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:59b52419452602f697e659199a25acacda8365971c376ef3c0687aecdd929e07", size = 7134977, upload-time = "2025-02-18T19:23:46.157Z" }, + { url = "https://files.pythonhosted.org/packages/ba/5e/822faabda0d473c29bdf59fe8869a411fd436af8ca6f5d6e89f7513f682f/solders-0.26.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5946ec3f2a340afa9ce5c2b8ab628ae1dea2ad2235551b1297cafdd7e3e5c51a", size = 6984222, upload-time = "2025-02-18T19:23:49.429Z" }, + { url = "https://files.pythonhosted.org/packages/23/e8/dc992f677762ea2de44b7768120d95887ef39fab10d6f29fb53e6a9882c1/solders-0.26.0-cp37-abi3-win_amd64.whl", hash = "sha256:5466616610170aab08c627ae01724e425bcf90085bc574da682e9f3bd954900b", size = 5480492, upload-time = "2025-02-18T19:23:53.285Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.43" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "(python_full_version < '3.14' and platform_machine == 'AMD64') or (python_full_version < '3.14' and platform_machine == 'WIN32') or (python_full_version < '3.14' and platform_machine == 'aarch64') or (python_full_version < '3.14' and platform_machine == 'amd64') or (python_full_version < '3.14' and platform_machine == 'ppc64le') or (python_full_version < '3.14' and platform_machine == 'win32') or (python_full_version < '3.14' and platform_machine == 'x86_64')" }, + { name = 
"typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/bc/d59b5d97d27229b0e009bd9098cd81af71c2fa5549c580a0a67b9bed0496/sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417", size = 9762949, upload-time = "2025-08-11T14:24:58.438Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/db/20c78f1081446095450bdc6ee6cc10045fce67a8e003a5876b6eaafc5cc4/sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24", size = 2134891, upload-time = "2025-08-11T15:51:13.019Z" }, + { url = "https://files.pythonhosted.org/packages/45/0a/3d89034ae62b200b4396f0f95319f7d86e9945ee64d2343dcad857150fa2/sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83", size = 2123061, upload-time = "2025-08-11T15:51:14.319Z" }, + { url = "https://files.pythonhosted.org/packages/cb/10/2711f7ff1805919221ad5bee205971254845c069ee2e7036847103ca1e4c/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9", size = 3320384, upload-time = "2025-08-11T15:52:35.088Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0e/3d155e264d2ed2778484006ef04647bc63f55b3e2d12e6a4f787747b5900/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48", size = 3329648, upload-time = "2025-08-11T15:56:34.153Z" }, + { url = "https://files.pythonhosted.org/packages/5b/81/635100fb19725c931622c673900da5efb1595c96ff5b441e07e3dd61f2be/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687", size = 3258030, upload-time = "2025-08-11T15:52:36.933Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ed/a99302716d62b4965fded12520c1cbb189f99b17a6d8cf77611d21442e47/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe", size = 3294469, upload-time = "2025-08-11T15:56:35.553Z" }, + { url = "https://files.pythonhosted.org/packages/5d/a2/3a11b06715149bf3310b55a98b5c1e84a42cfb949a7b800bc75cb4e33abc/sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d", size = 2098906, upload-time = "2025-08-11T15:55:00.645Z" }, + { url = "https://files.pythonhosted.org/packages/bc/09/405c915a974814b90aa591280623adc6ad6b322f61fd5cff80aeaef216c9/sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a", size = 2126260, upload-time = "2025-08-11T15:55:02.965Z" }, + { url = "https://files.pythonhosted.org/packages/41/1c/a7260bd47a6fae7e03768bf66451437b36451143f36b285522b865987ced/sqlalchemy-2.0.43-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e7c08f57f75a2bb62d7ee80a89686a5e5669f199235c6d1dac75cd59374091c3", size = 2130598, upload-time = "2025-08-11T15:51:15.903Z" }, + { url = "https://files.pythonhosted.org/packages/8e/84/8a337454e82388283830b3586ad7847aa9c76fdd4f1df09cdd1f94591873/sqlalchemy-2.0.43-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:14111d22c29efad445cd5021a70a8b42f7d9152d8ba7f73304c4d82460946aaa", size = 2118415, upload-time = "2025-08-11T15:51:17.256Z" }, + { url = 
"https://files.pythonhosted.org/packages/cf/ff/22ab2328148492c4d71899d62a0e65370ea66c877aea017a244a35733685/sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21b27b56eb2f82653168cefe6cb8e970cdaf4f3a6cb2c5e3c3c1cf3158968ff9", size = 3248707, upload-time = "2025-08-11T15:52:38.444Z" }, + { url = "https://files.pythonhosted.org/packages/dc/29/11ae2c2b981de60187f7cbc84277d9d21f101093d1b2e945c63774477aba/sqlalchemy-2.0.43-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c5a9da957c56e43d72126a3f5845603da00e0293720b03bde0aacffcf2dc04f", size = 3253602, upload-time = "2025-08-11T15:56:37.348Z" }, + { url = "https://files.pythonhosted.org/packages/b8/61/987b6c23b12c56d2be451bc70900f67dd7d989d52b1ee64f239cf19aec69/sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d79f9fdc9584ec83d1b3c75e9f4595c49017f5594fee1a2217117647225d738", size = 3183248, upload-time = "2025-08-11T15:52:39.865Z" }, + { url = "https://files.pythonhosted.org/packages/86/85/29d216002d4593c2ce1c0ec2cec46dda77bfbcd221e24caa6e85eff53d89/sqlalchemy-2.0.43-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9df7126fd9db49e3a5a3999442cc67e9ee8971f3cb9644250107d7296cb2a164", size = 3219363, upload-time = "2025-08-11T15:56:39.11Z" }, + { url = "https://files.pythonhosted.org/packages/b6/e4/bd78b01919c524f190b4905d47e7630bf4130b9f48fd971ae1c6225b6f6a/sqlalchemy-2.0.43-cp313-cp313-win32.whl", hash = "sha256:7f1ac7828857fcedb0361b48b9ac4821469f7694089d15550bbcf9ab22564a1d", size = 2096718, upload-time = "2025-08-11T15:55:05.349Z" }, + { url = "https://files.pythonhosted.org/packages/ac/a5/ca2f07a2a201f9497de1928f787926613db6307992fe5cda97624eb07c2f/sqlalchemy-2.0.43-cp313-cp313-win_amd64.whl", hash = "sha256:971ba928fcde01869361f504fcff3b7143b47d30de188b11c6357c0505824197", size = 2123200, upload-time = "2025-08-11T15:55:07.932Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d9/13bdde6521f322861fab67473cec4b1cc8999f3871953531cf61945fad92/sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc", size = 1924759, upload-time = "2025-08-11T15:39:53.024Z" }, +] + +[package.optional-dependencies] +asyncio = [ + { name = "greenlet" }, +] + +[[package]] +name = "sse-starlette" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6f/22ed6e33f8a9e76ca0a412405f31abb844b779d52c5f96660766edcd737c/sse_starlette-3.0.2.tar.gz", hash = "sha256:ccd60b5765ebb3584d0de2d7a6e4f745672581de4f5005ab31c3a25d10b52b3a", size = 20985, upload-time = "2025-07-27T09:07:44.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, +] + +[[package]] +name = "starlette" +version = "0.47.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions", marker = "python_full_version < '3.13'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/04/57/d062573f391d062710d4088fa1369428c38d51460ab6fedff920efef932e/starlette-0.47.2.tar.gz", hash = "sha256:6ae9aa5db235e4846decc1e7b79c4f346adf41e9777aebeb49dfd09bbd7023d8", size = 2583948, upload-time = 
"2025-07-20T17:31:58.522Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f7/1f/b876b1f83aef204198a42dc101613fefccb32258e5428b5f9259677864b4/starlette-0.47.2-py3-none-any.whl", hash = "sha256:c5847e96134e5c5371ee9fac6fdf1a67336d5815e09eb2a01fdb57a351ef915b", size = 72984, upload-time = "2025-07-20T17:31:56.738Z" }, +] + +[[package]] +name = "storage3" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecation" }, + { name = "httpx", extra = ["http2"] }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/e2/280fe75f65e7a3ca680b7843acfc572a63aa41230e3d3c54c66568809c85/storage3-0.12.1.tar.gz", hash = "sha256:32ea8f5eb2f7185c2114a4f6ae66d577722e32503f0a30b56e7ed5c7f13e6b48", size = 10198, upload-time = "2025-08-05T18:09:11.989Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/3b/c5f8709fc5349928e591fee47592eeff78d29a7d75b097f96a4e01de028d/storage3-0.12.1-py3-none-any.whl", hash = "sha256:9da77fd4f406b019fdcba201e9916aefbf615ef87f551253ce427d8136459a34", size = 18420, upload-time = "2025-08-05T18:09:10.365Z" }, +] + +[[package]] +name = "strenum" +version = "0.4.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/85/ad/430fb60d90e1d112a62ff57bdd1f286ec73a2a0331272febfddd21f330e1/StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff", size = 23384, upload-time = "2023-06-29T22:02:58.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/69/297302c5f5f59c862faa31e6cb9a4cd74721cd1e052b38e464c5b402df8b/StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659", size = 8851, upload-time = "2023-06-29T22:02:56.947Z" }, +] + +[[package]] +name = "supabase" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gotrue" }, + { name = "httpx" }, + { name = "postgrest" }, + { name = "realtime" }, + { name = "storage3" }, + { name = "supafunc" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/93/335b91e8d09a95a337f051f84e85495f7732400f10c1bcb698a7571f8f1c/supabase-2.16.0.tar.gz", hash = "sha256:98f3810158012d4ec0e3083f2e5515f5e10b32bd71e7d458662140e963c1d164", size = 14595, upload-time = "2025-06-23T16:09:29.504Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/75/2ab71e6605d20a740ff041c6176a328cfaa3fcee0dd0db885e081d98df06/supabase-2.16.0-py3-none-any.whl", hash = "sha256:99065caab3d90a56650bf39fbd0e49740995da3738ab28706c61bd7f2401db55", size = 17713, upload-time = "2025-06-23T16:09:28.299Z" }, +] + +[[package]] +name = "supafunc" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "strenum" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/a9/cd7c89972d8638f3b658126b2f580fe13bcd7235f8abfbdd9da70ebb2933/supafunc-0.10.2.tar.gz", hash = "sha256:45e4d500854167c261515c43f7a363320e0a928118182fe8932adefddeddb545", size = 5033, upload-time = "2025-08-08T15:58:28.626Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/d3/784314aa18185f97c4b998a0384c7b3c021637a93cef20247f15772f0c84/supafunc-0.10.2-py3-none-any.whl", hash = "sha256:547a2c115b15319c78fc84460f19cb5ea6e72597f7573a3498f4db087787e0fd", size = 8444, upload-time = "2025-08-08T15:58:27.154Z" }, +] + +[[package]] +name = "tenacity" 
+version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "tiktoken" +version = "0.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/86/ad0155a37c4f310935d5ac0b1ccf9bdb635dcb906e0a9a26b616dd55825a/tiktoken-0.11.0.tar.gz", hash = "sha256:3c518641aee1c52247c2b97e74d8d07d780092af79d5911a6ab5e79359d9b06a", size = 37648, upload-time = "2025-08-08T23:58:08.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/9e/eceddeffc169fc75fe0fd4f38471309f11cb1906f9b8aa39be4f5817df65/tiktoken-0.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fd9e6b23e860973cf9526544e220b223c60badf5b62e80a33509d6d40e6c8f5d", size = 1055199, upload-time = "2025-08-08T23:57:45.076Z" }, + { url = "https://files.pythonhosted.org/packages/4f/cf/5f02bfefffdc6b54e5094d2897bc80efd43050e5b09b576fd85936ee54bf/tiktoken-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a76d53cee2da71ee2731c9caa747398762bda19d7f92665e882fef229cb0b5b", size = 996655, upload-time = "2025-08-08T23:57:46.304Z" }, + { url = "https://files.pythonhosted.org/packages/65/8e/c769b45ef379bc360c9978c4f6914c79fd432400a6733a8afc7ed7b0726a/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef72aab3ea240646e642413cb363b73869fed4e604dcfd69eec63dc54d603e8", size = 1128867, upload-time = "2025-08-08T23:57:47.438Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2d/4d77f6feb9292bfdd23d5813e442b3bba883f42d0ac78ef5fdc56873f756/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f929255c705efec7a28bf515e29dc74220b2f07544a8c81b8d69e8efc4578bd", size = 1183308, upload-time = "2025-08-08T23:57:48.566Z" }, + { url = "https://files.pythonhosted.org/packages/7a/65/7ff0a65d3bb0fc5a1fb6cc71b03e0f6e71a68c5eea230d1ff1ba3fd6df49/tiktoken-0.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61f1d15822e4404953d499fd1dcc62817a12ae9fb1e4898033ec8fe3915fdf8e", size = 1244301, upload-time = "2025-08-08T23:57:49.642Z" }, + { url = "https://files.pythonhosted.org/packages/f5/6e/5b71578799b72e5bdcef206a214c3ce860d999d579a3b56e74a6c8989ee2/tiktoken-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:45927a71ab6643dfd3ef57d515a5db3d199137adf551f66453be098502838b0f", size = 884282, upload-time = "2025-08-08T23:57:50.759Z" }, + { url = "https://files.pythonhosted.org/packages/cc/cd/a9034bcee638716d9310443818d73c6387a6a96db93cbcb0819b77f5b206/tiktoken-0.11.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a5f3f25ffb152ee7fec78e90a5e5ea5b03b4ea240beed03305615847f7a6ace2", size = 1055339, upload-time = "2025-08-08T23:57:51.802Z" }, + { url = "https://files.pythonhosted.org/packages/f1/91/9922b345f611b4e92581f234e64e9661e1c524875c8eadd513c4b2088472/tiktoken-0.11.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:7dc6e9ad16a2a75b4c4be7208055a1f707c9510541d94d9cc31f7fbdc8db41d8", size = 997080, upload-time = "2025-08-08T23:57:53.442Z" }, + { url = "https://files.pythonhosted.org/packages/d0/9d/49cd047c71336bc4b4af460ac213ec1c457da67712bde59b892e84f1859f/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a0517634d67a8a48fd4a4ad73930c3022629a85a217d256a6e9b8b47439d1e4", size = 1128501, upload-time = "2025-08-08T23:57:54.808Z" }, + { url = "https://files.pythonhosted.org/packages/52/d5/a0dcdb40dd2ea357e83cb36258967f0ae96f5dd40c722d6e382ceee6bba9/tiktoken-0.11.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fb4effe60574675118b73c6fbfd3b5868e5d7a1f570d6cc0d18724b09ecf318", size = 1182743, upload-time = "2025-08-08T23:57:56.307Z" }, + { url = "https://files.pythonhosted.org/packages/3b/17/a0fc51aefb66b7b5261ca1314afa83df0106b033f783f9a7bcbe8e741494/tiktoken-0.11.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94f984c9831fd32688aef4348803b0905d4ae9c432303087bae370dc1381a2b8", size = 1244057, upload-time = "2025-08-08T23:57:57.628Z" }, + { url = "https://files.pythonhosted.org/packages/50/79/bcf350609f3a10f09fe4fc207f132085e497fdd3612f3925ab24d86a0ca0/tiktoken-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2177ffda31dec4023356a441793fed82f7af5291120751dee4d696414f54db0c", size = 883901, upload-time = "2025-08-08T23:57:59.359Z" }, +] + +[[package]] +name = "toolz" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/0b/d80dfa675bf592f636d1ea0b835eab4ec8df6e9415d8cfd766df54456123/toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02", size = 66790, upload-time = "2024-10-04T16:17:04.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/98/eb27cc78ad3af8e302c9d8ff4977f5026676e130d28dd7578132a457170c/toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236", size = 56383, upload-time = "2024-10-04T16:17:01.533Z" }, +] + +[[package]] +name = "tox" +version = "4.23.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "chardet" }, + { name = "colorama" }, + { name = "filelock" }, + { name = "packaging" }, + { name = "platformdirs" }, + { name = "pluggy" }, + { name = "pyproject-api" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/86/32b10f91b4b975a37ac402b0f9fa016775088e0565c93602ba0b3c729ce8/tox-4.23.2.tar.gz", hash = "sha256:86075e00e555df6e82e74cfc333917f91ecb47ffbc868dcafbd2672e332f4a2c", size = 189998, upload-time = "2024-10-22T14:29:04.46Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/c0/124b73d01c120e917383bc6c53ebc34efdf7243faa9fca64d105c94cf2ab/tox-4.23.2-py3-none-any.whl", hash = "sha256:452bc32bb031f2282881a2118923176445bac783ab97c874b8770ab4c3b76c38", size = 166758, upload-time = "2024-10-22T14:29:02.087Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +wheels = [ + { url 
= "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, +] + +[[package]] +name = "trustcall" +version = "0.0.39" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dydantic" }, + { name = "jsonpatch" }, + { name = "langgraph" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2b/72/4cdb54a31952827e8b58e11ea286bbfe2d3aa0ffb77a2f87dbc1c7ea77d3/trustcall-0.0.39.tar.gz", hash = "sha256:ec315818224501b9537ce6b7618dbc21be41210c6e8f2e239169a5a00912cd6e", size = 38637, upload-time = "2025-04-14T22:02:50.857Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/3a/58de925a104ce554fc250b833fe76401c7822aa8d65f2002cb53195e6c64/trustcall-0.0.39-py3-none-any.whl", hash = "sha256:d7da42e0bba816c0539b2936dfed90ffb3ea8d789e548e73865d416f8ac4ee64", size = 30073, upload-time = "2025-04-14T22:02:49.402Z" }, +] + +[[package]] +name = "tweepy" +version = "4.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6e/45/a73bb812b1817247d3f79b3b9a4784ab93a081853b697e87428caa8c287b/tweepy-4.16.0.tar.gz", hash = "sha256:1d95cbdc50bf6353a387f881f2584eaf60d14e00dbbdd8872a73de79c66878e3", size = 87646, upload-time = "2025-06-22T01:17:51.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/7c/3375cd1fbefcb8ead580fe324b1b6dcdc21aabf51562ee6def7266fcf363/tweepy-4.16.0-py3-none-any.whl", hash = "sha256:48d1a1eb311d2c4b8990abcfa6f9fa2b2ad61be05c723b1a9b4f242656badae2", size = 98843, upload-time = "2025-06-22T01:17:49.823Z" }, +] + +[package.optional-dependencies] +async = [ + { name = "aiohttp" }, + { name = "async-lru" }, +] + +[[package]] +name = "types-requests" +version = "2.32.4.20250809" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/b0/9355adb86ec84d057fea765e4c49cce592aaf3d5117ce5609a95a7fc3dac/types_requests-2.32.4.20250809.tar.gz", hash = "sha256:d8060de1c8ee599311f56ff58010fb4902f462a1470802cf9f6ed27bc46c4df3", size = 23027, upload-time = "2025-08-09T03:17:10.664Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/6f/ec0012be842b1d888d46884ac5558fd62aeae1f0ec4f7a581433d890d4b5/types_requests-2.32.4.20250809-py3-none-any.whl", hash = "sha256:f73d1832fb519ece02c85b1f09d5f0dd3108938e7d47e7f94bbfa18a6782b163", size = 20644, upload-time = "2025-08-09T03:17:09.716Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, +] + +[[package]] +name = "typing-inspect" 
+version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268, upload-time = "2024-12-22T07:47:30.032Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, +] + +[[package]] +name = "virtualenv" +version = "20.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/39/689abee4adc85aad2af8174bb195a819d0be064bf55fcc73b49d2b28ae77/virtualenv-20.28.1.tar.gz", hash = "sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329", size = 7650532, upload-time = "2025-01-03T01:56:53.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/8f/dfb257ca6b4e27cb990f1631142361e4712badab8e3ca8dc134d96111515/virtualenv-20.28.1-py3-none-any.whl", hash = "sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb", size = 4276719, upload-time = "2025-01-03T01:56:50.498Z" }, +] + +[[package]] +name = "web3" +version = "7.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "eth-abi" }, + { name = "eth-account" }, + { name = "eth-hash", extra = ["pycryptodome"] }, + { name = "eth-typing" }, + { name = "eth-utils" }, + { name = "hexbytes" }, + { name = "pydantic" }, + { name = "pyunormalize" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "types-requests" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/19/c1e213dd87ead2ace55ff1dd179df6050bcf5d9006440c9153969c7d6863/web3-7.10.0.tar.gz", hash = "sha256:0cace05ea14f800a4497649ecd99332ca4e85c8a90ea577e05ae909cb08902b9", size = 2193725, upload-time = "2025-03-27T17:02:27.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/c5/a8e25e3ff51c7cd6d2bdecf75da2afb2923b29eba28e5dfe4fde72ad2322/web3-7.10.0-py3-none-any.whl", hash = "sha256:06fcab920554450e9f7d108da5e6b9d29c0d1a981a59a5551cc82d2cb2233b34", size = 1365880, upload-time = "2025-03-27T17:02:25.04Z" }, +] + +[[package]] +name = "websockets" +version = "14.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/54/8359678c726243d19fae38ca14a334e740782336c9f19700858c4eb64a1e/websockets-14.2.tar.gz", hash = "sha256:5059ed9c54945efb321f097084b4c7e52c246f2c869815876a69d1efc4ad6eb5", size = 164394, upload-time = "2025-01-19T21:00:56.431Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/81/04f7a397653dc8bec94ddc071f34833e8b99b13ef1a3804c149d59f92c18/websockets-14.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1f20522e624d7ffbdbe259c6b6a65d73c895045f76a93719aa10cd93b3de100c", size = 163096, upload-time = "2025-01-19T20:59:29.763Z" }, + { url = "https://files.pythonhosted.org/packages/ec/c5/de30e88557e4d70988ed4d2eabd73fd3e1e52456b9f3a4e9564d86353b6d/websockets-14.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:647b573f7d3ada919fd60e64d533409a79dcf1ea21daeb4542d1d996519ca967", size = 160758, upload-time = "2025-01-19T20:59:32.095Z" }, + { url = "https://files.pythonhosted.org/packages/e5/8c/d130d668781f2c77d106c007b6c6c1d9db68239107c41ba109f09e6c218a/websockets-14.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6af99a38e49f66be5a64b1e890208ad026cda49355661549c507152113049990", size = 160995, 
upload-time = "2025-01-19T20:59:33.527Z" }, + { url = "https://files.pythonhosted.org/packages/a6/bc/f6678a0ff17246df4f06765e22fc9d98d1b11a258cc50c5968b33d6742a1/websockets-14.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:091ab63dfc8cea748cc22c1db2814eadb77ccbf82829bac6b2fbe3401d548eda", size = 170815, upload-time = "2025-01-19T20:59:35.837Z" }, + { url = "https://files.pythonhosted.org/packages/d8/b2/8070cb970c2e4122a6ef38bc5b203415fd46460e025652e1ee3f2f43a9a3/websockets-14.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b374e8953ad477d17e4851cdc66d83fdc2db88d9e73abf755c94510ebddceb95", size = 169759, upload-time = "2025-01-19T20:59:38.216Z" }, + { url = "https://files.pythonhosted.org/packages/81/da/72f7caabd94652e6eb7e92ed2d3da818626e70b4f2b15a854ef60bf501ec/websockets-14.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a39d7eceeea35db85b85e1169011bb4321c32e673920ae9c1b6e0978590012a3", size = 170178, upload-time = "2025-01-19T20:59:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/31/e0/812725b6deca8afd3a08a2e81b3c4c120c17f68c9b84522a520b816cda58/websockets-14.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0a6f3efd47ffd0d12080594f434faf1cd2549b31e54870b8470b28cc1d3817d9", size = 170453, upload-time = "2025-01-19T20:59:41.996Z" }, + { url = "https://files.pythonhosted.org/packages/66/d3/8275dbc231e5ba9bb0c4f93144394b4194402a7a0c8ffaca5307a58ab5e3/websockets-14.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:065ce275e7c4ffb42cb738dd6b20726ac26ac9ad0a2a48e33ca632351a737267", size = 169830, upload-time = "2025-01-19T20:59:44.669Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ae/e7d1a56755ae15ad5a94e80dd490ad09e345365199600b2629b18ee37bc7/websockets-14.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e9d0e53530ba7b8b5e389c02282f9d2aa47581514bd6049d3a7cffe1385cf5fe", size = 169824, upload-time = "2025-01-19T20:59:46.932Z" }, + { url = "https://files.pythonhosted.org/packages/b6/32/88ccdd63cb261e77b882e706108d072e4f1c839ed723bf91a3e1f216bf60/websockets-14.2-cp312-cp312-win32.whl", hash = "sha256:20e6dd0984d7ca3037afcb4494e48c74ffb51e8013cac71cf607fffe11df7205", size = 163981, upload-time = "2025-01-19T20:59:49.228Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7d/32cdb77990b3bdc34a306e0a0f73a1275221e9a66d869f6ff833c95b56ef/websockets-14.2-cp312-cp312-win_amd64.whl", hash = "sha256:44bba1a956c2c9d268bdcdf234d5e5ff4c9b6dc3e300545cbe99af59dda9dcce", size = 164421, upload-time = "2025-01-19T20:59:50.674Z" }, + { url = "https://files.pythonhosted.org/packages/82/94/4f9b55099a4603ac53c2912e1f043d6c49d23e94dd82a9ce1eb554a90215/websockets-14.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6f1372e511c7409a542291bce92d6c83320e02c9cf392223272287ce55bc224e", size = 163102, upload-time = "2025-01-19T20:59:52.177Z" }, + { url = "https://files.pythonhosted.org/packages/8e/b7/7484905215627909d9a79ae07070057afe477433fdacb59bf608ce86365a/websockets-14.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4da98b72009836179bb596a92297b1a61bb5a830c0e483a7d0766d45070a08ad", size = 160766, upload-time = "2025-01-19T20:59:54.368Z" }, + { url = "https://files.pythonhosted.org/packages/a3/a4/edb62efc84adb61883c7d2c6ad65181cb087c64252138e12d655989eec05/websockets-14.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8a86a269759026d2bde227652b87be79f8a734e582debf64c9d302faa1e9f03", size 
= 160998, upload-time = "2025-01-19T20:59:56.671Z" }, + { url = "https://files.pythonhosted.org/packages/f5/79/036d320dc894b96af14eac2529967a6fc8b74f03b83c487e7a0e9043d842/websockets-14.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86cf1aaeca909bf6815ea714d5c5736c8d6dd3a13770e885aafe062ecbd04f1f", size = 170780, upload-time = "2025-01-19T20:59:58.085Z" }, + { url = "https://files.pythonhosted.org/packages/63/75/5737d21ee4dd7e4b9d487ee044af24a935e36a9ff1e1419d684feedcba71/websockets-14.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9b0f6c3ba3b1240f602ebb3971d45b02cc12bd1845466dd783496b3b05783a5", size = 169717, upload-time = "2025-01-19T20:59:59.545Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3c/bf9b2c396ed86a0b4a92ff4cdaee09753d3ee389be738e92b9bbd0330b64/websockets-14.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669c3e101c246aa85bc8534e495952e2ca208bd87994650b90a23d745902db9a", size = 170155, upload-time = "2025-01-19T21:00:01.887Z" }, + { url = "https://files.pythonhosted.org/packages/75/2d/83a5aca7247a655b1da5eb0ee73413abd5c3a57fc8b92915805e6033359d/websockets-14.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:eabdb28b972f3729348e632ab08f2a7b616c7e53d5414c12108c29972e655b20", size = 170495, upload-time = "2025-01-19T21:00:04.064Z" }, + { url = "https://files.pythonhosted.org/packages/79/dd/699238a92761e2f943885e091486378813ac8f43e3c84990bc394c2be93e/websockets-14.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2066dc4cbcc19f32c12a5a0e8cc1b7ac734e5b64ac0a325ff8353451c4b15ef2", size = 169880, upload-time = "2025-01-19T21:00:05.695Z" }, + { url = "https://files.pythonhosted.org/packages/c8/c9/67a8f08923cf55ce61aadda72089e3ed4353a95a3a4bc8bf42082810e580/websockets-14.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ab95d357cd471df61873dadf66dd05dd4709cae001dd6342edafc8dc6382f307", size = 169856, upload-time = "2025-01-19T21:00:07.192Z" }, + { url = "https://files.pythonhosted.org/packages/17/b1/1ffdb2680c64e9c3921d99db460546194c40d4acbef999a18c37aa4d58a3/websockets-14.2-cp313-cp313-win32.whl", hash = "sha256:a9e72fb63e5f3feacdcf5b4ff53199ec8c18d66e325c34ee4c551ca748623bbc", size = 163974, upload-time = "2025-01-19T21:00:08.698Z" }, + { url = "https://files.pythonhosted.org/packages/14/13/8b7fc4cb551b9cfd9890f0fd66e53c18a06240319915533b033a56a3d520/websockets-14.2-cp313-cp313-win_amd64.whl", hash = "sha256:b439ea828c4ba99bb3176dc8d9b933392a2413c0f6b149fdcba48393f573377f", size = 164420, upload-time = "2025-01-19T21:00:10.182Z" }, + { url = "https://files.pythonhosted.org/packages/7b/c8/d529f8a32ce40d98309f4470780631e971a5a842b60aec864833b3615786/websockets-14.2-py3-none-any.whl", hash = "sha256:7a6ceec4ea84469f15cf15807a747e9efe57e369c384fa86e022b3bea679b79b", size = 157416, upload-time = "2025-01-19T21:00:54.843Z" }, +] + +[[package]] +name = "xxhash" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/5e/d6e5258d69df8b4ed8c83b6664f2b47d30d2dec551a29ad72a6c69eafd31/xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f", size = 84241, upload-time = "2024-08-17T09:20:38.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/0e/1bfce2502c57d7e2e787600b31c83535af83746885aa1a5f153d8c8059d6/xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = 
"sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00", size = 31969, upload-time = "2024-08-17T09:18:24.025Z" }, + { url = "https://files.pythonhosted.org/packages/3f/d6/8ca450d6fe5b71ce521b4e5db69622383d039e2b253e9b2f24f93265b52c/xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9", size = 30787, upload-time = "2024-08-17T09:18:25.318Z" }, + { url = "https://files.pythonhosted.org/packages/5b/84/de7c89bc6ef63d750159086a6ada6416cc4349eab23f76ab870407178b93/xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84", size = 220959, upload-time = "2024-08-17T09:18:26.518Z" }, + { url = "https://files.pythonhosted.org/packages/fe/86/51258d3e8a8545ff26468c977101964c14d56a8a37f5835bc0082426c672/xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793", size = 200006, upload-time = "2024-08-17T09:18:27.905Z" }, + { url = "https://files.pythonhosted.org/packages/02/0a/96973bd325412feccf23cf3680fd2246aebf4b789122f938d5557c54a6b2/xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be", size = 428326, upload-time = "2024-08-17T09:18:29.335Z" }, + { url = "https://files.pythonhosted.org/packages/11/a7/81dba5010f7e733de88af9555725146fc133be97ce36533867f4c7e75066/xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6", size = 194380, upload-time = "2024-08-17T09:18:30.706Z" }, + { url = "https://files.pythonhosted.org/packages/fb/7d/f29006ab398a173f4501c0e4977ba288f1c621d878ec217b4ff516810c04/xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90", size = 207934, upload-time = "2024-08-17T09:18:32.133Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6e/6e88b8f24612510e73d4d70d9b0c7dff62a2e78451b9f0d042a5462c8d03/xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27", size = 216301, upload-time = "2024-08-17T09:18:33.474Z" }, + { url = "https://files.pythonhosted.org/packages/af/51/7862f4fa4b75a25c3b4163c8a873f070532fe5f2d3f9b3fc869c8337a398/xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2", size = 203351, upload-time = "2024-08-17T09:18:34.889Z" }, + { url = "https://files.pythonhosted.org/packages/22/61/8d6a40f288f791cf79ed5bb113159abf0c81d6efb86e734334f698eb4c59/xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d", size = 210294, upload-time = "2024-08-17T09:18:36.355Z" }, + { url = "https://files.pythonhosted.org/packages/17/02/215c4698955762d45a8158117190261b2dbefe9ae7e5b906768c09d8bc74/xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab", size = 414674, upload-time = "2024-08-17T09:18:38.536Z" }, + { url = "https://files.pythonhosted.org/packages/31/5c/b7a8db8a3237cff3d535261325d95de509f6a8ae439a5a7a4ffcff478189/xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", 
hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e", size = 192022, upload-time = "2024-08-17T09:18:40.138Z" }, + { url = "https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170, upload-time = "2024-08-17T09:18:42.163Z" }, + { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040, upload-time = "2024-08-17T09:18:43.699Z" }, + { url = "https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796, upload-time = "2024-08-17T09:18:45.29Z" }, + { url = "https://files.pythonhosted.org/packages/c9/b8/e4b3ad92d249be5c83fa72916c9091b0965cb0faeff05d9a0a3870ae6bff/xxhash-3.5.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37889a0d13b0b7d739cfc128b1c902f04e32de17b33d74b637ad42f1c55101f6", size = 31795, upload-time = "2024-08-17T09:18:46.813Z" }, + { url = "https://files.pythonhosted.org/packages/fc/d8/b3627a0aebfbfa4c12a41e22af3742cf08c8ea84f5cc3367b5de2d039cce/xxhash-3.5.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:97a662338797c660178e682f3bc180277b9569a59abfb5925e8620fba00b9fc5", size = 30792, upload-time = "2024-08-17T09:18:47.862Z" }, + { url = "https://files.pythonhosted.org/packages/c3/cc/762312960691da989c7cd0545cb120ba2a4148741c6ba458aa723c00a3f8/xxhash-3.5.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f85e0108d51092bdda90672476c7d909c04ada6923c14ff9d913c4f7dc8a3bc", size = 220950, upload-time = "2024-08-17T09:18:49.06Z" }, + { url = "https://files.pythonhosted.org/packages/fe/e9/cc266f1042c3c13750e86a535496b58beb12bf8c50a915c336136f6168dc/xxhash-3.5.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cd2fd827b0ba763ac919440042302315c564fdb797294d86e8cdd4578e3bc7f3", size = 199980, upload-time = "2024-08-17T09:18:50.445Z" }, + { url = "https://files.pythonhosted.org/packages/bf/85/a836cd0dc5cc20376de26b346858d0ac9656f8f730998ca4324921a010b9/xxhash-3.5.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:82085c2abec437abebf457c1d12fccb30cc8b3774a0814872511f0f0562c768c", size = 428324, upload-time = "2024-08-17T09:18:51.988Z" }, + { url = "https://files.pythonhosted.org/packages/b4/0e/15c243775342ce840b9ba34aceace06a1148fa1630cd8ca269e3223987f5/xxhash-3.5.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07fda5de378626e502b42b311b049848c2ef38784d0d67b6f30bb5008642f8eb", size = 194370, upload-time = "2024-08-17T09:18:54.164Z" }, + { url = "https://files.pythonhosted.org/packages/87/a1/b028bb02636dfdc190da01951d0703b3d904301ed0ef6094d948983bef0e/xxhash-3.5.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c279f0d2b34ef15f922b77966640ade58b4ccdfef1c4d94b20f2a364617a493f", size = 207911, upload-time = "2024-08-17T09:18:55.509Z" }, + { url = "https://files.pythonhosted.org/packages/80/d5/73c73b03fc0ac73dacf069fdf6036c9abad82de0a47549e9912c955ab449/xxhash-3.5.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:89e66ceed67b213dec5a773e2f7a9e8c58f64daeb38c7859d8815d2c89f39ad7", size = 216352, upload-time = "2024-08-17T09:18:57.073Z" }, + { url = "https://files.pythonhosted.org/packages/b6/2a/5043dba5ddbe35b4fe6ea0a111280ad9c3d4ba477dd0f2d1fe1129bda9d0/xxhash-3.5.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:bcd51708a633410737111e998ceb3b45d3dbc98c0931f743d9bb0a209033a326", size = 203410, upload-time = "2024-08-17T09:18:58.54Z" }, + { url = "https://files.pythonhosted.org/packages/a2/b2/9a8ded888b7b190aed75b484eb5c853ddd48aa2896e7b59bbfbce442f0a1/xxhash-3.5.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3ff2c0a34eae7df88c868be53a8dd56fbdf592109e21d4bfa092a27b0bf4a7bf", size = 210322, upload-time = "2024-08-17T09:18:59.943Z" }, + { url = "https://files.pythonhosted.org/packages/98/62/440083fafbc917bf3e4b67c2ade621920dd905517e85631c10aac955c1d2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:4e28503dccc7d32e0b9817aa0cbfc1f45f563b2c995b7a66c4c8a0d232e840c7", size = 414725, upload-time = "2024-08-17T09:19:01.332Z" }, + { url = "https://files.pythonhosted.org/packages/75/db/009206f7076ad60a517e016bb0058381d96a007ce3f79fa91d3010f49cc2/xxhash-3.5.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a6c50017518329ed65a9e4829154626f008916d36295b6a3ba336e2458824c8c", size = 192070, upload-time = "2024-08-17T09:19:03.007Z" }, + { url = "https://files.pythonhosted.org/packages/1f/6d/c61e0668943a034abc3a569cdc5aeae37d686d9da7e39cf2ed621d533e36/xxhash-3.5.0-cp313-cp313-win32.whl", hash = "sha256:53a068fe70301ec30d868ece566ac90d873e3bb059cf83c32e76012c889b8637", size = 30172, upload-time = "2024-08-17T09:19:04.355Z" }, + { url = "https://files.pythonhosted.org/packages/96/14/8416dce965f35e3d24722cdf79361ae154fa23e2ab730e5323aa98d7919e/xxhash-3.5.0-cp313-cp313-win_amd64.whl", hash = "sha256:80babcc30e7a1a484eab952d76a4f4673ff601f54d5142c26826502740e70b43", size = 30041, upload-time = "2024-08-17T09:19:05.435Z" }, + { url = "https://files.pythonhosted.org/packages/27/ee/518b72faa2073f5aa8e3262408d284892cb79cf2754ba0c3a5870645ef73/xxhash-3.5.0-cp313-cp313-win_arm64.whl", hash = "sha256:4811336f1ce11cac89dcbd18f3a25c527c16311709a89313c3acaf771def2d4b", size = 26801, upload-time = "2024-08-17T09:19:06.547Z" }, +] + +[[package]] +name = "yarl" +version = "1.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = "sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" }, + { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" }, + { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" }, + { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" }, + { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" }, + { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" }, + { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" }, + { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" }, + { url = "https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" }, + { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" }, + { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" }, + { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" }, + { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" }, + { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" }, + { url = "https://files.pythonhosted.org/packages/8a/e1/2411b6d7f769a07687acee88a062af5833cf1966b7266f3d8dfb3d3dc7d3/yarl-1.20.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:0b5ff0fbb7c9f1b1b5ab53330acbfc5247893069e7716840c8e7d5bb7355038a", size = 131811, upload-time = "2025-06-10T00:44:18.933Z" }, + { url = "https://files.pythonhosted.org/packages/b2/27/584394e1cb76fb771371770eccad35de400e7b434ce3142c2dd27392c968/yarl-1.20.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:14f326acd845c2b2e2eb38fb1346c94f7f3b01a4f5c788f8144f9b630bfff9a3", size = 90078, upload-time = "2025-06-10T00:44:20.635Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/3246ae92d4049099f52d9b0fe3486e3b500e29b7ea872d0f152966fc209d/yarl-1.20.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f60e4ad5db23f0b96e49c018596707c3ae89f5d0bd97f0ad3684bcbad899f1e7", size = 88748, upload-time = "2025-06-10T00:44:22.34Z" }, + { url = "https://files.pythonhosted.org/packages/a3/25/35afe384e31115a1a801fbcf84012d7a066d89035befae7c5d4284df1e03/yarl-1.20.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49bdd1b8e00ce57e68ba51916e4bb04461746e794e7c4d4bbc42ba2f18297691", size = 349595, upload-time = "2025-06-10T00:44:24.314Z" }, + { url = "https://files.pythonhosted.org/packages/28/2d/8aca6cb2cabc8f12efcb82749b9cefecbccfc7b0384e56cd71058ccee433/yarl-1.20.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:66252d780b45189975abfed839616e8fd2dbacbdc262105ad7742c6ae58f3e31", size = 342616, upload-time = "2025-06-10T00:44:26.167Z" }, + { url = "https://files.pythonhosted.org/packages/0b/e9/1312633d16b31acf0098d30440ca855e3492d66623dafb8e25b03d00c3da/yarl-1.20.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:59174e7332f5d153d8f7452a102b103e2e74035ad085f404df2e40e663a22b28", size = 361324, upload-time = "2025-06-10T00:44:27.915Z" }, + { url = "https://files.pythonhosted.org/packages/bc/a0/688cc99463f12f7669eec7c8acc71ef56a1521b99eab7cd3abb75af887b0/yarl-1.20.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e3968ec7d92a0c0f9ac34d5ecfd03869ec0cab0697c91a45db3fbbd95fe1b653", size = 359676, upload-time = "2025-06-10T00:44:30.041Z" }, + { url = 
"https://files.pythonhosted.org/packages/af/44/46407d7f7a56e9a85a4c207724c9f2c545c060380718eea9088f222ba697/yarl-1.20.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d1a4fbb50e14396ba3d375f68bfe02215d8e7bc3ec49da8341fe3157f59d2ff5", size = 352614, upload-time = "2025-06-10T00:44:32.171Z" }, + { url = "https://files.pythonhosted.org/packages/b1/91/31163295e82b8d5485d31d9cf7754d973d41915cadce070491778d9c9825/yarl-1.20.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11a62c839c3a8eac2410e951301309426f368388ff2f33799052787035793b02", size = 336766, upload-time = "2025-06-10T00:44:34.494Z" }, + { url = "https://files.pythonhosted.org/packages/b4/8e/c41a5bc482121f51c083c4c2bcd16b9e01e1cf8729e380273a952513a21f/yarl-1.20.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:041eaa14f73ff5a8986b4388ac6bb43a77f2ea09bf1913df7a35d4646db69e53", size = 364615, upload-time = "2025-06-10T00:44:36.856Z" }, + { url = "https://files.pythonhosted.org/packages/e3/5b/61a3b054238d33d70ea06ebba7e58597891b71c699e247df35cc984ab393/yarl-1.20.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:377fae2fef158e8fd9d60b4c8751387b8d1fb121d3d0b8e9b0be07d1b41e83dc", size = 360982, upload-time = "2025-06-10T00:44:39.141Z" }, + { url = "https://files.pythonhosted.org/packages/df/a3/6a72fb83f8d478cb201d14927bc8040af901811a88e0ff2da7842dd0ed19/yarl-1.20.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:1c92f4390e407513f619d49319023664643d3339bd5e5a56a3bebe01bc67ec04", size = 369792, upload-time = "2025-06-10T00:44:40.934Z" }, + { url = "https://files.pythonhosted.org/packages/7c/af/4cc3c36dfc7c077f8dedb561eb21f69e1e9f2456b91b593882b0b18c19dc/yarl-1.20.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:d25ddcf954df1754ab0f86bb696af765c5bfaba39b74095f27eececa049ef9a4", size = 382049, upload-time = "2025-06-10T00:44:42.854Z" }, + { url = "https://files.pythonhosted.org/packages/19/3a/e54e2c4752160115183a66dc9ee75a153f81f3ab2ba4bf79c3c53b33de34/yarl-1.20.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:909313577e9619dcff8c31a0ea2aa0a2a828341d92673015456b3ae492e7317b", size = 384774, upload-time = "2025-06-10T00:44:45.275Z" }, + { url = "https://files.pythonhosted.org/packages/9c/20/200ae86dabfca89060ec6447649f219b4cbd94531e425e50d57e5f5ac330/yarl-1.20.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:793fd0580cb9664548c6b83c63b43c477212c0260891ddf86809e1c06c8b08f1", size = 374252, upload-time = "2025-06-10T00:44:47.31Z" }, + { url = "https://files.pythonhosted.org/packages/83/75/11ee332f2f516b3d094e89448da73d557687f7d137d5a0f48c40ff211487/yarl-1.20.1-cp313-cp313-win32.whl", hash = "sha256:468f6e40285de5a5b3c44981ca3a319a4b208ccc07d526b20b12aeedcfa654b7", size = 81198, upload-time = "2025-06-10T00:44:49.164Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ba/39b1ecbf51620b40ab402b0fc817f0ff750f6d92712b44689c2c215be89d/yarl-1.20.1-cp313-cp313-win_amd64.whl", hash = "sha256:495b4ef2fea40596bfc0affe3837411d6aa3371abcf31aac0ccc4bdd64d4ef5c", size = 86346, upload-time = "2025-06-10T00:44:51.182Z" }, + { url = "https://files.pythonhosted.org/packages/43/c7/669c52519dca4c95153c8ad96dd123c79f354a376346b198f438e56ffeb4/yarl-1.20.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:f60233b98423aab21d249a30eb27c389c14929f47be8430efa7dbd91493a729d", size = 138826, upload-time = "2025-06-10T00:44:52.883Z" }, + { url = 
"https://files.pythonhosted.org/packages/6a/42/fc0053719b44f6ad04a75d7f05e0e9674d45ef62f2d9ad2c1163e5c05827/yarl-1.20.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6f3eff4cc3f03d650d8755c6eefc844edde99d641d0dcf4da3ab27141a5f8ddf", size = 93217, upload-time = "2025-06-10T00:44:54.658Z" }, + { url = "https://files.pythonhosted.org/packages/4f/7f/fa59c4c27e2a076bba0d959386e26eba77eb52ea4a0aac48e3515c186b4c/yarl-1.20.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:69ff8439d8ba832d6bed88af2c2b3445977eba9a4588b787b32945871c2444e3", size = 92700, upload-time = "2025-06-10T00:44:56.784Z" }, + { url = "https://files.pythonhosted.org/packages/2f/d4/062b2f48e7c93481e88eff97a6312dca15ea200e959f23e96d8ab898c5b8/yarl-1.20.1-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3cf34efa60eb81dd2645a2e13e00bb98b76c35ab5061a3989c7a70f78c85006d", size = 347644, upload-time = "2025-06-10T00:44:59.071Z" }, + { url = "https://files.pythonhosted.org/packages/89/47/78b7f40d13c8f62b499cc702fdf69e090455518ae544c00a3bf4afc9fc77/yarl-1.20.1-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8e0fe9364ad0fddab2688ce72cb7a8e61ea42eff3c7caeeb83874a5d479c896c", size = 323452, upload-time = "2025-06-10T00:45:01.605Z" }, + { url = "https://files.pythonhosted.org/packages/eb/2b/490d3b2dc66f52987d4ee0d3090a147ea67732ce6b4d61e362c1846d0d32/yarl-1.20.1-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8f64fbf81878ba914562c672024089e3401974a39767747691c65080a67b18c1", size = 346378, upload-time = "2025-06-10T00:45:03.946Z" }, + { url = "https://files.pythonhosted.org/packages/66/ad/775da9c8a94ce925d1537f939a4f17d782efef1f973039d821cbe4bcc211/yarl-1.20.1-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6342d643bf9a1de97e512e45e4b9560a043347e779a173250824f8b254bd5ce", size = 353261, upload-time = "2025-06-10T00:45:05.992Z" }, + { url = "https://files.pythonhosted.org/packages/4b/23/0ed0922b47a4f5c6eb9065d5ff1e459747226ddce5c6a4c111e728c9f701/yarl-1.20.1-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56dac5f452ed25eef0f6e3c6a066c6ab68971d96a9fb441791cad0efba6140d3", size = 335987, upload-time = "2025-06-10T00:45:08.227Z" }, + { url = "https://files.pythonhosted.org/packages/3e/49/bc728a7fe7d0e9336e2b78f0958a2d6b288ba89f25a1762407a222bf53c3/yarl-1.20.1-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7d7f497126d65e2cad8dc5f97d34c27b19199b6414a40cb36b52f41b79014be", size = 329361, upload-time = "2025-06-10T00:45:10.11Z" }, + { url = "https://files.pythonhosted.org/packages/93/8f/b811b9d1f617c83c907e7082a76e2b92b655400e61730cd61a1f67178393/yarl-1.20.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:67e708dfb8e78d8a19169818eeb5c7a80717562de9051bf2413aca8e3696bf16", size = 346460, upload-time = "2025-06-10T00:45:12.055Z" }, + { url = "https://files.pythonhosted.org/packages/70/fd/af94f04f275f95da2c3b8b5e1d49e3e79f1ed8b6ceb0f1664cbd902773ff/yarl-1.20.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:595c07bc79af2494365cc96ddeb772f76272364ef7c80fb892ef9d0649586513", size = 334486, upload-time = "2025-06-10T00:45:13.995Z" }, + { url = "https://files.pythonhosted.org/packages/84/65/04c62e82704e7dd0a9b3f61dbaa8447f8507655fd16c51da0637b39b2910/yarl-1.20.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7bdd2f80f4a7df852ab9ab49484a4dee8030023aa536df41f2d922fd57bf023f", size = 342219, 
upload-time = "2025-06-10T00:45:16.479Z" }, + { url = "https://files.pythonhosted.org/packages/91/95/459ca62eb958381b342d94ab9a4b6aec1ddec1f7057c487e926f03c06d30/yarl-1.20.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c03bfebc4ae8d862f853a9757199677ab74ec25424d0ebd68a0027e9c639a390", size = 350693, upload-time = "2025-06-10T00:45:18.399Z" }, + { url = "https://files.pythonhosted.org/packages/a6/00/d393e82dd955ad20617abc546a8f1aee40534d599ff555ea053d0ec9bf03/yarl-1.20.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:344d1103e9c1523f32a5ed704d576172d2cabed3122ea90b1d4e11fe17c66458", size = 355803, upload-time = "2025-06-10T00:45:20.677Z" }, + { url = "https://files.pythonhosted.org/packages/9e/ed/c5fb04869b99b717985e244fd93029c7a8e8febdfcffa06093e32d7d44e7/yarl-1.20.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:88cab98aa4e13e1ade8c141daeedd300a4603b7132819c484841bb7af3edce9e", size = 341709, upload-time = "2025-06-10T00:45:23.221Z" }, + { url = "https://files.pythonhosted.org/packages/24/fd/725b8e73ac2a50e78a4534ac43c6addf5c1c2d65380dd48a9169cc6739a9/yarl-1.20.1-cp313-cp313t-win32.whl", hash = "sha256:b121ff6a7cbd4abc28985b6028235491941b9fe8fe226e6fdc539c977ea1739d", size = 86591, upload-time = "2025-06-10T00:45:25.793Z" }, + { url = "https://files.pythonhosted.org/packages/94/c3/b2e9f38bc3e11191981d57ea08cab2166e74ea770024a646617c9cddd9f6/yarl-1.20.1-cp313-cp313t-win_amd64.whl", hash = "sha256:541d050a355bbbc27e55d906bc91cb6fe42f96c01413dd0f4ed5a5240513874f", size = 93003, upload-time = "2025-06-10T00:45:27.752Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, +] + +[[package]] +name = "zstandard" +version = "0.23.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation == 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ed/f6/2ac0287b442160a89d726b17a9184a4c615bb5237db763791a7fd16d9df1/zstandard-0.23.0.tar.gz", hash = "sha256:b2d8c62d08e7255f68f7a740bae85b3c9b8e5466baa9cbf7f57f1cde0ac6bc09", size = 681701, upload-time = "2024-07-15T00:18:06.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/83/f23338c963bd9de687d47bf32efe9fd30164e722ba27fb59df33e6b1719b/zstandard-0.23.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b4567955a6bc1b20e9c31612e615af6b53733491aeaa19a6b3b37f3b65477094", size = 788713, upload-time = "2024-07-15T00:15:35.815Z" }, + { url = "https://files.pythonhosted.org/packages/5b/b3/1a028f6750fd9227ee0b937a278a434ab7f7fdc3066c3173f64366fe2466/zstandard-0.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1e172f57cd78c20f13a3415cc8dfe24bf388614324d25539146594c16d78fcc8", size = 633459, upload-time = "2024-07-15T00:15:37.995Z" }, + { url = "https://files.pythonhosted.org/packages/26/af/36d89aae0c1f95a0a98e50711bc5d92c144939efc1f81a2fcd3e78d7f4c1/zstandard-0.23.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0e166f698c5a3e914947388c162be2583e0c638a4703fc6a543e23a88dea3c1", size = 4945707, upload-time = "2024-07-15T00:15:39.872Z" }, + { url = "https://files.pythonhosted.org/packages/cd/2e/2051f5c772f4dfc0aae3741d5fc72c3dcfe3aaeb461cc231668a4db1ce14/zstandard-0.23.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:12a289832e520c6bd4dcaad68e944b86da3bad0d339ef7989fb7e88f92e96072", size = 5306545, upload-time = "2024-07-15T00:15:41.75Z" }, + { url = "https://files.pythonhosted.org/packages/0a/9e/a11c97b087f89cab030fa71206963090d2fecd8eb83e67bb8f3ffb84c024/zstandard-0.23.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d50d31bfedd53a928fed6707b15a8dbeef011bb6366297cc435accc888b27c20", size = 5337533, upload-time = "2024-07-15T00:15:44.114Z" }, + { url = "https://files.pythonhosted.org/packages/fc/79/edeb217c57fe1bf16d890aa91a1c2c96b28c07b46afed54a5dcf310c3f6f/zstandard-0.23.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:72c68dda124a1a138340fb62fa21b9bf4848437d9ca60bd35db36f2d3345f373", size = 5436510, upload-time = "2024-07-15T00:15:46.509Z" }, + { url = "https://files.pythonhosted.org/packages/81/4f/c21383d97cb7a422ddf1ae824b53ce4b51063d0eeb2afa757eb40804a8ef/zstandard-0.23.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53dd9d5e3d29f95acd5de6802e909ada8d8d8cfa37a3ac64836f3bc4bc5512db", size = 4859973, upload-time = "2024-07-15T00:15:49.939Z" }, + { url = "https://files.pythonhosted.org/packages/ab/15/08d22e87753304405ccac8be2493a495f529edd81d39a0870621462276ef/zstandard-0.23.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6a41c120c3dbc0d81a8e8adc73312d668cd34acd7725f036992b1b72d22c1772", size = 4936968, upload-time = "2024-07-15T00:15:52.025Z" }, + { url = "https://files.pythonhosted.org/packages/eb/fa/f3670a597949fe7dcf38119a39f7da49a8a84a6f0b1a2e46b2f71a0ab83f/zstandard-0.23.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:40b33d93c6eddf02d2c19f5773196068d875c41ca25730e8288e9b672897c105", size = 5467179, upload-time = "2024-07-15T00:15:54.971Z" }, + { url = "https://files.pythonhosted.org/packages/4e/a9/dad2ab22020211e380adc477a1dbf9f109b1f8d94c614944843e20dc2a99/zstandard-0.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9206649ec587e6b02bd124fb7799b86cddec350f6f6c14bc82a2b70183e708ba", size = 4848577, upload-time = "2024-07-15T00:15:57.634Z" }, + { url = "https://files.pythonhosted.org/packages/08/03/dd28b4484b0770f1e23478413e01bee476ae8227bbc81561f9c329e12564/zstandard-0.23.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:76e79bc28a65f467e0409098fa2c4376931fd3207fbeb6b956c7c476d53746dd", size = 4693899, upload-time = "2024-07-15T00:16:00.811Z" }, + { url = "https://files.pythonhosted.org/packages/2b/64/3da7497eb635d025841e958bcd66a86117ae320c3b14b0ae86e9e8627518/zstandard-0.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:66b689c107857eceabf2cf3d3fc699c3c0fe8ccd18df2219d978c0283e4c508a", size = 5199964, upload-time = "2024-07-15T00:16:03.669Z" }, + { url = "https://files.pythonhosted.org/packages/43/a4/d82decbab158a0e8a6ebb7fc98bc4d903266bce85b6e9aaedea1d288338c/zstandard-0.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9c236e635582742fee16603042553d276cca506e824fa2e6489db04039521e90", size = 5655398, upload-time = "2024-07-15T00:16:06.694Z" }, + { url = "https://files.pythonhosted.org/packages/f2/61/ac78a1263bc83a5cf29e7458b77a568eda5a8f81980691bbc6eb6a0d45cc/zstandard-0.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8fffdbd9d1408006baaf02f1068d7dd1f016c6bcb7538682622c556e7b68e35", size = 5191313, upload-time = "2024-07-15T00:16:09.758Z" }, + { url = "https://files.pythonhosted.org/packages/e7/54/967c478314e16af5baf849b6ee9d6ea724ae5b100eb506011f045d3d4e16/zstandard-0.23.0-cp312-cp312-win32.whl", hash = 
"sha256:dc1d33abb8a0d754ea4763bad944fd965d3d95b5baef6b121c0c9013eaf1907d", size = 430877, upload-time = "2024-07-15T00:16:11.758Z" }, + { url = "https://files.pythonhosted.org/packages/75/37/872d74bd7739639c4553bf94c84af7d54d8211b626b352bc57f0fd8d1e3f/zstandard-0.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:64585e1dba664dc67c7cdabd56c1e5685233fbb1fc1966cfba2a340ec0dfff7b", size = 495595, upload-time = "2024-07-15T00:16:13.731Z" }, + { url = "https://files.pythonhosted.org/packages/80/f1/8386f3f7c10261fe85fbc2c012fdb3d4db793b921c9abcc995d8da1b7a80/zstandard-0.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:576856e8594e6649aee06ddbfc738fec6a834f7c85bf7cadd1c53d4a58186ef9", size = 788975, upload-time = "2024-07-15T00:16:16.005Z" }, + { url = "https://files.pythonhosted.org/packages/16/e8/cbf01077550b3e5dc86089035ff8f6fbbb312bc0983757c2d1117ebba242/zstandard-0.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:38302b78a850ff82656beaddeb0bb989a0322a8bbb1bf1ab10c17506681d772a", size = 633448, upload-time = "2024-07-15T00:16:17.897Z" }, + { url = "https://files.pythonhosted.org/packages/06/27/4a1b4c267c29a464a161aeb2589aff212b4db653a1d96bffe3598f3f0d22/zstandard-0.23.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d2240ddc86b74966c34554c49d00eaafa8200a18d3a5b6ffbf7da63b11d74ee2", size = 4945269, upload-time = "2024-07-15T00:16:20.136Z" }, + { url = "https://files.pythonhosted.org/packages/7c/64/d99261cc57afd9ae65b707e38045ed8269fbdae73544fd2e4a4d50d0ed83/zstandard-0.23.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ef230a8fd217a2015bc91b74f6b3b7d6522ba48be29ad4ea0ca3a3775bf7dd5", size = 5306228, upload-time = "2024-07-15T00:16:23.398Z" }, + { url = "https://files.pythonhosted.org/packages/7a/cf/27b74c6f22541f0263016a0fd6369b1b7818941de639215c84e4e94b2a1c/zstandard-0.23.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:774d45b1fac1461f48698a9d4b5fa19a69d47ece02fa469825b442263f04021f", size = 5336891, upload-time = "2024-07-15T00:16:26.391Z" }, + { url = "https://files.pythonhosted.org/packages/fa/18/89ac62eac46b69948bf35fcd90d37103f38722968e2981f752d69081ec4d/zstandard-0.23.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f77fa49079891a4aab203d0b1744acc85577ed16d767b52fc089d83faf8d8ed", size = 5436310, upload-time = "2024-07-15T00:16:29.018Z" }, + { url = "https://files.pythonhosted.org/packages/a8/a8/5ca5328ee568a873f5118d5b5f70d1f36c6387716efe2e369010289a5738/zstandard-0.23.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac184f87ff521f4840e6ea0b10c0ec90c6b1dcd0bad2f1e4a9a1b4fa177982ea", size = 4859912, upload-time = "2024-07-15T00:16:31.871Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ca/3781059c95fd0868658b1cf0440edd832b942f84ae60685d0cfdb808bca1/zstandard-0.23.0-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c363b53e257246a954ebc7c488304b5592b9c53fbe74d03bc1c64dda153fb847", size = 4936946, upload-time = "2024-07-15T00:16:34.593Z" }, + { url = "https://files.pythonhosted.org/packages/ce/11/41a58986f809532742c2b832c53b74ba0e0a5dae7e8ab4642bf5876f35de/zstandard-0.23.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:e7792606d606c8df5277c32ccb58f29b9b8603bf83b48639b7aedf6df4fe8171", size = 5466994, upload-time = "2024-07-15T00:16:36.887Z" }, + { url = 
"https://files.pythonhosted.org/packages/83/e3/97d84fe95edd38d7053af05159465d298c8b20cebe9ccb3d26783faa9094/zstandard-0.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a0817825b900fcd43ac5d05b8b3079937073d2b1ff9cf89427590718b70dd840", size = 4848681, upload-time = "2024-07-15T00:16:39.709Z" }, + { url = "https://files.pythonhosted.org/packages/6e/99/cb1e63e931de15c88af26085e3f2d9af9ce53ccafac73b6e48418fd5a6e6/zstandard-0.23.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:9da6bc32faac9a293ddfdcb9108d4b20416219461e4ec64dfea8383cac186690", size = 4694239, upload-time = "2024-07-15T00:16:41.83Z" }, + { url = "https://files.pythonhosted.org/packages/ab/50/b1e703016eebbc6501fc92f34db7b1c68e54e567ef39e6e59cf5fb6f2ec0/zstandard-0.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fd7699e8fd9969f455ef2926221e0233f81a2542921471382e77a9e2f2b57f4b", size = 5200149, upload-time = "2024-07-15T00:16:44.287Z" }, + { url = "https://files.pythonhosted.org/packages/aa/e0/932388630aaba70197c78bdb10cce2c91fae01a7e553b76ce85471aec690/zstandard-0.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:d477ed829077cd945b01fc3115edd132c47e6540ddcd96ca169facff28173057", size = 5655392, upload-time = "2024-07-15T00:16:46.423Z" }, + { url = "https://files.pythonhosted.org/packages/02/90/2633473864f67a15526324b007a9f96c96f56d5f32ef2a56cc12f9548723/zstandard-0.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa6ce8b52c5987b3e34d5674b0ab529a4602b632ebab0a93b07bfb4dfc8f8a33", size = 5191299, upload-time = "2024-07-15T00:16:49.053Z" }, + { url = "https://files.pythonhosted.org/packages/b0/4c/315ca5c32da7e2dc3455f3b2caee5c8c2246074a61aac6ec3378a97b7136/zstandard-0.23.0-cp313-cp313-win32.whl", hash = "sha256:a9b07268d0c3ca5c170a385a0ab9fb7fdd9f5fd866be004c4ea39e44edce47dd", size = 430862, upload-time = "2024-07-15T00:16:51.003Z" }, + { url = "https://files.pythonhosted.org/packages/a2/bf/c6aaba098e2d04781e8f4f7c0ba3c7aa73d00e4c436bcc0cf059a66691d1/zstandard-0.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:f3513916e8c645d0610815c257cbfd3242adfd5c4cfa78be514e5a3ebb42a41b", size = 495578, upload-time = "2024-07-15T00:16:53.135Z" }, +] diff --git a/lint.sh b/lint.sh new file mode 100755 index 00000000..9242f664 --- /dev/null +++ b/lint.sh @@ -0,0 +1,61 @@ +#!/bin/bash +set -e + +echo "Running code formatters and linters..." + +# Check if running in CI mode (no fixes) +if [ "$1" = "ci" ]; then + echo "Running in CI mode - checking only, not fixing..." + uv run ruff format --check + uv run ruff check +else + uv run ruff format + uv run ruff check --fix +fi + +echo "Validating JSON schema files..." + +# Function to validate a JSON schema file using Python +validate_schema() { + local schema_file=$1 + echo "Validating $schema_file..." + + # Use Python to validate both JSON syntax and schema validity + uv run python -c "import json, jsonschema; schema = json.load(open('$schema_file')); jsonschema.Draft7Validator.check_schema(schema)" 2>/dev/null + + if [ $? -ne 0 ]; then + echo "Error: $schema_file is not a valid JSON schema" + return 1 + fi + + return 0 +} + +# Validate the main agent schema +if ! validate_schema "intentkit/models/agent_schema.json"; then + exit 1 +fi + +# Validate all schema.json files in skills subdirectories +echo "Validating schema.json files in skills subdirectories..." 
+find_exit_code=0 + +# Find all schema.json files and store them in a temporary file +find intentkit/skills -name "schema.json" > /tmp/schema_files.txt + +# Read each line from the temporary file +while IFS= read -r schema_file; do + if ! validate_schema "$schema_file"; then + find_exit_code=1 + fi +done < /tmp/schema_files.txt + +# Clean up the temporary file +rm -f /tmp/schema_files.txt + +if [ $find_exit_code -ne 0 ]; then + echo "Error: Some schema files are not valid" + exit 1 +fi + +echo "All JSON schema files are valid!" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..7033aac1 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,105 @@ +[project] +name = "intentkit-workspace" +version = "0.5.0" +description = "Intent-based AI Agent Platform - Workspace" +authors = [{ name = "Ruihua", email = "ruihua@crestal.network" }] +requires-python = "==3.12.*" +readme = "README.md" +dependencies = [ + "aiogram>=3.17.0", + "aiosqlite>=0.21.0", + "alembic>=1.14.0", + "anyio>=4.8.0", + "APScheduler>=3.11.0", + "asyncpg>=0.30.0", + "aws-secretsmanager-caching>=1.1.3", + "beautifulsoup4>=4.13.4", + "boto3 (>=1.37.23,<2.0.0)", + "botocore>=1.35.97", + "coinbase-agentkit (>=0.6.0,<0.7.0)", + "coinbase-agentkit-langchain>=0.5.0", + "cron-validator (>=1.0.8,<2.0.0)", + "epyxid>=0.3.3", + "faiss-cpu>=1.11.0", + "fastapi>=0.115.8", + "filetype (>=1.2.0,<2.0.0)", + "gunicorn>=23.0.0", + "httpx>=0.28.1", + "jsonref>=1.1.0", + "langchain (>=0.3.25,<0.4.0)", + "langchain-community>=0.3.19", + "langchain-core>=0.3.43", + "langchain-mcp-adapters>=0.0.11", + "langchain-openai>=0.3.8", + "langchain-postgres>=0.0.13", + "langchain-text-splitters>=0.3.8", + "langchain-xai>=0.2.1", + "langgraph (>=0.6.1,<0.7.0)", + "langgraph-checkpoint>=2.0.18", + "langgraph-checkpoint-postgres>=2.0.16,<2.0.23", + "langgraph-prebuilt (>=0.6.1,<0.7.0)", + "langmem>=0.0.27", + "mypy-boto3-s3 (>=1.37.24,<2.0.0)", + "openai>=1.59.6", + "pgvector>=0.3.6", + "pillow (>=11.1.0,<12.0.0)", + "psycopg>=3.2.9", + "psycopg-pool>=3.2.4", + "psycopg2-binary>=2.9.10,<3.0.0", + "pydantic>=2.10.6,<2.13.0", + "pydantic-settings>=2.8.1", + "python-dotenv>=1.0.1", + "python-multipart>=0.0.20", + "pytz>=2025.1", + "pyyaml>=6.0.2", + "redis (>=5.2.1,<7.0.0)", + "requests>=2.32.3", + "sentry-sdk[fastapi]>=2.20.0", + "slack-sdk>=3.34.0", + "sqlalchemy[asyncio]>=2.0.37", + "supabase>=2.16.0", + "telegramify-markdown (>=0.5.0,<0.6.0)", + "tweepy[async]>=4.15.0", + "uvicorn>=0.34.0,<1.0.0", + "bip32>=2.0.0", + "eth-keys>=0.4.0", + "eth-utils>=2.1.0", + "jsonschema>=4.24.0", + "starlette>=0.47.1", + "aiohttp>=3.11.16", + "requests-oauthlib>=2.0.0", + "tenacity>=9.1.2", + "web3>=7.10.0", + "cdp-sdk>=1.22.0", + "pyjwt>=2.10.1", + "langchain-deepseek>=0.1.4", + "intentkit", +] + +[dependency-groups] +dev = [ + "ruff>=0.11.9,<0.12", + "jsonschema>=4.21.1,<5", + "deptry>=0.23.0", +] + +[tool.uv] +package = false + +[tool.uv.workspace] +members = [ + "intentkit", +] + +[tool.uv.sources] +intentkit = { workspace = true } + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.ruff.lint] +extend-select = ["I"] + +[tool.deptry] +known_first_party = ["intentkit"] diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/batch_migrate_skills.py b/scripts/batch_migrate_skills.py new file mode 100644 index 00000000..47c11b00 --- /dev/null +++ b/scripts/batch_migrate_skills.py @@ -0,0 +1,160 @@ +#!/usr/bin/env python3 +""" +Batch migration script for 
Agent skills configuration.
+
+This script fetches all agents from the database, migrates their skills configuration
+from the old format (xxx_skills and xxx_config) to the new format, where each skill
+moves into the skills field as a sub-dictionary, and saves the agents back to the
+database.
+
+Usage:
+    uv run scripts/batch_migrate_skills.py
+"""
+
+import asyncio
+import logging
+
+from sqlalchemy import select
+from sqlalchemy.orm.attributes import flag_modified
+
+from intentkit.config.config import config
+from intentkit.models.agent import AgentTable
+from intentkit.models.db import get_session, init_db
+
+# Configure logging
+logging.basicConfig(
+    level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+async def migrate_agent_skills(agent: AgentTable) -> bool:
+    """
+    Migrate an agent's skills from the old format to the new format.
+
+    Old format:
+    ```
+    acolyt_skills = ["ask_gpt"]
+    acolyt_config = {"api_key": "abc"}
+    ```
+
+    New format:
+    ```
+    skills = {
+        "acolyt": {
+            "states": {"ask_gpt": "public"},
+            "enabled": true
+        }
+    }
+    ```
+
+    Args:
+        agent: The agent to migrate
+
+    Returns:
+        bool: True if the agent was modified, False otherwise
+    """
+    # Initialize skills field if it doesn't exist
+    if agent.skills is None:
+        agent.skills = {}
+
+    # Define the mapping of old skill fields to new skill names
+    skill_mappings = [
+        {"skills": "cdp_skills", "config": None, "name": "cdp"},
+        {"skills": "twitter_skills", "config": "twitter_config", "name": "twitter"},
+        {"skills": "common_skills", "config": None, "name": "common"},
+        {"skills": "enso_skills", "config": "enso_config", "name": "enso"},
+        {"skills": "acolyt_skills", "config": "acolyt_config", "name": "acolyt"},
+        {"skills": "allora_skills", "config": "allora_config", "name": "allora"},
+        {"skills": "elfa_skills", "config": "elfa_config", "name": "elfa"},
+    ]
+
+    modified = False
+
+    # Process each skill mapping
+    for mapping in skill_mappings:
+        skills_field = mapping["skills"]
+        config_field = mapping["config"]
+        skill_name = mapping["name"]
+
+        # Get the skills list using getattr to access the column values
+        skills_list = getattr(agent, skills_field, None)
+
+        # Skip if the skills list is empty or None
+        if not skills_list:
+            continue
+
+        # Get the per-skill config if it exists (named skill_config so it does
+        # not shadow the module-level config import)
+        skill_config = getattr(agent, config_field, {}) if config_field else {}
+
+        # Create the new skill entry
+        skill_entry = {
+            "states": {skill: "public" for skill in skills_list},
+            "enabled": True,
+        }
+
+        # Add any config values
+        if skill_config:
+            # Merge config with the skill entry
+            for key, value in skill_config.items():
+                if key != "states" and key != "enabled":
+                    skill_entry[key] = value
+
+        # Add the skill entry to the skills field
+        agent.skills[skill_name] = skill_entry
+        # In-place mutation of a plain JSON column is not tracked by SQLAlchemy;
+        # flag it explicitly (harmless if the column already uses MutableDict)
+        flag_modified(agent, "skills")
+
+        # Clear the old fields
+        setattr(agent, skills_field, None)
+        if config_field:
+            setattr(agent, config_field, None)
+
+        modified = True
+
+    return modified
+
+
+async def batch_migrate_skills():
+    """
+    Fetch all agents from the database, migrate their skills, and save them back.
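+
+    Old per-skill columns (for example acolyt_skills and acolyt_config) are set to
+    NULL once their contents have been folded into the skills JSON field, so a
+    second run is a no-op for already-migrated agents.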
+ """ + async with get_session() as session: + # Fetch all agents + result = await session.execute(select(AgentTable)) + agents = result.scalars().all() + + logger.info(f"Found {len(agents)} agents to process") + + migrated_count = 0 + for agent in agents: + try: + # Migrate the agent's skills + modified = await migrate_agent_skills(agent) + + if modified: + # Save the agent back to the database + session.add(agent) + migrated_count += 1 + logger.info(f"Migrated agent {agent.id} ({agent.name})") + except Exception as e: + logger.error(f"Error migrating agent {agent.id}: {e}") + + if migrated_count > 0: + # Commit the changes + await session.commit() + logger.info(f"Successfully migrated {migrated_count} agents") + else: + logger.info("No agents needed migration") + + +async def main(): + """ + Main entry point for the script. + """ + # Initialize the database connection + await init_db(**config.db) + + # Run the batch migration + await batch_migrate_skills() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/cdp_list_token_balances_example.py b/scripts/cdp_list_token_balances_example.py new file mode 100644 index 00000000..47945001 --- /dev/null +++ b/scripts/cdp_list_token_balances_example.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 +"""Simple example script for account.list_token_balances using CDP client. + +This script demonstrates how to: +1. Create a CDP client with API credentials +2. Import an existing wallet account using the wallet secret +3. List token balances for the account on base network + +Usage: + uv run scripts/cdp_list_token_balances_example.py + +Environment variables required: + CDP_API_KEY_ID: Your CDP API key ID + CDP_API_KEY_SECRET: Your CDP API key secret + CDP_WALLET_SECRET: Your CDP wallet secret (base64 encoded) +""" + +import asyncio +import os + +from cdp import CdpClient +from dotenv import load_dotenv + +load_dotenv() + + +async def main(): + """Main function to demonstrate account.list_token_balances.""" + # Get credentials from environment variables + api_key_id = os.getenv("CDP_API_KEY_ID") + api_key_secret = os.getenv("CDP_API_KEY_SECRET") + wallet_secret = os.getenv("CDP_WALLET_SECRET") + + if not all([api_key_id, api_key_secret, wallet_secret]): + print("Error: Missing required environment variables:") + print("- CDP_API_KEY_ID") + print("- CDP_API_KEY_SECRET") + print("- CDP_WALLET_SECRET") + return + + print("Creating CDP client...") + cdp_client = CdpClient( + api_key_id=api_key_id, + api_key_secret=api_key_secret, + wallet_secret=wallet_secret, + ) + + account = await cdp_client.evm.get_account(name="eva") + + print(f"load account with address: {account.address}") + + # if client close too early, it will have error + # await cdp_client.close() + + # List token balances on base network + print("\nListing token balances on base network...") + token_balances = await account.list_token_balances("base") + print(token_balances) + + await cdp_client.close() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/check_credit_event_consistency.py b/scripts/check_credit_event_consistency.py new file mode 100644 index 00000000..29447511 --- /dev/null +++ b/scripts/check_credit_event_consistency.py @@ -0,0 +1,268 @@ +#!/usr/bin/env python3 +""" +Credit Event Consistency Checker + +This script checks the consistency of credit amounts in CreditEvent records. +It verifies that the sum of free/reward/permanent amounts equals the total amounts +for platform fees, dev fees, agent fees, and total amounts. 
+""" + +import asyncio +import logging +from decimal import Decimal +from typing import Dict, List, Tuple + +from sqlalchemy import select +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.models.credit import CreditEventTable +from intentkit.models.db import get_session, init_db + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class CreditEventConsistencyChecker: + """Checker for credit event consistency.""" + + def __init__(self): + self.total_records = 0 + self.consistent_records = 0 + self.inconsistent_records = 0 + self.zero_sum_inconsistent_count = ( + 0 # Cases where detail sum is 0 but total is not 0 + ) + self.zero_sum_inconsistent_details: List[ + Dict + ] = [] # Cases where detail sum is 0 but total is not 0 + self.non_zero_sum_inconsistent_details: List[ + Dict + ] = [] # Unexpected errors where details are non-zero but still unequal + + def check_record_consistency( + self, record: CreditEventTable + ) -> Tuple[bool, List[str], bool]: + """Check if a single record is consistent. + + Returns: + Tuple of (is_consistent, list_of_errors, is_zero_sum_error) + """ + errors = [] + + # Helper function to safely convert to Decimal + def to_decimal(value) -> Decimal: + if value is None: + return Decimal("0") + return Decimal(str(value)) + + # Check platform fee consistency + platform_free = to_decimal(record.fee_platform_free_amount) + platform_reward = to_decimal(record.fee_platform_reward_amount) + platform_permanent = to_decimal(record.fee_platform_permanent_amount) + platform_total = to_decimal(record.fee_platform_amount) + + platform_sum = platform_free + platform_reward + platform_permanent + if platform_sum != platform_total: + errors.append( + f"Platform fee mismatch: {platform_free} + {platform_reward} + {platform_permanent} = {platform_sum} != {platform_total}" + ) + + # Check dev fee consistency + dev_free = to_decimal(record.fee_dev_free_amount) + dev_reward = to_decimal(record.fee_dev_reward_amount) + dev_permanent = to_decimal(record.fee_dev_permanent_amount) + dev_total = to_decimal(record.fee_dev_amount) + + dev_sum = dev_free + dev_reward + dev_permanent + if dev_sum != dev_total: + errors.append( + f"Dev fee mismatch: {dev_free} + {dev_reward} + {dev_permanent} = {dev_sum} != {dev_total}" + ) + + # Check agent fee consistency + agent_free = to_decimal(record.fee_agent_free_amount) + agent_reward = to_decimal(record.fee_agent_reward_amount) + agent_permanent = to_decimal(record.fee_agent_permanent_amount) + agent_total = to_decimal(record.fee_agent_amount) + + agent_sum = agent_free + agent_reward + agent_permanent + if agent_sum != agent_total: + errors.append( + f"Agent fee mismatch: {agent_free} + {agent_reward} + {agent_permanent} = {agent_sum} != {agent_total}" + ) + + # Check total amount consistency + free_amount = to_decimal(record.free_amount) + reward_amount = to_decimal(record.reward_amount) + permanent_amount = to_decimal(record.permanent_amount) + total_amount = to_decimal(record.total_amount) + + total_sum = free_amount + reward_amount + permanent_amount + if total_sum != total_amount: + errors.append( + f"Total amount mismatch: {free_amount} + {reward_amount} + {permanent_amount} = {total_sum} != {total_amount}" + ) + + # Check if all errors are cases where detail sum is 0 + is_zero_sum_error = False + if errors: + # Check if all errors are cases where detail sum is 0 but total is not 0 + zero_sum_patterns = [ + "0 + 0 + 0 = 0 !=", # Pattern for zero sum 
details
+        ]
+        is_zero_sum_error = all(
+            any(pattern in error for pattern in zero_sum_patterns)
+            for error in errors
+        )
+
+        return len(errors) == 0, errors, is_zero_sum_error
+
+    async def check_all_records(self, session: AsyncSession, batch_size: int = 1000):
+        """Check all credit event records in batches using cursor-based pagination."""
+        logger.info("Starting credit event consistency check...")
+
+        # Use cursor-based pagination to avoid batch drift
+        last_id = ""
+        batch_number = 1
+
+        while True:
+            # String IDs always compare greater than the initial empty-string
+            # cursor, so no special case is needed for the first batch
+            query = (
+                select(CreditEventTable)
+                .where(CreditEventTable.id > last_id)
+                .order_by(CreditEventTable.id)
+                .limit(batch_size)
+            )
+
+            result = await session.execute(query)
+            records = result.scalars().all()
+
+            if not records:
+                break
+
+            logger.info(
+                f"Processing batch {batch_number}, records starting from ID {records[0].id}"
+            )
+
+            # Update cursor to the last processed record's ID
+            last_id = records[-1].id
+
+            for record in records:
+                self.total_records += 1
+
+                is_consistent, errors, is_zero_sum_error = (
+                    self.check_record_consistency(record)
+                )
+
+                if is_consistent:
+                    self.consistent_records += 1
+                else:
+                    self.inconsistent_records += 1
+                    if is_zero_sum_error:
+                        # Cases where the detail sum is 0: count and store details
+                        self.zero_sum_inconsistent_count += 1
+                        self.zero_sum_inconsistent_details.append(
+                            {
+                                "id": record.id,
+                                "created_at": record.created_at,
+                                "event_type": record.event_type,
+                                "errors": errors,
+                            }
+                        )
+                    else:
+                        # Unexpected errors where the details are non-zero yet
+                        # still do not add up; keep full details for review
+                        self.non_zero_sum_inconsistent_details.append(
+                            {
+                                "id": record.id,
+                                "created_at": record.created_at,
+                                "event_type": record.event_type,
+                                "errors": errors,
+                            }
+                        )
+
+            batch_number += 1
+
+        logger.info("Consistency check completed.")
+
+    def print_summary(self):
+        """Print summary of the consistency check."""
+        print("\n" + "=" * 60)
+        print("CREDIT EVENT CONSISTENCY CHECK SUMMARY")
+        print("=" * 60)
+        print(f"Total records checked: {self.total_records}")
+        print(f"Consistent records: {self.consistent_records}")
+        print(f"Inconsistent records: {self.inconsistent_records}")
+        print(
+            f"  - Zero-sum inconsistent (details sum to 0 but total is not 0): {self.zero_sum_inconsistent_count}"
+        )
+        print(
+            f"  - Non-zero-sum inconsistent (unexpected errors): {len(self.non_zero_sum_inconsistent_details)}"
+        )
+
+        if self.total_records > 0:
+            consistency_rate = (self.consistent_records / self.total_records) * 100
+            print(f"Consistency rate: {consistency_rate:.2f}%")
+
+        # Show details for zero-sum inconsistent records
+        if self.zero_sum_inconsistent_details:
+            print("\n" + "-" * 40)
+            print("ZERO-SUM INCONSISTENT RECORDS (Details sum to 0 but total is not 0)")
+            print("-" * 40)
+
+            # Show first 10 zero-sum inconsistent records
+            for i, detail in enumerate(self.zero_sum_inconsistent_details[:10]):
+                print(f"\n{i + 1}. Record ID: {detail['id']}")
+                print(f"   Created at: {detail['created_at']}")
+                print(f"   Event type: {detail['event_type']}")
+                print("   Errors:")
+                for error in detail["errors"]:
+                    print(f"     - {error}")
+
+            if len(self.zero_sum_inconsistent_details) > 10:
+                print(
+                    f"\n... and {len(self.zero_sum_inconsistent_details) - 10} more zero-sum inconsistent records."
+ ) + + # Show details for unexpected errors + if self.non_zero_sum_inconsistent_details: + print("\n" + "-" * 40) + print("NON-ZERO-SUM INCONSISTENT RECORDS (Unexpected Errors)") + print("-" * 40) + + # Show first 10 non-zero-sum inconsistent records + for i, detail in enumerate(self.non_zero_sum_inconsistent_details[:10]): + print(f"\n{i + 1}. Record ID: {detail['id']}") + print(f" Created at: {detail['created_at']}") + print(f" Event type: {detail['event_type']}") + print(" Errors:") + for error in detail["errors"]: + print(f" - {error}") + + if len(self.non_zero_sum_inconsistent_details) > 10: + print( + f"\n... and {len(self.non_zero_sum_inconsistent_details) - 10} more non-zero-sum inconsistent records." + ) + + print("\n" + "=" * 60) + + +async def main(): + """Main function to run the consistency check.""" + logger.info("Starting CreditEvent consistency check...") + + # Initialize database connection + await init_db(**config.db) + + checker = CreditEventConsistencyChecker() + + async with get_session() as session: + await checker.check_all_records(session) + + checker.print_summary() + + logger.info("Consistency check completed.") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/check_credit_event_consistency_with_base.py b/scripts/check_credit_event_consistency_with_base.py new file mode 100644 index 00000000..80b33282 --- /dev/null +++ b/scripts/check_credit_event_consistency_with_base.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python3 +""" +Credit Event Consistency Checker with Base Amount Validation + +This script checks the consistency of credit event records in the database, +including validation of base amount fields and their relationships. + +Base amount validation includes: +1. base_amount = base_free_amount + base_reward_amount + base_permanent_amount +2. base_amount + fee_platform_amount + fee_dev_amount + fee_agent_amount = total_amount +3. Each fee amount should equal the sum of its free/reward/permanent components +4. 
Base amounts should be consistent with the original credit type breakdown + +Usage: + python scripts/check_credit_event_consistency_with_base.py +""" + +import asyncio +import logging +from decimal import Decimal +from typing import Dict, List + +from sqlalchemy import func, select +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.models.credit import CreditEventTable +from intentkit.models.db import get_session, init_db + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(levelname)s - %(message)s", + handlers=[ + logging.StreamHandler(), + logging.FileHandler("credit_event_consistency_check.log"), + ], +) +logger = logging.getLogger(__name__) + +# Constants +BATCH_SIZE = 1000 +TOLERANCE = Decimal("0.0001") # Tolerance for decimal comparison + + +def to_decimal(value) -> Decimal: + """Convert value to Decimal, handling None values.""" + if value is None: + return Decimal("0") + return Decimal(str(value)) + + +class CreditEventConsistencyChecker: + """Checker for credit event consistency including base amount validation.""" + + def __init__(self): + self.total_checked = 0 + self.total_inconsistencies = 0 + self.zero_sum_errors = 0 + self.non_zero_sum_errors = 0 + self.base_amount_errors = 0 + self.fee_breakdown_errors = 0 + self.base_fee_total_errors = 0 + self.inconsistent_records: List[Dict] = [] + + async def check_all_events(self) -> None: + """Check all credit events for consistency using cursor-based pagination.""" + + async with get_session() as session: + # Get total count for progress tracking + total_count = await self._get_total_count(session) + logger.info(f"Total credit events to check: {total_count:,}") + + last_id = "" + batch_number = 0 + + while True: + batch_number += 1 + events = await self._get_events_batch(session, last_id, BATCH_SIZE) + + if not events: + break + + logger.info( + f"Processing batch {batch_number}, starting from ID: {last_id or 'beginning'}, " + f"batch size: {len(events)}, progress: {self.total_checked}/{total_count} " + f"({self.total_checked / total_count * 100:.1f}%)" + ) + + for event in events: + await self._check_event_consistency(event) + self.total_checked += 1 + last_id = event.id + + # Log progress every 10 batches + if batch_number % 10 == 0: + logger.info( + f"Progress: {self.total_checked:,}/{total_count:,} events checked " + f"({self.total_checked / total_count * 100:.1f}%), " + f"found {self.total_inconsistencies} inconsistencies" + ) + + await self._log_summary() + + async def _get_total_count(self, session: AsyncSession) -> int: + """Get total count of credit events.""" + stmt = select(func.count(CreditEventTable.id)) + result = await session.scalar(stmt) + return result or 0 + + async def _get_events_batch( + self, session: AsyncSession, last_id: str, batch_size: int + ) -> List[CreditEventTable]: + """Get a batch of credit events using cursor-based pagination.""" + stmt = ( + select(CreditEventTable) + .where(CreditEventTable.id > last_id) + .order_by(CreditEventTable.id) + .limit(batch_size) + ) + result = await session.execute(stmt) + return result.scalars().all() + + async def _check_event_consistency(self, event: CreditEventTable) -> None: + """Check consistency of a single credit event including base amounts.""" + errors = [] + + # Convert all amounts to Decimal for consistent calculation + total_amount = to_decimal(event.total_amount) + free_amount = to_decimal(event.free_amount) + reward_amount = 
to_decimal(event.reward_amount) + permanent_amount = to_decimal(event.permanent_amount) + + # Base amounts + base_amount = to_decimal(event.base_amount) + base_free_amount = to_decimal(event.base_free_amount) + base_reward_amount = to_decimal(event.base_reward_amount) + base_permanent_amount = to_decimal(event.base_permanent_amount) + + # Fee amounts + fee_platform_amount = to_decimal(event.fee_platform_amount) + fee_dev_amount = to_decimal(event.fee_dev_amount) + fee_agent_amount = to_decimal(event.fee_agent_amount) + + # Fee breakdown amounts + fee_platform_free_amount = to_decimal(event.fee_platform_free_amount) + fee_platform_reward_amount = to_decimal(event.fee_platform_reward_amount) + fee_platform_permanent_amount = to_decimal(event.fee_platform_permanent_amount) + + fee_dev_free_amount = to_decimal(event.fee_dev_free_amount) + fee_dev_reward_amount = to_decimal(event.fee_dev_reward_amount) + fee_dev_permanent_amount = to_decimal(event.fee_dev_permanent_amount) + + fee_agent_free_amount = to_decimal(event.fee_agent_free_amount) + fee_agent_reward_amount = to_decimal(event.fee_agent_reward_amount) + fee_agent_permanent_amount = to_decimal(event.fee_agent_permanent_amount) + + # Check 1: Original consistency - total amount vs credit type amounts + calculated_total = free_amount + reward_amount + permanent_amount + if abs(total_amount - calculated_total) > TOLERANCE: + errors.append( + f"Total amount mismatch: total_amount={total_amount}, " + f"calculated={calculated_total} (free={free_amount} + reward={reward_amount} + permanent={permanent_amount})" + ) + + # Check 2: Fee amounts consistency + # Platform fee breakdown + calculated_platform_fee = ( + fee_platform_free_amount + + fee_platform_reward_amount + + fee_platform_permanent_amount + ) + if abs(fee_platform_amount - calculated_platform_fee) > TOLERANCE: + errors.append( + f"Platform fee breakdown mismatch: fee_platform_amount={fee_platform_amount}, " + f"calculated={calculated_platform_fee} (free={fee_platform_free_amount} + reward={fee_platform_reward_amount} + permanent={fee_platform_permanent_amount})" + ) + self.fee_breakdown_errors += 1 + + # Dev fee breakdown + calculated_dev_fee = ( + fee_dev_free_amount + fee_dev_reward_amount + fee_dev_permanent_amount + ) + if abs(fee_dev_amount - calculated_dev_fee) > TOLERANCE: + errors.append( + f"Dev fee breakdown mismatch: fee_dev_amount={fee_dev_amount}, " + f"calculated={calculated_dev_fee} (free={fee_dev_free_amount} + reward={fee_dev_reward_amount} + permanent={fee_dev_permanent_amount})" + ) + self.fee_breakdown_errors += 1 + + # Agent fee breakdown + calculated_agent_fee = ( + fee_agent_free_amount + fee_agent_reward_amount + fee_agent_permanent_amount + ) + if abs(fee_agent_amount - calculated_agent_fee) > TOLERANCE: + errors.append( + f"Agent fee breakdown mismatch: fee_agent_amount={fee_agent_amount}, " + f"calculated={calculated_agent_fee} (free={fee_agent_free_amount} + reward={fee_agent_reward_amount} + permanent={fee_agent_permanent_amount})" + ) + self.fee_breakdown_errors += 1 + + # Check 3: Base amount consistency + calculated_base_amount = ( + base_free_amount + base_reward_amount + base_permanent_amount + ) + if abs(base_amount - calculated_base_amount) > TOLERANCE: + errors.append( + f"Base amount breakdown mismatch: base_amount={base_amount}, " + f"calculated={calculated_base_amount} (base_free={base_free_amount} + base_reward={base_reward_amount} + base_permanent={base_permanent_amount})" + ) + self.base_amount_errors += 1 + + # Check 4: Base amount + fees = 
total amount + calculated_total_from_base_and_fees = ( + base_amount + fee_platform_amount + fee_dev_amount + fee_agent_amount + ) + if abs(total_amount - calculated_total_from_base_and_fees) > TOLERANCE: + errors.append( + f"Base + fees != total: total_amount={total_amount}, " + f"base_amount + fees={calculated_total_from_base_and_fees} " + f"(base={base_amount} + platform_fee={fee_platform_amount} + dev_fee={fee_dev_amount} + agent_fee={fee_agent_amount})" + ) + self.base_fee_total_errors += 1 + + # Check 5: Credit type consistency between base amounts and total amounts + # Base free amount should be consistent with free amount minus fees + expected_base_free = ( + free_amount + - fee_platform_free_amount + - fee_dev_free_amount + - fee_agent_free_amount + ) + if abs(base_free_amount - expected_base_free) > TOLERANCE: + errors.append( + f"Base free amount inconsistency: base_free_amount={base_free_amount}, " + f"expected={expected_base_free} (free_amount={free_amount} - platform_fee_free={fee_platform_free_amount} - dev_fee_free={fee_dev_free_amount} - agent_fee_free={fee_agent_free_amount})" + ) + + # Base reward amount should be consistent with reward amount minus fees + expected_base_reward = ( + reward_amount + - fee_platform_reward_amount + - fee_dev_reward_amount + - fee_agent_reward_amount + ) + if abs(base_reward_amount - expected_base_reward) > TOLERANCE: + errors.append( + f"Base reward amount inconsistency: base_reward_amount={base_reward_amount}, " + f"expected={expected_base_reward} (reward_amount={reward_amount} - platform_fee_reward={fee_platform_reward_amount} - dev_fee_reward={fee_dev_reward_amount} - agent_fee_reward={fee_agent_reward_amount})" + ) + + # Base permanent amount should be consistent with permanent amount minus fees + expected_base_permanent = ( + permanent_amount + - fee_platform_permanent_amount + - fee_dev_permanent_amount + - fee_agent_permanent_amount + ) + if abs(base_permanent_amount - expected_base_permanent) > TOLERANCE: + errors.append( + f"Base permanent amount inconsistency: base_permanent_amount={base_permanent_amount}, " + f"expected={expected_base_permanent} (permanent_amount={permanent_amount} - platform_fee_permanent={fee_platform_permanent_amount} - dev_fee_permanent={fee_dev_permanent_amount} - agent_fee_permanent={fee_agent_permanent_amount})" + ) + + if errors: + self.total_inconsistencies += 1 + + # Categorize error type + if total_amount == Decimal("0"): + self.zero_sum_errors += 1 + error_type = "ZERO_SUM_ERROR" + else: + self.non_zero_sum_errors += 1 + error_type = "NON_ZERO_SUM_ERROR" + + inconsistent_record = { + "id": event.id, + "event_type": event.event_type, + "user_id": event.user_id, + "agent_id": event.agent_id, + "total_amount": str(total_amount), + "error_type": error_type, + "errors": errors, + "created_at": event.created_at.isoformat() + if event.created_at + else None, + } + self.inconsistent_records.append(inconsistent_record) + + # Log first few errors for immediate visibility + if self.total_inconsistencies <= 10: + logger.warning( + f"Inconsistency found in event {event.id} ({error_type}): {'; '.join(errors)}" + ) + + async def _log_summary(self) -> None: + """Log summary of the consistency check.""" + logger.info("\n" + "=" * 80) + logger.info("CREDIT EVENT CONSISTENCY CHECK SUMMARY") + logger.info("=" * 80) + logger.info(f"Total events checked: {self.total_checked:,}") + logger.info(f"Total inconsistencies found: {self.total_inconsistencies:,}") + logger.info(f" - Zero-sum errors: {self.zero_sum_errors:,}") + 
logger.info(f" - Non-zero-sum errors: {self.non_zero_sum_errors:,}") + logger.info(f" - Base amount errors: {self.base_amount_errors:,}") + logger.info(f" - Fee breakdown errors: {self.fee_breakdown_errors:,}") + logger.info(f" - Base+fees!=total errors: {self.base_fee_total_errors:,}") + + if self.total_inconsistencies > 0: + consistency_rate = ( + (self.total_checked - self.total_inconsistencies) + / self.total_checked + * 100 + ) + logger.info(f"Consistency rate: {consistency_rate:.2f}%") + + # Log some example inconsistencies + logger.info("\nExample inconsistencies:") + for i, record in enumerate(self.inconsistent_records[:5]): + logger.info( + f" {i + 1}. Event {record['id']} ({record['error_type']}):" + ) + for error in record["errors"][:2]: # Show first 2 errors per record + logger.info(f" - {error}") + if len(record["errors"]) > 2: + logger.info(f" ... and {len(record['errors']) - 2} more errors") + else: + logger.info("✅ All credit events are consistent!") + + logger.info("=" * 80) + + +async def main(): + """Main function to run the consistency check.""" + logger.info("Starting CreditEvent consistency check with base amount validation...") + + # Initialize database connection + await init_db(**config.db) + + checker = CreditEventConsistencyChecker() + await checker.check_all_events() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/check_credit_transaction_consistency.py b/scripts/check_credit_transaction_consistency.py new file mode 100644 index 00000000..70312e25 --- /dev/null +++ b/scripts/check_credit_transaction_consistency.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python + +""" +Script to identify records in CreditTransactionTable where +free_amount + reward_amount + permanent_amount != change_amount. + +This script helps identify inconsistent data that needs to be migrated. 
+""" + +import asyncio +import logging +from decimal import ROUND_HALF_UP, Decimal + +from sqlalchemy import text + +from intentkit.config.config import config +from intentkit.models.db import get_session, init_db + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + +# Define the precision for all decimal calculations (4 decimal places) +FOURPLACES = Decimal("0.0001") + + +def to_decimal(value) -> Decimal: + """Convert value to Decimal with proper precision.""" + if value is None: + return Decimal("0") + if isinstance(value, Decimal): + return value.quantize(FOURPLACES, rounding=ROUND_HALF_UP) + return Decimal(str(value)).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + +async def check_transaction_consistency(): + """Check consistency of credit transaction amounts using SQL aggregation.""" + async with get_session() as session: + # SQL query to count consistent and inconsistent records + consistency_query = """ + SELECT + COUNT(*) as total_count, + SUM(CASE + WHEN ROUND(COALESCE(free_amount, 0) + COALESCE(reward_amount, 0) + COALESCE(permanent_amount, 0), 4) = ROUND(COALESCE(change_amount, 0), 4) + THEN 1 + ELSE 0 + END) as consistent_count, + SUM(CASE + WHEN ROUND(COALESCE(free_amount, 0) + COALESCE(reward_amount, 0) + COALESCE(permanent_amount, 0), 4) != ROUND(COALESCE(change_amount, 0), 4) + THEN 1 + ELSE 0 + END) as inconsistent_count + FROM credit_transactions + """ + + result = await session.execute(text(consistency_query)) + row = result.fetchone() + + total_count = row.total_count + consistent_count = row.consistent_count + inconsistent_count = row.inconsistent_count + + logger.info( + f"Checking {total_count} credit transaction records using SQL aggregation..." 
+ ) + + # Calculate inconsistency rate + inconsistency_rate = ( + (inconsistent_count / total_count * 100) if total_count > 0 else 0 + ) + + # Summary + logger.info("\n" + "=" * 60) + logger.info("CONSISTENCY CHECK SUMMARY") + logger.info("=" * 60) + logger.info(f"Total records checked: {total_count}") + logger.info(f"Inconsistent records found: {inconsistent_count}") + logger.info(f"Consistent records: {consistent_count}") + logger.info(f"Inconsistency rate: {inconsistency_rate:.2f}%") + + if inconsistent_count > 0: + logger.warning( + f"Found {inconsistent_count} records where " + f"free_amount + reward_amount + permanent_amount != change_amount" + ) + logger.info("These records need to be migrated using the migration script.") + else: + logger.info("✅ All records are consistent!") + + return inconsistent_count + + +async def check_missing_event_ids(): + """Check for transactions without event_id.""" + async with get_session() as session: + # SQL query to count transactions without event_id + missing_event_id_query = """ + SELECT COUNT(*) as missing_count + FROM credit_transactions + WHERE event_id IS NULL OR event_id = '' + """ + + result = await session.execute(text(missing_event_id_query)) + missing_count = result.scalar() + + logger.info("\n" + "=" * 60) + logger.info("MISSING EVENT ID CHECK") + logger.info("=" * 60) + + if missing_count > 0: + logger.warning(f"Found {missing_count} transactions without event_id") + + # Get some examples of transactions without event_id + sample_query = """ + SELECT id, account_id, tx_type, credit_type, change_amount, created_at + FROM credit_transactions + WHERE event_id IS NULL OR event_id = '' + ORDER BY created_at DESC + LIMIT 5 + """ + + sample_result = await session.execute(text(sample_query)) + samples = sample_result.fetchall() + + logger.warning("Sample transactions without event_id:") + for sample in samples: + logger.warning( + f" ID: {sample.id}, Account: {sample.account_id}, " + f"Type: {sample.tx_type}, Amount: {sample.change_amount}, " + f"Created: {sample.created_at}" + ) + else: + logger.info("✅ All transactions have event_id!") + + return missing_count + + +async def main(): + """Main function to run the consistency check.""" + try: + await init_db(**config.db) + + # Run both checks + inconsistent_count = await check_transaction_consistency() + missing_event_id_count = await check_missing_event_ids() + + # Final summary + logger.info("\n" + "=" * 60) + logger.info("FINAL SUMMARY") + logger.info("=" * 60) + logger.info(f"Inconsistent amount records: {inconsistent_count}") + logger.info(f"Missing event_id records: {missing_event_id_count}") + + if inconsistent_count > 0 or missing_event_id_count > 0: + logger.info("\nNext steps:") + if inconsistent_count > 0: + logger.info("1. Run the migration script to fix inconsistent records") + if missing_event_id_count > 0: + logger.info("2. Investigate transactions without event_id") + logger.info("3. Re-run this check script to verify fixes") + + return inconsistent_count + except Exception as e: + logger.error(f"Error during consistency check: {e}") + raise + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/check_heartbeat.py b/scripts/check_heartbeat.py new file mode 100755 index 00000000..8d336848 --- /dev/null +++ b/scripts/check_heartbeat.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +""" +Check if a heartbeat exists in Redis for a given name. 
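+
+Redis connection settings are read from the intentkit config (redis_host, redis_port, redis_db).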
+
+Usage:
+ check_heartbeat.py NAME
+
+Returns:
+ 0 - Heartbeat exists (success)
+ 1 - Heartbeat does not exist or error occurred
+"""
+
+import asyncio
+import logging
+import sys
+from pathlib import Path
+
+# Add parent directory to path to allow imports
+sys.path.insert(0, str(Path(__file__).parent.parent))
+
+from intentkit.config.config import config
+from intentkit.models.redis import check_heartbeat, get_redis, init_redis
+
+logging.basicConfig(
+ level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+async def main():
+ """Initialize Redis and check for heartbeat."""
+ if len(sys.argv) != 2:
+ print(f"Usage: {sys.argv[0]} NAME")
+ sys.exit(1)
+
+ name = sys.argv[1]
+ redis_client = None
+
+ try:
+ # Initialize Redis
+ if config.redis_host:
+ await init_redis(
+ host=config.redis_host,
+ port=config.redis_port,
+ db=config.redis_db,
+ )
+ else:
+ logger.error("Redis host not configured")
+ sys.exit(1)
+
+ # Get Redis client
+ redis_client = get_redis()
+
+ # Check heartbeat
+ exists = await check_heartbeat(redis_client, name)
+
+ if exists:
+ logger.info(f"Heartbeat for '{name}' exists")
+ return 0 # Success
+ else:
+ logger.error(f"Heartbeat for '{name}' does not exist")
+ return 1 # Failure
+
+ except Exception as e:
+ logger.error(f"Error while checking heartbeat for '{name}': {e}")
+ return 1 # General error
+ finally:
+ # Close Redis connection if it was opened
+ if redis_client is not None:
+ await redis_client.aclose()
+
+
+if __name__ == "__main__":
+ exit_code = asyncio.run(main())
+ sys.exit(exit_code)
diff --git a/scripts/create.sh b/scripts/create.sh
new file mode 100755
index 00000000..42ac12d0
--- /dev/null
+++ b/scripts/create.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# To target a remote server, change this URL
+BASE_URL="http://localhost:8000"
+
+# A token is not required locally; if ADMIN_AUTH_ENABLED and ADMIN_JWT_SECRET are set on the remote server, put the key here
+TOKEN=""
+
+print_usage() {
+ echo "Usage: sh create.sh AGENT_ID"
+ exit 1
+}
+
+# Check if correct number of arguments provided
+if [ $# -ne 1 ]; then
+ print_usage
+fi
+
+AGENT_ID="$1"
+echo "Creating agent [${AGENT_ID}] ..."
+# Create the agent with a minimal JSON payload, capturing the HTTP status
+HTTP_STATUS=$(curl -s -w "%{http_code}" \
+ -X POST \
+ -H "Authorization: Bearer ${TOKEN}" \
+ -H "Content-Type: application/json" \
+ -d "{\"id\":\"${AGENT_ID}\"}" \
+ "${BASE_URL}/agents" -o "${AGENT_ID}.response")
+
+if [ "$HTTP_STATUS" -ge 400 ]; then
+ echo "Create failed with HTTP status ${HTTP_STATUS}"
+ cat "${AGENT_ID}.response"
+ rm "${AGENT_ID}.response"
+ exit 1
+fi
+
+rm "${AGENT_ID}.response"
+echo "Create succeeded"
diff --git a/scripts/export.sh b/scripts/export.sh
new file mode 100755
index 00000000..21e96e52
--- /dev/null
+++ b/scripts/export.sh
@@ -0,0 +1,29 @@
+#!/bin/bash
+
+# To target a remote server, change this URL
+BASE_URL="http://localhost:8000"
+
+# A token is not required locally; if ADMIN_AUTH_ENABLED and ADMIN_JWT_SECRET are set on the remote server, put the key here
+TOKEN=""
+
+print_usage() {
+ echo "Usage: sh export.sh AGENT_ID"
+ exit 1
+}
+
+# Check if correct number of arguments provided
+if [ $# -ne 1 ]; then
+ print_usage
+fi
+
+AGENT_ID="$1"
+echo "Exporting agent [${AGENT_ID}] ..."
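+# curl -w "%{http_code}" prints only the HTTP status to stdout, so the command
+# substitution captures the status while -o saves the response body to a file.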
+# Request the export endpoint
+HTTP_STATUS=$(curl -s -w "%{http_code}" -H "Authorization: Bearer ${TOKEN}" --clobber "${BASE_URL}/agents/${AGENT_ID}/export" -o "${AGENT_ID}.yaml")
+
+if [ "$HTTP_STATUS" -ne 200 ]; then
+ echo "Export failed with HTTP status ${HTTP_STATUS}"
+ exit 1
+fi
+
+echo "Export succeeded, the file is saved as ${AGENT_ID}.yaml"
diff --git a/scripts/fix_credit_accounts_from_transactions.py b/scripts/fix_credit_accounts_from_transactions.py
new file mode 100644
index 00000000..b9f39bf2
--- /dev/null
+++ b/scripts/fix_credit_accounts_from_transactions.py
@@ -0,0 +1,340 @@
+#!/usr/bin/env python3
+"""
+Credit Account Migration Script
+
+This script migrates credit data in the CreditAccountTable by recalculating
+free_credits, reward_credits, and credits from transaction history.
+"""
+
+import argparse
+import asyncio
+import logging
+from datetime import datetime
+from decimal import Decimal
+from typing import Tuple
+
+from sqlalchemy import text
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from intentkit.config.config import config
+from intentkit.models.db import get_session, init_db
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+async def create_backup_table(session: AsyncSession) -> str:
+ """Create a backup of the credit_accounts table.
+
+ Returns:
+ The name of the backup table
+ """
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ backup_table_name = f"credit_accounts_backup_{timestamp}"
+
+ # Create backup table with same structure and data
+ backup_sql = f"""
+ CREATE TABLE {backup_table_name} AS
+ SELECT * FROM credit_accounts;
+ """
+
+ await session.execute(text(backup_sql))
+ await session.commit()
+
+ logger.info(f"Created backup table: {backup_table_name}")
+ return backup_table_name
+
+
+async def calculate_credits_from_transactions(
+ session: AsyncSession, account_id: str
+) -> Tuple[Decimal, Decimal, Decimal]:
+ """Calculate the three types of credits from transactions for an account.
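+
+ Both tables are locked for the duration of the calculation; credit rows
+ add to and debit rows subtract from each balance.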
+ + Args: + session: Database session + account_id: Account ID to calculate for + + Returns: + Tuple of (free_credits, reward_credits, credits) calculated from transactions + """ + # Lock both tables to prevent interference + await session.execute(text("LOCK TABLE credit_accounts IN EXCLUSIVE MODE")) + await session.execute(text("LOCK TABLE credit_transactions IN SHARE MODE")) + + # Calculate credits from transactions using SQL + # Note: permanent_amount corresponds to credits field + # CREDIT transactions add to balance, DEBIT transactions subtract + calc_sql = text(""" + SELECT + COALESCE(SUM( + CASE WHEN credit_debit = 'credit' THEN free_amount + WHEN credit_debit = 'debit' THEN -free_amount + ELSE 0 END + ), 0) as calculated_free_credits, + COALESCE(SUM( + CASE WHEN credit_debit = 'credit' THEN reward_amount + WHEN credit_debit = 'debit' THEN -reward_amount + ELSE 0 END + ), 0) as calculated_reward_credits, + COALESCE(SUM( + CASE WHEN credit_debit = 'credit' THEN permanent_amount + WHEN credit_debit = 'debit' THEN -permanent_amount + ELSE 0 END + ), 0) as calculated_credits + FROM credit_transactions + WHERE account_id = :account_id + """) + + result = await session.execute(calc_sql, {"account_id": account_id}) + row = result.fetchone() + + if row is None: + return Decimal("0"), Decimal("0"), Decimal("0") + + return ( + Decimal(str(row.calculated_free_credits)), + Decimal(str(row.calculated_reward_credits)), + Decimal(str(row.calculated_credits)), + ) + + +async def get_current_account_credits( + session: AsyncSession, account_id: str +) -> Tuple[Decimal, Decimal, Decimal]: + """Get current credit values from account table. + + Args: + session: Database session + account_id: Account ID + + Returns: + Tuple of (free_credits, reward_credits, credits) from account table + """ + query = text(""" + SELECT free_credits, reward_credits, credits + FROM credit_accounts + WHERE id = :account_id + """) + + result = await session.execute(query, {"account_id": account_id}) + row = result.fetchone() + + if row is None: + raise ValueError(f"Account {account_id} not found") + + return ( + Decimal(str(row.free_credits)), + Decimal(str(row.reward_credits)), + Decimal(str(row.credits)), + ) + + +async def update_account_credits( + session: AsyncSession, + account_id: str, + free_credits: Decimal, + reward_credits: Decimal, + credits: Decimal, +) -> None: + """Update account with new credit values. + + Args: + session: Database session + account_id: Account ID to update + free_credits: New free credits value + reward_credits: New reward credits value + credits: New credits value + """ + update_sql = text(""" + UPDATE credit_accounts + SET free_credits = :free_credits, + reward_credits = :reward_credits, + credits = :credits, + updated_at = NOW() + WHERE id = :account_id + """) + + await session.execute( + update_sql, + { + "account_id": account_id, + "free_credits": free_credits, + "reward_credits": reward_credits, + "credits": credits, + }, + ) + + +async def process_single_account( + account_id: str, dry_run: bool = False +) -> tuple[bool, bool]: + """Process a single account in its own session. 
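+
+ The account is only updated when the stored total matches the total
+ recalculated from transactions; otherwise it is reported as a mismatch
+ and left untouched.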
+ + Args: + account_id: Account ID to process + dry_run: If True, only check for changes without updating + + Returns: + Tuple of (success, changed) where: + - success: True if successful, False if there was a mismatch + - changed: True if values were different and needed updating, False if no change + """ + async with get_session() as session: + try: + # Get current values + ( + current_free, + current_reward, + current_permanent, + ) = await get_current_account_credits(session, account_id) + current_total = current_free + current_reward + current_permanent + + # Calculate from transactions + ( + calc_free, + calc_reward, + calc_permanent, + ) = await calculate_credits_from_transactions(session, account_id) + calc_total = calc_free + calc_reward + calc_permanent + + logger.info( + f"Account {account_id}: Current=({current_free}, {current_reward}, {current_permanent}) " + f"Total={current_total}, Calculated=({calc_free}, {calc_reward}, {calc_permanent}) " + f"Total={calc_total}" + ) + + # Check if totals match + if abs(current_total - calc_total) > Decimal("0.0001"): + logger.error( + f"MISMATCH for account {account_id}! " + f"Current total: {current_total}, Calculated total: {calc_total}, " + f"Difference: {current_total - calc_total}" + ) + logger.error( + f"Current: free={current_free}, reward={current_reward}, credits={current_permanent}" + ) + logger.error( + f"Calculated: free={calc_free}, reward={calc_reward}, credits={calc_permanent}" + ) + return False, False + + # Check if values have changed + values_changed = ( + current_free != calc_free + or current_reward != calc_reward + or current_permanent != calc_permanent + ) + + # Update account with calculated values if not dry run and values changed + if not dry_run and values_changed: + await update_account_credits( + session, account_id, calc_free, calc_reward, calc_permanent + ) + await session.commit() + logger.info(f"Successfully updated account {account_id}") + elif values_changed: + logger.info(f"Account {account_id} would be updated (dry run mode)") + else: + logger.debug(f"Account {account_id} values are already correct") + + return True, values_changed + + except Exception as e: + logger.error(f"Error processing account {account_id}: {e}") + await session.rollback() + raise + + +async def get_all_account_ids(session: AsyncSession) -> list[str]: + """Get all account IDs from the database. + + Args: + session: Database session + + Returns: + List of account IDs + """ + query = text("SELECT id FROM credit_accounts ORDER BY id") + result = await session.execute(query) + return [row.id for row in result.fetchall()] + + +async def main(dry_run: bool = False) -> None: + """ + Main migration function. 
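+
+ Creates a backup of credit_accounts first (unless dry_run), then processes
+ accounts one by one, stopping at the first mismatch or error when not in
+ dry-run mode.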
+ + Args: + dry_run: If True, only check for mismatches without updating + """ + await init_db(**config.db) + + async with get_session() as session: + # Create backup table + if not dry_run: + backup_table = await create_backup_table(session) + logger.info(f"Backup created: {backup_table}") + + # Get all account IDs + account_ids = await get_all_account_ids(session) + logger.info(f"Found {len(account_ids)} accounts to process") + + # Process each account in its own session + success_count = 0 + mismatch_count = 0 + unchanged_count = 0 + updated_count = 0 + + for i, account_id in enumerate(account_ids, 1): + logger.info(f"Processing account {i}/{len(account_ids)}: {account_id}") + + try: + success, changed = await process_single_account(account_id, dry_run) + if success: + success_count += 1 + if changed: + updated_count += 1 + else: + unchanged_count += 1 + else: + mismatch_count += 1 + if not dry_run: + logger.error( + "Stopping migration due to mismatch. Please investigate." + ) + break + except Exception as e: + logger.error(f"Failed to process account {account_id}: {e}") + mismatch_count += 1 + if not dry_run: + logger.error("Stopping migration due to error. Please investigate.") + break + + logger.info( + f"Migration {'check' if dry_run else 'process'} completed. " + f"Total processed: {success_count}, Unchanged: {unchanged_count}, " + f"Updated: {updated_count}, Mismatches/Errors: {mismatch_count}" + ) + + if mismatch_count > 0: + logger.error( + f"Found {mismatch_count} accounts with mismatches or errors. " + "Please investigate before proceeding." + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser( + description="Fix credit account balances from transactions" + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Only check for mismatches without making changes", + ) + + args = parser.parse_args() + + asyncio.run(main(dry_run=args.dry_run)) diff --git a/scripts/fix_credit_event_consistency.py b/scripts/fix_credit_event_consistency.py new file mode 100644 index 00000000..a40e2bdb --- /dev/null +++ b/scripts/fix_credit_event_consistency.py @@ -0,0 +1,358 @@ +#!/usr/bin/env python3 +""" +Credit Event Consistency Fixer + +This script finds inconsistent credit events and recalculates the 12 detailed amount fields +using the same logic from the expense_skill function, then updates the database records. 
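+
+Each fee is split across credit types pro rata (type_amount * fee_amount / total_amount,
+quantized to 4 decimal places), with the permanent share taken as the remainder so the
+parts sum exactly to the fee. For example, with free=4, permanent=6 (total=10) and a
+platform fee of 1, the split is 0.4 free and 0.6 permanent.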
+ +The 12 fields that will be recalculated and updated are: +- free_amount, reward_amount, permanent_amount +- fee_platform_free_amount, fee_platform_reward_amount, fee_platform_permanent_amount +- fee_dev_free_amount, fee_dev_reward_amount, fee_dev_permanent_amount +- fee_agent_free_amount, fee_agent_reward_amount, fee_agent_permanent_amount +""" + +import asyncio +import logging +from decimal import ROUND_HALF_UP, Decimal +from typing import Dict, List, Tuple + +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.models.credit import CreditEventTable +from intentkit.models.db import get_session, init_db + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Define the precision for all decimal calculations (4 decimal places) +FOURPLACES = Decimal("0.0001") + + +def to_decimal(value) -> Decimal: + """Convert value to Decimal, handling None values.""" + if value is None: + return Decimal("0") + return Decimal(str(value)) + + +class CreditEventConsistencyFixer: + """Fixer for credit event consistency issues.""" + + def __init__(self): + self.total_records = 0 + self.inconsistent_records = 0 + self.fixed_records = 0 + self.failed_fixes = 0 + self.inconsistent_details: List[Dict] = [] + + def check_record_consistency( + self, record: CreditEventTable + ) -> Tuple[bool, List[str]]: + """Check if a single record is consistent. + + Returns: + Tuple of (is_consistent, list_of_errors) + """ + errors = [] + + # Convert all amounts to Decimal for precise calculation + total_amount = to_decimal(record.total_amount) + fee_platform_amount = to_decimal(record.fee_platform_amount) + fee_dev_amount = to_decimal(record.fee_dev_amount) + fee_agent_amount = to_decimal(record.fee_agent_amount) + + # Check detailed amounts for each fee type + platform_free = to_decimal(record.fee_platform_free_amount) + platform_reward = to_decimal(record.fee_platform_reward_amount) + platform_permanent = to_decimal(record.fee_platform_permanent_amount) + platform_sum = platform_free + platform_reward + platform_permanent + + dev_free = to_decimal(record.fee_dev_free_amount) + dev_reward = to_decimal(record.fee_dev_reward_amount) + dev_permanent = to_decimal(record.fee_dev_permanent_amount) + dev_sum = dev_free + dev_reward + dev_permanent + + agent_free = to_decimal(record.fee_agent_free_amount) + agent_reward = to_decimal(record.fee_agent_reward_amount) + agent_permanent = to_decimal(record.fee_agent_permanent_amount) + agent_sum = agent_free + agent_reward + agent_permanent + + # Check total amounts consistency + free_amount = to_decimal(record.free_amount) + reward_amount = to_decimal(record.reward_amount) + permanent_amount = to_decimal(record.permanent_amount) + total_sum = free_amount + reward_amount + permanent_amount + + # Check platform fee consistency + if platform_sum != fee_platform_amount: + errors.append( + f"Platform fee mismatch: {platform_free} + {platform_reward} + {platform_permanent} = {platform_sum} != {fee_platform_amount}" + ) + + # Check dev fee consistency + if dev_sum != fee_dev_amount: + errors.append( + f"Dev fee mismatch: {dev_free} + {dev_reward} + {dev_permanent} = {dev_sum} != {fee_dev_amount}" + ) + + # Check agent fee consistency + if agent_sum != fee_agent_amount: + errors.append( + f"Agent fee mismatch: {agent_free} + {agent_reward} + {agent_permanent} = {agent_sum} != {fee_agent_amount}" + ) + + # Check total amount consistency + if total_sum != total_amount: + 
errors.append( + f"Total amount mismatch: {free_amount} + {reward_amount} + {permanent_amount} = {total_sum} != {total_amount}" + ) + + return len(errors) == 0, errors + + def calculate_detailed_amounts( + self, record: CreditEventTable + ) -> Dict[str, Decimal]: + """Calculate the 12 detailed amount fields using the same logic as expense_skill. + + Returns: + Dictionary containing the calculated amounts + """ + # Get the total amounts from the record + total_amount = to_decimal(record.total_amount) + fee_platform_amount = to_decimal(record.fee_platform_amount) + fee_dev_amount = to_decimal(record.fee_dev_amount) + fee_agent_amount = to_decimal(record.fee_agent_amount) + + # Get the original credit type amounts + free_amount = to_decimal(record.free_amount) + reward_amount = to_decimal(record.reward_amount) + permanent_amount = to_decimal(record.permanent_amount) + + # Calculate fee_platform amounts by credit type + fee_platform_free_amount = Decimal("0") + fee_platform_reward_amount = Decimal("0") + fee_platform_permanent_amount = Decimal("0") + + if fee_platform_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_platform_free_amount = ( + free_amount * fee_platform_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_platform_reward_amount = ( + reward_amount * fee_platform_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_platform_amount + fee_platform_permanent_amount = ( + fee_platform_amount + - fee_platform_free_amount + - fee_platform_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate fee_agent amounts by credit type + fee_agent_free_amount = Decimal("0") + fee_agent_reward_amount = Decimal("0") + fee_agent_permanent_amount = Decimal("0") + + if fee_agent_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_agent_free_amount = ( + free_amount * fee_agent_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_agent_reward_amount = ( + reward_amount * fee_agent_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_agent_amount + fee_agent_permanent_amount = ( + fee_agent_amount - fee_agent_free_amount - fee_agent_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate fee_dev amounts by credit type + fee_dev_free_amount = Decimal("0") + fee_dev_reward_amount = Decimal("0") + fee_dev_permanent_amount = Decimal("0") + + if fee_dev_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_dev_free_amount = ( + free_amount * fee_dev_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_dev_reward_amount = ( + reward_amount * fee_dev_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_dev_amount + fee_dev_permanent_amount = ( + fee_dev_amount - fee_dev_free_amount - fee_dev_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + return { + "free_amount": free_amount, + "reward_amount": reward_amount, + 
"permanent_amount": permanent_amount, + "fee_platform_free_amount": fee_platform_free_amount, + "fee_platform_reward_amount": fee_platform_reward_amount, + "fee_platform_permanent_amount": fee_platform_permanent_amount, + "fee_dev_free_amount": fee_dev_free_amount, + "fee_dev_reward_amount": fee_dev_reward_amount, + "fee_dev_permanent_amount": fee_dev_permanent_amount, + "fee_agent_free_amount": fee_agent_free_amount, + "fee_agent_reward_amount": fee_agent_reward_amount, + "fee_agent_permanent_amount": fee_agent_permanent_amount, + } + + async def fix_inconsistent_record( + self, session: AsyncSession, record: CreditEventTable + ) -> bool: + """Fix a single inconsistent record by recalculating and updating the detailed amounts. + + Returns: + True if the record was successfully fixed, False otherwise + """ + try: + # Calculate the correct detailed amounts + calculated_amounts = self.calculate_detailed_amounts(record) + + # Update the record with the calculated amounts + stmt = ( + update(CreditEventTable) + .where(CreditEventTable.id == record.id) + .values(**calculated_amounts) + ) + await session.execute(stmt) + + return True + + except Exception as e: + logger.error(f"Failed to fix record {record.id}: {str(e)}") + return False + + async def find_and_fix_inconsistent_records(self, session: AsyncSession): + """Find all inconsistent records and fix them.""" + # Query all credit event records + stmt = select(CreditEventTable).order_by(CreditEventTable.created_at) + result = await session.execute(stmt) + records = result.scalars().all() + + self.total_records = len(records) + logger.info(f"Total records to check: {self.total_records}") + + batch_size = 100 + for i in range(0, len(records), batch_size): + batch = records[i : i + batch_size] + logger.info( + f"Processing batch {i // batch_size + 1}, records {i + 1}-{min(i + batch_size, len(records))}" + ) + + batch_fixed_count = 0 + batch_failed_count = 0 + + for record in batch: + is_consistent, errors = self.check_record_consistency(record) + + if not is_consistent: + self.inconsistent_records += 1 + self.inconsistent_details.append( + { + "id": record.id, + "user_id": record.user_id, + "skill_name": record.skill_name, + "total_amount": record.total_amount, + "errors": errors, + } + ) + + # Try to fix the record + if await self.fix_inconsistent_record(session, record): + self.fixed_records += 1 + batch_fixed_count += 1 + else: + self.failed_fixes += 1 + batch_failed_count += 1 + + if batch_fixed_count > 0 or batch_failed_count > 0: + logger.info( + f"Batch {i // batch_size + 1} completed: {batch_fixed_count} fixed, {batch_failed_count} failed" + ) + + # Commit all changes + await session.commit() + logger.info("All fixes committed to database.") + + def print_summary(self): + """Print a summary of the fixing process.""" + print("\n" + "=" * 60) + print("CREDIT EVENT CONSISTENCY FIXER SUMMARY") + print("=" * 60) + print(f"Total records checked: {self.total_records}") + print(f"Inconsistent records found: {self.inconsistent_records}") + print(f"Records successfully fixed: {self.fixed_records}") + print(f"Records failed to fix: {self.failed_fixes}") + if self.total_records > 0: + consistency_rate = ( + (self.total_records - self.inconsistent_records) + / self.total_records + * 100 + ) + print(f"Original consistency rate: {consistency_rate:.2f}%") + final_consistency_rate = ( + (self.total_records - self.failed_fixes) / self.total_records * 100 + ) + print(f"Final consistency rate: {final_consistency_rate:.2f}%") + print("=" * 60) + + # Show 
details of failed fixes, if any
+ if self.failed_fixes > 0:
+ print("\n" + "-" * 40)
+ print("FAILED TO FIX RECORDS")
+ print("-" * 40)
+ # Failure IDs are not tracked separately, so show the first
+ # self.failed_fixes inconsistent records as a representative sample.
+ failed_count = 0
+ for detail in self.inconsistent_details:
+ if failed_count >= self.failed_fixes:
+ break
+ print(f"Record ID: {detail['id']}")
+ print(f"User ID: {detail['user_id']}")
+ print(f"Skill: {detail['skill_name']}")
+ print(f"Total Amount: {detail['total_amount']}")
+ print("Errors:")
+ for error in detail["errors"]:
+ print(f" - {error}")
+ print("-" * 20)
+ failed_count += 1
+
+
+async def main():
+ """Main function to run the consistency fixer."""
+ logger.info("Starting CreditEvent consistency fixer...")
+
+ # Initialize database connection
+ await init_db(**config.db)
+
+ # Create fixer instance
+ fixer = CreditEventConsistencyFixer()
+
+ # Run the fixing process
+ async with get_session() as session:
+ logger.info("Starting credit event consistency fixing...")
+ await fixer.find_and_fix_inconsistent_records(session)
+
+ # Print summary
+ fixer.print_summary()
+ logger.info("Consistency fixing completed.")
+
+
+if __name__ == "__main__":
+ asyncio.run(main())
diff --git a/scripts/fix_credit_event_consistency_optimized.py b/scripts/fix_credit_event_consistency_optimized.py
new file mode 100644
index 00000000..e7ccb014
--- /dev/null
+++ b/scripts/fix_credit_event_consistency_optimized.py
@@ -0,0 +1,499 @@
+#!/usr/bin/env python3
+"""
+Optimized Credit Event Consistency Fixer
+
+This is an optimized version of the credit event consistency fixer that addresses
+performance bottlenecks in the original script:
+
+1. Uses streaming/pagination instead of loading all records into memory
+2. Implements batch updates for better database performance
+3. Uses smaller transaction scopes to avoid long-running transactions
+4. Adds concurrent processing for CPU-intensive calculations
+5. 
Optimizes database queries with proper indexing hints + +The 12 fields that will be recalculated and updated are: +- free_amount, reward_amount, permanent_amount +- fee_platform_free_amount, fee_platform_reward_amount, fee_platform_permanent_amount +- fee_dev_free_amount, fee_dev_reward_amount, fee_dev_permanent_amount +- fee_agent_free_amount, fee_agent_reward_amount, fee_agent_permanent_amount +""" + +import asyncio +import logging +import time +from concurrent.futures import ThreadPoolExecutor +from decimal import ROUND_HALF_UP, Decimal +from typing import Dict, List, Optional, Tuple + +from sqlalchemy import select, update +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.models.credit import CreditEventTable +from intentkit.models.db import get_session, init_db + +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + +# Define the precision for all decimal calculations (4 decimal places) +FOURPLACES = Decimal("0.0001") + +# Configuration for optimization +PAGE_SIZE = 1000 # Larger page size for streaming +BATCH_UPDATE_SIZE = 50 # Batch size for database updates +MAX_WORKERS = 4 # Number of threads for concurrent processing +COMMIT_INTERVAL = 10 # Commit every N batches + + +def to_decimal(value) -> Decimal: + """Convert value to Decimal, handling None values.""" + if value is None: + return Decimal("0") + return Decimal(str(value)) + + +class OptimizedCreditEventConsistencyFixer: + """Optimized fixer for credit event consistency issues.""" + + def __init__(self): + self.total_records = 0 + self.inconsistent_records = 0 + self.fixed_records = 0 + self.failed_fixes = 0 + self.processed_batches = 0 + self.start_time = time.time() + self.executor = ThreadPoolExecutor(max_workers=MAX_WORKERS) + + def check_record_consistency( + self, record: CreditEventTable + ) -> Tuple[bool, List[str]]: + """Check if a single record is consistent. 
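+
+ Note: comparisons here use exact Decimal equality rather than a tolerance,
+ so even sub-0.0001 drift is reported as inconsistent.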
+ + Returns: + Tuple of (is_consistent, list_of_errors) + """ + errors = [] + + # Convert all amounts to Decimal for precise calculation + total_amount = to_decimal(record.total_amount) + fee_platform_amount = to_decimal(record.fee_platform_amount) + fee_dev_amount = to_decimal(record.fee_dev_amount) + fee_agent_amount = to_decimal(record.fee_agent_amount) + + # Check detailed amounts for each fee type + platform_free = to_decimal(record.fee_platform_free_amount) + platform_reward = to_decimal(record.fee_platform_reward_amount) + platform_permanent = to_decimal(record.fee_platform_permanent_amount) + platform_sum = platform_free + platform_reward + platform_permanent + + dev_free = to_decimal(record.fee_dev_free_amount) + dev_reward = to_decimal(record.fee_dev_reward_amount) + dev_permanent = to_decimal(record.fee_dev_permanent_amount) + dev_sum = dev_free + dev_reward + dev_permanent + + agent_free = to_decimal(record.fee_agent_free_amount) + agent_reward = to_decimal(record.fee_agent_reward_amount) + agent_permanent = to_decimal(record.fee_agent_permanent_amount) + agent_sum = agent_free + agent_reward + agent_permanent + + # Check total amounts consistency + free_amount = to_decimal(record.free_amount) + reward_amount = to_decimal(record.reward_amount) + permanent_amount = to_decimal(record.permanent_amount) + total_sum = free_amount + reward_amount + permanent_amount + + # Check platform fee consistency + if platform_sum != fee_platform_amount: + errors.append( + f"Platform fee mismatch: {platform_free} + {platform_reward} + {platform_permanent} = {platform_sum} != {fee_platform_amount}" + ) + + # Check dev fee consistency + if dev_sum != fee_dev_amount: + errors.append( + f"Dev fee mismatch: {dev_free} + {dev_reward} + {dev_permanent} = {dev_sum} != {fee_dev_amount}" + ) + + # Check agent fee consistency + if agent_sum != fee_agent_amount: + errors.append( + f"Agent fee mismatch: {agent_free} + {agent_reward} + {agent_permanent} = {agent_sum} != {fee_agent_amount}" + ) + + # Check total amount consistency + if total_sum != total_amount: + errors.append( + f"Total amount mismatch: {free_amount} + {reward_amount} + {permanent_amount} = {total_sum} != {total_amount}" + ) + + return len(errors) == 0, errors + + def calculate_detailed_amounts( + self, record: CreditEventTable + ) -> Dict[str, Decimal]: + """Calculate the 12 detailed amount fields using the same logic as expense_skill. 
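+
+ Unlike the original fixer, a record whose three credit-type amounts are all
+ zero but whose total_amount is positive first has total_amount assigned to
+ the field matching its credit_type before the fees are split.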
+
+ Returns:
+ Dictionary containing the calculated amounts
+ """
+ # Get the total amounts from the record
+ total_amount = to_decimal(record.total_amount)
+ fee_platform_amount = to_decimal(record.fee_platform_amount)
+ fee_dev_amount = to_decimal(record.fee_dev_amount)
+ fee_agent_amount = to_decimal(record.fee_agent_amount)
+
+ # Get the original credit type amounts
+ free_amount = to_decimal(record.free_amount)
+ reward_amount = to_decimal(record.reward_amount)
+ permanent_amount = to_decimal(record.permanent_amount)
+
+ # Special handling for records where credit type amounts are 0
+ # but total_amount is non-zero - distribute total_amount based on credit_type
+ if (
+ total_amount > Decimal("0")
+ and free_amount == Decimal("0")
+ and reward_amount == Decimal("0")
+ and permanent_amount == Decimal("0")
+ ):
+ # Determine which credit type to use for distribution
+ credit_type = None
+ if hasattr(record, "credit_type") and record.credit_type:
+ credit_type = record.credit_type
+ elif (
+ hasattr(record, "credit_types")
+ and record.credit_types
+ and len(record.credit_types) > 0
+ ):
+ credit_type = record.credit_types[0]
+
+ # Distribute total_amount to the appropriate credit type field using CreditType enum values
+ if credit_type == "free_credits": # CreditType.FREE
+ free_amount = total_amount
+ elif credit_type == "reward_credits": # CreditType.REWARD
+ reward_amount = total_amount
+ elif credit_type == "credits": # CreditType.PERMANENT
+ permanent_amount = total_amount
+ else:
+ raise ValueError(
+ f"Unknown or missing credit_type: {credit_type} for record {record.id} with total_amount > 0 but all credit fields are 0"
+ )
+
+ # Calculate fee_platform amounts by credit type
+ fee_platform_free_amount = Decimal("0")
+ fee_platform_reward_amount = Decimal("0")
+ fee_platform_permanent_amount = Decimal("0")
+
+ if fee_platform_amount > Decimal("0") and total_amount > Decimal("0"):
+ # Calculate proportions based on the formula
+ if free_amount > Decimal("0"):
+ fee_platform_free_amount = (
+ free_amount * fee_platform_amount / total_amount
+ ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+ if reward_amount > Decimal("0"):
+ fee_platform_reward_amount = (
+ reward_amount * fee_platform_amount / total_amount
+ ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+ # Calculate permanent amount as the remainder to ensure the sum equals fee_platform_amount
+ fee_platform_permanent_amount = (
+ fee_platform_amount
+ - fee_platform_free_amount
+ - fee_platform_reward_amount
+ ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+ # Calculate fee_agent amounts by credit type
+ fee_agent_free_amount = Decimal("0")
+ fee_agent_reward_amount = Decimal("0")
+ fee_agent_permanent_amount = Decimal("0")
+
+ if fee_agent_amount > Decimal("0") and total_amount > Decimal("0"):
+ # Calculate proportions based on the formula
+ if free_amount > Decimal("0"):
+ fee_agent_free_amount = (
+ free_amount * fee_agent_amount / total_amount
+ ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+ if reward_amount > Decimal("0"):
+ fee_agent_reward_amount = (
+ reward_amount * fee_agent_amount / total_amount
+ ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+ # Calculate permanent amount as the remainder to ensure the sum equals fee_agent_amount
+ fee_agent_permanent_amount = (
+ fee_agent_amount - fee_agent_free_amount - fee_agent_reward_amount
+ ).quantize(FOURPLACES, rounding=ROUND_HALF_UP)
+
+ # Calculate fee_dev amounts by credit type
+ fee_dev_free_amount = Decimal("0")
+ fee_dev_reward_amount = Decimal("0")
+ 
fee_dev_permanent_amount = Decimal("0") + + if fee_dev_amount > Decimal("0") and total_amount > Decimal("0"): + # Calculate proportions based on the formula + if free_amount > Decimal("0"): + fee_dev_free_amount = ( + free_amount * fee_dev_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + if reward_amount > Decimal("0"): + fee_dev_reward_amount = ( + reward_amount * fee_dev_amount / total_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Calculate permanent amount as the remainder to ensure the sum equals fee_dev_amount + fee_dev_permanent_amount = ( + fee_dev_amount - fee_dev_free_amount - fee_dev_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + return { + "free_amount": free_amount, + "reward_amount": reward_amount, + "permanent_amount": permanent_amount, + "fee_platform_free_amount": fee_platform_free_amount, + "fee_platform_reward_amount": fee_platform_reward_amount, + "fee_platform_permanent_amount": fee_platform_permanent_amount, + "fee_dev_free_amount": fee_dev_free_amount, + "fee_dev_reward_amount": fee_dev_reward_amount, + "fee_dev_permanent_amount": fee_dev_permanent_amount, + "fee_agent_free_amount": fee_agent_free_amount, + "fee_agent_reward_amount": fee_agent_reward_amount, + "fee_agent_permanent_amount": fee_agent_permanent_amount, + } + + async def process_records_batch( + self, session: AsyncSession, records: List[CreditEventTable] + ) -> Tuple[List[Dict], int, int]: + """Process a batch of records and return updates to be applied. + + Returns: + Tuple of (updates_list, fixed_count, failed_count) + """ + updates = [] + fixed_count = 0 + failed_count = 0 + + # Use thread pool for CPU-intensive consistency checking and calculations + loop = asyncio.get_event_loop() + + # Process records concurrently + tasks = [] + for record in records: + task = loop.run_in_executor( + self.executor, self._process_single_record, record + ) + tasks.append(task) + + results = await asyncio.gather(*tasks, return_exceptions=True) + + for i, result in enumerate(results): + if isinstance(result, Exception): + logger.error(f"Failed to process record {records[i].id}: {result}") + failed_count += 1 + elif result is not None: + updates.append({"id": records[i].id, **result}) + fixed_count += 1 + + return updates, fixed_count, failed_count + + def _process_single_record(self, record: CreditEventTable) -> Optional[Dict]: + """Process a single record (CPU-intensive part). + + Returns: + Dictionary of updates if record needs fixing, None if consistent + """ + try: + is_consistent, _ = self.check_record_consistency(record) + if not is_consistent: + return self.calculate_detailed_amounts(record) + return None + except Exception as e: + raise Exception(f"Error processing record {record.id}: {str(e)}") + + async def batch_update_records( + self, session: AsyncSession, updates: List[Dict] + ) -> Tuple[int, int]: + """Apply batch updates to the database. 
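+
+ Updates are issued in sub-batches of BATCH_UPDATE_SIZE; when a sub-batch
+ raises, all of its records are counted as failed.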
+ + Returns: + Tuple of (successful_updates, failed_updates) + """ + successful = 0 + failed = 0 + + # Process updates in smaller batches to avoid large transactions + for i in range(0, len(updates), BATCH_UPDATE_SIZE): + batch_updates = updates[i : i + BATCH_UPDATE_SIZE] + + try: + # Use bulk update for better performance + for update_data in batch_updates: + record_id = update_data.pop("id") + stmt = ( + update(CreditEventTable) + .where(CreditEventTable.id == record_id) + .values(**update_data) + ) + await session.execute(stmt) + + successful += len(batch_updates) + + except Exception as e: + logger.error(f"Failed to update batch: {str(e)}") + failed += len(batch_updates) + + return successful, failed + + async def stream_records(self, session: AsyncSession, last_id: str, limit: int): + """Stream records using cursor-based pagination to avoid batch drift.""" + stmt = ( + select(CreditEventTable) + .where(CreditEventTable.id > last_id if last_id else True) + .order_by(CreditEventTable.id) + .limit(limit) + ) + result = await session.execute(stmt) + return result.scalars().all() + + async def find_and_fix_inconsistent_records(self, session: AsyncSession): + """Find all inconsistent records and fix them using optimized approach with cursor-based pagination.""" + logger.info( + "Starting credit event consistency fixing with cursor-based pagination..." + ) + + last_id = "" + batch_number = 1 + pending_updates = [] + + while True: + # Stream records using cursor-based pagination + records = await self.stream_records(session, last_id, PAGE_SIZE) + + if not records: + break + + logger.info( + f"Processing batch {batch_number}, records starting from ID {records[0].id}" + ) + + # Update cursor to the last processed record's ID + last_id = records[-1].id + self.total_records += len(records) + + # Process batch concurrently + updates, fixed_count, failed_count = await self.process_records_batch( + session, records + ) + + # Accumulate updates + pending_updates.extend(updates) + self.inconsistent_records += len(updates) + failed_count + self.failed_fixes += failed_count + + # Apply updates in batches and commit periodically + if ( + len(pending_updates) >= BATCH_UPDATE_SIZE + or batch_number % COMMIT_INTERVAL == 0 + ): + if pending_updates: + successful, failed = await self.batch_update_records( + session, pending_updates + ) + self.fixed_records += successful + self.failed_fixes += failed + + # Commit periodically to avoid long transactions + await session.commit() + logger.info(f"Committed {successful} updates, {failed} failed") + + pending_updates = [] + + if fixed_count > 0 or failed_count > 0: + logger.info( + f"Batch {batch_number} completed: {fixed_count} to fix, {failed_count} failed" + ) + + batch_number += 1 + self.processed_batches += 1 + + # Apply any remaining updates + if pending_updates: + successful, failed = await self.batch_update_records( + session, pending_updates + ) + self.fixed_records += successful + self.failed_fixes += failed + await session.commit() + logger.info(f"Final commit: {successful} updates, {failed} failed") + + logger.info("All fixes committed to database.") + + def print_summary(self): + """Print a summary of the fixing process.""" + elapsed_time = time.time() - self.start_time + + print("\n" + "=" * 60) + print("OPTIMIZED CREDIT EVENT CONSISTENCY FIXER SUMMARY") + print("=" * 60) + print(f"Total records checked: {self.total_records}") + print(f"Inconsistent records found: {self.inconsistent_records}") + print(f"Records successfully fixed: 
{self.fixed_records}") + print(f"Records failed to fix: {self.failed_fixes}") + print(f"Processed batches: {self.processed_batches}") + print(f"Total processing time: {elapsed_time:.2f} seconds") + + if self.total_records > 0: + consistency_rate = ( + (self.total_records - self.inconsistent_records) + / self.total_records + * 100 + ) + print(f"Original consistency rate: {consistency_rate:.2f}%") + final_consistency_rate = ( + (self.total_records - self.failed_fixes) / self.total_records * 100 + ) + print(f"Final consistency rate: {final_consistency_rate:.2f}%") + + records_per_second = ( + self.total_records / elapsed_time if elapsed_time > 0 else 0 + ) + print(f"Processing rate: {records_per_second:.2f} records/second") + + print("=" * 60) + + def __del__(self): + """Cleanup thread pool executor.""" + if hasattr(self, "executor"): + self.executor.shutdown(wait=True) + + +async def main(): + """Main function to run the optimized consistency fixer.""" + logger.info("Starting Optimized CreditEvent consistency fixer...") + + # Initialize database connection + await init_db(**config.db) + + # Create fixer instance + fixer = OptimizedCreditEventConsistencyFixer() + + try: + # Run the fixing process + async with get_session() as session: + logger.info("Starting credit event consistency fixing...") + await fixer.find_and_fix_inconsistent_records(session) + + # Print summary + fixer.print_summary() + logger.info("Consistency fixing completed.") + + except Exception as e: + logger.error(f"Error during processing: {str(e)}") + raise + finally: + # Ensure cleanup + del fixer + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/fix_credit_precision.py b/scripts/fix_credit_precision.py new file mode 100755 index 00000000..3a43dc9d --- /dev/null +++ b/scripts/fix_credit_precision.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python + +""" +Data repair script to fix precision issues in credit data. + +This script finds all message events in CreditEventTable and checks if the total_amount +is correctly calculated as the sum of base_amount, fee_platform_amount, and fee_agent_amount. +If there's a discrepancy (usually by 0.0001 due to precision issues), it fixes: +1. The total_amount and balance_after in CreditEventTable +2. The change_amount in the corresponding CreditTransactionTable record +3. 
The user's account balance in CreditAccountTable +""" + +import asyncio +import logging +from decimal import ROUND_HALF_UP, Decimal + +from sqlalchemy import select + +from intentkit.config.config import config +from intentkit.models.credit import ( + CreditAccountTable, + CreditEventTable, + CreditTransactionTable, + CreditType, + EventType, + TransactionType, +) +from intentkit.models.db import get_session, init_db + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + +# Define the precision for all decimal calculations (4 decimal places) +FOURPLACES = Decimal("0.0001") + + +async def fix_credit_precision(): + """Fix precision issues in credit data for message events.""" + logger.info("Starting credit precision fix script") + + fixed_count = 0 + async with get_session() as session: + # Find all message events + stmt = select(CreditEventTable).where( + CreditEventTable.event_type == EventType.MESSAGE + ) + result = await session.execute(stmt) + events = result.scalars().all() + + logger.info(f"Found {len(events)} message events to check") + + for event in events: + # Calculate what the total should be + base_amount = event.base_amount + fee_platform_amount = event.fee_platform_amount or Decimal("0") + fee_agent_amount = event.fee_agent_amount or Decimal("0") + + # Calculate the correct total with 4 decimal places + correct_total = ( + base_amount + fee_platform_amount + fee_agent_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + # Check if there's a discrepancy + if event.total_amount != correct_total: + # Calculate the difference + difference = correct_total - event.total_amount + + logger.info( + f"Fixing event {event.id}: Current total={event.total_amount}, " + f"Correct total={correct_total}, Difference={difference}" + ) + + # 1. Update the event's total_amount and balance_after + event.total_amount = correct_total + if event.balance_after is not None: + event.balance_after = (event.balance_after - difference).quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + + # 2. Find and update the corresponding transaction + tx_stmt = select(CreditTransactionTable).where( + CreditTransactionTable.event_id == event.id, + CreditTransactionTable.tx_type == TransactionType.PAY, + ) + tx_result = await session.execute(tx_stmt) + transaction = tx_result.scalar_one_or_none() + + if transaction: + transaction.change_amount = correct_total + logger.info( + f"Updated transaction {transaction.id} change_amount to {correct_total}" + ) + else: + logger.warning(f"No PAY transaction found for event {event.id}") + + # 3. 
Update the user's account + # If the correct total is higher than the original, we need to subtract from the account + # If the correct total is lower than the original, we need to add to the account + account_stmt = select(CreditAccountTable).where( + CreditAccountTable.id == event.account_id + ) + account_result = await session.execute(account_stmt) + account = account_result.scalar_one_or_none() + + if account: + # If the event's total increased, the account balance should decrease + # If the event's total decreased, the account balance should increase + if event.credit_type == CreditType.PERMANENT: + account.credits = (account.credits - difference).quantize( + FOURPLACES, rounding=ROUND_HALF_UP + ) + elif event.credit_type == CreditType.FREE: + account.free_credits = ( + account.free_credits - difference + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + elif event.credit_type == CreditType.REWARD: + account.reward_credits = ( + account.reward_credits - difference + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + logger.info( + f"Updated account {account.id} balance: " + f"credits={account.credits}, " + f"free_credits={account.free_credits}, " + f"reward_credits={account.reward_credits}" + ) + else: + logger.warning(f"No account found with ID {event.account_id}") + + fixed_count += 1 + + # Commit all changes + await session.commit() + + logger.info(f"Fixed {fixed_count} events with precision issues") + + +async def main(): + """Main entry point for the script.""" + # Initialize the database connection + await init_db(**config.db) + + # Run the fix credit precision function + await fix_credit_precision() + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/fix_invalid_wallets.py b/scripts/fix_invalid_wallets.py new file mode 100644 index 00000000..4b97ffff --- /dev/null +++ b/scripts/fix_invalid_wallets.py @@ -0,0 +1,217 @@ +#!/usr/bin/env python3 +""" +Script to fix agents with invalid CDP wallet addresses. + +This script: +1. Finds agents with cdp_wallet_address that don't exist in CDP +2. Clears their wallet data so they can create new wallets on-demand +3. 
Provides detailed reporting on what was fixed + +Usage: + python scripts/fix_invalid_wallets.py [--dry-run] [--agent-id AGENT_ID] +""" + +import asyncio +import json +import logging +import os +import sys +from typing import List, Optional + +# Add the parent directory to the path to import intentkit modules +sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from coinbase_agentkit import ( + CdpEvmServerWalletProvider, + CdpEvmServerWalletProviderConfig, +) +from sqlalchemy import select, update + +from intentkit.config.config import config +from intentkit.models.agent_data import AgentDataTable +from intentkit.models.db import get_session, init_db + +# Set up logging +logging.basicConfig(level=logging.INFO) +logger = logging.getLogger(__name__) + + +class WalletFixer: + """Fixes agents with invalid CDP wallet addresses.""" + + def __init__(self, dry_run: bool = False): + self.dry_run = dry_run + self.stats = { + "total_agents": 0, + "agents_with_addresses": 0, + "invalid_addresses": 0, + "fixed_agents": 0, + "failed_fixes": 0, + } + + async def check_wallet_exists( + self, address: str, network_id: str = "base-mainnet" + ) -> bool: + """Check if a wallet address can be initialized with CDP.""" + try: + # Try to initialize the wallet provider with the address + wallet_config = CdpEvmServerWalletProviderConfig( + api_key_id=config.cdp_api_key_id, + api_key_secret=config.cdp_api_key_secret, + network_id=network_id, + address=address, + wallet_secret=config.cdp_wallet_secret, + ) + + # Try to create the wallet provider - this will fail if address doesn't exist + CdpEvmServerWalletProvider(wallet_config) # Just try to initialize it + return True + + except Exception as e: + error_msg = str(e).lower() + if "not found" in error_msg or "404" in error_msg: + return False + else: + # For other errors, assume it exists to be safe + return True + + async def find_agents_with_invalid_wallets( + self, agent_id: Optional[str] = None + ) -> List[dict]: + """Find agents with CDP wallet addresses that don't exist.""" + invalid_agents = [] + + async with get_session() as session: + if agent_id: + # Check specific agent + result = await session.execute( + select(AgentDataTable).where(AgentDataTable.id == agent_id) + ) + agents = result.scalars().all() + else: + # Check all agents + result = await session.execute(select(AgentDataTable)) + agents = result.scalars().all() + + for agent in agents: + self.stats["total_agents"] += 1 + + # Extract wallet address from wallet data + wallet_address = None + if agent.cdp_wallet_data: + try: + wallet_data = json.loads(agent.cdp_wallet_data) + wallet_address = wallet_data.get("default_address_id") + except (json.JSONDecodeError, AttributeError): + pass + + if not wallet_address: + continue + + self.stats["agents_with_addresses"] += 1 + + # Check if wallet exists in CDP + exists = await self.check_wallet_exists(wallet_address) + + if not exists: + logger.info(f"Found invalid wallet: {agent.id} -> {wallet_address}") + self.stats["invalid_addresses"] += 1 + invalid_agents.append( + { + "id": agent.id, + "cdp_wallet_address": wallet_address, + "cdp_wallet_data": agent.cdp_wallet_data, + "created_at": agent.created_at, + } + ) + + return invalid_agents + + async def fix_agent_wallet(self, agent_info: dict) -> bool: + """Fix a single agent by clearing invalid wallet data.""" + agent_id = agent_info["id"] + + try: + if not self.dry_run: + async with get_session() as session: + # Clear the invalid wallet data + await session.execute( + 
update(AgentDataTable) + .where(AgentDataTable.id == agent_id) + .values(cdp_wallet_data=None) + ) + await session.commit() + + logger.info(f"Fixed: {agent_id}") + else: + logger.info(f"[DRY RUN] Would fix: {agent_id}") + + return True + + except Exception as e: + logger.error(f"ERROR fixing {agent_id}: {e}") + return False + + async def run_fix(self, agent_id: Optional[str] = None): + """Run the wallet fix process.""" + + # Find agents with invalid wallets + invalid_agents = await self.find_agents_with_invalid_wallets(agent_id) + + if not invalid_agents: + logger.info("No agents with invalid wallet addresses found") + return + + # Fix each agent + for agent_info in invalid_agents: + success = await self.fix_agent_wallet(agent_info) + + if success: + self.stats["fixed_agents"] += 1 + else: + self.stats["failed_fixes"] += 1 + + self.print_summary() + + def print_summary(self): + """Log fix statistics.""" + + logger.info( + f"Summary: {self.stats['invalid_addresses']} invalid addresses found, {self.stats['fixed_agents']} fixed" + ) + if self.dry_run: + logger.info("*** DRY RUN - NO CHANGES MADE ***") + + +async def main(): + """Main entry point.""" + import argparse + + parser = argparse.ArgumentParser( + description="Fix agents with invalid CDP wallet addresses" + ) + parser.add_argument( + "--dry-run", + action="store_true", + help="Show what would be fixed without making changes", + ) + parser.add_argument("--agent-id", type=str, help="Fix only a specific agent by ID") + + args = parser.parse_args() + + # Initialize database connection + await init_db( + host=config.db.get("host"), + username=config.db.get("username"), + password=config.db.get("password"), + dbname=config.db.get("dbname"), + port=config.db.get("port", "5432"), + auto_migrate=False, + ) + + fixer = WalletFixer(dry_run=args.dry_run) + await fixer.run_fix(agent_id=args.agent_id) + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/generate_skill_csv.py b/scripts/generate_skill_csv.py new file mode 100644 index 00000000..e5e12454 --- /dev/null +++ b/scripts/generate_skill_csv.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +""" +Generate initial_skills.csv for SkillTable initialization. 
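+
+The script scans each category directory under skills/ for skill classes that
+declare a `name: str = "..."` attribute (the cdp category is read from its
+SkillStates class instead) and writes the rows to skills.csv in the project
+root.
+
+Usage (illustrative, from the repository root):
+
+    python scripts/generate_skill_csv.py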
+""" + +import csv +import re +import sys +from pathlib import Path + +sys.path.insert(0, str(Path(__file__).parent.parent)) + +# Paths +PROJECT_ROOT = Path(__file__).resolve().parent.parent +SKILLS_DIR = PROJECT_ROOT / "skills" +CDP_INIT = SKILLS_DIR / "cdp" / "__init__.py" +CSV_PATH = PROJECT_ROOT / "skills.csv" + +# Categories with platform-provided keys +PLATFORM_KEY_CATEGORIES = [ + "acolyt", + "allora", + "elfa", + "heurist", + "enso", + "dapplooker", + "twitter", + "cdp", +] + +# Default values for fields (excluding created_at, updated_at) +DEFAULTS = { + "price_tier": 1, + "price_tier_self_key": 1, + "rate_limit_count": "", + "rate_limit_minutes": "", + "key_provider_agent_owner": False, + "key_provider_platform": False, + "key_provider_free": False, + "author": "", +} + + +def _get_skill_rows(): + """Collect skill name and category pairs.""" + rows = [] + found_skills = 0 + processed_skills = set() # Avoid duplicates + + # Process non-cdp categories + for category_dir in SKILLS_DIR.iterdir(): + if ( + not category_dir.is_dir() + or category_dir.name.startswith("__") + or category_dir.name == "cdp" # cdp is handled separately + ): + continue + + category = category_dir.name + print(f"Processing category: {category}") + + # Process each Python file in the category directory + for file in category_dir.glob("*.py"): + if file.name.startswith("__"): + continue + + # Read the file content to extract skill names directly from the source code + try: + content = file.read_text() + + # Look for class definitions that might be skills + class_pattern = re.compile(r"class\s+([^\(\s]+)[^\n]*:") + class_matches = class_pattern.finditer(content) + + for match in class_matches: + class_name = match.group(1) + + # Skip base classes + if class_name.endswith("BaseTool"): + continue + + # Find the class content - everything after the class definition until the next class or end of file + class_start = match.start() + next_class = class_pattern.search(content, match.end()) + if next_class: + class_end = next_class.start() + class_content = content[class_start:class_end] + else: + class_content = content[class_start:] + + # Look for name attribute in the class definition + # Pattern: name: str = "skill_name" + name_pattern = re.compile( + r"\s+name\s*:\s*str\s*=\s*[\'\"]([^\'\"]*)[\'\"](\s|$)" + ) + name_match = name_pattern.search(class_content) + if name_match: + skill_name = name_match.group(1) + + # Skip if we've already processed this skill name + if skill_name in processed_skills: + continue + + # Add to our results + print( + f" Found skill: {class_name} with name='{skill_name}', category={category}" + ) + rows.append((skill_name, category)) + processed_skills.add(skill_name) + found_skills += 1 + else: + print(f" Skipping {class_name}: couldn't find name attribute") + except Exception as e: + print(f"Warning: error processing {file.name}: {e}", file=sys.stderr) + continue + # cdp special handling + lines = CDP_INIT.read_text().splitlines() + in_states = False + for line in lines: + if line.strip().startswith("class SkillStates"): + in_states = True + continue + if in_states: + stripped = line.strip() + if not stripped: + break + if stripped.startswith("#"): + continue + if ":" in stripped: + state_name = stripped.split(":", 1)[0].strip() + rows.append((state_name, "cdp")) + print( + f"Total skills found: {found_skills} (plus {len(rows) - found_skills} from cdp)" + ) + return rows + + +def _check_category_config(category): + """Check if a category's __init__.py contains a Config with api_key 
field.""" + init_path = SKILLS_DIR / category / "__init__.py" + if not init_path.exists(): + return False + + try: + content = init_path.read_text() + # Look for Config class with api_key field + return re.search(r"class\s+Config\b.*?api_key", content, re.DOTALL) is not None + except Exception: + return False + + +def main(): + rows = _get_skill_rows() + + # Process each category to determine key provider settings + category_settings = {} + for _, category in rows: + if category not in category_settings: + # Check if category needs agent owner key + key_provider_agent_owner = _check_category_config(category) + + # Check if category has platform-provided keys + key_provider_platform = category in PLATFORM_KEY_CATEGORIES + + # Set free flag if neither agent owner nor platform provides keys + key_provider_free = not (key_provider_agent_owner or key_provider_platform) + + category_settings[category] = { + "key_provider_agent_owner": key_provider_agent_owner, + "key_provider_platform": key_provider_platform, + "key_provider_free": key_provider_free, + } + + print( + f"Category {category} settings: agent_owner={key_provider_agent_owner}, " + + f"platform={key_provider_platform}, free={key_provider_free}" + ) + + # Write CSV with appropriate settings for each category + with CSV_PATH.open("w", newline="") as f: + writer = csv.writer(f) + header = ["name", "category"] + list(DEFAULTS.keys()) + writer.writerow(header) + + for name, category in sorted(rows, key=lambda x: (x[1], x[0])): + # Start with default values + values = dict(DEFAULTS) + + # Override with category-specific settings + if category in category_settings: + values.update(category_settings[category]) + + # Write the row + writer.writerow([name, category] + [values[k] for k in DEFAULTS.keys()]) + + print(f"Generated {CSV_PATH}") + + +if __name__ == "__main__": + main() diff --git a/scripts/import.sh b/scripts/import.sh new file mode 100755 index 00000000..0c220503 --- /dev/null +++ b/scripts/import.sh @@ -0,0 +1,39 @@ +#!/bin/bash + +# If you want to visit a remote server, modify it +BASE_URL="http://localhost:8000" + +# Token is not required in local, if you set the ADMIN_AUTH_ENABLED and ADMIN_JWT_SECRET in a remote server, you put key here +TOKEN="" + +print_usage() { + echo "Usage: sh import.sh AGENT_ID" + exit 1 +} + +# Check if correct number of arguments provided +if [ $# -ne 1 ]; then + print_usage +fi + +AGENT_ID="$1" +YAML_FILE="${AGENT_ID}.yaml" +if [ ! -f "${YAML_FILE}" ]; then + echo "Error: File ${YAML_FILE} does not exist!" + exit 1 +fi + +echo "Importing agent ${AGENT_ID}" +# Using the provided import command +HTTP_STATUS=$(curl -s -w "%{http_code}" -X PUT -H "Authorization: Bearer ${TOKEN}" -H "Content-Type: multipart/form-data" \ + -F "file=@${YAML_FILE}" "${BASE_URL}/agents/${AGENT_ID}/import" -o "${AGENT_ID}.response") + +if [ $HTTP_STATUS -ge 400 ]; then + echo "Import failed with HTTP status ${HTTP_STATUS}" + cat "${AGENT_ID}.response" + rm "${AGENT_ID}.response" + exit 1 +fi + +rm "${AGENT_ID}.response" +echo "Import succeeded" diff --git a/scripts/migrate_base_amounts.py b/scripts/migrate_base_amounts.py new file mode 100644 index 00000000..15a879fb --- /dev/null +++ b/scripts/migrate_base_amounts.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python3 +""" +Migration script to populate base_free_amount, base_reward_amount, and base_permanent_amount +for existing credit events where these fields are all zero. 
+ +This script uses the same algorithm as the expense_skill function to calculate the base amounts +by subtracting platform, agent, and dev fees from the respective credit type amounts. +""" + +import asyncio +import logging +from decimal import ROUND_HALF_UP, Decimal +from typing import List + +from sqlalchemy import and_, select, update +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.core.credit import FOURPLACES +from intentkit.models.credit import CreditEventTable +from intentkit.models.db import get_session, init_db + +# Configure logging +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + + +def calculate_base_amounts( + event: CreditEventTable, +) -> tuple[Decimal, Decimal, Decimal]: + """ + Calculate base amounts using the same algorithm as expense_skill function. + + Args: + event: CreditEventTable instance + + Returns: + Tuple of (base_free_amount, base_reward_amount, base_permanent_amount) + """ + # Get the credit type amounts + free_amount = event.free_amount or Decimal("0") + reward_amount = event.reward_amount or Decimal("0") + permanent_amount = event.permanent_amount or Decimal("0") + + # Get fee amounts by credit type + fee_platform_free_amount = event.fee_platform_free_amount or Decimal("0") + fee_platform_reward_amount = event.fee_platform_reward_amount or Decimal("0") + fee_platform_permanent_amount = event.fee_platform_permanent_amount or Decimal("0") + + fee_agent_free_amount = event.fee_agent_free_amount or Decimal("0") + fee_agent_reward_amount = event.fee_agent_reward_amount or Decimal("0") + fee_agent_permanent_amount = event.fee_agent_permanent_amount or Decimal("0") + + fee_dev_free_amount = event.fee_dev_free_amount or Decimal("0") + fee_dev_reward_amount = event.fee_dev_reward_amount or Decimal("0") + fee_dev_permanent_amount = event.fee_dev_permanent_amount or Decimal("0") + + # Calculate base amounts by subtracting all fees from respective credit type amounts + base_free_amount = ( + free_amount + - fee_platform_free_amount + - fee_agent_free_amount + - fee_dev_free_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + base_reward_amount = ( + reward_amount + - fee_platform_reward_amount + - fee_agent_reward_amount + - fee_dev_reward_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + base_permanent_amount = ( + permanent_amount + - fee_platform_permanent_amount + - fee_agent_permanent_amount + - fee_dev_permanent_amount + ).quantize(FOURPLACES, rounding=ROUND_HALF_UP) + + return base_free_amount, base_reward_amount, base_permanent_amount + + +async def get_events_to_migrate( + session: AsyncSession, last_id: str, batch_size: int = 1000 +) -> List[CreditEventTable]: + """ + Get credit events that need migration using cursor-based pagination. 
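+
+    Example (illustrative cursor loop, mirroring main() below):
+
+        last_id = ""
+        while True:
+            batch = await get_events_to_migrate(session, last_id, 1000)
+            if not batch:
+                break
+            last_id = batch[-1].id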
+ + Args: + session: Database session + last_id: Last processed record ID for cursor-based pagination + batch_size: Number of records to process in each batch + + Returns: + List of CreditEventTable instances that need migration + """ + stmt = ( + select(CreditEventTable) + .where( + and_( + CreditEventTable.base_free_amount == Decimal("0"), + CreditEventTable.base_reward_amount == Decimal("0"), + CreditEventTable.base_permanent_amount == Decimal("0"), + CreditEventTable.id > last_id if last_id else True, + ) + ) + .order_by(CreditEventTable.id) + .limit(batch_size) + ) + + result = await session.execute(stmt) + return result.scalars().all() + + +async def migrate_batch(session: AsyncSession, events: List[CreditEventTable]) -> int: + """ + Migrate a batch of credit events using bulk updates for better performance. + + Args: + session: Database session + events: List of events to migrate + + Returns: + Number of events successfully migrated + """ + updates = [] + failed_count = 0 + + # Prepare updates for all events + for event in events: + try: + # Calculate the correct base amounts + ( + base_free_amount, + base_reward_amount, + base_permanent_amount, + ) = calculate_base_amounts(event) + + # Prepare update data + updates.append( + { + "id": event.id, + "base_free_amount": base_free_amount, + "base_reward_amount": base_reward_amount, + "base_permanent_amount": base_permanent_amount, + } + ) + + except Exception as e: + logger.error(f"Error calculating base amounts for event {event.id}: {e}") + failed_count += 1 + continue + + # Apply bulk updates + successful_count = 0 + if updates: + try: + # Use bulk update for better performance + for update_data in updates: + event_id = update_data.pop("id") + stmt = ( + update(CreditEventTable) + .where(CreditEventTable.id == event_id) + .values(**update_data) + ) + await session.execute(stmt) + + await session.commit() + successful_count = len(updates) + logger.info(f"Successfully migrated {successful_count} events") + + except Exception as e: + logger.error(f"Error committing batch updates: {e}") + await session.rollback() + return 0 + + if failed_count > 0: + logger.warning(f"Failed to process {failed_count} events in this batch") + + return successful_count + + +async def get_total_count(session: AsyncSession) -> int: + """ + Get total count of events that need migration. + + Args: + session: Database session + + Returns: + Total count of events to migrate + """ + from sqlalchemy import func + + stmt = select(func.count(CreditEventTable.id)).where( + and_( + CreditEventTable.base_free_amount == Decimal("0"), + CreditEventTable.base_reward_amount == Decimal("0"), + CreditEventTable.base_permanent_amount == Decimal("0"), + ) + ) + + result = await session.execute(stmt) + return result.scalar() or 0 + + +async def main(): + """ + Main migration function using cursor-based pagination. + """ + logger.info("Starting base amounts migration...") + + # Initialize database connection + await init_db(**config.db) + + async with get_session() as session: + # Get total count first + total_count = await get_total_count(session) + logger.info(f"Found {total_count} events to migrate") + + if total_count == 0: + logger.info("No events need migration. 
Exiting.") + return + + # Process in batches using cursor-based pagination + batch_size = 1000 + total_migrated = 0 + last_id = "" + batch_number = 1 + + while True: + # Get next batch using cursor-based pagination + events = await get_events_to_migrate(session, last_id, batch_size) + + if not events: + logger.info("No more events to migrate") + break + + logger.info( + f"Processing batch {batch_number} of {len(events)} events, starting from ID {events[0].id}..." + ) + + # Update cursor to the last processed record's ID + last_id = events[-1].id + + # Migrate the batch + migrated_count = await migrate_batch(session, events) + total_migrated += migrated_count + + logger.info( + f"Progress: {total_migrated}/{total_count} events migrated ({(total_migrated / total_count) * 100:.1f}%)" + ) + + # If we migrated fewer events than the batch size, log warning + if migrated_count < len(events): + logger.warning( + f"Some events in batch failed to migrate: {migrated_count}/{len(events)} successful" + ) + + batch_number += 1 + + # Small delay to avoid overwhelming the database + await asyncio.sleep(0.1) + + logger.info(f"Migration completed. Total events migrated: {total_migrated}") + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/migrate_credit_account_statistics.py b/scripts/migrate_credit_account_statistics.py new file mode 100644 index 00000000..23038a6c --- /dev/null +++ b/scripts/migrate_credit_account_statistics.py @@ -0,0 +1,271 @@ +#!/usr/bin/env python3 +""" +Migration script to populate new statistics fields in credit_accounts table. + +This script calculates and populates the following fields based on transaction history: +- total_income, total_free_income, total_reward_income, total_permanent_income +- total_expense, total_free_expense, total_reward_expense, total_permanent_expense + +The script locks three tables (credit_accounts, credit_transactions, credit_events) +for each record to prevent interference from running programs. +""" + +import asyncio +import logging +from decimal import Decimal +from typing import Dict, List + +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.models.db import get_session, init_db + +# Configure logging +logging.basicConfig( + level=logging.INFO, + format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", +) +logger = logging.getLogger(__name__) + + +async def create_backup_table(session: AsyncSession) -> None: + """Create a backup of the credit_accounts table before migration.""" + backup_table_name = "credit_accounts_backup_statistics_migration" + + # Check if backup table already exists + check_query = text( + "SELECT EXISTS (SELECT FROM information_schema.tables WHERE table_name = :table_name)" + ) + exists = await session.scalar(check_query, {"table_name": backup_table_name}) + + if exists: + logger.info( + f"Backup table {backup_table_name} already exists, skipping creation" + ) + return + + # Create backup table + backup_query = text( + f"CREATE TABLE {backup_table_name} AS SELECT * FROM credit_accounts" + ) + await session.execute(backup_query) + await session.commit() + logger.info(f"Created backup table: {backup_table_name}") + + +async def calculate_statistics_from_transactions( + session: AsyncSession, account_id: str +) -> Dict[str, Decimal]: + """Calculate statistics for a specific account from transaction history. 
+ + Args: + session: Database session with proper locks + account_id: ID of the credit account + + Returns: + Dictionary with calculated statistics + """ + # Lock tables to prevent concurrent modifications + await session.execute(text("LOCK TABLE credit_accounts IN EXCLUSIVE MODE")) + await session.execute(text("LOCK TABLE credit_transactions IN SHARE MODE")) + await session.execute(text("LOCK TABLE credit_events IN SHARE MODE")) + + # Query to calculate statistics from transactions + query = text(""" + SELECT + -- Income calculations (credit_debit = 'credit') + COALESCE(SUM(CASE WHEN credit_debit = 'credit' THEN change_amount ELSE 0 END), 0) as total_income, + COALESCE(SUM(CASE WHEN credit_debit = 'credit' THEN free_amount ELSE 0 END), 0) as total_free_income, + COALESCE(SUM(CASE WHEN credit_debit = 'credit' THEN reward_amount ELSE 0 END), 0) as total_reward_income, + COALESCE(SUM(CASE WHEN credit_debit = 'credit' THEN permanent_amount ELSE 0 END), 0) as total_permanent_income, + -- Expense calculations (credit_debit = 'debit') + COALESCE(SUM(CASE WHEN credit_debit = 'debit' THEN change_amount ELSE 0 END), 0) as total_expense, + COALESCE(SUM(CASE WHEN credit_debit = 'debit' THEN free_amount ELSE 0 END), 0) as total_free_expense, + COALESCE(SUM(CASE WHEN credit_debit = 'debit' THEN reward_amount ELSE 0 END), 0) as total_reward_expense, + COALESCE(SUM(CASE WHEN credit_debit = 'debit' THEN permanent_amount ELSE 0 END), 0) as total_permanent_expense + FROM credit_transactions + WHERE account_id = :account_id + """) + + result = await session.execute(query, {"account_id": account_id}) + row = result.fetchone() + + if not row: + # No transactions found, return zero values + return { + "total_income": Decimal("0"), + "total_free_income": Decimal("0"), + "total_reward_income": Decimal("0"), + "total_permanent_income": Decimal("0"), + "total_expense": Decimal("0"), + "total_free_expense": Decimal("0"), + "total_reward_expense": Decimal("0"), + "total_permanent_expense": Decimal("0"), + } + + return { + "total_income": Decimal(str(row.total_income)), + "total_free_income": Decimal(str(row.total_free_income)), + "total_reward_income": Decimal(str(row.total_reward_income)), + "total_permanent_income": Decimal(str(row.total_permanent_income)), + "total_expense": Decimal(str(row.total_expense)), + "total_free_expense": Decimal(str(row.total_free_expense)), + "total_reward_expense": Decimal(str(row.total_reward_expense)), + "total_permanent_expense": Decimal(str(row.total_permanent_expense)), + } + + +async def update_account_statistics( + session: AsyncSession, account_id: str, statistics: Dict[str, Decimal] +) -> bool: + """Update account statistics in the database. 
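+
+    Example (illustrative; commit/rollback stays with the caller):
+
+        if await update_account_statistics(session, account_id, statistics):
+            await session.commit()
+        else:
+            await session.rollback()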
+ + Args: + session: Database session with proper locks + account_id: ID of the credit account + statistics: Dictionary with calculated statistics + + Returns: + True if update was successful, False otherwise + """ + try: + update_query = text(""" + UPDATE credit_accounts + SET + total_income = :total_income, + total_free_income = :total_free_income, + total_reward_income = :total_reward_income, + total_permanent_income = :total_permanent_income, + total_expense = :total_expense, + total_free_expense = :total_free_expense, + total_reward_expense = :total_reward_expense, + total_permanent_expense = :total_permanent_expense, + updated_at = NOW() + WHERE id = :account_id + """) + + result = await session.execute( + update_query, {"account_id": account_id, **statistics} + ) + + if result.rowcount == 0: + logger.error(f"No account found with ID: {account_id}") + return False + + logger.info(f"Updated statistics for account {account_id}") + return True + + except Exception as e: + logger.error(f"Failed to update account {account_id}: {e}") + return False + + +async def process_single_account(account_id: str) -> bool: + """Process a single account with proper transaction management. + + Args: + account_id: ID of the credit account to process + + Returns: + True if processing was successful, False otherwise + """ + async with get_session() as session: + try: + # Calculate statistics from transactions (with table locks) + statistics = await calculate_statistics_from_transactions( + session, account_id + ) + + # Update account with calculated statistics + success = await update_account_statistics(session, account_id, statistics) + + if success: + await session.commit() + logger.info(f"Successfully processed account {account_id}") + return True + else: + await session.rollback() + logger.error(f"Failed to process account {account_id}") + return False + + except Exception as e: + await session.rollback() + logger.error(f"Error processing account {account_id}: {e}") + return False + + +async def get_all_account_ids() -> List[str]: + """Get credit account IDs that need migration (all 8 statistics fields are 0). 
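+
+    Example (illustrative):
+
+        account_ids = await get_all_account_ids()
+        logger.info(f"Found {len(account_ids)} accounts to process")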
+ + Returns: + List of account IDs that need statistics migration + """ + async with get_session() as session: + query = text(""" + SELECT id FROM credit_accounts + WHERE total_income = 0 + AND total_free_income = 0 + AND total_reward_income = 0 + AND total_permanent_income = 0 + AND total_expense = 0 + AND total_free_expense = 0 + AND total_reward_expense = 0 + AND total_permanent_expense = 0 + ORDER BY created_at + """) + result = await session.execute(query) + return [row.id for row in result.fetchall()] + + +async def main(): + """Main migration function.""" + logger.info("Starting credit account statistics migration") + + try: + # Initialize database connection + await init_db(**config.db) + + # Create backup table + async with get_session() as session: + await create_backup_table(session) + + # Get all account IDs + account_ids = await get_all_account_ids() + logger.info(f"Found {len(account_ids)} accounts to process") + + if not account_ids: + logger.info("No accounts found, migration complete") + return + + # Process each account + success_count = 0 + failure_count = 0 + + for i, account_id in enumerate(account_ids, 1): + logger.info(f"Processing account {i}/{len(account_ids)}: {account_id}") + + success = await process_single_account(account_id) + if success: + success_count += 1 + else: + failure_count += 1 + + # Log progress every 100 accounts + if i % 100 == 0: + logger.info(f"Progress: {i}/{len(account_ids)} accounts processed") + + logger.info( + f"Migration complete: {success_count} successful, {failure_count} failed" + ) + + if failure_count > 0: + logger.warning("Some accounts failed to migrate. Check logs for details.") + + except Exception as e: + logger.error(f"Migration failed with error: {e}") + raise + + +if __name__ == "__main__": + asyncio.run(main()) diff --git a/scripts/migrate_credit_transaction_amounts.py b/scripts/migrate_credit_transaction_amounts.py new file mode 100644 index 00000000..f7ae36e4 --- /dev/null +++ b/scripts/migrate_credit_transaction_amounts.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python3 +""" +Migrate credit transaction amounts based on TransactionType. + +This script updates the three new fields (free_amount, reward_amount, permanent_amount) +in CreditTransactionTable based on the transaction type and corresponding fields +from CreditEventTable. +""" + +import argparse +import asyncio +import logging + +from sqlalchemy import text +from sqlalchemy.ext.asyncio import AsyncSession + +from intentkit.config.config import config +from intentkit.models.db import get_session, init_db + +# Configure logging +logging.basicConfig( + level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" +) +logger = logging.getLogger(__name__) + + +async def count_inconsistent_transactions(session: AsyncSession) -> int: + """ + Count transactions where free_amount + reward_amount + permanent_amount != change_amount. + """ + query = text(""" + SELECT COUNT(*) as count + FROM credit_transactions ct + JOIN credit_events ce ON ct.event_id = ce.id + WHERE ROUND(COALESCE(ct.free_amount, 0) + COALESCE(ct.reward_amount, 0) + COALESCE(ct.permanent_amount, 0), 4) + != ROUND(COALESCE(ct.change_amount, 0), 4) + """) + result = await session.execute(query) + return result.scalar() + + +async def migrate_transaction_amounts( + session: AsyncSession, dry_run: bool = True +) -> None: + """ + Migrate transaction amounts based on TransactionType. 
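+
+    Mapping applied per tx_type (summarizing the update groups below):
+
+      - pay, recharge, refund, adjustment, refill, reward, event_reward,
+        recharge_bonus -> the event's free/reward/permanent amounts
+      - receive_base_llm/skill/memory/voice/knowledge -> the event's
+        base_free/base_reward/base_permanent amounts
+      - receive_fee_dev, receive_fee_agent, receive_fee_platform -> the
+        matching fee_*_free/reward/permanent amounts
+
+    Only rows whose three parts do not sum to change_amount are updated;
+    runs as a dry-run preview unless dry_run is False.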
+ """ + # Count records to migrate + total_records = await count_inconsistent_transactions(session) + logger.info(f"Found {total_records} transactions to migrate") + + if total_records == 0: + logger.info("No records to migrate") + return + + if dry_run: + logger.info("DRY RUN MODE - No actual changes will be made") + # Preview first 10 records that would be updated + preview_query = text(""" + SELECT + ct.id, + ct.tx_type, + ct.change_amount, + ct.free_amount as current_free, + ct.reward_amount as current_reward, + ct.permanent_amount as current_permanent, + CASE + WHEN ct.tx_type IN ('pay', 'recharge', 'refund', 'adjustment', 'refill', 'reward', 'event_reward', 'recharge_bonus') THEN + COALESCE(ce.free_amount, 0) + WHEN ct.tx_type IN ('receive_base_llm', 'receive_base_skill', 'receive_base_memory', 'receive_base_voice', 'receive_base_knowledge') THEN + COALESCE(ce.base_free_amount, 0) + WHEN ct.tx_type = 'receive_fee_dev' THEN + COALESCE(ce.fee_dev_free_amount, 0) + WHEN ct.tx_type = 'receive_fee_agent' THEN + COALESCE(ce.fee_agent_free_amount, 0) + WHEN ct.tx_type = 'receive_fee_platform' THEN + COALESCE(ce.fee_platform_free_amount, 0) + ELSE 0 + END as new_free, + CASE + WHEN ct.tx_type IN ('pay', 'recharge', 'refund', 'adjustment', 'refill', 'reward', 'event_reward', 'recharge_bonus') THEN + COALESCE(ce.reward_amount, 0) + WHEN ct.tx_type IN ('receive_base_llm', 'receive_base_skill', 'receive_base_memory', 'receive_base_voice', 'receive_base_knowledge') THEN + COALESCE(ce.base_reward_amount, 0) + WHEN ct.tx_type = 'receive_fee_dev' THEN + COALESCE(ce.fee_dev_reward_amount, 0) + WHEN ct.tx_type = 'receive_fee_agent' THEN + COALESCE(ce.fee_agent_reward_amount, 0) + WHEN ct.tx_type = 'receive_fee_platform' THEN + COALESCE(ce.fee_platform_reward_amount, 0) + ELSE 0 + END as new_reward, + CASE + WHEN ct.tx_type IN ('pay', 'recharge', 'refund', 'adjustment', 'refill', 'reward', 'event_reward', 'recharge_bonus') THEN + COALESCE(ce.permanent_amount, 0) + WHEN ct.tx_type IN ('receive_base_llm', 'receive_base_skill', 'receive_base_memory', 'receive_base_voice', 'receive_base_knowledge') THEN + COALESCE(ce.base_permanent_amount, 0) + WHEN ct.tx_type = 'receive_fee_dev' THEN + COALESCE(ce.fee_dev_permanent_amount, 0) + WHEN ct.tx_type = 'receive_fee_agent' THEN + COALESCE(ce.fee_agent_permanent_amount, 0) + WHEN ct.tx_type = 'receive_fee_platform' THEN + COALESCE(ce.fee_platform_permanent_amount, 0) + ELSE 0 + END as new_permanent + FROM credit_transactions ct + JOIN credit_events ce ON ct.event_id = ce.id + WHERE ROUND(COALESCE(ct.free_amount, 0) + COALESCE(ct.reward_amount, 0) + COALESCE(ct.permanent_amount, 0), 4) + != ROUND(COALESCE(ct.change_amount, 0), 4) + LIMIT 10 + """) + result = await session.execute(preview_query) + records = result.fetchall() + + logger.info("Preview of records to be updated:") + for record in records: + logger.info( + f"ID: {record.id}, Type: {record.tx_type}, Change: {record.change_amount}, " + f"Free: {record.current_free} -> {record.new_free}, " + f"Reward: {record.current_reward} -> {record.new_reward}, " + f"Permanent: {record.current_permanent} -> {record.new_permanent}" + ) + + logger.info( + f"Total {total_records} records would be updated. Use --execute to perform actual migration." 
+ ) + return + + # Perform actual migration with multiple UPDATE statements for different transaction types + logger.info("Starting actual migration...") + + # Group 1: PAY, RECHARGE, REFUND, ADJUSTMENT, REFILL, REWARD, EVENT_REWARD, RECHARGE_BONUS + # These map to event's free_amount, reward_amount, permanent_amount + update_query_group1 = text(""" + UPDATE credit_transactions + SET + free_amount = COALESCE(ce.free_amount, 0), + reward_amount = COALESCE(ce.reward_amount, 0), + permanent_amount = COALESCE(ce.permanent_amount, 0) + FROM credit_events ce + WHERE credit_transactions.event_id = ce.id + AND credit_transactions.tx_type IN ('pay', 'recharge', 'refund', 'adjustment', 'refill', 'reward', 'event_reward', 'recharge_bonus') + AND ROUND(COALESCE(credit_transactions.free_amount, 0) + COALESCE(credit_transactions.reward_amount, 0) + COALESCE(credit_transactions.permanent_amount, 0), 4) + != ROUND(COALESCE(credit_transactions.change_amount, 0), 4) + """) + + # Group 2: RECEIVE_BASE_* types + # These map to event's base_free_amount, base_reward_amount, base_permanent_amount + update_query_group2 = text(""" + UPDATE credit_transactions + SET + free_amount = COALESCE(ce.base_free_amount, 0), + reward_amount = COALESCE(ce.base_reward_amount, 0), + permanent_amount = COALESCE(ce.base_permanent_amount, 0) + FROM credit_events ce + WHERE credit_transactions.event_id = ce.id + AND credit_transactions.tx_type IN ('receive_base_llm', 'receive_base_skill', 'receive_base_memory', 'receive_base_voice', 'receive_base_knowledge') + AND ROUND(COALESCE(credit_transactions.free_amount, 0) + COALESCE(credit_transactions.reward_amount, 0) + COALESCE(credit_transactions.permanent_amount, 0), 4) + != ROUND(COALESCE(credit_transactions.change_amount, 0), 4) + """) + + # Group 3: RECEIVE_FEE_DEV + # Maps to event's fee_dev_free_amount, fee_dev_reward_amount, fee_dev_permanent_amount + update_query_group3 = text(""" + UPDATE credit_transactions + SET + free_amount = COALESCE(ce.fee_dev_free_amount, 0), + reward_amount = COALESCE(ce.fee_dev_reward_amount, 0), + permanent_amount = COALESCE(ce.fee_dev_permanent_amount, 0) + FROM credit_events ce + WHERE credit_transactions.event_id = ce.id + AND credit_transactions.tx_type = 'receive_fee_dev' + AND ROUND(COALESCE(credit_transactions.free_amount, 0) + COALESCE(credit_transactions.reward_amount, 0) + COALESCE(credit_transactions.permanent_amount, 0), 4) + != ROUND(COALESCE(credit_transactions.change_amount, 0), 4) + """) + + # Group 4: RECEIVE_FEE_AGENT + # Maps to event's fee_agent_free_amount, fee_agent_reward_amount, fee_agent_permanent_amount + update_query_group4 = text(""" + UPDATE credit_transactions + SET + free_amount = COALESCE(ce.fee_agent_free_amount, 0), + reward_amount = COALESCE(ce.fee_agent_reward_amount, 0), + permanent_amount = COALESCE(ce.fee_agent_permanent_amount, 0) + FROM credit_events ce + WHERE credit_transactions.event_id = ce.id + AND credit_transactions.tx_type = 'receive_fee_agent' + AND ROUND(COALESCE(credit_transactions.free_amount, 0) + COALESCE(credit_transactions.reward_amount, 0) + COALESCE(credit_transactions.permanent_amount, 0), 4) + != ROUND(COALESCE(credit_transactions.change_amount, 0), 4) + """) + + # Group 5: RECEIVE_FEE_PLATFORM + # Maps to event's fee_platform_free_amount, fee_platform_reward_amount, fee_platform_permanent_amount + update_query_group5 = text(""" + UPDATE credit_transactions + SET + free_amount = COALESCE(ce.fee_platform_free_amount, 0), + reward_amount = COALESCE(ce.fee_platform_reward_amount, 0), + 
permanent_amount = COALESCE(ce.fee_platform_permanent_amount, 0)
+        FROM credit_events ce
+        WHERE credit_transactions.event_id = ce.id
+        AND credit_transactions.tx_type = 'receive_fee_platform'
+        AND ROUND(COALESCE(credit_transactions.free_amount, 0) + COALESCE(credit_transactions.reward_amount, 0) + COALESCE(credit_transactions.permanent_amount, 0), 4)
+            != ROUND(COALESCE(credit_transactions.change_amount, 0), 4)
+    """)
+
+    # Execute all update queries
+    queries = [
+        ("Group 1 (pay, recharge, etc.)", update_query_group1),
+        ("Group 2 (receive_base_*)", update_query_group2),
+        ("Group 3 (receive_fee_dev)", update_query_group3),
+        ("Group 4 (receive_fee_agent)", update_query_group4),
+        ("Group 5 (receive_fee_platform)", update_query_group5),
+    ]
+
+    total_updated = 0
+    for group_name, query in queries:
+        result = await session.execute(query)
+        updated_count = result.rowcount
+        logger.info(f"{group_name}: Updated {updated_count} records")
+        total_updated += updated_count
+
+    await session.commit()
+    logger.info(
+        f"Migration completed successfully. Total updated: {total_updated} records"
+    )
+
+
+async def verify_migration(session: AsyncSession) -> None:
+    """
+    Verify the migration by checking for remaining inconsistencies.
+    """
+    inconsistent_count = await count_inconsistent_transactions(session)
+
+    if inconsistent_count == 0:
+        logger.info("✅ Migration verification passed: All records are now consistent")
+    else:
+        logger.warning(
+            f"⚠️ Migration verification found {inconsistent_count} records still inconsistent"
+        )
+        logger.warning(
+            "This may indicate data integrity issues that require manual review"
+        )
+
+
+async def main() -> None:
+    """
+    Main function to run the migration.
+    """
+    parser = argparse.ArgumentParser(
+        description="Migrate credit transaction amounts based on TransactionType"
+    )
+    parser.add_argument(
+        "--execute",
+        action="store_true",
+        help="Execute the migration (default is dry-run mode)",
+    )
+    args = parser.parse_args()
+
+    # Initialize database connection
+    await init_db(**config.db)
+
+    async with get_session() as session:
+        try:
+            await migrate_transaction_amounts(session, dry_run=not args.execute)
+            if args.execute:
+                await verify_migration(session)
+        except Exception as e:
+            logger.error(f"Migration failed: {e}")
+            await session.rollback()
+            raise
+
+
+if __name__ == "__main__":
+    asyncio.run(main()) diff --git a/scripts/sync_schema.py b/scripts/sync_schema.py new file mode 100644 index 00000000..54a0fcbe --- /dev/null +++ b/scripts/sync_schema.py @@ -0,0 +1,175 @@
+#!/usr/bin/env python
+
+"""Script to synchronize schema.json files with their corresponding Config classes."""
+
+import json
+from collections import OrderedDict
+from pathlib import Path
+
+# Root directory of the project
+ROOT_DIR = Path(__file__).parent.parent
+SKILLS_DIR = ROOT_DIR / "skills"
+
+
+def update_enabled_field(schema_path: Path, schema: dict) -> bool:
+    """Update the 'enabled' field in the schema.json file.
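+
+    The resulting property (as written by this function) looks like:
+
+        "enabled": {
+            "type": "boolean",
+            "title": "Enabled",
+            "description": "Whether this skill is enabled",
+            "default": false
+        }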
+ + Args: + schema_path: The path to the schema.json file + schema: The loaded schema dictionary + + Returns: + bool: True if changes were made, False otherwise + """ + changes_made = False + + # Check if 'enabled' field is in the schema + if "enabled" in schema.get("properties", {}): + # Update the default value to False if it's not already + if schema["properties"]["enabled"].get("default") is not False: + schema["properties"]["enabled"]["default"] = False + changes_made = True + print(f"Updated 'enabled' default value to False in {schema_path}") + else: + # Add the 'enabled' field to the schema properties + if "properties" not in schema: + schema["properties"] = {} + + schema["properties"]["enabled"] = { + "type": "boolean", + "title": "Enabled", + "description": "Whether this skill is enabled", + "default": False, + } + + # Update the required fields if it exists + if "required" in schema and "enabled" not in schema["required"]: + schema["required"].append("enabled") + elif "required" not in schema: + schema["required"] = ["enabled"] + + changes_made = True + print(f"Added 'enabled' field to {schema_path}") + + return changes_made + + +def update_states_field(schema_path: Path, schema: dict) -> bool: + """Update the 'states' field in the schema.json file. + + Args: + schema_path: The path to the schema.json file + schema: The loaded schema dictionary + + Returns: + bool: True if changes were made, False otherwise + """ + changes_made = False + + # Check if 'states' field exists in the schema + if "states" in schema.get("properties", {}): + states_schema = schema["properties"]["states"] + + # Ensure states has the correct structure + if "type" not in states_schema: + states_schema["type"] = "object" + changes_made = True + + if "description" not in states_schema: + skill_name = schema_path.parent.name.capitalize() + states_schema["description"] = ( + f"States for each {skill_name} skill (disabled, public, or private)" + ) + changes_made = True + + # Check if properties exists in states + if "properties" in states_schema: + # Update each state property to ensure it has the correct enum and default value + for state_name, state_props in states_schema["properties"].items(): + # Ensure enum values are correct + if "enum" not in state_props or set(state_props["enum"]) != { + "disabled", + "public", + "private", + }: + state_props["enum"] = ["disabled", "public", "private"] + changes_made = True + + # Add or update default value to "disabled" + if "default" not in state_props or state_props["default"] != "disabled": + state_props["default"] = "disabled" + changes_made = True + + return changes_made + + +def reorder_properties(schema: dict) -> bool: + """Reorder properties to place 'enabled' first. + + Args: + schema: The loaded schema dictionary + + Returns: + bool: True if changes were made, False otherwise + """ + changes_made = False + + if "properties" in schema and "enabled" in schema["properties"]: + # Create a new ordered dictionary with 'enabled' first + new_properties = OrderedDict() + + # Add enabled first + new_properties["enabled"] = schema["properties"]["enabled"] + + # Add all other properties + for key, value in schema["properties"].items(): + if key != "enabled": + new_properties[key] = value + + # Replace the properties with the reordered one + schema["properties"] = new_properties + changes_made = True + + return changes_made + + +def update_schema_json(schema_path: Path) -> None: + """Update the schema.json file with necessary changes. 
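+
+    Example (illustrative; "twitter" stands in for any skill category):
+
+        update_schema_json(SKILLS_DIR / "twitter" / "schema.json")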
+ + Args: + schema_path: The path to the schema.json file + """ + # Check if schema.json exists + if not schema_path.exists(): + print(f"No schema.json found at {schema_path}") + return + + # Load the existing schema + with open(schema_path, "r") as f: + schema = json.load(f) + + # Apply updates + enabled_changes = update_enabled_field(schema_path, schema) + states_changes = update_states_field(schema_path, schema) + order_changes = reorder_properties(schema) + + # Save the schema if changes were made + if enabled_changes or states_changes or order_changes: + with open(schema_path, "w") as f: + json.dump(schema, f, indent=2) + print(f"Updated schema.json at {schema_path}") + else: + print(f"No changes needed for {schema_path}") + + +def main(): + """Main function to synchronize all schema.json files.""" + # Find all schema.json files in the skills directory + schema_files = list(SKILLS_DIR.glob("*/schema.json")) + + for schema_path in schema_files: + update_schema_json(schema_path) + + +if __name__ == "__main__": + main() diff --git a/scripts/sync_states_schema.py b/scripts/sync_states_schema.py new file mode 100644 index 00000000..34d5e8a4 --- /dev/null +++ b/scripts/sync_states_schema.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python + +"""Script to synchronize states field in schema.json files with Config classes and update descriptions.""" + +import ast +import json +import os +from pathlib import Path +from typing import Dict, Optional, Set, Tuple + +# Root directory of the project +ROOT_DIR = Path(__file__).parent.parent +SKILLS_DIR = ROOT_DIR / "skills" + +# Folders to exclude from processing +EXCLUDED_FOLDERS = ["cdp", "goat", "defillama"] + + +def get_skill_states_from_file(skill_dir: Path) -> Optional[Set[str]]: + """Extract state names from the SkillStates class in __init__.py. + + Args: + skill_dir: The directory of the skill + + Returns: + A set of state names if found, None otherwise + """ + init_file = skill_dir / "__init__.py" + if not init_file.exists(): + print(f"No __init__.py found in {skill_dir}") + return None + + try: + # Read the file content + with open(init_file, "r") as f: + content = f.read() + + # Parse the file into an AST + tree = ast.parse(content) + + # Find the SkillStates class + for node in ast.walk(tree): + if isinstance(node, ast.ClassDef) and node.name == "SkillStates": + # Look for annotations in the class + states = set() + for child in node.body: + if isinstance(child, ast.AnnAssign) and isinstance( + child.target, ast.Name + ): + states.add(child.target.id) + return states + + print(f"No SkillStates class found in {init_file}") + return None + except Exception as e: + print(f"Error parsing {init_file}: {e}") + return None + + +def find_skill_classes(skill_dir: Path) -> Dict[str, Tuple[str, str]]: + """Find all skill classes in a skill directory and extract their descriptions. 
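+
+    Example of a returned mapping (illustrative names):
+
+        {"slacksendmessage": ("Send a message to a Slack channel.",
+                              "skills/slack/send_message.py")}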
+ + Args: + skill_dir: The directory of the skill + + Returns: + A dictionary mapping skill class names to (description, file_path) tuples + """ + skill_classes = {} + + # Walk through all Python files in the skill directory + for root, _, files in os.walk(skill_dir): + for file in files: + if file.endswith(".py") and file != "__init__.py": + file_path = Path(root) / file + try: + # Read the file content + with open(file_path, "r") as f: + content = f.read() + + # Parse the file into an AST + tree = ast.parse(content) + + # Find classes with a description attribute + for node in ast.walk(tree): + if isinstance(node, ast.ClassDef): + # Look for a description attribute in the class body + for child in node.body: + if ( + isinstance(child, ast.AnnAssign) + and isinstance(child.target, ast.Name) + and child.target.id == "description" + ): + # Check if it's a string assignment + if isinstance( + child.value, ast.Constant + ) and isinstance(child.value.value, str): + description = child.value.value.strip() + class_name = node.name.lower() + skill_classes[class_name] = ( + description, + str(file_path), + ) + elif isinstance(child, ast.Assign): + for target in child.targets: + if ( + isinstance(target, ast.Name) + and target.id == "description" + ): + # Check if it's a string assignment + if isinstance( + child.value, ast.Constant + ) and isinstance(child.value.value, str): + description = child.value.value.strip() + class_name = node.name.lower() + skill_classes[class_name] = ( + description, + str(file_path), + ) + # Handle multi-line string literals + elif isinstance(child.value, ast.JoinedStr): + description = "" + for value in child.value.values: + if isinstance(value, ast.Constant): + description += str(value.value) + class_name = node.name.lower() + skill_classes[class_name] = ( + description.strip(), + str(file_path), + ) + except Exception as e: + print(f"Error parsing {file_path}: {e}") + + return skill_classes + + +def map_state_to_skill_class( + state_name: str, skill_classes: Dict[str, Tuple[str, str]], skill_name: str +) -> Optional[str]: + """Map a state name to a skill class description. 
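+
+    Matching is attempted in order: direct (the CamelCased state name),
+    skill-name prefixed (e.g. state "send_message" in skill "slack" ->
+    "slacksendmessage"), then partial containment and last-word matches.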
+ + Args: + state_name: The name of the state + skill_classes: Dictionary of skill classes with their descriptions + skill_name: The name of the skill directory + + Returns: + The description of the skill class if found, None otherwise + """ + # Try direct mapping (state_name -> ClassName) + # Convert snake_case to CamelCase for class name matching + class_name = "".join(word.capitalize() for word in state_name.split("_")).lower() + if class_name in skill_classes: + return skill_classes[class_name][0] + + # Try with skill name prefix (e.g., "send_message" -> "SlackSendMessage") + prefixed_class_name = f"{skill_name}{class_name}".lower() + if prefixed_class_name in skill_classes: + return skill_classes[prefixed_class_name][0] + + # Try partial matching + for cls_name, (description, _) in skill_classes.items(): + # Check if the state name is contained in the class name (case insensitive) + if state_name.lower() in cls_name.lower(): + return description + + # Check if the last part of the state name matches the last part of the class name + state_parts = state_name.lower().split("_") + class_parts = cls_name.lower().replace(skill_name.lower(), "").split() + + if state_parts and class_parts and state_parts[-1] == class_parts[-1].lower(): + return description + + return None + + +def update_states_schema(schema_path: Path) -> None: + """Update the states field in schema.json files based on SkillStates class and skill descriptions. + + Args: + schema_path: The path to the schema.json file + """ + # Check if schema.json exists + if not schema_path.exists(): + print(f"No schema.json found at {schema_path}") + return + + # Get the skill directory + skill_dir = schema_path.parent + skill_name = skill_dir.name + + # Skip excluded folders + if skill_name in EXCLUDED_FOLDERS: + print(f"Skipping {skill_name} as it's in the excluded list") + return + + # Get the state names from the SkillStates class + class_states = get_skill_states_from_file(skill_dir) + if not class_states: + print(f"No states found for {skill_name}") + return + + # Find all skill classes and their descriptions + skill_classes = find_skill_classes(skill_dir) + if not skill_classes: + print(f"No skill classes found for {skill_name}") + + # Load the existing schema + with open(schema_path, "r") as f: + schema = json.load(f) + + changes_made = False + + # Check if 'states' field exists in the schema + if "states" not in schema.get("properties", {}): + # Add the states field + if "properties" not in schema: + schema["properties"] = {} + + schema["properties"]["states"] = { + "type": "object", + "properties": {}, + "description": f"States for each {skill_name.capitalize()} skill (disabled, public, or private)", + } + changes_made = True + + states_schema = schema["properties"]["states"] + + # Ensure states has the correct structure + if "type" not in states_schema: + states_schema["type"] = "object" + changes_made = True + + if "description" not in states_schema: + states_schema["description"] = ( + f"States for each {skill_name.capitalize()} skill (disabled, public, or private)" + ) + changes_made = True + + # Ensure properties exists in states + if "properties" not in states_schema: + states_schema["properties"] = {} + changes_made = True + + # Add missing states from the class to the schema + for state_name in class_states: + # Get the description from the skill class if available + description = map_state_to_skill_class(state_name, skill_classes, skill_name) + default_description = f"State for {state_name}" + + if state_name not in 
states_schema["properties"]: + # Add the state to the schema + states_schema["properties"][state_name] = { + "type": "string", + "title": " ".join(word.capitalize() for word in state_name.split("_")), + "enum": ["disabled", "public", "private"], + "description": description if description else default_description, + "default": "disabled", + } + changes_made = True + print(f"Added state '{state_name}' to {schema_path}") + else: + # Update existing state + state_props = states_schema["properties"][state_name] + + # Ensure enum values are correct + if "enum" not in state_props or set(state_props["enum"]) != { + "disabled", + "public", + "private", + }: + state_props["enum"] = ["disabled", "public", "private"] + changes_made = True + + # Add or update default value to "disabled" + if "default" not in state_props or state_props["default"] != "disabled": + state_props["default"] = "disabled" + changes_made = True + + # Update description if we have a better one from the skill class + if description and ( + "description" not in state_props + or state_props["description"] == default_description + ): + state_props["description"] = description + changes_made = True + print(f"Updated description for state '{state_name}' in {schema_path}") + + # Save the updated schema if changes were made + if changes_made: + with open(schema_path, "w") as f: + json.dump(schema, f, indent=2) + + print(f"Updated states schema at {schema_path}") + else: + print(f"No changes needed for {schema_path}") + + +def main(): + """Main function to synchronize all schema.json files.""" + # Find all schema.json files in the skills directory + schema_files = list(SKILLS_DIR.glob("*/schema.json")) + + for schema_path in schema_files: + update_states_schema(schema_path) + + +if __name__ == "__main__": + main() diff --git a/test_new_wording.py b/test_new_wording.py new file mode 100644 index 00000000..e69de29b diff --git a/test_user_info.py b/test_user_info.py new file mode 100644 index 00000000..e69de29b diff --git a/uv.lock b/uv.lock new file mode 100644 index 00000000..64ae577e --- /dev/null +++ b/uv.lock @@ -0,0 +1,3207 @@ +version = 1 +revision = 3 +requires-python = "==3.12.*" + +[manifest] +members = [ + "intentkit", + "intentkit-workspace", +] + +[[package]] +name = "aiofiles" +version = "24.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/03/a88171e277e8caa88a4c77808c20ebb04ba74cc4681bf1e9416c862de237/aiofiles-24.1.0.tar.gz", hash = "sha256:22a075c9e5a3810f0c2e48f3008c94d68c65d763b9b03857924c99e57355166c", size = 30247, upload-time = "2024-06-24T11:02:03.584Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/45/30bb92d442636f570cb5651bc661f52b610e2eec3f891a5dc3a4c3667db0/aiofiles-24.1.0-py3-none-any.whl", hash = "sha256:b4ec55f4195e3eb5d7abd1bf7e061763e864dd4954231fb8539a0ef8bb8260e5", size = 15896, upload-time = "2024-06-24T11:02:01.529Z" }, +] + +[[package]] +name = "aiogram" +version = "3.22.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiofiles" }, + { name = "aiohttp" }, + { name = "certifi" }, + { name = "magic-filter" }, + { name = "pydantic" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/92/2c/fe0845a97f6126357d20163ede8f76bc161f73122123c6548ca19d9a12c7/aiogram-3.22.0.tar.gz", hash = "sha256:c483f81e37aeea8e7f592c9bd14f6acc80d9b7a2698e296a45bf47ff60a98510", size = 1520414, upload-time = "2025-08-17T16:20:45.471Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ba/e5/9f9fae7b50ed502e33121dd62a7e9b076d00630eaafe1dd7fda64f7e8625/aiogram-3.22.0-py3-none-any.whl", hash = "sha256:1c6eceb078ff62cf0556a5466cf3e7e8119678c26cc56803b7ac5f73633934a8", size = 698216, upload-time = "2025-08-17T16:20:43.354Z" }, +] + +[[package]] +name = "aiohappyeyeballs" +version = "2.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/26/30/f84a107a9c4331c14b2b586036f40965c128aa4fee4dda5d3d51cb14ad54/aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558", size = 22760, upload-time = "2025-03-12T01:42:48.764Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0f/15/5bf3b99495fb160b63f95972b81750f18f7f4e02ad051373b669d17d44f2/aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8", size = 15265, upload-time = "2025-03-12T01:42:47.083Z" }, +] + +[[package]] +name = "aiohttp" +version = "3.11.16" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohappyeyeballs" }, + { name = "aiosignal" }, + { name = "attrs" }, + { name = "frozenlist" }, + { name = "multidict" }, + { name = "propcache" }, + { name = "yarl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/d9/1c4721d143e14af753f2bf5e3b681883e1f24b592c0482df6fa6e33597fa/aiohttp-3.11.16.tar.gz", hash = "sha256:16f8a2c9538c14a557b4d309ed4d0a7c60f0253e8ed7b6c9a2859a7582f8b1b8", size = 7676826, upload-time = "2025-04-02T02:17:44.74Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/38/100d01cbc60553743baf0fba658cb125f8ad674a8a771f765cdc155a890d/aiohttp-3.11.16-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:911a6e91d08bb2c72938bc17f0a2d97864c531536b7832abee6429d5296e5b27", size = 704881, upload-time = "2025-04-02T02:16:09.26Z" }, + { url = "https://files.pythonhosted.org/packages/21/ed/b4102bb6245e36591209e29f03fe87e7956e54cb604ee12e20f7eb47f994/aiohttp-3.11.16-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac13b71761e49d5f9e4d05d33683bbafef753e876e8e5a7ef26e937dd766713", size = 464564, upload-time = "2025-04-02T02:16:10.781Z" }, + { url = "https://files.pythonhosted.org/packages/3b/e1/a9ab6c47b62ecee080eeb33acd5352b40ecad08fb2d0779bcc6739271745/aiohttp-3.11.16-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fd36c119c5d6551bce374fcb5c19269638f8d09862445f85a5a48596fd59f4bb", size = 456548, upload-time = "2025-04-02T02:16:12.764Z" }, + { url = "https://files.pythonhosted.org/packages/80/ad/216c6f71bdff2becce6c8776f0aa32cb0fa5d83008d13b49c3208d2e4016/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d489d9778522fbd0f8d6a5c6e48e3514f11be81cb0a5954bdda06f7e1594b321", size = 1691749, upload-time = "2025-04-02T02:16:14.304Z" }, + { url = "https://files.pythonhosted.org/packages/bd/ea/7df7bcd3f4e734301605f686ffc87993f2d51b7acb6bcc9b980af223f297/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69a2cbd61788d26f8f1e626e188044834f37f6ae3f937bd9f08b65fc9d7e514e", size = 1736874, upload-time = "2025-04-02T02:16:16.538Z" }, + { url = "https://files.pythonhosted.org/packages/51/41/c7724b9c87a29b7cfd1202ec6446bae8524a751473d25e2ff438bc9a02bf/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd464ba806e27ee24a91362ba3621bfc39dbbb8b79f2e1340201615197370f7c", size = 1786885, upload-time = 
"2025-04-02T02:16:18.268Z" }, + { url = "https://files.pythonhosted.org/packages/86/b3/f61f8492fa6569fa87927ad35a40c159408862f7e8e70deaaead349e2fba/aiohttp-3.11.16-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ce63ae04719513dd2651202352a2beb9f67f55cb8490c40f056cea3c5c355ce", size = 1698059, upload-time = "2025-04-02T02:16:20.234Z" }, + { url = "https://files.pythonhosted.org/packages/ce/be/7097cf860a9ce8bbb0e8960704e12869e111abcd3fbd245153373079ccec/aiohttp-3.11.16-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09b00dd520d88eac9d1768439a59ab3d145065c91a8fab97f900d1b5f802895e", size = 1626527, upload-time = "2025-04-02T02:16:22.092Z" }, + { url = "https://files.pythonhosted.org/packages/1d/1d/aaa841c340e8c143a8d53a1f644c2a2961c58cfa26e7b398d6bf75cf5d23/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7f6428fee52d2bcf96a8aa7b62095b190ee341ab0e6b1bcf50c615d7966fd45b", size = 1644036, upload-time = "2025-04-02T02:16:23.707Z" }, + { url = "https://files.pythonhosted.org/packages/2c/88/59d870f76e9345e2b149f158074e78db457985c2b4da713038d9da3020a8/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:13ceac2c5cdcc3f64b9015710221ddf81c900c5febc505dbd8f810e770011540", size = 1685270, upload-time = "2025-04-02T02:16:25.874Z" }, + { url = "https://files.pythonhosted.org/packages/2b/b1/c6686948d4c79c3745595efc469a9f8a43cab3c7efc0b5991be65d9e8cb8/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fadbb8f1d4140825069db3fedbbb843290fd5f5bc0a5dbd7eaf81d91bf1b003b", size = 1650852, upload-time = "2025-04-02T02:16:27.556Z" }, + { url = "https://files.pythonhosted.org/packages/fe/94/3e42a6916fd3441721941e0f1b8438e1ce2a4c49af0e28e0d3c950c9b3c9/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:6a792ce34b999fbe04a7a71a90c74f10c57ae4c51f65461a411faa70e154154e", size = 1704481, upload-time = "2025-04-02T02:16:29.573Z" }, + { url = "https://files.pythonhosted.org/packages/b1/6d/6ab5854ff59b27075c7a8c610597d2b6c38945f9a1284ee8758bc3720ff6/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:f4065145bf69de124accdd17ea5f4dc770da0a6a6e440c53f6e0a8c27b3e635c", size = 1735370, upload-time = "2025-04-02T02:16:31.191Z" }, + { url = "https://files.pythonhosted.org/packages/73/2a/08a68eec3c99a6659067d271d7553e4d490a0828d588e1daa3970dc2b771/aiohttp-3.11.16-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa73e8c2656a3653ae6c307b3f4e878a21f87859a9afab228280ddccd7369d71", size = 1697619, upload-time = "2025-04-02T02:16:32.873Z" }, + { url = "https://files.pythonhosted.org/packages/61/d5/fea8dbbfb0cd68fbb56f0ae913270a79422d9a41da442a624febf72d2aaf/aiohttp-3.11.16-cp312-cp312-win32.whl", hash = "sha256:f244b8e541f414664889e2c87cac11a07b918cb4b540c36f7ada7bfa76571ea2", size = 411710, upload-time = "2025-04-02T02:16:34.525Z" }, + { url = "https://files.pythonhosted.org/packages/33/fb/41cde15fbe51365024550bf77b95a4fc84ef41365705c946da0421f0e1e0/aiohttp-3.11.16-cp312-cp312-win_amd64.whl", hash = "sha256:23a15727fbfccab973343b6d1b7181bfb0b4aa7ae280f36fd2f90f5476805682", size = 438012, upload-time = "2025-04-02T02:16:36.103Z" }, +] + +[[package]] +name = "aiohttp-retry" +version = "2.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9d/61/ebda4d8e3d8cfa1fd3db0fb428db2dd7461d5742cea35178277ad180b033/aiohttp_retry-2.9.1.tar.gz", hash = 
"sha256:8eb75e904ed4ee5c2ec242fefe85bf04240f685391c4879d8f541d6028ff01f1", size = 13608, upload-time = "2024-11-06T10:44:54.574Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/99/84ba7273339d0f3dfa57901b846489d2e5c2cd731470167757f1935fffbd/aiohttp_retry-2.9.1-py3-none-any.whl", hash = "sha256:66d2759d1921838256a05a3f80ad7e724936f083e35be5abb5e16eed6be6dc54", size = 9981, upload-time = "2024-11-06T10:44:52.917Z" }, +] + +[[package]] +name = "aiosignal" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "frozenlist" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/61/62/06741b579156360248d1ec624842ad0edf697050bbaf7c3e46394e106ad1/aiosignal-1.4.0.tar.gz", hash = "sha256:f47eecd9468083c2029cc99945502cb7708b082c232f9aca65da147157b251c7", size = 25007, upload-time = "2025-07-03T22:54:43.528Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/76/641ae371508676492379f16e2fa48f4e2c11741bd63c48be4b12a6b09cba/aiosignal-1.4.0-py3-none-any.whl", hash = "sha256:053243f8b92b990551949e63930a839ff0cf0b0ebbe0597b0f3fb19e1a0fe82e", size = 7490, upload-time = "2025-07-03T22:54:42.156Z" }, +] + +[[package]] +name = "aiosqlite" +version = "0.21.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/13/7d/8bca2bf9a247c2c5dfeec1d7a5f40db6518f88d314b8bca9da29670d2671/aiosqlite-0.21.0.tar.gz", hash = "sha256:131bb8056daa3bc875608c631c678cda73922a2d4ba8aec373b19f18c17e7aa3", size = 13454, upload-time = "2025-02-03T07:30:16.235Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/10/6c25ed6de94c49f88a91fa5018cb4c0f3625f31d5be9f771ebe5cc7cd506/aiosqlite-0.21.0-py3-none-any.whl", hash = "sha256:2549cf4057f95f53dcba16f2b64e8e2791d7e1adedb13197dd8ed77bb226d7d0", size = 15792, upload-time = "2025-02-03T07:30:13.6Z" }, +] + +[[package]] +name = "alembic" +version = "1.16.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mako" }, + { name = "sqlalchemy" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/ca/4dc52902cf3491892d464f5265a81e9dff094692c8a049a3ed6a05fe7ee8/alembic-1.16.5.tar.gz", hash = "sha256:a88bb7f6e513bd4301ecf4c7f2206fe93f9913f9b48dac3b78babde2d6fe765e", size = 1969868, upload-time = "2025-08-27T18:02:05.668Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/4a/4c61d4c84cfd9befb6fa08a702535b27b21fff08c946bc2f6139decbf7f7/alembic-1.16.5-py3-none-any.whl", hash = "sha256:e845dfe090c5ffa7b92593ae6687c5cb1a101e91fa53868497dbd79847f9dbe3", size = 247355, upload-time = "2025-08-27T18:02:07.37Z" }, +] + +[[package]] +name = "allora-sdk" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "annotated-types" }, + { name = "cachetools" }, + { name = "certifi" }, + { name = "chardet" }, + { name = "charset-normalizer" }, + { name = "colorama" }, + { name = "distlib" }, + { name = "filelock" }, + { name = "idna" }, + { name = "packaging" }, + { name = "platformdirs" }, + { name = "pluggy" }, + { name = "pydantic" }, + { name = "pydantic-core" }, + { name = "pyproject-api" }, + { name = "requests" }, + { name = "tox" }, + { name = "typing-extensions" }, + { name = "urllib3" }, + { name = "virtualenv" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/96/90/46df3515a79bcad5733633f38a7a8d6c7826c48c3e48fa89e2d081bc70f9/allora_sdk-0.2.3.tar.gz", hash = "sha256:d976c17816566114f45327cc843d9a996d807e48d1697c3cbd4355095c5a04c6", size = 6273, upload-time = "2025-04-14T11:35:53.406Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f2/65/73e1ecfcc802b6178377c2156190dfc84639d9784299119e1b4ca4ca1c68/allora_sdk-0.2.3-py3-none-any.whl", hash = "sha256:ca71c39f7f6410dbb9bc7ab6569eb91a8515490d0c6cc54f9512b7af35ab214e", size = 5008, upload-time = "2025-04-14T11:35:52.01Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "anthropic" +version = "0.64.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/4f/f2b880cba1a76f3acc7d5eb2ae217632eac1b8cef5ed3027493545c59eba/anthropic-0.64.0.tar.gz", hash = "sha256:3d496c91a63dff64f451b3e8e4b238a9640bf87b0c11d0b74ddc372ba5a3fe58", size = 427893, upload-time = "2025-08-13T17:09:49.915Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/b2/2d268bcd5d6441df9dc0ebebc67107657edb8b0150d3fda1a5b81d1bec45/anthropic-0.64.0-py3-none-any.whl", hash = "sha256:6f5f7d913a6a95eb7f8e1bda4e75f76670e8acd8d4cd965e02e2a256b0429dd1", size = 297244, upload-time = "2025-08-13T17:09:47.908Z" }, +] + +[[package]] +name = "anyio" +version = "4.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = "sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" }, +] + +[[package]] +name = "apscheduler" +version = "3.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzlocal" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/00/6d6814ddc19be2df62c8c898c4df6b5b1914f3bd024b780028caa392d186/apscheduler-3.11.0.tar.gz", hash = "sha256:4c622d250b0955a65d5d0eb91c33e6d43fd879834bf541e0a18661ae60460133", size = 107347, upload-time = "2024-11-24T19:39:26.463Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d0/ae/9a053dd9229c0fde6b1f1f33f609ccff1ee79ddda364c756a924c6d8563b/APScheduler-3.11.0-py3-none-any.whl", hash = "sha256:fc134ca32e50f5eadcc4938e3a4545ab19131435e851abb40b34d63d5141c6da", size = 64004, upload-time = "2024-11-24T19:39:24.442Z" }, +] + +[[package]] +name = "asn1crypto" +version = "1.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/de/cf/d547feed25b5244fcb9392e288ff9fdc3280b10260362fc45d37a798a6ee/asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c", size = 121080, upload-time = "2022-03-15T14:46:52.889Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c9/7f/09065fd9e27da0eda08b4d6897f1c13535066174cc023af248fc2a8d5e5a/asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67", size = 105045, upload-time = "2022-03-15T14:46:51.055Z" }, +] + +[[package]] +name = "async-lru" +version = "2.0.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/4d/71ec4d3939dc755264f680f6c2b4906423a304c3d18e96853f0a595dfe97/async_lru-2.0.5.tar.gz", hash = "sha256:481d52ccdd27275f42c43a928b4a50c3bfb2d67af4e78b170e3e0bb39c66e5bb", size = 10380, upload-time = "2025-03-16T17:25:36.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/49/d10027df9fce941cb8184e78a02857af36360d33e1721df81c5ed2179a1a/async_lru-2.0.5-py3-none-any.whl", hash = "sha256:ab95404d8d2605310d345932697371a5f40def0487c03d6d0ad9138de52c9943", size = 6069, upload-time = "2025-03-16T17:25:35.422Z" }, +] + +[[package]] +name = "asyncpg" +version = "0.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/4c/7c991e080e106d854809030d8584e15b2e996e26f16aee6d757e387bc17d/asyncpg-0.30.0.tar.gz", hash = "sha256:c551e9928ab6707602f44811817f82ba3c446e018bfe1d3abecc8ba5f3eac851", size = 957746, upload-time = "2024-10-20T00:30:41.127Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4b/64/9d3e887bb7b01535fdbc45fbd5f0a8447539833b97ee69ecdbb7a79d0cb4/asyncpg-0.30.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c902a60b52e506d38d7e80e0dd5399f657220f24635fee368117b8b5fce1142e", size = 673162, upload-time = "2024-10-20T00:29:41.88Z" }, + { url = "https://files.pythonhosted.org/packages/6e/eb/8b236663f06984f212a087b3e849731f917ab80f84450e943900e8ca4052/asyncpg-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:aca1548e43bbb9f0f627a04666fedaca23db0a31a84136ad1f868cb15deb6e3a", size = 637025, upload-time = "2024-10-20T00:29:43.352Z" }, + { url = "https://files.pythonhosted.org/packages/cc/57/2dc240bb263d58786cfaa60920779af6e8d32da63ab9ffc09f8312bd7a14/asyncpg-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c2a2ef565400234a633da0eafdce27e843836256d40705d83ab7ec42074efb3", size = 3496243, upload-time = "2024-10-20T00:29:44.922Z" }, + { url = "https://files.pythonhosted.org/packages/f4/40/0ae9d061d278b10713ea9021ef6b703ec44698fe32178715a501ac696c6b/asyncpg-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1292b84ee06ac8a2ad8e51c7475aa309245874b61333d97411aab835c4a2f737", size = 3575059, upload-time = "2024-10-20T00:29:46.891Z" }, + { url = 
"https://files.pythonhosted.org/packages/c3/75/d6b895a35a2c6506952247640178e5f768eeb28b2e20299b6a6f1d743ba0/asyncpg-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0f5712350388d0cd0615caec629ad53c81e506b1abaaf8d14c93f54b35e3595a", size = 3473596, upload-time = "2024-10-20T00:29:49.201Z" }, + { url = "https://files.pythonhosted.org/packages/c8/e7/3693392d3e168ab0aebb2d361431375bd22ffc7b4a586a0fc060d519fae7/asyncpg-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:db9891e2d76e6f425746c5d2da01921e9a16b5a71a1c905b13f30e12a257c4af", size = 3641632, upload-time = "2024-10-20T00:29:50.768Z" }, + { url = "https://files.pythonhosted.org/packages/32/ea/15670cea95745bba3f0352341db55f506a820b21c619ee66b7d12ea7867d/asyncpg-0.30.0-cp312-cp312-win32.whl", hash = "sha256:68d71a1be3d83d0570049cd1654a9bdfe506e794ecc98ad0873304a9f35e411e", size = 560186, upload-time = "2024-10-20T00:29:52.394Z" }, + { url = "https://files.pythonhosted.org/packages/7e/6b/fe1fad5cee79ca5f5c27aed7bd95baee529c1bf8a387435c8ba4fe53d5c1/asyncpg-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:9a0292c6af5c500523949155ec17b7fe01a00ace33b68a476d6b5059f9630305", size = 621064, upload-time = "2024-10-20T00:29:53.757Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "aws-secretsmanager-caching" +version = "1.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/d3/e6bb9f29da0081b9d00490a43612a47dbc4996e195d7e57e013124166b73/aws_secretsmanager_caching-1.1.3.tar.gz", hash = "sha256:f6d6ec9d43e0dbe4f6d5debdf36b4cb691d15a967b358b2575f5d91974a6c0ff", size = 27267, upload-time = "2024-06-20T21:14:50.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/2d/f6ffed80e0299b14c5b945d208ba892f2a13270dff7d88f439890b4fd315/aws_secretsmanager_caching-1.1.3-py3-none-any.whl", hash = "sha256:5dd8588520335ca5cc7f5ae5948e5e85f2f5b58c1341bda0db4acf6399806f78", size = 18427, upload-time = "2024-06-20T21:14:41.638Z" }, +] + +[[package]] +name = "base58" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/45/8ae61209bb9015f516102fa559a2914178da1d5868428bd86a1b4421141d/base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c", size = 6528, upload-time = "2021-10-30T22:12:17.858Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/45/ec96b29162a402fc4c1c5512d114d7b3787b9d1c2ec241d9568b4816ee23/base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2", size = 5621, upload-time = "2021-10-30T22:12:16.658Z" }, +] + +[[package]] +name = "bcl" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = 
{ url = "https://files.pythonhosted.org/packages/8c/78/57a3b26ac13312ed5901f1089f0351dfd958d19e96242d557e25c1498a95/bcl-2.3.1.tar.gz", hash = "sha256:2a10f1e4fde1c146594fe835f29c9c9753a9f1c449617578c1473d6371da9853", size = 16823, upload-time = "2022-10-04T01:56:50.961Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/82/93/f712cab57d0424ff65b380e22cb286b35b8bc0ba7997926dc18c8600f451/bcl-2.3.1-cp310-abi3-macosx_10_10_universal2.whl", hash = "sha256:cf59d66d4dd653b43b197ad5fc140a131db7f842c192d9836f5a6fe2bee9019e", size = 525696, upload-time = "2022-10-04T01:56:15.925Z" }, + { url = "https://files.pythonhosted.org/packages/1a/a7/984bdb769c5ad2549fafc9365b0f6156fbeeec7df524eb064e65b164f8d0/bcl-2.3.1-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a7696201b8111e877d21c1afd5a376f27975688658fa9001278f15e9fa3da2e0", size = 740158, upload-time = "2022-10-04T01:56:18.596Z" }, + { url = "https://files.pythonhosted.org/packages/36/e3/c860ae7aa62ddacf0ff4e1d2c9741f0d2ab65fec00e3890e8ac0f5463629/bcl-2.3.1-cp310-abi3-win32.whl", hash = "sha256:28f55e08e929309eacf09118b29ffb4d110ce3702eef18e98b8b413d0dfb1bf9", size = 88671, upload-time = "2022-10-04T01:56:20.644Z" }, + { url = "https://files.pythonhosted.org/packages/30/2e/a78ec72cfc2d6f438bd2978e81e05e708953434db8614a9f4f20bb7fa606/bcl-2.3.1-cp310-abi3-win_amd64.whl", hash = "sha256:f65e9f347b76964d91294964559da05cdcefb1f0bdfe90b6173892de3598a810", size = 96393, upload-time = "2022-10-04T01:56:22.475Z" }, + { url = "https://files.pythonhosted.org/packages/25/f0/63337a824e34d0a3f48f2739d902c9c7d30524d4fc23ad73a3dcdad82e05/bcl-2.3.1-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:edb8277faee90121a248d26b308f4f007da1faedfd98d246841fb0f108e47db2", size = 315551, upload-time = "2022-10-04T01:56:24.025Z" }, + { url = "https://files.pythonhosted.org/packages/00/1a/20ea61d352d5804df96baf8ca70401b17db8d748a81d4225f223f2580022/bcl-2.3.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:99aff16e0da7a3b678c6cba9be24760eda75c068cba2b85604cf41818e2ba732", size = 740123, upload-time = "2022-10-04T01:56:26.995Z" }, + { url = "https://files.pythonhosted.org/packages/5f/a8/2714e3f7d5643f487b0ecd49b21fa8db2d9572901baa49a6e0457a3b0c19/bcl-2.3.1-cp37-abi3-win32.whl", hash = "sha256:17d2e7dbe852c4447a7a2ff179dc466a3b8809ad1f151c4625ef7feff167fcaf", size = 88674, upload-time = "2022-10-04T01:56:28.518Z" }, + { url = "https://files.pythonhosted.org/packages/26/69/6fab32cd6888887ed9113b806854ac696a76cf77febdacc6c5d4271cba8e/bcl-2.3.1-cp37-abi3-win_amd64.whl", hash = "sha256:fb778e77653735ac0bd2376636cba27ad972e0888227d4b40f49ea7ca5bceefa", size = 96395, upload-time = "2022-10-04T01:56:29.948Z" }, + { url = "https://files.pythonhosted.org/packages/ab/7a/06d9297f9805da15775615bb9229b38eb28f1e113cdd05d0e7bbcc3429e4/bcl-2.3.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:f6d551e139fa1544f7c822be57b0a8da2dff791c7ffa152bf371e3a8712b8b62", size = 315576, upload-time = "2022-10-04T01:56:32.63Z" }, + { url = "https://files.pythonhosted.org/packages/7b/15/c244b97a2ffb839fc763cbd2ce65b9290c166e279aa9fc05f046e8feb372/bcl-2.3.1-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:447835deb112f75f89cca34e34957a36e355a102a37a7b41e83e5502b11fc10a", size = 740435, upload-time = "2022-10-04T01:56:35.392Z" }, + { url = "https://files.pythonhosted.org/packages/6f/ff/25eaaf928078fc266d5f4cd485206acaec43c6a9311cf809114833bc24c4/bcl-2.3.1-cp38-abi3-win32.whl", hash = 
"sha256:1d8e0a25921ee705840219ed3c78e1d2e9d0d73cb2007c2708af57489bd6ce57", size = 88675, upload-time = "2022-10-04T01:56:36.943Z" }, + { url = "https://files.pythonhosted.org/packages/85/e3/a0e02b0da403503015c2196e812c8d3781ffcd94426ce5baf7f4bbfa8533/bcl-2.3.1-cp38-abi3-win_amd64.whl", hash = "sha256:a7312d21f5e8960b121fadbd950659bc58745282c1c2415e13150590d2bb271e", size = 96399, upload-time = "2022-10-04T01:56:38.555Z" }, + { url = "https://files.pythonhosted.org/packages/08/ad/a46220911bd7795f9aec10b195e1828b2e48c2015ef7e088447cba5e9089/bcl-2.3.1-cp39-abi3-macosx_10_10_universal2.whl", hash = "sha256:bb695832cb555bb0e3dee985871e6cfc2d5314fb69bbf62297f81ba645e99257", size = 525703, upload-time = "2022-10-04T01:56:40.722Z" }, + { url = "https://files.pythonhosted.org/packages/d8/3a/e8395071a89a7199363990968d438b77c55d55cce556327c98d5ce7975d1/bcl-2.3.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:0922349eb5ffd19418f46c40469d132c6e0aea0e47fec48a69bec5191ee56bec", size = 315583, upload-time = "2022-10-04T01:56:42.88Z" }, + { url = "https://files.pythonhosted.org/packages/b5/f9/2be5d88275d3d7e79cdbc8d52659b02b752d44f2bf90addb987d1fb96752/bcl-2.3.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97117d57cf90679dd1b28f1039fa2090f5561d3c1ee4fe4e78d1b0680cc39b8d", size = 740137, upload-time = "2022-10-04T01:56:46.148Z" }, + { url = "https://files.pythonhosted.org/packages/7f/94/a3613caee8ca933902831343cc1040bcf3bb736cc9f38b2b4a7766292585/bcl-2.3.1-cp39-abi3-win32.whl", hash = "sha256:a5823f1b655a37259a06aa348bbc2e7a38d39d0e1683ea0596b888b7ef56d378", size = 88675, upload-time = "2022-10-04T01:56:47.459Z" }, + { url = "https://files.pythonhosted.org/packages/9e/45/302d6712a8ff733a259446a7d24ff3c868715103032f50eef0d93ba70221/bcl-2.3.1-cp39-abi3-win_amd64.whl", hash = "sha256:52cf26c4ecd76e806c6576c4848633ff44ebfff528fca63ad0e52085b6ba5aa9", size = 96394, upload-time = "2022-10-04T01:56:48.909Z" }, +] + +[[package]] +name = "bcrypt" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/5d/6d7433e0f3cd46ce0b43cd65e1db465ea024dbb8216fb2404e919c2ad77b/bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18", size = 25697, upload-time = "2025-02-28T01:24:09.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/22/5ada0b9af72b60cbc4c9a399fdde4af0feaa609d27eb0adc61607997a3fa/bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d", size = 498019, upload-time = "2025-02-28T01:23:05.838Z" }, + { url = "https://files.pythonhosted.org/packages/b8/8c/252a1edc598dc1ce57905be173328eda073083826955ee3c97c7ff5ba584/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b", size = 279174, upload-time = "2025-02-28T01:23:07.274Z" }, + { url = "https://files.pythonhosted.org/packages/29/5b/4547d5c49b85f0337c13929f2ccbe08b7283069eea3550a457914fc078aa/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e", size = 283870, upload-time = "2025-02-28T01:23:09.151Z" }, + { url = "https://files.pythonhosted.org/packages/be/21/7dbaf3fa1745cb63f776bb046e481fbababd7d344c5324eab47f5ca92dd2/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = 
"sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59", size = 279601, upload-time = "2025-02-28T01:23:11.461Z" }, + { url = "https://files.pythonhosted.org/packages/6d/64/e042fc8262e971347d9230d9abbe70d68b0a549acd8611c83cebd3eaec67/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753", size = 297660, upload-time = "2025-02-28T01:23:12.989Z" }, + { url = "https://files.pythonhosted.org/packages/50/b8/6294eb84a3fef3b67c69b4470fcdd5326676806bf2519cda79331ab3c3a9/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761", size = 284083, upload-time = "2025-02-28T01:23:14.5Z" }, + { url = "https://files.pythonhosted.org/packages/62/e6/baff635a4f2c42e8788fe1b1633911c38551ecca9a749d1052d296329da6/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb", size = 279237, upload-time = "2025-02-28T01:23:16.686Z" }, + { url = "https://files.pythonhosted.org/packages/39/48/46f623f1b0c7dc2e5de0b8af5e6f5ac4cc26408ac33f3d424e5ad8da4a90/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d", size = 283737, upload-time = "2025-02-28T01:23:18.897Z" }, + { url = "https://files.pythonhosted.org/packages/49/8b/70671c3ce9c0fca4a6cc3cc6ccbaa7e948875a2e62cbd146e04a4011899c/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f", size = 312741, upload-time = "2025-02-28T01:23:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/27/fb/910d3a1caa2d249b6040a5caf9f9866c52114d51523ac2fb47578a27faee/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732", size = 316472, upload-time = "2025-02-28T01:23:23.183Z" }, + { url = "https://files.pythonhosted.org/packages/dc/cf/7cf3a05b66ce466cfb575dbbda39718d45a609daa78500f57fa9f36fa3c0/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef", size = 343606, upload-time = "2025-02-28T01:23:25.361Z" }, + { url = "https://files.pythonhosted.org/packages/e3/b8/e970ecc6d7e355c0d892b7f733480f4aa8509f99b33e71550242cf0b7e63/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304", size = 362867, upload-time = "2025-02-28T01:23:26.875Z" }, + { url = "https://files.pythonhosted.org/packages/a9/97/8d3118efd8354c555a3422d544163f40d9f236be5b96c714086463f11699/bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51", size = 160589, upload-time = "2025-02-28T01:23:28.381Z" }, + { url = "https://files.pythonhosted.org/packages/29/07/416f0b99f7f3997c69815365babbc2e8754181a4b1899d921b3c7d5b6f12/bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62", size = 152794, upload-time = "2025-02-28T01:23:30.187Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c1/3fa0e9e4e0bfd3fd77eb8b52ec198fd6e1fd7e9402052e43f23483f956dd/bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3", size = 498969, upload-time = "2025-02-28T01:23:31.945Z" }, + { url = 
"https://files.pythonhosted.org/packages/ce/d4/755ce19b6743394787fbd7dff6bf271b27ee9b5912a97242e3caf125885b/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24", size = 279158, upload-time = "2025-02-28T01:23:34.161Z" }, + { url = "https://files.pythonhosted.org/packages/9b/5d/805ef1a749c965c46b28285dfb5cd272a7ed9fa971f970435a5133250182/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef", size = 284285, upload-time = "2025-02-28T01:23:35.765Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/698580547a4a4988e415721b71eb45e80c879f0fb04a62da131f45987b96/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b", size = 279583, upload-time = "2025-02-28T01:23:38.021Z" }, + { url = "https://files.pythonhosted.org/packages/f2/87/62e1e426418204db520f955ffd06f1efd389feca893dad7095bf35612eec/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676", size = 297896, upload-time = "2025-02-28T01:23:39.575Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c6/8fedca4c2ada1b6e889c52d2943b2f968d3427e5d65f595620ec4c06fa2f/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1", size = 284492, upload-time = "2025-02-28T01:23:40.901Z" }, + { url = "https://files.pythonhosted.org/packages/4d/4d/c43332dcaaddb7710a8ff5269fcccba97ed3c85987ddaa808db084267b9a/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe", size = 279213, upload-time = "2025-02-28T01:23:42.653Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/1e36379e169a7df3a14a1c160a49b7b918600a6008de43ff20d479e6f4b5/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0", size = 284162, upload-time = "2025-02-28T01:23:43.964Z" }, + { url = "https://files.pythonhosted.org/packages/1c/0a/644b2731194b0d7646f3210dc4d80c7fee3ecb3a1f791a6e0ae6bb8684e3/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f", size = 312856, upload-time = "2025-02-28T01:23:46.011Z" }, + { url = "https://files.pythonhosted.org/packages/dc/62/2a871837c0bb6ab0c9a88bf54de0fc021a6a08832d4ea313ed92a669d437/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23", size = 316726, upload-time = "2025-02-28T01:23:47.575Z" }, + { url = "https://files.pythonhosted.org/packages/0c/a1/9898ea3faac0b156d457fd73a3cb9c2855c6fd063e44b8522925cdd8ce46/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe", size = 343664, upload-time = "2025-02-28T01:23:49.059Z" }, + { url = "https://files.pythonhosted.org/packages/40/f2/71b4ed65ce38982ecdda0ff20c3ad1b15e71949c78b2c053df53629ce940/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505", size = 363128, upload-time = "2025-02-28T01:23:50.399Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/99/12f6a58eca6dea4be992d6c681b7ec9410a1d9f5cf368c61437e31daa879/bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a", size = 160598, upload-time = "2025-02-28T01:23:51.775Z" }, + { url = "https://files.pythonhosted.org/packages/a9/cf/45fb5261ece3e6b9817d3d82b2f343a505fd58674a92577923bc500bd1aa/bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b", size = 152799, upload-time = "2025-02-28T01:23:53.139Z" }, +] + +[[package]] +name = "beautifulsoup4" +version = "4.13.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/2e/3e5079847e653b1f6dc647aa24549d68c6addb4c595cc0d902d1b19308ad/beautifulsoup4-4.13.5.tar.gz", hash = "sha256:5e70131382930e7c3de33450a2f54a63d5e4b19386eab43a5b34d594268f3695", size = 622954, upload-time = "2025-08-24T14:06:13.168Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/eb/f4151e0c7377a6e08a38108609ba5cede57986802757848688aeedd1b9e8/beautifulsoup4-4.13.5-py3-none-any.whl", hash = "sha256:642085eaa22233aceadff9c69651bc51e8bf3f874fb6d7104ece2beb24b47c4a", size = 105113, upload-time = "2025-08-24T14:06:14.884Z" }, +] + +[[package]] +name = "bip32" +version = "4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coincurve" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4a/37/b69968b1b6eaea1fedb8efdb1862d86e92b6f68e182f39c764f894984db5/bip32-4.0.tar.gz", hash = "sha256:8035588f252f569bb414bc60df151ae431fc1c6789a19488a32890532ef3a2fc", size = 21662, upload-time = "2024-09-07T12:40:26.388Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/bd/dcf1650776a241c10a2bc6826b6e23ff63bf55373bb053b716c69c463758/bip32-4.0-py3-none-any.whl", hash = "sha256:9728b38336129c00e1f870bbb3e328c9632d51c1bddeef4011fd3115cb3aeff9", size = 12898, upload-time = "2024-09-07T12:40:25.358Z" }, +] + +[[package]] +name = "bitarray" +version = "3.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/99/b6/282f5f0331b3877d4e79a8aa1cf63b5113a10f035a39bef1fa1dfe9e9e09/bitarray-3.7.1.tar.gz", hash = "sha256:795b1760418ab750826420ae24f06f392c08e21dc234f0a369a69cc00444f8ec", size = 150474, upload-time = "2025-08-28T22:18:15.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/27/46b5b4dabecf84f750587cded3640658448d27c59f4dd2cbaa589085f43a/bitarray-3.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b99a0347bc6131046c19e056a113daa34d7df99f1f45510161bc78bc8461a470", size = 147349, upload-time = "2025-08-28T22:15:32.729Z" }, + { url = "https://files.pythonhosted.org/packages/f9/1e/7f61150577127a1540136ba8a63ba17c661a17e721e03404fcd5833a4a05/bitarray-3.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d7e274ac1975e55ebfb8166cce27e13dc99120c1d6ce9e490d7a716b9be9abb5", size = 143922, upload-time = "2025-08-28T22:15:33.963Z" }, + { url = "https://files.pythonhosted.org/packages/ca/b2/7c852472df8c644d05530bc0ad586fead5f23a9d176873c2c54f57e16b4e/bitarray-3.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b9a2eb7d2e0e9c2f25256d2663c0a2a4798fe3110e3ddbbb1a7b71740b4de08", size = 330277, upload-time = "2025-08-28T22:15:34.997Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/38/681340eea0997c48ef2dbf1acb0786090518704ca32f9a2c3c669bdea08e/bitarray-3.7.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e15e70a3cf5bb519e2448524d689c02ff6bcd4750587a517e2bffee06065bf27", size = 349562, upload-time = "2025-08-28T22:15:36.554Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f4/6fc43f896af85c5b10a74b1d8a87c05915464869594131a2d7731707a108/bitarray-3.7.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c65257899bb8faf6a111297b4ff0066324a6b901318582c0453a01422c3bcd5a", size = 341249, upload-time = "2025-08-28T22:15:37.774Z" }, + { url = "https://files.pythonhosted.org/packages/89/c7/1f71164799cacd44964ead87e1fc7e2f0ddec6d0519515a82d54eb8c8a13/bitarray-3.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38b0261483c59bb39ae9300ad46bf0bbf431ab604266382d986a349c96171b36", size = 332874, upload-time = "2025-08-28T22:15:38.935Z" }, + { url = "https://files.pythonhosted.org/packages/95/cd/4d7c19064fa7fe94c2818712695fa186a1d0bb9c5cb0cf34693df81d3202/bitarray-3.7.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d2b1ed363a4ef5622dccbf7822f01b51195062c4f382b28c9bd125d046d0324c", size = 321107, upload-time = "2025-08-28T22:15:40.071Z" }, + { url = "https://files.pythonhosted.org/packages/1e/d2/7d5ffe491c70614c0eb4a0186666efe925a02e25ed80ebd19c5fcb1c62e8/bitarray-3.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:dfde50ae55e075dcd5801e2c3ea0e749c849ed2cbbee991af0f97f1bdbadb2a6", size = 324999, upload-time = "2025-08-28T22:15:41.241Z" }, + { url = "https://files.pythonhosted.org/packages/11/d9/95fb87ec72c01169dad574baf7bc9e0d2bb73975d7ea29a83920a38646f4/bitarray-3.7.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:45660e2fabcdc1bab9699a468b312f47956300d41d6a2ea91c8f067572aaf38a", size = 321816, upload-time = "2025-08-28T22:15:42.417Z" }, + { url = "https://files.pythonhosted.org/packages/6b/3d/57ac96bbd125df75219c59afa297242054c09f22548aff028a8cefa8f120/bitarray-3.7.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7b4a41dc183d7d16750634f65566205990f94144755a39f33da44c0350c3e1a8", size = 349342, upload-time = "2025-08-28T22:15:43.997Z" }, + { url = "https://files.pythonhosted.org/packages/a9/14/d28f7456d2c3b3f7898186498b6d7fd3eecab267c300fb333fc2a8d55965/bitarray-3.7.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:8b8e07374d60040b24d1a158895d9758424db13be63d4b2fe1870e37f9dec009", size = 350501, upload-time = "2025-08-28T22:15:45.377Z" }, + { url = "https://files.pythonhosted.org/packages/bb/a4/0f803dc446e602b21e61315f5fa2cdec02a65340147b08f7efadba559f38/bitarray-3.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f31d8c2168bf2a52e4539232392352832c2296e07e0e14b6e06a44da574099ba", size = 331362, upload-time = "2025-08-28T22:15:46.577Z" }, + { url = "https://files.pythonhosted.org/packages/c9/03/25e4c4b91a33f1eae0a9e9b2b11f1eaed14e37499abbde154ff33888f5f5/bitarray-3.7.1-cp312-cp312-win32.whl", hash = "sha256:fe1f1f4010244cb07f6a079854a12e1627e4fb9ea99d672f2ceccaf6653ca514", size = 141474, upload-time = "2025-08-28T22:15:48.185Z" }, + { url = "https://files.pythonhosted.org/packages/25/53/98efa8ee389e4cbd91fc7c87bfebd4e11d6f8a027eb3f9be42d1addf1f51/bitarray-3.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:f41a4b57cbc128a699e9d716a56c90c7fc76554e680fe2962f49cc4d8688b051", size = 148458, upload-time = "2025-08-28T22:15:49.256Z" }, +] + +[[package]] +name = 
"boto3" +version = "1.40.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d8/54/5ba3f69a892ff486f5925008da21618665cf321880f279e9605399d9cec3/boto3-1.40.21.tar.gz", hash = "sha256:876ccc0b25517b992bd27976282510773a11ebc771aa5b836a238ea426c82187", size = 111590, upload-time = "2025-08-29T19:20:57.901Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/76/48b982bb504ffbff8eb5522df8c144b98cdc38d574b3c55db1d82b5c0c7f/boto3-1.40.21-py3-none-any.whl", hash = "sha256:3772fb828864d3b7046c8bdf2f4860aaca4a79f25b7b060206c6a5f4944ea7f9", size = 139322, upload-time = "2025-08-29T19:20:55.888Z" }, +] + +[[package]] +name = "botocore" +version = "1.40.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/11/d9a500a0e86b74017854e3ff12fd943f74f4358337799e0b272eaa6b4e27/botocore-1.40.21.tar.gz", hash = "sha256:f77e9c199df0252b14ea739a9ac99723940f6bde90f4c2e7802701553a62827b", size = 14321194, upload-time = "2025-08-29T19:20:46.892Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/6a/effb671afa31d35805d0760b45676136fd1209e263641861456b4566ae9b/botocore-1.40.21-py3-none-any.whl", hash = "sha256:574ecf9b68c1721650024a27e00e0080b6f141c281ebfce49e0d302969270ef4", size = 13993859, upload-time = "2025-08-29T19:20:41.404Z" }, +] + +[[package]] +name = "cachetools" +version = "5.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/38/a0f315319737ecf45b4319a8cd1f3a908e29d9277b46942263292115eee7/cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a", size = 27661, upload-time = "2024-08-18T20:28:44.639Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/07/14f8ad37f2d12a5ce41206c21820d8cb6561b728e51fad4530dff0552a67/cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292", size = 9524, upload-time = "2024-08-18T20:28:43.404Z" }, +] + +[[package]] +name = "cdp-sdk" +version = "1.31.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "aiohttp-retry" }, + { name = "base58" }, + { name = "cryptography" }, + { name = "nest-asyncio" }, + { name = "pydantic" }, + { name = "pyjwt" }, + { name = "python-dateutil" }, + { name = "solana" }, + { name = "solders" }, + { name = "urllib3" }, + { name = "web3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/66/c5afeb752754e4efc83e07803595d6dbdfd6c6ad83ae6576e39c106696b5/cdp_sdk-1.31.0.tar.gz", hash = "sha256:d64591d89fa35e07536f4948e08505d140e13d58e7d8c98d2e3f44a82eb629ae", size = 317695, upload-time = "2025-08-19T19:30:25.288Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/89/6a5ba1c42b388e08cfc0974a59d8432e8ed904d181e91d7a64038ea5dd19/cdp_sdk-1.31.0-py3-none-any.whl", hash = "sha256:b157cbc52c84cde301903cdc4c253aa97a5d629480e558a1f26345d4c7ef62b1", size = 792658, upload-time = "2025-08-19T19:30:23.508Z" }, +] + +[[package]] +name = "certifi" +version = "2024.12.14" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/0f/bd/1d41ee578ce09523c81a15426705dd20969f5abf006d1afe8aeff0dd776a/certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db", size = 166010, upload-time = "2024-12-14T13:52:38.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/32/8f6669fc4798494966bf446c8c4a162e0b5d893dff088afddf76414f70e1/certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56", size = 164927, upload-time = "2024-12-14T13:52:36.114Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, +] + +[[package]] +name = "chardet" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/f7b6ab21ec75897ed80c17d79b15951a719226b9fababf1e40ea74d69079/chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7", size = 2069618, upload-time = "2023-08-01T19:23:02.662Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/6f/f5fbc992a329ee4e0f288c1fe0e2ad9485ed064cac731ed2fe47dcc38cbf/chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970", size = 199385, upload-time = "2023-08-01T19:23:00.661Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188, upload-time = "2024-12-24T18:12:35.43Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/9a/dd1e1cdceb841925b7798369a09279bd1cf183cef0f9ddf15a3a6502ee45/charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545", size = 196105, upload-time = "2024-12-24T18:10:38.83Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8c/90bfabf8c4809ecb648f39794cf2a84ff2e7d2a6cf159fe68d9a26160467/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7", size = 140404, upload-time = "2024-12-24T18:10:44.272Z" }, + { url = "https://files.pythonhosted.org/packages/ad/8f/e410d57c721945ea3b4f1a04b74f70ce8fa800d393d72899f0a40526401f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757", size = 150423, upload-time = "2024-12-24T18:10:45.492Z" }, + { url = "https://files.pythonhosted.org/packages/f0/b8/e6825e25deb691ff98cf5c9072ee0605dc2acfca98af70c2d1b1bc75190d/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa", size = 143184, upload-time = "2024-12-24T18:10:47.898Z" }, + { url = 
"https://files.pythonhosted.org/packages/3e/a2/513f6cbe752421f16d969e32f3583762bfd583848b763913ddab8d9bfd4f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d", size = 145268, upload-time = "2024-12-24T18:10:50.589Z" }, + { url = "https://files.pythonhosted.org/packages/74/94/8a5277664f27c3c438546f3eb53b33f5b19568eb7424736bdc440a88a31f/charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616", size = 147601, upload-time = "2024-12-24T18:10:52.541Z" }, + { url = "https://files.pythonhosted.org/packages/7c/5f/6d352c51ee763623a98e31194823518e09bfa48be2a7e8383cf691bbb3d0/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b", size = 141098, upload-time = "2024-12-24T18:10:53.789Z" }, + { url = "https://files.pythonhosted.org/packages/78/d4/f5704cb629ba5ab16d1d3d741396aec6dc3ca2b67757c45b0599bb010478/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d", size = 149520, upload-time = "2024-12-24T18:10:55.048Z" }, + { url = "https://files.pythonhosted.org/packages/c5/96/64120b1d02b81785f222b976c0fb79a35875457fa9bb40827678e54d1bc8/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a", size = 152852, upload-time = "2024-12-24T18:10:57.647Z" }, + { url = "https://files.pythonhosted.org/packages/84/c9/98e3732278a99f47d487fd3468bc60b882920cef29d1fa6ca460a1fdf4e6/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9", size = 150488, upload-time = "2024-12-24T18:10:59.43Z" }, + { url = "https://files.pythonhosted.org/packages/13/0e/9c8d4cb99c98c1007cc11eda969ebfe837bbbd0acdb4736d228ccaabcd22/charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1", size = 146192, upload-time = "2024-12-24T18:11:00.676Z" }, + { url = "https://files.pythonhosted.org/packages/b2/21/2b6b5b860781a0b49427309cb8670785aa543fb2178de875b87b9cc97746/charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35", size = 95550, upload-time = "2024-12-24T18:11:01.952Z" }, + { url = "https://files.pythonhosted.org/packages/21/5b/1b390b03b1d16c7e382b561c5329f83cc06623916aab983e8ab9239c7d5c/charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f", size = 102785, upload-time = "2024-12-24T18:11:03.142Z" }, + { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767, upload-time = "2024-12-24T18:12:32.852Z" }, +] + +[[package]] +name = "ckzg" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/55/df/f6db8e83bd4594c1ea685cd37fb81d5399e55765aae16d1a8a9502598f4e/ckzg-2.1.1.tar.gz", hash = 
"sha256:d6b306b7ec93a24e4346aa53d07f7f75053bc0afc7398e35fa649e5f9d48fcc4", size = 1120500, upload-time = "2025-03-31T21:24:12.324Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/a1/9c07513dd0ea01e5db727e67bd2660f3b300a4511281cdb8d5e04afa1cfd/ckzg-2.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c60e8903344ce98ce036f0fabacce952abb714cad4607198b2f0961c28b8aa72", size = 116421, upload-time = "2025-03-31T21:22:46.434Z" }, + { url = "https://files.pythonhosted.org/packages/27/04/b69a0dfbb2722a14c98a52973f276679151ec56a14178cb48e6f2e1697bc/ckzg-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a4299149dd72448e5a8d2d1cc6cc7472c92fc9d9f00b1377f5b017c089d9cd92", size = 100216, upload-time = "2025-03-31T21:22:47.633Z" }, + { url = "https://files.pythonhosted.org/packages/2e/24/9cc850d0b8ead395ad5064de67c7c91adacaf31b6b35292ab53fbd93270b/ckzg-2.1.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:025dd31ffdcc799f3ff842570a2a6683b6c5b01567da0109c0c05d11768729c4", size = 175764, upload-time = "2025-03-31T21:22:48.768Z" }, + { url = "https://files.pythonhosted.org/packages/c0/c1/eb13ba399082a98b932f10b230ec08e6456051c0ce3886b3f6d8548d11ab/ckzg-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b42ab8385c273f40a693657c09d2bba40cb4f4666141e263906ba2e519e80bd", size = 161885, upload-time = "2025-03-31T21:22:50.05Z" }, + { url = "https://files.pythonhosted.org/packages/57/c7/58baa64199781950c5a8c6139a46e1acff0f057a36e56769817400eb87fb/ckzg-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1be3890fc1543f4fcfc0063e4baf5c036eb14bcf736dabdc6171ab017e0f1671", size = 170757, upload-time = "2025-03-31T21:22:51.282Z" }, + { url = "https://files.pythonhosted.org/packages/65/bd/4b8e1c70972c98829371b7004dc750a45268c5d3442d602e1b62f13ca867/ckzg-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b754210ded172968b201e2d7252573af6bf52d6ad127ddd13d0b9a45a51dae7b", size = 173761, upload-time = "2025-03-31T21:22:52.6Z" }, + { url = "https://files.pythonhosted.org/packages/1f/32/c3fd1002f97ba3e0c5b1d9ab2c8fb7a6f475fa9b80ed9c4fa55975501a54/ckzg-2.1.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b2f8fda87865897a269c4e951e3826c2e814427a6cdfed6731cccfe548f12b36", size = 188666, upload-time = "2025-03-31T21:22:53.47Z" }, + { url = "https://files.pythonhosted.org/packages/e2/d9/91cf5a8169ee60c9397c975163cbca34432571f94facec5f8c0086bb47d8/ckzg-2.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:98e70b5923d77c7359432490145e9d1ab0bf873eb5de56ec53f4a551d7eaec79", size = 183652, upload-time = "2025-03-31T21:22:54.351Z" }, + { url = "https://files.pythonhosted.org/packages/25/d4/8c9f6b852f99926862344b29f0c59681916ccfec2ac60a85952a369e0bca/ckzg-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:42af7bde4ca45469cd93a96c3d15d69d51d40e7f0d30e3a20711ebd639465fcb", size = 98816, upload-time = "2025-03-31T21:22:55.23Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "coinbase-agentkit" +version = "0.6.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "allora-sdk" }, + { name = "cdp-sdk" }, + { name = "ecdsa" }, + { name = "jsonschema" }, + { name = "nilql" }, + { name = "paramiko" }, + { name = "pydantic" }, + { name = "pyjwt", extra = ["crypto"] }, + { name = "python-dotenv" }, + { name = "requests" }, + { name = "web3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/e1/6a21afddfffe91b53b6865bae9e6a910b4d595eb05feab25e59a382e6242/coinbase_agentkit-0.6.0.tar.gz", hash = "sha256:b0700af4a3a736254dc1f308d3673e238817067fdee91b5e5d44c9e7fda7052f", size = 102816, upload-time = "2025-05-30T19:26:08.424Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/35/76010c76941a8a0f47e1d49ffc0cd1ec0eac5a6746eca9bb7a59c388ccb9/coinbase_agentkit-0.6.0-py3-none-any.whl", hash = "sha256:9847a4037accd0de25743fd8b941ca4666715454db0146a809030f9b80d31535", size = 163009, upload-time = "2025-05-30T19:26:06.141Z" }, +] + +[[package]] +name = "coinbase-agentkit-langchain" +version = "0.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coinbase-agentkit" }, + { name = "langchain" }, + { name = "nest-asyncio" }, + { name = "python-dotenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/11/07/c3d2cc1015e34aab038775f5a599251431f5d9c087026bd59ee2606ba555/coinbase_agentkit_langchain-0.5.0.tar.gz", hash = "sha256:52a76976da0cf09673b5f0aa7b5bec8bd34316b0a63e798a3e9fb9dc36b85259", size = 2954, upload-time = "2025-05-30T19:54:42.699Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/62/ebdaf7dd4fe7278f8c17d8f4b6e5e4c39c91769aee8e6d8aa12263fb443d/coinbase_agentkit_langchain-0.5.0-py3-none-any.whl", hash = "sha256:356d0e839f2c13bb550473737ac98ecb6edc3a88697f7b9bacf898a7833a00ac", size = 2625, upload-time = "2025-05-30T19:54:41.423Z" }, +] + +[[package]] +name = "coincurve" +version = "20.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asn1crypto" }, + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d9/4c/9e5e51e6c12cec6444c86697992f9c6ccffa19f84d042ff939c8b89206ff/coincurve-20.0.0.tar.gz", hash = "sha256:872419e404300302e938849b6b92a196fabdad651060b559dc310e52f8392829", size = 122865, upload-time = "2024-06-02T18:15:50.787Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/24/e1bf259dd57186fbdc7cec51909db320884162cfad5ec72cbaa63573ff9d/coincurve-20.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4df4416a6c0370d777aa725a25b14b04e45aa228da1251c258ff91444643f688", size = 1255671, upload-time = "2024-06-02T18:14:57.863Z" }, + { url = "https://files.pythonhosted.org/packages/0a/c5/1817f87d1cd5ff50d8537fe60fb96f66b76dd02da885d970952e6189a801/coincurve-20.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1ccc3e4db55abf3fc0e604a187fdb05f0702bc5952e503d9a75f4ae6eeb4cb3a", size = 1255565, upload-time = "2024-06-02T18:14:59.128Z" }, + { url = "https://files.pythonhosted.org/packages/90/9f/35e15f993717ed1dcc4c26d9771f073a1054af26808a0f421783bb4cd7e0/coincurve-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ac8335b1658a2ef5b3eb66d52647742fe8c6f413ad5b9d5310d7ea6d8060d40f", size = 1191953, upload-time = "2024-06-02T18:15:01.047Z" }, + { url = "https://files.pythonhosted.org/packages/4a/3d/6a9bc32e69b738b5e05f5027bace1da6722352a4a447e495d3c03a601d99/coincurve-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c7ac025e485a0229fd5394e0bf6b4a75f8a4f6cee0dcf6f0b01a2ef05c5210ff", size = 1194425, upload-time = "2024-06-02T18:15:02.919Z" }, + { url = "https://files.pythonhosted.org/packages/1a/a6/15424973dc47fc7c87e3c0f8859f6f1b1032582ee9f1b85fdd5d1e33d630/coincurve-20.0.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e46e3f1c21b3330857bcb1a3a5b942f645c8bce912a8a2b252216f34acfe4195", size = 1204678, upload-time = "2024-06-02T18:15:04.308Z" }, + { url = "https://files.pythonhosted.org/packages/6a/e7/71ddb4d66c11c4ad13e729362f8852e048ae452eba3dfcf57751842bb292/coincurve-20.0.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:df9ff9b17a1d27271bf476cf3fa92df4c151663b11a55d8cea838b8f88d83624", size = 1215395, upload-time = "2024-06-02T18:15:05.701Z" }, + { url = "https://files.pythonhosted.org/packages/b9/7d/03e0a19cfff1d86f5d019afc69cfbff02caada701ed5a4a50abc63d4261c/coincurve-20.0.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:4155759f071375699282e03b3d95fb473ee05c022641c077533e0d906311e57a", size = 1204552, upload-time = "2024-06-02T18:15:07.107Z" }, + { url = "https://files.pythonhosted.org/packages/07/cd/e9bd4ca7d931653a35c74194da04191a9aecc54b8f48a554cd538dc810e4/coincurve-20.0.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0530b9dd02fc6f6c2916716974b79bdab874227f560c422801ade290e3fc5013", size = 1209392, upload-time = "2024-06-02T18:15:08.663Z" }, + { url = "https://files.pythonhosted.org/packages/99/54/260053f14f74b99b645084231e1c76994134ded49407a3bba23a8ffc0ff6/coincurve-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:eacf9c0ce8739c84549a89c083b1f3526c8780b84517ee75d6b43d276e55f8a0", size = 1198932, upload-time = "2024-06-02T18:15:10.786Z" }, + { url = "https://files.pythonhosted.org/packages/b4/b5/c465e09345dd38b9415f5d47ae7683b3f461db02fcc03e699b6b5687ab2b/coincurve-20.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:52a67bfddbd6224dfa42085c88ad176559801b57d6a8bd30d92ee040de88b7b3", size = 1193324, upload-time = "2024-06-02T18:15:12.511Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "construct" +version = "2.10.68" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/b7/a4a032e94bcfdff481f2e6fecd472794d9da09f474a2185ed33b2c7cad64/construct-2.10.68.tar.gz", hash = "sha256:7b2a3fd8e5f597a5aa1d614c3bd516fa065db01704c72a1efaaeec6ef23d8b45", size = 57856, upload-time = "2022-02-21T23:09:15.1Z" } + +[[package]] +name = "construct-typing" +version = 
"0.6.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "construct" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/13/c609e60a687252813aa4b69f989f42754ccd5e217717216fc852eefedfd7/construct-typing-0.6.2.tar.gz", hash = "sha256:948e998cfc003681dc34f2d071c3a688cf35b805cbe107febbc488ef967ccba1", size = 22029, upload-time = "2023-08-03T07:31:06.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/0b/ab3ce2b27dd74b6a6703065bd304ea8211ff4de3b1c304446ed95234177b/construct_typing-0.6.2-py3-none-any.whl", hash = "sha256:ebea6989ac622d0c4eb457092cef0c7bfbcfa110bd018670fea7064d0bc09e47", size = 23298, upload-time = "2023-08-03T07:31:04.545Z" }, +] + +[[package]] +name = "cron-validator" +version = "1.0.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil" }, + { name = "pytz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/55/d3d8e7acad9dc3f54133df0972c79d38f2c6fc4be41f281a396c3afe4411/cron-validator-1.0.8.tar.gz", hash = "sha256:dd485257adb6f590b3e9433f641440c801d307015259c1ee3eb6e21c964c8026", size = 9657, upload-time = "2023-06-19T10:09:48.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/0c/d6bf9d572fb2ce3404fe37b794cf10dd44566368f7084c71b6028d3818ff/cron_validator-1.0.8-py3-none-any.whl", hash = "sha256:6477fcc3d60bfbd1ec00a708f0b8b5136c1fef8140c10effea1f45b79d778653", size = 7830, upload-time = "2023-06-19T10:09:44.881Z" }, +] + +[[package]] +name = "cryptography" +version = "45.0.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/35/c495bffc2056f2dadb32434f1feedd79abde2a7f8363e1974afa9c33c7e2/cryptography-45.0.7.tar.gz", hash = "sha256:4b1654dfc64ea479c242508eb8c724044f1e964a47d1d1cacc5132292d851971", size = 744980, upload-time = "2025-09-01T11:15:03.146Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/91/925c0ac74362172ae4516000fe877912e33b5983df735ff290c653de4913/cryptography-45.0.7-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:3be4f21c6245930688bd9e162829480de027f8bf962ede33d4f8ba7d67a00cee", size = 7041105, upload-time = "2025-09-01T11:13:59.684Z" }, + { url = "https://files.pythonhosted.org/packages/fc/63/43641c5acce3a6105cf8bd5baeceeb1846bb63067d26dae3e5db59f1513a/cryptography-45.0.7-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:67285f8a611b0ebc0857ced2081e30302909f571a46bfa7a3cc0ad303fe015c6", size = 4205799, upload-time = "2025-09-01T11:14:02.517Z" }, + { url = "https://files.pythonhosted.org/packages/bc/29/c238dd9107f10bfde09a4d1c52fd38828b1aa353ced11f358b5dd2507d24/cryptography-45.0.7-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:577470e39e60a6cd7780793202e63536026d9b8641de011ed9d8174da9ca5339", size = 4430504, upload-time = "2025-09-01T11:14:04.522Z" }, + { url = "https://files.pythonhosted.org/packages/62/62/24203e7cbcc9bd7c94739428cd30680b18ae6b18377ae66075c8e4771b1b/cryptography-45.0.7-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:4bd3e5c4b9682bc112d634f2c6ccc6736ed3635fc3319ac2bb11d768cc5a00d8", size = 4209542, upload-time = "2025-09-01T11:14:06.309Z" }, + { url = 
"https://files.pythonhosted.org/packages/cd/e3/e7de4771a08620eef2389b86cd87a2c50326827dea5528feb70595439ce4/cryptography-45.0.7-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:465ccac9d70115cd4de7186e60cfe989de73f7bb23e8a7aa45af18f7412e75bf", size = 3889244, upload-time = "2025-09-01T11:14:08.152Z" }, + { url = "https://files.pythonhosted.org/packages/96/b8/bca71059e79a0bb2f8e4ec61d9c205fbe97876318566cde3b5092529faa9/cryptography-45.0.7-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:16ede8a4f7929b4b7ff3642eba2bf79aa1d71f24ab6ee443935c0d269b6bc513", size = 4461975, upload-time = "2025-09-01T11:14:09.755Z" }, + { url = "https://files.pythonhosted.org/packages/58/67/3f5b26937fe1218c40e95ef4ff8d23c8dc05aa950d54200cc7ea5fb58d28/cryptography-45.0.7-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:8978132287a9d3ad6b54fcd1e08548033cc09dc6aacacb6c004c73c3eb5d3ac3", size = 4209082, upload-time = "2025-09-01T11:14:11.229Z" }, + { url = "https://files.pythonhosted.org/packages/0e/e4/b3e68a4ac363406a56cf7b741eeb80d05284d8c60ee1a55cdc7587e2a553/cryptography-45.0.7-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:b6a0e535baec27b528cb07a119f321ac024592388c5681a5ced167ae98e9fff3", size = 4460397, upload-time = "2025-09-01T11:14:12.924Z" }, + { url = "https://files.pythonhosted.org/packages/22/49/2c93f3cd4e3efc8cb22b02678c1fad691cff9dd71bb889e030d100acbfe0/cryptography-45.0.7-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:a24ee598d10befaec178efdff6054bc4d7e883f615bfbcd08126a0f4931c83a6", size = 4337244, upload-time = "2025-09-01T11:14:14.431Z" }, + { url = "https://files.pythonhosted.org/packages/04/19/030f400de0bccccc09aa262706d90f2ec23d56bc4eb4f4e8268d0ddf3fb8/cryptography-45.0.7-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:fa26fa54c0a9384c27fcdc905a2fb7d60ac6e47d14bc2692145f2b3b1e2cfdbd", size = 4568862, upload-time = "2025-09-01T11:14:16.185Z" }, + { url = "https://files.pythonhosted.org/packages/29/56/3034a3a353efa65116fa20eb3c990a8c9f0d3db4085429040a7eef9ada5f/cryptography-45.0.7-cp311-abi3-win32.whl", hash = "sha256:bef32a5e327bd8e5af915d3416ffefdbe65ed975b646b3805be81b23580b57b8", size = 2936578, upload-time = "2025-09-01T11:14:17.638Z" }, + { url = "https://files.pythonhosted.org/packages/b3/61/0ab90f421c6194705a99d0fa9f6ee2045d916e4455fdbb095a9c2c9a520f/cryptography-45.0.7-cp311-abi3-win_amd64.whl", hash = "sha256:3808e6b2e5f0b46d981c24d79648e5c25c35e59902ea4391a0dcb3e667bf7443", size = 3405400, upload-time = "2025-09-01T11:14:18.958Z" }, + { url = "https://files.pythonhosted.org/packages/63/e8/c436233ddf19c5f15b25ace33979a9dd2e7aa1a59209a0ee8554179f1cc0/cryptography-45.0.7-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bfb4c801f65dd61cedfc61a83732327fafbac55a47282e6f26f073ca7a41c3b2", size = 7021824, upload-time = "2025-09-01T11:14:20.954Z" }, + { url = "https://files.pythonhosted.org/packages/bc/4c/8f57f2500d0ccd2675c5d0cc462095adf3faa8c52294ba085c036befb901/cryptography-45.0.7-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:81823935e2f8d476707e85a78a405953a03ef7b7b4f55f93f7c2d9680e5e0691", size = 4202233, upload-time = "2025-09-01T11:14:22.454Z" }, + { url = "https://files.pythonhosted.org/packages/eb/ac/59b7790b4ccaed739fc44775ce4645c9b8ce54cbec53edf16c74fd80cb2b/cryptography-45.0.7-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3994c809c17fc570c2af12c9b840d7cea85a9fd3e5c0e0491f4fa3c029216d59", size = 4423075, upload-time = "2025-09-01T11:14:24.287Z" }, + { url = 
"https://files.pythonhosted.org/packages/b8/56/d4f07ea21434bf891faa088a6ac15d6d98093a66e75e30ad08e88aa2b9ba/cryptography-45.0.7-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:dad43797959a74103cb59c5dac71409f9c27d34c8a05921341fb64ea8ccb1dd4", size = 4204517, upload-time = "2025-09-01T11:14:25.679Z" }, + { url = "https://files.pythonhosted.org/packages/e8/ac/924a723299848b4c741c1059752c7cfe09473b6fd77d2920398fc26bfb53/cryptography-45.0.7-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:ce7a453385e4c4693985b4a4a3533e041558851eae061a58a5405363b098fcd3", size = 3882893, upload-time = "2025-09-01T11:14:27.1Z" }, + { url = "https://files.pythonhosted.org/packages/83/dc/4dab2ff0a871cc2d81d3ae6d780991c0192b259c35e4d83fe1de18b20c70/cryptography-45.0.7-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:b04f85ac3a90c227b6e5890acb0edbaf3140938dbecf07bff618bf3638578cf1", size = 4450132, upload-time = "2025-09-01T11:14:28.58Z" }, + { url = "https://files.pythonhosted.org/packages/12/dd/b2882b65db8fc944585d7fb00d67cf84a9cef4e77d9ba8f69082e911d0de/cryptography-45.0.7-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:48c41a44ef8b8c2e80ca4527ee81daa4c527df3ecbc9423c41a420a9559d0e27", size = 4204086, upload-time = "2025-09-01T11:14:30.572Z" }, + { url = "https://files.pythonhosted.org/packages/5d/fa/1d5745d878048699b8eb87c984d4ccc5da4f5008dfd3ad7a94040caca23a/cryptography-45.0.7-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f3df7b3d0f91b88b2106031fd995802a2e9ae13e02c36c1fc075b43f420f3a17", size = 4449383, upload-time = "2025-09-01T11:14:32.046Z" }, + { url = "https://files.pythonhosted.org/packages/36/8b/fc61f87931bc030598e1876c45b936867bb72777eac693e905ab89832670/cryptography-45.0.7-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:dd342f085542f6eb894ca00ef70236ea46070c8a13824c6bde0dfdcd36065b9b", size = 4332186, upload-time = "2025-09-01T11:14:33.95Z" }, + { url = "https://files.pythonhosted.org/packages/0b/11/09700ddad7443ccb11d674efdbe9a832b4455dc1f16566d9bd3834922ce5/cryptography-45.0.7-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:1993a1bb7e4eccfb922b6cd414f072e08ff5816702a0bdb8941c247a6b1b287c", size = 4561639, upload-time = "2025-09-01T11:14:35.343Z" }, + { url = "https://files.pythonhosted.org/packages/71/ed/8f4c1337e9d3b94d8e50ae0b08ad0304a5709d483bfcadfcc77a23dbcb52/cryptography-45.0.7-cp37-abi3-win32.whl", hash = "sha256:18fcf70f243fe07252dcb1b268a687f2358025ce32f9f88028ca5c364b123ef5", size = 2926552, upload-time = "2025-09-01T11:14:36.929Z" }, + { url = "https://files.pythonhosted.org/packages/bc/ff/026513ecad58dacd45d1d24ebe52b852165a26e287177de1d545325c0c25/cryptography-45.0.7-cp37-abi3-win_amd64.whl", hash = "sha256:7285a89df4900ed3bfaad5679b1e668cb4b38a8de1ccbfc84b05f34512da0a90", size = 3392742, upload-time = "2025-09-01T11:14:38.368Z" }, +] + +[[package]] +name = "cytoolz" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "toolz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/f9/3243eed3a6545c2a33a21f74f655e3fcb5d2192613cd3db81a93369eb339/cytoolz-1.0.1.tar.gz", hash = "sha256:89cc3161b89e1bb3ed7636f74ed2e55984fd35516904fc878cae216e42b2c7d6", size = 626652, upload-time = "2024-12-13T05:47:36.672Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d8/e8/218098344ed2cb5f8441fade9b2428e435e7073962374a9c71e59ac141a7/cytoolz-1.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fcb8f7d0d65db1269022e7e0428471edee8c937bc288ebdcb72f13eaa67c2fe4", size = 
414121, upload-time = "2024-12-13T05:45:26.588Z" }, + { url = "https://files.pythonhosted.org/packages/de/27/4d729a5653718109262b758fec1a959aa9facb74c15460d9074dc76d6635/cytoolz-1.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:207d4e4b445e087e65556196ff472ff134370d9a275d591724142e255f384662", size = 390904, upload-time = "2024-12-13T05:45:27.718Z" }, + { url = "https://files.pythonhosted.org/packages/72/c0/cbabfa788bab9c6038953bf9478adaec06e88903a726946ea7c88092f5c4/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21cdf6bac6fd843f3b20280a66fd8df20dea4c58eb7214a2cd8957ec176f0bb3", size = 2090734, upload-time = "2024-12-13T05:45:30.515Z" }, + { url = "https://files.pythonhosted.org/packages/c3/66/369262c60f9423c2da82a60864a259c852f1aa122aced4acd2c679af58c0/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a55ec098036c0dea9f3bdc021f8acd9d105a945227d0811589f0573f21c9ce1", size = 2155933, upload-time = "2024-12-13T05:45:32.721Z" }, + { url = "https://files.pythonhosted.org/packages/aa/4e/ee55186802f8d24b5fbf9a11405ccd1203b30eded07cc17750618219b94e/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a13ab79ff4ce202e03ab646a2134696988b554b6dc4b71451e948403db1331d8", size = 2171903, upload-time = "2024-12-13T05:45:34.205Z" }, + { url = "https://files.pythonhosted.org/packages/a1/96/bd1a9f3396e9b7f618db8cd08d15630769ce3c8b7d0534f92cd639c977ae/cytoolz-1.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e2d944799026e1ff08a83241f1027a2d9276c41f7a74224cd98b7df6e03957d", size = 2125270, upload-time = "2024-12-13T05:45:36.982Z" }, + { url = "https://files.pythonhosted.org/packages/28/48/2a3762873091c88a69e161111cfbc6c222ff145d57ff011a642b169f04f1/cytoolz-1.0.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88ba85834cd523b91fdf10325e1e6d71c798de36ea9bdc187ca7bd146420de6f", size = 1973967, upload-time = "2024-12-13T05:45:39.505Z" }, + { url = "https://files.pythonhosted.org/packages/e4/50/500bd69774bdc49a4d78ec8779eb6ac7c1a9d706bfd91cf2a1dba604373a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5a750b1af7e8bf6727f588940b690d69e25dc47cce5ce467925a76561317eaf7", size = 2021695, upload-time = "2024-12-13T05:45:40.911Z" }, + { url = "https://files.pythonhosted.org/packages/e4/4e/ba5a0ce34869495eb50653de8d676847490cf13a2cac1760fc4d313e78de/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:44a71870f7eae31d263d08b87da7c2bf1176f78892ed8bdade2c2850478cb126", size = 2010177, upload-time = "2024-12-13T05:45:42.48Z" }, + { url = "https://files.pythonhosted.org/packages/87/57/615c630b3089a13adb15351d958d227430cf624f03b1dd39eb52c34c1f59/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c8231b9abbd8e368e036f4cc2e16902c9482d4cf9e02a6147ed0e9a3cd4a9ab0", size = 2154321, upload-time = "2024-12-13T05:45:43.979Z" }, + { url = "https://files.pythonhosted.org/packages/7f/0f/fe1aa2d931e3b35ecc05215bd75da945ea7346095b3b6f6027164e602d5a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:aa87599ccc755de5a096a4d6c34984de6cd9dc928a0c5eaa7607457317aeaf9b", size = 2188374, upload-time = "2024-12-13T05:45:46.783Z" }, + { url = "https://files.pythonhosted.org/packages/de/fa/fd363d97a641b6d0e2fd1d5c35b8fd41d9ccaeb4df56302f53bf23a58e3a/cytoolz-1.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:67cd16537df51baabde3baa770ab7b8d16839c4d21219d5b96ac59fb012ebd2d", size = 2077911, upload-time = "2024-12-13T05:45:48.219Z" }, + { url = "https://files.pythonhosted.org/packages/d9/68/0a22946b98ae5201b54ccb4e651295285c0fb79406022b6ee8b2f791940c/cytoolz-1.0.1-cp312-cp312-win32.whl", hash = "sha256:fb988c333f05ee30ad4693fe4da55d95ec0bb05775d2b60191236493ea2e01f9", size = 321903, upload-time = "2024-12-13T05:45:50.3Z" }, + { url = "https://files.pythonhosted.org/packages/62/1a/f3903197956055032f8cb297342e2dff07e50f83991aebfe5b4c4fcb55e4/cytoolz-1.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:8f89c48d8e5aec55ffd566a8ec858706d70ed0c6a50228eca30986bfa5b4da8b", size = 364490, upload-time = "2024-12-13T05:45:51.494Z" }, +] + +[[package]] +name = "dataclasses-json" +version = "0.6.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "marshmallow" }, + { name = "typing-inspect" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/a4/f71d9cf3a5ac257c993b5ca3f93df5f7fb395c725e7f1e6479d2514173c3/dataclasses_json-0.6.7.tar.gz", hash = "sha256:b6b3e528266ea45b9535223bc53ca645f5208833c29229e847b3f26a1cc55fc0", size = 32227, upload-time = "2024-06-09T16:20:19.103Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/be/d0d44e092656fe7a06b55e6103cbce807cdbdee17884a5367c68c9860853/dataclasses_json-0.6.7-py3-none-any.whl", hash = "sha256:0dbf33f26c8d5305befd61b39d2b3414e8a407bedc2834dea9b8d642666fb40a", size = 28686, upload-time = "2024-06-09T16:20:16.715Z" }, +] + +[[package]] +name = "deprecation" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/d3/8ae2869247df154b64c1884d7346d412fed0c49df84db635aab2d1c40e62/deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff", size = 173788, upload-time = "2020-04-20T14:23:38.738Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/c3/253a89ee03fc9b9682f1541728eb66db7db22148cd94f89ab22528cd1e1b/deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a", size = 11178, upload-time = "2020-04-20T14:23:36.581Z" }, +] + +[[package]] +name = "deptry" +version = "0.23.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "packaging" }, + { name = "requirements-parser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a3/31/3e2f4a9b43bd807b28a49d673b9b5f8dcc7265d43950b24e875ba90e6205/deptry-0.23.1.tar.gz", hash = "sha256:5d23e0ef25f3c56405c05383a476edda55944563c5c47a3e9249ed3ec860d382", size = 460016, upload-time = "2025-07-31T05:54:49.681Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/d0/9785c0e7fdab12f5324467d70ba65ad03b9d4071a13fc182b6d98bab6208/deptry-0.23.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f0b231d098fb5b48d8973c9f192c353ffdd395770063424969fa7f15ddfea7d8", size = 1768731, upload-time = "2025-07-31T05:54:47.348Z" }, + { url = "https://files.pythonhosted.org/packages/c5/4b/46aded35e0de153936b2214e49e5935179eed9f23cbd3a9a0cd9a5ab0abd/deptry-0.23.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:bf057f514bb2fa18a2b192a7f7372bd14577ff46b11486933e8383dfef461983", size = 1667240, upload-time = "2025-07-31T05:54:43.956Z" }, + { url = 
"https://files.pythonhosted.org/packages/ef/f7/206330f68280a1af7edb8bea87f383dbaa4e3b02b37199d40f86e4c43048/deptry-0.23.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ee3f5663bb1c048e2aaf25a4d9e6d09cc1f3b3396ee248980878c6a6c9c0e21", size = 1772019, upload-time = "2025-07-31T05:54:31.165Z" }, + { url = "https://files.pythonhosted.org/packages/c5/80/51a9e94349b47013e2fd78fd221b12202a7866cd2e0882cfd87d63055e88/deptry-0.23.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ae0366dc5f50a5fb29cf90de1110c5e368513de6c1b2dac439f2817f3f752616", size = 1855973, upload-time = "2025-07-31T05:54:37.733Z" }, + { url = "https://files.pythonhosted.org/packages/d5/7a/bff10ddd26ce39c56a9a35bdc98fcf44c2befe5954c8da4bb895e3f750bb/deptry-0.23.1-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:ab156a90a9eda5819aeb1c1da585dd4d5ec509029399a38771a49e78f40db90f", size = 1946957, upload-time = "2025-07-31T05:54:34.567Z" }, + { url = "https://files.pythonhosted.org/packages/7e/b6/c80b190cbd817d1f75f8d02d4b6f4d430b2f3014a09d3895684e291e473b/deptry-0.23.1-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:651c7eb168233755152fcc468713c024d64a03069645187edb4a17ba61ce6133", size = 2025282, upload-time = "2025-07-31T05:54:40.906Z" }, + { url = "https://files.pythonhosted.org/packages/3c/58/1dfb7a6c4ec2daf123264d2c30f53f45791fee46cd0244be5bf97597d2aa/deptry-0.23.1-cp39-abi3-win_amd64.whl", hash = "sha256:8da1e8f70e7086ebc228f3a4a3cfb5aa127b09b5eef60d694503d6bb79809025", size = 1631377, upload-time = "2025-07-31T05:54:51.951Z" }, + { url = "https://files.pythonhosted.org/packages/18/d3/667b974cf42fc50245a8028beb9966643ee214ca567cc6df6e876feca5ed/deptry-0.23.1-cp39-abi3-win_arm64.whl", hash = "sha256:f589497a5809717db4dcf2aa840f2847c0a4c489331608e538850b6a9ab1c30b", size = 1551113, upload-time = "2025-07-31T05:54:50.679Z" }, +] + +[[package]] +name = "distlib" +version = "0.3.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/dd/1bec4c5ddb504ca60fc29472f3d27e8d4da1257a854e1d96742f15c1d02d/distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403", size = 613923, upload-time = "2024-10-09T18:35:47.551Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/a1/cf2472db20f7ce4a6be1253a81cfdf85ad9c7885ffbed7047fb72c24cf87/distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87", size = 468973, upload-time = "2024-10-09T18:35:44.272Z" }, +] + +[[package]] +name = "distro" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" }, +] + +[[package]] +name = "dydantic" +version = "0.0.8" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/08/c5/2d097e5a4816b15186c1ae06c5cfe3c332e69a0f3556dc6cee2d370acf2a/dydantic-0.0.8.tar.gz", hash = "sha256:14a31d4cdfce314ce3e69e8f8c7c46cbc26ce3ce4485de0832260386c612942f", size = 8115, upload-time = "2025-01-29T20:36:13.771Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/7c/a1b120141a300853d82291faf0ba1a95133fa390e4b7d773647b69c8c0f4/dydantic-0.0.8-py3-none-any.whl", hash = "sha256:cd0a991f523bd8632699872f1c0c4278415dd04783e36adec5428defa0afb721", size = 8637, upload-time = "2025-01-29T20:36:12.217Z" }, +] + +[[package]] +name = "ecdsa" +version = "0.19.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/1f/924e3caae75f471eae4b26bd13b698f6af2c44279f67af317439c2f4c46a/ecdsa-0.19.1.tar.gz", hash = "sha256:478cba7b62555866fcb3bb3fe985e06decbdb68ef55713c4e5ab98c57d508e61", size = 201793, upload-time = "2025-03-13T11:52:43.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/a3/460c57f094a4a165c84a1341c373b0a4f5ec6ac244b998d5021aade89b77/ecdsa-0.19.1-py2.py3-none-any.whl", hash = "sha256:30638e27cf77b7e15c4c4cc1973720149e1033827cfd00661ca5c8cc0cdb24c3", size = 150607, upload-time = "2025-03-13T11:52:41.757Z" }, +] + +[[package]] +name = "egcd" +version = "2.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/f5/c0c0808f8a3f8a4af605b48a241b16a634ceddd41b5e3ee05ae2fd9e1e42/egcd-2.0.2.tar.gz", hash = "sha256:3b05b0feb67549f8f76c97afed36c53252c0d7cb9a65bf4e6ca8b99110fb77f2", size = 6952, upload-time = "2024-12-31T21:05:21.984Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/e7/9d984faee490e50a495b50d0a87c42fe661252f9513157776d8cb2724445/egcd-2.0.2-py3-none-any.whl", hash = "sha256:2f0576a651b4aa9e9c4640bba078f9741d1624f386b55cb5363a79ae4b564bd2", size = 7187, upload-time = "2024-12-31T21:05:19.098Z" }, +] + +[[package]] +name = "epyxid" +version = "0.3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a4/6e/0c7c674117ca089890eb311d17b29035dd2226978f29288b8bcbece9ace7/epyxid-0.3.3.tar.gz", hash = "sha256:3fbb54b96b5c1fdc1cb2484c992e450beaeb21a299ba5fbb6fcf8a2b04ee4249", size = 10579, upload-time = "2025-01-13T08:24:51.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/44/53bd1ef40092013712359c4a8e1f74df321076f889ef3d59271fa2c094cf/epyxid-0.3.3-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:2dbd91b3e31d6a722cb621f62300f6e75c1f67b56582a16d52193ca589e66adb", size = 264092, upload-time = "2025-01-13T08:24:47.174Z" }, + { url = "https://files.pythonhosted.org/packages/b8/fd/6a0df28c23af1343e79abdfcbdd79084a8e98279f1405e09c035def88d27/epyxid-0.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:87394b46a9b6a70693afd2b2448bdcea6bb4e47462d3658f177c80e64ee4307b", size = 256315, upload-time = "2025-01-13T08:24:39.447Z" }, + { url = "https://files.pythonhosted.org/packages/a1/68/7c4e83ba6d2c8588b70ea10a9ffaf22763a9d14d80dfa54fa86a26e34a7b/epyxid-0.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af1be68bb3ca25cd0e4f2c3fd9d42dc652d0e5dc5f80ac26bb4fb3481fc0924d", size = 280706, upload-time = "2025-01-13T08:23:18.882Z" }, + { url = "https://files.pythonhosted.org/packages/5c/0a/d401924e611a06d3bc72432669eaa6d8062e1321ac29896db713b2768bb6/epyxid-0.3.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:ed2a83f783324e2fc31639de40b253dc3dd6cbe1477ac5f46e3c3a0194da6665", size = 295278, upload-time = "2025-01-13T08:23:33.028Z" }, + { url = "https://files.pythonhosted.org/packages/dd/f0/6020ef13af523f52f2911b14784168bfd82caddc88731996da9f92977bc9/epyxid-0.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d8eebaf3cd173f4b57749081dab32b3aa7fba1ae5de455559ab143a6dd84571f", size = 312316, upload-time = "2025-01-13T08:23:48.777Z" }, + { url = "https://files.pythonhosted.org/packages/40/40/c8dbcf13c48dd330748a63d9524f876da6c0fdb07d3b324ba2665f9744ef/epyxid-0.3.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:103ba7f38c376e4534582a24a6a71ac169c7d936d44b1ff9a73a30f72cc39919", size = 361536, upload-time = "2025-01-13T08:24:03.133Z" }, + { url = "https://files.pythonhosted.org/packages/b1/e9/b40b9b02e9a41b749403f46d96aac4d9038b65820950ba405ebb212d6e72/epyxid-0.3.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d8c7044b57eb62f09eebcc33607282189df6b2312f87f8f1c654204c27c6c60", size = 285577, upload-time = "2025-01-13T08:24:29.292Z" }, + { url = "https://files.pythonhosted.org/packages/f8/06/a51158d6aad4196a1dea8996553a94c20509b27c14891efa02f74c898d23/epyxid-0.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:02dc9fc6bbfea1b6f2e38e2436df66be91034ef609d53f77049714ad6f027072", size = 299404, upload-time = "2025-01-13T08:24:18.594Z" }, + { url = "https://files.pythonhosted.org/packages/76/70/8a0584c630fc5480b3f824bcce66afc6e61663731fd18b0be7c0bfaf40c2/epyxid-0.3.3-cp312-cp312-win32.whl", hash = "sha256:923dd761bd4615113a318c01fec02d364f574b59a45ca49ad706a50db8883161", size = 163283, upload-time = "2025-01-13T08:25:06.163Z" }, + { url = "https://files.pythonhosted.org/packages/9c/e9/dcb5cc97a049d2648324b8faa33df01c9f78938f682713fef19f4618679f/epyxid-0.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:47faf9e3acc2733155c42ff878ad054645f856c1a1f34a63c85ed91446ebfdf4", size = 169123, upload-time = "2025-01-13T08:24:54.817Z" }, +] + +[[package]] +name = "eth-abi" +version = "5.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-typing" }, + { name = "eth-utils" }, + { name = "parsimonious" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/71/d9e1380bd77fd22f98b534699af564f189b56d539cc2b9dab908d4e4c242/eth_abi-5.2.0.tar.gz", hash = "sha256:178703fa98c07d8eecd5ae569e7e8d159e493ebb6eeb534a8fe973fbc4e40ef0", size = 49797, upload-time = "2025-01-14T16:29:34.629Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/b4/2f3982c4cbcbf5eeb6aec62df1533c0e63c653b3021ff338d44944405676/eth_abi-5.2.0-py3-none-any.whl", hash = "sha256:17abe47560ad753f18054f5b3089fcb588f3e3a092136a416b6c1502cb7e8877", size = 28511, upload-time = "2025-01-14T16:29:31.862Z" }, +] + +[[package]] +name = "eth-account" +version = "0.13.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bitarray" }, + { name = "ckzg" }, + { name = "eth-abi" }, + { name = "eth-keyfile" }, + { name = "eth-keys" }, + { name = "eth-rlp" }, + { name = "eth-utils" }, + { name = "hexbytes" }, + { name = "pydantic" }, + { name = "rlp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/74/cf/20f76a29be97339c969fd765f1237154286a565a1d61be98e76bb7af946a/eth_account-0.13.7.tar.gz", hash = "sha256:5853ecbcbb22e65411176f121f5f24b8afeeaf13492359d254b16d8b18c77a46", size = 935998, upload-time = "2025-04-21T21:11:21.204Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/46/18/088fb250018cbe665bc2111974301b2d59f294a565aff7564c4df6878da2/eth_account-0.13.7-py3-none-any.whl", hash = "sha256:39727de8c94d004ff61d10da7587509c04d2dc7eac71e04830135300bdfc6d24", size = 587452, upload-time = "2025-04-21T21:11:18.346Z" }, +] + +[[package]] +name = "eth-hash" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/38/577b7bc9380ef9dff0f1dffefe0c9a1ded2385e7a06c306fd95afb6f9451/eth_hash-0.7.1.tar.gz", hash = "sha256:d2411a403a0b0a62e8247b4117932d900ffb4c8c64b15f92620547ca5ce46be5", size = 12227, upload-time = "2025-01-13T21:29:21.765Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/db/f8775490669d28aca24871c67dd56b3e72105cb3bcae9a4ec65dd70859b3/eth_hash-0.7.1-py3-none-any.whl", hash = "sha256:0fb1add2adf99ef28883fd6228eb447ef519ea72933535ad1a0b28c6f65f868a", size = 8028, upload-time = "2025-01-13T21:29:19.365Z" }, +] + +[package.optional-dependencies] +pycryptodome = [ + { name = "pycryptodome" }, +] + +[[package]] +name = "eth-keyfile" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-keys" }, + { name = "eth-utils" }, + { name = "pycryptodome" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/35/66/dd823b1537befefbbff602e2ada88f1477c5b40ec3731e3d9bc676c5f716/eth_keyfile-0.8.1.tar.gz", hash = "sha256:9708bc31f386b52cca0969238ff35b1ac72bd7a7186f2a84b86110d3c973bec1", size = 12267, upload-time = "2024-04-23T20:28:53.862Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/fc/48a586175f847dd9e05e5b8994d2fe8336098781ec2e9836a2ad94280281/eth_keyfile-0.8.1-py3-none-any.whl", hash = "sha256:65387378b82fe7e86d7cb9f8d98e6d639142661b2f6f490629da09fddbef6d64", size = 7510, upload-time = "2024-04-23T20:28:51.063Z" }, +] + +[[package]] +name = "eth-keys" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-typing" }, + { name = "eth-utils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/11/1ed831c50bd74f57829aa06e58bd82a809c37e070ee501c953b9ac1f1552/eth_keys-0.7.0.tar.gz", hash = "sha256:79d24fd876201df67741de3e3fefb3f4dbcbb6ace66e47e6fe662851a4547814", size = 30166, upload-time = "2025-04-07T17:40:21.697Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/25/0ae00f2b0095e559d61ad3dc32171bd5a29dfd95ab04b4edd641f7c75f72/eth_keys-0.7.0-py3-none-any.whl", hash = "sha256:b0cdda8ffe8e5ba69c7c5ca33f153828edcace844f67aabd4542d7de38b159cf", size = 20656, upload-time = "2025-04-07T17:40:20.441Z" }, +] + +[[package]] +name = "eth-rlp" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-utils" }, + { name = "hexbytes" }, + { name = "rlp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7f/ea/ad39d001fa9fed07fad66edb00af701e29b48be0ed44a3bcf58cb3adf130/eth_rlp-2.2.0.tar.gz", hash = "sha256:5e4b2eb1b8213e303d6a232dfe35ab8c29e2d3051b86e8d359def80cd21db83d", size = 7720, upload-time = "2025-02-04T21:51:08.134Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/3b/57efe2bc2df0980680d57c01a36516cd3171d2319ceb30e675de19fc2cc5/eth_rlp-2.2.0-py3-none-any.whl", hash = "sha256:5692d595a741fbaef1203db6a2fedffbd2506d31455a6ad378c8449ee5985c47", size = 4446, upload-time = "2025-02-04T21:51:05.823Z" }, +] + +[[package]] +name = "eth-typing" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/54/62aa24b9cc708f06316167ee71c362779c8ed21fc8234a5cd94a8f53b623/eth_typing-5.2.1.tar.gz", hash = "sha256:7557300dbf02a93c70fa44af352b5c4a58f94e997a0fd6797fb7d1c29d9538ee", size = 21806, upload-time = "2025-04-14T20:39:28.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/72/c370bbe4c53da7bf998d3523f5a0f38867654923a82192df88d0705013d3/eth_typing-5.2.1-py3-none-any.whl", hash = "sha256:b0c2812ff978267563b80e9d701f487dd926f1d376d674f3b535cfe28b665d3d", size = 19163, upload-time = "2025-04-14T20:39:26.571Z" }, +] + +[[package]] +name = "eth-utils" +version = "5.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cytoolz", marker = "implementation_name == 'cpython'" }, + { name = "eth-hash" }, + { name = "eth-typing" }, + { name = "pydantic" }, + { name = "toolz", marker = "implementation_name == 'pypy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e6/e1/ee3a8728227c3558853e63ff35bd4c449abdf5022a19601369400deacd39/eth_utils-5.3.1.tar.gz", hash = "sha256:c94e2d2abd024a9a42023b4ddc1c645814ff3d6a737b33d5cfd890ebf159c2d1", size = 123506, upload-time = "2025-08-27T16:37:17.378Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/4d/257cdc01ada430b8e84b9f2385c2553f33218f5b47da9adf0a616308d4b7/eth_utils-5.3.1-py3-none-any.whl", hash = "sha256:1f5476d8f29588d25b8ae4987e1ffdfae6d4c09026e476c4aad13b32dda3ead0", size = 102529, upload-time = "2025-08-27T16:37:15.449Z" }, +] + +[[package]] +name = "faiss-cpu" +version = "1.12.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/80/bb75a7ed6e824dea452a24d3434a72ed799324a688b10b047d441d270185/faiss_cpu-1.12.0.tar.gz", hash = "sha256:2f87cbcd603f3ed464ebceb857971fdebc318de938566c9ae2b82beda8e953c0", size = 69292, upload-time = "2025-08-13T06:07:26.553Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4a/58/602ed184d35742eb240cbfea237bd214f2ae7f01cb369c39f4dff392f7c9/faiss_cpu-1.12.0-cp312-cp312-macosx_13_0_x86_64.whl", hash = "sha256:9b54990fcbcf90e37393909d4033520237194263c93ab6dbfae0616ef9af242b", size = 8034413, upload-time = "2025-08-13T06:06:05.564Z" }, + { url = "https://files.pythonhosted.org/packages/83/d5/f84c3d0e022cdeb73ff8406a6834a7698829fa242eb8590ddf8a0b09357f/faiss_cpu-1.12.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a5f5bca7e1a3e0a98480d1e2748fc86d12c28d506173e460e6746886ff0e08de", size = 3362034, upload-time = "2025-08-13T06:06:07.091Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a4ba4d285ea4f9b0824bf31ebded3171da08bfcf5376f4771cc5481f72cd/faiss_cpu-1.12.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:016e391f49933875b8d60d47f282f2e93d8ea9f9ffbda82467aa771b11a237db", size = 3834319, upload-time = "2025-08-13T06:06:08.86Z" }, + { url = "https://files.pythonhosted.org/packages/4c/c9/be4e52fd96be601fefb313c26e1259ac2e6b556fb08cc392db641baba8c7/faiss_cpu-1.12.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2e4963c7188f57cfba248f09ebd8a14c76b5ffb87382603ccd4576f2da39d74", size = 31421585, upload-time = "2025-08-13T06:06:10.643Z" }, + { url = "https://files.pythonhosted.org/packages/4b/aa/12c6723ce30df721a6bace21398559c0367c5418c04139babc2d26d8d158/faiss_cpu-1.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:88bfe134f8c7cd2dda7df34f2619448906624962c8207efdd6eb1647e2f5338b", size = 9762449, upload-time = "2025-08-13T06:06:13.373Z" }, + { url = "https://files.pythonhosted.org/packages/67/15/ed2c9de47c3ebae980d6938f0ec12d739231438958bc5ab2d636b272d913/faiss_cpu-1.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9243ee4c224a0d74419040503f22bf067462a040281bf6f3f107ab205c97d438", size = 24156525, upload-time = "2025-08-13T06:06:15.307Z" }, + { url = "https://files.pythonhosted.org/packages/c9/b8/6911de6b8fdcfa76144680c2195df6ce7e0cc920a8be8c5bbd2dfe5e3c37/faiss_cpu-1.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:6b8012353d50d9bc81bcfe35b226d0e5bfad345fdebe0da31848395ebc83816d", size = 18169636, upload-time = "2025-08-13T06:06:17.613Z" }, + { url = "https://files.pythonhosted.org/packages/2f/69/d2b0f434b0ae35344280346b58d2b9a251609333424f3289c54506e60c51/faiss_cpu-1.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:8b4f5b18cbe335322a51d2785bb044036609c35bfac5915bff95eadc10e89ef1", size = 8012423, upload-time = "2025-08-13T06:06:19.73Z" }, +] + +[[package]] +name = "fastapi" +version = "0.116.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" }, +] + +[[package]] +name = "filelock" +version = "3.16.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/db/3ef5bb276dae18d6ec2124224403d1d67bccdbefc17af4cc8f553e341ab1/filelock-3.16.1.tar.gz", hash = "sha256:c249fbfcd5db47e5e2d6d62198e565475ee65e4831e2561c8e313fa7eb961435", size = 18037, upload-time = "2024-09-17T19:02:01.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b9/f8/feced7779d755758a52d1f6635d990b8d98dc0a29fa568bbe0625f18fdf3/filelock-3.16.1-py3-none-any.whl", hash = "sha256:2082e5703d51fbf98ea75855d9d5527e33d8ff23099bec374a134febee6946b0", size = 16163, upload-time = "2024-09-17T19:02:00.268Z" }, +] + +[[package]] +name = "filetype" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/29/745f7d30d47fe0f251d3ad3dc2978a23141917661998763bebb6da007eb1/filetype-1.2.0.tar.gz", hash = "sha256:66b56cd6474bf41d8c54660347d37afcc3f7d1970648de365c102ef77548aadb", size = 998020, upload-time = "2022-11-02T17:34:04.141Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/79/1b8fa1bb3568781e84c9200f951c735f3f157429f44be0495da55894d620/filetype-1.2.0-py2.py3-none-any.whl", hash = "sha256:7ce71b6880181241cf7ac8697a2f1eb6a8bd9b429f7ad6d27b8db9ba5f1c2d25", size = 19970, upload-time = "2022-11-02T17:34:01.425Z" }, +] + +[[package]] +name = "frozenlist" +version = "1.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/79/b1/b64018016eeb087db503b038296fd782586432b9c077fc5c7839e9cb6ef6/frozenlist-1.7.0.tar.gz", hash = 
"sha256:2e310d81923c2437ea8670467121cc3e9b0f76d3043cc1d2331d56c7fb7a3a8f", size = 45078, upload-time = "2025-06-09T23:02:35.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/a2/c8131383f1e66adad5f6ecfcce383d584ca94055a34d683bbb24ac5f2f1c/frozenlist-1.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:3dbf9952c4bb0e90e98aec1bd992b3318685005702656bc6f67c1a32b76787f2", size = 81424, upload-time = "2025-06-09T23:00:42.24Z" }, + { url = "https://files.pythonhosted.org/packages/4c/9d/02754159955088cb52567337d1113f945b9e444c4960771ea90eb73de8db/frozenlist-1.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:1f5906d3359300b8a9bb194239491122e6cf1444c2efb88865426f170c262cdb", size = 47952, upload-time = "2025-06-09T23:00:43.481Z" }, + { url = "https://files.pythonhosted.org/packages/01/7a/0046ef1bd6699b40acd2067ed6d6670b4db2f425c56980fa21c982c2a9db/frozenlist-1.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3dabd5a8f84573c8d10d8859a50ea2dec01eea372031929871368c09fa103478", size = 46688, upload-time = "2025-06-09T23:00:44.793Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a2/a910bafe29c86997363fb4c02069df4ff0b5bc39d33c5198b4e9dd42d8f8/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa57daa5917f1738064f302bf2626281a1cb01920c32f711fbc7bc36111058a8", size = 243084, upload-time = "2025-06-09T23:00:46.125Z" }, + { url = "https://files.pythonhosted.org/packages/64/3e/5036af9d5031374c64c387469bfcc3af537fc0f5b1187d83a1cf6fab1639/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c193dda2b6d49f4c4398962810fa7d7c78f032bf45572b3e04dd5249dff27e08", size = 233524, upload-time = "2025-06-09T23:00:47.73Z" }, + { url = "https://files.pythonhosted.org/packages/06/39/6a17b7c107a2887e781a48ecf20ad20f1c39d94b2a548c83615b5b879f28/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bfe2b675cf0aaa6d61bf8fbffd3c274b3c9b7b1623beb3809df8a81399a4a9c4", size = 248493, upload-time = "2025-06-09T23:00:49.742Z" }, + { url = "https://files.pythonhosted.org/packages/be/00/711d1337c7327d88c44d91dd0f556a1c47fb99afc060ae0ef66b4d24793d/frozenlist-1.7.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8fc5d5cda37f62b262405cf9652cf0856839c4be8ee41be0afe8858f17f4c94b", size = 244116, upload-time = "2025-06-09T23:00:51.352Z" }, + { url = "https://files.pythonhosted.org/packages/24/fe/74e6ec0639c115df13d5850e75722750adabdc7de24e37e05a40527ca539/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b0d5ce521d1dd7d620198829b87ea002956e4319002ef0bc8d3e6d045cb4646e", size = 224557, upload-time = "2025-06-09T23:00:52.855Z" }, + { url = "https://files.pythonhosted.org/packages/8d/db/48421f62a6f77c553575201e89048e97198046b793f4a089c79a6e3268bd/frozenlist-1.7.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:488d0a7d6a0008ca0db273c542098a0fa9e7dfaa7e57f70acef43f32b3f69dca", size = 241820, upload-time = "2025-06-09T23:00:54.43Z" }, + { url = "https://files.pythonhosted.org/packages/1d/fa/cb4a76bea23047c8462976ea7b7a2bf53997a0ca171302deae9d6dd12096/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:15a7eaba63983d22c54d255b854e8108e7e5f3e89f647fc854bd77a237e767df", size = 236542, upload-time = "2025-06-09T23:00:56.409Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/32/476a4b5cfaa0ec94d3f808f193301debff2ea42288a099afe60757ef6282/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1eaa7e9c6d15df825bf255649e05bd8a74b04a4d2baa1ae46d9c2d00b2ca2cb5", size = 249350, upload-time = "2025-06-09T23:00:58.468Z" }, + { url = "https://files.pythonhosted.org/packages/8d/ba/9a28042f84a6bf8ea5dbc81cfff8eaef18d78b2a1ad9d51c7bc5b029ad16/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e4389e06714cfa9d47ab87f784a7c5be91d3934cd6e9a7b85beef808297cc025", size = 225093, upload-time = "2025-06-09T23:01:00.015Z" }, + { url = "https://files.pythonhosted.org/packages/bc/29/3a32959e68f9cf000b04e79ba574527c17e8842e38c91d68214a37455786/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:73bd45e1488c40b63fe5a7df892baf9e2a4d4bb6409a2b3b78ac1c6236178e01", size = 245482, upload-time = "2025-06-09T23:01:01.474Z" }, + { url = "https://files.pythonhosted.org/packages/80/e8/edf2f9e00da553f07f5fa165325cfc302dead715cab6ac8336a5f3d0adc2/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:99886d98e1643269760e5fe0df31e5ae7050788dd288947f7f007209b8c33f08", size = 249590, upload-time = "2025-06-09T23:01:02.961Z" }, + { url = "https://files.pythonhosted.org/packages/1c/80/9a0eb48b944050f94cc51ee1c413eb14a39543cc4f760ed12657a5a3c45a/frozenlist-1.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:290a172aae5a4c278c6da8a96222e6337744cd9c77313efe33d5670b9f65fc43", size = 237785, upload-time = "2025-06-09T23:01:05.095Z" }, + { url = "https://files.pythonhosted.org/packages/f3/74/87601e0fb0369b7a2baf404ea921769c53b7ae00dee7dcfe5162c8c6dbf0/frozenlist-1.7.0-cp312-cp312-win32.whl", hash = "sha256:426c7bc70e07cfebc178bc4c2bf2d861d720c4fff172181eeb4a4c41d4ca2ad3", size = 39487, upload-time = "2025-06-09T23:01:06.54Z" }, + { url = "https://files.pythonhosted.org/packages/0b/15/c026e9a9fc17585a9d461f65d8593d281fedf55fbf7eb53f16c6df2392f9/frozenlist-1.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:563b72efe5da92e02eb68c59cb37205457c977aa7a449ed1b37e6939e5c47c6a", size = 43874, upload-time = "2025-06-09T23:01:07.752Z" }, + { url = "https://files.pythonhosted.org/packages/ee/45/b82e3c16be2182bff01179db177fe144d58b5dc787a7d4492c6ed8b9317f/frozenlist-1.7.0-py3-none-any.whl", hash = "sha256:9a5af342e34f7e97caf8c995864c7a396418ae2859cc6fdf1b1073020d516a7e", size = 13106, upload-time = "2025-06-09T23:02:34.204Z" }, +] + +[[package]] +name = "gotrue" +version = "2.12.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "pydantic" }, + { name = "pyjwt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/aa/6c/fe920e91959bd211325860332be5898b6b53d6ccd873c053fc5cc829020c/gotrue-2.12.4.tar.gz", hash = "sha256:35d2e58e066486321f4dff0033b30a53d057c7f436c15287122fa0cb833029b1", size = 34817, upload-time = "2025-08-08T15:55:49.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/2f/0e68d566d9339b8d320399d755704e17a94a123cf22f124b4ab2f686bcc3/gotrue-2.12.4-py3-none-any.whl", hash = "sha256:cf36dfcebc1da63b8d1e7b93eb1a35dfee3dcb1e1376833c256464010eb5fcd6", size = 42783, upload-time = "2025-08-08T15:55:48.289Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/b8/704d753a5a45507a7aab61f18db9509302ed3d0a27ac7e0359ec2905b1a6/greenlet-3.2.4.tar.gz", hash = 
"sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d", size = 188260, upload-time = "2025-08-07T13:24:33.51Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" }, + { url = "https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" }, + { url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" }, + { url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" }, + { url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, + { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, +] + +[[package]] +name = "gunicorn" +version = "23.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/34/72/9614c465dc206155d93eff0ca20d42e1e35afc533971379482de953521a4/gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec", size = 375031, upload-time = "2024-08-10T20:25:27.378Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/cb/7d/6dac2a6e1eba33ee43f318edbed4ff29151a49b5d37f080aad1e6469bca4/gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d", size = 85029, upload-time = "2024-08-10T20:25:24.996Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "h2" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "hpack" }, + { name = "hyperframe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1d/17/afa56379f94ad0fe8defd37d6eb3f89a25404ffc71d4d848893d270325fc/h2-4.3.0.tar.gz", hash = "sha256:6c59efe4323fa18b47a632221a1888bd7fde6249819beda254aeca909f221bf1", size = 2152026, upload-time = "2025-08-23T18:12:19.778Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl", hash = "sha256:c438f029a25f7945c69e0ccf0fb951dc3f73a5f6412981daee861431b70e2bdd", size = 61779, upload-time = "2025-08-23T18:12:17.779Z" }, +] + +[[package]] +name = "hexbytes" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7f/87/adf4635b4b8c050283d74e6db9a81496063229c9263e6acc1903ab79fbec/hexbytes-1.3.1.tar.gz", hash = "sha256:a657eebebdfe27254336f98d8af6e2236f3f83aed164b87466b6cf6c5f5a4765", size = 8633, upload-time = "2025-05-14T16:45:17.5Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/e0/3b31492b1c89da3c5a846680517871455b30c54738486fc57ac79a5761bd/hexbytes-1.3.1-py3-none-any.whl", hash = "sha256:da01ff24a1a9a2b1881c4b85f0e9f9b0f51b526b379ffa23832ae7899d29c2c7", size = 5074, upload-time = "2025-05-14T16:45:16.179Z" }, +] + +[[package]] +name = "hpack" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" }, +] + +[[package]] +name = "httpcore" +version = "1.0.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = 
"2025-04-24T22:06:22.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" }, +] + +[[package]] +name = "httpx" +version = "0.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "certifi" }, + { name = "httpcore" }, + { name = "idna" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, +] + +[package.optional-dependencies] +http2 = [ + { name = "h2" }, +] + +[[package]] +name = "httpx-sse" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6e/fa/66bd985dd0b7c109a3bcb89272ee0bfb7e2b4d06309ad7b38ff866734b2a/httpx_sse-0.4.1.tar.gz", hash = "sha256:8f44d34414bc7b21bf3602713005c5df4917884f76072479b21f68befa4ea26e", size = 12998, upload-time = "2025-06-24T13:21:05.71Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/0a/6269e3473b09aed2dab8aa1a600c70f31f00ae1349bee30658f7e358a159/httpx_sse-0.4.1-py3-none-any.whl", hash = "sha256:cba42174344c3a5b06f255ce65b350880f962d99ead85e776f23c6618a377a37", size = 8054, upload-time = "2025-06-24T13:21:04.772Z" }, +] + +[[package]] +name = "hyperframe" +version = "6.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "intentkit" +version = "0.0.1" +source = { editable = "intentkit" } +dependencies = [ + { name = "aiohttp" }, + { name = "aiosqlite" }, + { name = "asyncpg" }, + { name = "aws-secretsmanager-caching" }, + { name = "bip32" }, + { name = "boto3" }, + { name = "botocore" }, + { name = "cdp-sdk" }, + { name = "coinbase-agentkit" }, + { name = "coinbase-agentkit-langchain" }, + { name = "cron-validator" }, + { name = "epyxid" }, + { name = "eth-keys" }, + { name = "eth-utils" }, + { name = "fastapi" }, + { name = "filetype" }, + { name = "httpx" }, + { name = "jsonref" }, + { name = "langchain" }, + { name = "langchain-community" }, + { name = "langchain-core" }, + { name = "langchain-deepseek" }, + { name = "langchain-mcp-adapters" }, + { name = "langchain-openai" }, + { name = "langchain-text-splitters" }, + { name = "langchain-xai" }, + { name = "langgraph" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-checkpoint-postgres" }, + { name = "langgraph-prebuilt" }, + { name = "langmem" }, + { name = "mypy-boto3-s3" }, + { name = "openai" }, + { name = "pillow" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "pytz" }, + { name = "pyyaml" }, + { name = "redis" }, + { name = "requests" }, + { name = "requests-oauthlib" }, + { name = "slack-sdk" }, + { name = "sqlalchemy", extra = ["asyncio"] }, + { name = "starlette" }, + { name = "supabase" }, + { name = "tenacity" }, + { name = "tweepy", extra = ["async"] }, + { name = "uvicorn" }, + { name = "web3" }, +] + +[package.dev-dependencies] +dev = [ + { name = "jsonschema" }, + { name = "pytest" }, + { name = "pytest-asyncio" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiohttp", specifier = ">=3.11.16" }, + { name = "aiosqlite", specifier = ">=0.21.0" }, + { name = "asyncpg", specifier = ">=0.30.0" }, + { name = "aws-secretsmanager-caching", specifier = ">=1.1.3" }, + { name = "bip32", specifier = ">=2.0.0" }, + { name = "boto3", specifier = ">=1.37.23,<2.0.0" }, + { name = "botocore", specifier = ">=1.35.97" }, + { name = "cdp-sdk", specifier = ">=1.22.0" }, + { name = "coinbase-agentkit", specifier = ">=0.6.0,<0.7.0" }, + { name = "coinbase-agentkit-langchain", specifier = ">=0.5.0" }, + { name = "cron-validator", specifier = ">=1.0.8,<2.0.0" }, + { name = "epyxid", specifier = ">=0.3.3" }, + { name = "eth-keys", specifier = ">=0.4.0" }, + { name = "eth-utils", specifier = ">=2.1.0" }, + { name = "fastapi", specifier = ">=0.115.8" }, + { name = "filetype", specifier = ">=1.2.0,<2.0.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "jsonref", specifier = ">=1.1.0" }, + { name = "langchain", specifier = ">=0.3.25,<0.4.0" }, + { name = "langchain-community", specifier = ">=0.3.19" }, + { name = "langchain-core", specifier = ">=0.3.43" }, + { name = "langchain-deepseek", specifier = ">=0.1.4" }, + { name = 
"langchain-mcp-adapters", specifier = ">=0.0.11" }, + { name = "langchain-openai", specifier = ">=0.3.8" }, + { name = "langchain-text-splitters", specifier = ">=0.3.8" }, + { name = "langchain-xai", specifier = ">=0.2.1" }, + { name = "langgraph", specifier = ">=0.6.1,<0.7.0" }, + { name = "langgraph-checkpoint", specifier = ">=2.0.18" }, + { name = "langgraph-checkpoint-postgres", specifier = ">=2.0.16,<2.0.23" }, + { name = "langgraph-prebuilt", specifier = ">=0.6.1,<0.7.0" }, + { name = "langmem", specifier = ">=0.0.27" }, + { name = "mypy-boto3-s3", specifier = ">=1.37.24,<2.0.0" }, + { name = "openai", specifier = ">=1.59.6" }, + { name = "pillow", specifier = ">=11.1.0,<12.0.0" }, + { name = "psycopg", specifier = ">=3.2.9" }, + { name = "psycopg-pool", specifier = ">=3.2.4" }, + { name = "pydantic", specifier = ">=2.10.6,<2.11.0" }, + { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "pytz", specifier = ">=2025.1" }, + { name = "pyyaml", specifier = ">=6.0.2" }, + { name = "redis", specifier = ">=5.2.1,<7.0.0" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "requests-oauthlib", specifier = ">=2.0.0" }, + { name = "slack-sdk", specifier = ">=3.34.0" }, + { name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.37" }, + { name = "starlette", specifier = ">=0.47.1" }, + { name = "supabase", specifier = ">=2.16.0" }, + { name = "tenacity", specifier = ">=9.1.2" }, + { name = "tweepy", extras = ["async"], specifier = ">=4.15.0" }, + { name = "uvicorn", specifier = ">=0.34.0,<1.0.0" }, + { name = "web3", specifier = ">=7.10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "jsonschema", specifier = ">=4.21.1,<5" }, + { name = "pytest", specifier = ">=7.0.0" }, + { name = "pytest-asyncio", specifier = ">=0.21.0" }, + { name = "ruff", specifier = ">=0.11.9,<0.12" }, +] + +[[package]] +name = "intentkit-workspace" +version = "0.5.0" +source = { virtual = "." 
} +dependencies = [ + { name = "aiogram" }, + { name = "aiohttp" }, + { name = "aiosqlite" }, + { name = "alembic" }, + { name = "anyio" }, + { name = "apscheduler" }, + { name = "asyncpg" }, + { name = "aws-secretsmanager-caching" }, + { name = "beautifulsoup4" }, + { name = "bip32" }, + { name = "boto3" }, + { name = "botocore" }, + { name = "cdp-sdk" }, + { name = "coinbase-agentkit" }, + { name = "coinbase-agentkit-langchain" }, + { name = "cron-validator" }, + { name = "epyxid" }, + { name = "eth-keys" }, + { name = "eth-utils" }, + { name = "faiss-cpu" }, + { name = "fastapi" }, + { name = "filetype" }, + { name = "gunicorn" }, + { name = "httpx" }, + { name = "intentkit" }, + { name = "jsonref" }, + { name = "jsonschema" }, + { name = "langchain" }, + { name = "langchain-community" }, + { name = "langchain-core" }, + { name = "langchain-deepseek" }, + { name = "langchain-mcp-adapters" }, + { name = "langchain-openai" }, + { name = "langchain-postgres" }, + { name = "langchain-text-splitters" }, + { name = "langchain-xai" }, + { name = "langgraph" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-checkpoint-postgres" }, + { name = "langgraph-prebuilt" }, + { name = "langmem" }, + { name = "mypy-boto3-s3" }, + { name = "openai" }, + { name = "pgvector" }, + { name = "pillow" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, + { name = "psycopg2-binary" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "pyjwt" }, + { name = "python-dotenv" }, + { name = "python-multipart" }, + { name = "pytz" }, + { name = "pyyaml" }, + { name = "redis" }, + { name = "requests" }, + { name = "requests-oauthlib" }, + { name = "sentry-sdk", extra = ["fastapi"] }, + { name = "slack-sdk" }, + { name = "sqlalchemy", extra = ["asyncio"] }, + { name = "starlette" }, + { name = "supabase" }, + { name = "telegramify-markdown" }, + { name = "tenacity" }, + { name = "tweepy", extra = ["async"] }, + { name = "uvicorn" }, + { name = "web3" }, +] + +[package.dev-dependencies] +dev = [ + { name = "deptry" }, + { name = "jsonschema" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "aiogram", specifier = ">=3.17.0" }, + { name = "aiohttp", specifier = ">=3.11.16" }, + { name = "aiosqlite", specifier = ">=0.21.0" }, + { name = "alembic", specifier = ">=1.14.0" }, + { name = "anyio", specifier = ">=4.8.0" }, + { name = "apscheduler", specifier = ">=3.11.0" }, + { name = "asyncpg", specifier = ">=0.30.0" }, + { name = "aws-secretsmanager-caching", specifier = ">=1.1.3" }, + { name = "beautifulsoup4", specifier = ">=4.13.4" }, + { name = "bip32", specifier = ">=2.0.0" }, + { name = "boto3", specifier = ">=1.37.23,<2.0.0" }, + { name = "botocore", specifier = ">=1.35.97" }, + { name = "cdp-sdk", specifier = ">=1.22.0" }, + { name = "coinbase-agentkit", specifier = ">=0.6.0,<0.7.0" }, + { name = "coinbase-agentkit-langchain", specifier = ">=0.5.0" }, + { name = "cron-validator", specifier = ">=1.0.8,<2.0.0" }, + { name = "epyxid", specifier = ">=0.3.3" }, + { name = "eth-keys", specifier = ">=0.4.0" }, + { name = "eth-utils", specifier = ">=2.1.0" }, + { name = "faiss-cpu", specifier = ">=1.11.0" }, + { name = "fastapi", specifier = ">=0.115.8" }, + { name = "filetype", specifier = ">=1.2.0,<2.0.0" }, + { name = "gunicorn", specifier = ">=23.0.0" }, + { name = "httpx", specifier = ">=0.28.1" }, + { name = "intentkit", editable = "intentkit" }, + { name = "jsonref", specifier = ">=1.1.0" }, + { name = "jsonschema", specifier = ">=4.24.0" }, + { name = 
"langchain", specifier = ">=0.3.25,<0.4.0" }, + { name = "langchain-community", specifier = ">=0.3.19" }, + { name = "langchain-core", specifier = ">=0.3.43" }, + { name = "langchain-deepseek", specifier = ">=0.1.4" }, + { name = "langchain-mcp-adapters", specifier = ">=0.0.11" }, + { name = "langchain-openai", specifier = ">=0.3.8" }, + { name = "langchain-postgres", specifier = ">=0.0.13" }, + { name = "langchain-text-splitters", specifier = ">=0.3.8" }, + { name = "langchain-xai", specifier = ">=0.2.1" }, + { name = "langgraph", specifier = ">=0.6.1,<0.7.0" }, + { name = "langgraph-checkpoint", specifier = ">=2.0.18" }, + { name = "langgraph-checkpoint-postgres", specifier = ">=2.0.16,<2.0.23" }, + { name = "langgraph-prebuilt", specifier = ">=0.6.1,<0.7.0" }, + { name = "langmem", specifier = ">=0.0.27" }, + { name = "mypy-boto3-s3", specifier = ">=1.37.24,<2.0.0" }, + { name = "openai", specifier = ">=1.59.6" }, + { name = "pgvector", specifier = ">=0.3.6" }, + { name = "pillow", specifier = ">=11.1.0,<12.0.0" }, + { name = "psycopg", specifier = ">=3.2.9" }, + { name = "psycopg-pool", specifier = ">=3.2.4" }, + { name = "psycopg2-binary", specifier = ">=2.9.10,<3.0.0" }, + { name = "pydantic", specifier = ">=2.10.6,<2.11.0" }, + { name = "pydantic-settings", specifier = ">=2.8.1" }, + { name = "pyjwt", specifier = ">=2.10.1" }, + { name = "python-dotenv", specifier = ">=1.0.1" }, + { name = "python-multipart", specifier = ">=0.0.20" }, + { name = "pytz", specifier = ">=2025.1" }, + { name = "pyyaml", specifier = ">=6.0.2" }, + { name = "redis", specifier = ">=5.2.1,<7.0.0" }, + { name = "requests", specifier = ">=2.32.3" }, + { name = "requests-oauthlib", specifier = ">=2.0.0" }, + { name = "sentry-sdk", extras = ["fastapi"], specifier = ">=2.20.0" }, + { name = "slack-sdk", specifier = ">=3.34.0" }, + { name = "sqlalchemy", extras = ["asyncio"], specifier = ">=2.0.37" }, + { name = "starlette", specifier = ">=0.47.1" }, + { name = "supabase", specifier = ">=2.16.0" }, + { name = "telegramify-markdown", specifier = ">=0.5.0,<0.6.0" }, + { name = "tenacity", specifier = ">=9.1.2" }, + { name = "tweepy", extras = ["async"], specifier = ">=4.15.0" }, + { name = "uvicorn", specifier = ">=0.34.0,<1.0.0" }, + { name = "web3", specifier = ">=7.10.0" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "deptry", specifier = ">=0.23.0" }, + { name = "jsonschema", specifier = ">=4.21.1,<5" }, + { name = "ruff", specifier = ">=0.11.9,<0.12" }, +] + +[[package]] +name = "jiter" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/9d/ae7ddb4b8ab3fb1b51faf4deb36cb48a4fbbd7cb36bad6a5fca4741306f7/jiter-0.10.0.tar.gz", hash = "sha256:07a7142c38aacc85194391108dc91b5b57093c978a9932bd86a36862759d9500", size = 162759, upload-time = "2025-05-18T19:04:59.73Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/b5/348b3313c58f5fbfb2194eb4d07e46a35748ba6e5b3b3046143f3040bafa/jiter-0.10.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:1e274728e4a5345a6dde2d343c8da018b9d4bd4350f5a472fa91f66fda44911b", size = 312262, upload-time = "2025-05-18T19:03:44.637Z" }, + { url = "https://files.pythonhosted.org/packages/9c/4a/6a2397096162b21645162825f058d1709a02965606e537e3304b02742e9b/jiter-0.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:7202ae396446c988cb2a5feb33a543ab2165b786ac97f53b59aafb803fef0744", size = 320124, upload-time = "2025-05-18T19:03:46.341Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/85/1ce02cade7516b726dd88f59a4ee46914bf79d1676d1228ef2002ed2f1c9/jiter-0.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23ba7722d6748b6920ed02a8f1726fb4b33e0fd2f3f621816a8b486c66410ab2", size = 345330, upload-time = "2025-05-18T19:03:47.596Z" }, + { url = "https://files.pythonhosted.org/packages/75/d0/bb6b4f209a77190ce10ea8d7e50bf3725fc16d3372d0a9f11985a2b23eff/jiter-0.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:371eab43c0a288537d30e1f0b193bc4eca90439fc08a022dd83e5e07500ed026", size = 369670, upload-time = "2025-05-18T19:03:49.334Z" }, + { url = "https://files.pythonhosted.org/packages/a0/f5/a61787da9b8847a601e6827fbc42ecb12be2c925ced3252c8ffcb56afcaf/jiter-0.10.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6c675736059020365cebc845a820214765162728b51ab1e03a1b7b3abb70f74c", size = 489057, upload-time = "2025-05-18T19:03:50.66Z" }, + { url = "https://files.pythonhosted.org/packages/12/e4/6f906272810a7b21406c760a53aadbe52e99ee070fc5c0cb191e316de30b/jiter-0.10.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c5867d40ab716e4684858e4887489685968a47e3ba222e44cde6e4a2154f959", size = 389372, upload-time = "2025-05-18T19:03:51.98Z" }, + { url = "https://files.pythonhosted.org/packages/e2/ba/77013b0b8ba904bf3762f11e0129b8928bff7f978a81838dfcc958ad5728/jiter-0.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:395bb9a26111b60141757d874d27fdea01b17e8fac958b91c20128ba8f4acc8a", size = 352038, upload-time = "2025-05-18T19:03:53.703Z" }, + { url = "https://files.pythonhosted.org/packages/67/27/c62568e3ccb03368dbcc44a1ef3a423cb86778a4389e995125d3d1aaa0a4/jiter-0.10.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6842184aed5cdb07e0c7e20e5bdcfafe33515ee1741a6835353bb45fe5d1bd95", size = 391538, upload-time = "2025-05-18T19:03:55.046Z" }, + { url = "https://files.pythonhosted.org/packages/c0/72/0d6b7e31fc17a8fdce76164884edef0698ba556b8eb0af9546ae1a06b91d/jiter-0.10.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:62755d1bcea9876770d4df713d82606c8c1a3dca88ff39046b85a048566d56ea", size = 523557, upload-time = "2025-05-18T19:03:56.386Z" }, + { url = "https://files.pythonhosted.org/packages/2f/09/bc1661fbbcbeb6244bd2904ff3a06f340aa77a2b94e5a7373fd165960ea3/jiter-0.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:533efbce2cacec78d5ba73a41756beff8431dfa1694b6346ce7af3a12c42202b", size = 514202, upload-time = "2025-05-18T19:03:57.675Z" }, + { url = "https://files.pythonhosted.org/packages/1b/84/5a5d5400e9d4d54b8004c9673bbe4403928a00d28529ff35b19e9d176b19/jiter-0.10.0-cp312-cp312-win32.whl", hash = "sha256:8be921f0cadd245e981b964dfbcd6fd4bc4e254cdc069490416dd7a2632ecc01", size = 211781, upload-time = "2025-05-18T19:03:59.025Z" }, + { url = "https://files.pythonhosted.org/packages/9b/52/7ec47455e26f2d6e5f2ea4951a0652c06e5b995c291f723973ae9e724a65/jiter-0.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:a7c7d785ae9dda68c2678532a5a1581347e9c15362ae9f6e68f3fdbfb64f2e49", size = 206176, upload-time = "2025-05-18T19:04:00.305Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, 
upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "jsonalias" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ec/45/ee7e17002cb7f3264f755ff6a1a72c55d1830e07808d643167d2a2277c4f/jsonalias-0.1.1.tar.gz", hash = "sha256:64f04d935397d579fc94509e1fcb6212f2d081235d9d6395bd10baedf760a769", size = 1095, upload-time = "2022-10-28T22:57:56.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/41/ed/05aebce69f78c104feff2ffcdd5a6f9d668a208aba3a8bf56e3750809fd8/jsonalias-0.1.1-py3-none-any.whl", hash = "sha256:a56d2888e6397812c606156504e861e8ec00e188005af149f003c787db3d3f18", size = 1312, upload-time = "2022-10-28T22:57:54.763Z" }, +] + +[[package]] +name = "jsonpatch" +version = "1.33" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpointer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/78/18813351fe5d63acad16aec57f94ec2b70a09e53ca98145589e185423873/jsonpatch-1.33.tar.gz", hash = "sha256:9fcd4009c41e6d12348b4a0ff2563ba56a2923a7dfee731d004e212e1ee5030c", size = 21699, upload-time = "2023-06-26T12:07:29.144Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/73/07/02e16ed01e04a374e644b575638ec7987ae846d25ad97bcc9945a3ee4b0e/jsonpatch-1.33-py2.py3-none-any.whl", hash = "sha256:0ae28c0cd062bbd8b8ecc26d7d164fbbea9652a1a3693f3b956c1eae5145dade", size = 12898, upload-time = "2023-06-16T21:01:28.466Z" }, +] + +[[package]] +name = "jsonpointer" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6a/0a/eebeb1fa92507ea94016a2a790b93c2ae41a7e18778f85471dc54475ed25/jsonpointer-3.0.0.tar.gz", hash = "sha256:2b2d729f2091522d61c3b31f82e11870f60b68f43fbc705cb76bf4b832af59ef", size = 9114, upload-time = "2024-06-10T19:24:42.462Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/92/5e77f98553e9e75130c78900d000368476aed74276eb8ae8796f65f00918/jsonpointer-3.0.0-py2.py3-none-any.whl", hash = "sha256:13e088adc14fca8b6aa8177c044e12701e6ad4b28ff10e65f2267a90109c9942", size = 7595, upload-time = "2024-06-10T19:24:40.698Z" }, +] + +[[package]] +name = "jsonref" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/0d/c1f3277e90ccdb50d33ed5ba1ec5b3f0a242ed8c1b1a85d3afeb68464dca/jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552", size = 8814, upload-time = "2023-01-16T16:10:04.455Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0c/ec/e1db9922bceb168197a558a2b8c03a7963f1afe93517ddd3cf99f202f996/jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9", size = 9425, upload-time = "2023-01-16T16:10:02.255Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.25.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/74/69/f7185de793a29082a9f3c7728268ffb31cb5095131a9c139a74078e27336/jsonschema-4.25.1.tar.gz", hash = "sha256:e4a9655ce0da0c0b67a085847e00a3a51449e1157f4f75e9fb5aa545e122eb85", size = 357342, upload-time = "2025-08-18T17:03:50.038Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/9c/8c95d856233c1f82500c2450b8c68576b4cf1c871db3afac5c34ff84e6fd/jsonschema-4.25.1-py3-none-any.whl", hash = "sha256:3fba0169e345c7175110351d456342c364814cfcf3b964ba4587f22915230a63", size = 90040, upload-time = "2025-08-18T17:03:48.373Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/ce/46fbd9c8119cfc3581ee5643ea49464d168028cfb5caff5fc0596d0cf914/jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608", size = 15513, upload-time = "2025-04-23T12:34:07.418Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" }, +] + +[[package]] +name = "lagrange" +version = "3.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/07/9d/4b6470fd6769b0943fbda9b30e2068bb8d9940be2977b1e80a184d527fa6/lagrange-3.0.1.tar.gz", hash = "sha256:272f352a676679ee318b0b302054f667f23afb73d10063cd3926c612527e09f1", size = 6894, upload-time = "2025-01-01T01:33:14.999Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3e/d8/f1c3ff60a8b3e114cfb3e9eed75140d2a3e1e766791cfe2f210a5c736d61/lagrange-3.0.1-py3-none-any.whl", hash = "sha256:d473913d901f0c257456c505e4a94450f2e4a2f147460a68ad0cfb9ea33a6d0a", size = 6905, upload-time = "2025-01-01T01:33:11.031Z" }, +] + +[[package]] +name = "langchain" +version = "0.3.27" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langchain-text-splitters" }, + { name = "langsmith" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/f6/f4f7f3a56626fe07e2bb330feb61254dbdf06c506e6b59a536a337da51cf/langchain-0.3.27.tar.gz", hash = "sha256:aa6f1e6274ff055d0fd36254176770f356ed0a8994297d1df47df341953cec62", size = 10233809, upload-time = "2025-07-24T14:42:32.959Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f6/d5/4861816a95b2f6993f1360cfb605aacb015506ee2090433a71de9cca8477/langchain-0.3.27-py3-none-any.whl", hash = "sha256:7b20c4f338826acb148d885b20a73a16e410ede9ee4f19bb02011852d5f98798", size = 1018194, upload-time = "2025-07-24T14:42:30.23Z" }, +] + +[[package]] +name = "langchain-anthropic" +version = "0.3.19" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anthropic" }, + { name = "langchain-core" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/ab/bdaefa42fdab238efff45eb28c6cd74c011979092408decdae22c0bf7e66/langchain_anthropic-0.3.19.tar.gz", hash = "sha256:e62259382586ee5c44e9a9459d00b74a7e191550e5fadfad28f0daa5d143d745", size = 281502, upload-time = "2025-08-18T18:33:36.811Z" } +wheels = [ + { 
url = "https://files.pythonhosted.org/packages/a2/69/64473db52d02715f3815df3b25c9816b5801a58762a5ae62a3e5b84169a0/langchain_anthropic-0.3.19-py3-none-any.whl", hash = "sha256:5b5372ef7e10ee32b4308b4d9e1ed623c360b7d0a233c017e5209ad8118d5ab7", size = 31775, upload-time = "2025-08-18T18:33:35.596Z" }, +] + +[[package]] +name = "langchain-community" +version = "0.3.27" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "dataclasses-json" }, + { name = "httpx-sse" }, + { name = "langchain" }, + { name = "langchain-core" }, + { name = "langsmith" }, + { name = "numpy" }, + { name = "pydantic-settings" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlalchemy" }, + { name = "tenacity" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/76/200494f6de488217a196c4369e665d26b94c8c3642d46e2fd62f9daf0a3a/langchain_community-0.3.27.tar.gz", hash = "sha256:e1037c3b9da0c6d10bf06e838b034eb741e016515c79ef8f3f16e53ead33d882", size = 33237737, upload-time = "2025-07-02T18:47:02.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/bc/f8c7dae8321d37ed39ac9d7896617c4203248240a4835b136e3724b3bb62/langchain_community-0.3.27-py3-none-any.whl", hash = "sha256:581f97b795f9633da738ea95da9cb78f8879b538090c9b7a68c0aed49c828f0d", size = 2530442, upload-time = "2025-07-02T18:47:00.246Z" }, +] + +[[package]] +name = "langchain-core" +version = "0.3.75" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonpatch" }, + { name = "langsmith" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "tenacity" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/06/63/270b71a23e849984505ddc7c5c9fd3f4bd9cb14b1a484ee44c4e51c33cc2/langchain_core-0.3.75.tar.gz", hash = "sha256:ab0eb95a06ed6043f76162e6086b45037690cb70b7f090bd83b5ebb8a05b70ed", size = 570876, upload-time = "2025-08-26T15:24:12.246Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/42/0d0221cce6f168f644d7d96cb6c87c4e42fc55d2941da7a36e970e3ab8ab/langchain_core-0.3.75-py3-none-any.whl", hash = "sha256:03ca1fadf955ee3c7d5806a841f4b3a37b816acea5e61a7e6ba1298c05eea7f5", size = 443986, upload-time = "2025-08-26T15:24:10.883Z" }, +] + +[[package]] +name = "langchain-deepseek" +version = "0.1.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langchain-openai" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/64/15fe061165574b3ba80011d96cb3f428f9e6f5631cd76058b028333023db/langchain_deepseek-0.1.4.tar.gz", hash = "sha256:dc105138aee4fce03badd0521e69d5508b37f5c087d92b3e8481ffb8f9563d33", size = 8937, upload-time = "2025-07-22T17:37:42.798Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/d3/0da6f3f5548e1a771d0500e17da723996f8ba1398bcef7198a2ecbca6e1b/langchain_deepseek-0.1.4-py3-none-any.whl", hash = "sha256:9ce3dbfc7a40f221657ffe31e8623ea6e397f6c90de2a58d38204ac63e8f41ff", size = 7440, upload-time = "2025-07-22T17:37:41.618Z" }, +] + +[[package]] +name = "langchain-mcp-adapters" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "mcp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/01/5f/f6d2b724f7100bd53e4413a72dd7b77a61e5284549c9de63ba043e63d163/langchain_mcp_adapters-0.1.7.tar.gz", hash = 
"sha256:b5d0ab520211d8c12cfc4df83fd6335f8197a3557ee7ca4f14e3380846610535", size = 20023, upload-time = "2025-06-05T20:18:39.425Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/05/b6cf2f4651c9429b374c8837117ecc0619bfb8b6e106ce8390f2c932a293/langchain_mcp_adapters-0.1.7-py3-none-any.whl", hash = "sha256:6b3ded5f51b311e67cefa87b500f776c454caf2269d2eae4b2338ecec19a9258", size = 13318, upload-time = "2025-06-05T20:18:38.477Z" }, +] + +[[package]] +name = "langchain-openai" +version = "0.3.32" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "openai" }, + { name = "tiktoken" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7e/19/167d9ad1b6bb75406c4acceda01ef0dc1101c7f629f74441fe8a787fb190/langchain_openai-0.3.32.tar.gz", hash = "sha256:782ad669bd1bdb964456d8882c5178717adcfceecb482cc20005f770e43d346d", size = 782982, upload-time = "2025-08-26T14:25:27.917Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/3d/e22ee65fff79afe7bdfbd67844243eb279b440c882dad9e4262dcc87131f/langchain_openai-0.3.32-py3-none-any.whl", hash = "sha256:3354f76822f7cc76d8069831fe2a77f9bc7ff3b4f13af788bd94e4c6e853b400", size = 74531, upload-time = "2025-08-26T14:25:26.542Z" }, +] + +[[package]] +name = "langchain-postgres" +version = "0.0.15" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asyncpg" }, + { name = "langchain-core" }, + { name = "numpy" }, + { name = "pgvector" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, + { name = "sqlalchemy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/b9/b9eb61d2f2679bacea0bea02e71e715c18f02c9f72f6c3d523fe7f7f65be/langchain_postgres-0.0.15.tar.gz", hash = "sha256:d6be01ab3a802881e7dcd16439a4efda5a8eba15c368e04fe9a96134ad90854e", size = 198495, upload-time = "2025-06-24T14:19:21.324Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/5e/572c90fce17462bfcb7e7b7ac2e24bbdbaced338fb271172c7b96a24ccee/langchain_postgres-0.0.15-py3-none-any.whl", hash = "sha256:dc3d083f6e2ac08fe918f658b63886586be62c057cab0ad30c1c6b38023d99b7", size = 45059, upload-time = "2025-06-24T14:19:19.781Z" }, +] + +[[package]] +name = "langchain-text-splitters" +version = "0.3.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/11/43/dcda8fd25f0b19cb2835f2f6bb67f26ad58634f04ac2d8eae00526b0fa55/langchain_text_splitters-0.3.11.tar.gz", hash = "sha256:7a50a04ada9a133bbabb80731df7f6ddac51bc9f1b9cab7fa09304d71d38a6cc", size = 46458, upload-time = "2025-08-31T23:02:58.316Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/58/0d/41a51b40d24ff0384ec4f7ab8dd3dcea8353c05c973836b5e289f1465d4f/langchain_text_splitters-0.3.11-py3-none-any.whl", hash = "sha256:cf079131166a487f1372c8ab5d0bfaa6c0a4291733d9c43a34a16ac9bcd6a393", size = 33845, upload-time = "2025-08-31T23:02:57.195Z" }, +] + +[[package]] +name = "langchain-xai" +version = "0.2.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "langchain-core" }, + { name = "langchain-openai" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/6c/f491dc55c7f91fe8196b59b6111788ee62b2d4efd629230788caa08e0fd6/langchain_xai-0.2.5.tar.gz", hash = "sha256:e94b17d4928aaa26998acf7cff537bb6b3c72468177e00ed7de7747dc9b2ecdc", size = 11086, upload-time = "2025-07-22T17:22:21.238Z" } 
+wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/50/fc6ae91c9b40ca8259d501c0754d91136ac98e2502c67b4a55b78a426fba/langchain_xai-0.2.5-py3-none-any.whl", hash = "sha256:8d351393eddc0ad024a82a1120b815e6e3ccfe83390cbc65a2fce81786584795", size = 9297, upload-time = "2025-07-22T17:22:20.173Z" }, +] + +[[package]] +name = "langgraph" +version = "0.6.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, + { name = "langgraph-prebuilt" }, + { name = "langgraph-sdk" }, + { name = "pydantic" }, + { name = "xxhash" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/2b/59f0b2985467ec84b006dd41ec31c0aae43a7f16722d5514292500b871c9/langgraph-0.6.6.tar.gz", hash = "sha256:e7d3cefacf356f8c01721b166b67b3bf581659d5361a3530f59ecd9b8448eca7", size = 465452, upload-time = "2025-08-20T04:02:13.915Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/ef/81fce0a80925cd89987aa641ff01573e3556a24f2d205112862a69df7fd3/langgraph-0.6.6-py3-none-any.whl", hash = "sha256:a2283a5236abba6c8307c1a485c04e8a0f0ffa2be770878782a7bf2deb8d7954", size = 153274, upload-time = "2025-08-20T04:02:12.251Z" }, +] + +[[package]] +name = "langgraph-checkpoint" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "ormsgpack" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/73/3e/d00eb2b56c3846a0cabd2e5aa71c17a95f882d4f799a6ffe96a19b55eba9/langgraph_checkpoint-2.1.1.tar.gz", hash = "sha256:72038c0f9e22260cb9bff1f3ebe5eb06d940b7ee5c1e4765019269d4f21cf92d", size = 136256, upload-time = "2025-07-17T13:07:52.411Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/dd/64686797b0927fb18b290044be12ae9d4df01670dce6bb2498d5ab65cb24/langgraph_checkpoint-2.1.1-py3-none-any.whl", hash = "sha256:5a779134fd28134a9a83d078be4450bbf0e0c79fdf5e992549658899e6fc5ea7", size = 43925, upload-time = "2025-07-17T13:07:51.023Z" }, +] + +[[package]] +name = "langgraph-checkpoint-postgres" +version = "2.0.22" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langgraph-checkpoint" }, + { name = "orjson" }, + { name = "psycopg" }, + { name = "psycopg-pool" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/56/10/bfab9d031c0eeff9785e28ebcb79107b0a4c57ad3e0f21935679935f77ee/langgraph_checkpoint_postgres-2.0.22.tar.gz", hash = "sha256:4b58346f9d7d44994fc8141310bbd3429fe0e17a18c4a606bf3d7ff673325391", size = 118024, upload-time = "2025-07-10T22:45:24.941Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/26/9594505e5698f40e9a76d66c06f412c48a119c3ca41493c432d66f7e1e44/langgraph_checkpoint_postgres-2.0.22-py3-none-any.whl", hash = "sha256:81623697050ea755abd3cab936e60ae0203c0c492675b16d4d4608da8b586bd5", size = 40339, upload-time = "2025-07-10T22:45:23.737Z" }, +] + +[[package]] +name = "langgraph-prebuilt" +version = "0.6.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain-core" }, + { name = "langgraph-checkpoint" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/21/9b198d11732101ee8cdf30af98d0b4f11254c768de15173e57f5260fd14b/langgraph_prebuilt-0.6.4.tar.gz", hash = "sha256:e9e53b906ee5df46541d1dc5303239e815d3ec551e52bb03dd6463acc79ec28f", size = 125695, upload-time = "2025-08-07T18:17:57.333Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0a/7f/973b0d9729d9693d6e5b4bc5f3ae41138d194cb7b16b0ed230020beeb13a/langgraph_prebuilt-0.6.4-py3-none-any.whl", hash = "sha256:819f31d88b84cb2729ff1b79db2d51e9506b8fb7aaacfc0d359d4fe16e717344", size = 28025, upload-time = "2025-08-07T18:17:56.493Z" }, +] + +[[package]] +name = "langgraph-sdk" +version = "0.2.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/2a/c3194c5ddac862e2d1d70773485eda52578453347c60cdce768e6794a720/langgraph_sdk-0.2.4.tar.gz", hash = "sha256:65c37fdb1d6f572deb7d5dda45aab79916c13c276a1e08b29309ae7729567a36", size = 79741, upload-time = "2025-08-28T23:37:00.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/f0/e0ca3e90fd89f38d2f4cff88c8f90c327ad5a35b528586a1d6811b38adb3/langgraph_sdk-0.2.4-py3-none-any.whl", hash = "sha256:b9dfbe77abc86ee6280b6d328320b7e1ca2b303b5fd89540dc5a28f780222cd1", size = 53964, upload-time = "2025-08-28T23:36:58.903Z" }, +] + +[[package]] +name = "langmem" +version = "0.0.29" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "langchain" }, + { name = "langchain-anthropic" }, + { name = "langchain-core" }, + { name = "langchain-openai" }, + { name = "langgraph" }, + { name = "langgraph-checkpoint" }, + { name = "langsmith" }, + { name = "trustcall" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ec/75/a58f56a1f635003919f1c5c356a4247d8136d9183b63b9f52599aa7a8710/langmem-0.0.29.tar.gz", hash = "sha256:9a4a7bfcbde87f02494caf6add55c0cdd49c5a1a6396e19fe12a56ba6fb96267", size = 206315, upload-time = "2025-07-28T19:55:33.437Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f5/6a/ea17974afc18dbf278bbfaaa1331e3dfef979cf42bfae1dc695b5e4ea750/langmem-0.0.29-py3-none-any.whl", hash = "sha256:3e0b56d3e4077e96dab45616e2800c9550bf61c1e1eee4c119ec704518037d8c", size = 67127, upload-time = "2025-07-28T19:55:32.279Z" }, +] + +[[package]] +name = "langsmith" +version = "0.4.21" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx" }, + { name = "orjson", marker = "platform_python_implementation != 'PyPy'" }, + { name = "packaging" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "requests-toolbelt" }, + { name = "zstandard" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/ad/2c89c128b575d89ba7c7af05de6d715703fe9648110f2ec3c90255956681/langsmith-0.4.21.tar.gz", hash = "sha256:eabfb66970d50964918acba524b407e8cfe585448013e3135ca92388c858d009", size = 938666, upload-time = "2025-08-29T21:46:27.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/79/5ccad558563861f7ae6a77aeba259578c35192e9c109b0142fcf490b3c50/langsmith-0.4.21-py3-none-any.whl", hash = "sha256:15b189e2e7a3337a07cf250d91e158efcd0b39458735dc9e583c56dd0f21e4e0", size = 378494, upload-time = "2025-08-29T21:46:24.714Z" }, +] + +[[package]] +name = "magic-filter" +version = "1.0.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e6/08/da7c2cc7398cc0376e8da599d6330a437c01d3eace2f2365f300e0f3f758/magic_filter-1.0.12.tar.gz", hash = "sha256:4751d0b579a5045d1dc250625c4c508c18c3def5ea6afaf3957cb4530d03f7f9", size = 11071, upload-time = "2023-10-01T12:33:19.006Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/cc/75/f620449f0056eff0ec7c1b1e088f71068eb4e47a46eb54f6c065c6ad7675/magic_filter-1.0.12-py3-none-any.whl", hash = "sha256:e5929e544f310c2b1f154318db8c5cdf544dd658efa998172acd2e4ba0f6c6a6", size = 11335, upload-time = "2023-10-01T12:33:17.711Z" }, +] + +[[package]] +name = "mako" +version = "1.3.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/38/bd5b78a920a64d708fe6bc8e0a2c075e1389d53bef8413725c63ba041535/mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28", size = 392474, upload-time = "2025-04-10T12:44:31.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/fb/99f81ac72ae23375f22b7afdb7642aba97c00a713c217124420147681a2f/mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59", size = 78509, upload-time = "2025-04-10T12:50:53.297Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = 
"https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, +] + +[[package]] +name = "marshmallow" +version = "3.26.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/5e/5e53d26b42ab75491cda89b871dab9e97c840bf12c63ec58a1919710cd06/marshmallow-3.26.1.tar.gz", hash = "sha256:e6d8affb6cb61d39d26402096dc0aee12d5a26d490a121f118d2e81dc0719dc6", size = 221825, upload-time = "2025-02-03T15:32:25.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/75/51952c7b2d3873b44a0028b1bd26a25078c18f92f256608e8d1dc61b39fd/marshmallow-3.26.1-py3-none-any.whl", hash = "sha256:3350409f20a70a7e4e11a27661187b77cdcaeb20abca41c1454fe33636bea09c", size = 50878, upload-time = "2025-02-03T15:32:22.295Z" }, +] + +[[package]] +name = "mcp" +version = "1.12.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "httpx" }, + { name = "httpx-sse" }, + { name = "jsonschema" }, + { name = "pydantic" }, + { name = "pydantic-settings" }, + { name = "python-multipart" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "sse-starlette" }, + { name = "starlette" }, + { name = "uvicorn", marker = "sys_platform != 'emscripten'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/31/88/f6cb7e7c260cd4b4ce375f2b1614b33ce401f63af0f49f7141a2e9bf0a45/mcp-1.12.4.tar.gz", hash = "sha256:0765585e9a3a5916a3c3ab8659330e493adc7bd8b2ca6120c2d7a0c43e034ca5", size = 431148, upload-time = "2025-08-07T20:31:18.082Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ad/68/316cbc54b7163fa22571dcf42c9cc46562aae0a021b974e0a8141e897200/mcp-1.12.4-py3-none-any.whl", hash = "sha256:7aa884648969fab8e78b89399d59a683202972e12e6bc9a1c88ce7eda7743789", size = 160145, upload-time = "2025-08-07T20:31:15.69Z" }, +] + +[[package]] +name = "mistletoe" +version = "1.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/96/ea46a376a7c4cd56955ecdfff0ea68de43996a4e6d1aee4599729453bd11/mistletoe-1.4.0.tar.gz", hash = "sha256:1630f906e5e4bbe66fdeb4d29d277e2ea515d642bb18a9b49b136361a9818c9d", size = 107203, upload-time = "2024-07-14T10:17:35.212Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2a/0f/b5e545f0c7962be90366af3418989b12cf441d9da1e5d89d88f2f3e5cf8f/mistletoe-1.4.0-py3-none-any.whl", hash = "sha256:44a477803861de1237ba22e375c6b617690a31d2902b47279d1f8f7ed498a794", size = 51304, upload-time = "2024-07-14T10:17:33.243Z" }, +] + +[[package]] +name = "multidict" +version = "6.6.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/69/7f/0652e6ed47ab288e3756ea9c0df8b14950781184d4bd7883f4d87dd41245/multidict-6.6.4.tar.gz", hash = "sha256:d2d4e4787672911b48350df02ed3fa3fffdc2f2e8ca06dd6afdf34189b76a9dd", size = 101843, upload-time = "2025-08-11T12:08:48.217Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/f6/512ffd8fd8b37fb2680e5ac35d788f1d71bbaf37789d21a820bdc441e565/multidict-6.6.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0ffb87be160942d56d7b87b0fdf098e81ed565add09eaa1294268c7f3caac4c8", size = 76516, upload-time = "2025-08-11T12:06:53.393Z" }, + { url = "https://files.pythonhosted.org/packages/99/58/45c3e75deb8855c36bd66cc1658007589662ba584dbf423d01df478dd1c5/multidict-6.6.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d191de6cbab2aff5de6c5723101705fd044b3e4c7cfd587a1929b5028b9714b3", size = 45394, upload-time = "2025-08-11T12:06:54.555Z" }, + { url = "https://files.pythonhosted.org/packages/fd/ca/e8c4472a93a26e4507c0b8e1f0762c0d8a32de1328ef72fd704ef9cc5447/multidict-6.6.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:38a0956dd92d918ad5feff3db8fcb4a5eb7dba114da917e1a88475619781b57b", size = 43591, upload-time = "2025-08-11T12:06:55.672Z" }, + { url = "https://files.pythonhosted.org/packages/05/51/edf414f4df058574a7265034d04c935aa84a89e79ce90fcf4df211f47b16/multidict-6.6.4-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:6865f6d3b7900ae020b495d599fcf3765653bc927951c1abb959017f81ae8287", size = 237215, upload-time = "2025-08-11T12:06:57.213Z" }, + { url = "https://files.pythonhosted.org/packages/c8/45/8b3d6dbad8cf3252553cc41abea09ad527b33ce47a5e199072620b296902/multidict-6.6.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0a2088c126b6f72db6c9212ad827d0ba088c01d951cee25e758c450da732c138", size = 258299, upload-time = "2025-08-11T12:06:58.946Z" }, + { url = "https://files.pythonhosted.org/packages/3c/e8/8ca2e9a9f5a435fc6db40438a55730a4bf4956b554e487fa1b9ae920f825/multidict-6.6.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0f37bed7319b848097085d7d48116f545985db988e2256b2e6f00563a3416ee6", size = 242357, upload-time = "2025-08-11T12:07:00.301Z" }, + { url = "https://files.pythonhosted.org/packages/0f/84/80c77c99df05a75c28490b2af8f7cba2a12621186e0a8b0865d8e745c104/multidict-6.6.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:01368e3c94032ba6ca0b78e7ccb099643466cf24f8dc8eefcfdc0571d56e58f9", size = 268369, upload-time = "2025-08-11T12:07:01.638Z" }, + { url = "https://files.pythonhosted.org/packages/0d/e9/920bfa46c27b05fb3e1ad85121fd49f441492dca2449c5bcfe42e4565d8a/multidict-6.6.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8fe323540c255db0bffee79ad7f048c909f2ab0edb87a597e1c17da6a54e493c", size = 269341, upload-time = "2025-08-11T12:07:02.943Z" }, + { url = 
"https://files.pythonhosted.org/packages/af/65/753a2d8b05daf496f4a9c367fe844e90a1b2cac78e2be2c844200d10cc4c/multidict-6.6.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b8eb3025f17b0a4c3cd08cda49acf312a19ad6e8a4edd9dbd591e6506d999402", size = 256100, upload-time = "2025-08-11T12:07:04.564Z" }, + { url = "https://files.pythonhosted.org/packages/09/54/655be13ae324212bf0bc15d665a4e34844f34c206f78801be42f7a0a8aaa/multidict-6.6.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bbc14f0365534d35a06970d6a83478b249752e922d662dc24d489af1aa0d1be7", size = 253584, upload-time = "2025-08-11T12:07:05.914Z" }, + { url = "https://files.pythonhosted.org/packages/5c/74/ab2039ecc05264b5cec73eb018ce417af3ebb384ae9c0e9ed42cb33f8151/multidict-6.6.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:75aa52fba2d96bf972e85451b99d8e19cc37ce26fd016f6d4aa60da9ab2b005f", size = 251018, upload-time = "2025-08-11T12:07:08.301Z" }, + { url = "https://files.pythonhosted.org/packages/af/0a/ccbb244ac848e56c6427f2392741c06302bbfba49c0042f1eb3c5b606497/multidict-6.6.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4fefd4a815e362d4f011919d97d7b4a1e566f1dde83dc4ad8cfb5b41de1df68d", size = 251477, upload-time = "2025-08-11T12:07:10.248Z" }, + { url = "https://files.pythonhosted.org/packages/0e/b0/0ed49bba775b135937f52fe13922bc64a7eaf0a3ead84a36e8e4e446e096/multidict-6.6.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:db9801fe021f59a5b375ab778973127ca0ac52429a26e2fd86aa9508f4d26eb7", size = 263575, upload-time = "2025-08-11T12:07:11.928Z" }, + { url = "https://files.pythonhosted.org/packages/3e/d9/7fb85a85e14de2e44dfb6a24f03c41e2af8697a6df83daddb0e9b7569f73/multidict-6.6.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:a650629970fa21ac1fb06ba25dabfc5b8a2054fcbf6ae97c758aa956b8dba802", size = 259649, upload-time = "2025-08-11T12:07:13.244Z" }, + { url = "https://files.pythonhosted.org/packages/03/9e/b3a459bcf9b6e74fa461a5222a10ff9b544cb1cd52fd482fb1b75ecda2a2/multidict-6.6.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:452ff5da78d4720d7516a3a2abd804957532dd69296cb77319c193e3ffb87e24", size = 251505, upload-time = "2025-08-11T12:07:14.57Z" }, + { url = "https://files.pythonhosted.org/packages/86/a2/8022f78f041dfe6d71e364001a5cf987c30edfc83c8a5fb7a3f0974cff39/multidict-6.6.4-cp312-cp312-win32.whl", hash = "sha256:8c2fcb12136530ed19572bbba61b407f655e3953ba669b96a35036a11a485793", size = 41888, upload-time = "2025-08-11T12:07:15.904Z" }, + { url = "https://files.pythonhosted.org/packages/c7/eb/d88b1780d43a56db2cba24289fa744a9d216c1a8546a0dc3956563fd53ea/multidict-6.6.4-cp312-cp312-win_amd64.whl", hash = "sha256:047d9425860a8c9544fed1b9584f0c8bcd31bcde9568b047c5e567a1025ecd6e", size = 46072, upload-time = "2025-08-11T12:07:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/9f/16/b929320bf5750e2d9d4931835a4c638a19d2494a5b519caaaa7492ebe105/multidict-6.6.4-cp312-cp312-win_arm64.whl", hash = "sha256:14754eb72feaa1e8ae528468f24250dd997b8e2188c3d2f593f9eba259e4b364", size = 43222, upload-time = "2025-08-11T12:07:18.328Z" }, + { url = "https://files.pythonhosted.org/packages/fd/69/b547032297c7e63ba2af494edba695d781af8a0c6e89e4d06cf848b21d80/multidict-6.6.4-py3-none-any.whl", hash = "sha256:27d8f8e125c07cb954e54d75d04905a9bba8a439c1d84aca94949d4d03d8601c", size = 12313, upload-time = "2025-08-11T12:08:46.891Z" }, +] + +[[package]] +name = "mypy-boto3-s3" +version = "1.40.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/06/d7/b2100702d2f200fdb3468e419c729790bd8543ee0af6f6d63d8dfdab4e28/mypy_boto3_s3-1.40.0.tar.gz", hash = "sha256:99a4a27f04d62fe0b31032f274f2e19889fa66424413617a9416873c48567f1d", size = 75924, upload-time = "2025-07-31T19:50:01.979Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/4f/4d32cd202d8c8c7e11e44dd288f66b8985e6ee4402b9a0891b7b94ff6cc6/mypy_boto3_s3-1.40.0-py3-none-any.whl", hash = "sha256:5736b7780d57a156312d8d136462c207671d0236b0355704b5754496bb712bc8", size = 82710, upload-time = "2025-07-31T19:49:59.713Z" }, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "nilql" +version = "0.0.0a13" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcl" }, + { name = "lagrange" }, + { name = "pailliers" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3f/67/59f4b6ceac7c7719e4830a8f017d7d289b48da20dc2791f55ba3d2968bbb/nilql-0.0.0a13.tar.gz", hash = "sha256:cb0011ff704001c83209a3185d89a8b0a16cec8089551469b5c19d52659dd359", size = 19329, upload-time = "2025-04-12T05:08:18.383Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/b0/8b8641ddd3d40aaee0fccb3a167d7ee31f0021c93c93c0a19ddc50942d9a/nilql-0.0.0a13-py3-none-any.whl", hash = "sha256:f369a0530340ef890d9916d748147c0c54eb8a1b9f79dd6f3124c546ec39d9f3", size = 13826, upload-time = "2025-04-12T05:08:17.043Z" }, +] + +[[package]] +name = "numpy" +version = "2.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/37/7d/3fec4199c5ffb892bed55cff901e4f39a58c81df9c44c280499e92cad264/numpy-2.3.2.tar.gz", hash = "sha256:e0486a11ec30cdecb53f184d496d1c6a20786c81e55e41640270130056f8ee48", size = 20489306, upload-time = "2025-07-24T21:32:07.553Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/6d/745dd1c1c5c284d17725e5c802ca4d45cfc6803519d777f087b71c9f4069/numpy-2.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bc3186bea41fae9d8e90c2b4fb5f0a1f5a690682da79b92574d63f56b529080b", size = 20956420, upload-time = "2025-07-24T20:28:18.002Z" }, + { url = 
"https://files.pythonhosted.org/packages/bc/96/e7b533ea5740641dd62b07a790af5d9d8fec36000b8e2d0472bd7574105f/numpy-2.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2f4f0215edb189048a3c03bd5b19345bdfa7b45a7a6f72ae5945d2a28272727f", size = 14184660, upload-time = "2025-07-24T20:28:39.522Z" }, + { url = "https://files.pythonhosted.org/packages/2b/53/102c6122db45a62aa20d1b18c9986f67e6b97e0d6fbc1ae13e3e4c84430c/numpy-2.3.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:8b1224a734cd509f70816455c3cffe13a4f599b1bf7130f913ba0e2c0b2006c0", size = 5113382, upload-time = "2025-07-24T20:28:48.544Z" }, + { url = "https://files.pythonhosted.org/packages/2b/21/376257efcbf63e624250717e82b4fae93d60178f09eb03ed766dbb48ec9c/numpy-2.3.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:3dcf02866b977a38ba3ec10215220609ab9667378a9e2150615673f3ffd6c73b", size = 6647258, upload-time = "2025-07-24T20:28:59.104Z" }, + { url = "https://files.pythonhosted.org/packages/91/ba/f4ebf257f08affa464fe6036e13f2bf9d4642a40228781dc1235da81be9f/numpy-2.3.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:572d5512df5470f50ada8d1972c5f1082d9a0b7aa5944db8084077570cf98370", size = 14281409, upload-time = "2025-07-24T20:40:30.298Z" }, + { url = "https://files.pythonhosted.org/packages/59/ef/f96536f1df42c668cbacb727a8c6da7afc9c05ece6d558927fb1722693e1/numpy-2.3.2-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8145dd6d10df13c559d1e4314df29695613575183fa2e2d11fac4c208c8a1f73", size = 16641317, upload-time = "2025-07-24T20:40:56.625Z" }, + { url = "https://files.pythonhosted.org/packages/f6/a7/af813a7b4f9a42f498dde8a4c6fcbff8100eed00182cc91dbaf095645f38/numpy-2.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:103ea7063fa624af04a791c39f97070bf93b96d7af7eb23530cd087dc8dbe9dc", size = 16056262, upload-time = "2025-07-24T20:41:20.797Z" }, + { url = "https://files.pythonhosted.org/packages/8b/5d/41c4ef8404caaa7f05ed1cfb06afe16a25895260eacbd29b4d84dff2920b/numpy-2.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc927d7f289d14f5e037be917539620603294454130b6de200091e23d27dc9be", size = 18579342, upload-time = "2025-07-24T20:41:50.753Z" }, + { url = "https://files.pythonhosted.org/packages/a1/4f/9950e44c5a11636f4a3af6e825ec23003475cc9a466edb7a759ed3ea63bd/numpy-2.3.2-cp312-cp312-win32.whl", hash = "sha256:d95f59afe7f808c103be692175008bab926b59309ade3e6d25009e9a171f7036", size = 6320610, upload-time = "2025-07-24T20:42:01.551Z" }, + { url = "https://files.pythonhosted.org/packages/7c/2f/244643a5ce54a94f0a9a2ab578189c061e4a87c002e037b0829dd77293b6/numpy-2.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:9e196ade2400c0c737d93465327d1ae7c06c7cb8a1756121ebf54b06ca183c7f", size = 12786292, upload-time = "2025-07-24T20:42:20.738Z" }, + { url = "https://files.pythonhosted.org/packages/54/cd/7b5f49d5d78db7badab22d8323c1b6ae458fbf86c4fdfa194ab3cd4eb39b/numpy-2.3.2-cp312-cp312-win_arm64.whl", hash = "sha256:ee807923782faaf60d0d7331f5e86da7d5e3079e28b291973c545476c2b00d07", size = 10194071, upload-time = "2025-07-24T20:42:36.657Z" }, +] + +[[package]] +name = "oauthlib" +version = "3.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/5f/19930f824ffeb0ad4372da4812c50edbd1434f678c90c2733e1188edfc63/oauthlib-3.3.1.tar.gz", hash = "sha256:0f0f8aa759826a193cf66c12ea1af1637f87b9b4622d46e866952bb022e538c9", size = 185918, upload-time = "2025-06-19T22:48:08.269Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/be/9c/92789c596b8df838baa98fa71844d84283302f7604ed565dafe5a6b5041a/oauthlib-3.3.1-py3-none-any.whl", hash = "sha256:88119c938d2b8fb88561af5f6ee0eec8cc8d552b7bb1f712743136eb7523b7a1", size = 160065, upload-time = "2025-06-19T22:48:06.508Z" }, +] + +[[package]] +name = "openai" +version = "1.102.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "distro" }, + { name = "httpx" }, + { name = "jiter" }, + { name = "pydantic" }, + { name = "sniffio" }, + { name = "tqdm" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/07/55/da5598ed5c6bdd9939633854049cddc5cbac0da938dfcfcb3c6b119c16c0/openai-1.102.0.tar.gz", hash = "sha256:2e0153bcd64a6523071e90211cbfca1f2bbc5ceedd0993ba932a5869f93b7fc9", size = 519027, upload-time = "2025-08-26T20:50:29.397Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/0d/c9e7016d82c53c5b5e23e2bad36daebb8921ed44f69c0a985c6529a35106/openai-1.102.0-py3-none-any.whl", hash = "sha256:d751a7e95e222b5325306362ad02a7aa96e1fab3ed05b5888ce1c7ca63451345", size = 812015, upload-time = "2025-08-26T20:50:27.219Z" }, +] + +[[package]] +name = "orjson" +version = "3.11.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/be/4d/8df5f83256a809c22c4d6792ce8d43bb503be0fb7a8e4da9025754b09658/orjson-3.11.3.tar.gz", hash = "sha256:1c0603b1d2ffcd43a411d64797a19556ef76958aef1c182f22dc30860152a98a", size = 5482394, upload-time = "2025-08-26T17:46:43.171Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3d/b0/a7edab2a00cdcb2688e1c943401cb3236323e7bfd2839815c6131a3742f4/orjson-3.11.3-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8c752089db84333e36d754c4baf19c0e1437012242048439c7e80eb0e6426e3b", size = 238259, upload-time = "2025-08-26T17:45:15.093Z" }, + { url = "https://files.pythonhosted.org/packages/e1/c6/ff4865a9cc398a07a83342713b5932e4dc3cb4bf4bc04e8f83dedfc0d736/orjson-3.11.3-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:9b8761b6cf04a856eb544acdd82fc594b978f12ac3602d6374a7edb9d86fd2c2", size = 127633, upload-time = "2025-08-26T17:45:16.417Z" }, + { url = "https://files.pythonhosted.org/packages/6e/e6/e00bea2d9472f44fe8794f523e548ce0ad51eb9693cf538a753a27b8bda4/orjson-3.11.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b13974dc8ac6ba22feaa867fc19135a3e01a134b4f7c9c28162fed4d615008a", size = 123061, upload-time = "2025-08-26T17:45:17.673Z" }, + { url = "https://files.pythonhosted.org/packages/54/31/9fbb78b8e1eb3ac605467cb846e1c08d0588506028b37f4ee21f978a51d4/orjson-3.11.3-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f83abab5bacb76d9c821fd5c07728ff224ed0e52d7a71b7b3de822f3df04e15c", size = 127956, upload-time = "2025-08-26T17:45:19.172Z" }, + { url = "https://files.pythonhosted.org/packages/36/88/b0604c22af1eed9f98d709a96302006915cfd724a7ebd27d6dd11c22d80b/orjson-3.11.3-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e6fbaf48a744b94091a56c62897b27c31ee2da93d826aa5b207131a1e13d4064", size = 130790, upload-time = "2025-08-26T17:45:20.586Z" }, + { url = "https://files.pythonhosted.org/packages/0e/9d/1c1238ae9fffbfed51ba1e507731b3faaf6b846126a47e9649222b0fd06f/orjson-3.11.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bc779b4f4bba2847d0d2940081a7b6f7b5877e05408ffbb74fa1faf4a136c424", size = 
132385, upload-time = "2025-08-26T17:45:22.036Z" }, + { url = "https://files.pythonhosted.org/packages/a3/b5/c06f1b090a1c875f337e21dd71943bc9d84087f7cdf8c6e9086902c34e42/orjson-3.11.3-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd4b909ce4c50faa2192da6bb684d9848d4510b736b0611b6ab4020ea6fd2d23", size = 135305, upload-time = "2025-08-26T17:45:23.4Z" }, + { url = "https://files.pythonhosted.org/packages/a0/26/5f028c7d81ad2ebbf84414ba6d6c9cac03f22f5cd0d01eb40fb2d6a06b07/orjson-3.11.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:524b765ad888dc5518bbce12c77c2e83dee1ed6b0992c1790cc5fb49bb4b6667", size = 132875, upload-time = "2025-08-26T17:45:25.182Z" }, + { url = "https://files.pythonhosted.org/packages/fe/d4/b8df70d9cfb56e385bf39b4e915298f9ae6c61454c8154a0f5fd7efcd42e/orjson-3.11.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:84fd82870b97ae3cdcea9d8746e592b6d40e1e4d4527835fc520c588d2ded04f", size = 130940, upload-time = "2025-08-26T17:45:27.209Z" }, + { url = "https://files.pythonhosted.org/packages/da/5e/afe6a052ebc1a4741c792dd96e9f65bf3939d2094e8b356503b68d48f9f5/orjson-3.11.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fbecb9709111be913ae6879b07bafd4b0785b44c1eb5cac8ac76da048b3885a1", size = 403852, upload-time = "2025-08-26T17:45:28.478Z" }, + { url = "https://files.pythonhosted.org/packages/f8/90/7bbabafeb2ce65915e9247f14a56b29c9334003536009ef5b122783fe67e/orjson-3.11.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9dba358d55aee552bd868de348f4736ca5a4086d9a62e2bfbbeeb5629fe8b0cc", size = 146293, upload-time = "2025-08-26T17:45:29.86Z" }, + { url = "https://files.pythonhosted.org/packages/27/b3/2d703946447da8b093350570644a663df69448c9d9330e5f1d9cce997f20/orjson-3.11.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:eabcf2e84f1d7105f84580e03012270c7e97ecb1fb1618bda395061b2a84a049", size = 135470, upload-time = "2025-08-26T17:45:31.243Z" }, + { url = "https://files.pythonhosted.org/packages/38/70/b14dcfae7aff0e379b0119c8a812f8396678919c431efccc8e8a0263e4d9/orjson-3.11.3-cp312-cp312-win32.whl", hash = "sha256:3782d2c60b8116772aea8d9b7905221437fdf53e7277282e8d8b07c220f96cca", size = 136248, upload-time = "2025-08-26T17:45:32.567Z" }, + { url = "https://files.pythonhosted.org/packages/35/b8/9e3127d65de7fff243f7f3e53f59a531bf6bb295ebe5db024c2503cc0726/orjson-3.11.3-cp312-cp312-win_amd64.whl", hash = "sha256:79b44319268af2eaa3e315b92298de9a0067ade6e6003ddaef72f8e0bedb94f1", size = 131437, upload-time = "2025-08-26T17:45:34.949Z" }, + { url = "https://files.pythonhosted.org/packages/51/92/a946e737d4d8a7fd84a606aba96220043dcc7d6988b9e7551f7f6d5ba5ad/orjson-3.11.3-cp312-cp312-win_arm64.whl", hash = "sha256:0e92a4e83341ef79d835ca21b8bd13e27c859e4e9e4d7b63defc6e58462a3710", size = 125978, upload-time = "2025-08-26T17:45:36.422Z" }, +] + +[[package]] +name = "ormsgpack" +version = "1.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/92/36/44eed5ef8ce93cded76a576780bab16425ce7876f10d3e2e6265e46c21ea/ormsgpack-1.10.0.tar.gz", hash = "sha256:7f7a27efd67ef22d7182ec3b7fa7e9d147c3ad9be2a24656b23c989077e08b16", size = 58629, upload-time = "2025-05-24T19:07:53.944Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/95/f3ab1a7638f6aa9362e87916bb96087fbbc5909db57e19f12ad127560e1e/ormsgpack-1.10.0-cp312-cp312-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = 
"sha256:4e159d50cd4064d7540e2bc6a0ab66eab70b0cc40c618b485324ee17037527c0", size = 376806, upload-time = "2025-05-24T19:07:17.221Z" }, + { url = "https://files.pythonhosted.org/packages/6c/2b/42f559f13c0b0f647b09d749682851d47c1a7e48308c43612ae6833499c8/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eeb47c85f3a866e29279d801115b554af0fefc409e2ed8aa90aabfa77efe5cc6", size = 204433, upload-time = "2025-05-24T19:07:18.569Z" }, + { url = "https://files.pythonhosted.org/packages/45/42/1ca0cb4d8c80340a89a4af9e6d8951fb8ba0d076a899d2084eadf536f677/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c28249574934534c9bd5dce5485c52f21bcea0ee44d13ece3def6e3d2c3798b5", size = 215547, upload-time = "2025-05-24T19:07:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/0a/38/184a570d7c44c0260bc576d1daaac35b2bfd465a50a08189518505748b9a/ormsgpack-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1957dcadbb16e6a981cd3f9caef9faf4c2df1125e2a1b702ee8236a55837ce07", size = 216746, upload-time = "2025-05-24T19:07:21.83Z" }, + { url = "https://files.pythonhosted.org/packages/69/2f/1aaffd08f6b7fdc2a57336a80bdfb8df24e6a65ada5aa769afecfcbc6cc6/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3b29412558c740bf6bac156727aa85ac67f9952cd6f071318f29ee72e1a76044", size = 384783, upload-time = "2025-05-24T19:07:23.674Z" }, + { url = "https://files.pythonhosted.org/packages/a9/63/3e53d6f43bb35e00c98f2b8ab2006d5138089ad254bc405614fbf0213502/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6933f350c2041ec189fe739f0ba7d6117c8772f5bc81f45b97697a84d03020dd", size = 479076, upload-time = "2025-05-24T19:07:25.047Z" }, + { url = "https://files.pythonhosted.org/packages/b8/19/fa1121b03b61402bb4d04e35d164e2320ef73dfb001b57748110319dd014/ormsgpack-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:9a86de06d368fcc2e58b79dece527dc8ca831e0e8b9cec5d6e633d2777ec93d0", size = 390447, upload-time = "2025-05-24T19:07:26.568Z" }, + { url = "https://files.pythonhosted.org/packages/b0/0d/73143ecb94ac4a5dcba223402139240a75dee0cc6ba8a543788a5646407a/ormsgpack-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:35fa9f81e5b9a0dab42e09a73f7339ecffdb978d6dbf9deb2ecf1e9fc7808722", size = 121401, upload-time = "2025-05-24T19:07:28.308Z" }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950, upload-time = "2024-11-08T09:47:47.202Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451, upload-time = "2024-11-08T09:47:44.722Z" }, +] + +[[package]] +name = "pailliers" +version = "0.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "egcd" }, + { name = "rabinmiller" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b5/c2/578c08af348247c025179e9f22d4970549fd58635d3881a9ac86192b159b/pailliers-0.2.0.tar.gz", hash = "sha256:a1d3d7d840594f51073e531078b3da4dc5a7a527b410102a0f0fa65d6c222871", size = 8919, upload-time = 
"2025-01-01T23:18:57.343Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/0e/d793836d158ea15f7705e8ae705d73991f58e3eda0dde07e64bc423a4c12/pailliers-0.2.0-py3-none-any.whl", hash = "sha256:ad0ddc72be63f9b3c10200e23178fe527b566c4aa86659ab54a8faeb367ac7d6", size = 7404, upload-time = "2025-01-01T23:18:54.718Z" }, +] + +[[package]] +name = "paramiko" +version = "3.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcrypt" }, + { name = "cryptography" }, + { name = "pynacl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/15/ad6ce226e8138315f2451c2aeea985bf35ee910afb477bae7477dc3a8f3b/paramiko-3.5.1.tar.gz", hash = "sha256:b2c665bc45b2b215bd7d7f039901b14b067da00f3a11e6640995fd58f2664822", size = 1566110, upload-time = "2025-02-04T02:37:59.783Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/f8/c7bd0ef12954a81a1d3cea60a13946bd9a49a0036a5927770c461eade7ae/paramiko-3.5.1-py3-none-any.whl", hash = "sha256:43b9a0501fc2b5e70680388d9346cf252cfb7d00b0667c39e80eb43a408b8f61", size = 227298, upload-time = "2025-02-04T02:37:57.672Z" }, +] + +[[package]] +name = "parsimonious" +version = "0.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/91/abdc50c4ef06fdf8d047f60ee777ca9b2a7885e1a9cea81343fbecda52d7/parsimonious-0.10.0.tar.gz", hash = "sha256:8281600da180ec8ae35427a4ab4f7b82bfec1e3d1e52f80cb60ea82b9512501c", size = 52172, upload-time = "2022-09-03T17:01:17.004Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/0f/c8b64d9b54ea631fcad4e9e3c8dbe8c11bb32a623be94f22974c88e71eaf/parsimonious-0.10.0-py3-none-any.whl", hash = "sha256:982ab435fabe86519b57f6b35610aa4e4e977e9f02a14353edf4bbc75369fc0f", size = 48427, upload-time = "2022-09-03T17:01:13.814Z" }, +] + +[[package]] +name = "pgvector" +version = "0.3.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/d8/fd6009cee3e03214667df488cdcf9609461d729968da94e4f95d6359d304/pgvector-0.3.6.tar.gz", hash = "sha256:31d01690e6ea26cea8a633cde5f0f55f5b246d9c8292d68efdef8c22ec994ade", size = 25421, upload-time = "2024-10-27T00:15:09.632Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/81/f457d6d361e04d061bef413749a6e1ab04d98cfeec6d8abcfe40184750f3/pgvector-0.3.6-py3-none-any.whl", hash = "sha256:f6c269b3c110ccb7496bac87202148ed18f34b390a0189c783e351062400a75a", size = 24880, upload-time = "2024-10-27T00:15:08.045Z" }, +] + +[[package]] +name = "pillow" +version = "11.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/0d/d0d6dea55cd152ce3d6767bb38a8fc10e33796ba4ba210cbab9354b6d238/pillow-11.3.0.tar.gz", hash = "sha256:3828ee7586cd0b2091b6209e5ad53e20d0649bbe87164a459d0676e035e8f523", size = 47113069, upload-time = "2025-07-01T09:16:30.666Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/fe/1bc9b3ee13f68487a99ac9529968035cca2f0a51ec36892060edcc51d06a/pillow-11.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fdae223722da47b024b867c1ea0be64e0df702c5e0a60e27daad39bf960dd1e4", size = 5278800, upload-time = "2025-07-01T09:14:17.648Z" }, + { url = "https://files.pythonhosted.org/packages/2c/32/7e2ac19b5713657384cec55f89065fb306b06af008cfd87e572035b27119/pillow-11.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:921bd305b10e82b4d1f5e802b6850677f965d8394203d182f078873851dada69", size = 4686296, upload-time = "2025-07-01T09:14:19.828Z" }, + { url = "https://files.pythonhosted.org/packages/8e/1e/b9e12bbe6e4c2220effebc09ea0923a07a6da1e1f1bfbc8d7d29a01ce32b/pillow-11.3.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:eb76541cba2f958032d79d143b98a3a6b3ea87f0959bbe256c0b5e416599fd5d", size = 5871726, upload-time = "2025-07-03T13:10:04.448Z" }, + { url = "https://files.pythonhosted.org/packages/8d/33/e9200d2bd7ba00dc3ddb78df1198a6e80d7669cce6c2bdbeb2530a74ec58/pillow-11.3.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:67172f2944ebba3d4a7b54f2e95c786a3a50c21b88456329314caaa28cda70f6", size = 7644652, upload-time = "2025-07-03T13:10:10.391Z" }, + { url = "https://files.pythonhosted.org/packages/41/f1/6f2427a26fc683e00d985bc391bdd76d8dd4e92fac33d841127eb8fb2313/pillow-11.3.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97f07ed9f56a3b9b5f49d3661dc9607484e85c67e27f3e8be2c7d28ca032fec7", size = 5977787, upload-time = "2025-07-01T09:14:21.63Z" }, + { url = "https://files.pythonhosted.org/packages/e4/c9/06dd4a38974e24f932ff5f98ea3c546ce3f8c995d3f0985f8e5ba48bba19/pillow-11.3.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:676b2815362456b5b3216b4fd5bd89d362100dc6f4945154ff172e206a22c024", size = 6645236, upload-time = "2025-07-01T09:14:23.321Z" }, + { url = "https://files.pythonhosted.org/packages/40/e7/848f69fb79843b3d91241bad658e9c14f39a32f71a301bcd1d139416d1be/pillow-11.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3e184b2f26ff146363dd07bde8b711833d7b0202e27d13540bfe2e35a323a809", size = 6086950, upload-time = "2025-07-01T09:14:25.237Z" }, + { url = "https://files.pythonhosted.org/packages/0b/1a/7cff92e695a2a29ac1958c2a0fe4c0b2393b60aac13b04a4fe2735cad52d/pillow-11.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6be31e3fc9a621e071bc17bb7de63b85cbe0bfae91bb0363c893cbe67247780d", size = 6723358, upload-time = "2025-07-01T09:14:27.053Z" }, + { url = "https://files.pythonhosted.org/packages/26/7d/73699ad77895f69edff76b0f332acc3d497f22f5d75e5360f78cbcaff248/pillow-11.3.0-cp312-cp312-win32.whl", hash = "sha256:7b161756381f0918e05e7cb8a371fff367e807770f8fe92ecb20d905d0e1c149", size = 6275079, upload-time = "2025-07-01T09:14:30.104Z" }, + { url = "https://files.pythonhosted.org/packages/8c/ce/e7dfc873bdd9828f3b6e5c2bbb74e47a98ec23cc5c74fc4e54462f0d9204/pillow-11.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:a6444696fce635783440b7f7a9fc24b3ad10a9ea3f0ab66c5905be1c19ccf17d", size = 6986324, upload-time = "2025-07-01T09:14:31.899Z" }, + { url = "https://files.pythonhosted.org/packages/16/8f/b13447d1bf0b1f7467ce7d86f6e6edf66c0ad7cf44cf5c87a37f9bed9936/pillow-11.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:2aceea54f957dd4448264f9bf40875da0415c83eb85f55069d89c0ed436e3542", size = 2423067, upload-time = "2025-07-01T09:14:33.709Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.3.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/fc/128cc9cb8f03208bdbf93d3aa862e16d376844a14f9a0ce5cf4507372de4/platformdirs-4.3.6.tar.gz", hash = "sha256:357fb2acbc885b0419afd3ce3ed34564c13c9b95c89360cd9563f73aa5e2b907", size = 21302, upload-time = "2024-09-17T19:06:50.688Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/3c/a6/bc1012356d8ece4d66dd75c4b9fc6c1f6650ddd5991e421177d9f8f671be/platformdirs-4.3.6-py3-none-any.whl", hash = "sha256:73e575e1408ab8103900836b97580d5307456908a03e92031bab39e4554cc3fb", size = 18439, upload-time = "2024-09-17T19:06:49.212Z" }, +] + +[[package]] +name = "pluggy" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/2d/02d4312c973c6050a18b314a5ad0b3210edb65a906f868e31c111dede4a6/pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", size = 67955, upload-time = "2024-04-20T21:34:42.531Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/5f/e351af9a41f866ac3f1fac4ca0613908d9a41741cfcf2228f4ad853b697d/pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669", size = 20556, upload-time = "2024-04-20T21:34:40.434Z" }, +] + +[[package]] +name = "postgrest" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecation" }, + { name = "httpx", extra = ["http2"] }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6e/3e/1b50568e1f5db0bdced4a82c7887e37326585faef7ca43ead86849cb4861/postgrest-1.1.1.tar.gz", hash = "sha256:f3bb3e8c4602775c75c844a31f565f5f3dd584df4d36d683f0b67d01a86be322", size = 15431, upload-time = "2025-06-23T19:21:34.742Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a4/71/188a50ea64c17f73ff4df5196ec1553a8f1723421eb2d1069c73bab47d78/postgrest-1.1.1-py3-none-any.whl", hash = "sha256:98a6035ee1d14288484bfe36235942c5fb2d26af6d8120dfe3efbe007859251a", size = 22366, upload-time = "2025-06-23T19:21:33.637Z" }, +] + +[[package]] +name = "propcache" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/16/43264e4a779dd8588c21a70f0709665ee8f611211bdd2c87d952cfa7c776/propcache-0.3.2.tar.gz", hash = "sha256:20d7d62e4e7ef05f221e0db2856b979540686342e7dd9973b815599c7057e168", size = 44139, upload-time = "2025-06-09T22:56:06.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/42/9ca01b0a6f48e81615dca4765a8f1dd2c057e0540f6116a27dc5ee01dfb6/propcache-0.3.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8de106b6c84506b31c27168582cd3cb3000a6412c16df14a8628e5871ff83c10", size = 73674, upload-time = "2025-06-09T22:54:30.551Z" }, + { url = "https://files.pythonhosted.org/packages/af/6e/21293133beb550f9c901bbece755d582bfaf2176bee4774000bd4dd41884/propcache-0.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:28710b0d3975117239c76600ea351934ac7b5ff56e60953474342608dbbb6154", size = 43570, upload-time = "2025-06-09T22:54:32.296Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c8/0393a0a3a2b8760eb3bde3c147f62b20044f0ddac81e9d6ed7318ec0d852/propcache-0.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce26862344bdf836650ed2487c3d724b00fbfec4233a1013f597b78c1cb73615", size = 43094, upload-time = "2025-06-09T22:54:33.929Z" }, + { url = "https://files.pythonhosted.org/packages/37/2c/489afe311a690399d04a3e03b069225670c1d489eb7b044a566511c1c498/propcache-0.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bca54bd347a253af2cf4544bbec232ab982f4868de0dd684246b67a51bc6b1db", size = 226958, upload-time = "2025-06-09T22:54:35.186Z" }, + { url = 
"https://files.pythonhosted.org/packages/9d/ca/63b520d2f3d418c968bf596839ae26cf7f87bead026b6192d4da6a08c467/propcache-0.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55780d5e9a2ddc59711d727226bb1ba83a22dd32f64ee15594b9392b1f544eb1", size = 234894, upload-time = "2025-06-09T22:54:36.708Z" }, + { url = "https://files.pythonhosted.org/packages/11/60/1d0ed6fff455a028d678df30cc28dcee7af77fa2b0e6962ce1df95c9a2a9/propcache-0.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:035e631be25d6975ed87ab23153db6a73426a48db688070d925aa27e996fe93c", size = 233672, upload-time = "2025-06-09T22:54:38.062Z" }, + { url = "https://files.pythonhosted.org/packages/37/7c/54fd5301ef38505ab235d98827207176a5c9b2aa61939b10a460ca53e123/propcache-0.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ee6f22b6eaa39297c751d0e80c0d3a454f112f5c6481214fcf4c092074cecd67", size = 224395, upload-time = "2025-06-09T22:54:39.634Z" }, + { url = "https://files.pythonhosted.org/packages/ee/1a/89a40e0846f5de05fdc6779883bf46ba980e6df4d2ff8fb02643de126592/propcache-0.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ca3aee1aa955438c4dba34fc20a9f390e4c79967257d830f137bd5a8a32ed3b", size = 212510, upload-time = "2025-06-09T22:54:41.565Z" }, + { url = "https://files.pythonhosted.org/packages/5e/33/ca98368586c9566a6b8d5ef66e30484f8da84c0aac3f2d9aec6d31a11bd5/propcache-0.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:7a4f30862869fa2b68380d677cc1c5fcf1e0f2b9ea0cf665812895c75d0ca3b8", size = 222949, upload-time = "2025-06-09T22:54:43.038Z" }, + { url = "https://files.pythonhosted.org/packages/ba/11/ace870d0aafe443b33b2f0b7efdb872b7c3abd505bfb4890716ad7865e9d/propcache-0.3.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b77ec3c257d7816d9f3700013639db7491a434644c906a2578a11daf13176251", size = 217258, upload-time = "2025-06-09T22:54:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/5b/d2/86fd6f7adffcfc74b42c10a6b7db721d1d9ca1055c45d39a1a8f2a740a21/propcache-0.3.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:cab90ac9d3f14b2d5050928483d3d3b8fb6b4018893fc75710e6aa361ecb2474", size = 213036, upload-time = "2025-06-09T22:54:46.243Z" }, + { url = "https://files.pythonhosted.org/packages/07/94/2d7d1e328f45ff34a0a284cf5a2847013701e24c2a53117e7c280a4316b3/propcache-0.3.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:0b504d29f3c47cf6b9e936c1852246c83d450e8e063d50562115a6be6d3a2535", size = 227684, upload-time = "2025-06-09T22:54:47.63Z" }, + { url = "https://files.pythonhosted.org/packages/b7/05/37ae63a0087677e90b1d14710e532ff104d44bc1efa3b3970fff99b891dc/propcache-0.3.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:ce2ac2675a6aa41ddb2a0c9cbff53780a617ac3d43e620f8fd77ba1c84dcfc06", size = 234562, upload-time = "2025-06-09T22:54:48.982Z" }, + { url = "https://files.pythonhosted.org/packages/a4/7c/3f539fcae630408d0bd8bf3208b9a647ccad10976eda62402a80adf8fc34/propcache-0.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b4239611205294cc433845b914131b2a1f03500ff3c1ed093ed216b82621e1", size = 222142, upload-time = "2025-06-09T22:54:50.424Z" }, + { url = "https://files.pythonhosted.org/packages/7c/d2/34b9eac8c35f79f8a962546b3e97e9d4b990c420ee66ac8255d5d9611648/propcache-0.3.2-cp312-cp312-win32.whl", hash = "sha256:df4a81b9b53449ebc90cc4deefb052c1dd934ba85012aa912c7ea7b7e38b60c1", size = 37711, upload-time = "2025-06-09T22:54:52.072Z" }, + { url = 
"https://files.pythonhosted.org/packages/19/61/d582be5d226cf79071681d1b46b848d6cb03d7b70af7063e33a2787eaa03/propcache-0.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7046e79b989d7fe457bb755844019e10f693752d169076138abf17f31380800c", size = 41479, upload-time = "2025-06-09T22:54:53.234Z" }, + { url = "https://files.pythonhosted.org/packages/cc/35/cc0aaecf278bb4575b8555f2b137de5ab821595ddae9da9d3cd1da4072c7/propcache-0.3.2-py3-none-any.whl", hash = "sha256:98f1ec44fb675f5052cccc8e609c46ed23a35a1cfd18545ad4e29002d858a43f", size = 12663, upload-time = "2025-06-09T22:56:04.484Z" }, +] + +[[package]] +name = "psycopg" +version = "3.2.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/4a/93a6ab570a8d1a4ad171a1f4256e205ce48d828781312c0bbaff36380ecb/psycopg-3.2.9.tar.gz", hash = "sha256:2fbb46fcd17bc81f993f28c47f1ebea38d66ae97cc2dbc3cad73b37cefbff700", size = 158122, upload-time = "2025-05-13T16:11:15.533Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/b0/a73c195a56eb6b92e937a5ca58521a5c3346fb233345adc80fd3e2f542e2/psycopg-3.2.9-py3-none-any.whl", hash = "sha256:01a8dadccdaac2123c916208c96e06631641c0566b22005493f09663c7a8d3b6", size = 202705, upload-time = "2025-05-13T16:06:26.584Z" }, +] + +[[package]] +name = "psycopg-pool" +version = "3.2.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/13/1e7850bb2c69a63267c3dbf37387d3f71a00fd0e2fa55c5db14d64ba1af4/psycopg_pool-3.2.6.tar.gz", hash = "sha256:0f92a7817719517212fbfe2fd58b8c35c1850cdd2a80d36b581ba2085d9148e5", size = 29770, upload-time = "2025-02-26T12:03:47.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/fd/4feb52a55c1a4bd748f2acaed1903ab54a723c47f6d0242780f4d97104d4/psycopg_pool-3.2.6-py3-none-any.whl", hash = "sha256:5887318a9f6af906d041a0b1dc1c60f8f0dda8340c2572b74e10907b51ed5da7", size = 38252, upload-time = "2025-02-26T12:03:45.073Z" }, +] + +[[package]] +name = "psycopg2-binary" +version = "2.9.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cb/0e/bdc8274dc0585090b4e3432267d7be4dfbfd8971c0fa59167c711105a6bf/psycopg2-binary-2.9.10.tar.gz", hash = "sha256:4b3df0e6990aa98acda57d983942eff13d824135fe2250e6522edaa782a06de2", size = 385764, upload-time = "2024-10-16T11:24:58.126Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/49/7d/465cc9795cf76f6d329efdafca74693714556ea3891813701ac1fee87545/psycopg2_binary-2.9.10-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:880845dfe1f85d9d5f7c412efea7a08946a46894537e4e5d091732eb1d34d9a0", size = 3044771, upload-time = "2024-10-16T11:20:35.234Z" }, + { url = "https://files.pythonhosted.org/packages/8b/31/6d225b7b641a1a2148e3ed65e1aa74fc86ba3fee850545e27be9e1de893d/psycopg2_binary-2.9.10-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9440fa522a79356aaa482aa4ba500b65f28e5d0e63b801abf6aa152a29bd842a", size = 3275336, upload-time = "2024-10-16T11:20:38.742Z" }, + { url = "https://files.pythonhosted.org/packages/30/b7/a68c2b4bff1cbb1728e3ec864b2d92327c77ad52edcd27922535a8366f68/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e3923c1d9870c49a2d44f795df0c889a22380d36ef92440ff618ec315757e539", size = 2851637, upload-time = 
"2024-10-16T11:20:42.145Z" }, + { url = "https://files.pythonhosted.org/packages/0b/b1/cfedc0e0e6f9ad61f8657fd173b2f831ce261c02a08c0b09c652b127d813/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b2c956c028ea5de47ff3a8d6b3cc3330ab45cf0b7c3da35a2d6ff8420896526", size = 3082097, upload-time = "2024-10-16T11:20:46.185Z" }, + { url = "https://files.pythonhosted.org/packages/18/ed/0a8e4153c9b769f59c02fb5e7914f20f0b2483a19dae7bf2db54b743d0d0/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f758ed67cab30b9a8d2833609513ce4d3bd027641673d4ebc9c067e4d208eec1", size = 3264776, upload-time = "2024-10-16T11:20:50.879Z" }, + { url = "https://files.pythonhosted.org/packages/10/db/d09da68c6a0cdab41566b74e0a6068a425f077169bed0946559b7348ebe9/psycopg2_binary-2.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cd9b4f2cfab88ed4a9106192de509464b75a906462fb846b936eabe45c2063e", size = 3020968, upload-time = "2024-10-16T11:20:56.819Z" }, + { url = "https://files.pythonhosted.org/packages/94/28/4d6f8c255f0dfffb410db2b3f9ac5218d959a66c715c34cac31081e19b95/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6dc08420625b5a20b53551c50deae6e231e6371194fa0651dbe0fb206452ae1f", size = 2872334, upload-time = "2024-10-16T11:21:02.411Z" }, + { url = "https://files.pythonhosted.org/packages/05/f7/20d7bf796593c4fea95e12119d6cc384ff1f6141a24fbb7df5a668d29d29/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d7cd730dfa7c36dbe8724426bf5612798734bff2d3c3857f36f2733f5bfc7c00", size = 2822722, upload-time = "2024-10-16T11:21:09.01Z" }, + { url = "https://files.pythonhosted.org/packages/4d/e4/0c407ae919ef626dbdb32835a03b6737013c3cc7240169843965cada2bdf/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:155e69561d54d02b3c3209545fb08938e27889ff5a10c19de8d23eb5a41be8a5", size = 2920132, upload-time = "2024-10-16T11:21:16.339Z" }, + { url = "https://files.pythonhosted.org/packages/2d/70/aa69c9f69cf09a01da224909ff6ce8b68faeef476f00f7ec377e8f03be70/psycopg2_binary-2.9.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c3cc28a6fd5a4a26224007712e79b81dbaee2ffb90ff406256158ec4d7b52b47", size = 2959312, upload-time = "2024-10-16T11:21:25.584Z" }, + { url = "https://files.pythonhosted.org/packages/d3/bd/213e59854fafe87ba47814bf413ace0dcee33a89c8c8c814faca6bc7cf3c/psycopg2_binary-2.9.10-cp312-cp312-win32.whl", hash = "sha256:ec8a77f521a17506a24a5f626cb2aee7850f9b69a0afe704586f63a464f3cd64", size = 1025191, upload-time = "2024-10-16T11:21:29.912Z" }, + { url = "https://files.pythonhosted.org/packages/92/29/06261ea000e2dc1e22907dbbc483a1093665509ea586b29b8986a0e56733/psycopg2_binary-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:18c5ee682b9c6dd3696dad6e54cc7ff3a1a9020df6a5c0f861ef8bfd338c3ca0", size = 1164031, upload-time = "2024-10-16T11:21:34.211Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = 
"sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pycryptodome" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8e/a6/8452177684d5e906854776276ddd34eca30d1b1e15aa1ee9cefc289a33f5/pycryptodome-3.23.0.tar.gz", hash = "sha256:447700a657182d60338bab09fdb27518f8856aecd80ae4c6bdddb67ff5da44ef", size = 4921276, upload-time = "2025-05-17T17:21:45.242Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/6c/a1f71542c969912bb0e106f64f60a56cc1f0fabecf9396f45accbe63fa68/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:187058ab80b3281b1de11c2e6842a357a1f71b42cb1e15bce373f3d238135c27", size = 2495627, upload-time = "2025-05-17T17:20:47.139Z" }, + { url = "https://files.pythonhosted.org/packages/6e/4e/a066527e079fc5002390c8acdd3aca431e6ea0a50ffd7201551175b47323/pycryptodome-3.23.0-cp37-abi3-macosx_10_9_x86_64.whl", hash = "sha256:cfb5cd445280c5b0a4e6187a7ce8de5a07b5f3f897f235caa11f1f435f182843", size = 1640362, upload-time = "2025-05-17T17:20:50.392Z" }, + { url = "https://files.pythonhosted.org/packages/50/52/adaf4c8c100a8c49d2bd058e5b551f73dfd8cb89eb4911e25a0c469b6b4e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:67bd81fcbe34f43ad9422ee8fd4843c8e7198dd88dd3d40e6de42ee65fbe1490", size = 2182625, upload-time = "2025-05-17T17:20:52.866Z" }, + { url = "https://files.pythonhosted.org/packages/5f/e9/a09476d436d0ff1402ac3867d933c61805ec2326c6ea557aeeac3825604e/pycryptodome-3.23.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c8987bd3307a39bc03df5c8e0e3d8be0c4c3518b7f044b0f4c15d1aa78f52575", size = 2268954, upload-time = "2025-05-17T17:20:55.027Z" }, + { url = "https://files.pythonhosted.org/packages/f9/c5/ffe6474e0c551d54cab931918127c46d70cab8f114e0c2b5a3c071c2f484/pycryptodome-3.23.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa0698f65e5b570426fc31b8162ed4603b0c2841cbb9088e2b01641e3065915b", size = 2308534, upload-time = "2025-05-17T17:20:57.279Z" }, + { url = "https://files.pythonhosted.org/packages/18/28/e199677fc15ecf43010f2463fde4c1a53015d1fe95fb03bca2890836603a/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:53ecbafc2b55353edcebd64bf5da94a2a2cdf5090a6915bcca6eca6cc452585a", size = 2181853, upload-time = "2025-05-17T17:20:59.322Z" }, + { url = "https://files.pythonhosted.org/packages/ce/ea/4fdb09f2165ce1365c9eaefef36625583371ee514db58dc9b65d3a255c4c/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:156df9667ad9f2ad26255926524e1c136d6664b741547deb0a86a9acf5ea631f", size = 2342465, upload-time = "2025-05-17T17:21:03.83Z" }, + { url = "https://files.pythonhosted.org/packages/22/82/6edc3fc42fe9284aead511394bac167693fb2b0e0395b28b8bedaa07ef04/pycryptodome-3.23.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:dea827b4d55ee390dc89b2afe5927d4308a8b538ae91d9c6f7a5090f397af1aa", size = 2267414, upload-time = "2025-05-17T17:21:06.72Z" }, + { url = "https://files.pythonhosted.org/packages/59/fe/aae679b64363eb78326c7fdc9d06ec3de18bac68be4b612fc1fe8902693c/pycryptodome-3.23.0-cp37-abi3-win32.whl", hash = "sha256:507dbead45474b62b2bbe318eb1c4c8ee641077532067fec9c1aa82c31f84886", size = 1768484, upload-time = "2025-05-17T17:21:08.535Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/2f/e97a1b8294db0daaa87012c24a7bb714147c7ade7656973fd6c736b484ff/pycryptodome-3.23.0-cp37-abi3-win_amd64.whl", hash = "sha256:c75b52aacc6c0c260f204cbdd834f76edc9fb0d8e0da9fbf8352ef58202564e2", size = 1799636, upload-time = "2025-05-17T17:21:10.393Z" }, + { url = "https://files.pythonhosted.org/packages/18/3d/f9441a0d798bf2b1e645adc3265e55706aead1255ccdad3856dbdcffec14/pycryptodome-3.23.0-cp37-abi3-win_arm64.whl", hash = "sha256:11eeeb6917903876f134b56ba11abe95c0b0fd5e3330def218083c7d98bbcb3c", size = 1703675, upload-time = "2025-05-17T17:21:13.146Z" }, +] + +[[package]] +name = "pydantic" +version = "2.10.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681, upload-time = "2025-01-24T01:42:12.693Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696, upload-time = "2025-01-24T01:42:10.371Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.27.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443, upload-time = "2024-12-18T11:31:54.917Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127, upload-time = "2024-12-18T11:28:30.346Z" }, + { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340, upload-time = "2024-12-18T11:28:32.521Z" }, + { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900, upload-time = "2024-12-18T11:28:34.507Z" }, + { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177, upload-time = "2024-12-18T11:28:36.488Z" }, + { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046, upload-time = 
"2024-12-18T11:28:39.409Z" }, + { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386, upload-time = "2024-12-18T11:28:41.221Z" }, + { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060, upload-time = "2024-12-18T11:28:44.709Z" }, + { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870, upload-time = "2024-12-18T11:28:46.839Z" }, + { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822, upload-time = "2024-12-18T11:28:48.896Z" }, + { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364, upload-time = "2024-12-18T11:28:50.755Z" }, + { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303, upload-time = "2024-12-18T11:28:54.122Z" }, + { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064, upload-time = "2024-12-18T11:28:56.074Z" }, + { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046, upload-time = "2024-12-18T11:28:58.107Z" }, + { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092, upload-time = "2024-12-18T11:29:01.335Z" }, +] + +[[package]] +name = "pydantic-settings" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "python-dotenv" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/85/1ea668bbab3c50071ca613c6ab30047fb36ab0da1b92fa8f17bbc38fd36c/pydantic_settings-2.10.1.tar.gz", hash = "sha256:06f0062169818d0f5524420a360d632d5857b83cffd4d42fe29597807a1614ee", size = 172583, upload-time = "2025-06-24T13:26:46.841Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/58/f0/427018098906416f580e3cf1366d3b1abfb408a0652e9f31600c24a1903c/pydantic_settings-2.10.1-py3-none-any.whl", hash = "sha256:a60952460b99cf661dc25c29c0ef171721f98bfcb52ef8d9ea4c943d7c8cc796", size = 45235, upload-time = "2025-06-24T13:26:45.485Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[package.optional-dependencies] +crypto = [ + { name = "cryptography" }, +] + +[[package]] +name = "pynacl" +version = "1.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/22/27582568be639dfe22ddb3902225f91f2f17ceff88ce80e4db396c8986da/PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba", size = 3392854, upload-time = "2022-01-07T22:05:41.134Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/75/0b8ede18506041c0bf23ac4d8e2971b4161cd6ce630b177d0a08eb0d8857/PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1", size = 349920, upload-time = "2022-01-07T22:05:49.156Z" }, + { url = "https://files.pythonhosted.org/packages/59/bb/fddf10acd09637327a97ef89d2a9d621328850a72f1fdc8c08bdf72e385f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92", size = 601722, upload-time = "2022-01-07T22:05:50.989Z" }, + { url = "https://files.pythonhosted.org/packages/5d/70/87a065c37cca41a75f2ce113a5a2c2aa7533be648b184ade58971b5f7ccc/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394", size = 680087, upload-time = "2022-01-07T22:05:52.539Z" }, + { url = "https://files.pythonhosted.org/packages/ee/87/f1bb6a595f14a327e8285b9eb54d41fef76c585a0edef0a45f6fc95de125/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d", size = 856678, upload-time = 
"2022-01-07T22:05:54.251Z" }, + { url = "https://files.pythonhosted.org/packages/66/28/ca86676b69bf9f90e710571b67450508484388bfce09acf8a46f0b8c785f/PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858", size = 1133660, upload-time = "2022-01-07T22:05:56.056Z" }, + { url = "https://files.pythonhosted.org/packages/3d/85/c262db650e86812585e2bc59e497a8f59948a005325a11bbbc9ecd3fe26b/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b", size = 663824, upload-time = "2022-01-07T22:05:57.434Z" }, + { url = "https://files.pythonhosted.org/packages/fd/1a/cc308a884bd299b651f1633acb978e8596c71c33ca85e9dc9fa33a5399b9/PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff", size = 1117912, upload-time = "2022-01-07T22:05:58.665Z" }, + { url = "https://files.pythonhosted.org/packages/25/2d/b7df6ddb0c2a33afdb358f8af6ea3b8c4d1196ca45497dd37a56f0c122be/PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543", size = 204624, upload-time = "2022-01-07T22:06:00.085Z" }, + { url = "https://files.pythonhosted.org/packages/5e/22/d3db169895faaf3e2eda892f005f433a62db2decbcfbc2f61e6517adfa87/PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93", size = 212141, upload-time = "2022-01-07T22:06:01.861Z" }, +] + +[[package]] +name = "pyproject-api" +version = "1.8.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/19/441e0624a8afedd15bbcce96df1b80479dd0ff0d965f5ce8fde4f2f6ffad/pyproject_api-1.8.0.tar.gz", hash = "sha256:77b8049f2feb5d33eefcc21b57f1e279636277a8ac8ad6b5871037b243778496", size = 22340, upload-time = "2024-09-18T23:18:37.805Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ba/f4/3c4ddfcc0c19c217c6de513842d286de8021af2f2ab79bbb86c00342d778/pyproject_api-1.8.0-py3-none-any.whl", hash = "sha256:3d7d347a047afe796fd5d1885b1e391ba29be7169bd2f102fcd378f04273d228", size = 13100, upload-time = "2024-09-18T23:18:35.927Z" }, +] + +[[package]] +name = "pytest" +version = "8.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, + { name = "pygments" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/ba/45911d754e8eba3d5a841a5ce61a65a685ff1798421ac054f85aa8747dfb/pytest-8.4.1.tar.gz", hash = "sha256:7c67fd69174877359ed9371ec3af8a3d2b04741818c51e5e99cc1742251fa93c", size = 1517714, upload-time = "2025-06-18T05:48:06.109Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/29/16/c8a903f4c4dffe7a12843191437d7cd8e32751d5de349d45d3fe69544e87/pytest-8.4.1-py3-none-any.whl", hash = "sha256:539c70ba6fcead8e78eebbf1115e8b589e7565830d7d006a8723f19ac8a0afb7", size = 365474, upload-time = "2025-06-18T05:48:03.955Z" }, +] + +[[package]] +name = "pytest-asyncio" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/51/f8794af39eeb870e87a8c8068642fc07bce0c854d6865d7dd0f2a9d338c2/pytest_asyncio-1.1.0.tar.gz", hash = 
"sha256:796aa822981e01b68c12e4827b8697108f7205020f24b5793b3c41555dab68ea", size = 46652, upload-time = "2025-07-16T04:29:26.393Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157, upload-time = "2025-07-16T04:29:24.929Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, +] + +[[package]] +name = "python-multipart" +version = "0.0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/87/f44d7c9f274c7ee665a29b885ec97089ec5dc034c7f3fafa03da9e39a09e/python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13", size = 37158, upload-time = "2024-12-16T19:45:46.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/45/58/38b5afbc1a800eeea951b9285d3912613f2603bdf897a4ab0f4bd7f405fc/python_multipart-0.0.20-py3-none-any.whl", hash = "sha256:8a62d3a8335e06589fe01f2a3e178cdcc632f3fbe0d492ad9ee0ec35aab1f104", size = 24546, upload-time = "2024-12-16T19:45:44.423Z" }, +] + +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + +[[package]] +name = "pyunormalize" +version = "16.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/b3/08/568036c725dac746ecb267bb749ef930fb7907454fe69fce83c8557287fb/pyunormalize-16.0.0.tar.gz", hash = "sha256:2e1dfbb4a118154ae26f70710426a52a364b926c9191f764601f5a8cb12761f7", size = 49968, upload-time = "2024-09-17T17:08:18.245Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/f9/9d86e56f716e0651194a5ad58be9c146fcaf1de6901ac6f3cd3affeeb74e/pyunormalize-16.0.0-py3-none-any.whl", hash = "sha256:c647d95e5d1e2ea9a2f448d1d95d8518348df24eab5c3fd32d2b5c3300a49152", size = 49173, upload-time = "2024-09-17T17:08:17.078Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, +] + +[[package]] +name = "rabinmiller" +version = "0.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/c8/9a4bd1d823200b4fcbdc25584cf4e788f672cdf0d6622b66a8b49c3be925/rabinmiller-0.1.0.tar.gz", hash = "sha256:a9873aa6fdd0c26d5205d99e126fd94e6e1bb2aa966e167e136dfbfab0d0556d", size = 5159, upload-time = "2024-11-22T07:14:04.89Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/b0/68c2efd5f025b80316fce28e49ce25c5d0171aa17ce7f94a89c0a6544d2b/rabinmiller-0.1.0-py3-none-any.whl", hash = "sha256:3fec2d26fc210772ced965a8f0e2870e5582cadf255bc665ef3f4932752ada5f", size = 5309, upload-time = "2024-11-22T07:14:03.572Z" }, +] + +[[package]] +name = "realtime" +version = "2.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "python-dateutil" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1e/1e/c5f47928789cd5abb96e527929dea088213968f785983a231b3dfe08cc4f/realtime-2.4.2.tar.gz", hash = "sha256:760308d5310533f65a9098e0b482a518f6ad2f3c0f2723e83cf5856865bafc5d", size = 18802, upload-time = "2025-03-26T17:39:11.26Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/b7/1b7651f353e14543c60cdfe40e3ea4dea412cfb2e93ab6384e72be813f05/realtime-2.4.2-py3-none-any.whl", hash = "sha256:0cc1b4a097acf9c0bd3a2f1998170de47744574c606617285113ddb3021e54ca", size = 22025, upload-time = "2025-03-26T17:39:10.031Z" }, +] + +[[package]] +name = "redis" +version = "6.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0d/d6/e8b92798a5bd67d659d51a18170e91c16ac3b59738d91894651ee255ed49/redis-6.4.0.tar.gz", hash = "sha256:b01bc7282b8444e28ec36b261df5375183bb47a07eb9c603f284e89cbc5ef010", size = 4647399, upload-time = "2025-08-07T08:10:11.441Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e8/02/89e2ed7e85db6c93dfa9e8f691c5087df4e3551ab39081a4d7c6d1f90e05/redis-6.4.0-py3-none-any.whl", hash = "sha256:f0544fa9604264e9464cdf4814e7d4830f74b165d52f2a330a760a88dd248b7f", size = 279847, upload-time 
= "2025-08-07T08:10:09.84Z" }, +] + +[[package]] +name = "referencing" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" }, +] + +[[package]] +name = "regex" +version = "2025.8.29" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/10/2d333227cf5198eb3252f2d50c8ade5cd2015f11c22403f0c9e3d529e81a/regex-2025.8.29.tar.gz", hash = "sha256:731ddb27a0900fa227dfba976b4efccec8c1c6fba147829bb52e71d49e91a5d7", size = 400817, upload-time = "2025-08-29T22:43:36.985Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/a0/8c37d276a80ffda94f7e019e50cc88f898015512c7f104e49f1a0a6d3c59/regex-2025.8.29-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:dd61f18dc4446bc3a2904559a61f32e98091cef7fb796e06fa35b9bfefe4c0c5", size = 485565, upload-time = "2025-08-29T22:41:41.069Z" }, + { url = "https://files.pythonhosted.org/packages/5d/34/baf5963bec36ac250fa242f0f0e7670f013de5004db6caa31c872981df42/regex-2025.8.29-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f21b416be10a8348a7313ba8c610569a1ab4bf8ec70731750540842a4551cd3d", size = 290073, upload-time = "2025-08-29T22:41:42.686Z" }, + { url = "https://files.pythonhosted.org/packages/24/29/c5c18143cd60b736d7ff8acece126118fe5649f45a7a8db18e308f5f813d/regex-2025.8.29-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:008947a7fa92f4cb3b28201c9aa7becc0a44c31a7c2fcb934356e1877baccc09", size = 286144, upload-time = "2025-08-29T22:41:44.364Z" }, + { url = "https://files.pythonhosted.org/packages/86/7c/0d90b687d2a33fe28b201f85ddfde6b378bf41677aedbe23eb7dc79385aa/regex-2025.8.29-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e78ab1b3e68b890d7ebd69218cfbfe4a09dc00b8a47be8648510b81b932d55ff", size = 797417, upload-time = "2025-08-29T22:41:47.224Z" }, + { url = "https://files.pythonhosted.org/packages/fb/67/c391c899e5ef274c4dd4ede029ffb853ddf5ba77aa251be02cfe3810574c/regex-2025.8.29-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a848368797515bc141d3fad5fd2d81bf9e8a6a22d9ac1a4be4690dd22e997854", size = 862630, upload-time = "2025-08-29T22:41:48.891Z" }, + { url = "https://files.pythonhosted.org/packages/08/20/ae749a68da3496a133836c8724649bd2e004fc176c7c6647d9cb269cc975/regex-2025.8.29-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8eaf3ea6631f804efcf0f5bd0e4ab62ba984fd9b70e3aef44b05cc6b951cc728", size = 910837, upload-time = "2025-08-29T22:41:50.592Z" }, + { url = "https://files.pythonhosted.org/packages/e2/80/bc4244ec79fba4185fd3a29d79f77f79b3b0dc12ee426687501b0b077e2a/regex-2025.8.29-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:4561aeb36b0bf3bb44826e4b61a80c6ace0d8839bf4914d78f061f9ba61444b4", size = 801968, upload-time = "2025-08-29T22:41:54.239Z" }, + { url = "https://files.pythonhosted.org/packages/ef/bd/a2d75042bb1d3c9997e22bc0051cb9791a405589d6293c874f7c2ba487e7/regex-2025.8.29-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:93e077d1fbd24033fa427eab43d80ad47e449d25700cda78e8cac821a30090bf", size = 786626, upload-time = "2025-08-29T22:41:56.158Z" }, + { url = "https://files.pythonhosted.org/packages/24/ab/19cec75bf7d335cc7595d4857591455de118f6bfb563e6731c31f4fe33c3/regex-2025.8.29-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d92379e53d782bdb773988687300e3bccb91ad38157b754b04b1857aaeea16a3", size = 856532, upload-time = "2025-08-29T22:41:58.057Z" }, + { url = "https://files.pythonhosted.org/packages/b6/3d/517cd0b0f4b8330164d03ef0eafdd61ee839f82b891fcd8c571d5c727117/regex-2025.8.29-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:d41726de2040c2a487bbac70fdd6e3ff2f1aa47dc91f0a29f6955a6dfa0f06b6", size = 848977, upload-time = "2025-08-29T22:42:00.346Z" }, + { url = "https://files.pythonhosted.org/packages/ae/fc/b57e2644d87d038d7302f359f4042bf7092bd8259a3ae999adf236e6fbc0/regex-2025.8.29-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1915dfda52bd4d466f3a66b66988db1f647ee1d9c605858640ceeb779cffd908", size = 788112, upload-time = "2025-08-29T22:42:02.008Z" }, + { url = "https://files.pythonhosted.org/packages/a9/2f/70737feddbd33ec9f3f0cb8b38e7fc89304eccc80fd693d79a6f336e2282/regex-2025.8.29-cp312-cp312-win32.whl", hash = "sha256:e2ef0087ad6949918836f215480a9331f6c59ad54912a9a412f08ab1c9ccbc98", size = 264487, upload-time = "2025-08-29T22:42:04.401Z" }, + { url = "https://files.pythonhosted.org/packages/2f/f5/8832d05ecc5a7f80043e7521ea55adfa2d9b9ac0e646474153e7e13722c2/regex-2025.8.29-cp312-cp312-win_amd64.whl", hash = "sha256:c15d361fe9800bf38ef69c2e0c4b8b961ae4ce2f076fcf4f28e1fc9ea127f55a", size = 275455, upload-time = "2025-08-29T22:42:06.312Z" }, + { url = "https://files.pythonhosted.org/packages/a5/f9/f10ae0c4e5e22db75dda155d83056e2b70c4e87b04ad9838723ff5057e90/regex-2025.8.29-cp312-cp312-win_arm64.whl", hash = "sha256:305577fab545e64fb84d9a24269aa3132dbe05e1d7fa74b3614e93ec598fe6e6", size = 268558, upload-time = "2025-08-29T22:42:08.062Z" }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218, upload-time = "2024-05-29T15:37:49.536Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928, upload-time = "2024-05-29T15:37:47.027Z" }, +] + +[[package]] +name = "requests-oauthlib" +version = "2.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/f2/05f29bc3913aea15eb670be136045bf5c5bbf4b99ecb839da9b422bb2c85/requests-oauthlib-2.0.0.tar.gz", hash = "sha256:b3dffaebd884d8cd778494369603a9e7b58d29111bf6b41bdc2dcd87203af4e9", 
size = 55650, upload-time = "2024-03-22T20:32:29.939Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3b/5d/63d4ae3b9daea098d5d6f5da83984853c1bbacd5dc826764b249fe119d24/requests_oauthlib-2.0.0-py2.py3-none-any.whl", hash = "sha256:7dd8a5c40426b779b0868c404bdef9768deccf22749cde15852df527e6269b36", size = 24179, upload-time = "2024-03-22T20:32:28.055Z" }, +] + +[[package]] +name = "requests-toolbelt" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f3/61/d7545dafb7ac2230c70d38d31cbfe4cc64f7144dc41f6e4e4b78ecd9f5bb/requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6", size = 206888, upload-time = "2023-05-01T04:11:33.229Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3f/51/d4db610ef29373b879047326cbf6fa98b6c1969d6f6dc423279de2b1be2c/requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06", size = 54481, upload-time = "2023-05-01T04:11:28.427Z" }, +] + +[[package]] +name = "requirements-parser" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/95/96/fb6dbfebb524d5601d359a47c78fe7ba1eef90fc4096404aa60c9a906fbb/requirements_parser-0.13.0.tar.gz", hash = "sha256:0843119ca2cb2331de4eb31b10d70462e39ace698fd660a915c247d2301a4418", size = 22630, upload-time = "2025-05-21T13:42:05.464Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/60/50fbb6ffb35f733654466f1a90d162bcbea358adc3b0871339254fbc37b2/requirements_parser-0.13.0-py3-none-any.whl", hash = "sha256:2b3173faecf19ec5501971b7222d38f04cb45bb9d87d0ad629ca71e2e62ded14", size = 14782, upload-time = "2025-05-21T13:42:04.007Z" }, +] + +[[package]] +name = "rlp" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "eth-utils" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1b/2d/439b0728a92964a04d9c88ea1ca9ebb128893fbbd5834faa31f987f2fd4c/rlp-4.1.0.tar.gz", hash = "sha256:be07564270a96f3e225e2c107db263de96b5bc1f27722d2855bd3459a08e95a9", size = 33429, upload-time = "2025-02-04T22:05:59.089Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/fb/e4c0ced9893b84ac95b7181d69a9786ce5879aeb3bbbcbba80a164f85d6a/rlp-4.1.0-py3-none-any.whl", hash = "sha256:8eca394c579bad34ee0b937aecb96a57052ff3716e19c7a578883e767bc5da6f", size = 19973, upload-time = "2025-02-04T22:05:57.05Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.27.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e9/dd/2c0cbe774744272b0ae725f44032c77bdcab6e8bcf544bffa3b6e70c8dba/rpds_py-0.27.1.tar.gz", hash = "sha256:26a1c73171d10b7acccbded82bf6a586ab8203601e565badc74bbbf8bc5a10f8", size = 27479, upload-time = "2025-08-27T12:16:36.024Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/fe/38de28dee5df58b8198c743fe2bea0c785c6d40941b9950bac4cdb71a014/rpds_py-0.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:ae2775c1973e3c30316892737b91f9283f9908e3cc7625b9331271eaaed7dc90", size = 361887, upload-time = "2025-08-27T12:13:10.233Z" }, + { url = "https://files.pythonhosted.org/packages/7c/9a/4b6c7eedc7dd90986bf0fab6ea2a091ec11c01b15f8ba0a14d3f80450468/rpds_py-0.27.1-cp312-cp312-macosx_11_0_arm64.whl", 
hash = "sha256:2643400120f55c8a96f7c9d858f7be0c88d383cd4653ae2cf0d0c88f668073e5", size = 345795, upload-time = "2025-08-27T12:13:11.65Z" }, + { url = "https://files.pythonhosted.org/packages/6f/0e/e650e1b81922847a09cca820237b0edee69416a01268b7754d506ade11ad/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16323f674c089b0360674a4abd28d5042947d54ba620f72514d69be4ff64845e", size = 385121, upload-time = "2025-08-27T12:13:13.008Z" }, + { url = "https://files.pythonhosted.org/packages/1b/ea/b306067a712988e2bff00dcc7c8f31d26c29b6d5931b461aa4b60a013e33/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9a1f4814b65eacac94a00fc9a526e3fdafd78e439469644032032d0d63de4881", size = 398976, upload-time = "2025-08-27T12:13:14.368Z" }, + { url = "https://files.pythonhosted.org/packages/2c/0a/26dc43c8840cb8fe239fe12dbc8d8de40f2365e838f3d395835dde72f0e5/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:7ba32c16b064267b22f1850a34051121d423b6f7338a12b9459550eb2096e7ec", size = 525953, upload-time = "2025-08-27T12:13:15.774Z" }, + { url = "https://files.pythonhosted.org/packages/22/14/c85e8127b573aaf3a0cbd7fbb8c9c99e735a4a02180c84da2a463b766e9e/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5c20f33fd10485b80f65e800bbe5f6785af510b9f4056c5a3c612ebc83ba6cb", size = 407915, upload-time = "2025-08-27T12:13:17.379Z" }, + { url = "https://files.pythonhosted.org/packages/ed/7b/8f4fee9ba1fb5ec856eb22d725a4efa3deb47f769597c809e03578b0f9d9/rpds_py-0.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:466bfe65bd932da36ff279ddd92de56b042f2266d752719beb97b08526268ec5", size = 386883, upload-time = "2025-08-27T12:13:18.704Z" }, + { url = "https://files.pythonhosted.org/packages/86/47/28fa6d60f8b74fcdceba81b272f8d9836ac0340570f68f5df6b41838547b/rpds_py-0.27.1-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:41e532bbdcb57c92ba3be62c42e9f096431b4cf478da9bc3bc6ce5c38ab7ba7a", size = 405699, upload-time = "2025-08-27T12:13:20.089Z" }, + { url = "https://files.pythonhosted.org/packages/d0/fd/c5987b5e054548df56953a21fe2ebed51fc1ec7c8f24fd41c067b68c4a0a/rpds_py-0.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f149826d742b406579466283769a8ea448eed82a789af0ed17b0cd5770433444", size = 423713, upload-time = "2025-08-27T12:13:21.436Z" }, + { url = "https://files.pythonhosted.org/packages/ac/ba/3c4978b54a73ed19a7d74531be37a8bcc542d917c770e14d372b8daea186/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:80c60cfb5310677bd67cb1e85a1e8eb52e12529545441b43e6f14d90b878775a", size = 562324, upload-time = "2025-08-27T12:13:22.789Z" }, + { url = "https://files.pythonhosted.org/packages/b5/6c/6943a91768fec16db09a42b08644b960cff540c66aab89b74be6d4a144ba/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7ee6521b9baf06085f62ba9c7a3e5becffbc32480d2f1b351559c001c38ce4c1", size = 593646, upload-time = "2025-08-27T12:13:24.122Z" }, + { url = "https://files.pythonhosted.org/packages/11/73/9d7a8f4be5f4396f011a6bb7a19fe26303a0dac9064462f5651ced2f572f/rpds_py-0.27.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a512c8263249a9d68cac08b05dd59d2b3f2061d99b322813cbcc14c3c7421998", size = 558137, upload-time = "2025-08-27T12:13:25.557Z" }, + { url = "https://files.pythonhosted.org/packages/6e/96/6772cbfa0e2485bcceef8071de7821f81aeac8bb45fbfd5542a3e8108165/rpds_py-0.27.1-cp312-cp312-win32.whl", 
hash = "sha256:819064fa048ba01b6dadc5116f3ac48610435ac9a0058bbde98e569f9e785c39", size = 221343, upload-time = "2025-08-27T12:13:26.967Z" }, + { url = "https://files.pythonhosted.org/packages/67/b6/c82f0faa9af1c6a64669f73a17ee0eeef25aff30bb9a1c318509efe45d84/rpds_py-0.27.1-cp312-cp312-win_amd64.whl", hash = "sha256:d9199717881f13c32c4046a15f024971a3b78ad4ea029e8da6b86e5aa9cf4594", size = 232497, upload-time = "2025-08-27T12:13:28.326Z" }, + { url = "https://files.pythonhosted.org/packages/e1/96/2817b44bd2ed11aebacc9251da03689d56109b9aba5e311297b6902136e2/rpds_py-0.27.1-cp312-cp312-win_arm64.whl", hash = "sha256:33aa65b97826a0e885ef6e278fbd934e98cdcfed80b63946025f01e2f5b29502", size = 222790, upload-time = "2025-08-27T12:13:29.71Z" }, +] + +[[package]] +name = "ruff" +version = "0.11.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ed/da/9c6f995903b4d9474b39da91d2d626659af3ff1eeb43e9ae7c119349dba6/ruff-0.11.13.tar.gz", hash = "sha256:26fa247dc68d1d4e72c179e08889a25ac0c7ba4d78aecfc835d49cbfd60bf514", size = 4282054, upload-time = "2025-06-05T21:00:15.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/ce/a11d381192966e0b4290842cc8d4fac7dc9214ddf627c11c1afff87da29b/ruff-0.11.13-py3-none-linux_armv6l.whl", hash = "sha256:4bdfbf1240533f40042ec00c9e09a3aade6f8c10b6414cf11b519488d2635d46", size = 10292516, upload-time = "2025-06-05T20:59:32.944Z" }, + { url = "https://files.pythonhosted.org/packages/78/db/87c3b59b0d4e753e40b6a3b4a2642dfd1dcaefbff121ddc64d6c8b47ba00/ruff-0.11.13-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:aef9c9ed1b5ca28bb15c7eac83b8670cf3b20b478195bd49c8d756ba0a36cf48", size = 11106083, upload-time = "2025-06-05T20:59:37.03Z" }, + { url = "https://files.pythonhosted.org/packages/77/79/d8cec175856ff810a19825d09ce700265f905c643c69f45d2b737e4a470a/ruff-0.11.13-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53b15a9dfdce029c842e9a5aebc3855e9ab7771395979ff85b7c1dedb53ddc2b", size = 10436024, upload-time = "2025-06-05T20:59:39.741Z" }, + { url = "https://files.pythonhosted.org/packages/8b/5b/f6d94f2980fa1ee854b41568368a2e1252681b9238ab2895e133d303538f/ruff-0.11.13-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ab153241400789138d13f362c43f7edecc0edfffce2afa6a68434000ecd8f69a", size = 10646324, upload-time = "2025-06-05T20:59:42.185Z" }, + { url = "https://files.pythonhosted.org/packages/6c/9c/b4c2acf24ea4426016d511dfdc787f4ce1ceb835f3c5fbdbcb32b1c63bda/ruff-0.11.13-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6c51f93029d54a910d3d24f7dd0bb909e31b6cd989a5e4ac513f4eb41629f0dc", size = 10174416, upload-time = "2025-06-05T20:59:44.319Z" }, + { url = "https://files.pythonhosted.org/packages/f3/10/e2e62f77c65ede8cd032c2ca39c41f48feabedb6e282bfd6073d81bb671d/ruff-0.11.13-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1808b3ed53e1a777c2ef733aca9051dc9bf7c99b26ece15cb59a0320fbdbd629", size = 11724197, upload-time = "2025-06-05T20:59:46.935Z" }, + { url = "https://files.pythonhosted.org/packages/bb/f0/466fe8469b85c561e081d798c45f8a1d21e0b4a5ef795a1d7f1a9a9ec182/ruff-0.11.13-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:d28ce58b5ecf0f43c1b71edffabe6ed7f245d5336b17805803312ec9bc665933", size = 12511615, upload-time = "2025-06-05T20:59:49.534Z" }, + { url = 
"https://files.pythonhosted.org/packages/17/0e/cefe778b46dbd0cbcb03a839946c8f80a06f7968eb298aa4d1a4293f3448/ruff-0.11.13-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55e4bc3a77842da33c16d55b32c6cac1ec5fb0fbec9c8c513bdce76c4f922165", size = 12117080, upload-time = "2025-06-05T20:59:51.654Z" }, + { url = "https://files.pythonhosted.org/packages/5d/2c/caaeda564cbe103bed145ea557cb86795b18651b0f6b3ff6a10e84e5a33f/ruff-0.11.13-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:633bf2c6f35678c56ec73189ba6fa19ff1c5e4807a78bf60ef487b9dd272cc71", size = 11326315, upload-time = "2025-06-05T20:59:54.469Z" }, + { url = "https://files.pythonhosted.org/packages/75/f0/782e7d681d660eda8c536962920c41309e6dd4ebcea9a2714ed5127d44bd/ruff-0.11.13-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ffbc82d70424b275b089166310448051afdc6e914fdab90e08df66c43bb5ca9", size = 11555640, upload-time = "2025-06-05T20:59:56.986Z" }, + { url = "https://files.pythonhosted.org/packages/5d/d4/3d580c616316c7f07fb3c99dbecfe01fbaea7b6fd9a82b801e72e5de742a/ruff-0.11.13-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:4a9ddd3ec62a9a89578c85842b836e4ac832d4a2e0bfaad3b02243f930ceafcc", size = 10507364, upload-time = "2025-06-05T20:59:59.154Z" }, + { url = "https://files.pythonhosted.org/packages/5a/dc/195e6f17d7b3ea6b12dc4f3e9de575db7983db187c378d44606e5d503319/ruff-0.11.13-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d237a496e0778d719efb05058c64d28b757c77824e04ffe8796c7436e26712b7", size = 10141462, upload-time = "2025-06-05T21:00:01.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/8e/39a094af6967faa57ecdeacb91bedfb232474ff8c3d20f16a5514e6b3534/ruff-0.11.13-py3-none-musllinux_1_2_i686.whl", hash = "sha256:26816a218ca6ef02142343fd24c70f7cd8c5aa6c203bca284407adf675984432", size = 11121028, upload-time = "2025-06-05T21:00:04.06Z" }, + { url = "https://files.pythonhosted.org/packages/5a/c0/b0b508193b0e8a1654ec683ebab18d309861f8bd64e3a2f9648b80d392cb/ruff-0.11.13-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:51c3f95abd9331dc5b87c47ac7f376db5616041173826dfd556cfe3d4977f492", size = 11602992, upload-time = "2025-06-05T21:00:06.249Z" }, + { url = "https://files.pythonhosted.org/packages/7c/91/263e33ab93ab09ca06ce4f8f8547a858cc198072f873ebc9be7466790bae/ruff-0.11.13-py3-none-win32.whl", hash = "sha256:96c27935418e4e8e77a26bb05962817f28b8ef3843a6c6cc49d8783b5507f250", size = 10474944, upload-time = "2025-06-05T21:00:08.459Z" }, + { url = "https://files.pythonhosted.org/packages/46/f4/7c27734ac2073aae8efb0119cae6931b6fb48017adf048fdf85c19337afc/ruff-0.11.13-py3-none-win_amd64.whl", hash = "sha256:29c3189895a8a6a657b7af4e97d330c8a3afd2c9c8f46c81e2fc5a31866517e3", size = 11548669, upload-time = "2025-06-05T21:00:11.147Z" }, + { url = "https://files.pythonhosted.org/packages/ec/bf/b273dd11673fed8a6bd46032c0ea2a04b2ac9bfa9c628756a5856ba113b0/ruff-0.11.13-py3-none-win_arm64.whl", hash = "sha256:b4385285e9179d608ff1d2fb9922062663c658605819a6876d8beef0c30b7f3b", size = 10683928, upload-time = "2025-06-05T21:00:13.758Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/05/d52bf1e65044b4e5e27d4e63e8d1579dbdec54fce685908ae09bc3720030/s3transfer-0.13.1.tar.gz", hash = "sha256:c3fdba22ba1bd367922f27ec8032d6a1cf5f10c934fb5d68cf60fd5a23d936cf", size = 150589, upload-time = 
"2025-07-18T19:22:42.31Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6d/4f/d073e09df851cfa251ef7840007d04db3293a0482ce607d2b993926089be/s3transfer-0.13.1-py3-none-any.whl", hash = "sha256:a981aa7429be23fe6dfc13e80e4020057cbab622b08c0315288758d67cabc724", size = 85308, upload-time = "2025-07-18T19:22:40.947Z" }, +] + +[[package]] +name = "sentry-sdk" +version = "2.35.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bd/79/0ecb942f3f1ad26c40c27f81ff82392d85c01d26a45e3c72c2b37807e680/sentry_sdk-2.35.2.tar.gz", hash = "sha256:e9e8f3c795044beb59f2c8f4c6b9b0f9779e5e604099882df05eec525e782cc6", size = 343377, upload-time = "2025-09-01T11:00:58.633Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/91/a43308dc82a0e32d80cd0dfdcfca401ecbd0f431ab45f24e48bb97b7800d/sentry_sdk-2.35.2-py2.py3-none-any.whl", hash = "sha256:38c98e3cbb620dd3dd80a8d6e39c753d453dd41f8a9df581b0584c19a52bc926", size = 363975, upload-time = "2025-09-01T11:00:56.574Z" }, +] + +[package.optional-dependencies] +fastapi = [ + { name = "fastapi" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "slack-sdk" +version = "3.36.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/1e/bbf7fdd00306f097ddb839c23628b7e271128cc8f584b9cae8f704b3924e/slack_sdk-3.36.0.tar.gz", hash = "sha256:8586022bdbdf9f8f8d32f394540436c53b1e7c8da9d21e1eab4560ba70cfcffa", size = 233382, upload-time = "2025-07-09T20:58:22.838Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/70/9a/380d20856d9ea39fbc4d3bb66f076b0d72035ebe873eb05fc88ebee4125f/slack_sdk-3.36.0-py2.py3-none-any.whl", hash = "sha256:6c96887d7175fc1b0b2777b73bb65f39b5b8bee9bd8acfec071d64014f9e2d10", size = 293949, upload-time = "2025-07-09T20:58:21.233Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "solana" +version = "0.36.9" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "construct-typing" }, + { name = "httpx" }, + { name = "solders" }, + { name = "typing-extensions" }, + { name = "websockets" }, 
+] +sdist = { url = "https://files.pythonhosted.org/packages/8c/e0/ce762b6763e3a0f8a5ccecbf695d65ef54b6f874ad5f58ce5cdcaba224f1/solana-0.36.9.tar.gz", hash = "sha256:f702f6177337c67a982909ef54ef3abce5e795b8cd93edb045bedfa4d13c20c5", size = 52722, upload-time = "2025-08-09T16:23:25.307Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ac/11/d5e5d02200ca85b615da39078806b377156b67b2093c8bc08a1b9c293070/solana-0.36.9-py3-none-any.whl", hash = "sha256:e05824f91f95abe5a687914976e8bc78986386156f2106108c696db998c3c542", size = 62882, upload-time = "2025-08-09T16:23:24.149Z" }, +] + +[[package]] +name = "solders" +version = "0.26.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jsonalias" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/87/96/23ad2e43e2676b78834064fe051e3db3ce1899336ecd4797f92fcd06113a/solders-0.26.0.tar.gz", hash = "sha256:057533892d6fa432c1ce1e2f5e3428802964666c10b57d3d1bcaab86295f046c", size = 181123, upload-time = "2025-02-18T19:23:57.734Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a5/ce/58bbb4d2c696e770cdd37e5f6dc2891ef7610c0c085bf400f9c42dcff1ad/solders-0.26.0-cp37-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:9c1a0ef5daa1a05934af5fb6e7e32eab7c42cede406c80067fee006f461ffc4a", size = 24344472, upload-time = "2025-02-18T19:23:30.273Z" }, + { url = "https://files.pythonhosted.org/packages/5a/35/221cec0e5900c2202833e7e9110c3405a2d96ed25e110b247f88b8782e29/solders-0.26.0-cp37-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1b964efbd7c0b38aef3bf4293ea5938517ae649b9a23e7cd147d889931775aab", size = 6674734, upload-time = "2025-02-18T19:23:35.15Z" }, + { url = "https://files.pythonhosted.org/packages/41/33/d17b7dbc92672351d59fc65cdb93b8924fc682deba09f6d96f25440187ae/solders-0.26.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36e6a769c5298b887b7588edb171d93709a89302aef75913fe893d11c653739d", size = 13472961, upload-time = "2025-02-18T19:23:38.582Z" }, + { url = "https://files.pythonhosted.org/packages/bb/e7/533367d815ab000587ccc37d89e154132f63347f02dcaaac5df72bd851de/solders-0.26.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:b3cc55b971ec6ed1b4466fa7e7e09eee9baba492b8cd9e3204e3e1a0c5a0c4aa", size = 6886198, upload-time = "2025-02-18T19:23:41.453Z" }, + { url = "https://files.pythonhosted.org/packages/52/e0/ab41ab3df5fdf3b0e55613be93a43c2fe58b15a6ea8ceca26d3fba02e3c6/solders-0.26.0-cp37-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3e3973074c17265921c70246a17bcf80972c5b96a3e1ed7f5049101f11865092", size = 7319170, upload-time = "2025-02-18T19:23:43.758Z" }, + { url = "https://files.pythonhosted.org/packages/7d/34/5174ce592607e0ac020aff203217f2f113a55eec49af3db12945fea42d89/solders-0.26.0-cp37-abi3-musllinux_1_2_i686.whl", hash = "sha256:59b52419452602f697e659199a25acacda8365971c376ef3c0687aecdd929e07", size = 7134977, upload-time = "2025-02-18T19:23:46.157Z" }, + { url = "https://files.pythonhosted.org/packages/ba/5e/822faabda0d473c29bdf59fe8869a411fd436af8ca6f5d6e89f7513f682f/solders-0.26.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5946ec3f2a340afa9ce5c2b8ab628ae1dea2ad2235551b1297cafdd7e3e5c51a", size = 6984222, upload-time = "2025-02-18T19:23:49.429Z" }, + { url = "https://files.pythonhosted.org/packages/23/e8/dc992f677762ea2de44b7768120d95887ef39fab10d6f29fb53e6a9882c1/solders-0.26.0-cp37-abi3-win_amd64.whl", hash = 
"sha256:5466616610170aab08c627ae01724e425bcf90085bc574da682e9f3bd954900b", size = 5480492, upload-time = "2025-02-18T19:23:53.285Z" }, +] + +[[package]] +name = "soupsieve" +version = "2.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.43" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/bc/d59b5d97d27229b0e009bd9098cd81af71c2fa5549c580a0a67b9bed0496/sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417", size = 9762949, upload-time = "2025-08-11T14:24:58.438Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/db/20c78f1081446095450bdc6ee6cc10045fce67a8e003a5876b6eaafc5cc4/sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24", size = 2134891, upload-time = "2025-08-11T15:51:13.019Z" }, + { url = "https://files.pythonhosted.org/packages/45/0a/3d89034ae62b200b4396f0f95319f7d86e9945ee64d2343dcad857150fa2/sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83", size = 2123061, upload-time = "2025-08-11T15:51:14.319Z" }, + { url = "https://files.pythonhosted.org/packages/cb/10/2711f7ff1805919221ad5bee205971254845c069ee2e7036847103ca1e4c/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9", size = 3320384, upload-time = "2025-08-11T15:52:35.088Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0e/3d155e264d2ed2778484006ef04647bc63f55b3e2d12e6a4f787747b5900/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48", size = 3329648, upload-time = "2025-08-11T15:56:34.153Z" }, + { url = "https://files.pythonhosted.org/packages/5b/81/635100fb19725c931622c673900da5efb1595c96ff5b441e07e3dd61f2be/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687", size = 3258030, upload-time = "2025-08-11T15:52:36.933Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ed/a99302716d62b4965fded12520c1cbb189f99b17a6d8cf77611d21442e47/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe", size = 3294469, upload-time = "2025-08-11T15:56:35.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/5d/a2/3a11b06715149bf3310b55a98b5c1e84a42cfb949a7b800bc75cb4e33abc/sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d", size = 2098906, upload-time = "2025-08-11T15:55:00.645Z" }, + { url = "https://files.pythonhosted.org/packages/bc/09/405c915a974814b90aa591280623adc6ad6b322f61fd5cff80aeaef216c9/sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a", size = 2126260, upload-time = "2025-08-11T15:55:02.965Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d9/13bdde6521f322861fab67473cec4b1cc8999f3871953531cf61945fad92/sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc", size = 1924759, upload-time = "2025-08-11T15:39:53.024Z" }, +] + +[package.optional-dependencies] +asyncio = [ + { name = "greenlet" }, +] + +[[package]] +name = "sse-starlette" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/6f/22ed6e33f8a9e76ca0a412405f31abb844b779d52c5f96660766edcd737c/sse_starlette-3.0.2.tar.gz", hash = "sha256:ccd60b5765ebb3584d0de2d7a6e4f745672581de4f5005ab31c3a25d10b52b3a", size = 20985, upload-time = "2025-07-27T09:07:44.565Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ef/10/c78f463b4ef22eef8491f218f692be838282cd65480f6e423d7730dfd1fb/sse_starlette-3.0.2-py3-none-any.whl", hash = "sha256:16b7cbfddbcd4eaca11f7b586f3b8a080f1afe952c15813455b162edea619e5a", size = 11297, upload-time = "2025-07-27T09:07:43.268Z" }, +] + +[[package]] +name = "starlette" +version = "0.47.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/15/b9/cc3017f9a9c9b6e27c5106cc10cc7904653c3eec0729793aec10479dd669/starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9", size = 2584144, upload-time = "2025-08-24T13:36:42.122Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991, upload-time = "2025-08-24T13:36:40.887Z" }, +] + +[[package]] +name = "storage3" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecation" }, + { name = "httpx", extra = ["http2"] }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b9/e2/280fe75f65e7a3ca680b7843acfc572a63aa41230e3d3c54c66568809c85/storage3-0.12.1.tar.gz", hash = "sha256:32ea8f5eb2f7185c2114a4f6ae66d577722e32503f0a30b56e7ed5c7f13e6b48", size = 10198, upload-time = "2025-08-05T18:09:11.989Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/3b/c5f8709fc5349928e591fee47592eeff78d29a7d75b097f96a4e01de028d/storage3-0.12.1-py3-none-any.whl", hash = "sha256:9da77fd4f406b019fdcba201e9916aefbf615ef87f551253ce427d8136459a34", size = 18420, upload-time = "2025-08-05T18:09:10.365Z" }, +] + +[[package]] +name = "strenum" +version = "0.4.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/85/ad/430fb60d90e1d112a62ff57bdd1f286ec73a2a0331272febfddd21f330e1/StrEnum-0.4.15.tar.gz", hash = "sha256:878fb5ab705442070e4dd1929bb5e2249511c0bcf2b0eeacf3bcd80875c82eff", size = 23384, upload-time = "2023-06-29T22:02:58.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/69/297302c5f5f59c862faa31e6cb9a4cd74721cd1e052b38e464c5b402df8b/StrEnum-0.4.15-py3-none-any.whl", hash = "sha256:a30cda4af7cc6b5bf52c8055bc4bf4b2b6b14a93b574626da33df53cf7740659", size = 8851, upload-time = "2023-06-29T22:02:56.947Z" }, +] + +[[package]] +name = "supabase" +version = "2.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gotrue" }, + { name = "httpx" }, + { name = "postgrest" }, + { name = "realtime" }, + { name = "storage3" }, + { name = "supafunc" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/93/335b91e8d09a95a337f051f84e85495f7732400f10c1bcb698a7571f8f1c/supabase-2.16.0.tar.gz", hash = "sha256:98f3810158012d4ec0e3083f2e5515f5e10b32bd71e7d458662140e963c1d164", size = 14595, upload-time = "2025-06-23T16:09:29.504Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/75/2ab71e6605d20a740ff041c6176a328cfaa3fcee0dd0db885e081d98df06/supabase-2.16.0-py3-none-any.whl", hash = "sha256:99065caab3d90a56650bf39fbd0e49740995da3738ab28706c61bd7f2401db55", size = 17713, upload-time = "2025-06-23T16:09:28.299Z" }, +] + +[[package]] +name = "supafunc" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "httpx", extra = ["http2"] }, + { name = "strenum" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/a9/cd7c89972d8638f3b658126b2f580fe13bcd7235f8abfbdd9da70ebb2933/supafunc-0.10.2.tar.gz", hash = "sha256:45e4d500854167c261515c43f7a363320e0a928118182fe8932adefddeddb545", size = 5033, upload-time = "2025-08-08T15:58:28.626Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/d3/784314aa18185f97c4b998a0384c7b3c021637a93cef20247f15772f0c84/supafunc-0.10.2-py3-none-any.whl", hash = "sha256:547a2c115b15319c78fc84460f19cb5ea6e72597f7573a3498f4db087787e0fd", size = 8444, upload-time = "2025-08-08T15:58:27.154Z" }, +] + +[[package]] +name = "telegramify-markdown" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mistletoe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c5/a7/2350b24d8939d4f56f31f09dbcf0ef13c4c3cab9c4d91b01d933447d3170/telegramify_markdown-0.5.1.tar.gz", hash = "sha256:a2c4ca337219607dce13883bdc30bf4faf07bf37e3f748594e8b10e1b5708fae", size = 35700, upload-time = "2025-04-08T07:56:03.35Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/80/14/859fb616d12ebc472c5cb6ff65c0021e8f81c3e614d3ada2b3ce60d3f655/telegramify_markdown-0.5.1-py3-none-any.whl", hash = "sha256:a757b9a0f0d681ba7e29638deabc8decc634626bee359d95effaa505a86b756d", size = 32062, upload-time = "2025-04-08T07:56:02.096Z" }, +] + +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "tiktoken" +version = "0.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "regex" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/86/ad0155a37c4f310935d5ac0b1ccf9bdb635dcb906e0a9a26b616dd55825a/tiktoken-0.11.0.tar.gz", hash = "sha256:3c518641aee1c52247c2b97e74d8d07d780092af79d5911a6ab5e79359d9b06a", size = 37648, upload-time = "2025-08-08T23:58:08.495Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/9e/eceddeffc169fc75fe0fd4f38471309f11cb1906f9b8aa39be4f5817df65/tiktoken-0.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:fd9e6b23e860973cf9526544e220b223c60badf5b62e80a33509d6d40e6c8f5d", size = 1055199, upload-time = "2025-08-08T23:57:45.076Z" }, + { url = "https://files.pythonhosted.org/packages/4f/cf/5f02bfefffdc6b54e5094d2897bc80efd43050e5b09b576fd85936ee54bf/tiktoken-0.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6a76d53cee2da71ee2731c9caa747398762bda19d7f92665e882fef229cb0b5b", size = 996655, upload-time = "2025-08-08T23:57:46.304Z" }, + { url = "https://files.pythonhosted.org/packages/65/8e/c769b45ef379bc360c9978c4f6914c79fd432400a6733a8afc7ed7b0726a/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6ef72aab3ea240646e642413cb363b73869fed4e604dcfd69eec63dc54d603e8", size = 1128867, upload-time = "2025-08-08T23:57:47.438Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2d/4d77f6feb9292bfdd23d5813e442b3bba883f42d0ac78ef5fdc56873f756/tiktoken-0.11.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f929255c705efec7a28bf515e29dc74220b2f07544a8c81b8d69e8efc4578bd", size = 1183308, upload-time = "2025-08-08T23:57:48.566Z" }, + { url = "https://files.pythonhosted.org/packages/7a/65/7ff0a65d3bb0fc5a1fb6cc71b03e0f6e71a68c5eea230d1ff1ba3fd6df49/tiktoken-0.11.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:61f1d15822e4404953d499fd1dcc62817a12ae9fb1e4898033ec8fe3915fdf8e", size = 1244301, upload-time = "2025-08-08T23:57:49.642Z" }, + { url = "https://files.pythonhosted.org/packages/f5/6e/5b71578799b72e5bdcef206a214c3ce860d999d579a3b56e74a6c8989ee2/tiktoken-0.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:45927a71ab6643dfd3ef57d515a5db3d199137adf551f66453be098502838b0f", size = 884282, upload-time = "2025-08-08T23:57:50.759Z" }, +] + +[[package]] +name = "toolz" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/0b/d80dfa675bf592f636d1ea0b835eab4ec8df6e9415d8cfd766df54456123/toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02", size = 66790, upload-time = "2024-10-04T16:17:04.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/98/eb27cc78ad3af8e302c9d8ff4977f5026676e130d28dd7578132a457170c/toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236", size = 56383, upload-time = "2024-10-04T16:17:01.533Z" }, +] + +[[package]] +name = "tox" +version = "4.23.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "chardet" }, + { name = 
"colorama" }, + { name = "filelock" }, + { name = "packaging" }, + { name = "platformdirs" }, + { name = "pluggy" }, + { name = "pyproject-api" }, + { name = "virtualenv" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1f/86/32b10f91b4b975a37ac402b0f9fa016775088e0565c93602ba0b3c729ce8/tox-4.23.2.tar.gz", hash = "sha256:86075e00e555df6e82e74cfc333917f91ecb47ffbc868dcafbd2672e332f4a2c", size = 189998, upload-time = "2024-10-22T14:29:04.46Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/c0/124b73d01c120e917383bc6c53ebc34efdf7243faa9fca64d105c94cf2ab/tox-4.23.2-py3-none-any.whl", hash = "sha256:452bc32bb031f2282881a2118923176445bac783ab97c874b8770ab4c3b76c38", size = 166758, upload-time = "2024-10-22T14:29:02.087Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, +] + +[[package]] +name = "trustcall" +version = "0.0.39" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dydantic" }, + { name = "jsonpatch" }, + { name = "langgraph" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2b/72/4cdb54a31952827e8b58e11ea286bbfe2d3aa0ffb77a2f87dbc1c7ea77d3/trustcall-0.0.39.tar.gz", hash = "sha256:ec315818224501b9537ce6b7618dbc21be41210c6e8f2e239169a5a00912cd6e", size = 38637, upload-time = "2025-04-14T22:02:50.857Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/3a/58de925a104ce554fc250b833fe76401c7822aa8d65f2002cb53195e6c64/trustcall-0.0.39-py3-none-any.whl", hash = "sha256:d7da42e0bba816c0539b2936dfed90ffb3ea8d789e548e73865d416f8ac4ee64", size = 30073, upload-time = "2025-04-14T22:02:49.402Z" }, +] + +[[package]] +name = "tweepy" +version = "4.16.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "oauthlib" }, + { name = "requests" }, + { name = "requests-oauthlib" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6e/45/a73bb812b1817247d3f79b3b9a4784ab93a081853b697e87428caa8c287b/tweepy-4.16.0.tar.gz", hash = "sha256:1d95cbdc50bf6353a387f881f2584eaf60d14e00dbbdd8872a73de79c66878e3", size = 87646, upload-time = "2025-06-22T01:17:51.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/7c/3375cd1fbefcb8ead580fe324b1b6dcdc21aabf51562ee6def7266fcf363/tweepy-4.16.0-py3-none-any.whl", hash = "sha256:48d1a1eb311d2c4b8990abcfa6f9fa2b2ad61be05c723b1a9b4f242656badae2", size = 98843, upload-time = "2025-06-22T01:17:49.823Z" }, +] + +[package.optional-dependencies] +async = [ + { name = "aiohttp" }, + { name = "async-lru" }, +] + +[[package]] +name = "types-requests" +version = "2.32.4.20250809" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "urllib3" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/ed/b0/9355adb86ec84d057fea765e4c49cce592aaf3d5117ce5609a95a7fc3dac/types_requests-2.32.4.20250809.tar.gz", hash = "sha256:d8060de1c8ee599311f56ff58010fb4902f462a1470802cf9f6ed27bc46c4df3", size = 23027, upload-time = "2025-08-09T03:17:10.664Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2b/6f/ec0012be842b1d888d46884ac5558fd62aeae1f0ec4f7a581433d890d4b5/types_requests-2.32.4.20250809-py3-none-any.whl", hash = "sha256:f73d1832fb519ece02c85b1f09d5f0dd3108938e7d47e7f94bbfa18a6782b163", size = 20644, upload-time = "2025-08-09T03:17:09.716Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.12.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/df/db/f35a00659bc03fec321ba8bce9420de607a1d37f8342eee1863174c69557/typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8", size = 85321, upload-time = "2024-06-07T18:52:15.995Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/9f/ad63fc0248c5379346306f8668cda6e2e2e9c95e01216d2b8ffd9ff037d0/typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d", size = 37438, upload-time = "2024-06-07T18:52:13.582Z" }, +] + +[[package]] +name = "typing-inspect" +version = "0.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mypy-extensions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/74/1789779d91f1961fa9438e9a8710cdae6bd138c80d7303996933d117264a/typing_inspect-0.9.0.tar.gz", hash = "sha256:b23fc42ff6f6ef6954e4852c1fb512cdd18dbea03134f91f856a95ccc9461f78", size = 13825, upload-time = "2023-05-24T20:25:47.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/f3/107a22063bf27bdccf2024833d3445f4eea42b2e598abfbd46f6a63b6cb0/typing_inspect-0.9.0-py3-none-any.whl", hash = "sha256:9ee6fc59062311ef8547596ab6b955e1b8aa46242d854bfc78f4f6b0eff35f9f", size = 8827, upload-time = "2023-05-24T20:25:45.287Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = 
"sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "tzlocal" +version = "5.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "tzdata", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268, upload-time = "2024-12-22T07:47:30.032Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, +] + +[[package]] +name = "virtualenv" +version = "20.28.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "distlib" }, + { name = "filelock" }, + { name = "platformdirs" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/50/39/689abee4adc85aad2af8174bb195a819d0be064bf55fcc73b49d2b28ae77/virtualenv-20.28.1.tar.gz", hash = "sha256:5d34ab240fdb5d21549b76f9e8ff3af28252f5499fb6d6f031adac4e5a8c5329", size = 7650532, upload-time = "2025-01-03T01:56:53.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/8f/dfb257ca6b4e27cb990f1631142361e4712badab8e3ca8dc134d96111515/virtualenv-20.28.1-py3-none-any.whl", hash = "sha256:412773c85d4dab0409b83ec36f7a6499e72eaf08c80e81e9576bca61831c71cb", size = 4276719, upload-time = "2025-01-03T01:56:50.498Z" }, +] + +[[package]] +name = "web3" +version = "7.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aiohttp" }, + { name = "eth-abi" }, + { name = "eth-account" }, + { name = "eth-hash", extra = ["pycryptodome"] }, + { name = "eth-typing" }, + { name = "eth-utils" }, + { name = "hexbytes" }, + { name = "pydantic" }, + { name = 
"pyunormalize" }, + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "types-requests" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/19/c1e213dd87ead2ace55ff1dd179df6050bcf5d9006440c9153969c7d6863/web3-7.10.0.tar.gz", hash = "sha256:0cace05ea14f800a4497649ecd99332ca4e85c8a90ea577e05ae909cb08902b9", size = 2193725, upload-time = "2025-03-27T17:02:27.919Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/c5/a8e25e3ff51c7cd6d2bdecf75da2afb2923b29eba28e5dfe4fde72ad2322/web3-7.10.0-py3-none-any.whl", hash = "sha256:06fcab920554450e9f7d108da5e6b9d29c0d1a981a59a5551cc82d2cb2233b34", size = 1365880, upload-time = "2025-03-27T17:02:25.04Z" }, +] + +[[package]] +name = "websockets" +version = "14.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/54/8359678c726243d19fae38ca14a334e740782336c9f19700858c4eb64a1e/websockets-14.2.tar.gz", hash = "sha256:5059ed9c54945efb321f097084b4c7e52c246f2c869815876a69d1efc4ad6eb5", size = 164394, upload-time = "2025-01-19T21:00:56.431Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/81/04f7a397653dc8bec94ddc071f34833e8b99b13ef1a3804c149d59f92c18/websockets-14.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1f20522e624d7ffbdbe259c6b6a65d73c895045f76a93719aa10cd93b3de100c", size = 163096, upload-time = "2025-01-19T20:59:29.763Z" }, + { url = "https://files.pythonhosted.org/packages/ec/c5/de30e88557e4d70988ed4d2eabd73fd3e1e52456b9f3a4e9564d86353b6d/websockets-14.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:647b573f7d3ada919fd60e64d533409a79dcf1ea21daeb4542d1d996519ca967", size = 160758, upload-time = "2025-01-19T20:59:32.095Z" }, + { url = "https://files.pythonhosted.org/packages/e5/8c/d130d668781f2c77d106c007b6c6c1d9db68239107c41ba109f09e6c218a/websockets-14.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6af99a38e49f66be5a64b1e890208ad026cda49355661549c507152113049990", size = 160995, upload-time = "2025-01-19T20:59:33.527Z" }, + { url = "https://files.pythonhosted.org/packages/a6/bc/f6678a0ff17246df4f06765e22fc9d98d1b11a258cc50c5968b33d6742a1/websockets-14.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:091ab63dfc8cea748cc22c1db2814eadb77ccbf82829bac6b2fbe3401d548eda", size = 170815, upload-time = "2025-01-19T20:59:35.837Z" }, + { url = "https://files.pythonhosted.org/packages/d8/b2/8070cb970c2e4122a6ef38bc5b203415fd46460e025652e1ee3f2f43a9a3/websockets-14.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b374e8953ad477d17e4851cdc66d83fdc2db88d9e73abf755c94510ebddceb95", size = 169759, upload-time = "2025-01-19T20:59:38.216Z" }, + { url = "https://files.pythonhosted.org/packages/81/da/72f7caabd94652e6eb7e92ed2d3da818626e70b4f2b15a854ef60bf501ec/websockets-14.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a39d7eceeea35db85b85e1169011bb4321c32e673920ae9c1b6e0978590012a3", size = 170178, upload-time = "2025-01-19T20:59:40.423Z" }, + { url = "https://files.pythonhosted.org/packages/31/e0/812725b6deca8afd3a08a2e81b3c4c120c17f68c9b84522a520b816cda58/websockets-14.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0a6f3efd47ffd0d12080594f434faf1cd2549b31e54870b8470b28cc1d3817d9", size = 170453, upload-time = "2025-01-19T20:59:41.996Z" }, + { url = 
"https://files.pythonhosted.org/packages/66/d3/8275dbc231e5ba9bb0c4f93144394b4194402a7a0c8ffaca5307a58ab5e3/websockets-14.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:065ce275e7c4ffb42cb738dd6b20726ac26ac9ad0a2a48e33ca632351a737267", size = 169830, upload-time = "2025-01-19T20:59:44.669Z" }, + { url = "https://files.pythonhosted.org/packages/a3/ae/e7d1a56755ae15ad5a94e80dd490ad09e345365199600b2629b18ee37bc7/websockets-14.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e9d0e53530ba7b8b5e389c02282f9d2aa47581514bd6049d3a7cffe1385cf5fe", size = 169824, upload-time = "2025-01-19T20:59:46.932Z" }, + { url = "https://files.pythonhosted.org/packages/b6/32/88ccdd63cb261e77b882e706108d072e4f1c839ed723bf91a3e1f216bf60/websockets-14.2-cp312-cp312-win32.whl", hash = "sha256:20e6dd0984d7ca3037afcb4494e48c74ffb51e8013cac71cf607fffe11df7205", size = 163981, upload-time = "2025-01-19T20:59:49.228Z" }, + { url = "https://files.pythonhosted.org/packages/b3/7d/32cdb77990b3bdc34a306e0a0f73a1275221e9a66d869f6ff833c95b56ef/websockets-14.2-cp312-cp312-win_amd64.whl", hash = "sha256:44bba1a956c2c9d268bdcdf234d5e5ff4c9b6dc3e300545cbe99af59dda9dcce", size = 164421, upload-time = "2025-01-19T20:59:50.674Z" }, + { url = "https://files.pythonhosted.org/packages/7b/c8/d529f8a32ce40d98309f4470780631e971a5a842b60aec864833b3615786/websockets-14.2-py3-none-any.whl", hash = "sha256:7a6ceec4ea84469f15cf15807a747e9efe57e369c384fa86e022b3bea679b79b", size = 157416, upload-time = "2025-01-19T21:00:54.843Z" }, +] + +[[package]] +name = "xxhash" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/5e/d6e5258d69df8b4ed8c83b6664f2b47d30d2dec551a29ad72a6c69eafd31/xxhash-3.5.0.tar.gz", hash = "sha256:84f2caddf951c9cbf8dc2e22a89d4ccf5d86391ac6418fe81e3c67d0cf60b45f", size = 84241, upload-time = "2024-08-17T09:20:38.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/0e/1bfce2502c57d7e2e787600b31c83535af83746885aa1a5f153d8c8059d6/xxhash-3.5.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:14470ace8bd3b5d51318782cd94e6f94431974f16cb3b8dc15d52f3b69df8e00", size = 31969, upload-time = "2024-08-17T09:18:24.025Z" }, + { url = "https://files.pythonhosted.org/packages/3f/d6/8ca450d6fe5b71ce521b4e5db69622383d039e2b253e9b2f24f93265b52c/xxhash-3.5.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:59aa1203de1cb96dbeab595ded0ad0c0056bb2245ae11fac11c0ceea861382b9", size = 30787, upload-time = "2024-08-17T09:18:25.318Z" }, + { url = "https://files.pythonhosted.org/packages/5b/84/de7c89bc6ef63d750159086a6ada6416cc4349eab23f76ab870407178b93/xxhash-3.5.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08424f6648526076e28fae6ea2806c0a7d504b9ef05ae61d196d571e5c879c84", size = 220959, upload-time = "2024-08-17T09:18:26.518Z" }, + { url = "https://files.pythonhosted.org/packages/fe/86/51258d3e8a8545ff26468c977101964c14d56a8a37f5835bc0082426c672/xxhash-3.5.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:61a1ff00674879725b194695e17f23d3248998b843eb5e933007ca743310f793", size = 200006, upload-time = "2024-08-17T09:18:27.905Z" }, + { url = "https://files.pythonhosted.org/packages/02/0a/96973bd325412feccf23cf3680fd2246aebf4b789122f938d5557c54a6b2/xxhash-3.5.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f2c61bee5844d41c3eb015ac652a0229e901074951ae48581d58bfb2ba01be", size = 428326, upload-time = "2024-08-17T09:18:29.335Z" }, + { url = 
"https://files.pythonhosted.org/packages/11/a7/81dba5010f7e733de88af9555725146fc133be97ce36533867f4c7e75066/xxhash-3.5.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d32a592cac88d18cc09a89172e1c32d7f2a6e516c3dfde1b9adb90ab5df54a6", size = 194380, upload-time = "2024-08-17T09:18:30.706Z" }, + { url = "https://files.pythonhosted.org/packages/fb/7d/f29006ab398a173f4501c0e4977ba288f1c621d878ec217b4ff516810c04/xxhash-3.5.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70dabf941dede727cca579e8c205e61121afc9b28516752fd65724be1355cc90", size = 207934, upload-time = "2024-08-17T09:18:32.133Z" }, + { url = "https://files.pythonhosted.org/packages/8a/6e/6e88b8f24612510e73d4d70d9b0c7dff62a2e78451b9f0d042a5462c8d03/xxhash-3.5.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e5d0ddaca65ecca9c10dcf01730165fd858533d0be84c75c327487c37a906a27", size = 216301, upload-time = "2024-08-17T09:18:33.474Z" }, + { url = "https://files.pythonhosted.org/packages/af/51/7862f4fa4b75a25c3b4163c8a873f070532fe5f2d3f9b3fc869c8337a398/xxhash-3.5.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:3e5b5e16c5a480fe5f59f56c30abdeba09ffd75da8d13f6b9b6fd224d0b4d0a2", size = 203351, upload-time = "2024-08-17T09:18:34.889Z" }, + { url = "https://files.pythonhosted.org/packages/22/61/8d6a40f288f791cf79ed5bb113159abf0c81d6efb86e734334f698eb4c59/xxhash-3.5.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:149b7914451eb154b3dfaa721315117ea1dac2cc55a01bfbd4df7c68c5dd683d", size = 210294, upload-time = "2024-08-17T09:18:36.355Z" }, + { url = "https://files.pythonhosted.org/packages/17/02/215c4698955762d45a8158117190261b2dbefe9ae7e5b906768c09d8bc74/xxhash-3.5.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:eade977f5c96c677035ff39c56ac74d851b1cca7d607ab3d8f23c6b859379cab", size = 414674, upload-time = "2024-08-17T09:18:38.536Z" }, + { url = "https://files.pythonhosted.org/packages/31/5c/b7a8db8a3237cff3d535261325d95de509f6a8ae439a5a7a4ffcff478189/xxhash-3.5.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fa9f547bd98f5553d03160967866a71056a60960be00356a15ecc44efb40ba8e", size = 192022, upload-time = "2024-08-17T09:18:40.138Z" }, + { url = "https://files.pythonhosted.org/packages/78/e3/dd76659b2811b3fd06892a8beb850e1996b63e9235af5a86ea348f053e9e/xxhash-3.5.0-cp312-cp312-win32.whl", hash = "sha256:f7b58d1fd3551b8c80a971199543379be1cee3d0d409e1f6d8b01c1a2eebf1f8", size = 30170, upload-time = "2024-08-17T09:18:42.163Z" }, + { url = "https://files.pythonhosted.org/packages/d9/6b/1c443fe6cfeb4ad1dcf231cdec96eb94fb43d6498b4469ed8b51f8b59a37/xxhash-3.5.0-cp312-cp312-win_amd64.whl", hash = "sha256:fa0cafd3a2af231b4e113fba24a65d7922af91aeb23774a8b78228e6cd785e3e", size = 30040, upload-time = "2024-08-17T09:18:43.699Z" }, + { url = "https://files.pythonhosted.org/packages/0f/eb/04405305f290173acc0350eba6d2f1a794b57925df0398861a20fbafa415/xxhash-3.5.0-cp312-cp312-win_arm64.whl", hash = "sha256:586886c7e89cb9828bcd8a5686b12e161368e0064d040e225e72607b43858ba2", size = 26796, upload-time = "2024-08-17T09:18:45.29Z" }, +] + +[[package]] +name = "yarl" +version = "1.20.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "multidict" }, + { name = "propcache" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3c/fb/efaa23fa4e45537b827620f04cf8f3cd658b76642205162e072703a5b963/yarl-1.20.1.tar.gz", hash = 
"sha256:d017a4997ee50c91fd5466cef416231bb82177b93b029906cefc542ce14c35ac", size = 186428, upload-time = "2025-06-10T00:46:09.923Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/9a/cb7fad7d73c69f296eda6815e4a2c7ed53fc70c2f136479a91c8e5fbdb6d/yarl-1.20.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdcc4cd244e58593a4379fe60fdee5ac0331f8eb70320a24d591a3be197b94a9", size = 133667, upload-time = "2025-06-10T00:43:44.369Z" }, + { url = "https://files.pythonhosted.org/packages/67/38/688577a1cb1e656e3971fb66a3492501c5a5df56d99722e57c98249e5b8a/yarl-1.20.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b29a2c385a5f5b9c7d9347e5812b6f7ab267193c62d282a540b4fc528c8a9d2a", size = 91025, upload-time = "2025-06-10T00:43:46.295Z" }, + { url = "https://files.pythonhosted.org/packages/50/ec/72991ae51febeb11a42813fc259f0d4c8e0507f2b74b5514618d8b640365/yarl-1.20.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1112ae8154186dfe2de4732197f59c05a83dc814849a5ced892b708033f40dc2", size = 89709, upload-time = "2025-06-10T00:43:48.22Z" }, + { url = "https://files.pythonhosted.org/packages/99/da/4d798025490e89426e9f976702e5f9482005c548c579bdae792a4c37769e/yarl-1.20.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:90bbd29c4fe234233f7fa2b9b121fb63c321830e5d05b45153a2ca68f7d310ee", size = 352287, upload-time = "2025-06-10T00:43:49.924Z" }, + { url = "https://files.pythonhosted.org/packages/1a/26/54a15c6a567aac1c61b18aa0f4b8aa2e285a52d547d1be8bf48abe2b3991/yarl-1.20.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:680e19c7ce3710ac4cd964e90dad99bf9b5029372ba0c7cbfcd55e54d90ea819", size = 345429, upload-time = "2025-06-10T00:43:51.7Z" }, + { url = "https://files.pythonhosted.org/packages/d6/95/9dcf2386cb875b234353b93ec43e40219e14900e046bf6ac118f94b1e353/yarl-1.20.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a979218c1fdb4246a05efc2cc23859d47c89af463a90b99b7c56094daf25a16", size = 365429, upload-time = "2025-06-10T00:43:53.494Z" }, + { url = "https://files.pythonhosted.org/packages/91/b2/33a8750f6a4bc224242a635f5f2cff6d6ad5ba651f6edcccf721992c21a0/yarl-1.20.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:255b468adf57b4a7b65d8aad5b5138dce6a0752c139965711bdcb81bc370e1b6", size = 363862, upload-time = "2025-06-10T00:43:55.766Z" }, + { url = "https://files.pythonhosted.org/packages/98/28/3ab7acc5b51f4434b181b0cee8f1f4b77a65919700a355fb3617f9488874/yarl-1.20.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a97d67108e79cfe22e2b430d80d7571ae57d19f17cda8bb967057ca8a7bf5bfd", size = 355616, upload-time = "2025-06-10T00:43:58.056Z" }, + { url = "https://files.pythonhosted.org/packages/36/a3/f666894aa947a371724ec7cd2e5daa78ee8a777b21509b4252dd7bd15e29/yarl-1.20.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8570d998db4ddbfb9a590b185a0a33dbf8aafb831d07a5257b4ec9948df9cb0a", size = 339954, upload-time = "2025-06-10T00:43:59.773Z" }, + { url = "https://files.pythonhosted.org/packages/f1/81/5f466427e09773c04219d3450d7a1256138a010b6c9f0af2d48565e9ad13/yarl-1.20.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:97c75596019baae7c71ccf1d8cc4738bc08134060d0adfcbe5642f778d1dca38", size = 365575, upload-time = "2025-06-10T00:44:02.051Z" }, + { url = 
"https://files.pythonhosted.org/packages/2e/e3/e4b0ad8403e97e6c9972dd587388940a032f030ebec196ab81a3b8e94d31/yarl-1.20.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1c48912653e63aef91ff988c5432832692ac5a1d8f0fb8a33091520b5bbe19ef", size = 365061, upload-time = "2025-06-10T00:44:04.196Z" }, + { url = "https://files.pythonhosted.org/packages/ac/99/b8a142e79eb86c926f9f06452eb13ecb1bb5713bd01dc0038faf5452e544/yarl-1.20.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4c3ae28f3ae1563c50f3d37f064ddb1511ecc1d5584e88c6b7c63cf7702a6d5f", size = 364142, upload-time = "2025-06-10T00:44:06.527Z" }, + { url = "https://files.pythonhosted.org/packages/34/f2/08ed34a4a506d82a1a3e5bab99ccd930a040f9b6449e9fd050320e45845c/yarl-1.20.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c5e9642f27036283550f5f57dc6156c51084b458570b9d0d96100c8bebb186a8", size = 381894, upload-time = "2025-06-10T00:44:08.379Z" }, + { url = "https://files.pythonhosted.org/packages/92/f8/9a3fbf0968eac704f681726eff595dce9b49c8a25cd92bf83df209668285/yarl-1.20.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:2c26b0c49220d5799f7b22c6838409ee9bc58ee5c95361a4d7831f03cc225b5a", size = 383378, upload-time = "2025-06-10T00:44:10.51Z" }, + { url = "https://files.pythonhosted.org/packages/af/85/9363f77bdfa1e4d690957cd39d192c4cacd1c58965df0470a4905253b54f/yarl-1.20.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:564ab3d517e3d01c408c67f2e5247aad4019dcf1969982aba3974b4093279004", size = 374069, upload-time = "2025-06-10T00:44:12.834Z" }, + { url = "https://files.pythonhosted.org/packages/35/99/9918c8739ba271dcd935400cff8b32e3cd319eaf02fcd023d5dcd487a7c8/yarl-1.20.1-cp312-cp312-win32.whl", hash = "sha256:daea0d313868da1cf2fac6b2d3a25c6e3a9e879483244be38c8e6a41f1d876a5", size = 81249, upload-time = "2025-06-10T00:44:14.731Z" }, + { url = "https://files.pythonhosted.org/packages/eb/83/5d9092950565481b413b31a23e75dd3418ff0a277d6e0abf3729d4d1ce25/yarl-1.20.1-cp312-cp312-win_amd64.whl", hash = "sha256:48ea7d7f9be0487339828a4de0360d7ce0efc06524a48e1810f945c45b813698", size = 86710, upload-time = "2025-06-10T00:44:16.716Z" }, + { url = "https://files.pythonhosted.org/packages/b4/2d/2345fce04cfd4bee161bf1e7d9cdc702e3e16109021035dbb24db654a622/yarl-1.20.1-py3-none-any.whl", hash = "sha256:83b8eb083fe4683c6115795d9fc1cfaf2cbbefb19b3a1cb68f6527460f483a77", size = 46542, upload-time = "2025-06-10T00:46:07.521Z" }, +] + +[[package]] +name = "zstandard" +version = "0.24.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/09/1b/c20b2ef1d987627765dcd5bf1dadb8ef6564f00a87972635099bb76b7a05/zstandard-0.24.0.tar.gz", hash = "sha256:fe3198b81c00032326342d973e526803f183f97aa9e9a98e3f897ebafe21178f", size = 905681, upload-time = "2025-08-17T18:36:36.352Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/26/e9/0bd281d9154bba7fc421a291e263911e1d69d6951aa80955b992a48289f6/zstandard-0.24.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a2bda8f2790add22773ee7a4e43c90ea05598bffc94c21c40ae0a9000b0133c3", size = 795710, upload-time = "2025-08-17T18:22:19.189Z" }, + { url = "https://files.pythonhosted.org/packages/36/26/b250a2eef515caf492e2d86732e75240cdac9d92b04383722b9753590c36/zstandard-0.24.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cc76de75300f65b8eb574d855c12518dc25a075dadb41dd18f6322bda3fe15d5", size = 640336, upload-time = "2025-08-17T18:22:20.466Z" }, + { url = 
"https://files.pythonhosted.org/packages/79/bf/3ba6b522306d9bf097aac8547556b98a4f753dc807a170becaf30dcd6f01/zstandard-0.24.0-cp312-cp312-manylinux2010_i686.manylinux2014_i686.manylinux_2_12_i686.manylinux_2_17_i686.whl", hash = "sha256:d2b3b4bda1a025b10fe0269369475f420177f2cb06e0f9d32c95b4873c9f80b8", size = 5342533, upload-time = "2025-08-17T18:22:22.326Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ec/22bc75bf054e25accdf8e928bc68ab36b4466809729c554ff3a1c1c8bce6/zstandard-0.24.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b84c6c210684286e504022d11ec294d2b7922d66c823e87575d8b23eba7c81f", size = 5062837, upload-time = "2025-08-17T18:22:24.416Z" }, + { url = "https://files.pythonhosted.org/packages/48/cc/33edfc9d286e517fb5b51d9c3210e5bcfce578d02a675f994308ca587ae1/zstandard-0.24.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c59740682a686bf835a1a4d8d0ed1eefe31ac07f1c5a7ed5f2e72cf577692b00", size = 5393855, upload-time = "2025-08-17T18:22:26.786Z" }, + { url = "https://files.pythonhosted.org/packages/73/36/59254e9b29da6215fb3a717812bf87192d89f190f23817d88cb8868c47ac/zstandard-0.24.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:6324fde5cf5120fbf6541d5ff3c86011ec056e8d0f915d8e7822926a5377193a", size = 5451058, upload-time = "2025-08-17T18:22:28.885Z" }, + { url = "https://files.pythonhosted.org/packages/9a/c7/31674cb2168b741bbbe71ce37dd397c9c671e73349d88ad3bca9e9fae25b/zstandard-0.24.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:51a86bd963de3f36688553926a84e550d45d7f9745bd1947d79472eca27fcc75", size = 5546619, upload-time = "2025-08-17T18:22:31.115Z" }, + { url = "https://files.pythonhosted.org/packages/e6/01/1a9f22239f08c00c156f2266db857545ece66a6fc0303d45c298564bc20b/zstandard-0.24.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d82ac87017b734f2fb70ff93818c66f0ad2c3810f61040f077ed38d924e19980", size = 5046676, upload-time = "2025-08-17T18:22:33.077Z" }, + { url = "https://files.pythonhosted.org/packages/a7/91/6c0cf8fa143a4988a0361380ac2ef0d7cb98a374704b389fbc38b5891712/zstandard-0.24.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:92ea7855d5bcfb386c34557516c73753435fb2d4a014e2c9343b5f5ba148b5d8", size = 5576381, upload-time = "2025-08-17T18:22:35.391Z" }, + { url = "https://files.pythonhosted.org/packages/e2/77/1526080e22e78871e786ccf3c84bf5cec9ed25110a9585507d3c551da3d6/zstandard-0.24.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3adb4b5414febf074800d264ddf69ecade8c658837a83a19e8ab820e924c9933", size = 4953403, upload-time = "2025-08-17T18:22:37.266Z" }, + { url = "https://files.pythonhosted.org/packages/6e/d0/a3a833930bff01eab697eb8abeafb0ab068438771fa066558d96d7dafbf9/zstandard-0.24.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:6374feaf347e6b83ec13cc5dcfa70076f06d8f7ecd46cc71d58fac798ff08b76", size = 5267396, upload-time = "2025-08-17T18:22:39.757Z" }, + { url = "https://files.pythonhosted.org/packages/f3/5e/90a0db9a61cd4769c06374297ecfcbbf66654f74cec89392519deba64d76/zstandard-0.24.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:13fc548e214df08d896ee5f29e1f91ee35db14f733fef8eabea8dca6e451d1e2", size = 5433269, upload-time = "2025-08-17T18:22:42.131Z" }, + { url = "https://files.pythonhosted.org/packages/ce/58/fc6a71060dd67c26a9c5566e0d7c99248cbe5abfda6b3b65b8f1a28d59f7/zstandard-0.24.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:0a416814608610abf5488889c74e43ffa0343ca6cf43957c6b6ec526212422da", 
size = 5814203, upload-time = "2025-08-17T18:22:44.017Z" }, + { url = "https://files.pythonhosted.org/packages/5c/6a/89573d4393e3ecbfa425d9a4e391027f58d7810dec5cdb13a26e4cdeef5c/zstandard-0.24.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:0d66da2649bb0af4471699aeb7a83d6f59ae30236fb9f6b5d20fb618ef6c6777", size = 5359622, upload-time = "2025-08-17T18:22:45.802Z" }, + { url = "https://files.pythonhosted.org/packages/60/ff/2cbab815d6f02a53a9d8d8703bc727d8408a2e508143ca9af6c3cca2054b/zstandard-0.24.0-cp312-cp312-win32.whl", hash = "sha256:ff19efaa33e7f136fe95f9bbcc90ab7fb60648453b03f95d1de3ab6997de0f32", size = 435968, upload-time = "2025-08-17T18:22:49.493Z" }, + { url = "https://files.pythonhosted.org/packages/ce/a3/8f96b8ddb7ad12344218fbd0fd2805702dafd126ae9f8a1fb91eef7b33da/zstandard-0.24.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc05f8a875eb651d1cc62e12a4a0e6afa5cd0cc231381adb830d2e9c196ea895", size = 505195, upload-time = "2025-08-17T18:22:47.193Z" }, + { url = "https://files.pythonhosted.org/packages/a3/4a/bfca20679da63bfc236634ef2e4b1b4254203098b0170e3511fee781351f/zstandard-0.24.0-cp312-cp312-win_arm64.whl", hash = "sha256:b04c94718f7a8ed7cdd01b162b6caa1954b3c9d486f00ecbbd300f149d2b2606", size = 461605, upload-time = "2025-08-17T18:22:48.317Z" }, +]