Skip to content

Commit

Permalink
[version bump] v0.10.5 (run-llama#10770)
Browse files Browse the repository at this point in the history
  • Loading branch information
logan-markewich authored and Izuki Matsuba committed Mar 29, 2024
1 parent fde716b commit 6fe82c6
Show file tree
Hide file tree
Showing 15 changed files with 33 additions and 15 deletions.
2 changes: 1 addition & 1 deletion llama-index-core/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ name = "llama-index-core"
packages = [{include = "llama_index"}]
readme = "README.md"
repository = "https://github.com/run-llama/llama_index"
-version = "0.10.3"
+version = "0.10.5"

[tool.poetry.dependencies]
SQLAlchemy = {extras = ["asyncio"], version = ">=1.4.49"}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ def __init__(
) -> None:
"""Init params."""
import torch
-        from llama_index.finetuning.embeddings.adapter_utils import (
+        from llama_index.embeddings.adapter import (
BaseAdapter,
LinearLayer,
)
Expand Down Expand Up @@ -108,7 +108,7 @@ def from_model_path(
**kwargs (Any): Additional kwargs (see __init__)
"""
-        from llama_index.finetuning.embeddings.adapter_utils import LinearLayer
+        from llama_index.embeddings.adapter import LinearLayer

model_cls = model_cls or LinearLayer
model = model_cls.load(model_path)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
import torch
import transformers
from llama_index.core.utils import print_text
-from llama_index.embeddings.adapter.utils import BaseAdapter
+from llama_index.embeddings.adapter import BaseAdapter
from sentence_transformers.util import cos_sim
from torch import Tensor, nn
from torch.optim import Optimizer
Expand Down
4 changes: 2 additions & 2 deletions llama-index-finetuning/pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,15 +24,15 @@ description = "llama-index finetuning"
license = "MIT"
name = "llama-index-finetuning"
readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"

[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
llama-index-core = "^0.10.1"
llama-index-llms-openai = "^0.1.1"
llama-index-llms-gradient = "^0.1.1"
llama-index-postprocessor-cohere-rerank = "^0.1.1"
-llama-index-embeddings-adapter = "^0.1.1"
+llama-index-embeddings-adapter = "^0.1.2"
sentence-transformers = "^2.3.0"

[tool.poetry.group.dev.dependencies]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,12 @@
-from llama_index.embeddings.adapter.base import AdapterEmbeddingModel
+from llama_index.embeddings.adapter.base import (
+    AdapterEmbeddingModel,
+    LinearAdapterEmbeddingModel,
+)
+from llama_index.embeddings.adapter.utils import BaseAdapter, LinearLayer

-__all__ = ["AdapterEmbeddingModel"]
+__all__ = [
+    "AdapterEmbeddingModel",
+    "LinearAdapterEmbeddingModel",
+    "BaseAdapter",
+    "LinearLayer",
+]
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ description = "llama-index embeddings adapter integration"
license = "MIT"
name = "llama-index-embeddings-adapter"
readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"

[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ check-hidden = true
skip = "*.csv,*.html,*.json,*.jsonl,*.pdf,*.txt,*.ipynb"

[tool.llamahub]
-classes = ["NomicEmbedding"]
+classes = ["NomicEmbedding", "NomicHFEmbedding"]
contains_example = false
import_path = "llama_index.embeddings.nomic"

Expand All @@ -24,7 +24,7 @@ description = "llama-index embeddings nomic integration"
license = "MIT"
name = "llama-index-embeddings-nomic"
readme = "README.md"
-version = "0.1.1"
+version = "0.1.3"

[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
Expand Down
Binary file not shown.
Binary file not shown.
Binary file not shown.
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ description = "llama-index llms nvidia tensorrt integration"
license = "MIT"
name = "llama-index-llms-nvidia-tensorrt"
readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"

[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ description = "llama-index llms vllm integration"
license = "MIT"
name = "llama-index-llms-vllm"
readme = "README.md"
-version = "0.1.1"
+version = "0.1.2"

[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# Minio Reader

## Install

`pip install llama-index-readers-minio`

## Import

`from llama_index.readers.minio import MinioReader, BotoMinioReader`
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ description = "llama-index vector_stores mongodb integration"
license = "MIT"
name = "llama-index-vector-stores-mongodb"
readme = "README.md"
-version = "0.1.2"
+version = "0.1.3"

[tool.poetry.dependencies]
python = ">=3.8.1,<3.12"
Expand Down
2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ name = "llama-index"
packages = [{from = "_llama-index", include = "llama_index"}]
readme = "README.md"
repository = "https://github.com/run-llama/llama_index"
-version = "0.10.3"
+version = "0.10.5"

[tool.poetry.dependencies]
python = ">=3.8.1,<4.0"
Expand Down

0 comments on commit 6fe82c6

Please sign in to comment.