Merge pull request #44 from RaoFoundation/dev
Release 2.1.4
RusticLuftig authored Jan 17, 2024
2 parents 2300785 + 9c25951 commit 06eecdd
Showing 6 changed files with 44 additions and 8 deletions.
4 changes: 2 additions & 2 deletions constants/__init__.py
@@ -4,7 +4,7 @@
 # Project Constants.
 # ---------------------------------

-__version__ = "2.1.0"
+__version__ = "2.1.4"
 version_split = __version__.split(".")
 __spec_version__ = (
     (1000 * int(version_split[0]))
@@ -21,7 +21,7 @@
 # The maximum bytes for the hugging face repo (1 Gigabyte).
 MAX_HUGGING_FACE_BYTES = 1 * 1024 * 1024 * 1024
 # The maximum parameter size allowed for models.
-MAX_MODEL_PARAMETER_SIZE = 122268040
+MAX_MODEL_PARAMETER_SIZE = 186_000_000
 # The number of run steps to log to single wandb run.
 MAX_RUN_STEPS_PER_WANDB_RUN = 100
4 changes: 2 additions & 2 deletions docs/miner.md
@@ -121,8 +121,8 @@ python scripts/upload_model.py --load_model_dir <path to model> --hf_repo_id my-

 ## Running a custom Miner

-As of Jan 1st, 2024 the subnet works with any model supported by [AutoModelForCausalLM](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) subject to the following constraints:
-1. Has less than 122268040 parameters.
+As of Jan 15th, 2024 the subnet works with any model supported by [AutoModelForCausalLM](https://huggingface.co/docs/transformers/model_doc/auto#transformers.AutoModelForCausalLM) subject to the following constraints:
+1. Has less than 186,000,000 parameters.
 2. Total size of the repo is less than 1 Gigabyte.
 3. Models sequence_length parameter must be 1024 tokens.
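The raised cap can be checked locally before uploading. A minimal sketch (not part of this diff) that loads a model the way the subnet expects and counts its parameters; the local path is illustrative and the constant mirrors MAX_MODEL_PARAMETER_SIZE from constants/__init__.py above:

```python
from transformers import AutoModelForCausalLM

# Mirrors MAX_MODEL_PARAMETER_SIZE in constants/__init__.py as of 2.1.4.
MAX_MODEL_PARAMETER_SIZE = 186_000_000

# Load the model through AutoModelForCausalLM, as the subnet does.
model = AutoModelForCausalLM.from_pretrained("./my-local-model")  # illustrative path

num_params = sum(p.numel() for p in model.parameters())
print(f"Model has {num_params:,} parameters")

if num_params >= MAX_MODEL_PARAMETER_SIZE:
    raise ValueError(
        f"Model has {num_params:,} parameters; the limit is {MAX_MODEL_PARAMETER_SIZE:,}."
    )
```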
18 changes: 18 additions & 0 deletions pretrain/mining.py
@@ -141,6 +141,24 @@ async def push(self, model: PreTrainedModel, retry_delay_secs: int = 60):
                     self.wallet.hotkey.ss58_address, model_id
                 )

+                bt.logging.info(
+                    "Wrote model metadata to the chain. Checking we can read it back..."
+                )
+
+                model_metadata = (
+                    await self.model_metadata_store.retrieve_model_metadata(
+                        self.wallet.hotkey.ss58_address
+                    )
+                )
+
+                if not model_metadata or model_metadata.id != model_id:
+                    bt.logging.error(
+                        f"Failed to read back model metadata from the chain. Expected: {model_id}, got: {model_metadata}"
+                    )
+                    raise ValueError(
+                        f"Failed to read back model metadata from the chain. Expected: {model_id}, got: {model_metadata}"
+                    )
+
                 bt.logging.success("Committed model to the chain.")
                 break
             except Exception as e:
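For orientation, the hunk above lands inside push's commit-retry loop (the trailing break and except Exception as e: are the loop's context lines). The sketch below shows the write-then-verify pattern in isolation; the function name, parameters, and the retry sleep are illustrative assumptions, not the repo's code:

```python
import asyncio


async def commit_with_readback(store, hotkey: str, model_id, retry_delay_secs: int = 60) -> None:
    """Illustrative sketch of the pattern added in 2.1.4: write metadata to the
    chain, read it back, and retry until the read matches what was written."""
    while True:
        try:
            await store.store_model_metadata(hotkey, model_id)

            # A stale or missing read is treated as a failure, so the whole
            # commit is attempted again after the delay.
            metadata = await store.retrieve_model_metadata(hotkey)
            if metadata is None or metadata.id != model_id:
                raise ValueError(f"Expected {model_id}, got {metadata}")
            break
        except Exception:
            await asyncio.sleep(retry_delay_secs)
```

The new test_push_metadata_read_is_old test below exercises this path by injecting a stale (None) read into the fake metadata store and asserting that the eventual commit lands on a later block.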
2 changes: 1 addition & 1 deletion setup.py
@@ -53,7 +53,7 @@ def read_requirements(path):

 # loading version from setup.py
 with codecs.open(
-    os.path.join(here, "pretrain/__init__.py"), encoding="utf-8"
+    os.path.join(here, "constants/__init__.py"), encoding="utf-8"
 ) as init_file:
     version_match = re.search(
         r"^__version__ = ['\"]([^'\"]*)['\"]", init_file.read(), re.M
10 changes: 9 additions & 1 deletion tests/model/storage/fake_model_metadata_store.py
@@ -1,4 +1,4 @@
-from collections import deque
+from collections import defaultdict, deque
 from typing import List, Optional
 from model.data import ModelId, ModelMetadata
 from model.storage.model_metadata_store import ModelMetadataStore
@@ -11,6 +11,7 @@ def __init__(self):
         self.current_block = 1
         self.metadata = dict()
         self.store_errors = deque()
+        self.injected_metadata = defaultdict(deque)

     async def store_model_metadata(self, hotkey: str, model_id: ModelId):
         """Fake stores model metadata for a specific hotkey."""
@@ -38,8 +38,15 @@ async def store_model_metadata_exact(
     async def retrieve_model_metadata(self, hotkey: str) -> Optional[ModelMetadata]:
         """Retrieves model metadata for a specific hotkey"""

+        if len(self.injected_metadata[hotkey]) > 0:
+            return self.injected_metadata[hotkey].popleft()
+
         return self.metadata[hotkey] if hotkey in self.metadata else None

+    def inject_model_metadata(self, hotkey: str, metadata: ModelMetadata):
+        """Injects a metadata for hotkey to be returned instead of the expected response."""
+        self.injected_metadata[hotkey].append(metadata)
+
     def inject_store_errors(self, errors: List[Exception]):
         """Injects a list of errors to be raised on the next N calls to store_model_metadata."""
         self.store_errors.extend(errors)
14 changes: 12 additions & 2 deletions tests/pretrain/test_mining.py
@@ -43,7 +43,7 @@ def test_model_to_disk_roundtrip(self):

         assert_model_equality(self, self.tiny_model, model)

-    def _test_push(self):
+    def _test_push(self, min_expected_block: int = 1):
         asyncio.run(self.actions.push(model=self.tiny_model, retry_delay_secs=1))

         # Check that the model was uploaded to hugging face.
@@ -54,7 +54,7 @@ def _test_push(self):
         model_metadata = asyncio.run(
             self.metadata_store.retrieve_model_metadata(self.wallet.hotkey.ss58_address)
         )
-        self.assertGreaterEqual(model_metadata.block, 1)
+        self.assertGreaterEqual(model_metadata.block, min_expected_block)
         self.assertEqual(model_metadata.id, model.id)

         self.metadata_store.reset()
@@ -73,6 +73,16 @@ def test_push_model_chain_failure(self):

         self._test_push()

+    def test_push_metadata_read_is_old(self):
+        """Tests that pushing a model to the chain is successful even if the metadata read back is stale."""
+
+        # Inject an empty response when push tries to read back the metadata commit.
+        self.metadata_store.inject_model_metadata(
+            self.wallet.hotkey.ss58_address, metadata=None
+        )
+
+        self._test_push(min_expected_block=2)
+

 if __name__ == "__main__":
     unittest.main()
