Skip to content

Commit

Permalink
Merge pull request #76 from dwellir-public/develop
Browse files Browse the repository at this point in the history
Revisions 28,29
  • Loading branch information
jonathanudd authored Sep 4, 2024
2 parents 27c94b8 + 8c2db0f commit 250bc1c
Show file tree
Hide file tree
Showing 6 changed files with 47 additions and 17 deletions.
2 changes: 0 additions & 2 deletions charmcraft.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,5 +23,3 @@ parts:
- setuptools < 58
charm-binary-python-packages:
- substrate-interface
prime:
- files/*
6 changes: 0 additions & 6 deletions files/nrpe-checks/check_substrate.sh

This file was deleted.

18 changes: 14 additions & 4 deletions src/charm.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,17 @@ def _on_get_session_key_action(self, event: ops.ActionEvent) -> None:
rpc_port = ServiceArgs(self.config, self.rpc_urls()).rpc_port
key = PolkadotRpcWrapper(rpc_port).get_session_key()
if key:
event.set_results(results={'session-key': key})
event.set_results(results={'session-keys-merged': key})

        # For convenience, also print a split version of the session key
# Remove the initial '0x'
key_without_prefix = key[2:]
# Split the key into chunks of 64 characters
chunks = [key_without_prefix[i:i+64] for i in range(0, len(key_without_prefix), 64)]
# Add '0x' to each chunk
keys_with_prefix = [f"0x{chunk}" for chunk in chunks]
for i, key in enumerate(keys_with_prefix):
event.set_results(results={f'session-key-{i}': key})
else:
event.fail("Unable to get new session key")

Expand Down Expand Up @@ -261,7 +271,7 @@ def _on_find_validator_address_action(self, event: ops.ActionEvent) -> None:
rpc_port = ServiceArgs(self.config, self.rpc_urls()).rpc_port
result = PolkadotRpcWrapper(rpc_port).is_validating_this_era()
if result:
event.set_results(results={'message': f'This node is currently validating for address {result["validator"]}.'})
event.set_results(results={'message': f'This node is currently validating for address {result["validator"]}'})
event.set_results(results={'session-key': result["session_key"]})
else:
event.set_results(results={'message': 'This node is not currently validating for any address.'})
Expand All @@ -272,10 +282,10 @@ def _on_is_validating_next_era_action(self, event: ops.ActionEvent) -> None:
rpc_port = ServiceArgs(self.config, self.rpc_urls()).rpc_port
session_key = PolkadotRpcWrapper(rpc_port).is_validating_next_era(validator_address)
if session_key:
event.set_results(results={'message': f'This node will be validating next era for address {validator_address}.'})
event.set_results(results={'message': f'This node will be validating next era for address {validator_address}'})
event.set_results(results={'session-key': session_key})
else:
event.set_results(results={'message': f'This node will not be validating next era for address {validator_address}.'})
event.set_results(results={'message': f'This node will not be validating next era for address {validator_address}'})

# TODO: this action is getting quite large and specialized, perhaps move all actions to an `actions.py` file?
def _on_get_node_info_action(self, event: ops.ActionEvent) -> None:
Expand Down
10 changes: 10 additions & 0 deletions src/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,16 @@
# The systemd service is named after the service user account.
SERVICE_NAME = USER
# Home directory of the polkadot service user; all node files live under it.
HOME_DIR = Path('/home/polkadot')
# Main node binary installed into the service user's home directory.
BINARY_FILE = Path(HOME_DIR, 'polkadot')
# Per-chain install path for the PVF execute-worker companion binary.
# Chains not listed here fall back to the 'default' entry.
# NOTE(review): 'enjin' and 'canary' intentionally share the same binary name — confirm.
EXECUTE_WORKER_BINARY_FILE = {
    'default': Path(HOME_DIR, 'polkadot-execute-worker'),
    'enjin': Path(HOME_DIR, 'enjin-execute-worker'),
    'canary': Path(HOME_DIR, 'enjin-execute-worker')
}
# Per-chain install path for the PVF prepare-worker companion binary.
# Chains not listed here fall back to the 'default' entry.
PREPARE_WORKER_BINARY_FILE = {
    'default': Path(HOME_DIR, 'polkadot-prepare-worker'),
    'enjin': Path(HOME_DIR, 'enjin-prepare-worker'),
    'canary': Path(HOME_DIR, 'enjin-prepare-worker')
}
# Directory holding custom chain-spec files.
CHAIN_SPEC_DIR = Path(HOME_DIR, 'spec')
# File storing the node's libp2p network key.
NODE_KEY_FILE = Path(HOME_DIR, 'node-key')
# Default on-disk location of the node's chain databases.
DB_CHAIN_DIR = Path(HOME_DIR, '.local/share/polkadot/chains')
Expand Down
8 changes: 7 additions & 1 deletion src/docker.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ def __init__(self, chain_name, docker_tag):

def extract_resources_from_docker(self):
if self.chain_name in ['spiritnet', 'peregrine', 'peregrine-stg-kilt']:
self.__extract_from_docker('kiltprotocol/kilt-node', '/usr/local/bin/node-executable', '/node/dev-specs')
self.__extract_from_docker('kiltprotocol/kilt-node', '/usr/local/bin/node-executable')
elif self.chain_name == 'centrifuge' or self.chain_name == 'altair':
self.__extract_from_docker('centrifugeio/centrifuge-chain', '/usr/local/bin/centrifuge-chain')
elif self.chain_name == 'nodle' or self.chain_name == 'arcadia' or self.chain_name == 'eden':
Expand Down Expand Up @@ -71,6 +71,12 @@ def extract_resources_from_docker(self):
self.__extract_from_docker('opentensor/subtensor', 'usr/local/bin/node-subtensor')
elif self.chain_name in ['peaq', 'krest']:
self.__extract_from_docker('peaq/parachain', 'usr/local/bin/peaq-node')
elif self.chain_name == 'hyperbridge-nexus':
self.__extract_from_docker('polytopelabs/hyperbridge', './hyperbridge')
elif self.chain_name == 'litentry':
self.__extract_from_docker('litentry/litentry-parachain', '/usr/local/bin/litentry-collator')
elif self.chain_name == 'laos':
self.__extract_from_docker('freeverseio/laos-node', '/usr/bin/laos')
else:
raise ValueError(f"{self.chain_name} is not a supported chain using Docker!")

Expand Down
20 changes: 16 additions & 4 deletions src/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ def install_binary(config: ConfigData, chain_name: str) -> None:
elif config.get('binary-url').endswith('.tar.gz') or config.get('binary-url').endswith('.tgz'):
install_tarball_from_url(config.get('binary-url'), config.get('binary-sha256-url'), chain_name)
elif len(config.get('binary-url').split()) > 1:
install_binaries_from_urls(config.get('binary-url'), config.get('binary-sha256-url'))
install_binaries_from_urls(config.get('binary-url'), config.get('binary-sha256-url'), chain_name)
else:
install_binary_from_url(config.get('binary-url'), config.get('binary-sha256-url'))
elif config.get('docker-tag'):
Expand Down Expand Up @@ -104,7 +104,7 @@ def parse_install_urls(binary_urls: str, sha256_urls: str) -> list:
return url_pairs


def install_binaries_from_urls(binary_urls: str, sha256_urls: str) -> None:
def install_binaries_from_urls(binary_urls: str, sha256_urls: str, chain_name: str) -> None:
logger.debug('Installing multiple binaries!')
binary_sha256_pairs = parse_install_urls(binary_urls, sha256_urls)
responses = []
Expand All @@ -115,8 +115,20 @@ def install_binaries_from_urls(binary_urls: str, sha256_urls: str) -> None:
if response.status_code != 200:
raise ValueError(f"Download binary failed with: {response.text}. Check 'binary-url'!")
binary_hash = hashlib.sha256(response.content).hexdigest()
# TODO: keeping the binary name won't work for the charm if it's not exactly 'polkadot', adjust this if more chains start using multiple binaries
binary_name = binary_url.split('/')[-1]
# Get correct execute worker binary name
if 'execute-worker' in binary_url.split('/')[-1]:
if chain_name in c.EXECUTE_WORKER_BINARY_FILE:
binary_name = c.EXECUTE_WORKER_BINARY_FILE[chain_name]
else:
binary_name = c.EXECUTE_WORKER_BINARY_FILE['default']
# Get correct prepare worker binary name
elif 'prepare-worker' in binary_url.split('/')[-1]:
if chain_name in c.PREPARE_WORKER_BINARY_FILE:
binary_name = c.PREPARE_WORKER_BINARY_FILE[chain_name]
else:
binary_name = c.PREPARE_WORKER_BINARY_FILE['default']
else:
binary_name = c.BINARY_FILE
responses += [(binary_url, sha256_url, response, binary_name, binary_hash)]
perform_sha256_checksums(responses, sha256_urls)
stop_service()
Expand Down

0 comments on commit 250bc1c

Please sign in to comment.