Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Patch upgrade loop logic and logs #14

Merged
merged 4 commits into from
Sep 4, 2023
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
55 changes: 48 additions & 7 deletions app.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,13 +44,29 @@

SEMANTIC_VERSION_PATTERN = re.compile(r'v(\d+(?:\.\d+){0,2})')

# Explicit list of chains to pull data from
def get_chain_watch_env_var():
    """Read the CHAIN_WATCH environment variable and return the chains to watch.

    CHAIN_WATCH is a space-separated list of chain names. Returns a list of
    chain names, or an empty list when the variable is unset/empty (meaning
    "gather data for all chains").

    Note: returning a list (rather than the raw string) is deliberate — callers
    test membership with `d in CHAIN_WATCH`, which on a string would do
    substring matching (e.g. "osmo" would falsely match "osmosis").
    """
    raw = os.environ.get('CHAIN_WATCH', '')

    # str.split() with no argument returns [] for an empty/whitespace-only
    # string, whereas ''.split(' ') would return [''] (a spurious entry).
    chains = raw.split()

    if chains:
        print("CHAIN_WATCH env variable set, gathering data and watching for these chains: " + raw)
    else:
        print("CHAIN_WATCH env variable not set, gathering data for all chains")

    return chains

CHAIN_WATCH = get_chain_watch_env_var()

# Clone the repo
def fetch_repo():
"""Clone the GitHub repository or update it if it already exists."""
repo_clone_url = "https://github.com/cosmos/chain-registry.git"
repo_dir = os.path.join(os.getcwd(), 'chain-registry')

if os.path.exists(repo_dir):
old_wd = os.getcwd()
print(f"Repository already exists. Fetching and pulling latest changes...")
try:
# Navigate to the repo directory
Expand All @@ -61,6 +77,8 @@ def fetch_repo():
subprocess.run(["git", "pull"], check=True)
except subprocess.CalledProcessError:
raise Exception("Failed to fetch and pull the latest changes.")
finally:
os.chdir(old_wd)
else:
print(f"Cloning repo {repo_clone_url}...")
try:
Expand Down Expand Up @@ -156,7 +174,8 @@ def reorder_data(data):
("source", data.get("source")),
("upgrade_block_height", data.get("upgrade_block_height")),
("estimated_upgrade_time", data.get("estimated_upgrade_time")),
("version", data.get("version"))
("version", data.get("version")),
('error', data.get("error"))
])
return ordered_data

Expand Down Expand Up @@ -269,10 +288,17 @@ def fetch_data_for_network(network, network_type, repo_path):
else:
raise ValueError(f"Invalid network type: {network_type}")

err_output_data = {
"network": network,
"error": "insufficient data in Cosmos chain registry, consider a PR to cosmos/chain-registry",
"upgrade_found": False
}

# Check if the chain.json file exists
if not os.path.exists(chain_json_path):
print(f"chain.json not found for network {network}. Skipping...")
return None
err_output_data["error"] = f"insufficient data in Cosmos chain registry, chain.json not found for {network}. Consider a PR to cosmos/chain-registry"
return err_output_data

# Load the chain.json data
with open(chain_json_path, 'r') as file:
Expand All @@ -287,12 +313,14 @@ def fetch_data_for_network(network, network_type, repo_path):
healthy_rest_endpoints = get_healthy_rest_endpoints(rest_endpoints)

if len(healthy_rpc_endpoints) == 0:
print(f"No healthy RPC endpoints found for network {network}. Skipping...")
return None
print(f"No healthy RPC endpoints found for network {network} while searching through {len(rpc_endpoints)} endpoints. Skipping...")
err_output_data["error"] = f"insufficient data in Cosmos chain registry, no healthy RPC servers for {network}. Consider a PR to cosmos/chain-registry"
return err_output_data

if len(healthy_rest_endpoints) == 0:
print(f"No healthy REST endpoints found for network {network}. Skipping...")
return None
print(f"No healthy REST endpoints found for network {network} while searching through {len(rest_endpoints)} endpoints. Skipping...")
err_output_data["error"] = f"insufficient data in Cosmos chain registry, no healthy REST servers for {network}. Consider a PR to cosmos/chain-registry"
return err_output_data

print(f"Found {len(healthy_rest_endpoints)} rest endpoints and {len(healthy_rpc_endpoints)} rpc endpoints for {network}")

Expand Down Expand Up @@ -335,15 +363,22 @@ def fetch_data_for_network(network, network_type, repo_path):
upgrade_version = active_upgrade_version
upgrade_name = active_upgrade_name
source = "active_upgrade_proposals"
rest_server_used = current_endpoint
break

if current_upgrade_version and (current_upgrade_height is not None) and current_upgrade_height > latest_block_height:
upgrade_block_height = current_upgrade_height
upgrade_version = current_upgrade_version
upgrade_name = current_upgrade_name
source = "current_upgrade_plan"
rest_server_used = current_endpoint
break

if not active_upgrade_version and not current_upgrade_version:
#this is where the "no upgrades found block runs"
rest_server_used = current_endpoint
break
rest_server_used = current_endpoint


# Calculate average block time
current_block_time = get_block_time_rpc(rpc_server_used, latest_block_height)
Expand Down Expand Up @@ -404,11 +439,17 @@ def update_data():
and not d.startswith(('.', '_'))
and d != "testnets"]

if len(CHAIN_WATCH) != 0:
mainnet_networks = [d for d in mainnet_networks if d in CHAIN_WATCH]

testnet_path = os.path.join(repo_path, 'testnets')
testnet_networks = [d for d in os.listdir(testnet_path)
if os.path.isdir(os.path.join(testnet_path, d))
and not d.startswith(('.', '_'))]

if len(CHAIN_WATCH) != 0:
testnet_networks = [d for d in testnet_networks if d in CHAIN_WATCH]

with ThreadPoolExecutor() as executor:
testnet_data = list(filter(None, executor.map(lambda network, path: fetch_data_for_network(network, "testnet", path), testnet_networks, [repo_path]*len(testnet_networks))))
mainnet_data = list(filter(None, executor.map(lambda network, path: fetch_data_for_network(network, "mainnet", path), mainnet_networks, [repo_path]*len(mainnet_networks))))
Expand Down
Loading