diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100644
index 0000000..6ba1afd
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,6 @@
+{
+    "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter"
+    },
+    "python.formatting.provider": "none"
+}
\ No newline at end of file
diff --git a/app.py b/app.py
index 9cf6d33..598f4f1 100644
--- a/app.py
+++ b/app.py
@@ -3,6 +3,7 @@
 from datetime import datetime
 from datetime import timedelta
 from random import shuffle
+
 # import logging
 import threading
 from flask import Flask, jsonify, request, Response
@@ -20,10 +21,12 @@
 # logging.basicConfig(filename='app.log', level=print, format='%(asctime)s - %(levelname)s - %(message)s')
 
 # Suppress only the single InsecureRequestWarning from urllib3
-requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
+requests.packages.urllib3.disable_warnings(
+    requests.packages.urllib3.exceptions.InsecureRequestWarning
+)
 
 # Initialize cache
-cache = Cache(app, config={'CACHE_TYPE': 'simple'})
+cache = Cache(app, config={"CACHE_TYPE": "simple"})
 
 # Initialize repo vars
 # repo_path = ""
@@ -31,39 +34,49 @@
 # repo_retain_hours = int(os.environ.get('REPO_RETAIN_HOURS', 3))
 
 # Initialize number of workers
-num_workers = int(os.environ.get('NUM_WORKERS', 10))
+num_workers = int(os.environ.get("NUM_WORKERS", 10))
 
 GITHUB_API_BASE_URL = "https://api.github.com/repos/cosmos/chain-registry/contents"
 
 # these servers have given consistent error responses, this list is used to skip them
-SERVER_BLACKLIST = ["https://stride.api.bccnodes.com:443", "https://api.omniflix.nodestake.top", "https://cosmos-lcd.quickapi.com:443"]
+SERVER_BLACKLIST = [
+    "https://stride.api.bccnodes.com:443",
+    "https://api.omniflix.nodestake.top",
+    "https://cosmos-lcd.quickapi.com:443",
+]
 
 # Global variables to store the data for mainnets and testnets
MAINNET_DATA = []
 TESTNET_DATA = []
 
-SEMANTIC_VERSION_PATTERN = re.compile(r'(v\d+(?:\.\d+){0,2})')
+SEMANTIC_VERSION_PATTERN = re.compile(r"(v\d+(?:\.\d+){0,2})")
+
 
 # Explicit list of chains to pull data from
 def get_chain_watch_env_var():
-    chain_watch = os.environ.get('CHAIN_WATCH', '')
-    chain_watch.split(" ")
+    chain_watch = os.environ.get("CHAIN_WATCH", "")
+    # keep the split result (it was previously discarded) so the later
+    # membership checks compare whole chain names instead of substrings
+    chain_watch = chain_watch.split()
 
     if len(chain_watch) > 0:
-        print("CHAIN_WATCH env variable set, gathering data and watching for these chains: " + chain_watch)
+        print(
+            "CHAIN_WATCH env variable set, gathering data and watching for these chains: "
+            + ", ".join(chain_watch)
+        )
     else:
         print("CHAIN_WATCH env variable not set, gathering data for all chains")
 
     return chain_watch
 
+
 CHAIN_WATCH = get_chain_watch_env_var()
 
+
 # Clone the repo
 def fetch_repo():
     """Clone the GitHub repository or update it if it already exists."""
     repo_clone_url = "https://github.com/cosmos/chain-registry.git"
-    repo_dir = os.path.join(os.getcwd(), 'chain-registry')
+    repo_dir = os.path.join(os.getcwd(), "chain-registry")
 
     if os.path.exists(repo_dir):
         old_wd = os.getcwd()
@@ -91,31 +104,51 @@ def fetch_repo():
 
 def get_healthy_rpc_endpoints(rpc_endpoints):
     with ThreadPoolExecutor(max_workers=num_workers) as executor:
-        healthy_rpc_endpoints = [rpc for rpc, is_healthy in executor.map(lambda rpc: (rpc, is_endpoint_healthy(rpc['address'])), rpc_endpoints) if is_healthy]
+        healthy_rpc_endpoints = [
+            rpc
+            for rpc, is_healthy in executor.map(
+                lambda rpc: (rpc, is_endpoint_healthy(rpc["address"])), rpc_endpoints
+            )
+            if is_healthy
+        ]
 
     return healthy_rpc_endpoints[:5]  # Select the first 5 healthy RPC endpoints
 
+
 def get_healthy_rest_endpoints(rest_endpoints):
     with ThreadPoolExecutor(max_workers=num_workers) as executor:
-        healthy_rest_endpoints = [rest for rest, is_healthy in executor.map(lambda rest: (rest, is_endpoint_healthy(rest['address'])), rest_endpoints) if is_healthy]
+        healthy_rest_endpoints = [
+            rest
+            for rest, is_healthy in executor.map(
+                lambda rest: (rest, is_endpoint_healthy(rest["address"])),
+                rest_endpoints,
+            )
+            if is_healthy
+        ]
 
     return healthy_rest_endpoints[:5]  # Select the first 5 healthy REST endpoints
 
+
 def is_endpoint_healthy(endpoint):
     try:
         response = requests.get(f"{endpoint}/health", timeout=1, verify=False)
         # some chains dont implement the /health endpoint. Should we just skip /health and go directly to the below?
         if response.status_code == 501:
-            response = requests.get(f"{endpoint}/cosmos/gov/v1beta1/proposals?proposal_status=2", timeout=1, verify=False)
+            response = requests.get(
+                f"{endpoint}/cosmos/gov/v1beta1/proposals?proposal_status=2",
+                timeout=1,
+                verify=False,
+            )
         return response.status_code == 200
     except:
         return False
 
+
 def get_healthy_endpoints(endpoints):
     healthy_endpoints = []
 
     def check_endpoint(endpoint):
-        if is_endpoint_healthy(endpoint['address']):
+        if is_endpoint_healthy(endpoint["address"]):
             healthy_endpoints.append(endpoint)
 
     with ThreadPoolExecutor(max_workers=num_workers) as executor:
@@ -123,6 +156,7 @@ def check_endpoint(endpoint):
 
     return healthy_endpoints
 
+
 def check_rest_endpoint(rest_url):
     """Check the REST endpoint and return the application version and response time."""
     start_time = datetime.now()
@@ -132,65 +166,76 @@
         elapsed_time = (datetime.now() - start_time).total_seconds()
 
         data = response.json()
-        app_version = data.get('application_version', {}).get('version')
+        app_version = data.get("application_version", {}).get("version")
 
         return app_version, elapsed_time
     except (requests.RequestException, requests.Timeout):
         return None, (datetime.now() - start_time).total_seconds()
 
+
 def get_latest_block_height_rpc(rpc_url):
     """Fetch the latest block height from the RPC endpoint."""
     try:
         response = requests.get(f"{rpc_url}/status", timeout=1)
         response.raise_for_status()
         data = response.json()
-        return int(data.get('result', {}).get('sync_info', {}).get('latest_block_height', 0))
+        return int(
+            data.get("result", {}).get("sync_info", {}).get("latest_block_height", 0)
+        )
     except requests.RequestException as e:
         return -1  # Return -1 to indicate an error
 
+
 def get_block_time_rpc(rpc_url, height):
     """Fetch the block header time for a given block height from the RPC endpoint."""
     try:
         response = requests.get(f"{rpc_url}/block?height={height}", timeout=1)
         response.raise_for_status()
         data = response.json()
-        return data.get('result', {}).get('block', {}).get('header', {}).get('time', "")
+        return data.get("result", {}).get("block", {}).get("header", {}).get("time", "")
     except requests.RequestException as e:
         return None
 
+
 def parse_isoformat_string(date_string):
     date_string = re.sub(r"(\.\d{6})\d+Z", r"\1Z", date_string)
     date_string = date_string.replace("Z", "+00:00")
     return datetime.fromisoformat(date_string)
 
+
 def reorder_data(data):
-    ordered_data = OrderedDict([
-        ("type", data.get("type")),
-        ("network", data.get("network")),
-        ("rpc_server", data.get("rpc_server")),
-        ("rest_server", data.get("rest_server")),
-        ("latest_block_height", data.get("latest_block_height")),
-        ("upgrade_found", data.get("upgrade_found")),
-        ("upgrade_name", data.get("upgrade_name")),
-        ("source", data.get("source")),
-        ("upgrade_block_height", data.get("upgrade_block_height")),
-        ("estimated_upgrade_time", data.get("estimated_upgrade_time")),
-        ("version", data.get("version")),
-        ('error', data.get("error"))
-    ])
+    ordered_data = OrderedDict(
+        [
+            ("type", data.get("type")),
+            ("network", data.get("network")),
+            ("rpc_server", data.get("rpc_server")),
+            ("rest_server", data.get("rest_server")),
+            ("latest_block_height", data.get("latest_block_height")),
+            ("upgrade_found", data.get("upgrade_found")),
+            ("upgrade_name", data.get("upgrade_name")),
+            ("source", data.get("source")),
+            ("upgrade_block_height", data.get("upgrade_block_height")),
+            ("estimated_upgrade_time", data.get("estimated_upgrade_time")),
+            ("version", data.get("version")),
+            ("error", data.get("error")),
+        ]
+    )
     return ordered_data
 
+
 def fetch_all_endpoints(network_type, base_url, request_data):
     """Fetch all the REST and RPC endpoints for all networks and store in a map."""
-    networks = request_data.get("MAINNETS", []) if network_type == "mainnet" else request_data.get("TESTNETS", [])
+    networks = (
+        request_data.get("MAINNETS", [])
+        if network_type == "mainnet"
+        else request_data.get("TESTNETS", [])
+    )
     endpoints_map = {}
     for network in networks:
         rest_endpoints, rpc_endpoints = fetch_endpoints(network, base_url)
-        endpoints_map[network] = {
-            "rest": rest_endpoints,
-            "rpc": rpc_endpoints
-        }
+        endpoints_map[network] = {"rest": rest_endpoints, "rpc": rpc_endpoints}
     return endpoints_map
 
+
 def fetch_endpoints(network, base_url):
     """Fetch the REST and RPC endpoints for a given network."""
     try:
@@ -204,9 +249,12 @@
     except requests.RequestException:
         return [], []
 
+
 def fetch_active_upgrade_proposals(rest_url):
     try:
-        response = requests.get(f"{rest_url}/cosmos/gov/v1beta1/proposals?proposal_status=2", verify=False)
+        response = requests.get(
+            f"{rest_url}/cosmos/gov/v1beta1/proposals?proposal_status=2", verify=False
+        )
 
         # Handle 501 Server Error
         if response.status_code == 501:
@@ -217,7 +265,10 @@
 
         for proposal in data.get("proposals", []):
             content = proposal.get("content", {})
-            if content.get("@type") == "/cosmos.upgrade.v1beta1.SoftwareUpgradeProposal":
+            if (
+                content.get("@type")
+                == "/cosmos.upgrade.v1beta1.SoftwareUpgradeProposal"
+            ):
                 # Extract version from the plan name
                 plan = content.get("plan", {})
                 plan_name = plan.get("name", "")
("upgrade_block_height", data.get("upgrade_block_height")), - ("estimated_upgrade_time", data.get("estimated_upgrade_time")), - ("version", data.get("version")), - ('error', data.get("error")) - ]) + ordered_data = OrderedDict( + [ + ("type", data.get("type")), + ("network", data.get("network")), + ("rpc_server", data.get("rpc_server")), + ("rest_server", data.get("rest_server")), + ("latest_block_height", data.get("latest_block_height")), + ("upgrade_found", data.get("upgrade_found")), + ("upgrade_name", data.get("upgrade_name")), + ("source", data.get("source")), + ("upgrade_block_height", data.get("upgrade_block_height")), + ("estimated_upgrade_time", data.get("estimated_upgrade_time")), + ("version", data.get("version")), + ("error", data.get("error")), + ] + ) return ordered_data + def fetch_all_endpoints(network_type, base_url, request_data): """Fetch all the REST and RPC endpoints for all networks and store in a map.""" - networks = request_data.get("MAINNETS", []) if network_type == "mainnet" else request_data.get("TESTNETS", []) + networks = ( + request_data.get("MAINNETS", []) + if network_type == "mainnet" + else request_data.get("TESTNETS", []) + ) endpoints_map = {} for network in networks: rest_endpoints, rpc_endpoints = fetch_endpoints(network, base_url) - endpoints_map[network] = { - "rest": rest_endpoints, - "rpc": rpc_endpoints - } + endpoints_map[network] = {"rest": rest_endpoints, "rpc": rpc_endpoints} return endpoints_map + def fetch_endpoints(network, base_url): """Fetch the REST and RPC endpoints for a given network.""" try: @@ -204,9 +249,12 @@ def fetch_endpoints(network, base_url): except requests.RequestException: return [], [] + def fetch_active_upgrade_proposals(rest_url): try: - response = requests.get(f"{rest_url}/cosmos/gov/v1beta1/proposals?proposal_status=2", verify=False) + response = requests.get( + f"{rest_url}/cosmos/gov/v1beta1/proposals?proposal_status=2", verify=False + ) # Handle 501 Server Error if response.status_code == 501: @@ -217,7 +265,10 @@ def fetch_active_upgrade_proposals(rest_url): for proposal in data.get("proposals", []): content = proposal.get("content", {}) - if content.get("@type") == "/cosmos.upgrade.v1beta1.SoftwareUpgradeProposal": + if ( + content.get("@type") + == "/cosmos.upgrade.v1beta1.SoftwareUpgradeProposal" + ): # Extract version from the plan name plan = content.get("plan", {}) plan_name = plan.get("name", "") @@ -238,42 +289,59 @@ def fetch_active_upgrade_proposals(rest_url): print(f"Error received from server {rest_url}: {e}") raise e except Exception as e: - print(f"Unhandled error while requesting active upgrade endpoint from {rest_url}: {e}") + print( + f"Unhandled error while requesting active upgrade endpoint from {rest_url}: {e}" + ) raise e + def fetch_current_upgrade_plan(rest_url): try: - response = requests.get(f"{rest_url}/cosmos/upgrade/v1beta1/current_plan", verify=False) + response = requests.get( + f"{rest_url}/cosmos/upgrade/v1beta1/current_plan", verify=False + ) response.raise_for_status() data = response.json() plan = data.get("plan", {}) if plan: plan_name = plan.get("name", "") - version_match = SEMANTIC_VERSION_PATTERN.search(plan_name) - if version_match: - version = version_match.group(1) + + # Convert the plan to string and search for the version pattern + plan_dump = json.dumps(plan) + + # Get all version matches + version_matches = SEMANTIC_VERSION_PATTERN.findall(plan_dump) + print(version_matches) + + if version_matches: + # Find the longest match + version = max(version_matches, key=len) 
                 try:
                     height = int(plan.get("height", 0))
                 except ValueError:
                     height = 0
                 return plan_name, version, height
+
         return None, None, None
     except requests.RequestException as e:
         print(f"Error received from server {rest_url}: {e}")
         raise e
     except Exception as e:
-        print(f"Unhandled error while requesting current upgrade endpoint from {rest_url}: {e}")
+        print(
+            f"Unhandled error while requesting current upgrade endpoint from {rest_url}: {e}"
+        )
         raise e
 
+
 def fetch_data_for_network(network, network_type, repo_path):
     """Fetch data for a given network."""
 
     # Construct the path to the chain.json file based on network type
     if network_type == "mainnet":
-        chain_json_path = os.path.join(repo_path, network, 'chain.json')
+        chain_json_path = os.path.join(repo_path, network, "chain.json")
     elif network_type == "testnet":
-        chain_json_path = os.path.join(repo_path, 'testnets', network, 'chain.json')
+        chain_json_path = os.path.join(repo_path, "testnets", network, "chain.json")
     else:
         raise ValueError(f"Invalid network type: {network_type}")
@@ -281,17 +349,19 @@
         "network": network,
         "type": network_type,
         "error": "insufficient data in Cosmos chain registry, consider a PR to cosmos/chain-registry",
-        "upgrade_found": False
+        "upgrade_found": False,
     }
 
     # Check if the chain.json file exists
     if not os.path.exists(chain_json_path):
         print(f"chain.json not found for network {network}. Skipping...")
-        err_output_data["error"] = f"insufficient data in Cosmos chain registry, chain.json not found for {network}. Consider a PR to cosmos/chain-registry"
+        err_output_data[
+            "error"
+        ] = f"insufficient data in Cosmos chain registry, chain.json not found for {network}. Consider a PR to cosmos/chain-registry"
         return err_output_data
 
     # Load the chain.json data
-    with open(chain_json_path, 'r') as file:
+    with open(chain_json_path, "r") as file:
         data = json.load(file)
 
     rest_endpoints = data.get("apis", {}).get("rest", [])
@@ -303,8 +373,12 @@
     healthy_rest_endpoints = get_healthy_rest_endpoints(rest_endpoints)
 
     if len(healthy_rpc_endpoints) == 0:
-        print(f"No healthy RPC endpoints found for network {network} while searching through {len(rpc_endpoints)} endpoints. Skipping...")
-        err_output_data["error"] = f"insufficient data in Cosmos chain registry, no healthy RPC servers for {network}. Consider a PR to cosmos/chain-registry"
+        print(
+            f"No healthy RPC endpoints found for network {network} while searching through {len(rpc_endpoints)} endpoints. Skipping..."
+        )
+        err_output_data[
+            "error"
+        ] = f"insufficient data in Cosmos chain registry, no healthy RPC servers for {network}. Consider a PR to cosmos/chain-registry"
         return err_output_data
 
     # Shuffle the healthy endpoints
@@ -313,19 +387,25 @@
     rpc_server_used = ""
 
     for rpc_endpoint in healthy_rpc_endpoints:
-        latest_block_height = get_latest_block_height_rpc(rpc_endpoint['address'])
+        latest_block_height = get_latest_block_height_rpc(rpc_endpoint["address"])
         if latest_block_height > 0:
-            rpc_server_used = rpc_endpoint['address']
+            rpc_server_used = rpc_endpoint["address"]
             break
 
     if len(healthy_rest_endpoints) == 0:
-        print(f"No healthy REST endpoints found for network {network} while searching through {len(rest_endpoints)} endpoints. Skipping...")
-        err_output_data["error"] = f"insufficient data in Cosmos chain registry, no healthy REST servers for {network}. Consider a PR to cosmos/chain-registry"
+        print(
+            f"No healthy REST endpoints found for network {network} while searching through {len(rest_endpoints)} endpoints. Skipping..."
+        )
+        err_output_data[
+            "error"
+        ] = f"insufficient data in Cosmos chain registry, no healthy REST servers for {network}. Consider a PR to cosmos/chain-registry"
         err_output_data["latest_block_height"] = latest_block_height
         err_output_data["rpc_server"] = rpc_server_used
         return err_output_data
 
-    print(f"Found {len(healthy_rest_endpoints)} rest endpoints and {len(healthy_rpc_endpoints)} rpc endpoints for {network}")
+    print(
+        f"Found {len(healthy_rest_endpoints)} rest endpoints and {len(healthy_rpc_endpoints)} rpc endpoints for {network}"
+    )
 
     # Check for active upgrade proposals
     upgrade_block_height = None
@@ -340,17 +420,33 @@
         if current_endpoint in SERVER_BLACKLIST:
             continue
         try:
-            active_upgrade_name, active_upgrade_version, active_upgrade_height = fetch_active_upgrade_proposals(current_endpoint)
-            current_upgrade_name, current_upgrade_version, current_upgrade_height = fetch_current_upgrade_plan(current_endpoint)
+            (
+                active_upgrade_name,
+                active_upgrade_version,
+                active_upgrade_height,
+            ) = fetch_active_upgrade_proposals(current_endpoint)
+            (
+                current_upgrade_name,
+                current_upgrade_version,
+                current_upgrade_height,
+            ) = fetch_current_upgrade_plan(current_endpoint)
         except:
             if index + 1 < len(healthy_rest_endpoints):
-                print(f"Failed to query rest endpoints {current_endpoint}, trying next rest endpoint")
+                print(
+                    f"Failed to query rest endpoints {current_endpoint}, trying next rest endpoint"
+                )
                continue
             else:
-                print(f"Failed to query rest endpoints {current_endpoint}, all out of endpoints to try")
+                print(
+                    f"Failed to query rest endpoints {current_endpoint}, all out of endpoints to try"
+                )
                 break
 
-        if active_upgrade_version and (active_upgrade_height is not None) and active_upgrade_height > latest_block_height:
+        if (
+            active_upgrade_version
+            and (active_upgrade_height is not None)
+            and active_upgrade_height > latest_block_height
+        ):
             upgrade_block_height = active_upgrade_height
             upgrade_version = active_upgrade_version
             upgrade_name = active_upgrade_name
@@ -358,7 +454,11 @@
             rest_server_used = current_endpoint
             break
 
-        if current_upgrade_version and (current_upgrade_height is not None) and current_upgrade_height > latest_block_height:
+        if (
+            current_upgrade_version
+            and (current_upgrade_height is not None)
+            and current_upgrade_height > latest_block_height
+        ):
             upgrade_block_height = current_upgrade_height
             upgrade_version = current_upgrade_version
             upgrade_name = current_upgrade_name
@@ -367,11 +467,10 @@
             break
 
         if not active_upgrade_version and not current_upgrade_version:
-            #this is where the "no upgrades found block runs"
+            # this is where the "no upgrades found" block runs
             rest_server_used = current_endpoint
             break
 
-
     # Calculate average block time
     current_block_time = get_block_time_rpc(rpc_server_used, latest_block_height)
     past_block_time = get_block_time_rpc(rpc_server_used, latest_block_height - 10000)
@@ -380,14 +479,22 @@
     if current_block_time and past_block_time:
         current_block_datetime = parse_isoformat_string(current_block_time)
         past_block_datetime = parse_isoformat_string(past_block_time)
-        avg_block_time_seconds = (current_block_datetime - past_block_datetime).total_seconds() / 10000
+        avg_block_time_seconds = (
+            current_block_datetime - past_block_datetime
+        ).total_seconds() / 10000
 
     # Estimate the upgrade time
     estimated_upgrade_time = None
     if upgrade_block_height and avg_block_time_seconds:
-        estimated_seconds_until_upgrade = avg_block_time_seconds * (upgrade_block_height - latest_block_height)
-        estimated_upgrade_datetime = datetime.utcnow() + timedelta(seconds=estimated_seconds_until_upgrade)
-        estimated_upgrade_time = estimated_upgrade_datetime.isoformat().replace('+00:00', 'Z')
+        estimated_seconds_until_upgrade = avg_block_time_seconds * (
+            upgrade_block_height - latest_block_height
+        )
+        estimated_upgrade_datetime = datetime.utcnow() + timedelta(
+            seconds=estimated_seconds_until_upgrade
+        )
+        estimated_upgrade_time = estimated_upgrade_datetime.isoformat().replace(
+            "+00:00", "Z"
+        )
 
     output_data = {
         "network": network,
@@ -400,11 +507,12 @@
         "source": source,
         "upgrade_block_height": upgrade_block_height,
         "estimated_upgrade_time": estimated_upgrade_time,
-        "version": upgrade_version
+        "version": upgrade_version,
     }
     print(f"Completed fetch data for network {network}")
     return output_data
 
+
 # periodic cache update
 def update_data():
     """Function to periodically update the data for mainnets and testnets."""
@@ -426,35 +534,69 @@
         try:
             # Process mainnets & testnets
-            mainnet_networks = [d for d in os.listdir(repo_path)
-                                if os.path.isdir(os.path.join(repo_path, d))
-                                and not d.startswith(('.', '_'))
-                                and d != "testnets"]
+            mainnet_networks = [
+                d
+                for d in os.listdir(repo_path)
+                if os.path.isdir(os.path.join(repo_path, d))
+                and not d.startswith((".", "_"))
+                and d != "testnets"
+            ]
 
             if len(CHAIN_WATCH) != 0:
                 mainnet_networks = [d for d in mainnet_networks if d in CHAIN_WATCH]
 
-            testnet_path = os.path.join(repo_path, 'testnets')
-            testnet_networks = [d for d in os.listdir(testnet_path)
-                                if os.path.isdir(os.path.join(testnet_path, d))
-                                and not d.startswith(('.', '_'))]
+            testnet_path = os.path.join(repo_path, "testnets")
+            testnet_networks = [
+                d
+                for d in os.listdir(testnet_path)
+                if os.path.isdir(os.path.join(testnet_path, d))
+                and not d.startswith((".", "_"))
+            ]
 
             if len(CHAIN_WATCH) != 0:
                 testnet_networks = [d for d in testnet_networks if d in CHAIN_WATCH]
 
             with ThreadPoolExecutor() as executor:
-                testnet_data = list(filter(None, executor.map(lambda network, path: fetch_data_for_network(network, "testnet", path), testnet_networks, [repo_path]*len(testnet_networks))))
-                mainnet_data = list(filter(None, executor.map(lambda network, path: fetch_data_for_network(network, "mainnet", path), mainnet_networks, [repo_path]*len(mainnet_networks))))
+                testnet_data = list(
+                    filter(
+                        None,
+                        executor.map(
+                            lambda network, path: fetch_data_for_network(
+                                network, "testnet", path
+                            ),
+                            testnet_networks,
+                            [repo_path] * len(testnet_networks),
+                        ),
+                    )
+                )
+                mainnet_data = list(
+                    filter(
+                        None,
+                        executor.map(
+                            lambda network, path: fetch_data_for_network(
+                                network, "mainnet", path
+                            ),
+                            mainnet_networks,
+                            [repo_path] * len(mainnet_networks),
+                        ),
+                    )
+                )
 
             # Update the Flask cache
-            cache.set('MAINNET_DATA', mainnet_data)
-            cache.set('TESTNET_DATA', testnet_data)
-
-            elapsed_time = (datetime.now() - start_time).total_seconds()  # Calculate the elapsed time
-            print(f"Data update cycle completed in {elapsed_time} seconds. Sleeping for 1 minute...")
+            cache.set("MAINNET_DATA", mainnet_data)
+            cache.set("TESTNET_DATA", testnet_data)
+
+            elapsed_time = (
+                datetime.now() - start_time
+            ).total_seconds()  # Calculate the elapsed time
+            print(
+                f"Data update cycle completed in {elapsed_time} seconds. Sleeping for 1 minute..."
+            )
             sleep(60)
 
         except Exception as e:
-            elapsed_time = (datetime.now() - start_time).total_seconds()  # Calculate the elapsed time in case of an error
+            elapsed_time = (
+                datetime.now() - start_time
+            ).total_seconds()  # Calculate the elapsed time in case of an error
             print(f"Error in update_data loop after {elapsed_time} seconds: {e}")
             print("Error encountered. Sleeping for 1 minute before retrying...")
             sleep(60)
@@ -465,25 +607,29 @@ def start_update_data_thread():
     update_thread.daemon = True
     update_thread.start()
 
-@app.route('/healthz')
+
+@app.route("/healthz")
 def health_check():
     return jsonify(status="OK"), 200
 
-@app.route('/fetch', methods=['POST'])
+
+@app.route("/fetch", methods=["POST"])
 def fetch_network_data():
     try:
         request_data = request.get_json()
         if not request_data:
             return jsonify({"error": "Invalid payload"}), 400
 
-        mainnet_data = cache.get('MAINNET_DATA')
-        testnet_data = cache.get('TESTNET_DATA')
+        mainnet_data = cache.get("MAINNET_DATA")
+        testnet_data = cache.get("TESTNET_DATA")
 
         # If the data is not in the cache, fetch it live
         if not mainnet_data or not testnet_data:
             results = []
-            for network_type, networks in [("mainnet", request_data.get("MAINNETS", [])),
-                                           ("testnet", request_data.get("TESTNETS", []))]:
+            for network_type, networks in [
+                ("mainnet", request_data.get("MAINNETS", [])),
+                ("testnet", request_data.get("TESTNETS", [])),
+            ]:
                 for network in networks:
                     try:
                         network_data = fetch_data_for_network(network, network_type)
@@ -492,13 +638,24 @@ def fetch_network_data():
                         print(f"Error fetching data for network {network}: {e}")
         else:
             # Filter the cached data based on the networks provided in the POST request
-            filtered_mainnet_data = [data for data in mainnet_data if data['network'] in request_data.get("MAINNETS", [])]
-            filtered_testnet_data = [data for data in testnet_data if data['network'] in request_data.get("TESTNETS", [])]
+            filtered_mainnet_data = [
+                data
+                for data in mainnet_data
+                if data["network"] in request_data.get("MAINNETS", [])
+            ]
+            filtered_testnet_data = [
+                data
+                for data in testnet_data
+                if data["network"] in request_data.get("TESTNETS", [])
+            ]
             results = filtered_mainnet_data + filtered_testnet_data
 
-        sorted_results = sorted(results, key=lambda x: x['upgrade_found'], reverse=True)
+        sorted_results = sorted(results, key=lambda x: x["upgrade_found"], reverse=True)
         reordered_results = [reorder_data(result) for result in sorted_results]
-        return Response(json.dumps(reordered_results, indent=2) + '\n', content_type="application/json")
+        return Response(
+            json.dumps(reordered_results, indent=2) + "\n",
+            content_type="application/json",
+        )
 
     except Exception as e:
         return jsonify({"error": str(e)}), 500
@@ -506,31 +663,38 @@ def fetch_network_data():
     except Exception as e:
         return jsonify({"error": str(e)}), 500
 
-@app.route('/mainnets')
+
+@app.route("/mainnets")
 # @cache.cached(timeout=600)  # Cache the result for 10 minutes
 def get_mainnet_data():
-    results = cache.get('MAINNET_DATA')
+    results = cache.get("MAINNET_DATA")
     if results is None:
         return jsonify({"error": "Data not available"}), 500
 
     results = [r for r in results if r is not None]
-    sorted_results = sorted(results, key=lambda x: x['upgrade_found'], reverse=True)
+    sorted_results = sorted(results, key=lambda x: x["upgrade_found"], reverse=True)
     reordered_results = [reorder_data(result) for result in sorted_results]
-    return Response(json.dumps(reordered_results) + '\n', content_type="application/json")
+    return Response(
+        json.dumps(reordered_results) + "\n", content_type="application/json"
+    )
+
 
-@app.route('/testnets')
+@app.route("/testnets")
 # @cache.cached(timeout=600)  # Cache the result for 10 minutes
 def get_testnet_data():
-    results = cache.get('TESTNET_DATA')
+    results = cache.get("TESTNET_DATA")
    if results is None:
         return jsonify({"error": "Data not available"}), 500
 
     results = [r for r in results if r is not None]
-    sorted_results = sorted(results, key=lambda x: x['upgrade_found'], reverse=True)
+    sorted_results = sorted(results, key=lambda x: x["upgrade_found"], reverse=True)
     reordered_results = [reorder_data(result) for result in sorted_results]
-    return Response(json.dumps(reordered_results) + '\n', content_type="application/json")
+    return Response(
+        json.dumps(reordered_results) + "\n", content_type="application/json"
+    )
+
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     app.debug = True
     start_update_data_thread()
-    app.run(host='0.0.0.0', use_reloader=False)
+    app.run(host="0.0.0.0", use_reloader=False)
diff --git a/mainnets-filtered.sh b/mainnets-filtered.sh
index ce431c0..1f41d9c 100755
--- a/mainnets-filtered.sh
+++ b/mainnets-filtered.sh
@@ -1,11 +1,12 @@
 #!/bin/bash
 
 declare -A networks=(
-  [mainnets]="osmosis neutron nolus crescent akash cosmoshub sentinel stargaze omniflixhub cosmoshub terra kujira stride injective juno agoric evmos noble omny quasar dvpn onomy"
+  [mainnets]="secretnetwork osmosis neutron nolus crescent akash cosmoshub sentinel stargaze omniflixhub terra kujira stride injective juno agoric evmos noble omny quasar dvpn onomy"
   [testnets]="agorictestnet quasartestnet stridetestnet onomytestnet axelartestnet nibirutestnet nobletestnet dydxtestnet osmosistestnet cosmoshubtestnet"
 )
 
 base_url="https://cosmos-upgrades.apis.defiantlabs.net"
+# base_url="http://localhost:5000"
 
 # Loop over both mainnets and testnets
 for type in "${!networks[@]}"; do
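
A quick way to smoke-test the reformatted service end to end is to exercise the /fetch route directly. This is an illustrative sketch, not part of the diff: it assumes a local instance on Flask's default port 5000 (the commented-out base_url above) and that the background update thread has already completed a cycle so the cache is populated; the chain names are examples taken from the lists in mainnets-filtered.sh.

import requests

# /fetch reads the MAINNETS/TESTNETS keys from the JSON payload (see fetch_network_data)
payload = {"MAINNETS": ["osmosis", "cosmoshub"], "TESTNETS": ["osmosistestnet"]}
resp = requests.post("http://localhost:5000/fetch", json=payload, timeout=60)
resp.raise_for_status()
for entry in resp.json():
    print(entry["network"], entry["upgrade_found"], entry.get("version"))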