Skip to content

Commit

Permalink
Merge branch 'main' into direct_mapping_feature
Browse files Browse the repository at this point in the history
  • Loading branch information
ibhatt-jumptrading authored Jul 31, 2024
2 parents bac8dfd + 189a80e commit 03f64c1
Show file tree
Hide file tree
Showing 54 changed files with 1,061 additions and 505 deletions.
3 changes: 3 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,9 @@ target/
*.lcov
*.profraw

# GDB
.gdb_history

# Clangd
.cache
.clangd
Expand Down
49 changes: 17 additions & 32 deletions contrib/checkptify/autodownload.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,31 +21,8 @@
print( f"solana endpoint: {solana_url}" )

def download(url):
while True:
print( f'trying {url}' )
cmd = f'curl --max-redirs 0 --silent {url}'
proc = subprocess.Popen( cmd, shell=True, stdout=subprocess.PIPE )
newname = proc.stdout.read().decode("utf-8").split('/')[-1]
if os.path.exists(newname) and os.stat(newname).st_size > 0:
return (newname,False)
if len(newname) == 0:
# We are temporarily banned
print( f'"{cmd}" failed' )
time.sleep( 10 )
continue

print( f'downloading {newname} ...' )
subprocess.run( 'rm -f tmp', shell=True )
cmd = f'wget --output-document=tmp --quiet {url}'
subprocess.run( cmd, shell=True )
if not (os.path.exists('tmp') and os.stat('tmp').st_size > 0):
print( f'"{cmd}" failed' )
time.sleep( 10 )
continue

subprocess.run( f'mv -f tmp {newname}', shell=True )
print( f'downloaded {newname}' )
return (newname,True)
cmd = f'wget --no-clobber --trust-server-names {url}'
subprocess.run( cmd, shell=True )

def relink(snap, link):
subprocess.run( f'rm -f tmp-link', shell=True )
Expand All @@ -54,18 +31,13 @@ def relink(snap, link):
print( f'linked {link} to {snap}' )

def rmold(files, keep):
files.sort( key=os.path.getmtime, reverse=True)
for i in range(keep, len(files)):
os.remove( files[i] )
print( f'removed {files[i]}' )

while True:
(fullsnap,fullsnapnew) = download( f'{solana_url}/snapshot.tar.bz2' )
(incsnap,incsnapnew) = download( f'{solana_url}/incremental-snapshot.tar.bz2' )

if (fullsnapnew or incsnapnew) and (fullsnap.split('-')[1] == incsnap.split('-')[2]):
relink( fullsnap, 'snapshot.tar.bz2' )
relink( incsnap, 'incremental-snapshot.tar.bz2' )
download( f'{solana_url}/snapshot.tar.bz2' )
download( f'{solana_url}/incremental-snapshot.tar.bz2' )

fullfiles = []
incfiles = []
Expand All @@ -76,7 +48,20 @@ def rmold(files, keep):
elif "snapshot" in file:
if file != 'snapshot.tar.bz2':
fullfiles.append(file)

fullfiles.sort( key=(lambda n: int(n.split('-')[1])), reverse=True );
incfiles.sort( key=(lambda n: int(n.split('-')[3])), reverse=True );

rmold(fullfiles, 2)
rmold(incfiles, 3)

if fullfiles[0].split('-')[1] == incfiles[0].split('-')[2]:
fullname = os.path.realpath(fullfiles[0])
incname = os.path.realpath(incfiles[0])
print(f'FULLSNAP={fullname}')
print(f'INCSNAP={incname}')
with open('latest', 'w') as fd:
fd.write(f'FULLSNAP={fullname}\n')
fd.write(f'INCSNAP={incname}\n')

time.sleep(30)
4 changes: 2 additions & 2 deletions contrib/ledger-gen/run_cluster.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ async def first_cluster_validator(expected_shred_version, expected_genesis_hash,
vote_pubkey = await get_pubkey(vote_key, solana_source_directory)

process = await asyncio.create_subprocess_shell(
f"{solana_binary('agave-validator', solana_source_directory)} --enable-rpc-transaction-history --allow-private-addr --identity {identity_key} --ledger {ledger_path} --limit-ledger-size 100000000 --dynamic-port-range 8000-8100 --no-snapshot-fetch --no-poh-speed-test --no-os-network-limits-test --vote-account {vote_pubkey} --expected-shred-version {expected_shred_version} --expected-genesis-hash {expected_genesis_hash} --no-wait-for-vote-to-start-leader --no-incremental-snapshots --full-snapshot-interval-slots {snapshot_interval} --maximum-full-snapshots-to-retain {snapshots_to_retain} --rpc-port 8899 --gossip-port 8010 --full-rpc-api --tpu-enable-udp --log {ledger_path}/validator.log",
f"{solana_binary('agave-validator', solana_source_directory)} --enable-rpc-transaction-history --allow-private-addr --identity {identity_key} --ledger {ledger_path} --limit-ledger-size 100000000 --dynamic-port-range 8000-8100 --no-snapshot-fetch --no-poh-speed-test --no-os-network-limits-test --vote-account {vote_pubkey} --expected-shred-version {expected_shred_version} --expected-genesis-hash {expected_genesis_hash} --no-wait-for-vote-to-start-leader --no-incremental-snapshots --full-snapshot-interval-slots {snapshot_interval} --snapshot-interval-slots {snapshot_interval} --maximum-full-snapshots-to-retain {snapshots_to_retain} --rpc-port 8899 --gossip-port 8010 --full-rpc-api --tpu-enable-udp --log {ledger_path}/validator.log",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE
)
Expand Down Expand Up @@ -180,7 +180,7 @@ async def spawn_solana_cluster(nodes, output_dir, solana_source_directory, tick_
)
stdout, _ = await process.communicate()

if os.path.exists(os.path.join(output_dir, "node-ledger-0", "snapshot", str(snapshot_interval), "state_complete")):
if os.path.exists(os.path.join(output_dir, "node-ledger-0", "snapshot", str(snapshot_interval), "state_complete")) or os.path.exists(os.path.join(output_dir, "node-ledger-0", "snapshots", str(snapshot_interval), "state_complete")):
break
await asyncio.sleep(1)

Expand Down
8 changes: 8 additions & 0 deletions contrib/ledger-gen/src/ledgers.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@ use {
};

use crate::bpf_loader;
use crate::stake;

/// CI Link: gs://firedancer-ci-resources/v18multi-bpf-loader.tar.gz
pub fn bpf_loader_ledger(client: &RpcClient, arc_client: &Arc<RpcClient>, payer: &Keypair, program_data: &Vec<u8>, account_data: &Vec<u8>) {
Expand All @@ -29,3 +30,10 @@ pub fn bpf_loader_ledger(client: &RpcClient, arc_client: &Arc<RpcClient>, payer:
bpf_loader::close_redeploy_same_slot(&client, &arc_client, &payer, &program_data, &account_data);
bpf_loader::close_redeploy_diff_slot(&client, &arc_client, &payer, &program_data, &account_data);
}

/// Generates stake-program ledger activity by driving the scenarios in the
/// `stake` module against a running cluster.
///
/// CI Link: gs://firedancer-ci-resources/v203-move-stake.tar.gz
/// CI Link: gs://firedancer-ci-resources/v203-move-lamports.tar.gz
pub fn stake_ledger(client: &RpcClient, payer: &Keypair) {
    // NOTE(review): the MoveStake scenario is currently disabled; re-enable it
    // to regenerate the v203-move-stake ledger (it reads a local vote keypair
    // from a hardcoded path — see stake::move_stake).
    // stake::move_stake(&client, &payer);
    stake::move_lamports(&client, &payer);
}
4 changes: 3 additions & 1 deletion contrib/ledger-gen/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ mod utils;

mod bpf_loader;
mod nonce;
mod stake;

/// Workflow for Creating Ledgers
/// * Set up all buffer accounts
Expand Down Expand Up @@ -53,5 +54,6 @@ fn main() {
let account_data = vec![0u8; 4];

// ----------------------- ONLY CHANGE BELOW THIS LINE -----------------------
ledgers::bpf_loader_ledger(&rpc_client, &arc_client, &payer, &program_data, &account_data);
// ledgers::bpf_loader_ledger(&rpc_client, &arc_client, &payer, &program_data, &account_data);
ledgers::stake_ledger(&rpc_client, &payer);
}
130 changes: 130 additions & 0 deletions contrib/ledger-gen/src/stake.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
use {
solana_sdk::{
signature::{Keypair, Signer, read_keypair_file},
commitment_config::{CommitmentConfig},
feature::{self, Feature},
feature_set,
nonce::{State as NonceState},
system_instruction,
system_program,
message::Message,
transaction::Transaction,
stake::{
self,
instruction::{self as stake_instruction, LockupArgs, StakeError},
state::{
Authorized, Lockup, Meta, StakeActivationStatus, StakeAuthorize, StakeStateV2,
},
tools::{acceptable_reference_epoch_credits, eligible_for_deactivate_delinquent},
},
},
solana_client::{
rpc_client::{RpcClient},
},
solana_rpc_client_nonce_utils::{get_account_with_commitment, nonblocking},
solana_cli::{
spend_utils::{SpendAmount, resolve_spend_tx_and_check_account_balance},
}
};

use crate::instructions;
use crate::utils;

/// Creates two payer-funded stake accounts and submits a MoveLamports
/// instruction transferring 10_000_000 lamports from the first to the second,
/// logging each confirmed transaction's slot along the way.
pub fn move_lamports(client: &RpcClient, payer: &Keypair) {
    // The payer is both staker and withdrawer on every account we create.
    let authorized = Authorized {
        staker: payer.pubkey(),
        withdrawer: payer.pubkey(),
    };

    // Fund and initialize the source stake account.
    let source = Keypair::new();
    let create_source_ixs = stake_instruction::create_account_checked(
        &payer.pubkey(),
        &source.pubkey(),
        &authorized,
        1000000000,
    );
    let tx = utils::create_message_and_sign(&create_source_ixs, &payer, vec![&payer, &source], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&tx).unwrap();
    println!("Created From Stake Account {:?} - Slot: {:?}", source.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());

    // Fund and initialize the destination stake account the same way.
    let dest = Keypair::new();
    let create_dest_ixs = stake_instruction::create_account_checked(
        &payer.pubkey(),
        &dest.pubkey(),
        &authorized,
        1000000000,
    );
    let tx = utils::create_message_and_sign(&create_dest_ixs, &payer, vec![&payer, &dest], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&tx).unwrap();
    println!("Created To Stake Account {:?} - Slot: {:?}", dest.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());

    // Move a slice of the source account's lamports to the destination.
    let move_ixs = vec![stake_instruction::move_lamports(
        &source.pubkey(),
        &dest.pubkey(),
        &payer.pubkey(),
        10000000,
    )];
    let tx = utils::create_message_and_sign(&move_ixs, &payer, vec![&payer], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&tx).unwrap();
    println!("Moved Lamport from {:?} to {:?} - Slot: {:?}", source.pubkey(), dest.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());
}

/// Creates and delegates a source stake account, creates a destination stake
/// account, waits at least 1000 slots, then submits a MoveStake instruction
/// transferring 100_000_000 lamports of stake between them. Each confirmed
/// transaction's slot is printed for ledger bookkeeping.
///
/// NOTE(review): the voter keypair is read from a hardcoded developer path —
/// TODO parameterize before running outside that environment.
pub fn move_stake(client: &RpcClient, payer: &Keypair) {
    let from_stake_account = Keypair::new();

    // The payer is both staker and withdrawer on every account we create.
    let authorized = Authorized {
        staker: payer.pubkey(),
        withdrawer: payer.pubkey(),
    };

    // Fund and initialize the source stake account.
    let create_from_stake_account_instructions = stake_instruction::create_account_checked(
        &payer.pubkey(),
        &from_stake_account.pubkey(),
        &authorized,
        1000000000,
    );

    let transaction = utils::create_message_and_sign(&create_from_stake_account_instructions, &payer, vec![&payer, &from_stake_account], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&transaction).unwrap();
    println!("Created From Stake Account {:?} - Slot: {:?}", from_stake_account.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());

    let voter = read_keypair_file("/data/kbhargava/ledgers/ledger-gen-cluster/keys-0/vote.json").unwrap();

    // Delegate the source account so it holds stake that can be moved.
    let delegate_stake_account_instruction = vec![stake_instruction::delegate_stake(
        &from_stake_account.pubkey(),
        &payer.pubkey(),
        &voter.pubkey(),
    )];
    let transaction = utils::create_message_and_sign(&delegate_stake_account_instruction, &payer, vec![&payer], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&transaction).unwrap();
    println!("Delegated Stake Account {:?} to {:?} - Slot: {:?}", from_stake_account.pubkey(), voter.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());

    // Fund and initialize the destination stake account.
    let to_stake_account = Keypair::new();

    let create_to_stake_account_instruction = stake_instruction::create_account_checked(
        &payer.pubkey(),
        &to_stake_account.pubkey(),
        &authorized,
        1000000000,
    );

    let transaction = utils::create_message_and_sign(&create_to_stake_account_instruction, &payer, vec![&payer, &to_stake_account], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&transaction).unwrap();
    println!("Created To Stake Account {:?} - Slot: {:?}", to_stake_account.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());

    // Wait before moving stake — presumably to let the delegation activate;
    // confirm the required epoch boundary timing against the cluster config.
    utils::wait_atleast_n_slots(&client, 1000);

    // Renamed from the misleading `move_lamports_instruction`: this builds a
    // MoveStake instruction, not MoveLamports.
    let move_stake_instruction = vec![stake_instruction::move_stake(
        &from_stake_account.pubkey(),
        &to_stake_account.pubkey(),
        &payer.pubkey(),
        100000000,
    )];

    let transaction = utils::create_message_and_sign(&move_stake_instruction, &payer, vec![&payer], client.get_latest_blockhash().unwrap());
    let _ = client.send_and_confirm_transaction(&transaction).unwrap();
    println!("Moved Stake from {:?} to {:?} - Slot: {:?}", from_stake_account.pubkey(), to_stake_account.pubkey(), client.get_slot_with_commitment(CommitmentConfig::processed()).unwrap());
}
11 changes: 3 additions & 8 deletions src/app/fdctl/run/tiles/fd_replay.c
Original file line number Diff line number Diff line change
Expand Up @@ -804,23 +804,18 @@ after_frag( void * _ctx,
FD_LOG_INFO(( "NOT publishing mblk to poh - slot: %lu, parent_slot: %lu, flags: %lx", ctx->curr_slot, ctx->parent_slot, ctx->flags ));
}

fd_ghost_slot_print( ctx->ghost, child->slot, 12 );
// fd_ghost_print( ctx->ghost );
fd_tower_print( ctx->tower );
fd_fork_t const * vote_fork = fd_tower_vote_fork_select( ctx->tower,
ctx->forks,
ctx->acc_mgr,
ctx->ghost );

// fd_ghost_print( ctx->ghost );
fd_ghost_slot_print( ctx->ghost, child->slot, 8 );
fd_tower_print( ctx->tower );

FD_LOG_NOTICE( ( "\n\n[Fork Selection]\n"
"# of vote accounts: %lu\n"
"reset fork: %lu\n"
"vote fork: %lu\n"
"best fork: %lu\n",
fd_tower_vote_accs_cnt( ctx->tower->vote_accs ),
!!reset_fork ? reset_fork->slot : 0,
!!vote_fork ? vote_fork->slot : 0,
fd_ghost_head( ctx->ghost )->slot ) );

ulong poh_slot = fd_fseq_query( ctx->poh_slot );
Expand Down
18 changes: 15 additions & 3 deletions src/app/fdctl/run/tiles/fd_store_int.c
Original file line number Diff line number Diff line change
Expand Up @@ -207,17 +207,29 @@ after_frag( void * _ctx,

if( FD_UNLIKELY( in_idx==SHRED_IN_IDX ) ) {
for( ulong i = 0; i < ctx->s34_buffer->shred_cnt; i++ ) {
fd_shred_t * shred = &ctx->s34_buffer->pkts[i].shred;

if( FD_UNLIKELY( (long)(ctx->store->pending_slots->end - shred->slot) > (long)FD_PENDING_MAX ) ) {
FD_LOG_WARNING(("received shred %lu that would overrun pending queue. skipping.", shred->slot));
continue;
}

if( FD_UNLIKELY( (long)(ctx->store->curr_turbine_slot - shred->slot) > 100 ) ) {
FD_LOG_WARNING(("received shred with slot %lu that would overrun pending queue. skipping.", shred->slot));
continue;
}

// TODO: improve return value of api to not use < OK
if( fd_store_shred_insert( ctx->store, &ctx->s34_buffer->pkts[i].shred ) < FD_BLOCKSTORE_OK ) {
if( fd_store_shred_insert( ctx->store, shred ) < FD_BLOCKSTORE_OK ) {
FD_LOG_ERR(( "failed inserting to blockstore" ));
} else if ( ctx->shred_cap_ctx.is_archive ) {
uchar shred_cap_flag = FD_SHRED_CAP_FLAG_MARK_TURBINE(0);
if ( fd_shred_cap_archive(&ctx->shred_cap_ctx, &ctx->s34_buffer->pkts[i].shred, shred_cap_flag) < FD_SHRED_CAP_OK ) {
if ( fd_shred_cap_archive(&ctx->shred_cap_ctx, shred, shred_cap_flag) < FD_SHRED_CAP_OK ) {
FD_LOG_ERR(( "failed at archiving turbine shred to file" ));
}
}

fd_store_shred_update_with_shred_from_turbine( ctx->store, &ctx->s34_buffer->pkts[i].shred );
fd_store_shred_update_with_shred_from_turbine( ctx->store, shred );
}
}

Expand Down
1 change: 0 additions & 1 deletion src/app/fddev/configure/genesis.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,6 @@ default_enable_features( fd_features_t * features ) {
features->commission_updates_only_allowed_in_first_half_of_epoch = 0UL;
features->validate_fee_collector_account = 0UL;
features->incremental_snapshot_only_incremental_hash_calculation = 0UL;
features->stake_redelegate_instruction = 0UL;
features->timely_vote_credits = 0UL;
features->apply_cost_tracker_during_replay = 0UL;
features->reject_callx_r10 = 0UL;
Expand Down
Loading

0 comments on commit 03f64c1

Please sign in to comment.