Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

remote-hsmd on CLN v23.11rc2 #98

Draft
wants to merge 14 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .github/scripts/setup.sh
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ sudo apt-get -qq install --no-install-recommends --allow-unauthenticated -yy \
build-essential \
clang \
cppcheck \
curl \
docbook-xml \
eatmydata \
gcc-aarch64-linux-gnu \
Expand Down
2 changes: 1 addition & 1 deletion Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,7 @@ COMPAT_CFLAGS=-DCOMPAT_V052=1 -DCOMPAT_V060=1 -DCOMPAT_V061=1 -DCOMPAT_V062=1 -D
endif

# (method=thread to support xdist)
PYTEST_OPTS := -v -p no:logging $(PYTEST_OPTS)
PYTEST_OPTS := -v -p no:logging $(PYTEST_OPTS) $(PYTEST_MOREOPTS)
MY_CHECK_PYTHONPATH=$${PYTHONPATH}$${PYTHONPATH:+:}$(shell pwd)/contrib/pyln-client:$(shell pwd)/contrib/pyln-testing:$(shell pwd)/contrib/pyln-proto/:$(shell pwd)/external/lnprototest:$(shell pwd)/contrib/pyln-spec/bolt1:$(shell pwd)/contrib/pyln-spec/bolt2:$(shell pwd)/contrib/pyln-spec/bolt4:$(shell pwd)/contrib/pyln-spec/bolt7
# Collect generated python files to be excluded from lint checks
PYTHON_GENERATED= \
Expand Down
2 changes: 1 addition & 1 deletion common/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ extern const tal_t *wally_tal_ctx;

/* Like mkstemp but resolves template relative to $TMPDIR (or /tmp if unset).
* Returns created temporary path name at *created if successful. */
int tmpdir_mkstemp(const tal_t *ctx, const char *template TAKES, char **created);
int tmpdir_mkstemp(const tal_t *ctx, const char *tmplt TAKES, char **created);

/**
* tal_strlowering - return the same string in lower case.
Expand Down
29 changes: 26 additions & 3 deletions contrib/pyln-testing/pyln/testing/fixtures.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from concurrent import futures
from pyln.testing.db import SqliteDbProvider, PostgresDbProvider
from pyln.testing.utils import NodeFactory, BitcoinD, ElementsD, env, LightningNode, TEST_DEBUG
from pyln.testing.utils import NodeFactory, BitcoinD, ElementsD, env, LightningNode, TEST_DEBUG, LssD
from pyln.client import Millisatoshi
from typing import Dict

Expand Down Expand Up @@ -31,6 +31,9 @@ def test_base_dir():

yield directory

if bool(int(os.getenv('TEST_KEEPDIR', '0'))):
return

# Now check if any test directory is left because the corresponding test
# failed. If there are no such tests we can clean up the root test
# directory.
Expand Down Expand Up @@ -92,7 +95,7 @@ def directory(request, test_base_dir, test_name):
outcome = 'passed' if rep_call is None else rep_call.outcome
failed = not outcome or request.node.has_errors or outcome != 'passed'

if not failed:
if not failed and not bool(int(os.getenv('TEST_KEEPDIR', '0'))):
try:
shutil.rmtree(directory)
except OSError:
Expand Down Expand Up @@ -164,6 +167,25 @@ def bitcoind(directory, teardown_checks):
bitcoind.proc.wait()


@pytest.fixture
def lssd(directory, teardown_checks):
    """Provide a running LssD daemon for the duration of one test."""
    daemon = LssD(directory)
    try:
        daemon.start()
    except Exception:
        # Start-up failed: make sure nothing is left running before bailing.
        daemon.stop()
        raise

    yield daemon

    try:
        daemon.stop()
    except Exception:
        # Graceful shutdown failed; force the process down.
        daemon.proc.kill()
        daemon.proc.wait()


class TeardownErrors(object):
def __init__(self):
self.errors = []
Expand Down Expand Up @@ -446,11 +468,12 @@ def jsonschemas():


@pytest.fixture
def node_factory(request, directory, test_name, bitcoind, executor, db_provider, teardown_checks, node_cls, jsonschemas):
def node_factory(request, directory, test_name, bitcoind, lssd, executor, db_provider, teardown_checks, node_cls, jsonschemas):
nf = NodeFactory(
request,
test_name,
bitcoind,
lssd,
executor,
directory=directory,
db_provider=db_provider,
Expand Down
190 changes: 182 additions & 8 deletions contrib/pyln-testing/pyln/testing/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,7 @@ def env(name, default=None):
SLOW_MACHINE = env("SLOW_MACHINE", "0") == "1"
DEPRECATED_APIS = env("DEPRECATED_APIS", "0") == "1"
TIMEOUT = int(env("TIMEOUT", 180 if SLOW_MACHINE else 60))
SUBDAEMON = env("SUBDAEMON", "")
EXPERIMENTAL_DUAL_FUND = env("EXPERIMENTAL_DUAL_FUND", "0") == "1"
EXPERIMENTAL_SPLICING = env("EXPERIMENTAL_SPLICING", "0") == "1"

Expand Down Expand Up @@ -383,6 +384,45 @@ def f(*args):
return f


class LssD(TailableProc):
    """Wraps an `lssd` (lightning storage service) daemon for tests.

    The daemon runs out of a `lss` subdirectory of *directory*; when no
    *rpcport* is given, one is reserved here and released in `__del__`.
    """

    def __init__(self, directory, rpcport=None):
        lss_dir = os.path.join(directory, 'lss')
        TailableProc.__init__(self, lss_dir, verbose=False)

        # Only reserve a port when the caller did not pick one; remember
        # the reservation so __del__ can hand it back.
        self.reserved_rpcport = reserve_unused_port() if rpcport is None else None
        if self.reserved_rpcport is not None:
            rpcport = self.reserved_rpcport

        self.rpcport = rpcport
        self.prefix = 'lss'

        if not os.path.exists(lss_dir):
            os.makedirs(lss_dir)

        self.cmd_line = [
            'lssd',
            f'--datadir={lss_dir}',
            f'--port={rpcport}',
        ]

    def __del__(self):
        # Return the port reservation, if we made one ourselves.
        if self.reserved_rpcport is not None:
            drop_unused_port(self.reserved_rpcport)

    def start(self):
        # lssd is a Rust daemon; request verbose logging from it.
        self.env['RUST_LOG'] = 'debug'
        TailableProc.start(self)
        self.wait_for_log("ready on", timeout=TIMEOUT)
        logging.info("LssD started")

    def stop(self):
        logging.info("Stopping LssD")
        return TailableProc.stop(self)


class BitcoinD(TailableProc):

def __init__(self, bitcoin_dir="/tmp/bitcoind-test", rpcport=None):
Expand Down Expand Up @@ -578,11 +618,49 @@ def getnewaddress(self):
return info['unconfidential']


class ValidatingLightningSignerD(TailableProc):
    """Runs a `vlsd2` remote-signer process attached to one lightningd node."""

    def __init__(self, vlsd_dir, vlsd_port, node_id, network):
        TailableProc.__init__(self, vlsd_dir, verbose=True)
        self.executable = env("REMOTE_SIGNER_CMD", 'vlsd2')
        # vlsd2 reads its allowlist from the process environment, so this
        # mutates os.environ rather than self.env.
        os.environ['ALLOWLIST'] = env(
            'REMOTE_SIGNER_ALLOWLIST',
            'contrib/remote_hsmd/TESTING_ALLOWLIST')
        self.opts = [
            f'--network={network}',
            f'--datadir={vlsd_dir}',
            f'--connect=http://localhost:{vlsd_port}',
            '--integration-test',
        ]
        self.prefix = f'vlsd2-{node_id}'
        self.vlsd_port = vlsd_port

    @property
    def cmd_line(self):
        return [self.executable] + self.opts

    def start(self, stdin=None, stdout_redir=True, stderr_redir=True,
              wait_for_initialized=True):
        TailableProc.start(self, stdin, stdout_redir, stderr_redir)
        # Initialization must always complete here, regardless of the
        # wait_for_initialized argument.
        self.wait_for_log("vlsd2 git_desc")
        logging.info("vlsd2 started")

    def stop(self, timeout=10):
        logging.info("stopping vlsd2")
        status = TailableProc.stop(self, timeout)
        logging.info("vlsd2 stopped")
        self.logs_catchup()
        return status

    def __del__(self):
        self.logs_catchup()

class LightningD(TailableProc):
def __init__(
self,
lightning_dir,
bitcoindproxy,
lssd_port,
port=9735,
random_hsm=False,
node_id=0,
Expand All @@ -594,9 +672,16 @@ def __init__(
self.lightning_dir = lightning_dir
self.port = port
self.cmd_prefix = []
self.lightning_dir = lightning_dir
self.use_vlsd = False
self.vlsd_dir = os.path.join(lightning_dir, "vlsd")
self.vlsd_port = None
self.vlsd = None
self.node_id = node_id

self.rpcproxy = bitcoindproxy
self.env['CLN_PLUGIN_LOG'] = "cln_plugin=trace,cln_rpc=trace,cln_grpc=trace,debug"
self.lssd_port = lssd_port

self.opts = LIGHTNINGD_CONFIG.copy()
opts = {
Expand All @@ -616,12 +701,38 @@ def __init__(
if grpc_port is not None:
opts['grpc-port'] = grpc_port

if SUBDAEMON:
assert node_id > 0
subdaemons = SUBDAEMON.split(',')
# VLS_SERIAL_SELECT "swaps" the selected item with the first item
select = env("VLS_SERIAL_SELECT", '1')
if node_id == int(select):
ndx = 1
elif node_id == 1:
ndx = int(select)
else:
ndx = node_id
if ndx > len(subdaemons):
# use the last element if not as many specifiers as nodes
opts['subdaemon'] = subdaemons[-1]
else:
# use the matching specifier
opts['subdaemon'] = subdaemons[ndx - 1]

print(f"starting node {node_id} with subdaemon {opts['subdaemon']}")
if SUBDAEMON == 'hsmd:remote_hsmd_socket':
self.use_vlsd = True

for k, v in opts.items():
self.opts[k] = v

if not os.path.exists(os.path.join(lightning_dir, TEST_NETWORK)):
os.makedirs(os.path.join(lightning_dir, TEST_NETWORK))

if self.use_vlsd:
if not os.path.exists(self.vlsd_dir):
os.makedirs(self.vlsd_dir)

# Last 32-bytes of final part of dir -> seed.
seed = (bytes(re.search('([^/]+)/*$', lightning_dir).group(1), encoding='utf-8') + bytes(32))[:32]
if not random_hsm:
Expand All @@ -637,6 +748,10 @@ def __init__(
self.early_opts = ['--developer']

def cleanup(self):
if self.use_vlsd:
# Make sure the remotesigner is shutdown
self.vlsd.stop()

# To force blackhole to exit, disconnect file must be truncated!
if 'dev-disconnect' in self.opts:
with open(self.opts['dev-disconnect'], "w") as f:
Expand All @@ -657,12 +772,65 @@ def cmd_line(self):

return self.cmd_prefix + [self.executable] + self.early_opts + opts

def __del__(self):
    # Return the signer-port reservation, if start() ever made one.
    if self.vlsd_port is None:
        return
    drop_unused_port(self.vlsd_port)

def start(self, stdin=None, wait_for_initialized=True, stderr_redir=False):
    """Start lightningd and, when configured, its remote vlsd signer.

    NOTE(review): the original span interleaved the pre-change body with
    the new one (the `bitcoin-rpcport`, `TailableProc.start` and
    `wait_for_log` statements each appeared twice — a rendered-diff
    artifact); this is the coherent post-change version.
    """
    try:
        # Point the signer at the lss daemon and ask for verbose logs.
        self.env['VLS_LSS'] = f"http://localhost:{self.lssd_port}"
        self.env['RUST_LOG'] = 'debug'
        # Some of the remote hsmd proxies need a bitcoind RPC connection
        self.env['BITCOIND_RPC_URL'] = 'http://{}:{}@localhost:{}'.format(
            BITCOIND_CONFIG['rpcuser'],
            BITCOIND_CONFIG['rpcpassword'],
            BITCOIND_CONFIG['rpcport'])

        # The remote hsmd proxies need to know which network we are using
        if 'network' in self.opts:
            self.env['VLS_NETWORK'] = self.opts['network']

        self.opts['bitcoin-rpcport'] = self.rpcproxy.rpcport

        if self.use_vlsd:
            self.vlsd_port = reserve_unused_port()
            # We can't do this in the constructor because we need a new
            # port on each restart.
            self.env['VLS_PORT'] = str(self.vlsd_port)
            # Kill any previous vlsd (we may have been restarted)
            if self.vlsd is not None:
                logging.info("killing prior vlsd")
                self.vlsd.kill()

        TailableProc.start(self, stdin, stdout_redir=False,
                           stderr_redir=stderr_redir)

        if self.use_vlsd:
            # Start the remote signer now that lightningd is listening.
            self.vlsd = ValidatingLightningSignerD(
                self.vlsd_dir, self.vlsd_port, self.node_id,
                self.opts['network'])
            self.vlsd.start(
                stdin, stdout_redir=True, stderr_redir=True,
                wait_for_initialized=wait_for_initialized)

        if wait_for_initialized:
            self.wait_for_log("Server started with public key")
        logging.info("LightningD started")
    except Exception:
        if self.use_vlsd:
            # LightningD didn't start, stop the remotesigner
            self.vlsd.stop()
        raise

def stop(self, timeout=10):
    """Stop lightningd; when a remote signer is in use, shut it down first."""
    if self.use_vlsd:
        self.vlsd.stop(timeout)
    return TailableProc.stop(self, timeout)

def kill(self):
    """Kill lightningd immediately; the remote signer is killed first."""
    if self.use_vlsd:
        self.vlsd.kill()
    TailableProc.kill(self)

def wait(self, timeout=TIMEOUT):
"""Wait for the daemon to stop for up to timeout seconds
Expand Down Expand Up @@ -745,7 +913,7 @@ def call(self, method, payload=None, cmdprefix=None, filter=None):


class LightningNode(object):
def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fail=False,
def __init__(self, node_id, lightning_dir, bitcoind, lssd, executor, valgrind, may_fail=False,
may_reconnect=False,
allow_broken_log=False,
allow_warning=False,
Expand All @@ -755,6 +923,7 @@ def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fai
valgrind_plugins=True,
**kwargs):
self.bitcoin = bitcoind
self.lssd = lssd
self.executor = executor
self.may_fail = may_fail
self.may_reconnect = may_reconnect
Expand All @@ -774,6 +943,7 @@ def __init__(self, node_id, lightning_dir, bitcoind, executor, valgrind, may_fai

self.daemon = LightningD(
lightning_dir, bitcoindproxy=bitcoind.get_proxy(),
lssd_port=lssd.rpcport,
port=port, random_hsm=random_hsm, node_id=node_id,
grpc_port=self.grpc_port,
)
Expand Down Expand Up @@ -1222,6 +1392,9 @@ def pay(self, dst, amt, label=None, route=False):
'channel': scid
}

# let the signer know this payment is coming
self.rpc.preapproveinvoice(bolt11=inv['bolt11'])

# sendpay is async now
self.rpc.sendpay([routestep], rhash, payment_secret=psecret, bolt11=inv['bolt11'])
# wait for sendpay to comply
Expand Down Expand Up @@ -1478,7 +1651,7 @@ def flock(directory: Path):
class NodeFactory(object):
"""A factory to setup and start `lightningd` daemons.
"""
def __init__(self, request, testname, bitcoind, executor, directory,
def __init__(self, request, testname, bitcoind, lssd, executor, directory,
db_provider, node_cls, jsonschemas):
if request.node.get_closest_marker("slow_test") and SLOW_MACHINE:
self.valgrind = False
Expand All @@ -1490,6 +1663,7 @@ def __init__(self, request, testname, bitcoind, executor, directory,
self.reserved_ports = []
self.executor = executor
self.bitcoind = bitcoind
self.lssd = lssd
self.directory = directory
self.lock = threading.Lock()
self.db_provider = db_provider
Expand Down Expand Up @@ -1575,7 +1749,7 @@ def get_node(self, node_id=None, options=None, dbfile=None,
db = self.db_provider.get_db(os.path.join(lightning_dir, TEST_NETWORK), self.testname, node_id)
db.provider = self.db_provider
node = self.node_cls(
node_id, lightning_dir, self.bitcoind, self.executor, self.valgrind, db=db,
node_id, lightning_dir, self.bitcoind, self.lssd, self.executor, self.valgrind, db=db,
port=port, options=options, may_fail=may_fail or expect_fail,
jsonschemas=self.jsonschemas,
**kwargs
Expand Down
Loading
Loading