From 65e5074bcc8dd3ecebe7bf271c1486fc98bd74ed Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 22 Nov 2023 18:44:15 +0000 Subject: [PATCH 01/24] runtime: noodling --- packages/runtime/src/execute/job.ts | 11 +++++++++++ packages/runtime/test/runtime.test.ts | 6 +++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/job.ts index 5db9c70ce..4acb1d3cc 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/job.ts @@ -159,12 +159,23 @@ const executeJob = async ( } if (!didError) { + const { heapUsed, rss } = process.memoryUsage(); + const used = heapUsed / 1024 / 1024; + const humanUsed = Math.round(used); + const humanRSS = Math.round(rss / 1024 / 1024); + console.log(`Job ${jobId} heap ${humanUsed} MB`); + console.log(`Job ${jobId} rss ${humanRSS} MB`); + next = calculateNext(job, result); notify(NOTIFY_JOB_COMPLETE, { duration: Date.now() - duration, state: result, jobId, next, + mem: { + heapUsedMb: used, + totalMb: rss / 1024 / 1024, + }, }); } } else { diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index 74894f0ff..8d3047d67 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -13,7 +13,11 @@ import run from '../src/runtime'; // High level examples of runtime usages -test('run simple expression', async (t) => { +// TODO create memory test +// create large arrays or something to inflate memory usage +// https://www.valentinog.com/blog/node-usage/ + +test.only('run simple expression', async (t) => { const expression = 'export default [(s) => {s.data.done = true; return s}]'; const result: any = await run(expression); From e2e8717b74625bdf5d0d2a2073d00efab51b83c2 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 09:14:08 +0000 Subject: [PATCH 02/24] runtime: better memory logging --- packages/runtime/src/execute/job.ts | 21 ++++++++++++--------- packages/runtime/test/execute/job.test.ts | 3 ++- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/job.ts index 4acb1d3cc..c25b8d8d5 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/job.ts @@ -124,8 +124,6 @@ const executeJob = async ( // TODO include the upstream job notify(NOTIFY_JOB_START, { jobId }); result = await executeExpression(ctx, job.expression, state); - const humanDuration = logger.timer(timerId); - logger.success(`Completed job ${jobId} in ${humanDuration}`); } catch (e: any) { didError = true; if (e.hasOwnProperty('error') && e.hasOwnProperty('state')) { @@ -160,11 +158,16 @@ const executeJob = async ( if (!didError) { const { heapUsed, rss } = process.memoryUsage(); - const used = heapUsed / 1024 / 1024; - const humanUsed = Math.round(used); - const humanRSS = Math.round(rss / 1024 / 1024); - console.log(`Job ${jobId} heap ${humanUsed} MB`); - console.log(`Job ${jobId} rss ${humanRSS} MB`); + const humanDuration = logger.timer(timerId); + + const jobMemory = heapUsed; + const systemMemory = rss; + + const humanJobMemory = Math.round(jobMemory / 1024 / 1024); + // TODO is this something we want to always log? 
+ logger.success( + `Completed job ${jobId} in ${humanDuration} (used ${humanJobMemory}mb)` + ); next = calculateNext(job, result); notify(NOTIFY_JOB_COMPLETE, { @@ -173,8 +176,8 @@ const executeJob = async ( jobId, next, mem: { - heapUsedMb: used, - totalMb: rss / 1024 / 1024, + job: jobMemory, + system: systemMemory, }, }); } diff --git a/packages/runtime/test/execute/job.test.ts b/packages/runtime/test/execute/job.test.ts index 972fa7b31..0f9703c6e 100644 --- a/packages/runtime/test/execute/job.test.ts +++ b/packages/runtime/test/execute/job.test.ts @@ -123,12 +123,13 @@ test(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { const notify = (event: string, payload: any) => { if (event === NOTIFY_JOB_COMPLETE) { - const { state, duration, jobId, next } = payload; + const { state, duration, jobId, next, mem } = payload; t.truthy(state); t.deepEqual(state, state); t.deepEqual(next, []); t.assert(!isNaN(duration)); t.true(duration < 100); + t.true(mem); t.is(jobId, 'j'); } }; From 04ff7c807c6d303d806faf2b784a35878803ce83 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 10:17:19 +0000 Subject: [PATCH 03/24] runtime: couple of basic memory tests --- packages/runtime/src/types.ts | 4 + packages/runtime/test/memory.test.ts | 110 +++++++++++++++++++++++++++ 2 files changed, 114 insertions(+) create mode 100644 packages/runtime/test/memory.test.ts diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index e3bbb52b6..296907941 100644 --- a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -150,6 +150,10 @@ export type NotifyJobCompletePayload = { state: any; jobId: string; next: string[]; + mem: { + job: number; + system: number; + }; }; export type NotifyJobErrorPayload = { diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts new file mode 100644 index 000000000..c8e73cd7d --- /dev/null +++ b/packages/runtime/test/memory.test.ts @@ -0,0 +1,110 @@ +import test from 'ava'; + +import { + ExecutionPlan, + NOTIFY_JOB_COMPLETE, + NotifyJobCompletePayload, +} from '../src'; +import callRuntime from '../src/runtime'; + +/** + * This file contains various memory tests for runtime usage + * The aim right now sis to understand how accurate process.memoryUsage() + * reports are at the end of the job + * + * Something to bear in mind here is that all the tests run in the same process and share memory + * I fully expect test 1 to have an impact on test 2 + * Part of the testing is to understand how much + * + * Also a consideration for the engine: after a job completes, I don't think we do any cleanup + * of the SourceTextModule and context etc that we created for it. + * + * In the CLI this doesn't mattter because the process ends, and actually in the worker right now + * We burn the thread so it still doesn't matter much/ + */ + +type Mem = { + job: number; // heapUsed in bytes + system: number; // rss in bytes +}; + +// This helper will run a workflow and return +// memory usage per run +const run = async (t, workflow: ExecutionPlan) => { + const useage: Record = {}; + + const notify = (evt: string, payload: NotifyJobCompletePayload) => { + if (evt === NOTIFY_JOB_COMPLETE) { + useage[payload.jobId] = payload.mem; + } + }; + + const state = await callRuntime( + workflow, + {}, + { + strict: false, + callbacks: { notify }, + } + ); + logUsage(t, useage.a); + return { state, useage }; +}; + +const logUsage = (t: any, mem: Mem, label = '') => { + // What kind of rounding should I Do? 
+ // Rounding to an integer is best for humans but I think in these tests we lose a lot of fidelity + // I mean you could lose nearly 500kb of accuracy, that's a lot! + const job = (mem.job / 1024 / 1024).toFixed(2); + const system = (mem.system / 1024 / 1024).toFixed(2); + t.log(`${label} job: ${job}mb / system ${system}mb`); +}; + +// const jobs = { +// fn: () => 'export default [(s) => s]', + +// }; + +const expressions = { + readMemory: (jobName: string) => (s: any) => { + // Hmm, the rounded human number actually looks quite different to theactual reported number + const mem = process.memoryUsage(); + s[jobName] = { job: mem.heapUsed, system: mem.rss }; + return s; + }, +}; + +test('emit memory usage to job-complete', async (t) => { + const plan = { + jobs: [ + { + id: 'a', + // This seems to use ~55mb of heap (job) + expression: [(s) => s], + }, + ], + }; + + const { useage } = await run(t, plan); + t.true(!isNaN(useage.a.job)); + t.true(!isNaN(useage.a.system)); + t.true(useage.a.job < useage.a.system); +}); + +test('report memory usage for a job to state', async (t) => { + const plan = { + jobs: [ + { + id: 'a', + expression: [expressions.readMemory('a')], + }, + ], + }; + + const { state } = await run(t, plan); + logUsage(t, state.a, 'state: '); + + t.true(!isNaN(state.a.job)); + t.true(!isNaN(state.a.system)); + t.true(state.a.job < state.a.system); +}); From e00aeacee740bb7206fe99094df07eaca713d4a6 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 13:05:00 +0000 Subject: [PATCH 04/24] runtime: more detailed memory testing --- packages/runtime/src/execute/job.ts | 4 + packages/runtime/test/memory.test.ts | 185 +++++++++++++++++++++++++-- 2 files changed, 178 insertions(+), 11 deletions(-) diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/job.ts index c25b8d8d5..7741269ef 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/job.ts @@ -157,6 +157,10 @@ const executeJob = async ( } if (!didError) { + // Take a memory snapshot + // IMPORTANT: this runs after the state object has been serialized + // Which has a big impact on memory + // This is reasonable I think because your final state is part of the job! 
const { heapUsed, rss } = process.memoryUsage(); const humanDuration = logger.timer(timerId); diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index c8e73cd7d..da22cb6c4 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -23,6 +23,14 @@ import callRuntime from '../src/runtime'; * We burn the thread so it still doesn't matter much/ */ +test.afterEach(() => { + // Force gc to try and better isolate tests + // THis may not work and maybe we need to use threads or something to ensure a pristine environment + // Certainly runs seem to affect each other in the same process (no suprise there) + // @ts-ignore + global.gc(); +}); + type Mem = { job: number; // heapUsed in bytes system: number; // rss in bytes @@ -31,11 +39,12 @@ type Mem = { // This helper will run a workflow and return // memory usage per run const run = async (t, workflow: ExecutionPlan) => { - const useage: Record = {}; + const mem: Record = {}; const notify = (evt: string, payload: NotifyJobCompletePayload) => { if (evt === NOTIFY_JOB_COMPLETE) { - useage[payload.jobId] = payload.mem; + mem[payload.jobId] = payload.mem; + logUsage(t, payload.mem, `final ${payload.jobId}:`); } }; @@ -45,10 +54,14 @@ const run = async (t, workflow: ExecutionPlan) => { { strict: false, callbacks: { notify }, + globals: { + process: { + memoryUsage: () => process.memoryUsage(), + }, + }, } ); - logUsage(t, useage.a); - return { state, useage }; + return { state, mem }; }; const logUsage = (t: any, mem: Mem, label = '') => { @@ -72,9 +85,29 @@ const expressions = { s[jobName] = { job: mem.heapUsed, system: mem.rss }; return s; }, + createArray: (numberofElements: number) => (s: any) => { + s.data = Array(numberofElements).fill('bowser'); + return s; + }, +}; + +// assert that b is within tolerance% of the value of a +const roughlyEqual = (a: number, b: number, tolerance: number) => { + const diff = Math.abs(a - b); + return diff <= a * tolerance; }; -test('emit memory usage to job-complete', async (t) => { +test.serial('roughly equal', (t) => { + t.true(roughlyEqual(10, 10, 0)); // exactly equal, no tolerance + t.false(roughlyEqual(10, 11, 0)); // not equal, no tolerance + t.false(roughlyEqual(10, 9, 0)); // not equal, no tolerance + + t.true(roughlyEqual(10, 11, 0.1)); // roughly equal with 10% tolerance + t.true(roughlyEqual(10, 9, 0.1)); // roughly equal with 10% tolerance + t.false(roughlyEqual(10, 12, 0.1)); // not equal with 10% tolerance +}); + +test.serial('emit memory usage to job-complete', async (t) => { const plan = { jobs: [ { @@ -85,13 +118,13 @@ test('emit memory usage to job-complete', async (t) => { ], }; - const { useage } = await run(t, plan); - t.true(!isNaN(useage.a.job)); - t.true(!isNaN(useage.a.system)); - t.true(useage.a.job < useage.a.system); + const { mem } = await run(t, plan); + t.true(!isNaN(mem.a.job)); + t.true(!isNaN(mem.a.system)); + t.true(mem.a.job < mem.a.system); }); -test('report memory usage for a job to state', async (t) => { +test.serial('report memory usage for a job to state', async (t) => { const plan = { jobs: [ { @@ -102,9 +135,139 @@ test('report memory usage for a job to state', async (t) => { }; const { state } = await run(t, plan); - logUsage(t, state.a, 'state: '); + logUsage(t, state.a, 'state:'); t.true(!isNaN(state.a.job)); t.true(!isNaN(state.a.system)); t.true(state.a.job < state.a.system); }); + +test.serial('report memory usage multiple times', async (t) => { + const plan = { + jobs: [ + { + id: 'a', + 
expression: [ + expressions.readMemory('a'), // ~56mb + expressions.readMemory('b'), + expressions.readMemory('c'), + expressions.readMemory('d'), + ], + }, + ], + }; + + const { state, mem } = await run(t, plan); + logUsage(t, state.a, 'state a:'); + logUsage(t, state.b, 'state b:'); + logUsage(t, state.c, 'state c:'); + logUsage(t, state.d, 'state d:'); + + // Each job should use basically the same memory interally (within 2%) + t.true(roughlyEqual(state.a.job, state.d.job, 0.002)); + + // The total memory should be about the last job's memory + t.true(roughlyEqual(mem.a.job, state.d.job, 0.002)); +}); + +test.serial('create a large array in a job', async (t) => { + const plan = { + jobs: [ + { + id: 'a', + expression: [ + expressions.readMemory('a1'), // ~56mb + expressions.createArray(10e6), // 10 million ~76mb + expressions.readMemory('a2'), // ~133mb + ], + }, + ], + }; + + const { state, mem } = await run(t, plan); + logUsage(t, state.a1, 'state a1:'); + logUsage(t, state.a2, 'state a2:'); + + // The second read should have more memory + t.true(state.a2.job > state.a1.job); + + // The final job memory is a lot bigger because AFTER the job we serialize state.data + // Which of course has a huge array on it - so memory baloons¬ + t.true(mem.a.job > state.a1.job + state.a2.job); +}); + +// as before but without using closures (implications for gc I think) +// The result is basically the same (worth knowing!) +test.serial('create a large array in a job without closures', async (t) => { + const f1 = `(s) => { + const mem = process.memoryUsage(); + s.a = { job: mem.heapUsed, system: mem.rss }; + return s; + }`; + + const f2 = `(s) => { + s.data = Array(10e6).fill('bowser'); + return s; + }`; + + const f3 = `(s) => { + const mem = process.memoryUsage(); + s.b = { job: mem.heapUsed, system: mem.rss }; + return s; + }`; + + const expression = `export default [${f1}, ${f2}, ${f3}]`; + + const plan = { + jobs: [ + { + id: 'a', + expression, + }, + ], + }; + + const { state, mem } = await run(t, plan); + logUsage(t, state.a, 'state a:'); + logUsage(t, state.b, 'state b:'); + + // The second read should have more memory + t.true(state.b.job > state.a.job); + + // The final job memory is a lot bigger because AFTER the job we serialize state.data + // Which of course has a huge array on it - so memory baloons + t.true(mem.a.job > state.a.job + state.b.job); +}); + +test.serial( + "create a large array in a job but don't write it to state", + async (t) => { + const plan = { + jobs: [ + { + id: 'a', + expression: [ + expressions.readMemory('a1'), // ~56mb + expressions.createArray(10e6), // 10 million ~76mb + (s) => { + delete s.data; + return s; + }, + expressions.readMemory('a2'), // ~133mb + ], + }, + ], + }; + + const { state, mem } = await run(t, plan); + logUsage(t, state.a1, 'state a1:'); + logUsage(t, state.a2, 'state a2:'); + + // The second read should have more memory + t.true(state.a2.job > state.a1.job); + + // In this example, because we didn't return state, + // the final memory is basically the same as the last operation + t.true(roughlyEqual(mem.a.job, state.a2.job, 0.001)); + } +); From c0ba95e718692e206eb61581a68cdbb16b30250e Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 13:05:31 +0000 Subject: [PATCH 05/24] ava: change config to allow us to run gc in theruntime This should NOT be merged to main --- ava.config.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/ava.config.js b/ava.config.js index aff2feea3..7ef33db2e 100644 --- a/ava.config.js +++ 
b/ava.config.js @@ -11,7 +11,10 @@ module.exports = { '--loader=ts-node/esm', '--no-warnings', // Disable experimental module warnings '--experimental-vm-modules', + '--expose-gc', // TODO this should only be in the runtime ], + workerThreads: false, // TMP runtime only - needed to expose gc + files: ['test/**/*test.ts'], }; From 102f0ab1417878ff006e51414eab529e00104d45 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 14:28:42 +0000 Subject: [PATCH 06/24] runtime: more memory tests --- packages/runtime/test/memory.test.ts | 71 ++++++++++++++++++++++------ 1 file changed, 57 insertions(+), 14 deletions(-) diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index da22cb6c4..ea5fe194a 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -44,7 +44,7 @@ const run = async (t, workflow: ExecutionPlan) => { const notify = (evt: string, payload: NotifyJobCompletePayload) => { if (evt === NOTIFY_JOB_COMPLETE) { mem[payload.jobId] = payload.mem; - logUsage(t, payload.mem, `final ${payload.jobId}:`); + logUsage(t, payload.mem, `job ${payload.jobId}`); } }; @@ -70,7 +70,7 @@ const logUsage = (t: any, mem: Mem, label = '') => { // I mean you could lose nearly 500kb of accuracy, that's a lot! const job = (mem.job / 1024 / 1024).toFixed(2); const system = (mem.system / 1024 / 1024).toFixed(2); - t.log(`${label} job: ${job}mb / system ${system}mb`); + t.log(`${label}: ${job}mb / system ${system}mb`); }; // const jobs = { @@ -158,10 +158,10 @@ test.serial('report memory usage multiple times', async (t) => { }; const { state, mem } = await run(t, plan); - logUsage(t, state.a, 'state a:'); - logUsage(t, state.b, 'state b:'); - logUsage(t, state.c, 'state c:'); - logUsage(t, state.d, 'state d:'); + logUsage(t, state.a, 'state a'); + logUsage(t, state.b, 'state b'); + logUsage(t, state.c, 'state c'); + logUsage(t, state.d, 'state d'); // Each job should use basically the same memory interally (within 2%) t.true(roughlyEqual(state.a.job, state.d.job, 0.002)); @@ -170,6 +170,7 @@ test.serial('report memory usage multiple times', async (t) => { t.true(roughlyEqual(mem.a.job, state.d.job, 0.002)); }); +// This test shows that creating a large array will increase the memory used by a job test.serial('create a large array in a job', async (t) => { const plan = { jobs: [ @@ -185,8 +186,8 @@ test.serial('create a large array in a job', async (t) => { }; const { state, mem } = await run(t, plan); - logUsage(t, state.a1, 'state a1:'); - logUsage(t, state.a2, 'state a2:'); + logUsage(t, state.a1, 'state a1'); + logUsage(t, state.a2, 'state a2'); // The second read should have more memory t.true(state.a2.job > state.a1.job); @@ -196,8 +197,8 @@ test.serial('create a large array in a job', async (t) => { t.true(mem.a.job > state.a1.job + state.a2.job); }); -// as before but without using closures (implications for gc I think) -// The result is basically the same (worth knowing!) 
+// This test proves that running a job from a string or by passing in a function +// doesn't really affect memory usage test.serial('create a large array in a job without closures', async (t) => { const f1 = `(s) => { const mem = process.memoryUsage(); @@ -228,8 +229,8 @@ test.serial('create a large array in a job without closures', async (t) => { }; const { state, mem } = await run(t, plan); - logUsage(t, state.a, 'state a:'); - logUsage(t, state.b, 'state b:'); + logUsage(t, state.a, 'state a'); + logUsage(t, state.b, 'state b'); // The second read should have more memory t.true(state.b.job > state.a.job); @@ -239,6 +240,8 @@ test.serial('create a large array in a job without closures', async (t) => { t.true(mem.a.job > state.a.job + state.b.job); }); +// This test proves that writing a lot of data to state dramatically increases final memory +// Note that we can surely optimise the serialisation step, but that's another story test.serial( "create a large array in a job but don't write it to state", async (t) => { @@ -260,8 +263,8 @@ test.serial( }; const { state, mem } = await run(t, plan); - logUsage(t, state.a1, 'state a1:'); - logUsage(t, state.a2, 'state a2:'); + logUsage(t, state.a1, 'state a1'); + logUsage(t, state.a2, 'state a2'); // The second read should have more memory t.true(state.a2.job > state.a1.job); @@ -271,3 +274,43 @@ test.serial( t.true(roughlyEqual(mem.a.job, state.a2.job, 0.001)); } ); + +// This test basically comfirms that final memory is not peak memory +// because GC can dramatically change the reported memory usage (thank goodness!) +test.serial( + 'create a large array in a job, run gc, compare peak and final memory', + async (t) => { + const plan = { + jobs: [ + { + id: 'a', + expression: [ + (s) => { + // clean up first + global.gc(); + return s; + }, + expressions.readMemory('a'), + expressions.createArray(10e6), // 10 million ~76mb + expressions.readMemory('b'), + (s) => { + delete s.data; + global.gc(); + return s; + }, + expressions.readMemory('c'), + ], + }, + ], + }; + + const { state, mem } = await run(t, plan); + logUsage(t, state.b, 'peak'); + + // The first job should use over 100mb + t.true(state.b.job > 100 * 1024 * 1024); + + // the final state should be the intial state + t.true(roughlyEqual(mem.a.job, state.a.job, 0.01)); + } +); From 94a6e11e0e8109264fb00171c5c42ebd334884bb Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 15:20:14 +0000 Subject: [PATCH 07/24] runtime: remove memory tests from standard set --- ava.config.js | 3 --- packages/runtime/ava.config.cjs | 7 ++++++ packages/runtime/memtest.ava.config.cjs | 24 +++++++++++++++++++ packages/runtime/package.json | 1 + packages/runtime/test/execute/job.test.ts | 2 +- packages/runtime/test/memory.test.ts | 7 ++++++ .../test/repo/get-latest-installed-version.ts | 2 +- 7 files changed, 41 insertions(+), 5 deletions(-) create mode 100644 packages/runtime/ava.config.cjs create mode 100644 packages/runtime/memtest.ava.config.cjs diff --git a/ava.config.js b/ava.config.js index 7ef33db2e..aff2feea3 100644 --- a/ava.config.js +++ b/ava.config.js @@ -11,10 +11,7 @@ module.exports = { '--loader=ts-node/esm', '--no-warnings', // Disable experimental module warnings '--experimental-vm-modules', - '--expose-gc', // TODO this should only be in the runtime ], - workerThreads: false, // TMP runtime only - needed to expose gc - files: ['test/**/*test.ts'], }; diff --git a/packages/runtime/ava.config.cjs b/packages/runtime/ava.config.cjs new file mode 100644 index 000000000..49d21b401 --- 
/dev/null +++ b/packages/runtime/ava.config.cjs @@ -0,0 +1,7 @@ +const baseConfig = require('../../ava.config'); + +module.exports = { + ...baseConfig, + + files: ['!test/memory.test.ts'], +}; diff --git a/packages/runtime/memtest.ava.config.cjs b/packages/runtime/memtest.ava.config.cjs new file mode 100644 index 000000000..690125de5 --- /dev/null +++ b/packages/runtime/memtest.ava.config.cjs @@ -0,0 +1,24 @@ +// This is special ava config just for the memory test, because: +// a) it should explicitly expose gc +// b) it does not use worker threads +// c) it should not run in ci +module.exports = { + extensions: { + ts: 'module', + }, + + environmentVariables: { + TS_NODE_TRANSPILE_ONLY: 'true', + }, + + nodeArguments: [ + '--loader=ts-node/esm', + '--no-warnings', + '--experimental-vm-modules', + '--expose-gc', + ], + + workerThreads: false, + + files: ['test/memory.test.ts'], +}; diff --git a/packages/runtime/package.json b/packages/runtime/package.json index 057281117..c32d782e5 100644 --- a/packages/runtime/package.json +++ b/packages/runtime/package.json @@ -17,6 +17,7 @@ "test": "pnpm ava", "test:watch": "pnpm ava -w", "test:types": "pnpm tsc --project tsconfig.test.json", + "test:memory": "pnpm ava --config memtest.ava.config.cjs", "build": "tsup --config ../../tsup.config.js src/index.ts", "build:watch": "pnpm build --watch", "pack": "pnpm pack --pack-destination ../../dist" diff --git a/packages/runtime/test/execute/job.test.ts b/packages/runtime/test/execute/job.test.ts index 0f9703c6e..a8b8c235e 100644 --- a/packages/runtime/test/execute/job.test.ts +++ b/packages/runtime/test/execute/job.test.ts @@ -129,7 +129,7 @@ test(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { t.deepEqual(next, []); t.assert(!isNaN(duration)); t.true(duration < 100); - t.true(mem); + t.truthy(mem); t.is(jobId, 'j'); } }; diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index ea5fe194a..29f9bddf3 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -1,3 +1,8 @@ +/** + * IGNORED BY AVA + * RUN WITH pnpm test:memory + * */ + import test from 'ava'; import { @@ -314,3 +319,5 @@ test.serial( t.true(roughlyEqual(mem.a.job, state.a.job, 0.01)); } ); + +test.todo('will gc run if we leave a long timeout?'); diff --git a/packages/runtime/test/repo/get-latest-installed-version.ts b/packages/runtime/test/repo/get-latest-installed-version.ts index 3f2f98593..cc064617d 100644 --- a/packages/runtime/test/repo/get-latest-installed-version.ts +++ b/packages/runtime/test/repo/get-latest-installed-version.ts @@ -47,7 +47,7 @@ test('return the higher if order is changed', async (t) => { test('should read package json from disk', async (t) => { const result = await getLatestInstalledVersion( 'ultimate-answer', - path.resolve('test/__repo') + path.resolve('test/__repo__') ); t.assert(result === 'ultimate-answer_2.0.0'); }); From e53e98636e150365b705b9995f29820d3b8b734e Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 15:47:37 +0000 Subject: [PATCH 08/24] runtime: reformat memory output --- packages/runtime/src/execute/job.ts | 12 +- packages/runtime/test/execute/job.test.ts | 158 +++++++++++++--------- 2 files changed, 99 insertions(+), 71 deletions(-) diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/job.ts index 7741269ef..91beba3ea 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/job.ts @@ -157,20 +157,22 @@ const executeJob = async ( } if 
(!didError) { + const humanDuration = logger.timer(timerId); + logger.success(`Completed job ${jobId} in ${humanDuration}`); + // Take a memory snapshot - // IMPORTANT: this runs after the state object has been serialized + // IMPORTANT: this runs _after_ the state object has been serialized // Which has a big impact on memory // This is reasonable I think because your final state is part of the job! const { heapUsed, rss } = process.memoryUsage(); - const humanDuration = logger.timer(timerId); const jobMemory = heapUsed; const systemMemory = rss; const humanJobMemory = Math.round(jobMemory / 1024 / 1024); - // TODO is this something we want to always log? - logger.success( - `Completed job ${jobId} in ${humanDuration} (used ${humanJobMemory}mb)` + const humanSystemMemory = Math.round(systemMemory / 1024 / 1024); + logger.debug( + `Final memory usage: [job ${humanJobMemory}mb] [system ${humanSystemMemory}mb]` ); next = calculateNext(job, result); diff --git a/packages/runtime/test/execute/job.test.ts b/packages/runtime/test/execute/job.test.ts index a8b8c235e..cecf9e383 100644 --- a/packages/runtime/test/execute/job.test.ts +++ b/packages/runtime/test/execute/job.test.ts @@ -31,7 +31,7 @@ test.afterEach(() => { logger._reset(); }); -test('resolve and return next for a simple job', async (t) => { +test.serial('resolve and return next for a simple job', async (t) => { const job = { id: 'j', expression: [(s: State) => s], @@ -45,7 +45,7 @@ test('resolve and return next for a simple job', async (t) => { t.deepEqual(next, ['k']); }); -test('resolve and return next for a trigger-style job', async (t) => { +test.serial('resolve and return next for a trigger-style job', async (t) => { const job = { id: 'j', next: { k: true, a: false }, @@ -58,7 +58,7 @@ test('resolve and return next for a trigger-style job', async (t) => { t.deepEqual(next, ['k']); }); -test('resolve and return next for a failed job', async (t) => { +test.serial('resolve and return next for a failed job', async (t) => { const job = { id: 'j', expression: [ @@ -77,7 +77,7 @@ test('resolve and return next for a failed job', async (t) => { t.deepEqual(next, ['k']); }); -test(`notify ${NOTIFY_JOB_START}`, async (t) => { +test.serial(`notify ${NOTIFY_JOB_START}`, async (t) => { const job = { id: 'j', expression: [(s: State) => s], @@ -95,25 +95,28 @@ test(`notify ${NOTIFY_JOB_START}`, async (t) => { await execute(context, job, state); }); -test(`don't notify ${NOTIFY_JOB_START} for trigger-style jobs`, async (t) => { - const job = { - id: 'j', - }; - const state = createState(); +test.serial( + `don't notify ${NOTIFY_JOB_START} for trigger-style jobs`, + async (t) => { + const job = { + id: 'j', + }; + const state = createState(); - const notify = (event: string, payload?: any) => { - if (event === NOTIFY_JOB_START) { - t.fail('should not notify job-start for trigger nodes'); - } - }; + const notify = (event: string, payload?: any) => { + if (event === NOTIFY_JOB_START) { + t.fail('should not notify job-start for trigger nodes'); + } + }; - const context = createContext({ notify }); + const context = createContext({ notify }); - await execute(context, job, state); - t.pass('all ok'); -}); + await execute(context, job, state); + t.pass('all ok'); + } +); -test(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { +test.serial(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { const job = { id: 'j', expression: [(s: State) => s], @@ -139,7 +142,7 @@ test(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { await 
execute(context, job, state); }); -test(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { +test.serial(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { const job = { id: 'j', expression: [(s: State) => s], @@ -165,49 +168,55 @@ test(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { await execute(context, job, state); }); -test(`don't notify ${NOTIFY_JOB_COMPLETE} for trigger-style jobs`, async (t) => { - const job = { - id: 'j', - }; - const state = createState(); - - const notify = (event: string) => { - if (event === NOTIFY_JOB_COMPLETE) { - t.fail('should not notify job-start for trigger nodes'); - } - }; - - const context = createContext({ notify }); - - await execute(context, job, state); - t.pass('all ok'); -}); - -test(`notify ${NOTIFY_JOB_COMPLETE} should publish serializable state`, async (t) => { - // Promises will trigger an exception if you try to serialize them - // If we don't return finalState in execute/expression, this test will fail - const resultState = { x: new Promise((r) => r), y: 22 }; - const job = { - id: 'j', - expression: [() => resultState], - }; - const state = createState(); - - const notify = (event: string, payload: any) => { - if (event === NOTIFY_JOB_COMPLETE) { - const { state, duration, jobId } = payload; - t.truthy(state); - t.assert(!isNaN(duration)); - t.is(jobId, 'j'); - } - }; - - const context = createContext({ notify }); - - await execute(context, job, state); -}); - -test(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { +test.serial( + `don't notify ${NOTIFY_JOB_COMPLETE} for trigger-style jobs`, + async (t) => { + const job = { + id: 'j', + }; + const state = createState(); + + const notify = (event: string) => { + if (event === NOTIFY_JOB_COMPLETE) { + t.fail('should not notify job-start for trigger nodes'); + } + }; + + const context = createContext({ notify }); + + await execute(context, job, state); + t.pass('all ok'); + } +); + +test.serial( + `notify ${NOTIFY_JOB_COMPLETE} should publish serializable state`, + async (t) => { + // Promises will trigger an exception if you try to serialize them + // If we don't return finalState in execute/expression, this test will fail + const resultState = { x: new Promise((r) => r), y: 22 }; + const job = { + id: 'j', + expression: [() => resultState], + }; + const state = createState(); + + const notify = (event: string, payload: any) => { + if (event === NOTIFY_JOB_COMPLETE) { + const { state, duration, jobId } = payload; + t.truthy(state); + t.assert(!isNaN(duration)); + t.is(jobId, 'j'); + } + }; + + const context = createContext({ notify }); + + await execute(context, job, state); + } +); + +test.serial(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { const job = { id: 'j', expression: [ @@ -241,9 +250,9 @@ test(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { await execute(context, job, state); }); -test('log duration of execution', async (t) => { +test.serial('log duration of execution', async (t) => { const job = { - id: 'j', + id: 'y', expression: [(s: State) => s], }; const initialState = createState(); @@ -253,5 +262,22 @@ test('log duration of execution', async (t) => { const duration = logger._find('success', /completed job /i); - t.regex(duration?.message, /completed job j in \d\d?ms/i); + t.regex(duration?.message, /completed job y in \d\d?ms/i); +}); + +test.serial('log memory usage', async (t) => { + const job = { + id: 'z', + expression: [(s: State) => s], + }; + const initialState = createState(); + const context = 
createContext(); + + await execute(context, job, initialState); + + const memory = logger._find('debug', /final memory usage/i); + + // All we're looking for here is two strings of numbers in mb + // the rest is for the birds + t.regex(memory?.message, /\d+mb(.+)\d+mb/i); }); From 5991622ec262c68e42c318b7188f61de29e6e434 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 15:53:48 +0000 Subject: [PATCH 09/24] engine: forward mem on job-complete --- .changeset/happy-pianos-cheat.md | 5 +++++ packages/engine-multi/src/api/lifecycle.ts | 3 ++- packages/engine-multi/src/events.ts | 4 ++++ packages/engine-multi/src/worker/events.ts | 4 ++++ packages/engine-multi/test/api/lifecycle.test.ts | 4 ++++ packages/engine-multi/test/integration.test.ts | 2 ++ 6 files changed, 21 insertions(+), 1 deletion(-) create mode 100644 .changeset/happy-pianos-cheat.md diff --git a/.changeset/happy-pianos-cheat.md b/.changeset/happy-pianos-cheat.md new file mode 100644 index 000000000..52eb68363 --- /dev/null +++ b/.changeset/happy-pianos-cheat.md @@ -0,0 +1,5 @@ +--- +'@openfn/engine-multi': patch +--- + +Include memory usage in job-compelte events diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index de0259f1a..65c7e3f2e 100644 --- a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -84,7 +84,7 @@ export const jobComplete = ( context: ExecutionContext, event: internalEvents.JobCompleteEvent ) => { - const { threadId, state, duration, jobId, next } = event; + const { threadId, state, duration, jobId, next, mem } = event; context.emit(externalEvents.JOB_COMPLETE, { threadId, @@ -92,6 +92,7 @@ export const jobComplete = ( duration, jobId, next, + mem, }); }; diff --git a/packages/engine-multi/src/events.ts b/packages/engine-multi/src/events.ts index b7bd6cdac..7294567a7 100644 --- a/packages/engine-multi/src/events.ts +++ b/packages/engine-multi/src/events.ts @@ -72,6 +72,10 @@ export interface JobCompletePayload extends ExternalEvent { duration: number; state: any; // the result state next: string[]; // downstream jobs + mem: { + job: number; + system: number; + }; } export interface JobErrorPayload extends ExternalEvent { diff --git a/packages/engine-multi/src/worker/events.ts b/packages/engine-multi/src/worker/events.ts index 6ed599069..1bab7c1ed 100644 --- a/packages/engine-multi/src/worker/events.ts +++ b/packages/engine-multi/src/worker/events.ts @@ -45,6 +45,10 @@ export interface JobCompleteEvent extends InternalEvent { state: any; duration: number; next: string[]; + mem: { + job: number; + system: number; + }; } export interface JobErrorEvent extends InternalEvent { diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index 853523668..624f72388 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -142,6 +142,8 @@ test(`job-complete: emits ${e.JOB_COMPLETE}`, (t) => { jobId: 'j', duration: 200, state: 22, + next: [], + mem: { job: 100, system: 1000 }, }; context.on(e.JOB_COMPLETE, (evt) => { @@ -150,6 +152,8 @@ test(`job-complete: emits ${e.JOB_COMPLETE}`, (t) => { t.is(evt.jobId, 'j'); t.is(evt.state, 22); t.is(evt.duration, 200); + t.deepEqual(evt.next, []); + t.deepEqual(evt.mem, event.mem); done(); }); diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index f55d9c9c3..abe17b7a4 100644 --- 
a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -90,6 +90,8 @@ test.serial('trigger job-complete', (t) => { t.is(evt.jobId, 'j1'); t.deepEqual(evt.state, { data: {} }); t.pass('job completed'); + t.truthy(evt.mem.job); + t.truthy(evt.mem.system); done(); }); }); From 340b96e55e9474c4b715045622030f354236dde3 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 16:03:58 +0000 Subject: [PATCH 10/24] worker: send memory usage to run:complete --- .changeset/ninety-fireants-melt.md | 5 +++++ packages/engine-multi/src/worker/mock-worker.ts | 1 + packages/ws-worker/src/api/execute.ts | 5 +++-- packages/ws-worker/test/api/execute.test.ts | 3 ++- packages/ws-worker/test/lightning.test.ts | 3 +++ 5 files changed, 14 insertions(+), 3 deletions(-) create mode 100644 .changeset/ninety-fireants-melt.md diff --git a/.changeset/ninety-fireants-melt.md b/.changeset/ninety-fireants-melt.md new file mode 100644 index 000000000..f609c6c2a --- /dev/null +++ b/.changeset/ninety-fireants-melt.md @@ -0,0 +1,5 @@ +--- +'@openfn/ws-worker': patch +--- + +Send memory usage to lightning on run:complete diff --git a/packages/engine-multi/src/worker/mock-worker.ts b/packages/engine-multi/src/worker/mock-worker.ts index c55c88f72..978ffa8c1 100644 --- a/packages/engine-multi/src/worker/mock-worker.ts +++ b/packages/engine-multi/src/worker/mock-worker.ts @@ -65,6 +65,7 @@ function mock(plan: MockExecutionPlan) { duration: 100, state, next: [], + mem: { job: 100, system: 1000 }, }); resolve(state); }, job._delay || 1); diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index f072d248c..f9b2e7d7d 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -216,6 +216,8 @@ export function onJobComplete( } state.dataclips[dataclipId] = event.state; + delete state.activeRun; + delete state.activeJob; // TODO right now, the last job to run will be the result for the attempt // this may not stand up in the future // I'd feel happer if the runtime could judge what the final result is @@ -229,8 +231,6 @@ export function onJobComplete( state.inputDataclips[nextJobId] = dataclipId; }); - delete state.activeRun; - delete state.activeJob; const { reason, error_message, error_type } = calculateJobExitReason( job_id, event.state, @@ -247,6 +247,7 @@ export function onJobComplete( reason, error_message, error_type, + mem: event.mem, }; return sendEvent(channel, RUN_COMPLETE, evt); } diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 2b7c58e46..0498b0669 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -235,10 +235,11 @@ test('jobComplete should send a run:complete event', async (t) => { t.truthy(evt.run_id); t.truthy(evt.output_dataclip_id); t.is(evt.output_dataclip, JSON.stringify(result)); + t.deepEqual(evt.mem, event.mem); }, }); - const event = { state: result, next: ['a'] }; + const event = { state: result, next: ['a'], mem: { job: 1, system: 10 } }; await onJobComplete({ channel, state }, event); }); diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 207b5e384..36340c733 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -275,6 +275,9 @@ test.serial( t.truthy(payload.run_id); t.truthy(payload.output_dataclip); t.truthy(payload.output_dataclip_id); + 
t.truthy(payload.mem.job); + t.truthy(payload.mem.system); + t.true(payload.mem.system > payload.mem.job); t.pass('called run complete'); }); From 23ec88ea03b93060c63e6be7d0724e1472996c09 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 16:21:47 +0000 Subject: [PATCH 11/24] tests: include memory log for attempts tests --- .../worker/test/attempts.test.ts | 52 ++++++++++++++++--- .../worker/test/integration.test.ts | 33 ------------ 2 files changed, 44 insertions(+), 41 deletions(-) diff --git a/integration-tests/worker/test/attempts.test.ts b/integration-tests/worker/test/attempts.test.ts index 078ca4d49..868180711 100644 --- a/integration-tests/worker/test/attempts.test.ts +++ b/integration-tests/worker/test/attempts.test.ts @@ -22,14 +22,30 @@ test.before(async () => { })); }); +test.afterEach(async () => { + lightning.destroy(); +}); + test.after(async () => { lightning.destroy(); await worker.destroy(); }); -const run = async (attempt) => { +const humanMb = (sizeInBytes: number) => + `${Math.round(sizeInBytes / 1024 / 1024)}mb`; + +const run = async (t, attempt) => { return new Promise(async (done, reject) => { - lightning.once('attempt:complete', (evt) => { + lightning.on('run:complete', ({ payload }) => { + // TODO I'd actually love to include the thread id here, but we don't send it :( Maybe soon? + // TODO ditto duration!! omg how am I not sending that? + t.log( + `${payload.job_id}: ${humanMb(payload.mem.job)} (${humanMb( + payload.mem.system + )})` + ); + }); + lightning.on('attempt:complete', (evt) => { if (attempt.id === evt.attemptId) { done(lightning.getResult(attempt.id)); } else { @@ -42,7 +58,7 @@ const run = async (attempt) => { }); }; -test('echo initial state', async (t) => { +test.serial('echo initial state', async (t) => { const initialState = { data: { count: 22 } }; lightning.addDataclip('s1', initialState); @@ -52,7 +68,7 @@ test('echo initial state', async (t) => { dataclip_id: 's1', }); - const result = await run(attempt); + const result = await run(t, attempt); t.deepEqual(result, { data: { @@ -61,7 +77,7 @@ test('echo initial state', async (t) => { }); }); -test('start from a trigger node', async (t) => { +test.serial('start from a trigger node', async (t) => { let runStartEvent; let runCompleteEvent; @@ -84,7 +100,7 @@ test('start from a trigger node', async (t) => { runCompleteEvent = evt.payload; }); - await run(attempt); + await run(t, attempt); t.truthy(runStartEvent); t.is(runStartEvent.job_id, job.id); @@ -103,7 +119,7 @@ test('start from a trigger node', async (t) => { // hmm this event feels a bit fine-grained for this // This file should just be about input-output // TODO maybe move it into integrations later -test('run parallel jobs', async (t) => { +test.serial('run parallel jobs', async (t) => { const initialState = { data: { count: 22 } }; lightning.addDataclip('s1', initialState); @@ -144,7 +160,7 @@ test('run parallel jobs', async (t) => { outputJson[evt.payload.job_id] = JSON.parse(evt.payload.output_dataclip); }); - const result = await run(attempt); + await run(t, attempt); t.deepEqual(outputJson[x.id].data, { a: true, @@ -169,3 +185,23 @@ test('run parallel jobs', async (t) => { // }, // }); }); + +test('run a http adaptor job', async (t) => { + const job = createJob({ + adaptor: '@openfn/language-http@5.0.4', + body: 'get("https://jsonplaceholder.typicode.com/todos/1");', + }); + const attempt = createAttempt([], [job], []); + const result = await run(t, attempt); + + t.truthy(result.response); + t.is(result.response.status, 
200); + t.truthy(result.response.headers); + + t.deepEqual(result.data, { + userId: 1, + id: 1, + title: 'delectus aut autem', + completed: false, + }); +}); diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 652c641fb..38b2d0c08 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -236,39 +236,6 @@ test('run a job with initial state (no top level keys)', (t) => { }); }); -test('run a http adaptor job', (t) => { - return new Promise(async (done) => { - const attempt = { - id: crypto.randomUUID(), - jobs: [ - { - adaptor: '@openfn/language-http@5.0.4', - body: 'get("https://jsonplaceholder.typicode.com/todos/1");', - }, - ], - }; - - lightning.once('attempt:complete', () => { - const result = lightning.getResult(attempt.id); - - t.truthy(result.response); - t.is(result.response.status, 200); - t.truthy(result.response.headers); - - t.deepEqual(result.data, { - userId: 1, - id: 1, - title: 'delectus aut autem', - completed: false, - }); - - done(); - }); - - lightning.enqueueAttempt(attempt); - }); -}); - // TODO this sort of works but the server side of it does not // Will work on it more // TODO2: the runtime doesn't return config anymore (correctly!) From 04ac3ccc177f9bdc032c0c5d68aaa633165dd992 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 16:35:17 +0000 Subject: [PATCH 12/24] worker: include duration and threadId in run:complete --- .changeset/fifty-students-pay.md | 5 +++++ packages/ws-worker/src/api/execute.ts | 3 +++ packages/ws-worker/src/mock/runtime-engine.ts | 11 +++++++++-- packages/ws-worker/test/api/execute.test.ts | 12 ++++++++++-- packages/ws-worker/test/lightning.test.ts | 3 ++- 5 files changed, 29 insertions(+), 5 deletions(-) create mode 100644 .changeset/fifty-students-pay.md diff --git a/.changeset/fifty-students-pay.md b/.changeset/fifty-students-pay.md new file mode 100644 index 000000000..888c1c4a8 --- /dev/null +++ b/.changeset/fifty-students-pay.md @@ -0,0 +1,5 @@ +--- +'@openfn/ws-worker': patch +--- + +Include duration and threadid in run-complete diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index f9b2e7d7d..0df599976 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -247,7 +247,10 @@ export function onJobComplete( reason, error_message, error_type, + mem: event.mem, + duration: event.duration, + thread_id: event.threadId, }; return sendEvent(channel, RUN_COMPLETE, evt); } diff --git a/packages/ws-worker/src/mock/runtime-engine.ts b/packages/ws-worker/src/mock/runtime-engine.ts index 56246d6b9..97a56de76 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ b/packages/ws-worker/src/mock/runtime-engine.ts @@ -1,6 +1,8 @@ import { EventEmitter } from 'node:events'; +import crypto from 'node:crypto'; import run, { ExecutionPlan } from '@openfn/runtime'; import * as engine from '@openfn/engine-multi'; + import mockResolvers from './resolvers'; export type EngineEvent = @@ -74,6 +76,8 @@ async function createMock() { const { id, jobs } = xplan; activeWorkflows[id!] 
= true; + const threadId = crypto.randomUUID(); + for (const job of jobs) { if (typeof job.configuration === 'string') { // Call the crendtial callback, but don't do anything with it @@ -96,6 +100,7 @@ async function createMock() { log: (...args: any[]) => { dispatch('workflow-log', { workflowId: id, + threadId: threadId, level: 'info', json: true, message: args, @@ -113,18 +118,20 @@ async function createMock() { notify: (name: any, payload: any) => { dispatch(name, { workflowId: id, + threadId: threadId, ...payload, }); }, }, }; setTimeout(async () => { - dispatch('workflow-start', { workflowId: id }); + dispatch('workflow-start', { workflowId: id, threadId: threadId }); try { await run(xplan, undefined, opts as any); } catch (e: any) { dispatch('workflow-error', { + threadId: threadId, workflowId: id, type: e.name, message: e.message, @@ -132,7 +139,7 @@ async function createMock() { } delete activeWorkflows[id!]; - dispatch('workflow-complete', { workflowId: id }); + dispatch('workflow-complete', { workflowId: id, threadId: threadId }); }, 1); // Technically the engine should return an event emitter diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 0498b0669..2b73a61a4 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -220,7 +220,7 @@ test('jobComplete should generate an exit reason: success', async (t) => { t.is(event.error_message, null); }); -test('jobComplete should send a run:complete event', async (t) => { +test.only('jobComplete should send a run:complete event', async (t) => { const plan = { id: 'attempt-1' }; const jobId = 'job-1'; const result = { x: 10 }; @@ -236,10 +236,18 @@ test('jobComplete should send a run:complete event', async (t) => { t.truthy(evt.output_dataclip_id); t.is(evt.output_dataclip, JSON.stringify(result)); t.deepEqual(evt.mem, event.mem); + t.is(evt.duration, event.duration); + t.is(evt.thread_id, event.threadId); }, }); - const event = { state: result, next: ['a'], mem: { job: 1, system: 10 } }; + const event = { + state: result, + next: ['a'], + mem: { job: 1, system: 10 }, + duration: 61, + threadId: 'abc', + }; await onJobComplete({ channel, state }, event); }); diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 36340c733..a16f66d33 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -264,13 +264,14 @@ test.serial(`events: lightning should receive a ${e.RUN_START} event`, (t) => { }); }); -test.serial( +test.serial.only( `events: lightning should receive a ${e.RUN_COMPLETE} event`, (t) => { return new Promise((done) => { const attempt = getAttempt(); lng.onSocketEvent(e.RUN_COMPLETE, attempt.id, ({ payload }) => { + console.log(payload); t.is(payload.job_id, 'j'); t.truthy(payload.run_id); t.truthy(payload.output_dataclip); From 57a90f6abd56aecbe5a1bf9d67ae335ad3523356 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 16:42:17 +0000 Subject: [PATCH 13/24] tests: More diagnostics in attempts tests --- integration-tests/worker/test/attempts.test.ts | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/integration-tests/worker/test/attempts.test.ts b/integration-tests/worker/test/attempts.test.ts index 868180711..434ea90b7 100644 --- a/integration-tests/worker/test/attempts.test.ts +++ b/integration-tests/worker/test/attempts.test.ts @@ -31,18 +31,16 @@ test.after(async () => { await 
worker.destroy(); }); -const humanMb = (sizeInBytes: number) => - `${Math.round(sizeInBytes / 1024 / 1024)}mb`; +const humanMb = (sizeInBytes: number) => Math.round(sizeInBytes / 1024 / 1024); const run = async (t, attempt) => { return new Promise(async (done, reject) => { lightning.on('run:complete', ({ payload }) => { - // TODO I'd actually love to include the thread id here, but we don't send it :( Maybe soon? - // TODO ditto duration!! omg how am I not sending that? + // TODO friendlier job names for this would be nice (rather than run ids) t.log( - `${payload.job_id}: ${humanMb(payload.mem.job)} (${humanMb( - payload.mem.system - )})` + `run ${payload.run_id} done in ${payload.duration / 1000}s [${humanMb( + payload.mem.job + )} / ${humanMb(payload.mem.system)}mb] [thread ${payload.thread_id}]` ); }); lightning.on('attempt:complete', (evt) => { From 3c657025ccf1255d6b0ed98e6a49301ed196b592 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 16:54:33 +0000 Subject: [PATCH 14/24] worker: tests and typings --- packages/ws-worker/src/mock/runtime-engine.ts | 3 +++ packages/ws-worker/test/mock/runtime-engine.test.ts | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/packages/ws-worker/src/mock/runtime-engine.ts b/packages/ws-worker/src/mock/runtime-engine.ts index 97a56de76..ebb87a5c3 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ b/packages/ws-worker/src/mock/runtime-engine.ts @@ -15,15 +15,18 @@ export type EngineEvent = export type WorkflowStartEvent = { workflowId: string; + threadId: string; }; export type WorkflowCompleteEvent = { workflowId: string; error?: any; // hmm maybe not + threadId: string; }; export type WorkflowErrorEvent = { workflowId: string; + threadId: string; message: string; }; diff --git a/packages/ws-worker/test/mock/runtime-engine.test.ts b/packages/ws-worker/test/mock/runtime-engine.test.ts index 14dc2b843..13c3c3055 100644 --- a/packages/ws-worker/test/mock/runtime-engine.test.ts +++ b/packages/ws-worker/test/mock/runtime-engine.test.ts @@ -53,7 +53,8 @@ test('Dispatch complete events when a workflow completes', async (t) => { 'workflow-complete' ); - t.deepEqual(evt, { workflowId: 'w1' }); + t.is(evt.workflowId, 'w1'); + t.truthy(evt.threadId); }); test('Dispatch start events for a job', async (t) => { From 8263220a9bcd11c9ed9a4d4ec775fe029b951154 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 24 Nov 2023 17:01:36 +0000 Subject: [PATCH 15/24] changeset typo --- .changeset/happy-pianos-cheat.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changeset/happy-pianos-cheat.md b/.changeset/happy-pianos-cheat.md index 52eb68363..f860e7c4d 100644 --- a/.changeset/happy-pianos-cheat.md +++ b/.changeset/happy-pianos-cheat.md @@ -2,4 +2,4 @@ '@openfn/engine-multi': patch --- -Include memory usage in job-compelte events +Include memory usage in job-complete events From b0286f3ae454d5da89239d670804e7a38cb938c1 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 27 Nov 2023 10:12:08 +0000 Subject: [PATCH 16/24] runtime: comments and new test --- packages/runtime/test/memory.test.ts | 61 +++++++++++++++++++++------- 1 file changed, 46 insertions(+), 15 deletions(-) diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index 29f9bddf3..972482dbd 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -30,7 +30,7 @@ import callRuntime from '../src/runtime'; test.afterEach(() => { // Force gc to try and better isolate tests - // THis 
may not work and maybe we need to use threads or something to ensure a pristine environment + // This may not work and maybe we need to use threads or something to ensure a pristine environment // Certainly runs seem to affect each other in the same process (no suprise there) // @ts-ignore global.gc(); @@ -41,8 +41,7 @@ type Mem = { system: number; // rss in bytes }; -// This helper will run a workflow and return -// memory usage per run +// This helper will run a workflow and return memory usage per run const run = async (t, workflow: ExecutionPlan) => { const mem: Record = {}; @@ -70,22 +69,14 @@ const run = async (t, workflow: ExecutionPlan) => { }; const logUsage = (t: any, mem: Mem, label = '') => { - // What kind of rounding should I Do? - // Rounding to an integer is best for humans but I think in these tests we lose a lot of fidelity - // I mean you could lose nearly 500kb of accuracy, that's a lot! const job = (mem.job / 1024 / 1024).toFixed(2); const system = (mem.system / 1024 / 1024).toFixed(2); t.log(`${label}: ${job}mb / system ${system}mb`); }; -// const jobs = { -// fn: () => 'export default [(s) => s]', - -// }; - const expressions = { readMemory: (jobName: string) => (s: any) => { - // Hmm, the rounded human number actually looks quite different to theactual reported number + // Hmm, the rounded human number actually looks quite different to the actual reported number const mem = process.memoryUsage(); s[jobName] = { job: mem.heapUsed, system: mem.rss }; return s; @@ -94,6 +85,12 @@ const expressions = { s.data = Array(numberofElements).fill('bowser'); return s; }, + wait: + (duration = 100) => + (s: any) => + new Promise((resolve) => { + setTimeout(() => resolve(s), duration); + }), }; // assert that b is within tolerance% of the value of a @@ -118,7 +115,7 @@ test.serial('emit memory usage to job-complete', async (t) => { { id: 'a', // This seems to use ~55mb of heap (job) - expression: [(s) => s], + expression: [(s: any) => s], }, ], }; @@ -198,7 +195,7 @@ test.serial('create a large array in a job', async (t) => { t.true(state.a2.job > state.a1.job); // The final job memory is a lot bigger because AFTER the job we serialize state.data - // Which of course has a huge array on it - so memory baloons¬ + // Which of course has a huge array on it - so memory baloons t.true(mem.a.job > state.a1.job + state.a2.job); }); @@ -320,4 +317,38 @@ test.serial( } ); -test.todo('will gc run if we leave a long timeout?'); +// This one is pretty inconsisent - about a 50% pass rate +// Does that imply that sometimes GC is running during the timeout? +test.serial.skip( + 'create a large array in a job, wait on timeout, read final memory', + async (t) => { + const plan = { + jobs: [ + { + id: 'a', + expression: [ + expressions.createArray(10e6), // 10 million ~76mb + expressions.readMemory('a'), + + // will garbage collection run? 
+ // I dont think so because it's based on # of allocations + expressions.wait(500), + + expressions.readMemory('b'), + ], + }, + ], + }; + + const { state } = await run(t, plan); + logUsage(t, state.a, 'peak'); + t.log(state.a.job); + t.log(state.b.job); + + // The first job should use over 100mb + t.true(state.a.job > 100 * 1024 * 1024); + + // The two memory snapshots should be about the same + t.true(roughlyEqual(state.a.job, state.b.job, 0.02)); + } +); From 090a45dec6843b1494843a87ef5ec1214257bab6 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 27 Nov 2023 15:30:21 +0000 Subject: [PATCH 17/24] engine: test on memory limits --- .../engine-multi/src/test/worker-functions.js | 21 +++++++++ .../test/worker/worker-pool.test.ts | 43 ++++++++++++++++--- 2 files changed, 59 insertions(+), 5 deletions(-) diff --git a/packages/engine-multi/src/test/worker-functions.js b/packages/engine-multi/src/test/worker-functions.js index 0a10c4fe9..ff09c7b79 100644 --- a/packages/engine-multi/src/test/worker-functions.js +++ b/packages/engine-multi/src/test/worker-functions.js @@ -1,6 +1,7 @@ import path from 'node:path'; import workerpool from 'workerpool'; import { threadId } from 'node:worker_threads'; +import v8 from 'v8'; import { increment } from './counter.js'; @@ -92,4 +93,24 @@ workerpool.worker({ const { increment } = await import(path.resolve('src/test/counter.js')); return increment(); }, + + // Creating a big enough array with `Array(1e9).fill('mario')` + // is enoguh to OOM the _process_, taking the whole engine out + // This function should blow the thread's memory without + // killing the parent process + blowMemory: () => { + let data = []; + while (true) { + data.push(Array(1e6).fill('mario')); + } + }, + + // Some useful code + // const stats = v8.getHeapStatistics(); + + // console.log( + // `node heap limit = ${ + // stats.heap_size_limit / 1024 / 1024 + // } Mb\n heap used = ${hprocess.memoryUsage().heapUsed / 1024 / 1024}mb` + // ); }); diff --git a/packages/engine-multi/test/worker/worker-pool.test.ts b/packages/engine-multi/test/worker/worker-pool.test.ts index b2e59b057..5ec782c25 100644 --- a/packages/engine-multi/test/worker/worker-pool.test.ts +++ b/packages/engine-multi/test/worker/worker-pool.test.ts @@ -1,5 +1,6 @@ import path from 'node:path'; import test from 'ava'; +import v8 from 'v8'; import workerpool from 'workerpool'; const workerPath = path.resolve('src/test/worker-functions.js'); @@ -206,7 +207,6 @@ test.serial('dynamic imports should share state across runs', async (t) => { t.is(count3, 3); }); - // This is kinda done in the tests above, it's just to setup the next test test.serial('module scope is shared within a thread', async (t) => { pool = createDedicatedPool({ maxWorkers: 1 }); @@ -215,9 +215,9 @@ test.serial('module scope is shared within a thread', async (t) => { pool.exec('incrementDynamic', []), pool.exec('incrementDynamic', []), pool.exec('incrementDynamic', []), - ]) + ]); - t.deepEqual(result, [1, 2, 3]) + t.deepEqual(result, [1, 2, 3]); }); test.serial('module scope is isolated across threads', async (t) => { @@ -227,7 +227,40 @@ test.serial('module scope is isolated across threads', async (t) => { pool.exec('incrementDynamic', []), pool.exec('incrementDynamic', []), pool.exec('incrementDynamic', []), - ]) + ]); + + t.deepEqual(result, [1, 1, 1]); +}); + +test.serial('worker should die if it blows the memory limit', async (t) => { + pool = createDedicatedPool({ + workerThreadOpts: { + // Note for the record that these limits do NOT include 
arraybuffers + resourceLimits: { + // These are values I can set - t.deepEqual(result, [1,1,1]) + // And I think this is the one I care about: + // The maximum size of the main heap in MB. + // Note that this needs to be at least like 200mb to not blow up in test + maxOldGenerationSizeMb: 100, + + // // The maximum size of a heap space for recently created objects. + // maxYoungGenerationSizeMb: 10, + + // // The size of a pre-allocated memory range used for generated code. + // codeRangeSizeMb: 20, + + // The default maximum stack size for the thread. Small values may lead to unusable Worker instances. Default: 4 + // stackSizeMb: 4, + }, + }, + }); + + await t.throwsAsync(() => pool.exec('blowMemory', []), { + code: 'ERR_WORKER_OUT_OF_MEMORY', + message: + 'Worker terminated due to reaching memory limit: JS heap out of memory', + }); }); + +test.todo('threads should all have the same rss memory'); From 56035cb311f32f2c9bb4c41e216b7b83c18ea03d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 27 Nov 2023 15:39:06 +0000 Subject: [PATCH 18/24] engine: clean up a bit --- .../engine-multi/src/test/worker-functions.js | 6 ++++-- .../engine-multi/test/worker/worker-pool.test.ts | 15 ++------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/packages/engine-multi/src/test/worker-functions.js b/packages/engine-multi/src/test/worker-functions.js index ff09c7b79..ae7e6cf78 100644 --- a/packages/engine-multi/src/test/worker-functions.js +++ b/packages/engine-multi/src/test/worker-functions.js @@ -95,7 +95,7 @@ workerpool.worker({ }, // Creating a big enough array with `Array(1e9).fill('mario')` - // is enoguh to OOM the _process_, taking the whole engine out + // is enghuh to OOM the _process_, taking the whole engine out // This function should blow the thread's memory without // killing the parent process blowMemory: () => { @@ -103,11 +103,13 @@ workerpool.worker({ while (true) { data.push(Array(1e6).fill('mario')); } + + // This is too extreme and will kill the process + // Array(1e9).fill('mario') }, // Some useful code // const stats = v8.getHeapStatistics(); - // console.log( // `node heap limit = ${ // stats.heap_size_limit / 1024 / 1024 diff --git a/packages/engine-multi/test/worker/worker-pool.test.ts b/packages/engine-multi/test/worker/worker-pool.test.ts index 5ec782c25..f2186ba2d 100644 --- a/packages/engine-multi/test/worker/worker-pool.test.ts +++ b/packages/engine-multi/test/worker/worker-pool.test.ts @@ -235,23 +235,12 @@ test.serial('module scope is isolated across threads', async (t) => { test.serial('worker should die if it blows the memory limit', async (t) => { pool = createDedicatedPool({ workerThreadOpts: { + // See resourceLimits for more docs // Note for the record that these limits do NOT include arraybuffers resourceLimits: { - // These are values I can set - - // And I think this is the one I care about: - // The maximum size of the main heap in MB. + // This is basically heap size // Note that this needs to be at least like 200mb to not blow up in test maxOldGenerationSizeMb: 100, - - // // The maximum size of a heap space for recently created objects. - // maxYoungGenerationSizeMb: 10, - - // // The size of a pre-allocated memory range used for generated code. - // codeRangeSizeMb: 20, - - // The default maximum stack size for the thread. Small values may lead to unusable Worker instances. 
Default: 4 - // stackSizeMb: 4, }, }, }); From 67ed024c6255c4c3e9a9cd791f72ac34ff0ab353 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 27 Nov 2023 16:00:11 +0000 Subject: [PATCH 19/24] engine: trap, map and test OOM errors --- packages/engine-multi/src/api.ts | 7 +++-- packages/engine-multi/src/api/call-worker.ts | 7 +++++ packages/engine-multi/src/api/execute.ts | 7 +++-- packages/engine-multi/src/engine.ts | 2 ++ packages/engine-multi/src/errors.ts | 13 +++++++++ packages/engine-multi/test/errors.test.ts | 30 ++++++++++++++++++-- 6 files changed, 58 insertions(+), 8 deletions(-) diff --git a/packages/engine-multi/src/api.ts b/packages/engine-multi/src/api.ts index 4b25829b1..10c7c046a 100644 --- a/packages/engine-multi/src/api.ts +++ b/packages/engine-multi/src/api.ts @@ -29,6 +29,8 @@ export type RTEOptions = Partial< const DEFAULT_REPO_DIR = '/tmp/openfn/worker/repo'; +const DEFAULT_MEMORY_LIMIT = 500; + // Create the engine and handle user-facing stuff, like options parsing // and defaulting const createAPI = async function (options: RTEOptions = {}) { @@ -59,15 +61,14 @@ const createAPI = async function (options: RTEOptions = {}) { minWorkers: options.minWorkers, maxWorkers: options.maxWorkers, + memoryLimitMb: options.memoryLimitMb || DEFAULT_MEMORY_LIMIT, purge: options.hasOwnProperty('purge') ? options.purge : true, }; + logger.info(`memory limit set to ${options.memoryLimitMb}mb`); - // Note that the engine here always uses the standard worker, the real one - // To use a mock, create the engine directly const engine = await createEngine(engineOptions); - // Return the external API return { execute: engine.execute, listen: engine.listen, diff --git a/packages/engine-multi/src/api/call-worker.ts b/packages/engine-multi/src/api/call-worker.ts index 120cb1657..257175705 100644 --- a/packages/engine-multi/src/api/call-worker.ts +++ b/packages/engine-multi/src/api/call-worker.ts @@ -19,6 +19,7 @@ type WorkerOptions = { maxWorkers?: number; env?: any; timeout?: number; // ms + memoryLimitMb?: number; }; // Adds a `callWorker` function to the API object, which will execute a task in a worker @@ -67,6 +68,7 @@ export function createWorkers(workerPath: string, options: WorkerOptions) { env = {}, minWorkers = 0, maxWorkers = 5, // what's a good default here? 
Keeping it low to be conservative + memoryLimitMb, } = options; let resolvedWorkerPath; @@ -88,6 +90,11 @@ export function createWorkers(workerPath: string, options: WorkerOptions) { execArgv: ['--no-warnings', '--experimental-vm-modules'], // Important to override the child env so that it cannot access the parent env env, + resourceLimits: { + // This is a fair approximation for heapsize + // Note that it's still possible to OOM the process without hitting this limit + maxOldGenerationSizeMb: memoryLimitMb, + }, }, }); } diff --git a/packages/engine-multi/src/api/execute.ts b/packages/engine-multi/src/api/execute.ts index 2a4669b24..0a68625d4 100644 --- a/packages/engine-multi/src/api/execute.ts +++ b/packages/engine-multi/src/api/execute.ts @@ -16,7 +16,7 @@ import { jobError, } from './lifecycle'; import preloadCredentials from './preload-credentials'; -import { ExecutionError, TimeoutError } from '../errors'; +import { ExecutionError, OOMError, TimeoutError } from '../errors'; const execute = async (context: ExecutionContext) => { const { state, callWorker, logger, options } = context; @@ -84,8 +84,9 @@ const execute = async (context: ExecutionContext) => { options.timeout ).catch((e: any) => { // An error here is basically a crash state - - if (e instanceof WorkerPoolPromise.TimeoutError) { + if (e.code === 'ERR_WORKER_OUT_OF_MEMORY') { + e = new OOMError(); + } else if (e instanceof WorkerPoolPromise.TimeoutError) { // Map the workerpool error to our own e = new TimeoutError(options.timeout!); } diff --git a/packages/engine-multi/src/engine.ts b/packages/engine-multi/src/engine.ts index 133167cba..29c3e3d54 100644 --- a/packages/engine-multi/src/engine.ts +++ b/packages/engine-multi/src/engine.ts @@ -74,6 +74,7 @@ export type EngineOptions = { minWorkers?: number; maxWorkers?: number; + memoryLimitMb?: number; whitelist?: RegExp[]; @@ -125,6 +126,7 @@ const createEngine = async (options: EngineOptions, workerPath?: string) => { minWorkers: options.minWorkers, maxWorkers: options.maxWorkers, purge: options.purge, + memoryLimitMb: options.memoryLimitMb, }, options.logger ); diff --git a/packages/engine-multi/src/errors.ts b/packages/engine-multi/src/errors.ts index 34145d720..a51a163b4 100644 --- a/packages/engine-multi/src/errors.ts +++ b/packages/engine-multi/src/errors.ts @@ -70,4 +70,17 @@ export class AutoinstallError extends EngineError { } } +export class OOMError extends EngineError { + severity = 'kill'; + type = 'OOMError'; + name = 'OOMError'; + message; + + constructor() { + super(); + + this.message = `Run exceeded maximum memory usage`; + } +} + // CredentialsError (exception) diff --git a/packages/engine-multi/test/errors.test.ts b/packages/engine-multi/test/errors.test.ts index 165afe419..bec6c8a16 100644 --- a/packages/engine-multi/test/errors.test.ts +++ b/packages/engine-multi/test/errors.test.ts @@ -1,5 +1,5 @@ import test from 'ava'; -import createEngine from '../src/engine'; +import createEngine, { EngineOptions } from '../src/engine'; import { createMockLogger } from '@openfn/logger'; import { WORKFLOW_ERROR } from '../src/events'; @@ -8,7 +8,7 @@ let engine; test.before(async () => { const logger = createMockLogger('', { level: 'debug' }); - const options = { + const options: EngineOptions = { logger, repoDir: '.', autoinstall: { @@ -16,6 +16,7 @@ test.before(async () => { handleIsInstalled: async () => true, }, maxWorkers: 1, + memoryLimitMb: 200, }; // This uses the real runtime and real worker @@ -68,3 +69,28 @@ test('syntax error: illegal throw', (t) => { 
}); }); }); + +test('syntax error: oom error', (t) => { + return new Promise((done) => { + const plan = { + id: 'a', + jobs: [ + { + expression: `export default [(s) => { + s.a = []; + while(true) { + s.a.push(new Array(1e6).fill("oom")); + } + return s; + }]`, + }, + ], + }; + + engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + t.is(evt.type, 'OOMError'); + t.is(evt.severity, 'kill'); + done(); + }); + }); +}); From db3a3c35fea87358c255efc252f7048618ce5a52 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 27 Nov 2023 16:32:56 +0000 Subject: [PATCH 20/24] worker: allow max run memory to be set on startup --- packages/ws-worker/src/start.ts | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/packages/ws-worker/src/start.ts b/packages/ws-worker/src/start.ts index 1b3fd6999..e23a00b26 100644 --- a/packages/ws-worker/src/start.ts +++ b/packages/ws-worker/src/start.ts @@ -18,9 +18,10 @@ type Args = { mock: boolean; backoff: string; capacity?: number; + runMemory?: number; }; -const { WORKER_REPO_DIR, WORKER_SECRET } = process.env; +const { WORKER_REPO_DIR, WORKER_SECRET, MAX_RUN_MEMORY } = process.env; const args = yargs(hideBin(process.argv)) .command('server', 'Start a ws-worker server') @@ -71,6 +72,11 @@ const args = yargs(hideBin(process.argv)) default: 5, type: 'number', }) + .option('run-memory', { + description: 'Maximum memory allocated to a single run, in mb', + type: 'number', + default: MAX_RUN_MEMORY ? parseInt(MAX_RUN_MEMORY) : 500, + }) .parse() as Args; const logger = createLogger('SRV', { level: args.log }); @@ -114,8 +120,10 @@ if (args.mock) { engineReady(engine); }); } else { - createRTE({ repoDir: args.repoDir }).then((engine) => { - logger.debug('engine created'); - engineReady(engine); - }); + createRTE({ repoDir: args.repoDir, memoryLimitMb: args.runMemory }).then( + (engine) => { + logger.debug('engine created'); + engineReady(engine); + } + ); } From 22339c6744611ed444877b7675778b36289f8ac5 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 27 Nov 2023 16:34:12 +0000 Subject: [PATCH 21/24] changesets --- .changeset/brown-windows-sleep.md | 5 +++++ .changeset/dull-beds-dance.md | 5 +++++ 2 files changed, 10 insertions(+) create mode 100644 .changeset/brown-windows-sleep.md create mode 100644 .changeset/dull-beds-dance.md diff --git a/.changeset/brown-windows-sleep.md b/.changeset/brown-windows-sleep.md new file mode 100644 index 000000000..8de70b0f6 --- /dev/null +++ b/.changeset/brown-windows-sleep.md @@ -0,0 +1,5 @@ +--- +'@openfn/engine-multi': patch +--- + +Enforce memory limit on workflows diff --git a/.changeset/dull-beds-dance.md b/.changeset/dull-beds-dance.md new file mode 100644 index 000000000..cce54caf3 --- /dev/null +++ b/.changeset/dull-beds-dance.md @@ -0,0 +1,5 @@ +--- +'@openfn/ws-worker': patch +--- + +Add MAX_RUN_MEMORY env var and option to limit the memory available to each run From fc8d2d904c419aee3d3cfe78ad656a3eab66861c Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 28 Nov 2023 12:34:21 +0000 Subject: [PATCH 22/24] tests: add OOM error --- .../worker/test/exit-reasons.test.ts | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/integration-tests/worker/test/exit-reasons.test.ts b/integration-tests/worker/test/exit-reasons.test.ts index 0128c0b9f..fb3218951 100644 --- a/integration-tests/worker/test/exit-reasons.test.ts +++ b/integration-tests/worker/test/exit-reasons.test.ts @@ -76,3 +76,27 @@ test('exception: autoinstall error', async (t) => { /Error installing 
@openfn\/language-worker-integration-tests@9.9.9/ ); }); + +test('kill: oom', async (t) => { + const attempt = { + id: crypto.randomUUID(), + jobs: [ + { + adaptor: '@openfn/language-common@latest', + body: `fn((s) => { + s.data = []; + while(true) { + s.data.push(new Array(1e5).fill("xyz")) + } + })`, + }, + ], + }; + + const result = await run(attempt); + + const { reason, error_type, error_message } = result; + t.is(reason, 'kill'); + t.is(error_type, 'OOMError'); + t.is(error_message, 'Run exceeded maximum memory usage'); +}); From 0336d3122e3ca505ef9256a745d5c354854ab670 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 28 Nov 2023 14:43:35 +0000 Subject: [PATCH 23/24] tests: added benchmark --- integration-tests/worker/src/init.ts | 8 +- integration-tests/worker/src/util.ts | 17 +++ .../worker/test/benchmark.test.ts | 118 ++++++++++++++++++ 3 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 integration-tests/worker/src/util.ts create mode 100644 integration-tests/worker/test/benchmark.test.ts diff --git a/integration-tests/worker/src/init.ts b/integration-tests/worker/src/init.ts index e2d878b27..ed92aba72 100644 --- a/integration-tests/worker/src/init.ts +++ b/integration-tests/worker/src/init.ts @@ -14,13 +14,18 @@ export const initLightning = (port = 4000) => { return createLightningServer({ port }); }; -export const initWorker = async (lightningPort, engineArgs = {}) => { +export const initWorker = async ( + lightningPort, + engineArgs = {}, + workerArgs = {} +) => { const workerPort = randomPort(); const engineLogger = createMockLogger('engine', { level: 'debug', json: true, }); + const engine = await createEngine({ logger: engineLogger, repoDir: path.resolve('./tmp/repo/default'), @@ -33,6 +38,7 @@ export const initWorker = async (lightningPort, engineArgs = {}) => { port: workerPort, lightning: `ws://localhost:${lightningPort}/worker`, secret: crypto.randomUUID(), + ...workerArgs, }); return { engine, engineLogger, worker }; diff --git a/integration-tests/worker/src/util.ts b/integration-tests/worker/src/util.ts new file mode 100644 index 000000000..c06c5a04a --- /dev/null +++ b/integration-tests/worker/src/util.ts @@ -0,0 +1,17 @@ +export const run = async (lightning, attempt) => { + return new Promise(async (done, reject) => { + lightning.on('attempt:complete', (evt) => { + if (attempt.id === evt.attemptId) { + done(lightning.getResult(attempt.id)); + } else { + // If we get here, something has gone very wrong + reject('attempt not found'); + } + }); + + lightning.enqueueAttempt(attempt); + }); +}; + +export const humanMb = (sizeInBytes: number) => + Math.round(sizeInBytes / 1024 / 1024); diff --git a/integration-tests/worker/test/benchmark.test.ts b/integration-tests/worker/test/benchmark.test.ts new file mode 100644 index 000000000..fe309de8c --- /dev/null +++ b/integration-tests/worker/test/benchmark.test.ts @@ -0,0 +1,118 @@ +import test from 'ava'; +import path from 'node:path'; + +import { createAttempt } from '../src/factories'; +import { initLightning, initWorker } from '../src/init'; +import { run, humanMb } from '../src/util'; + +let lightning; +let worker; + +const maxConcurrency = 1; + +test.before(async () => { + const lightningPort = 4322; + + lightning = initLightning(lightningPort); + + ({ worker } = await initWorker( + lightningPort, + { + repoDir: path.resolve('tmp/repo/bench'), + maxWorkers: maxConcurrency, + }, + { + // Keep the backoff nice and low so that we can claim attempts quickly + backoff: { min: 0.001, max: 0.1 }, + 
      maxWorkflows: maxConcurrency,
+    }
+  ));
+
+  // trigger autoinstall
+  const bootstrap = createAttempt(
+    [],
+    [
+      {
+        body: 'fn((s) => s)',
+        adaptor: '@openfn/language-common@1.7.0',
+      },
+    ],
+    []
+  );
+
+  await run(lightning, bootstrap);
+});
+
+test.afterEach(async () => {
+  lightning.reset();
+});
+
+test.after(async () => {
+  lightning.destroy();
+  await worker.destroy();
+});
+
+// Skipping these in CI (for now at least)
+test.serial.skip('run 100 attempts', async (t) => {
+  return new Promise((done, reject) => {
+    const attemptsTotal = 100;
+    let attemptsComplete = 0;
+
+    let jobMax = 0;
+    let sysMax = 0;
+
+    const start = Date.now();
+
+    for (let i = 0; i < attemptsTotal; i++) {
+      const attempt = createAttempt(
+        [],
+        [
+          {
+            body: `fn((s) => new Promise(resolve => {
+              // create an array and fill with random items
+              const items = []
+              while (items.length > 1e6) {
+                items.push(Math.randomInt * 1000)
+              }
+              // sort it and stringify
+              s.data = items.sort().join('-')
+
+              // wait before returning
+              setTimeout(() => resolve(s), 100)
+            }))`,
+            adaptor: '@openfn/language-common@1.7.0',
+          },
+        ],
+        []
+      );
+      lightning.enqueueAttempt(attempt);
+    }
+
+    lightning.on('run:complete', (evt) => {
+      // May want to disable this but it's nice feedback
+      //console.log('Completed ', evt.attemptId);
+
+      if (evt.payload.reason !== 'success') {
+        t.log('Attempt failed:');
+        t.log(evt.payload);
+        reject('Attempt failed!');
+      }
+
+      attemptsComplete++;
+
+      const { job, system } = evt.payload.mem;
+      jobMax = Math.max(job, jobMax);
+      sysMax = Math.max(system, sysMax);
+
+      if (attemptsComplete === attemptsTotal) {
+        t.log(`${attemptsComplete} attempts processed`);
+        t.log(`${maxConcurrency} concurrent workers`);
+        t.log(`duration: ${(Date.now() - start) / 1000}s`);
+        t.log(`max job memory: ${humanMb(jobMax)}mb`);
+        t.log(`max system memory: ${humanMb(sysMax)}mb`);
+        t.pass('done');
+        done();
+      }
+    });
+  });
+});

From 61029256a47b84362e900eba5f0d51ea742eb30f Mon Sep 17 00:00:00 2001
From: Joe Clark
Date: Tue, 28 Nov 2023 15:04:41 +0000
Subject: [PATCH 24/24] versions: worker@0.2.11 cli@0.4.10

---
 .changeset/brown-windows-sleep.md     |   5 -
 .changeset/dull-beds-dance.md         |   5 -
 .changeset/fifty-students-pay.md      |   5 -
 .changeset/happy-pianos-cheat.md      |   5 -
 .changeset/ninety-fireants-melt.md    |   5 -
 integration-tests/worker/CHANGELOG.md |  13 +
 integration-tests/worker/package.json |   2 +-
 packages/cli/CHANGELOG.md             |   7 +
 packages/cli/package.json             |   2 +-
 packages/engine-multi/CHANGELOG.md    |   9 +
 packages/engine-multi/package.json    |   2 +-
 packages/lightning-mock/CHANGELOG.md  |  10 +
 packages/lightning-mock/package.json  |   2 +-
 packages/runtime/CHANGELOG.md         |   6 +
 packages/runtime/package.json         |   2 +-
 packages/ws-worker/CHANGELOG.md       |  11 +
 packages/ws-worker/package.json       |   2 +-
 pnpm-lock.yaml                        | 602 +-------------------------
 18 files changed, 79 insertions(+), 616 deletions(-)
 delete mode 100644 .changeset/brown-windows-sleep.md
 delete mode 100644 .changeset/dull-beds-dance.md
 delete mode 100644 .changeset/fifty-students-pay.md
 delete mode 100644 .changeset/happy-pianos-cheat.md
 delete mode 100644 .changeset/ninety-fireants-melt.md

diff --git a/.changeset/brown-windows-sleep.md b/.changeset/brown-windows-sleep.md
deleted file mode 100644
index 8de70b0f6..000000000
--- a/.changeset/brown-windows-sleep.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-'@openfn/engine-multi': patch
----
-
-Enforce memory limit on workflows
diff --git a/.changeset/dull-beds-dance.md b/.changeset/dull-beds-dance.md
deleted file mode 100644
index cce54caf3..000000000
--- a/.changeset/dull-beds-dance.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-'@openfn/ws-worker': patch
----
-
-Add MAX_RUN_MEMORY env var and option to limit the memory available to each run
diff --git a/.changeset/fifty-students-pay.md b/.changeset/fifty-students-pay.md
deleted file mode 100644
index 888c1c4a8..000000000
--- a/.changeset/fifty-students-pay.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-'@openfn/ws-worker': patch
----
-
-Include duration and threadid in run-complete
diff --git a/.changeset/happy-pianos-cheat.md b/.changeset/happy-pianos-cheat.md
deleted file mode 100644
index f860e7c4d..000000000
--- a/.changeset/happy-pianos-cheat.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-'@openfn/engine-multi': patch
----
-
-Include memory usage in job-complete events
diff --git a/.changeset/ninety-fireants-melt.md b/.changeset/ninety-fireants-melt.md
deleted file mode 100644
index f609c6c2a..000000000
--- a/.changeset/ninety-fireants-melt.md
+++ /dev/null
@@ -1,5 +0,0 @@
----
-'@openfn/ws-worker': patch
----
-
-Send memory usage to lightning on run:complete
diff --git a/integration-tests/worker/CHANGELOG.md b/integration-tests/worker/CHANGELOG.md
index 9b6ddecf9..e49464cd4 100644
--- a/integration-tests/worker/CHANGELOG.md
+++ b/integration-tests/worker/CHANGELOG.md
@@ -1,5 +1,18 @@
 # @openfn/integration-tests-worker
 
+## 1.0.22
+
+### Patch Changes
+
+- Updated dependencies [22339c6]
+- Updated dependencies [22339c6]
+- Updated dependencies [04ac3cc]
+- Updated dependencies [5991622]
+- Updated dependencies [340b96e]
+  - @openfn/engine-multi@0.2.2
+  - @openfn/ws-worker@0.2.11
+  - @openfn/lightning-mock@1.1.4
+
 ## 1.0.21
 
 ### Patch Changes
diff --git a/integration-tests/worker/package.json b/integration-tests/worker/package.json
index 250e55e14..21c16cd39 100644
--- a/integration-tests/worker/package.json
+++ b/integration-tests/worker/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@openfn/integration-tests-worker",
   "private": true,
-  "version": "1.0.21",
+  "version": "1.0.22",
   "description": "Lightning WOrker integration tests",
   "author": "Open Function Group ",
   "license": "ISC",
diff --git a/packages/cli/CHANGELOG.md b/packages/cli/CHANGELOG.md
index 42f53349d..9c6324443 100644
--- a/packages/cli/CHANGELOG.md
+++ b/packages/cli/CHANGELOG.md
@@ -1,5 +1,12 @@
 # @openfn/cli
 
+## 0.4.10
+
+### Patch Changes
+
+- Updated dependencies
+  - @openfn/runtime@0.2.1
+
 ## 0.4.9
 
 ### Patch Changes
diff --git a/packages/cli/package.json b/packages/cli/package.json
index 437f7ec5c..433f4cea1 100644
--- a/packages/cli/package.json
+++ b/packages/cli/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@openfn/cli",
-  "version": "0.4.9",
+  "version": "0.4.10",
   "description": "CLI devtools for the openfn toolchain.",
   "engines": {
     "node": ">=18",
diff --git a/packages/engine-multi/CHANGELOG.md b/packages/engine-multi/CHANGELOG.md
index 14bd79db2..a5f7ac5e7 100644
--- a/packages/engine-multi/CHANGELOG.md
+++ b/packages/engine-multi/CHANGELOG.md
@@ -1,5 +1,14 @@
 # engine-multi
 
+## 0.2.2
+
+### Patch Changes
+
+- 22339c6: Enforce memory limit on workflows
+- 5991622: Include memory usage in job-complete events
+- Updated dependencies
+  - @openfn/runtime@0.2.1
+
 ## 0.2.1
 
 ### Patch Changes
diff --git a/packages/engine-multi/package.json b/packages/engine-multi/package.json
index 746f5aaa4..c1774f152 100644
--- a/packages/engine-multi/package.json
+++ b/packages/engine-multi/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@openfn/engine-multi",
-  "version": "0.2.1",
+  "version": "0.2.2",
   "description": "Multi-process runtime engine",
   "main":
"dist/index.js", "type": "module", diff --git a/packages/lightning-mock/CHANGELOG.md b/packages/lightning-mock/CHANGELOG.md index 983db3746..51d8931c1 100644 --- a/packages/lightning-mock/CHANGELOG.md +++ b/packages/lightning-mock/CHANGELOG.md @@ -1,5 +1,15 @@ # @openfn/lightning-mock +## 1.1.4 + +### Patch Changes + +- Updated dependencies [22339c6] +- Updated dependencies [5991622] +- Updated dependencies + - @openfn/engine-multi@0.2.2 + - @openfn/runtime@0.2.1 + ## 1.1.3 ### Patch Changes diff --git a/packages/lightning-mock/package.json b/packages/lightning-mock/package.json index a81b59518..da77fc87e 100644 --- a/packages/lightning-mock/package.json +++ b/packages/lightning-mock/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/lightning-mock", - "version": "1.1.3", + "version": "1.1.4", "private": true, "description": "A mock Lightning server", "main": "dist/index.js", diff --git a/packages/runtime/CHANGELOG.md b/packages/runtime/CHANGELOG.md index 8d275d366..e6f2632aa 100644 --- a/packages/runtime/CHANGELOG.md +++ b/packages/runtime/CHANGELOG.md @@ -1,5 +1,11 @@ # @openfn/runtime +## 0.2.1 + +### Patch Changes + +- Report on memory usage at the end of job + ## 0.2.0 ### Minor Changes diff --git a/packages/runtime/package.json b/packages/runtime/package.json index c32d782e5..ed204d168 100644 --- a/packages/runtime/package.json +++ b/packages/runtime/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/runtime", - "version": "0.2.0", + "version": "0.2.1", "description": "Job processing runtime.", "type": "module", "exports": { diff --git a/packages/ws-worker/CHANGELOG.md b/packages/ws-worker/CHANGELOG.md index 61a14d6f8..ee5dacbea 100644 --- a/packages/ws-worker/CHANGELOG.md +++ b/packages/ws-worker/CHANGELOG.md @@ -1,5 +1,16 @@ # ws-worker +## 0.2.11 + +### Patch Changes + +- 22339c6: Add MAX_RUN_MEMORY env var and option to limit the memory available to each run +- 04ac3cc: Include duration and threadid in run-complete +- 340b96e: Send memory usage to lightning on run:complete +- Updated dependencies + - @openfn/engine-multi@0.2.2 + - @openfn/runtime@0.2.1 + ## 0.2.10 ### Patch Changes diff --git a/packages/ws-worker/package.json b/packages/ws-worker/package.json index 75900d80e..a14630f98 100644 --- a/packages/ws-worker/package.json +++ b/packages/ws-worker/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/ws-worker", - "version": "0.2.10", + "version": "0.2.11", "description": "A Websocket Worker to connect Lightning to a Runtime Engine", "main": "dist/index.js", "type": "module", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index c125e5dcb..9bf56ec71 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -146,17 +146,17 @@ importers: specifier: ^5.1.6 version: 5.1.6 - integration-tests/worker/tmp/repo/autoinstall: + integration-tests/worker/tmp/repo/bench: dependencies: - '@openfn/language-common_1.11.1': + '@openfn/language-common_1.7.0': + specifier: npm:@openfn/language-common@^1.7.0 + version: /@openfn/language-common@1.7.5 + + integration-tests/worker/tmp/repo/exit-reason: + dependencies: + '@openfn/language-common_latest': specifier: npm:@openfn/language-common@^1.11.1 version: /@openfn/language-common@1.11.1 - '@openfn/language-googlesheets_2.2.2': - specifier: npm:@openfn/language-googlesheets@^2.2.2 - version: /@openfn/language-googlesheets@2.2.2 - '@openfn/language-http_5.0.0': - specifier: npm:@openfn/language-http@^5.0.0 - version: /@openfn/language-http@5.0.0 packages/cli: dependencies: @@ -1603,19 +1603,6 @@ packages: semver: 7.5.4 dev: true - /@openfn/language-common@1.10.1: - 
resolution: {integrity: sha512-LTH9arUPPzbmmswVrLp9pFxrjYeo9rJB0UMA0yZ5tlU2CKnr7Pj35YkxlpmdcHMBvMo+tCrDrL89MjwQhwyzDA==} - dependencies: - axios: 1.1.3 - csv-parse: 5.5.2 - csvtojson: 2.0.10 - date-fns: 2.30.0 - jsonpath-plus: 4.0.0 - lodash: 4.17.21 - transitivePeerDependencies: - - debug - dev: false - /@openfn/language-common@1.11.1: resolution: {integrity: sha512-pyi2QymdF9NmUYJX/Bsv5oBy7TvzICfKcnCqutq412HYq2KTGKDO2dMWloDrxrH1kuzG+4XkSn0ZUom36b3KAA==} dependencies: @@ -1640,39 +1627,11 @@ packages: lodash: 4.17.21 transitivePeerDependencies: - debug - dev: true /@openfn/language-common@2.0.0-rc3: resolution: {integrity: sha512-7kwhBnCd1idyTB3MD9dXmUqROAhoaUIkz2AGDKuv9vn/cbZh7egEv9/PzKkRcDJYFV9qyyS+cVT3Xbgsg2ii5g==} bundledDependencies: [] - /@openfn/language-googlesheets@2.2.2: - resolution: {integrity: sha512-Ez7M1w/gtJCZjnpbebHzwGJTmL7JmBkKG9It1Mxu8lTjVgrz1Tr5yJLWRE1HgAsiQjDjF6l2WidnXy6lHbkdbw==} - dependencies: - '@openfn/language-common': 1.11.1 - googleapis: 100.0.0 - transitivePeerDependencies: - - debug - - encoding - - supports-color - dev: false - - /@openfn/language-http@5.0.0: - resolution: {integrity: sha512-UUsazztKd6h0z61OR9hyurICRhRiD9yQaJV3mNPJkDtnWZubpYDZg+JWAatgPJHe2K0u+Mb18coAGhVLHb5O3A==} - dependencies: - '@openfn/language-common': 1.10.1 - cheerio: 1.0.0-rc.12 - cheerio-tableparser: 1.0.1 - csv-parse: 4.16.3 - fast-safe-stringify: 2.1.1 - form-data: 3.0.1 - lodash: 4.17.21 - request: 2.88.2 - tough-cookie: 4.1.3 - transitivePeerDependencies: - - debug - dev: false - /@pkgjs/parseargs@0.11.0: resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} engines: {node: '>=14'} @@ -2041,6 +2000,7 @@ packages: engines: {node: '>=6.5'} dependencies: event-target-shim: 5.0.1 + dev: true /accepts@1.3.8: resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} @@ -2090,6 +2050,7 @@ packages: debug: 4.3.4 transitivePeerDependencies: - supports-color + dev: true /agentkeepalive@4.3.0: resolution: {integrity: sha512-7Epl1Blf4Sy37j4v9f9FjICCh4+KAQOyXgHEwlyBiAQLbhKdq/i2QQU3amQalS/wPhdPzDXPL5DMR5bkn+YeWg==} @@ -2117,15 +2078,6 @@ packages: clean-stack: 4.2.0 indent-string: 5.0.0 - /ajv@6.12.6: - resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} - dependencies: - fast-deep-equal: 3.1.3 - fast-json-stable-stringify: 2.1.0 - json-schema-traverse: 0.4.1 - uri-js: 4.4.1 - dev: false - /ajv@8.12.0: resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} dependencies: @@ -2266,26 +2218,10 @@ packages: engines: {node: '>=0.10.0'} dev: true - /arrify@2.0.1: - resolution: {integrity: sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==} - engines: {node: '>=8'} - dev: false - /arrify@3.0.0: resolution: {integrity: sha512-tLkvA81vQG/XqE2mjDkGQHoOINtMHtysSnemrmoGe6PydDPMRbVugqyk4A6V/WDWEfm3l+0d8anA9r8cv/5Jaw==} engines: {node: '>=12'} - /asn1@0.2.6: - resolution: {integrity: sha512-ix/FxPn0MDjeyJ7i/yoHGFt/EX6LyNbxSEhPPXODPL+KB0VPk86UYfL0lMdy+KCnv+fmvIzySwaK5COwqVbWTQ==} - dependencies: - safer-buffer: 2.1.2 - dev: false - - /assert-plus@1.0.0: - resolution: {integrity: sha512-NfJ4UzBCcQGLDlQq7nHxH+tv3kyZ0hHQqF5BO6J7tNJeP5do1llPr8dZ8zHonfhAu0PHAdMkSo+8o0wxg9lZWw==} - engines: {node: '>=0.8'} - dev: false - /assign-symbols@1.0.0: resolution: {integrity: 
sha512-Q+JC7Whu8HhmTdBph/Tq59IoRtoy6KAm5zzPv00WdujX82lbAL8K7WVjne7vdCsAmbF4AYaDOPyO3k0kl8qIrw==} engines: {node: '>=0.10.0'} @@ -2441,18 +2377,10 @@ packages: fast-glob: 3.3.1 dev: true - /aws-sign2@0.7.0: - resolution: {integrity: sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA==} - dev: false - - /aws4@1.12.0: - resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} - dev: false - /axios@0.27.2: resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} dependencies: - follow-redirects: 1.15.2 + follow-redirects: 1.15.3 form-data: 4.0.0 transitivePeerDependencies: - debug @@ -2476,6 +2404,7 @@ packages: /base64-js@1.5.1: resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + dev: true /base@0.11.2: resolution: {integrity: sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==} @@ -2501,12 +2430,6 @@ packages: resolution: {integrity: sha512-x+VAiMRL6UPkx+kudNvxTl6hB2XNNCG2r+7wixVfIYwu/2HKRXimwQyaumLjMveWvT2Hkd/cAJw+QBMfJ/EKVw==} dev: true - /bcrypt-pbkdf@1.0.2: - resolution: {integrity: sha512-qeFIXtP4MSoi6NLqO12WfqARWWuCKi2Rn/9hJLEmtB5yTNr9DqFWkJRCf2qShWzPeAMRnOgCrq0sg/KLv5ES9w==} - dependencies: - tweetnacl: 0.14.5 - dev: false - /bcryptjs@2.4.3: resolution: {integrity: sha512-V/Hy/X9Vt7f3BbPJEi8BdVFMByHi+jNXrYkW3huaybV/kQ0KJg0Y6PkEMbn+zeT+i+SiKZ/HMqJGIIt4LZDqNQ==} dev: true @@ -2518,10 +2441,6 @@ packages: is-windows: 1.0.2 dev: true - /bignumber.js@9.1.2: - resolution: {integrity: sha512-2/mKyZH9K85bzOEfhXDBFZTGd1CTs+5IHpeFQo9luiBG7hghdC851Pj2WAhb6E3R6b9tZj/XKhbg4fum+Kepug==} - dev: false - /binary-extensions@1.13.1: resolution: {integrity: sha512-Un7MIEDdUC5gNpcGDV97op1Ywk748MpHcFTHoYs6qnj1Z3j7I53VG3nwZhKzoBZmbdRNnb6WRdFlwl7tSDuZGw==} engines: {node: '>=0.10.0'} @@ -2562,10 +2481,6 @@ packages: /blueimp-md5@2.19.0: resolution: {integrity: sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==} - /boolbase@1.0.0: - resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} - dev: false - /brace-expansion@1.1.11: resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} dependencies: @@ -2614,10 +2529,6 @@ packages: pako: 0.2.9 dev: true - /buffer-equal-constant-time@1.0.1: - resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} - dev: false - /buffer-from@1.1.2: resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==} dev: true @@ -2754,10 +2665,6 @@ packages: engines: {node: '>=6'} dev: true - /caseless@0.12.0: - resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} - dev: false - /cbor@8.1.0: resolution: {integrity: sha512-DwGjNW9omn6EwP70aXsn7FQJx5kO12tX0bZkaTjzdVFM6/7nhA4t0EENocKGx6D2Bch9PE2KzCUf5SceBdeijg==} engines: {node: '>=12.19'} @@ -2791,34 +2698,6 @@ packages: /chardet@0.7.0: resolution: {integrity: sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==} - /cheerio-select@2.1.0: - resolution: {integrity: 
sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==} - dependencies: - boolbase: 1.0.0 - css-select: 5.1.0 - css-what: 6.1.0 - domelementtype: 2.3.0 - domhandler: 5.0.3 - domutils: 3.1.0 - dev: false - - /cheerio-tableparser@1.0.1: - resolution: {integrity: sha512-SCSWdMoFvIue0jdFZqRNPXDCZ67vuirJEG3pfh3AAU2hwxe/qh1EQUkUNPWlZhd6DMjRlTfcpcPWbaowjwRnNQ==} - dev: false - - /cheerio@1.0.0-rc.12: - resolution: {integrity: sha512-VqR8m68vM46BNnuZ5NtnGBKIE/DfN0cRIzg9n40EIq9NOv90ayxLBXA8fXC5gquFRGJSTRqBq25Jt2ECLR431Q==} - engines: {node: '>= 6'} - dependencies: - cheerio-select: 2.1.0 - dom-serializer: 2.0.0 - domhandler: 5.0.3 - domutils: 3.1.0 - htmlparser2: 8.0.2 - parse5: 7.1.2 - parse5-htmlparser2-tree-adapter: 7.0.0 - dev: false - /chokidar@2.1.8: resolution: {integrity: sha512-ZmZUazfOzf0Nve7duiCKD23PFSCs4JPoYyccjUFF3aQkQadqBhfzhjkwBH2mNOG9cTBwhamM37EIsIkZw3nRgg==} deprecated: Chokidar 2 does not receive security updates since 2019. Upgrade to chokidar 3 with 15x fewer dependencies @@ -3069,10 +2948,6 @@ packages: resolution: {integrity: sha512-3DdaFaU/Zf1AnpLiFDeNCD4TOWe3Zl2RZaTzUvWiIk5ERzcCodOE20Vqq4fzCbNoHURFHT4/us/Lfq+S2zyY4w==} dev: false - /core-util-is@1.0.2: - resolution: {integrity: sha512-3lqz5YjWTYnW6dlDa5TLaTCcShfar1e40rmcJVwCBJC6mWlFuj0eCHIElmG1g5kyuJ/GD+8Wn4FFCcz4gJPfaQ==} - dev: false - /core-util-is@1.0.3: resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} dev: true @@ -3123,21 +2998,6 @@ packages: which: 2.0.2 dev: true - /css-select@5.1.0: - resolution: {integrity: sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==} - dependencies: - boolbase: 1.0.0 - css-what: 6.1.0 - domhandler: 5.0.3 - domutils: 3.1.0 - nth-check: 2.1.1 - dev: false - - /css-what@6.1.0: - resolution: {integrity: sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==} - engines: {node: '>= 6'} - dev: false - /cssesc@3.0.0: resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} engines: {node: '>=4'} @@ -3154,6 +3014,7 @@ packages: /csv-parse@4.16.3: resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} + dev: true /csv-parse@5.5.2: resolution: {integrity: sha512-YRVtvdtUNXZCMyK5zd5Wty1W6dNTpGKdqQd4EQ8tl/c6KW1aMBB1Kg1ppky5FONKmEqGJ/8WjLlTNLPne4ioVA==} @@ -3189,13 +3050,6 @@ packages: dependencies: array-find-index: 1.0.2 - /dashdash@1.14.1: - resolution: {integrity: sha512-jRFi8UDGo6j+odZiEpjazZaWqEal3w/basFjQHQEwVtZJGDpxbH1MeYluwCS8Xq5wmLJooDlMgvVarmWfGM44g==} - engines: {node: '>=0.10'} - dependencies: - assert-plus: 1.0.0 - dev: false - /date-fns@2.30.0: resolution: {integrity: sha512-fnULvOpxnC5/Vg3NCiWelDsLiUc9bRwAPs/+LfTLNvetFCtCTN+yQz15C/fs4AwX1R9K5GLtLfn8QW+dWisaAw==} engines: {node: '>=0.11'} @@ -3380,33 +3234,6 @@ packages: resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} dev: true - /dom-serializer@2.0.0: - resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} - dependencies: - domelementtype: 2.3.0 - domhandler: 5.0.3 - entities: 4.5.0 - dev: false - - /domelementtype@2.3.0: - resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} - dev: false - - /domhandler@5.0.3: 
- resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} - engines: {node: '>= 4'} - dependencies: - domelementtype: 2.3.0 - dev: false - - /domutils@3.1.0: - resolution: {integrity: sha512-H78uMmQtI2AhgDJjWeQmHwJJ2bLPD3GMmO7Zja/ZZh84wkm+4ut+IUnUdRa8uCGX88DiVx1j6FRe1XfxEgjEZA==} - dependencies: - dom-serializer: 2.0.0 - domelementtype: 2.3.0 - domhandler: 5.0.3 - dev: false - /dreamopt@0.8.0: resolution: {integrity: sha512-vyJTp8+mC+G+5dfgsY+r3ckxlz+QMX40VjPQsZc5gxVAxLmi64TBoVkP54A/pRAXMXsbu2GMMBrZPxNv23waMg==} engines: {node: '>=0.4.0'} @@ -3430,19 +3257,6 @@ packages: /eastasianwidth@0.2.0: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} - /ecc-jsbn@0.1.2: - resolution: {integrity: sha512-eh9O+hwRHNbG4BLTjEl3nw044CkGm5X6LoaCf7LPp7UU8Qrt47JYNi6nPX8xjW97TKGKm1ouctg0QSpZe9qrnw==} - dependencies: - jsbn: 0.1.1 - safer-buffer: 2.1.2 - dev: false - - /ecdsa-sig-formatter@1.0.11: - resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} - dependencies: - safe-buffer: 5.2.1 - dev: false - /ee-first@1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} @@ -3481,11 +3295,6 @@ packages: ansi-colors: 4.1.3 dev: true - /entities@4.5.0: - resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} - engines: {node: '>=0.12'} - dev: false - /err-code@2.0.3: resolution: {integrity: sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==} dev: true @@ -4112,6 +3921,7 @@ packages: /event-target-shim@5.0.1: resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} engines: {node: '>=6'} + dev: true /eventemitter3@3.1.2: resolution: {integrity: sha512-tvtQIeLVHjDkJYnzf2dgVMxfuSGJeM/7UCG17TT4EumTfNtF+0nebF/4zWOIkCreAbtNqhGEboB6BWrwqNaw4Q==} @@ -4171,10 +3981,6 @@ packages: is-extendable: 1.0.1 dev: true - /extend@3.0.2: - resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} - dev: false - /extendable-error@0.1.7: resolution: {integrity: sha512-UOiS2in6/Q0FK0R0q6UY9vYpQ21mr/Qn1KOnte7vsACuNJf514WvCCUHSRCPcgjPT2bAhNIJdlE6bVap1GKmeg==} dev: true @@ -4203,11 +4009,6 @@ packages: - supports-color dev: true - /extsprintf@1.3.0: - resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} - engines: {'0': node >=0.6.0} - dev: false - /fast-deep-equal@3.1.3: resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} dev: false @@ -4244,10 +4045,6 @@ packages: resolution: {integrity: sha512-vf6IHUX2SBcA+5/+4883dsIjpBTqmfBjmYiWK1savxQmFk4JfBMLa7ynTYOs1Rolp/T1betJxHiGD3g1Mn8lUQ==} dev: false - /fast-json-stable-stringify@2.1.0: - resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} - dev: false - /fast-levenshtein@2.0.6: resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} dev: true @@ -4256,10 +4053,6 @@ packages: resolution: {integrity: sha512-W+KJc2dmILlPplD/H4K9l9LcAHAfPtP6BY84uVLXQ6Evcz9Lcg33Y2z1IVblT6xdY54PXYVHEv+0Wpq8Io6zkA==} dev: false - 
/fast-text-encoding@1.0.6: - resolution: {integrity: sha512-VhXlQgj9ioXCqGstD37E/HBeqEGV/qOD/kmbVG8h5xKBYvM1L3lR1Zn4555cQ8GkYbJa8aJSipLPndE1k6zK2w==} - dev: false - /fastq@1.13.0: resolution: {integrity: sha512-YpkpUnK8od0o1hmeSc7UUs/eB/vIPWJYjKck2QKIzAf71Vm1AAQ3EbuZB3g2JIy+pg+ERD0vqI79KyZiB2e2Nw==} dependencies: @@ -4358,16 +4151,6 @@ packages: pkg-dir: 4.2.0 dev: true - /follow-redirects@1.15.2: - resolution: {integrity: sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==} - engines: {node: '>=4.0'} - peerDependencies: - debug: '*' - peerDependenciesMeta: - debug: - optional: true - dev: true - /follow-redirects@1.15.3: resolution: {integrity: sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==} engines: {node: '>=4.0'} @@ -4390,19 +4173,6 @@ packages: signal-exit: 4.0.2 dev: true - /forever-agent@0.6.1: - resolution: {integrity: sha512-j0KLYPhm6zeac4lz3oJ3o65qvgQCcPubiyotZrXqEaG4hNagNYO8qdlUrX5vwqv9ohqeT/Z3j6+yW067yWWdUw==} - dev: false - - /form-data@2.3.3: - resolution: {integrity: sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==} - engines: {node: '>= 0.12'} - dependencies: - asynckit: 0.4.0 - combined-stream: 1.0.8 - mime-types: 2.1.35 - dev: false - /form-data@2.5.1: resolution: {integrity: sha512-m21N3WOmEEURgk6B9GLOE4RuWOFf28Lhh9qGYeNlGq4VDXUlJy2th2slBNU8Gp8EzloYZOibZJ7t5ecIrFSjVA==} engines: {node: '>= 0.12'} @@ -4412,15 +4182,6 @@ packages: mime-types: 2.1.35 dev: true - /form-data@3.0.1: - resolution: {integrity: sha512-RHkBKtLWUVwd7SqRIvCZMEvAMoGUp0XU+seQiZejj0COz3RI3hWP4sCv3gZWWLjJTd7rGwcsF5eKZGii0r/hbg==} - engines: {node: '>= 6'} - dependencies: - asynckit: 0.4.0 - combined-stream: 1.0.8 - mime-types: 2.1.35 - dev: false - /form-data@4.0.0: resolution: {integrity: sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==} engines: {node: '>= 6'} @@ -4515,31 +4276,6 @@ packages: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} dev: true - /gaxios@4.3.3: - resolution: {integrity: sha512-gSaYYIO1Y3wUtdfHmjDUZ8LWaxJQpiavzbF5Kq53akSzvmVg0RfyOcFDbO1KJ/KCGRFz2qG+lS81F0nkr7cRJA==} - engines: {node: '>=10'} - dependencies: - abort-controller: 3.0.0 - extend: 3.0.2 - https-proxy-agent: 5.0.1 - is-stream: 2.0.1 - node-fetch: 2.6.7 - transitivePeerDependencies: - - encoding - - supports-color - dev: false - - /gcp-metadata@4.3.1: - resolution: {integrity: sha512-x850LS5N7V1F3UcV7PoupzGsyD6iVwTVvsh3tbXfkctZnBnjW5yu5z1/3k3SehF7TyoTIe78rJs02GMMy+LF+A==} - engines: {node: '>=10'} - dependencies: - gaxios: 4.3.3 - json-bigint: 1.0.0 - transitivePeerDependencies: - - encoding - - supports-color - dev: false - /get-caller-file@2.0.5: resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} engines: {node: 6.* || 8.* || >= 10.*} @@ -4569,12 +4305,6 @@ packages: engines: {node: '>=0.10.0'} dev: true - /getpass@0.1.7: - resolution: {integrity: sha512-0fzj9JxOLfJ+XGLhR8ze3unN0KZCgZwiSSDz168VERjK8Wl8kVSdcu2kspd4s4wtAa1y/qrVRiAA0WclVsu0ng==} - dependencies: - assert-plus: 1.0.0 - dev: false - /glob-parent@3.1.0: resolution: {integrity: sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==} dependencies: @@ -4661,58 +4391,6 @@ packages: merge2: 1.4.1 slash: 4.0.0 - /google-auth-library@7.14.1: - resolution: {integrity: 
sha512-5Rk7iLNDFhFeBYc3s8l1CqzbEBcdhwR193RlD4vSNFajIcINKI8W8P0JLmBpwymHqqWbX34pJDQu39cSy/6RsA==} - engines: {node: '>=10'} - dependencies: - arrify: 2.0.1 - base64-js: 1.5.1 - ecdsa-sig-formatter: 1.0.11 - fast-text-encoding: 1.0.6 - gaxios: 4.3.3 - gcp-metadata: 4.3.1 - gtoken: 5.3.2 - jws: 4.0.0 - lru-cache: 6.0.0 - transitivePeerDependencies: - - encoding - - supports-color - dev: false - - /google-p12-pem@3.1.4: - resolution: {integrity: sha512-HHuHmkLgwjdmVRngf5+gSmpkyaRI6QmOg77J8tkNBHhNEI62sGHyw4/+UkgyZEI7h84NbWprXDJ+sa3xOYFvTg==} - engines: {node: '>=10'} - hasBin: true - dependencies: - node-forge: 1.3.1 - dev: false - - /googleapis-common@5.1.0: - resolution: {integrity: sha512-RXrif+Gzhq1QAzfjxulbGvAY3FPj8zq/CYcvgjzDbaBNCD6bUl+86I7mUs4DKWHGruuK26ijjR/eDpWIDgNROA==} - engines: {node: '>=10.10.0'} - dependencies: - extend: 3.0.2 - gaxios: 4.3.3 - google-auth-library: 7.14.1 - qs: 6.11.2 - url-template: 2.0.8 - uuid: 8.3.2 - transitivePeerDependencies: - - encoding - - supports-color - dev: false - - /googleapis@100.0.0: - resolution: {integrity: sha512-RToFQGY54B756IDbjdyjb1vWFmn03bYpXHB2lIf0eq2UBYsIbYOLZ0kqSomfJnpclEukwEmMF7Jn6Wsev871ew==} - engines: {node: '>=10'} - dependencies: - google-auth-library: 7.14.1 - googleapis-common: 5.1.0 - transitivePeerDependencies: - - encoding - - supports-color - dev: false - /graceful-fs@4.2.10: resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} dev: true @@ -4721,18 +4399,6 @@ packages: resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} dev: true - /gtoken@5.3.2: - resolution: {integrity: sha512-gkvEKREW7dXWF8NV8pVrKfW7WqReAmjjkMBh6lNCCGOM4ucS0r0YyXXl0r/9Yj8wcW/32ISkfc8h5mPTDbtifQ==} - engines: {node: '>=10'} - dependencies: - gaxios: 4.3.3 - google-p12-pem: 3.1.4 - jws: 4.0.0 - transitivePeerDependencies: - - encoding - - supports-color - dev: false - /gunzip-maybe@1.4.2: resolution: {integrity: sha512-4haO1M4mLO91PW57BMsDFf75UmwoRX0GkdD+Faw+Lr+r/OZrOCS0pIBwOL1xCKQqnQzbNFGgK2V2CpBUPeFNTw==} hasBin: true @@ -4745,20 +4411,6 @@ packages: through2: 2.0.5 dev: true - /har-schema@2.0.0: - resolution: {integrity: sha512-Oqluz6zhGX8cyRaTQlFMPw80bSJVG2x/cFb8ZPhUILGgHka9SsokCCOQgpveePerqidZOrT14ipqfJb7ILcW5Q==} - engines: {node: '>=4'} - dev: false - - /har-validator@5.1.5: - resolution: {integrity: sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==} - engines: {node: '>=6'} - deprecated: this library is no longer supported - dependencies: - ajv: 6.12.6 - har-schema: 2.0.0 - dev: false - /hard-rejection@2.1.0: resolution: {integrity: sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==} engines: {node: '>=6'} @@ -4844,15 +4496,6 @@ packages: lru-cache: 7.18.3 dev: true - /htmlparser2@8.0.2: - resolution: {integrity: sha512-GYdjWKDkbRLkZ5geuHs5NY1puJ+PXwP7+fHPRz06Eirsb9ugf6d8kkXav6ADhcODhFFPMIXyxkxSuMf3D6NCFA==} - dependencies: - domelementtype: 2.3.0 - domhandler: 5.0.3 - domutils: 3.1.0 - entities: 4.5.0 - dev: false - /http-assert@1.5.0: resolution: {integrity: sha512-uPpH7OKX4H25hBmU6G1jWNaqJGpTXxey+YOUizJUAgu0AjLUeC8D73hTrhvDS5D+GJN1DN1+hhc/eF/wpxtp0w==} engines: {node: '>= 0.8'} @@ -4921,15 +4564,6 @@ packages: - supports-color dev: true - /http-signature@1.2.0: - resolution: {integrity: sha512-CAbnr6Rz4CYQkLYUtSNXxQPUH2gK8f3iWexVlsnMeD+GjlsQ0Xsy1cOX+mN3dtxYomRy21CiOzU8Uhw6OwncEQ==} - engines: {node: '>=0.8', npm: 
[pnpm-lock.yaml diff, line structure lost in extraction; recoverable changes: removes the deprecated `request@2.88.2` package and its transitive dependency tree (sshpk, jsprim, json-schema, json-schema-traverse@0.4.1, json-stringify-safe, assert-plus, jsbn, tough-cookie@2.5.0/@4.1.3, qs@6.5.3, oauth-sign, performance-now, psl, tunnel-agent, tweetnacl, verror, isstream, is-typedarray, requires-port, querystringify, url-parse, universalify@0.2.0); also drops jwa, jws, json-bigint, node-forge, nth-check, parse5, parse5-htmlparser2-tree-adapter, url-template and uuid@8.3.2; and re-marks https-proxy-agent@5.0.1, is-stream@2.0.1 and uuid@3.4.0 as dev-only (`dev: true`).]