From 4f26afcb55c3dbab761a593718515d779456312b Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 30 Jan 2024 16:41:32 +0000 Subject: [PATCH 001/128] engine: spike mapping console logs to an adaptor logger --- .../engine-multi/src/worker/thread/helpers.ts | 9 ++++++- .../engine-multi/src/worker/thread/run.ts | 11 ++++++-- .../{ => @openfn}/helper_1.0.0/index.cjs | 9 ++++++- .../{ => @openfn}/helper_1.0.0/package.json | 2 +- .../engine-multi/test/__repo__/package.json | 2 +- packages/engine-multi/test/errors.test.ts | 6 ++--- .../engine-multi/test/integration.test.ts | 27 +++++++++++++++++++ 7 files changed, 57 insertions(+), 9 deletions(-) rename packages/engine-multi/test/__repo__/node_modules/{ => @openfn}/helper_1.0.0/index.cjs (50%) rename packages/engine-multi/test/__repo__/node_modules/{ => @openfn}/helper_1.0.0/package.json (75%) diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index b3489a338..9c7544da6 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -48,7 +48,14 @@ export const createLoggers = ( sanitize, }); - return { logger, jobLogger }; + const adaptorLogger = createLogger('ADA', { + logger: emitter, + level: 'debug', + json: true, + sanitize, + }); + + return { logger, jobLogger, adaptorLogger }; }; // Execute wrapper function diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index 196853859..054120742 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -29,9 +29,16 @@ register({ run: (plan: ExecutionPlan, runOptions: RunOptions) => { const { adaptorPaths, whitelist, sanitize, statePropsToRemove } = runOptions; - const { logger, jobLogger } = createLoggers(plan.id!, sanitize); - // TODO I would like to pull these options out of here + const { logger, jobLogger, adaptorLogger } = 
createLoggers( + plan.id!, + sanitize + ); + + // override console.log + // any console.log statements will now get treated as adaptor logs + console = adaptorLogger; + // TODO I would like to pull these options out of here const options = { // disable the run/step timeout timeout: 0, diff --git a/packages/engine-multi/test/__repo__/node_modules/helper_1.0.0/index.cjs b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs similarity index 50% rename from packages/engine-multi/test/__repo__/node_modules/helper_1.0.0/index.cjs rename to packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs index 42a79abc5..2959891e0 100644 --- a/packages/engine-multi/test/__repo__/node_modules/helper_1.0.0/index.cjs +++ b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs @@ -1,8 +1,15 @@ module.exports = { + log: function(message) { + return function (state) { + console.log(message) + return state; + } + }, + exit: function() { return function (state) { process.exit(42) return state; } - } + }, }; diff --git a/packages/engine-multi/test/__repo__/node_modules/helper_1.0.0/package.json b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json similarity index 75% rename from packages/engine-multi/test/__repo__/node_modules/helper_1.0.0/package.json rename to packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json index 429f14f13..a17c395c9 100644 --- a/packages/engine-multi/test/__repo__/node_modules/helper_1.0.0/package.json +++ b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json @@ -1,5 +1,5 @@ { - "name": "helper", + "name": "@openfn/helper", "version": "1.0.0", "type": "module", "main": "index.cjs", diff --git a/packages/engine-multi/test/__repo__/package.json b/packages/engine-multi/test/__repo__/package.json index 679a959d6..5d53c92d6 100644 --- a/packages/engine-multi/test/__repo__/package.json +++ 
b/packages/engine-multi/test/__repo__/package.json @@ -3,6 +3,6 @@ "private": true, "version": "1.0.0", "dependencies": { - "helper_1.0.0": "@npm:helper@1.0.0" + "@openfn/helper_1.0.0": "@npm:@openfn/helper@1.0.0" } } diff --git a/packages/engine-multi/test/errors.test.ts b/packages/engine-multi/test/errors.test.ts index 80860876b..2a57db481 100644 --- a/packages/engine-multi/test/errors.test.ts +++ b/packages/engine-multi/test/errors.test.ts @@ -3,7 +3,7 @@ import path from 'node:path'; import createEngine, { EngineOptions } from '../src/engine'; import { createMockLogger } from '@openfn/logger'; -import { WORKFLOW_COMPLETE, WORKFLOW_ERROR } from '../src/events'; +import { WORKFLOW_ERROR } from '../src/events'; let engine; @@ -154,13 +154,13 @@ test.serial.skip('execution error from async code', (t) => { }); }); -test.serial('emit a crash error on process.exit()', (t) => { +test.serial.only('emit a crash error on process.exit()', (t) => { return new Promise((done) => { const plan = { id: 'z', jobs: [ { - adaptor: 'helper@1.0.0', + adaptor: '@openfn/helper@1.0.0', expression: 'export default [exit()]', }, ], diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index 129770fe7..bb0364671 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -1,4 +1,5 @@ import test from 'ava'; +import path from 'node:path'; import createAPI from '../src/api'; import { createMockLogger } from '@openfn/logger'; @@ -150,6 +151,32 @@ test.serial('trigger workflow-log for job logs', (t) => { }); }); +test.serial.only('trigger workflow-log for adaptor logs', (t) => { + return new Promise(async (done) => { + api = await createAPI({ + logger, + repoDir: path.resolve('./test/__repo__'), + }); + + const plan = createPlan([ + { + // This will trigger console.log from inside the adaptor + // rather than from job code directly + expression: "log('hola')", + adaptor: 
'@openfn/helper@1.0.0', + }, + ]); + + api.execute(plan).on('workflow-log', (evt) => { + if (evt.name === 'ADA') { + t.deepEqual(evt.message, ['hola']); + t.pass('workflow logged'); + done(); + } + }); + }); +}); + test.serial('compile and run', (t) => { return new Promise(async (done) => { api = await createAPI({ From 201d5289685a2808f8c6852bab27c1af8cdd900c Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 30 Jan 2024 16:41:52 +0000 Subject: [PATCH 002/128] runtime: messy tweak to module loading --- packages/runtime/src/modules/linker.ts | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/packages/runtime/src/modules/linker.ts b/packages/runtime/src/modules/linker.ts index 09c689a6f..ad57418b6 100644 --- a/packages/runtime/src/modules/linker.ts +++ b/packages/runtime/src/modules/linker.ts @@ -56,6 +56,10 @@ const linker: Linker = async (specifier, context, options = {}) => { if (exports.__esModule && target.default.default) { // CJS target = target.default.default; // ?! + } else if (target.default) { + // TODO I've just added this to import the helper in engine-multi's repo + // BUt why?? 
+ target = target.default; } else { // ESM // If we import @openfn/language-common@2.0.0-rc3, its named exports are found on the default object @@ -69,7 +73,6 @@ const linker: Linker = async (specifier, context, options = {}) => { target = target.default; } } - const exportNames = Object.keys(target); // Wrap up the real module into a Synthetic Module const m = new vm.SyntheticModule( From 17803299cc22e6f95bd9e485e86ea17fb0348e0a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 30 Jan 2024 17:20:05 +0000 Subject: [PATCH 003/128] engine,runtime: revert linker change and fix tests --- .../node_modules/@openfn/helper_1.0.0/index.cjs | 15 --------------- .../@openfn/helper_1.0.0/package.json | 2 +- packages/engine-multi/test/integration.test.ts | 2 +- packages/runtime/src/modules/linker.ts | 4 ---- 4 files changed, 2 insertions(+), 21 deletions(-) delete mode 100644 packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs diff --git a/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs deleted file mode 100644 index 2959891e0..000000000 --- a/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.cjs +++ /dev/null @@ -1,15 +0,0 @@ -module.exports = { - log: function(message) { - return function (state) { - console.log(message) - return state; - } - }, - - exit: function() { - return function (state) { - process.exit(42) - return state; - } - }, -}; diff --git a/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json index a17c395c9..a3d7607fb 100644 --- a/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json +++ b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/package.json @@ -2,6 +2,6 @@ "name": "@openfn/helper", "version": "1.0.0", "type": "module", - 
"main": "index.cjs", + "main": "index.js", "private": true } diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index bb0364671..994a9f409 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -151,7 +151,7 @@ test.serial('trigger workflow-log for job logs', (t) => { }); }); -test.serial.only('trigger workflow-log for adaptor logs', (t) => { +test.serial('trigger workflow-log for adaptor logs', (t) => { return new Promise(async (done) => { api = await createAPI({ logger, diff --git a/packages/runtime/src/modules/linker.ts b/packages/runtime/src/modules/linker.ts index ad57418b6..42d848888 100644 --- a/packages/runtime/src/modules/linker.ts +++ b/packages/runtime/src/modules/linker.ts @@ -56,10 +56,6 @@ const linker: Linker = async (specifier, context, options = {}) => { if (exports.__esModule && target.default.default) { // CJS target = target.default.default; // ?! - } else if (target.default) { - // TODO I've just added this to import the helper in engine-multi's repo - // BUt why?? 
- target = target.default; } else { // ESM // If we import @openfn/language-common@2.0.0-rc3, its named exports are found on the default object From 058eff2499b13f6f89cb279f42e5590f9980f043 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 30 Jan 2024 18:43:52 +0000 Subject: [PATCH 004/128] engine: track test file --- packages/engine-multi/src/api/validate-worker.ts | 1 - .../node_modules/@openfn/helper_1.0.0/index.js | 13 +++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) create mode 100644 packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.js diff --git a/packages/engine-multi/src/api/validate-worker.ts b/packages/engine-multi/src/api/validate-worker.ts index fb29b9ffc..4be3bab91 100644 --- a/packages/engine-multi/src/api/validate-worker.ts +++ b/packages/engine-multi/src/api/validate-worker.ts @@ -11,7 +11,6 @@ export default async (api: EngineAPI, timeout = 5000) => { // TODO argument drive this await api.callWorker('handshake', [], {}, { timeout }); } catch (e) { - console.error(e); throw new Error('Invalid worker path'); } }; diff --git a/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.js b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.js new file mode 100644 index 000000000..54e37b93c --- /dev/null +++ b/packages/engine-multi/test/__repo__/node_modules/@openfn/helper_1.0.0/index.js @@ -0,0 +1,13 @@ +export function exit() { + return function (state) { + process.exit(42) + return state; + } +}; + +export function log(message) { + return function (state) { + console.log(message) + return state; + } +}; From 649ca431c320ec66e21876fc717e3d4c1738d519 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 08:56:09 +0000 Subject: [PATCH 005/128] logger: dont stringify json output AND serialize errors This cause problems with the worker because errors get flattened to {}, and also we have to double parse. 
Now the logger will just emit whatever it logged to whatever the log emitter is, so JSON stays as JSON.
+ emitter[level](output); }; const logString = ( diff --git a/packages/logger/src/sanitize.ts b/packages/logger/src/sanitize.ts index 00c980da3..fc7e548a3 100644 --- a/packages/logger/src/sanitize.ts +++ b/packages/logger/src/sanitize.ts @@ -15,6 +15,8 @@ type SanitizeOptions = { // This is potentially important so we do want to break // but! we should throw in the CLI< not here. policy?: SanitizePolicies; + + serializeErrors?: boolean; // false by default }; const scrubbers: Record any> = { @@ -31,6 +33,13 @@ const sanitize = (item: any, options: SanitizeOptions = {}) => { options.stringify === false ? o : stringify(o, undefined, 2); if (item instanceof Error) { + if (options.serializeErrors) { + return { + name: item.name, + message: item.message || item.toString(), + // TODO stack? Tricky + }; + } return item; } diff --git a/packages/logger/test/logger.test.ts b/packages/logger/test/logger.test.ts index f5a2a4166..6b38d84b2 100644 --- a/packages/logger/test/logger.test.ts +++ b/packages/logger/test/logger.test.ts @@ -1,6 +1,6 @@ import test from 'ava'; import chalk from 'chalk'; -import { styleLevel, LogFns, StringLog } from '../src/logger'; +import { styleLevel, LogFns, StringLog, JSONLog } from '../src/logger'; import { defaults as defaultOptions, LogLevel } from '../src/options'; import hrtimestamp from '../src/util/timestamp'; import { SECRET } from '../src/sanitize'; @@ -194,10 +194,10 @@ test('sanitize: summarise object', (t) => { test(`${level} - as json`, (t) => { const options = { level, json: true }; - const logger = createLogger('x', options); + const logger = createLogger('x', options); logger[fn]('abc'); - const result = JSON.parse(logger._last); + const result = logger._last; t.assert(Object.keys(result).length === 4); t.assert(result.level === level); @@ -208,14 +208,14 @@ test('sanitize: summarise object', (t) => { }); test(`JSON timestamps are bigints representing sensible times`, (t) => { - const testStartTime = new Date().toISOString() + const 
testStartTime = new Date().toISOString(); const startTime = hrtimestamp(); const options = { level: 'info' as const, json: true }; - const logger = createLogger('x', options); + const logger = createLogger('x', options); logger.info("what's the time mr wolf"); - const { time } = JSON.parse(logger._last); + const { time } = logger._last; // The time we get here is NOT a bigint because it's been serialized t.true(typeof time === 'string'); t.is(time.length, 19); @@ -247,21 +247,20 @@ big end time: ${endDate.toISOString()}`); // TODO this test needs to pass without the timeout test('timestamps increase in time', async (t) => { const options = { level: 'info' as const, json: true }; - const logger = createLogger('x', options); + const logger = createLogger('x', options); - for(let i = 0; i < 10; i += 1) { + for (let i = 0; i < 10; i += 1) { // await new Promise(done => setTimeout(done, 2)) logger.info("what's the time mr wolf"); } let last = 0; - logger._history.forEach(l => { - const { time } = JSON.parse(l); - t.log(time) - t.true(time > last) + logger._history.forEach(({ time }) => { + t.log(time); + t.true(time > last); last = time; - }) -}) + }); +}); test('print() should be barebones', (t) => { const options = { level: 'default' as const }; @@ -283,7 +282,7 @@ test('print() should not log if level is none', (t) => { test('print() should log as json', (t) => { const options = { json: true }; - const logger = createLogger('x', options); + const logger = createLogger('x', options); logger.print('abc'); const [level, message] = logger._last; @@ -333,6 +332,20 @@ test('in json mode with level=none, logs errors only', (t) => { t.assert(logger._history.length === 1); }); +test('json mode should serialize errors nicely', (t) => { + const logger = createLogger(undefined, { + level: 'debug', + json: true, + }); + const e = new Error('wibble'); + + logger.info(e); + + const result = logger._last; + t.is(result.level, 'info'); + t.deepEqual(result.message[0], { name: 
'Error', message: 'wibble' }); +}); + test('with level=default, logs success, error and warning but not info and debug', (t) => { const logger = createLogger('x', { level: 'default' }); @@ -429,14 +442,14 @@ test('sanitize state in second arg', (t) => { }); test('sanitize state in json logging', (t) => { - const logger = createLogger(undefined, { json: true }); + const logger = createLogger(undefined, { json: true }); logger.success({ configuration: { x: 'y', }, data: {}, }); - const { message } = JSON.parse(logger._last); + const { message } = logger._last; t.is(message[0].configuration.x, SECRET); }); @@ -487,16 +500,21 @@ test('log a circular object', async (t) => { ); }); -test('log a circular object as JSON', async (t) => { +// This fails now because I'm not stringifying the output +// I think this has to just be OK. +// Maybe the worker needs to stringify the result downstream, +// but I don't think the logger itself should? +test.skip('log a circular object as JSON', async (t) => { const z: any = {}; const a = { z, }; z.a = a; - const logger = createLogger(undefined, { json: true }); + const logger = createLogger(undefined, { json: true }); logger.success(a); - const { message } = JSON.parse(logger._last); + const { message } = logger._last; + t.log(message); t.deepEqual(message[0], { z: { a: '[Circular]', @@ -550,20 +568,20 @@ test('proxy string arguments to string', (t) => { }); test('proxy a json argument to json', (t) => { - const logger = createLogger('x', { json: true }); + const logger = createLogger('x', { json: true }); logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); - const { name, level, message } = JSON.parse(logger._last as any); + const { name, level, message } = logger._last; t.is(name, 'y'); t.is(level, 'success'); t.deepEqual(message, ['hello']); }); test('proxy string arguments to json', (t) => { - const logger = createLogger('x', { json: true }); + const logger = createLogger('x', { json: true }); logger.proxy('y', 
'success', ['hello']); - const { name, level, message } = JSON.parse(logger._last as any); + const { name, level, message } = logger._last; t.is(name, 'y'); t.is(level, 'success'); t.deepEqual(message, ['hello']); diff --git a/packages/logger/test/mock.test.ts b/packages/logger/test/mock.test.ts index 8c5613674..d8dc96537 100644 --- a/packages/logger/test/mock.test.ts +++ b/packages/logger/test/mock.test.ts @@ -182,7 +182,8 @@ test('log JSON', async (t) => { const logger = mockLogger('a', { json: true }); logger.success('z'); - const { level, message, name, time } = JSON.parse(logger._last); + //const { level, message, name, time } = JSON.parse(logger._last); + const { level, message, name, time } = logger._last; t.is(name, 'a'); t.is(level, 'success'); t.is(message[0], 'z'); diff --git a/packages/logger/test/sanitize.test.ts b/packages/logger/test/sanitize.test.ts index 0c53a340f..834571657 100644 --- a/packages/logger/test/sanitize.test.ts +++ b/packages/logger/test/sanitize.test.ts @@ -40,6 +40,15 @@ test("Don't stringify a custom error", (t) => { t.assert(result instanceof Error); }); +test('do stringify an error if asked to', (t) => { + const e = new Error('test'); + const result = sanitize(e, { serializeErrors: true }); + t.deepEqual(result, { + name: 'Error', + message: 'test', + }); +}); + test('stringify an object', (t) => { const result = sanitize({}); t.is(result, '{}'); From deee94be7ccb22c83ec294b245a87129a55c92fd Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 08:59:07 +0000 Subject: [PATCH 006/128] logger: tidy --- packages/logger/src/logger.ts | 16 +++------------- 1 file changed, 3 insertions(+), 13 deletions(-) diff --git a/packages/logger/src/logger.ts b/packages/logger/src/logger.ts index 6160af161..38de46685 100644 --- a/packages/logger/src/logger.ts +++ b/packages/logger/src/logger.ts @@ -1,6 +1,5 @@ import c from 'chalk'; import iconfirm from '@inquirer/confirm'; -import stringify from 'fast-safe-stringify'; import * as 
symbols from './symbols'; import sanitize from './sanitize'; import getDurationString from './util/duration'; @@ -179,18 +178,9 @@ export default function (name?: string, options: LogOptions = {}): Logger { time: hrtimestamp().toString(), }; - // TODO OK maybe we should not stringify here because - // a) it doesn't safely stringify errors (how and where should we do this?) - // b) the worker wants the raw json, not a string - // You know what actually I think the message should be stringified, but not the wrapper - // I wonder how much that's going to break - //emitter[level](stringify(output)); - - // If we don't stringify, what if we hit: - // a function - // a circular reference - // something else non serializable - // But then again, I don't think we care about this? + // Emit the output directly, without any further + // serialisation. Note that this may cause us to log + // non-serialisable stuff emitter[level](output); }; From 271f1a8b5d9a8a2de4624b46c146488d72ad8745 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 09:56:49 +0000 Subject: [PATCH 007/128] engine: don't parse json logs coming out of the logger --- packages/engine-multi/src/api/call-worker.ts | 10 ++++- .../engine-multi/src/worker/child/runner.ts | 1 + .../engine-multi/src/worker/thread/helpers.ts | 7 ++-- .../src/worker/thread/mock-run.ts | 2 +- .../engine-multi/src/worker/thread/run.ts | 4 +- .../engine-multi/test/worker/helper.test.ts | 38 +++++++++++++++++++ 6 files changed, 54 insertions(+), 8 deletions(-) create mode 100644 packages/engine-multi/test/worker/helper.test.ts diff --git a/packages/engine-multi/src/api/call-worker.ts b/packages/engine-multi/src/api/call-worker.ts index 4ecc1d441..9306b7101 100644 --- a/packages/engine-multi/src/api/call-worker.ts +++ b/packages/engine-multi/src/api/call-worker.ts @@ -34,14 +34,20 @@ export default function initWorkers( logger ); - const callWorker: CallWorker = (task, args = [], events = [], options = {}) => - 
workers.exec(task, args, { + const callWorker: CallWorker = ( + task, + args = [], + events = [], + options = {} + ) => { + return workers.exec(task, args, { ...options, on: ({ type, ...args }: WorkerEvent) => { // just call the callback events[type]?.(args); }, }); + }; const closeWorkers = async (instant?: boolean) => workers.destroy(instant); diff --git a/packages/engine-multi/src/worker/child/runner.ts b/packages/engine-multi/src/worker/child/runner.ts index 1a9f6ba25..a5bcffaf0 100644 --- a/packages/engine-multi/src/worker/child/runner.ts +++ b/packages/engine-multi/src/worker/child/runner.ts @@ -18,6 +18,7 @@ process.on('message', async (evt: WorkerEvent) => { }); const run = async (task: string, args: any[] = [], options = {}) => { + console.log(' > RUN ', task); const thread = createThread(task, args, options); thread.on('error', (e) => { diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index 9c7544da6..972758832 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -14,14 +14,13 @@ import serializeError from '../../util/serialize-error'; export const createLoggers = ( workflowId: string, - sanitize?: SanitizePolicies + sanitize: SanitizePolicies = 'none', + publish?: any ) => { const log = (message: string) => { - // Apparently the json log stringifies the message - // We don't really want it to do that publish(workerEvents.LOG, { workflowId, - message: JSON.parse(message), + message, } as workerEvents.LogEvent); }; diff --git a/packages/engine-multi/src/worker/thread/mock-run.ts b/packages/engine-multi/src/worker/thread/mock-run.ts index 54d6de72b..c6b29b0d8 100644 --- a/packages/engine-multi/src/worker/thread/mock-run.ts +++ b/packages/engine-multi/src/worker/thread/mock-run.ts @@ -32,7 +32,7 @@ type MockExecutionPlan = { // optionally delay function mockRun(plan: MockExecutionPlan) { const [job] = plan.jobs; - const { 
jobLogger } = createLoggers(plan.id!); + const { jobLogger } = createLoggers(plan.id!, 'none', publish); const workflowId = plan.id; return new Promise((resolve) => { const jobId = job.id || ''; diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index 054120742..35bc1dc60 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -29,9 +29,11 @@ register({ run: (plan: ExecutionPlan, runOptions: RunOptions) => { const { adaptorPaths, whitelist, sanitize, statePropsToRemove } = runOptions; + // @ts-ignore const { logger, jobLogger, adaptorLogger } = createLoggers( plan.id!, - sanitize + sanitize, + publish ); // override console.log diff --git a/packages/engine-multi/test/worker/helper.test.ts b/packages/engine-multi/test/worker/helper.test.ts new file mode 100644 index 000000000..011f2f4e0 --- /dev/null +++ b/packages/engine-multi/test/worker/helper.test.ts @@ -0,0 +1,38 @@ +import test from 'ava'; + +import { createLoggers } from '../../src/worker/thread/helpers'; + +test('createLogger: runtime logger should emit an event on log', (t) => { + const message = 'testing1234'; + + const publish = (type: string, payload: any) => { + t.is(type, 'worker:log'); + t.is(payload.workflowId, 'x'); + t.is(payload.message.level, 'info'); + t.is(payload.message.name, 'R/T'); + t.deepEqual(payload.message.message, [message]); + }; + + const { logger } = createLoggers('x', 'none', publish); + + logger.log(message); +}); + +test('createLogger: runtime logger should emit a nicely serialised error on log', (t) => { + const message = new Error('err'); + + const publish = (type: string, payload: any) => { + t.is(type, 'worker:log'); + + t.deepEqual(payload.message.message, [ + { + name: 'Error', + message: 'err', + }, + ]); + }; + + const { logger } = createLoggers('x', 'none', publish); + + logger.log(message); +}); From 3877474f771857b53094da270639605fcd4cd14e Mon Sep 17 
00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 10:45:27 +0000 Subject: [PATCH 008/128] engine, worker: better handling of objects coming from the logger The logger always sends raw json, but the log message is stringified by the engine, and rebuilt by the worker before sending to lightning. This last bit needs work, but it's better.
a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -2,7 +2,7 @@ // This is designed to minimize the amount of code we have to mock import process from 'node:process'; - +import stringify from 'fast-safe-stringify'; import createLogger, { SanitizePolicies } from '@openfn/logger'; import * as workerEvents from '../events'; @@ -11,16 +11,22 @@ import { ExecutionError, ExitError } from '../../errors'; import { publish } from './runtime'; import serializeError from '../../util/serialize-error'; +import { JSONLog } from '@openfn/logger'; export const createLoggers = ( workflowId: string, sanitize: SanitizePolicies = 'none', publish?: any ) => { - const log = (message: string) => { + const log = (message: JSONLog) => { publish(workerEvents.LOG, { workflowId, - message, + message: { + ...message, + // stringify the message now so that we know it's safe + // this also makes it more performant to feed up to the worker + message: stringify(message.message), + }, } as workerEvents.LogEvent); }; diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index 35bc1dc60..c55ddbf32 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -38,7 +38,7 @@ register({ // override console.log // any console.log statements will now get treated as adaptor logs - console = adaptorLogger; + //console = adaptorLogger; // TODO I would like to pull these options out of here const options = { diff --git a/packages/engine-multi/test/worker/helper.test.ts b/packages/engine-multi/test/worker/helper.test.ts index 011f2f4e0..c527ef3c1 100644 --- a/packages/engine-multi/test/worker/helper.test.ts +++ b/packages/engine-multi/test/worker/helper.test.ts @@ -10,7 +10,10 @@ test('createLogger: runtime logger should emit an event on log', (t) => { t.is(payload.workflowId, 'x'); t.is(payload.message.level, 'info'); 
t.is(payload.message.name, 'R/T'); - t.deepEqual(payload.message.message, [message]); + + // The log message is always encoded into a string + const parsedMessage = JSON.parse(payload.message.message); + t.deepEqual(parsedMessage, [message]); }; const { logger } = createLoggers('x', 'none', publish); @@ -24,7 +27,8 @@ test('createLogger: runtime logger should emit a nicely serialised error on log' const publish = (type: string, payload: any) => { t.is(type, 'worker:log'); - t.deepEqual(payload.message.message, [ + const parsedMessage = JSON.parse(payload.message.message); + t.deepEqual(parsedMessage, [ { name: 'Error', message: 'err', diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 2d7234dd6..73cc395de 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -9,10 +9,10 @@ import { STEP_COMPLETE, STEP_START, } from '../events'; -import { AttemptOptions, Channel, AttemptState } from '../types'; +import { AttemptOptions, Channel, AttemptState, JSONLog } from '../types'; import { getWithReply, createAttemptState } from '../util'; -import type { JSONLog, Logger } from '@openfn/logger'; +import type { Logger } from '@openfn/logger'; import type { RuntimeEngine, Resolvers, @@ -205,7 +205,11 @@ export function onJobLog({ channel, state }: Context, event: JSONLog) { // lightning-friendly log object const log: AttemptLogPayload = { attempt_id: state.plan.id!, - message: event.message, + // The message body, the actual thing that is logged, + // is always encoded into a string + // Parse it here before sending on to lightning + // TODO this needs optimising! 
+ message: JSON.parse(event.message), source: event.name, level: event.level, timestamp: timeInMicroseconds.toString(), diff --git a/packages/ws-worker/src/types.d.ts b/packages/ws-worker/src/types.d.ts index f2aed29ca..0d707516f 100644 --- a/packages/ws-worker/src/types.d.ts +++ b/packages/ws-worker/src/types.d.ts @@ -123,3 +123,9 @@ export interface Channel extends PhxChannel { push:

(event: string, payload?: P) => ReceiveHook; // join: () => ReceiveHook; } + +// override the JSON log typing because the log message +// is always JSON encoded in a string +export type JSONLog = Omit & { + message: string; +}; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index b4efc4aa9..17e5bf7f3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -383,6 +383,9 @@ importers: '@openfn/runtime': specifier: workspace:* version: link:../runtime + fast-safe-stringify: + specifier: ^2.1.1 + version: 2.1.1 devDependencies: '@types/node': specifier: ^18.15.13 @@ -406,6 +409,8 @@ importers: specifier: ^5.1.6 version: 5.1.6 + packages/engine-multi/tmp/a/b/c: {} + packages/engine-multi/tmp/repo: {} packages/lightning-mock: From bdf2586bea013a202d7b81ede8426693d6ad001d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 11:10:36 +0000 Subject: [PATCH 009/128] engine: fix tests --- packages/engine-multi/src/worker/thread/run.ts | 2 +- packages/engine-multi/test/api/execute.test.ts | 2 +- packages/engine-multi/test/integration.test.ts | 4 ++-- packages/engine-multi/test/worker/mock-worker.test.ts | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index c55ddbf32..35bc1dc60 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -38,7 +38,7 @@ register({ // override console.log // any console.log statements will now get treated as adaptor logs - //console = adaptorLogger; + console = adaptorLogger; // TODO I would like to pull these options out of here const options = { diff --git a/packages/engine-multi/test/api/execute.test.ts b/packages/engine-multi/test/api/execute.test.ts index bdc039be1..8e03c34a8 100644 --- a/packages/engine-multi/test/api/execute.test.ts +++ b/packages/engine-multi/test/api/execute.test.ts @@ -186,8 +186,8 @@ test.serial('should emit a log event', async (t) => { await 
execute(context); t.is(workflowLog.workflowId, 'y'); - t.is(workflowLog.message[0], 'hi'); t.is(workflowLog.level, 'info'); + t.deepEqual(workflowLog.message, JSON.stringify(['hi'])); }); test.serial('log events are timestamped in hr time', async (t) => { diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index 994a9f409..88489b654 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -143,7 +143,7 @@ test.serial('trigger workflow-log for job logs', (t) => { api.execute(plan).on('workflow-log', (evt) => { if (evt.name === 'JOB') { - t.deepEqual(evt.message, ['hola']); + t.deepEqual(evt.message, JSON.stringify(['hola'])); t.pass('workflow logged'); done(); } @@ -169,7 +169,7 @@ test.serial('trigger workflow-log for adaptor logs', (t) => { api.execute(plan).on('workflow-log', (evt) => { if (evt.name === 'ADA') { - t.deepEqual(evt.message, ['hola']); + t.deepEqual(evt.message, JSON.stringify(['hola'])); t.pass('workflow logged'); done(); } diff --git a/packages/engine-multi/test/worker/mock-worker.test.ts b/packages/engine-multi/test/worker/mock-worker.test.ts index 19486175e..af31a0634 100644 --- a/packages/engine-multi/test/worker/mock-worker.test.ts +++ b/packages/engine-multi/test/worker/mock-worker.test.ts @@ -157,7 +157,7 @@ test('Publish a job log event', async (t) => { t.is(id, plan.id); t.is(log.level, 'info'); - t.deepEqual(log.message, ['test']); t.is(log.name, 'JOB'); t.truthy(log.time); + t.deepEqual(log.message, JSON.stringify(['test'])); }); From d13adfbe36bc952c2d41280bd9040d8101ca7891 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 11:29:30 +0000 Subject: [PATCH 010/128] logger: tests and types --- packages/logger/test/logger.test.ts | 13 ++++++------- packages/logger/test/mock.test.ts | 6 +++--- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/packages/logger/test/logger.test.ts 
b/packages/logger/test/logger.test.ts index 6b38d84b2..f1700ffd5 100644 --- a/packages/logger/test/logger.test.ts +++ b/packages/logger/test/logger.test.ts @@ -244,19 +244,17 @@ big end time: ${endDate.toISOString()}`); t.true(endTime - startTime < 1e6); }); -// TODO this test needs to pass without the timeout test('timestamps increase in time', async (t) => { const options = { level: 'info' as const, json: true }; const logger = createLogger('x', options); for (let i = 0; i < 10; i += 1) { - // await new Promise(done => setTimeout(done, 2)) logger.info("what's the time mr wolf"); } - let last = 0; + let last = '0'; logger._history.forEach(({ time }) => { - t.log(time); + t.log(typeof time, time); t.true(time > last); last = time; }); @@ -280,12 +278,13 @@ test('print() should not log if level is none', (t) => { t.is(logger._history.length, 0); }); -test('print() should log as json', (t) => { +test.only('print() should log as json', (t) => { const options = { json: true }; - const logger = createLogger('x', options); + const logger = createLogger('x', options); logger.print('abc'); - const [level, message] = logger._last; + // @ts-ignore + const { level, message } = logger._parse(logger._last); t.is(level, 'print'); t.deepEqual(message, { message: ['abc'] }); }); diff --git a/packages/logger/test/mock.test.ts b/packages/logger/test/mock.test.ts index d8dc96537..4444ffbc6 100644 --- a/packages/logger/test/mock.test.ts +++ b/packages/logger/test/mock.test.ts @@ -1,6 +1,7 @@ import test from 'ava'; import chalk from 'chalk'; import mockLogger from '../src/mock'; +import { JSONLog } from '../src'; // disable chalk colours in unit tests chalk.level = 0; @@ -179,15 +180,14 @@ test('print should include the message', async (t) => { }); test('log JSON', async (t) => { - const logger = mockLogger('a', { json: true }); + const logger = mockLogger('a', { json: true }); logger.success('z'); - //const { level, message, name, time } = JSON.parse(logger._last); const { level, 
message, name, time } = logger._last; t.is(name, 'a'); t.is(level, 'success'); t.is(message[0], 'z'); - t.true(!isNaN(time)); + t.true(typeof time === 'string'); }); test('find a log', (t) => { From 593ebd569886cfca188d436c4fdbd257ecfcf6c9 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 11:31:26 +0000 Subject: [PATCH 011/128] cli: update test --- packages/cli/test/util/print-versions.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/cli/test/util/print-versions.test.ts b/packages/cli/test/util/print-versions.test.ts index a07ae2a83..9594eb317 100644 --- a/packages/cli/test/util/print-versions.test.ts +++ b/packages/cli/test/util/print-versions.test.ts @@ -109,7 +109,7 @@ test('json output', async (t) => { const logger = createMockLogger('', { level: 'info', json: true }); await printVersions(logger, { adaptors: ['http'], logJson: true }); - const last = JSON.parse(logger._last) as JSONLog; + const last = logger._last as JSONLog; t.is(last.level, 'always'); const [{ versions }] = last.message; From d8e8ee3aa3af2bd05fb28af0a57888b597e34720 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 12:33:02 +0000 Subject: [PATCH 012/128] engine: types --- packages/engine-multi/src/worker/thread/helpers.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index ea8ee0363..7f14b31ef 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -19,6 +19,7 @@ export const createLoggers = ( publish?: any ) => { const log = (message: JSONLog) => { + // @ts-ignore publish(workerEvents.LOG, { workflowId, message: { From a60a83e63ecd9e38a5d04aa0649d2aed11560985 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 12:48:59 +0000 Subject: [PATCH 013/128] worker: update tests --- packages/ws-worker/src/events/step-start.ts | 2 +- 
packages/ws-worker/src/mock/runtime-engine.ts | 2 +- .../ws-worker/src/util/log-final-reason.ts | 2 +- packages/ws-worker/test/api/execute.test.ts | 12 +++---- .../test/mock/runtime-engine.test.ts | 33 +++++++++++++++++++ 5 files changed, 42 insertions(+), 9 deletions(-) diff --git a/packages/ws-worker/src/events/step-start.ts b/packages/ws-worker/src/events/step-start.ts index 9703fb0e5..e95a56245 100644 --- a/packages/ws-worker/src/events/step-start.ts +++ b/packages/ws-worker/src/events/step-start.ts @@ -58,7 +58,7 @@ export default async function onStepStart( await onJobLog(versionLogContext, { time, - message: [versionMessage], + message: JSON.stringify([versionMessage]), level: 'info', name: 'VER', }); diff --git a/packages/ws-worker/src/mock/runtime-engine.ts b/packages/ws-worker/src/mock/runtime-engine.ts index 068575fb2..b8f2741e5 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ b/packages/ws-worker/src/mock/runtime-engine.ts @@ -109,7 +109,7 @@ async function createMock() { threadId: threadId, level: 'info', json: true, - message: args, + message: JSON.stringify(args), time: Date.now(), }); }, diff --git a/packages/ws-worker/src/util/log-final-reason.ts b/packages/ws-worker/src/util/log-final-reason.ts index aaa37c492..7f24314e5 100644 --- a/packages/ws-worker/src/util/log-final-reason.ts +++ b/packages/ws-worker/src/util/log-final-reason.ts @@ -12,7 +12,7 @@ export default async (context: Context, reason: ExitReason) => { await onJobLog(context, { time, - message: [message], + message: JSON.stringify([message]), level: 'info', name: 'R/T', }); diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 51eaa0112..592bb4fba 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -1,5 +1,5 @@ import test from 'ava'; -import { JSONLog, createMockLogger } from '@openfn/logger'; +import { createMockLogger } from '@openfn/logger'; import { 
STEP_START, @@ -24,7 +24,7 @@ import { mockChannel } from '../../src/mock/sockets'; import { stringify, createAttemptState } from '../../src/util'; import type { ExecutionPlan } from '@openfn/runtime'; -import type { Attempt, AttemptState } from '../../src/types'; +import type { Attempt, AttemptState, JSONLog } from '../../src/types'; const enc = new TextEncoder(); @@ -71,7 +71,7 @@ test('jobLog should should send a log event outside a run', async (t) => { name: 'R/T', level: 'info', time: getBigIntTimestamp(), - message: ['ping'], + message: JSON.stringify(['ping']), }; // The logger should print in nanoseconds (19 digits) @@ -79,7 +79,7 @@ test('jobLog should should send a log event outside a run', async (t) => { const result = { attempt_id: plan.id, - message: log.message, + message: JSON.parse(log.message), // Conveniently this won't have rounding errors because the last // 3 digits are always 000, because of how we generate the stamp above timestamp: log.time.substring(0, 16), @@ -109,7 +109,7 @@ test('jobLog should should send a log event inside a run', async (t) => { name: 'R/T', level: 'info', time: getBigIntTimestamp(), - message: ['ping'], + message: JSON.stringify(['ping']), }; // The logger should print in nanoseconds (19 digits) @@ -124,7 +124,7 @@ test('jobLog should should send a log event inside a run', async (t) => { const channel = mockChannel({ [ATTEMPT_LOG]: (evt) => { t.truthy(evt.step_id); - t.deepEqual(evt.message, log.message); + t.deepEqual(evt.message, JSON.parse(log.message)); t.is(evt.level, log.level); t.is(evt.source, log.name); t.is(evt.timestamp, log.time.substring(0, 16)); diff --git a/packages/ws-worker/test/mock/runtime-engine.test.ts b/packages/ws-worker/test/mock/runtime-engine.test.ts index 65ed5d19d..bfb9eba63 100644 --- a/packages/ws-worker/test/mock/runtime-engine.test.ts +++ b/packages/ws-worker/test/mock/runtime-engine.test.ts @@ -194,6 +194,39 @@ test('only listen to events for the correct workflow', async (t) => { 
t.pass(); }); +test('log events should stringify a string message', async (t) => { + const wf = clone(sampleWorkflow); + wf.jobs[0].expression = + 'fn((s) => {console.log("haul away joe"); return s; })'; + + engine.listen(wf.id, { + 'workflow-log': ({ message }) => { + t.is(typeof message, 'string'); + const result = JSON.parse(message); + t.deepEqual(result, ['haul away joe']); + }, + }); + + engine.execute(wf); + await waitForEvent(engine, 'workflow-complete'); +}); + +test('log events should stringify an object message', async (t) => { + const wf = clone(sampleWorkflow); + wf.jobs[0].expression = 'fn((s) => {console.log({ x: 22 }); return s; })'; + + engine.listen(wf.id, { + 'workflow-log': ({ message }) => { + t.is(typeof message, 'string'); + const result = JSON.parse(message); + t.deepEqual(result, [{ x: 22 }]); + }, + }); + + engine.execute(wf); + await waitForEvent(engine, 'workflow-complete'); +}); + test('do nothing for a job if no expression and adaptor (trigger node)', async (t) => { const workflow = { id: 'w1', From 0d9804ecf1fbe3c28f9ea4e298cf3e6c56428c11 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 14:22:03 +0000 Subject: [PATCH 014/128] logger: set a special json emitter so that json logs get nicely printed in the CLI --- packages/logger/src/options.ts | 20 +++--- packages/logger/src/util/default-emitter.ts | 8 +++ packages/logger/src/util/json-emitter.ts | 16 +++++ .../logger/test/util/json-emitter.test.ts | 61 +++++++++++++++++++ 4 files changed, 92 insertions(+), 13 deletions(-) create mode 100644 packages/logger/src/util/default-emitter.ts create mode 100644 packages/logger/src/util/json-emitter.ts create mode 100644 packages/logger/test/util/json-emitter.test.ts diff --git a/packages/logger/src/options.ts b/packages/logger/src/options.ts index d51d05fd7..801d2feaf 100644 --- a/packages/logger/src/options.ts +++ b/packages/logger/src/options.ts @@ -1,4 +1,6 @@ import { SanitizePolicies } from './sanitize'; +import 
defaultEmitter from './util/default-emitter'; +import jsonEmitter from './util/json-emitter'; export type LogLevel = 'debug' | 'info' | 'default' | 'none'; @@ -36,24 +38,13 @@ export type LogOptions = { sanitize?: SanitizePolicies; }; -// TODO not crazy about the handling of this -// but to support the success handler we need to alias console.log -const defaultEmitter = { - ...console, - // Direct error and warn logs to stdout, so that they appear in sequence - error: (...args: any[]) => console.log(...args), - warn: (...args: any[]) => console.log(...args), - success: (...args: any[]) => console.log(...args), - always: (...args: any[]) => console.log(...args), -}; - export const defaults: Required = { level: 'default', - // TODO support an array of emitters here - logger: defaultEmitter, // I guess? hideNamespace: false, hideIcons: false, + // @ts-ignore + emitter: defaultEmitter, // Not implemented wrap: false, @@ -70,6 +61,9 @@ const parseOptions = (opts: LogOptions = {}): Required => { // First default all values const options = { ...defaults, + // If logging to json, and no emitter is provided, + // use this emitter which will serialise the output to JSON + emitter: opts.json ? 
jsonEmitter : defaultEmitter, ...opts, }; diff --git a/packages/logger/src/util/default-emitter.ts b/packages/logger/src/util/default-emitter.ts new file mode 100644 index 000000000..649162db0 --- /dev/null +++ b/packages/logger/src/util/default-emitter.ts @@ -0,0 +1,8 @@ +export default { + ...console, + // Direct error and warn logs to stdout, so that they appear in sequence + error: (...args: any[]) => console.log(...args), + warn: (...args: any[]) => console.log(...args), + success: (...args: any[]) => console.log(...args), + always: (...args: any[]) => console.log(...args), +}; diff --git a/packages/logger/src/util/json-emitter.ts b/packages/logger/src/util/json-emitter.ts new file mode 100644 index 000000000..5f2b64d40 --- /dev/null +++ b/packages/logger/src/util/json-emitter.ts @@ -0,0 +1,16 @@ +import stringify from 'fast-safe-stringify'; + +const jsonEmitter: Console = { + ...console, +}; + +['log', 'info', 'success', 'always', 'debug', 'warn', 'error'].forEach((fn) => { + // @ts-ignore + jsonEmitter[fn] = (...args: any[]) => { + const stringified = args.map((value) => stringify(value)); + // @ts-ignore + console[fn](...stringified); + }; +}); + +export default jsonEmitter; diff --git a/packages/logger/test/util/json-emitter.test.ts b/packages/logger/test/util/json-emitter.test.ts new file mode 100644 index 000000000..edd9ff240 --- /dev/null +++ b/packages/logger/test/util/json-emitter.test.ts @@ -0,0 +1,61 @@ +import test from 'ava'; +import jsonEmitter from '../../src/util/json-emitter'; + +const levels = ['log', 'info', 'success', 'always', 'debug', 'warn', 'error']; + +const history = {}; + +test.before(() => { + levels.forEach((l) => { + history[l] = []; + + // Override the console object + // the json emitter should redirect here + console[l] = (...args: any[]) => { + history[l].push(args); + }; + }); +}); + +levels.forEach((level) => { + test(`should log a string to ${level}`, (t) => { + jsonEmitter[level]('hello'); + + const last = 
history[level].pop(); + t.is(last.length, 1); + t.is(last[0], '"hello"'); + }); + + test(`should log a number to ${level}`, (t) => { + jsonEmitter[level](1); + + const last = history[level].pop(); + t.is(last.length, 1); + t.is(last[0], '1'); + }); + + test(`should log a boolean to ${level}`, (t) => { + jsonEmitter[level](false); + + const last = history[level].pop(); + t.is(last.length, 1); + t.is(last[0], 'false'); + }); + + test(`should log an error to ${level}`, (t) => { + jsonEmitter[level](new Error('err')); + + const last = history[level].pop(); + t.is(last.length, 1); + t.is(last[0], '{}'); + }); + + test(`should log an object to ${level}`, (t) => { + const o = { a: 1, b: 2, c: 3 }; + jsonEmitter[level](o); + + const last = history[level].pop(); + t.is(last.length, 1); + t.is(last[0], JSON.stringify(o)); + }); +}); From 99defcd698f7f014bb87be206391c23678f2917d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 14:41:46 +0000 Subject: [PATCH 015/128] logger: fix types --- packages/logger/src/options.ts | 5 ++--- packages/logger/src/util/default-emitter.ts | 4 +++- packages/logger/src/util/json-emitter.ts | 5 +++-- packages/logger/test/util/json-emitter.test.ts | 14 ++++++++++++-- 4 files changed, 20 insertions(+), 8 deletions(-) diff --git a/packages/logger/src/options.ts b/packages/logger/src/options.ts index 801d2feaf..6cfa7f7a7 100644 --- a/packages/logger/src/options.ts +++ b/packages/logger/src/options.ts @@ -43,8 +43,7 @@ export const defaults: Required = { hideNamespace: false, hideIcons: false, - // @ts-ignore - emitter: defaultEmitter, + logger: defaultEmitter, // Not implemented wrap: false, @@ -63,7 +62,7 @@ const parseOptions = (opts: LogOptions = {}): Required => { ...defaults, // If logging to json, and no emitter is provided, // use this emitter which will serialise the output to JSON - emitter: opts.json ? jsonEmitter : defaultEmitter, + logger: opts.json ? 
jsonEmitter : defaultEmitter, ...opts, }; diff --git a/packages/logger/src/util/default-emitter.ts b/packages/logger/src/util/default-emitter.ts index 649162db0..e7579c6bc 100644 --- a/packages/logger/src/util/default-emitter.ts +++ b/packages/logger/src/util/default-emitter.ts @@ -1,3 +1,5 @@ +import type { LogEmitter } from '../options'; + export default { ...console, // Direct error and warn logs to stdout, so that they appear in sequence @@ -5,4 +7,4 @@ export default { warn: (...args: any[]) => console.log(...args), success: (...args: any[]) => console.log(...args), always: (...args: any[]) => console.log(...args), -}; +} as LogEmitter; diff --git a/packages/logger/src/util/json-emitter.ts b/packages/logger/src/util/json-emitter.ts index 5f2b64d40..4258597d3 100644 --- a/packages/logger/src/util/json-emitter.ts +++ b/packages/logger/src/util/json-emitter.ts @@ -1,6 +1,7 @@ import stringify from 'fast-safe-stringify'; +import { LogEmitter } from '../options'; -const jsonEmitter: Console = { +const jsonEmitter: Partial = { ...console, }; @@ -13,4 +14,4 @@ const jsonEmitter: Console = { }; }); -export default jsonEmitter; +export default jsonEmitter as LogEmitter; diff --git a/packages/logger/test/util/json-emitter.test.ts b/packages/logger/test/util/json-emitter.test.ts index edd9ff240..8a7df5a09 100644 --- a/packages/logger/test/util/json-emitter.test.ts +++ b/packages/logger/test/util/json-emitter.test.ts @@ -1,9 +1,18 @@ import test from 'ava'; import jsonEmitter from '../../src/util/json-emitter'; +import { LogFns } from '../../src/logger'; -const levels = ['log', 'info', 'success', 'always', 'debug', 'warn', 'error']; +const levels: LogFns[] = [ + 'log', + 'info', + 'success', + 'always', + 'debug', + 'warn', + 'error', +]; -const history = {}; +const history: Record = {}; test.before(() => { levels.forEach((l) => { @@ -11,6 +20,7 @@ test.before(() => { // Override the console object // the json emitter should redirect here + // @ts-ignore add success 
function console[l] = (...args: any[]) => { history[l].push(args); }; From 94c900f319b2f281026074e5e835aea2759b0c02 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 14:52:35 +0000 Subject: [PATCH 016/128] logger: log all json to .log --- packages/logger/src/util/json-emitter.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/logger/src/util/json-emitter.ts b/packages/logger/src/util/json-emitter.ts index 4258597d3..b8f688ca5 100644 --- a/packages/logger/src/util/json-emitter.ts +++ b/packages/logger/src/util/json-emitter.ts @@ -9,8 +9,7 @@ const jsonEmitter: Partial = { // @ts-ignore jsonEmitter[fn] = (...args: any[]) => { const stringified = args.map((value) => stringify(value)); - // @ts-ignore - console[fn](...stringified); + console.log(...stringified); }; }); From 77bab1a72aa58a01ffda938ca48a7818c7631f2c Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 15:29:04 +0000 Subject: [PATCH 017/128] tests: fixes --- integration-tests/cli/test/errors.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/integration-tests/cli/test/errors.test.ts b/integration-tests/cli/test/errors.test.ts index e9e5a80b5..1c3e66a43 100644 --- a/integration-tests/cli/test/errors.test.ts +++ b/integration-tests/cli/test/errors.test.ts @@ -101,7 +101,7 @@ test.serial('circular workflow', async (t) => { assertLog(t, stdlogs, /Error validating execution plan/i); assertLog(t, stdlogs, /Workflow failed/i); - const error = stdlogs.find((l) => l.message[0].severity); + const error = stdlogs.find((l) => l.message[0].name === 'ValidationError'); t.regex(error.message[0].message, /circular dependency: b <-> a/i); }); @@ -116,7 +116,7 @@ test.serial('multiple inputs', async (t) => { assertLog(t, stdlogs, /Error validating execution plan/i); assertLog(t, stdlogs, /Workflow failed/i); - const error = stdlogs.find((l) => l.message[0].severity); + const error = stdlogs.find((l) => l.message[0].name === 
'ValidationError'); t.regex(error.message[0].message, /multiple dependencies detected for: c/i); }); @@ -132,6 +132,6 @@ test.serial('invalid start', async (t) => { assertLog(t, stdlogs, /Workflow failed/i); // Find the error obejct which is logged out - const error = stdlogs.find((l) => l.message[0].severity); + const error = stdlogs.find((l) => l.message[0].name === 'ValidationError'); t.regex(error.message[0].message, /could not find start job: nope/i); }); From 1313b09f9594b9b29f246f5b3a1a5e0fb35adaa7 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 15:36:08 +0000 Subject: [PATCH 018/128] logger: fix tests --- .../logger/test/util/json-emitter.test.ts | 26 +++++++------------ 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/packages/logger/test/util/json-emitter.test.ts b/packages/logger/test/util/json-emitter.test.ts index 8a7df5a09..09a1d4df7 100644 --- a/packages/logger/test/util/json-emitter.test.ts +++ b/packages/logger/test/util/json-emitter.test.ts @@ -12,26 +12,20 @@ const levels: LogFns[] = [ 'error', ]; -const history: Record = {}; +const history = []; test.before(() => { - levels.forEach((l) => { - history[l] = []; - - // Override the console object - // the json emitter should redirect here - // @ts-ignore add success function - console[l] = (...args: any[]) => { - history[l].push(args); - }; - }); + // All json functions emit to log - so we just have to override that one function here + console.log = (...args: any[]) => { + history.push(args); + }; }); levels.forEach((level) => { test(`should log a string to ${level}`, (t) => { jsonEmitter[level]('hello'); - const last = history[level].pop(); + const last = history.pop(); t.is(last.length, 1); t.is(last[0], '"hello"'); }); @@ -39,7 +33,7 @@ levels.forEach((level) => { test(`should log a number to ${level}`, (t) => { jsonEmitter[level](1); - const last = history[level].pop(); + const last = history.pop(); t.is(last.length, 1); t.is(last[0], '1'); }); @@ -47,7 +41,7 @@ 
levels.forEach((level) => { test(`should log a boolean to ${level}`, (t) => { jsonEmitter[level](false); - const last = history[level].pop(); + const last = history.pop(); t.is(last.length, 1); t.is(last[0], 'false'); }); @@ -55,7 +49,7 @@ levels.forEach((level) => { test(`should log an error to ${level}`, (t) => { jsonEmitter[level](new Error('err')); - const last = history[level].pop(); + const last = history.pop(); t.is(last.length, 1); t.is(last[0], '{}'); }); @@ -64,7 +58,7 @@ levels.forEach((level) => { const o = { a: 1, b: 2, c: 3 }; jsonEmitter[level](o); - const last = history[level].pop(); + const last = history.pop(); t.is(last.length, 1); t.is(last[0], JSON.stringify(o)); }); From 63a251a4580816b8c3e3d72ef8b30fcffb677819 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 15:40:22 +0000 Subject: [PATCH 019/128] logger: serialise print() properly --- integration-tests/cli/test/metadata.test.ts | 3 +-- packages/logger/src/logger.ts | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/integration-tests/cli/test/metadata.test.ts b/integration-tests/cli/test/metadata.test.ts index 158e17483..2ff24a967 100644 --- a/integration-tests/cli/test/metadata.test.ts +++ b/integration-tests/cli/test/metadata.test.ts @@ -17,11 +17,10 @@ test.before(async () => { }); // Generate metadata -test.serial( +test.serial.only( `openfn metadata -S "${state}" -a test=${modulePath} --log-json --log info`, async (t) => { const { stdout } = await run(t.title); - t.regex(stdout, /Generating metadata/); t.regex(stdout, /Metadata function found. 
Generating metadata/); t.notRegex(stdout, /Returning metadata from cache/); diff --git a/packages/logger/src/logger.ts b/packages/logger/src/logger.ts index 38de46685..74e59e486 100644 --- a/packages/logger/src/logger.ts +++ b/packages/logger/src/logger.ts @@ -240,7 +240,7 @@ export default function (name?: string, options: LogOptions = {}): Logger { const print = (...args: any[]) => { if (opts.level !== NONE) { if (opts.json) { - emitter.info(JSON.stringify({ message: args })); + emitter.info({ message: args }); } else { emitter.info(...args); } From 8024831c80c9dede3475ad1a95b4a98522220309 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 15:42:59 +0000 Subject: [PATCH 020/128] logger: types --- packages/logger/test/util/json-emitter.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/logger/test/util/json-emitter.test.ts b/packages/logger/test/util/json-emitter.test.ts index 09a1d4df7..346971f27 100644 --- a/packages/logger/test/util/json-emitter.test.ts +++ b/packages/logger/test/util/json-emitter.test.ts @@ -12,7 +12,7 @@ const levels: LogFns[] = [ 'error', ]; -const history = []; +const history: any[] = []; test.before(() => { // All json functions emit to log - so we just have to override that one function here From bf610e71da76e8dc9854d10cd88d2816bb31a826 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 16:12:18 +0000 Subject: [PATCH 021/128] engine: fix logs to gcp They were neglecting to parse the strings sent out by the new json logger --- packages/engine-multi/src/api/execute.ts | 8 ++++++-- packages/engine-multi/src/api/lifecycle.ts | 6 +++++- packages/engine-multi/src/events.ts | 7 ++++++- packages/engine-multi/src/worker/events.ts | 7 ++++++- packages/engine-multi/test/api/execute.test.ts | 2 +- packages/engine-multi/test/api/lifecycle.test.ts | 2 +- 6 files changed, 25 insertions(+), 7 deletions(-) diff --git a/packages/engine-multi/src/api/execute.ts 
b/packages/engine-multi/src/api/execute.ts index e4ff9c67e..6521b1e76 100644 --- a/packages/engine-multi/src/api/execute.ts +++ b/packages/engine-multi/src/api/execute.ts @@ -69,7 +69,9 @@ const execute = async (context: ExecutionContext) => { threadId: '-', // no thread at this point message: { level: 'debug', - message: [`Memory limit: ${workerOptions.memoryLimitMb}mb`], + message: JSON.stringify([ + `Memory limit: ${workerOptions.memoryLimitMb}mb`, + ]), name: 'RTE', time: timestamp().toString(), }, @@ -82,7 +84,9 @@ const execute = async (context: ExecutionContext) => { threadId: '-', // no thread at this point message: { level: 'debug', - message: [`Timeout: ${workerOptions.timeout / 1000}s`], + message: JSON.stringify([ + `Timeout: ${workerOptions.timeout / 1000}s`, + ]), name: 'RTE', time: timestamp().toString(), }, diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index 33fac1737..e67b58b5d 100644 --- a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -121,7 +121,11 @@ export const log = ( const { threadId } = event; if (event.message.name !== 'JOB') { - context.logger.proxy(event.message); + const proxy = { + ...event.message, + message: JSON.parse(event.message.message), + }; + context.logger.proxy(proxy); } context.emit(externalEvents.WORKFLOW_LOG, { diff --git a/packages/engine-multi/src/events.ts b/packages/engine-multi/src/events.ts index a7af03b1c..c2c855134 100644 --- a/packages/engine-multi/src/events.ts +++ b/packages/engine-multi/src/events.ts @@ -89,7 +89,12 @@ export interface JobErrorPayload extends ExternalEvent { next: string[]; // downstream jobs } -export interface WorkerLogPayload extends ExternalEvent, JSONLog {} +export interface WorkerLogPayload + extends ExternalEvent, + Omit { + // message is always a JSON string + message: string; +} export interface EdgeResolvedPayload extends ExternalEvent { edgeId: string; // interesting, we don't 
really have this yet. Is index more appropriate? key? yeah, it's target node basically diff --git a/packages/engine-multi/src/worker/events.ts b/packages/engine-multi/src/worker/events.ts index 9e871e338..b168d4fb8 100644 --- a/packages/engine-multi/src/worker/events.ts +++ b/packages/engine-multi/src/worker/events.ts @@ -67,8 +67,13 @@ export interface JobErrorEvent extends InternalEvent { next: string[]; } +export type SerializedLogEvent = Omit & { + // message is always a JSON string + message: string; +}; + export interface LogEvent extends InternalEvent { - message: JSONLog; + message: SerializedLogEvent; } export interface ErrorEvent { diff --git a/packages/engine-multi/test/api/execute.test.ts b/packages/engine-multi/test/api/execute.test.ts index 8e03c34a8..dfb58744e 100644 --- a/packages/engine-multi/test/api/execute.test.ts +++ b/packages/engine-multi/test/api/execute.test.ts @@ -106,7 +106,7 @@ test.serial('should emit a log event with the memory limit', async (t) => { await execute(context); const log = logs.find(({ name }) => name === 'RTE'); - t.is(log.message[0], 'Memory limit: 666mb'); + t.is(log.message, JSON.stringify(['Memory limit: 666mb'])); }); test.serial('should emit a workflow-complete event', async (t) => { diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index 624f72388..218df83a4 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -173,7 +173,7 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { message: { level: 'info', name: 'job', - message: ['oh hai'], + message: JSON.stringify(['oh hai']), time: Date.now() - 100, }, }; From 6cc50fcedb89ae1c06a5cfb39c27d331896c34a0 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 17:44:01 +0000 Subject: [PATCH 022/128] test: update log handling --- integration-tests/worker/test/integration.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 423c4802c..3fd04fc11 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -147,7 +147,7 @@ test("Don't send job logs to stdout", (t) => { }; lightning.once('attempt:complete', () => { - const jsonLogs = engineLogger._history.map((l) => JSON.parse(l)); + const jsonLogs = engineLogger._history; // The engine logger shouldn't print out any job logs const jobLog = jsonLogs.find((l) => l.name === 'JOB'); From 502eaef2dacdce4bc262c727f149937d8b36bce4 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 18:01:14 +0000 Subject: [PATCH 023/128] engine: fix passing test It was secretly failing under the hood --- packages/engine-multi/test/integration.test.ts | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index 88489b654..a7b150334 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -116,6 +116,8 @@ test.serial('trigger workflow-complete', (t) => { const plan = createPlan(); api.execute(plan).on('workflow-complete', (evt) => { + t.falsy(evt.state.errors); + t.is(evt.workflowId, plan.id); t.truthy(evt.duration); t.truthy(evt.state); @@ -130,24 +132,29 @@ test.serial('trigger workflow-log for job logs', (t) => { return new Promise(async (done) => { api = await createAPI({ logger, - compile: { - skip: true, - }, }); const plan = createPlan([ { - expression: `${withFn}console.log('hola')`, + expression: `${withFn}fn((s) => { console.log('hola'); return s; })`, }, ]); + let didLog = false; + api.execute(plan).on('workflow-log', (evt) => { if (evt.name === 'JOB') { + didLog = true; t.deepEqual(evt.message, JSON.stringify(['hola'])); t.pass('workflow logged'); - done(); } }); + + 
api.execute(plan).on('workflow-complete', (evt) => { + t.true(didLog); + t.falsy(evt.state.errors); + done(); + }); }); }); From 4b080c05e196f8a14f9806358f3dcee4e47e3127 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 18:34:22 +0000 Subject: [PATCH 024/128] runtime: add tests on job logger and errors --- packages/runtime/test/runtime.test.ts | 45 +++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index 48c735f16..719a7acc1 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -386,6 +386,51 @@ test('log errors, write to state, and continue', async (t) => { t.truthy(logger._find('error', /failed job a/i)); }); +test('log job code to the job logger', async (t) => { + const plan: ExecutionPlan = { + jobs: [ + { + id: 'a', + expression: 'export default [(s) => { console.log("hi"); return s;}]', + }, + ], + }; + + const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); + await run(plan, {}, { jobLogger }); + + t.is(jobLogger._history.length, 1); + const [out] = jobLogger._history; + + t.is(out.level, 'info'); + t.is(out.message[0], 'hi'); +}); + +test.only('log and serialize an error to the job logger', async (t) => { + const plan: ExecutionPlan = { + jobs: [ + { + id: 'a', + expression: + 'export default [(s) => { console.log(new Error("hi")); return s;}]', + }, + ], + }; + + const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); + await run(plan, {}, { jobLogger }); + + t.is(jobLogger._history.length, 1); + const [out] = jobLogger._history; + t.log(out); + + t.is(out.level, 'info'); + t.is(out.message[0].name, 'Error'); + t.is(out.message[0].message, 'hi'); + // should not be an error instance + t.falsy(out.message[0].stack); +}); + test('error reports can be overwritten', async (t) => { const plan: ExecutionPlan = { jobs: [ From 6e18805f7499fb595d84d2b1276f54c85c9d9cc7 
Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 18:34:41 +0000 Subject: [PATCH 025/128] logger: improve detection of error objects --- packages/logger/src/sanitize.ts | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/packages/logger/src/sanitize.ts b/packages/logger/src/sanitize.ts index fc7e548a3..7597a8d95 100644 --- a/packages/logger/src/sanitize.ts +++ b/packages/logger/src/sanitize.ts @@ -26,13 +26,30 @@ const scrubbers: Record any> = { none: (item) => item, }; +// If an error is generated inside the sandbox, it does not seem to be instanceof error +// So let's walk the prototype chain to see if it LOOKs like an error +const isError = (obj: any) => { + if (obj instanceof Error) { + return true; + } + + let o = obj; + while (o && o.constructor) { + if (o.constructor.name === 'Error') { + return true; + } + o = o.prototype?.constructor; + } + return false; +}; + // Sanitize console output const sanitize = (item: any, options: SanitizeOptions = {}) => { // Stringify output to ensure we show deep nesting const maybeStringify = (o: any) => options.stringify === false ? 
o : stringify(o, undefined, 2); - if (item instanceof Error) { + if (isError(item)) { if (options.serializeErrors) { return { name: item.name, From 32aed6702eec85b990581c3ba5fe727e13c7bcd8 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 31 Jan 2024 18:35:32 +0000 Subject: [PATCH 026/128] engine: tests on error logging --- .../engine-multi/src/worker/thread/helpers.ts | 2 ++ .../engine-multi/src/worker/thread/run.ts | 7 +++- .../engine-multi/test/integration.test.ts | 34 +++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index 7f14b31ef..ad16228be 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -19,6 +19,7 @@ export const createLoggers = ( publish?: any ) => { const log = (message: JSONLog) => { + // console.trace(' >>>>>> ', message); // @ts-ignore publish(workerEvents.LOG, { workflowId, @@ -47,6 +48,7 @@ export const createLoggers = ( json: true, sanitize, }); + const jobLogger = createLogger('JOB', { logger: emitter, level: 'debug', diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index 35bc1dc60..12fe90360 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -38,7 +38,12 @@ register({ // override console.log // any console.log statements will now get treated as adaptor logs - console = adaptorLogger; + // const trace = console.trace; + // console = adaptorLogger; + + // // Leave console.trace for local debugging + // // This goes to stdout but not the adpator logger + // console.trace = trace; // TODO I would like to pull these options out of here const options = { diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index a7b150334..fed31f5b5 100644 --- 
a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -158,6 +158,40 @@ test.serial('trigger workflow-log for job logs', (t) => { }); }); +test.serial('log errors', (t) => { + return new Promise(async (done) => { + api = await createAPI({ + logger, + }); + + const plan = createPlan([ + { + expression: `${withFn}fn((s) => { console.log(new Error('hola')); return s; })`, + }, + ]); + + api.execute(plan).on('workflow-log', (evt) => { + if (evt.name === 'JOB') { + t.log(evt); + t.deepEqual( + evt.message, + JSON.stringify([ + { + name: 'Error', + message: 'hola', + }, + ]) + ); + t.pass('workflow logged'); + } + }); + + api.execute(plan).on('workflow-complete', (evt) => { + done(); + }); + }); +}); + test.serial('trigger workflow-log for adaptor logs', (t) => { return new Promise(async (done) => { api = await createAPI({ From 2f80a78ba3b3f2305a8e6ea98bbdeacedfa06c2f Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 09:41:05 +0000 Subject: [PATCH 027/128] engine: restore adaptor logger --- packages/engine-multi/src/worker/thread/run.ts | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index 12fe90360..fa23a2c02 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -36,14 +36,16 @@ register({ publish ); + // Save the debug function so that we can use it + const debug = console.debug; + // override console.log // any console.log statements will now get treated as adaptor logs - // const trace = console.trace; - // console = adaptorLogger; + console = adaptorLogger; - // // Leave console.trace for local debugging - // // This goes to stdout but not the adpator logger - // console.trace = trace; + // Leave console.debug for local debugging + // This goes to stdout but not the adpator logger + console.debug = debug; // TODO I 
would like to pull these options out of here const options = { From 823b471fe6c19e4b47db28c5560313842c25c59b Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 10:12:27 +0000 Subject: [PATCH 028/128] changesets --- .changeset/dull-bags-punch.md | 3 ++- .changeset/real-snakes-begin.md | 5 +++++ .changeset/unlucky-moose-greet.md | 5 +++++ 3 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 .changeset/real-snakes-begin.md create mode 100644 .changeset/unlucky-moose-greet.md diff --git a/.changeset/dull-bags-punch.md b/.changeset/dull-bags-punch.md index a2f607770..968ee0eb8 100644 --- a/.changeset/dull-bags-punch.md +++ b/.changeset/dull-bags-punch.md @@ -2,4 +2,5 @@ '@openfn/logger': patch --- -Do not serialize messages when logging to JSON" +In JSON mode, do not stringify emitted messages. +Better handling of error objects diff --git a/.changeset/real-snakes-begin.md b/.changeset/real-snakes-begin.md new file mode 100644 index 000000000..2b3849e60 --- /dev/null +++ b/.changeset/real-snakes-begin.md @@ -0,0 +1,5 @@ +--- +'@openfn/engine-multi': patch +--- + +Update handling of logs so that JSON messages are stringified diff --git a/.changeset/unlucky-moose-greet.md b/.changeset/unlucky-moose-greet.md new file mode 100644 index 000000000..5b4d40748 --- /dev/null +++ b/.changeset/unlucky-moose-greet.md @@ -0,0 +1,5 @@ +--- +'@openfn/ws-worker': patch +--- + +Update handling of logs to accept stringified messages From b76266e13322356f8c18383ca52333cfd21b3d5d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 10:12:49 +0000 Subject: [PATCH 029/128] Tidy ups --- integration-tests/cli/test/metadata.test.ts | 2 +- packages/engine-multi/src/worker/thread/helpers.ts | 2 -- packages/engine-multi/test/errors.test.ts | 2 +- packages/logger/test/logger.test.ts | 2 +- packages/runtime/test/runtime.test.ts | 2 +- 5 files changed, 4 insertions(+), 6 deletions(-) diff --git a/integration-tests/cli/test/metadata.test.ts 
b/integration-tests/cli/test/metadata.test.ts index 2ff24a967..7be1413dc 100644 --- a/integration-tests/cli/test/metadata.test.ts +++ b/integration-tests/cli/test/metadata.test.ts @@ -17,7 +17,7 @@ test.before(async () => { }); // Generate metadata -test.serial.only( +test.serial( `openfn metadata -S "${state}" -a test=${modulePath} --log-json --log info`, async (t) => { const { stdout } = await run(t.title); diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index 7621939fd..0b721bc85 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -19,8 +19,6 @@ export const createLoggers = ( publish?: any ) => { const log = (message: JSONLog) => { - // console.trace(' >>>>>> ', message); - // @ts-ignore publish(workerEvents.LOG, { workflowId, message: { diff --git a/packages/engine-multi/test/errors.test.ts b/packages/engine-multi/test/errors.test.ts index 2a57db481..e9202584b 100644 --- a/packages/engine-multi/test/errors.test.ts +++ b/packages/engine-multi/test/errors.test.ts @@ -154,7 +154,7 @@ test.serial.skip('execution error from async code', (t) => { }); }); -test.serial.only('emit a crash error on process.exit()', (t) => { +test.serial('emit a crash error on process.exit()', (t) => { return new Promise((done) => { const plan = { id: 'z', diff --git a/packages/logger/test/logger.test.ts b/packages/logger/test/logger.test.ts index f1700ffd5..53e9a7515 100644 --- a/packages/logger/test/logger.test.ts +++ b/packages/logger/test/logger.test.ts @@ -278,7 +278,7 @@ test('print() should not log if level is none', (t) => { t.is(logger._history.length, 0); }); -test.only('print() should log as json', (t) => { +test('print() should log as json', (t) => { const options = { json: true }; const logger = createLogger('x', options); logger.print('abc'); diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts 
index 719a7acc1..d4bb1888d 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -406,7 +406,7 @@ test('log job code to the job logger', async (t) => { t.is(out.message[0], 'hi'); }); -test.only('log and serialize an error to the job logger', async (t) => { +test('log and serialize an error to the job logger', async (t) => { const plan: ExecutionPlan = { jobs: [ { From 425c515cdb3600cd24c4cd2fab51cdabfc3ac489 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 10:42:04 +0000 Subject: [PATCH 030/128] engine: refactor log messages (and be a bit more lenient about structure) --- packages/engine-multi/README.md | 10 ++++++++++ packages/engine-multi/src/api/execute.ts | 12 ++++-------- packages/engine-multi/src/api/lifecycle.ts | 13 +++++++++---- packages/engine-multi/src/events.ts | 12 ++---------- packages/engine-multi/src/worker/events.ts | 7 ++++--- packages/engine-multi/src/worker/thread/helpers.ts | 2 +- packages/engine-multi/test/api/execute.test.ts | 2 +- packages/engine-multi/test/api/lifecycle.test.ts | 4 ++-- packages/engine-multi/test/worker/helper.test.ts | 8 ++++---- .../engine-multi/test/worker/mock-worker.test.ts | 4 ++-- 10 files changed, 39 insertions(+), 35 deletions(-) diff --git a/packages/engine-multi/README.md b/packages/engine-multi/README.md index 3942e207a..bc7b1b9a9 100644 --- a/packages/engine-multi/README.md +++ b/packages/engine-multi/README.md @@ -110,3 +110,13 @@ engine.execute(plan, { resolvers }); ``` Initial state and credentials are at the moment pre-loaded, with a "fully resolved" state object passed into the runtime. The Runtime has the ability to lazy load but implementing lazy loading across the worker_thread interface has proven tricky. + +## Note on Debugging + +Debugging in the engine can be really tricky. 
+ +First there's the problem that a lot of code runs inside a worker thread in a child process, which is hard to get a breakpoint into (at the time of writing I haven't managed to do it). + +But also, any console.log statements inside the inner thread will get consumed by the adaptor logger and won't go to stdout. + +As a workaround to this, use console.debug inside the thread to print to stdout. This is not bound to the adaptor logger. diff --git a/packages/engine-multi/src/api/execute.ts b/packages/engine-multi/src/api/execute.ts index 6479ffbc9..c35085581 100644 --- a/packages/engine-multi/src/api/execute.ts +++ b/packages/engine-multi/src/api/execute.ts @@ -67,11 +67,9 @@ const execute = async (context: ExecutionContext) => { type: workerEvents.LOG, workflowId: state.plan.id!, threadId: '-', // no thread at this point - message: { + log: { level: 'debug', - message: JSON.stringify([ - `Memory limit: ${workerOptions.memoryLimitMb}mb`, - ]), + message: [`Memory limit: ${workerOptions.memoryLimitMb}mb`], name: 'RTE', time: timestamp().toString(), }, @@ -82,11 +80,9 @@ const execute = async (context: ExecutionContext) => { type: workerEvents.LOG, workflowId: state.plan.id!, threadId: '-', // no thread at this point - message: { + log: { level: 'debug', - message: JSON.stringify([ - `Timeout: ${workerOptions.timeout / 1000}s`, - ]), + message: [`Timeout: ${workerOptions.timeout / 1000}s`], name: 'RTE', time: timestamp().toString(), }, diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index e67b58b5d..f1fefe9b4 100644 --- a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -120,17 +120,22 @@ export const log = ( ) => { const { threadId } = event; - if (event.message.name !== 'JOB') { + if (event.log.name !== 'JOB') { + // Forwrad the log event to the engine's logger + // Note that we may have to parse the serialized log string const proxy = { - ...event.message, - message: 
JSON.parse(event.message.message), + ...event.log, + message: + typeof event.log.message == 'string' + ? JSON.parse(event.log.message) + : event.log.message, }; context.logger.proxy(proxy); } context.emit(externalEvents.WORKFLOW_LOG, { threadId, - ...event.message, + ...event.log, }); }; diff --git a/packages/engine-multi/src/events.ts b/packages/engine-multi/src/events.ts index c2c855134..4dc1d63e6 100644 --- a/packages/engine-multi/src/events.ts +++ b/packages/engine-multi/src/events.ts @@ -1,8 +1,5 @@ -// TODO remove ths file in favour of types - -// TODO mayberename event constants -import { JSONLog } from '@openfn/logger'; import { Versions } from './types'; +import { SerializedLogEvent } from './worker/events'; // If the worker thread exists a process safely, it'll return this error code // any other error code is unexpected @@ -89,12 +86,7 @@ export interface JobErrorPayload extends ExternalEvent { next: string[]; // downstream jobs } -export interface WorkerLogPayload - extends ExternalEvent, - Omit { - // message is always a JSON string - message: string; -} +export interface WorkerLogPayload extends ExternalEvent, SerializedLogEvent {} export interface EdgeResolvedPayload extends ExternalEvent { edgeId: string; // interesting, we don't really have this yet. Is index more appropriate? key? 
yeah, it's target node basically diff --git a/packages/engine-multi/src/worker/events.ts b/packages/engine-multi/src/worker/events.ts index b168d4fb8..698df06eb 100644 --- a/packages/engine-multi/src/worker/events.ts +++ b/packages/engine-multi/src/worker/events.ts @@ -68,12 +68,13 @@ export interface JobErrorEvent extends InternalEvent { } export type SerializedLogEvent = Omit & { - // message is always a JSON string - message: string; + // the message is either an array of strings/object to log, + // or a JSON array that was previously serialized + message: string | any[]; }; export interface LogEvent extends InternalEvent { - message: SerializedLogEvent; + log: SerializedLogEvent; } export interface ErrorEvent { diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index 0b721bc85..cb8a2d417 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -21,7 +21,7 @@ export const createLoggers = ( const log = (message: JSONLog) => { publish(workerEvents.LOG, { workflowId, - message: { + log: { ...message, // stringify the message now so that we know it's safe // this also makes it more performant to feed up to the worker diff --git a/packages/engine-multi/test/api/execute.test.ts b/packages/engine-multi/test/api/execute.test.ts index 42331590f..deda81d22 100644 --- a/packages/engine-multi/test/api/execute.test.ts +++ b/packages/engine-multi/test/api/execute.test.ts @@ -106,7 +106,7 @@ test.serial('should emit a log event with the memory limit', async (t) => { await execute(context); const log = logs.find(({ name }) => name === 'RTE'); - t.is(log.message, JSON.stringify(['Memory limit: 666mb'])); + t.is(log.message[0], 'Memory limit: 666mb'); }); test.serial('should emit a workflow-complete event', async (t) => { diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index 
218df83a4..b6c0566a2 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -170,7 +170,7 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { const event = { workflowId, threadId: 'a', - message: { + log: { level: 'info', name: 'job', message: JSON.stringify(['oh hai']), @@ -182,7 +182,7 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { t.deepEqual(evt, { workflowId, threadId: 'a', - ...event.message, + ...event.log, }); done(); }); diff --git a/packages/engine-multi/test/worker/helper.test.ts b/packages/engine-multi/test/worker/helper.test.ts index c527ef3c1..445fac3ed 100644 --- a/packages/engine-multi/test/worker/helper.test.ts +++ b/packages/engine-multi/test/worker/helper.test.ts @@ -8,11 +8,11 @@ test('createLogger: runtime logger should emit an event on log', (t) => { const publish = (type: string, payload: any) => { t.is(type, 'worker:log'); t.is(payload.workflowId, 'x'); - t.is(payload.message.level, 'info'); - t.is(payload.message.name, 'R/T'); + t.is(payload.log.level, 'info'); + t.is(payload.log.name, 'R/T'); // The log message is always encoded into a string - const parsedMessage = JSON.parse(payload.message.message); + const parsedMessage = JSON.parse(payload.log.message); t.deepEqual(parsedMessage, [message]); }; @@ -27,7 +27,7 @@ test('createLogger: runtime logger should emit a nicely serialised error on log' const publish = (type: string, payload: any) => { t.is(type, 'worker:log'); - const parsedMessage = JSON.parse(payload.message.message); + const parsedMessage = JSON.parse(payload.log.message); t.deepEqual(parsedMessage, [ { name: 'Error', diff --git a/packages/engine-multi/test/worker/mock-worker.test.ts b/packages/engine-multi/test/worker/mock-worker.test.ts index af31a0634..679f663a1 100644 --- a/packages/engine-multi/test/worker/mock-worker.test.ts +++ b/packages/engine-multi/test/worker/mock-worker.test.ts @@ -145,10 +145,10 @@ test('Publish a job log event', async (t) => { let 
log; let id; await workers.exec('run', [plan], { - on: ({ workflowId, type, message }) => { + on: ({ workflowId, type, log: _log }) => { if (type === e.LOG) { didFire = true; - log = message; + log = _log; id = workflowId; } }, From 2fb6493aae2d4a18d4c9f6f2b7dcbbc564fb548d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 10:47:54 +0000 Subject: [PATCH 031/128] worker: simplify logging --- packages/ws-worker/src/api/execute.ts | 7 +++++-- packages/ws-worker/src/events/step-start.ts | 2 +- packages/ws-worker/src/types.d.ts | 4 ++-- packages/ws-worker/src/util/log-final-reason.ts | 2 +- 4 files changed, 9 insertions(+), 6 deletions(-) diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 404cb33f9..9ce817bf4 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -207,10 +207,13 @@ export function onJobLog({ channel, state }: Context, event: JSONLog) { const log: RunLogPayload = { run_id: state.plan.id!, // The message body, the actual thing that is logged, - // is always encoded into a string + // may be always encoded into a string // Parse it here before sending on to lightning // TODO this needs optimising! - message: JSON.parse(event.message), + message: + typeof event.message === 'string' + ? 
JSON.parse(event.message) + : event.message, source: event.name, level: event.level, timestamp: timeInMicroseconds.toString(), diff --git a/packages/ws-worker/src/events/step-start.ts b/packages/ws-worker/src/events/step-start.ts index e95a56245..9703fb0e5 100644 --- a/packages/ws-worker/src/events/step-start.ts +++ b/packages/ws-worker/src/events/step-start.ts @@ -58,7 +58,7 @@ export default async function onStepStart( await onJobLog(versionLogContext, { time, - message: JSON.stringify([versionMessage]), + message: [versionMessage], level: 'info', name: 'VER', }); diff --git a/packages/ws-worker/src/types.d.ts b/packages/ws-worker/src/types.d.ts index 983e84a1c..8cc0709dd 100644 --- a/packages/ws-worker/src/types.d.ts +++ b/packages/ws-worker/src/types.d.ts @@ -115,7 +115,7 @@ export interface Channel extends PhxChannel { } // override the JSON log typing because the log message -// is always JSON encoded in a string +// might be JSON encoded in a string export type JSONLog = Omit & { - message: string; + message: string | any[]; }; diff --git a/packages/ws-worker/src/util/log-final-reason.ts b/packages/ws-worker/src/util/log-final-reason.ts index 7f24314e5..aaa37c492 100644 --- a/packages/ws-worker/src/util/log-final-reason.ts +++ b/packages/ws-worker/src/util/log-final-reason.ts @@ -12,7 +12,7 @@ export default async (context: Context, reason: ExitReason) => { await onJobLog(context, { time, - message: JSON.stringify([message]), + message: [message], level: 'info', name: 'R/T', }); From 7cd9e5e8fbf16c1c33ca5fb4ab954724914b2594 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 11:03:20 +0000 Subject: [PATCH 032/128] tiny tidyups --- packages/engine-multi/src/api/lifecycle.ts | 2 +- packages/engine-multi/src/worker/thread/run.ts | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index f1fefe9b4..68dcae76a 100644 --- 
a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -121,7 +121,7 @@ export const log = ( const { threadId } = event; if (event.log.name !== 'JOB') { - // Forwrad the log event to the engine's logger + // Forward the log event to the engine's logger // Note that we may have to parse the serialized log string const proxy = { ...event.log, diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index fa23a2c02..b6af70c87 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -29,7 +29,6 @@ register({ run: (plan: ExecutionPlan, runOptions: RunOptions) => { const { adaptorPaths, whitelist, sanitize, statePropsToRemove } = runOptions; - // @ts-ignore const { logger, jobLogger, adaptorLogger } = createLoggers( plan.id!, sanitize, @@ -39,8 +38,7 @@ register({ // Save the debug function so that we can use it const debug = console.debug; - // override console.log - // any console.log statements will now get treated as adaptor logs + // override console: any console.log statements will now get treated as adaptor logs console = adaptorLogger; // Leave console.debug for local debugging From 909cf3e129a3621fcc27de4d2ca38c1298070d14 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 11:59:59 +0000 Subject: [PATCH 033/128] remove old docs --- docs/future/README.md | 11 --- docs/future/diagrams/README.md | 17 ----- .../diagrams/core-cli-requirements.drawio | 1 - .../future/diagrams/core-cli-requirements.svg | 3 - .../diagrams/core-compilation-steps.drawio | 1 - .../diagrams/core-compilation-steps.svg | 3 - .../diagrams/core-execution-steps.drawio | 1 - docs/future/diagrams/core-execution-steps.svg | 3 - docs/future/diagrams/kit-components.drawio | 1 - docs/future/diagrams/kit-components.svg | 3 - docs/future/editor.md | 25 ------- docs/future/history.md | 73 ------------------- docs/future/kit-components.md | 21 
------ 13 files changed, 163 deletions(-) delete mode 100644 docs/future/README.md delete mode 100644 docs/future/diagrams/README.md delete mode 100644 docs/future/diagrams/core-cli-requirements.drawio delete mode 100644 docs/future/diagrams/core-cli-requirements.svg delete mode 100644 docs/future/diagrams/core-compilation-steps.drawio delete mode 100644 docs/future/diagrams/core-compilation-steps.svg delete mode 100644 docs/future/diagrams/core-execution-steps.drawio delete mode 100644 docs/future/diagrams/core-execution-steps.svg delete mode 100644 docs/future/diagrams/kit-components.drawio delete mode 100644 docs/future/diagrams/kit-components.svg delete mode 100644 docs/future/editor.md delete mode 100644 docs/future/history.md delete mode 100644 docs/future/kit-components.md diff --git a/docs/future/README.md b/docs/future/README.md deleted file mode 100644 index 09a65648f..000000000 --- a/docs/future/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Future -====== - -A collection of docs describing and outlining the history of OpenFn internals -and ideas for the future. - -## Table of Contents - -- [History of Core](history.md) -- [Kit Components](kit-components.md) - diff --git a/docs/future/diagrams/README.md b/docs/future/diagrams/README.md deleted file mode 100644 index 2c2125cf4..000000000 --- a/docs/future/diagrams/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Diagrams - -Diagrams are created using [draw.io](https://app.diagrams.net/). - -Editing diagrams can be done in the browser via the Draw.io web app, however -in order to programmically export the files to SVG for use in markdown -it is suggested to install the desktop version. - -See [here](https://github.com/jgraph/drawio-desktop/releases) to get the -appropriate version for your OS. - -## Exporting to SVG - -1. Install draw.io -2. 
Export the diagram to SVG: - `drawio --export --format svg` - diff --git a/docs/future/diagrams/core-cli-requirements.drawio b/docs/future/diagrams/core-cli-requirements.drawio deleted file mode 100644 index b065be4d7..000000000 --- a/docs/future/diagrams/core-cli-requirements.drawio +++ /dev/null @@ -1 +0,0 @@ -7VnbctowEP0aHsP4BoTHBEI7nbaTSSbT5KkjbMVWIluuLAfTr+8KS75gg8kEhtDkCe9qZUt7zlmvTM+ehNkXjuLgB/Mw7VmGl/Xsac+yTNOw4Ed6lspjWIPc43PiKV/puCV/sQ5U3pR4OKkFCsaoIHHd6bIowq6o+RDnbFEPe2S0/tQY+bjhuHURbXp/EU8EuffcGpX+r5j4gX6yORznIyHSwWonSYA8tqi47KuePeGMifwqzCaYyuzpvOTzZhtGi4VxHIldJvjf7qz7+Ma4mV5cTSN2d3n3h5+ZQ7U4sdQ7xh4kQJmMi4D5LEL0qvRecpZGHpa3NcEqY74zFivnExZiqdBEqWDgCkRI1SismC/vwTD6QAdlP0hbG9OsZi2V9cgiMUMhodIxYSknmMPqf+LFKtS7kIjD0Jwy9zl3zQjVj00EZ88FjADAZb55ueONSVWuBB7m4i2ZVEQXiPtYbMt4iT2oBrMQwwZhIscUCfJSXwhS7PWLuBJguFAYvwJvtcoXRFP1pJ41pLDeyzlc+GKVJSSw9sJDioEikmvPU8KiBn+aWa4gDyKIZVyY+bJg9B8pW7gB4qLvMTcNIetywiIgAt/GaJXwBQTKmTmZFGO2EuEFc0FAwheU+BGMC0lLhbUcw9l2tJvg6Alay7qaDZW9KEuDaShfUCkLOm7veJqDI+v3f5GvfRLytbvli7OY4yQhUpm7afgjKdgaDd6bgkdHVvDoqBLGGRHVTgDMB71GuC5XIQ29iD3qfnwSuh936x567xS4AVCgcMPre13mSsmQNkQppgwEHa6JvYFXjDmBPQHoaxOvy4FW9dcpi7iruGlatdJgd3FsD0XAMetFwBq3FIFBSxFwDlYEjG6AXcZ3bMtwht1UoLmkwyyCM5nE3eUkFg0O1FFpw63CBjRPGE0FvijBq0PptFLm0HBaa12Zc96EszhPVuEsCv3+8Tw/alEvjIde/Yx1en2ZqQ/9By/Qq6mwU7SsBMSMRCKp3PlaOkruDZ069wbGYI08+R1LKhVLewO7zO5q8UiAOb8/z3Ibq8bQeHdnubbD+RogPhSJeOPu1UewVeWvfnp6xavRXsuK05IVqyUr54fKyrA7KSfN0i1U2PzGOzZVnU9Qmh+H1pRzbIwG3S8JfWbY5bDwYeDUh4Jj42c6R+0hjb5j29U+8szoG12N5MqqnAS7u8uWLwJd/eYeu8vBjs3l+I295QYigFn+/5N3h+XfaPbVPw== \ No newline at end of file diff --git a/docs/future/diagrams/core-cli-requirements.svg b/docs/future/diagrams/core-cli-requirements.svg deleted file mode 100644 index 628d414a5..000000000 --- a/docs/future/diagrams/core-cli-requirements.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
state
json
state...
expression
js
expression...
Module Name
Module Name
core
executable/node script
core...
final_state
json
final_state...
Module
Module
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/future/diagrams/core-compilation-steps.drawio b/docs/future/diagrams/core-compilation-steps.drawio deleted file mode 100644 index 04d2d0563..000000000 --- a/docs/future/diagrams/core-compilation-steps.drawio +++ /dev/null @@ -1 +0,0 @@ -7ZpLc5tADIB/jWfaQz08DNhH20n6mD4OOTQ5bkCGbReWLEuM++urhcWYkIfblEAzOQVptSAkfRIQT+x1XLwXJI2+8ADYxDKCYmKfTCzLNA0L/yjNTmsMy6k0oaCB1jWKc/oLakOtzWkAWctQcs4kTdtKnycJ+LKlI0Lwbdtsw1n7qikJoaM49wnrar/TQEaVdm55jf4D0DCqr2y6i2olJrWxvpMsIgHfHqjs04m9FpzL6igu1sBU9Oq4VPvO7lndOyYgkcds+Mi2gQfezPj2iQdOsrxerON35lw7J3f1HUOAAdAiFzLiIU8IO220K8HzJAB1WhOlxuYz56lW/gApdzqbJJccVZGMmV7d8ESekZgyVRVrngsKAp34ChieFSTBUiUOl64Y939WqjPK6t2ZFPznPhsYx1V1D8rxe2OjVRlezIcHAqLrVRIRgnzAbrbPINY+8Bik2OE+AYxIetP2g+gaDPd2TZrwQGfqD7KmnbwhLK+TVqQCsozypJPPbrgOMoFFmSq7uAgVwNMN41s/IkJOA+7nMYZPbdhGVMJ5SsrIbdFQ7aySa0wRaHt1A0JSpGbJaJigWqpKuFUo2mm0hOLhPHXjWm9wNEy6nVi2lrcNm2YNXHTApWv0lArTegWoFRD7SICcIQGyOwCRgKSSixdNjzM+ehZD0IPhEruLMvxOLV4qsRZOipa009KIqZsdSZ07JHWzLnU+F92J1U7wXfgcJJNcZZzlEpbC1wkvtY00uzP2D+ayh0llu0ey5vTGmv3K2j9hzTmStfmQrDkd1gRc51TAILRleEKahCh5z8De7Tk3Ava8QdgrqLw4OL5sMESpAU8JNXcNry1aG3j/P17dI3n1huTV7c7GTA7Cag/DbzY8gO6ww+/FwOT9D8PP68B0RpNAXancq16vLIPBTfmxcmK5TOqgqy+ERN+/e52rT3Ordvz3ajwKy2U85+n+60tWnw39rk5YWQ08dd1/w7Xt2FNnbGTPhhytzTi9PFh5bLS+mEfh+ZHdYDFkN5h3usE6AowUbtso5yNQvmCq1MA1aFIKKRZFNoJn5R4eh0fArDMos3/zOGxOXsoEXxzJrHlPVT0PtIsOtKucMjXDEzWFjc7YRY6J4peoV1zDx2UIyhrHGD55yL/JJJHwdpzDvac2sZcfaxOL3tqE0SmC7yp8ukk/MaeUbmBMCe3hvcvxhu70+39+v3b65+/0Zv2DiUdbvXF3WT1Pq6/d7AdzKMBHGN9Mp9NR9e9eXsf6BB7F5scq5drBb37s098= \ No newline at end of file diff --git a/docs/future/diagrams/core-compilation-steps.svg b/docs/future/diagrams/core-compilation-steps.svg deleted file mode 100644 index 249c1d463..000000000 --- a/docs/future/diagrams/core-compilation-steps.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
expression
expression
adaptor
adaptor
acorn
acorn
require
require
ast
ast
Find all top level CallExpressions
Find all top level...
Check if they exist in exports
Check if they exis...
Build new CallExpressions that are called with (state)
Build new CallExpr...
Wrap in iife
Wrap in iife
Wrap in execute(...)
Wrap in execute(...)
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/future/diagrams/core-execution-steps.drawio b/docs/future/diagrams/core-execution-steps.drawio deleted file mode 100644 index d2940fa1b..000000000 --- a/docs/future/diagrams/core-execution-steps.drawio +++ /dev/null @@ -1 +0,0 @@ -7ZpZc5swEIB/jR+TAWFw+mg7STOdHjPJQ483GdagRiAihI376ysZiSM4rts4kdvpU9jVvbvf6nBG3jyt3nKcJx9YBHSEnKgaeZcjhFzXQfKP0my0xkF+rYk5ibSuVdyRH2Aqam1JIih6FQVjVJC8rwxZlkEoejrMOVv3qy0Z7Y+a4xgGirsQ06H2M4lEUmsv0KTV3wCJEzOyG7ypS1JsKuuVFAmO2Lqj8q5G3pwzJuqvtJoDVdYzdqnbXT9R2kyMQyYOafAtuz1b3ny6viuDKYsfPrwr08sz3UshNmbBEMn1a5FxkbCYZZhetdoZZ2UWgerVlVJb5z1juVZ+ByE22pm4FEyqEpFSXbpkmbjGKaEqKOas5AS4nMRHkNaZQRZNld9k0YKy8L5WXRNqWheCs/vGGdKMs3oNauJPmsasUw4Wwh576HAVmMcg9tQLGgfK0AeWguAb2Y4DxYKs+vPAOgTjpl7rJfmhHfUbTtOTXGFa6pFGKKByurOF/IjF1khYgNHKQZqCpiY3mu8FywZRMDRyx38yknNVL61iRf35krJ1mGAuziMWlqk0umqwToiAuxxv7b2WFVXLOiScc5kFvNkKuCAStSklcSbVQsXPo/DaGyvaDrIbqPa7fugq08DXeOoEhTwtr1vaXYNw0iE9cF7Iu95/JHv2GB+I5IVNJMcDJHGEc8H46ZJ1BHj8k4PHtwGPtBbffNla3zfiVyUa4bLqSRstnTB0wYHQuc7uAHkd6oIBdbeAIzVSFtULL2AQEH1372Kp41q8KBgtBUx5qN2/1bbSeKcnep69AboChePL7FhecCB0/ktBh6ycIhvq3B5zLYJ/Qt3Qlb/k8IjUXRxIHbJK3cWAOg4PJeF2UCtkhySLpTR5hePi4x3PPnxvrLBXEfGl890hT0oteEow3LW75BF5tbtLmkeKX2+TyCawZpodYqd5Dts90twHlZXV+wjWCw4eSvUwMesbvFE/dc+suzFXTUfGjTSj3I0XrLKSHV5gvx1bRx4Fe5kHumDr5+O+7UaK5i7xz5953UMff9yJVZqHzz+3ZaaMUOUcioKo5xyHZJbJa/fl4Dgcer537p8Yie5k4IvPnKiMKIOhKKlQFlCm32bJOh1GpLj/W3Oh5497HvAntj2A0Gvkwt2Jb2I18+3Iz0dMhujgo43VuwgaHm3mHGracJ0UISxFnRObdOiURQ3mM08/qxQ9efb5N1Ou/fsOGp/Q4eeYTw+Wf/jy/oqrjJlmh/eQpTmhED06AZ3qs3sLaQfZWw1O0Krew9JojkDy43eLg49Of/BSL8X2V+1tWeefA7yrnw== \ No newline at end of file diff --git a/docs/future/diagrams/core-execution-steps.svg b/docs/future/diagrams/core-execution-steps.svg deleted file mode 100644 index 77bf2d0df..000000000 --- a/docs/future/diagrams/core-execution-steps.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
state
json
state...
adaptor
adaptor
Read and parse
Read and parse
require
require
Append state to sandbox
Append state to sand...
Run expression in sandbox
Run expression in...
Write resulting state to disk
Write resulting stat...
Create an execution sandbox using vm2
Create an executio...
compiled expression
compiled expression
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/future/diagrams/kit-components.drawio b/docs/future/diagrams/kit-components.drawio deleted file mode 100644 index 845a5079b..000000000 --- a/docs/future/diagrams/kit-components.drawio +++ /dev/null @@ -1 +0,0 @@ -7Vvdd+IoFP9rfKwnAROTx9bW6c7p7M45nd2Z2TdMMDJNQoZgrf3rlxhIzIc2Y6PonH0yXOAC9+MH94IDOIlePjCULD5RH4cDYPgvA3g7AMA0oCl+Mso6p4wdkBMCRnzZqCQ8klesekrqkvg4rTTklIacJFWiR+MYe7xCQ4zRVbXZnIbVURMU4Abh0UNhk/qV+HwhqabtlhX3mAQLObQDxnlFhFRjuZJ0gXy62iLBuwGcMEp5/hW9THCYCU/JJe833VFbTIzhmHfp8PjxJ/iLOl8+fjFfrX/+/fuVxPgK2DmbZxQu5YrlbPlaiYDRZezjjIs5gDerBeH4MUFeVrsSShe0BY9CWZ1yRp8KUYlF3iDmSbWKCYriLKXhkuPrgpx1k7PAjOOXneszC6kJc8M0wpytRRPVYSQFLS3NcmR5taU31WaxpTLTkkQkbSUoeJfiFB9Sor8iXfPo0u1BcCO3KjgIWwRntwgOuMcSnOk25IR94ZeySBlf0IDGKLwrqTdVSZZtHihNJPEH5nwtzQ4tOa1KF78Q/k18G8OxJYvft6puM3kZqrBWhVgs+Nt24fuGBbBUuey3KamOcxrzKYpImBHucfiMOfHQRsmI8esMu0RFTGOsaFOSSVGy8lWLWUi9p5wkG+yzlEyM++1ESJ0umYf3qUcCMWIB5m/af9PwGA4RJ8/VifTvfUbD+5hgTLAYWAzno4RTJr6G/pCnrfb2gGZiO6vYCApJEItvTwgNM0HIXE/oLbyWFRHx/dwccUpe0WzDL1NZQknMN2u0bgbW7T4L2OnUcpeTXMu9ZVuJexxqJwRcGUPDtp2cWWftSHafs4WVvOwqmlyNqwzofJ4Ko6krt5jTO0Cjoe47n2xU3C/kzoWXTWgoGGfcoOtOp67bDxQDqyo8022BYtACxfbRkBhqQeICVIdWBVYPxVS9eAkuAi/NUcOBxNHbY2RWwcszwklGuZALjWXPI8Am3OuuxhAawK3CHegFRC1Q5QrBqVAUNIxgIoI6QfmAY8xQvmUKPwnk8JcBrJZ5dsDqNAStA2m7nUIVZm4fQ80qrMqDaomqxrFRFXZEVXOsE1Wh5g20GpWcW1CiQi4VY+WzLMKv9pCrz525cyij14hasMIOuVRYltdCcpn2z2WWULqZiJUTnGH1n3hVksVXkP0iYUzr16w+ZyNmlXPKqxV5xuoUogifGX4eDjLIVLw9GiUk3PAsR1PcScnoyJvGZDKdHmfTsEbaNw2zIT6Np/Gi7gSncR+li2INPQLAqKP/Wzrdv3kwLxxUudUn6i+FtkuHm/2CL4sTNDDiJDqFv/bgly6s+aU6S2nzS+ttBd3TlAupAUOYOU7fo6hlnDwFQ4G2F6qukQo0talLT3a57yPZASjadorr8zTlXAKamloCrXNIaXWK6Xq0h67msCPLchpzaMbhExpFG6S9Lu4I/hA6oWmCvU26SYA48khIOMHNiwP9gFs/t0JLN+DCs3K5g8+tF3AzN+7ocnrjWTXN/gLaL+tkk6tO+M6Q9uz91B7rji9VDnJLL18pe5qHwp6BcUtQwFDUtySPmPY1a/dpI6BdwqP/kfDtdKCcpkoIDk6VDbS7oifQiZ6g93Qgw8jjV7mbnwQ9j/cKrObxlvaLHrA7OXCgtnD4dPUj/d00Zdu6NQX1ZFcPwLxLwfM+sbnzydbRis29n2y3rlVO4e9HvE8Z17FZu8eDZvBfxvwPFPkkDs492q9LVX96VT2J0fy04
RwSru332+AM77ffi5o7HoM6NeMc1YwuX4HsVdpdg1H9EgE6oyqjfIUNRn09i1LDtz8m3uQEfRyh2G+19N/nITF48yGxCR05yDttR72cKd7A1UznkDdwolj+4yZvXv5vCd79Bw== \ No newline at end of file diff --git a/docs/future/diagrams/kit-components.svg b/docs/future/diagrams/kit-components.svg deleted file mode 100644 index 110930044..000000000 --- a/docs/future/diagrams/kit-components.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
retrieve adaptor .d.ts
retrieve adaptor .d.ts
Editor
Editor
describe adaptor
describe adaptor
Code Generator Widget
Code Generator Widget
analyzer
Prev. "compiler"
analyzer...
Module
on npm
Module...
Hosted Files
on unpkg.com
Hosted Files...
Common Adaptor Introspection Facilities
Common Adaptor Intro...
Typescript
Typescript
Workflow Diagram
Workflow Diagram
react-flow
react-flow
elk-js
elk-js
compiler
compiler
Adaptor Loading Facilities
Adaptor Loading Faci...
retrieve on demand
retrieve on demand
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/future/editor.md b/docs/future/editor.md deleted file mode 100644 index 216bd8eab..000000000 --- a/docs/future/editor.md +++ /dev/null @@ -1,25 +0,0 @@ -## Editor - -### Monaco - -**Adding libraries to Moanco** - -```ts -monaco.languages.typescript.typescriptDefaults.addExtraLib('const arr = [];') -``` - -**Hiding Editor Lines** - -https://github.com/microsoft/monaco-editor/issues/45#issuecomment-1159168677 - -```ts -interface IMyStandaloneCodeEditor extends monaco.editor.IStandaloneCodeEditor { - setHiddenAreas(range: monaco.IRange[]): void; -} - -... - -const casted = editor as IMyStandaloneCodeEditor; -const range = new monaco.Range(1, 0, 1, 0); -casted.setHiddenAreas([range]); -``` \ No newline at end of file diff --git a/docs/future/history.md b/docs/future/history.md deleted file mode 100644 index 99f1af56b..000000000 --- a/docs/future/history.md +++ /dev/null @@ -1,73 +0,0 @@ -# History of Core - -## How core works - -OpenFn [`core`](https://github.com/OpenFn/core) is an npm module, -that both transpiles and executes a job based on the following inputs: - -- A state file -- An adaptor path or module name -- An output path for "final state" - -![](diagrams/core-cli-requirements.svg) - -## Compilation - -The first thing that core does is try to compile an expression. -Since we allow users to write function calls -(without having to write require or import statements), -we have to transpile the code to be able to reference the adaptor functions. 
- -![](diagrams/core-compilation-steps.svg) - -The process outlined above would result in the transformation shown below: - -**Input** - -```js -fn((state) => { - return { ...state, counter: 1 }; -}); - -fn(({ counter, ...rest }) => { - return { ...rest, counter: counter + 1 }; -}); -``` - -**Output** - -```js -fn((state) => { - console.log(state); - return state; -})(function () { - return execute( - fn((state) => { - return { ...state, counter: 1 }; - }), - fn(({ counter, ...rest }) => { - return { ...rest, counter: counter + 1 }; - }), - fn((state) => { - console.log(state); - return state; - }) - )(state); -})(); -``` - -The execute function is an async reducer (using vanilla promises, -as async/await and generators were in Stage 0 at the time of implementation). -This pattern allows users to write code that ‘feels’ synchronous but is -executed asynchronously. - -## Execution - -The execution environment ties all the work together. - -![](diagrams/core-execution-steps.svg) - -It’s important to note that code is executed in a sandboxed environment, -where the vast majority of NodeJS root functionality is not available. -We also check in the compilation step that function calls that are not in -the ‘scope’ of our sandbox and throw errors in these cases. diff --git a/docs/future/kit-components.md b/docs/future/kit-components.md deleted file mode 100644 index 96bd0745f..000000000 --- a/docs/future/kit-components.md +++ /dev/null @@ -1,21 +0,0 @@ -Kit Components -============== - -![Draft Status](https://img.shields.io/badge/status-draft-red) - - -![](diagrams/kit-components.svg) - -## Analyzer - -See [here](../../packages/compiler/) - -## Compiler - -Produce transpiled/transformed expressions - -To be used from the worker to compile a job (or set of jobs) in the required -structure. - -Would contain all the transforms, and the helpers borne from creating the -transforms. 
From 9956032aa6ec3f44d636769f6d1c90caec8d91c1 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 13:05:44 +0000 Subject: [PATCH 034/128] lexicon: start building a central lexicon of definitions --- packages/lexicon/README.md | 5 + packages/lexicon/core.d.ts | 141 +++++++++++++++++++++++++ packages/lexicon/index.d.ts | 1 + packages/lexicon/lightning.d.ts | 3 + packages/lexicon/package.json | 7 ++ packages/runtime/package.json | 1 + packages/runtime/src/runtime.ts | 3 +- packages/runtime/src/types.ts | 94 +++++------------ packages/runtime/src/util/log-error.ts | 5 +- pnpm-lock.yaml | 5 + 10 files changed, 194 insertions(+), 71 deletions(-) create mode 100644 packages/lexicon/README.md create mode 100644 packages/lexicon/core.d.ts create mode 100644 packages/lexicon/index.d.ts create mode 100644 packages/lexicon/lightning.d.ts create mode 100644 packages/lexicon/package.json diff --git a/packages/lexicon/README.md b/packages/lexicon/README.md new file mode 100644 index 000000000..7fc062f9b --- /dev/null +++ b/packages/lexicon/README.md @@ -0,0 +1,5 @@ +The lexicon is a central repositoty of key type and word definitions. + +It's a types repo and glossary at the same time. + +TODO: should it also capture constants for cross-package events? diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts new file mode 100644 index 000000000..6351d0bdf --- /dev/null +++ b/packages/lexicon/core.d.ts @@ -0,0 +1,141 @@ +type UUID = string; + +/** + * An execution plan is a portable definition of a Work Order, + * or, a unit of work to execute + */ +export type ExecutionPlan = { + id?: UUID; // this would bet the run (nee attempt) id + workflow: Workflow; + options: any; +}; + +/** + * A workflow is just a series of steps, executed start to finish + */ +export type Workflow = { + id?: UUID; // unique id used to track this workflow. Could be autogenerated + name: string; // user-friendly name. 
CLI can derive this from file names + steps: Step[]; +}; + +/** + * Options which can be set on a workflow as part of an execution plan + */ +export type WorkflowOptions = { + // Both numbers in minutes maybe + timeout?: number; + stepTimeout?: number; + start?: StepId; + intialState?: State; +}; + +export type StepId = string; + +/** + * A thing to be run as part of a workflow + * (usually a job) + */ +export interface Step { + id: StepId; // is this really required? It could be generated + name?: string; // user-friendly name used in logging + + next?: string | Record; + previous?: StepId; +} + +/** + * Not actually keen on the node/edge semantics here + */ +export type StepEdge = + | boolean + | string + | { + condition?: string; // Javascript expression (function body, not function) + label?: string; + disabled?: boolean; + }; + +/** + * A type of Step which executes code + */ +export interface Job extends Step { + adaptor?: string; + expression: string; + configuration?: object | string; + state?: Omit | string; +} + +/** + * A no-op type of Step + */ +export interface Trigger extends Step {} + +/** + * A raw openfn-js script to be executed by the runtime + * + * Can be compiled as part of a job + */ +export type Expression = string; + +/** + * An expression which has been compiled, and so includes import and export statements + */ +export type CompiledExpression = Expression; + +export declare interface State { + // Core state props + configuration?: C; + data?: S; + errors?: Record; + + // Added by common + references?: Array; + + // Typically used by other adaptors + index?: number; + response?: any; + query?: any; + + [other: string]: any; +} + +/** + * An operation function that runs in an Expression + */ +export declare interface Operation | State> { + (state: State): T; +} + +export type ErrorReport = { + type: string; // The name/type of error, ie Error, TypeError + message: string; // simple human readable message + stepId: StepId; // ID of the 
associated job + jobId?: StepId; // deprecated + error: Error; // the original underlying error object + + code?: string; // The error code, if any (found on node errors) + stack?: string; // not sure this is useful? + data?: any; // General store for related error information +}; + +/* + + +run (workflow, options) + + +some options relate to the workflow: +- initial state +- start node +- timeout + +others are system settings +- linker (paths, whitelist) +- statePropsToRemove +- loggers +- sandbox rules +- callbacks (notify, lazy loaders) + +Some of those system things might just be defaults - although maybe it's cleaner for the engine to do that +*/ diff --git a/packages/lexicon/index.d.ts b/packages/lexicon/index.d.ts new file mode 100644 index 000000000..4b0e04137 --- /dev/null +++ b/packages/lexicon/index.d.ts @@ -0,0 +1 @@ +export * from './core'; diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts new file mode 100644 index 000000000..499e0192b --- /dev/null +++ b/packages/lexicon/lightning.d.ts @@ -0,0 +1,3 @@ +/** + * Type definitions for Lightning and Worker interfaces + */ diff --git a/packages/lexicon/package.json b/packages/lexicon/package.json new file mode 100644 index 000000000..55e20d513 --- /dev/null +++ b/packages/lexicon/package.json @@ -0,0 +1,7 @@ +{ + "name": "@openfn/lexicon", + "version": "1.0.0", + "description": "Central repo of names and type definitions", + "author": "Open Function Group ", + "license": "ISC" +} diff --git a/packages/runtime/package.json b/packages/runtime/package.json index f4c1be126..d39198a72 100644 --- a/packages/runtime/package.json +++ b/packages/runtime/package.json @@ -27,6 +27,7 @@ "license": "ISC", "devDependencies": { "@openfn/language-common": "2.0.0-rc3", + "@openfn/lexicon": "workspace:^", "@types/mock-fs": "^4.13.1", "@types/node": "^18.15.13", "@types/semver": "^7.5.0", diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 6d91ea408..9eca3f845 
100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -1,9 +1,8 @@ import { createMockLogger, Logger } from '@openfn/logger'; - +import type { State } from '@openfn/lexicon'; import type { Operation, ExecutionPlan, - State, JobNodeID, ExecutionCallbacks, } from './types'; diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index 296907941..9395661ad 100644 --- a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -1,4 +1,5 @@ // TMP just thinking through things +import { State } from '@openfn/lexicon'; import { Logger } from '@openfn/logger'; import { Options } from './runtime'; @@ -12,80 +13,39 @@ import { NOTIFY_STATE_LOAD, } from './events'; -// I dont think this is useufl? We can just use error.name of the error object -export type ErrorTypes = - | 'AdaptorNotFound' // probably a CLI validation thing - | 'PackageNotFound' // Linker failed to load a dependency - | 'ExpressionTimeout' // An expression (job) failed to return before the timeout - | 'AdaptorException' // Bubbled out of adaptor code - | 'RuntimeException'; // Caused by an exception in a job. JobException? What about "expected" errors from adaptors? - -export type ErrorReport = { - type: string; // The name/type of error, ie Error, TypeError - message: string; // simple human readable message - jobId: JobNodeID; // ID of the associated job - error: Error; // the original underlying error object - - code?: string; // The error code, if any (found on node errors) - stack?: string; // not sure this is useful? - data?: any; // General store for related error information -}; - -export declare interface State { - configuration?: C; - state?: S; - references?: Array; - index?: number; - - // New error capture object - // Synonyms: exceptions, problems, issues, err, failures - errors?: Record; - - // Legacy error property from old platform - // Adaptors may use this? 
- error?: any[]; - - // Note that other properties written to state may be lost between jobs - [other: string]: any; -} - -export declare interface Operation | State> { - (state: State): T; -} - -export type ExecutionPlan = { - id?: string; // UUID for this plan - jobs: JobNode[]; - start?: JobNodeID; - initialState?: State | string; -}; +// export type ExecutionPlan = { +// id?: string; // UUID for this plan +// jobs: JobNode[]; +// start?: JobNodeID; +// initialState?: State | string; +// }; -export type JobNode = { - id?: JobNodeID; +// export type JobNode = { +// id?: JobNodeID; - // The runtime itself will ignore the adaptor flag - // The adaptor import should be compiled in by the compiler, and dependency managed by the runtime manager - adaptor?: string; +// // The runtime itself will ignore the adaptor flag +// // The adaptor import should be compiled in by the compiler, and dependency managed by the runtime manager +// adaptor?: string; - expression?: string | Operation[]; // the code we actually want to execute. Can be a path. +// expression?: string | Operation[]; // the code we actually want to execute. Can be a path. 
- configuration?: object | string; // credential object +// configuration?: object | string; // credential object - // TODO strings aren't actually suppored here yet - state?: Omit | string; // default state (globals) +// // TODO strings aren't actually suppored here yet +// state?: Omit | string; // default state (globals) - next?: string | Record; - previous?: JobNodeID; -}; +// next?: string | Record; +// previous?: JobNodeID; +// }; -export type JobEdge = - | boolean - | string - | { - condition?: string; // Javascript expression (function body, not function) - label?: string; - disabled?: boolean; - }; +// export type JobEdge = +// | boolean +// | string +// | { +// condition?: string; // Javascript expression (function body, not function) +// label?: string; +// disabled?: boolean; +// }; export type JobNodeID = string; diff --git a/packages/runtime/src/util/log-error.ts b/packages/runtime/src/util/log-error.ts index af13aec87..bbf6ce61f 100644 --- a/packages/runtime/src/util/log-error.ts +++ b/packages/runtime/src/util/log-error.ts @@ -1,9 +1,9 @@ import { Logger } from '@openfn/logger'; -import { ErrorReport, JobNodeID, State } from '../types'; +import type { State, ErrorReport, StepId } from '@openfn/lexicon'; export type ErrorReporter = ( state: State, - jobId: JobNodeID, + jobId: StepId, error: NodeJS.ErrnoException & { severity?: string; handled?: boolean; @@ -20,6 +20,7 @@ const createErrorReporter = (logger: Logger): ErrorReporter => { const report: ErrorReport = { type: error.subtype || error.type || error.name, jobId, + stepId: jobId, message: error.message, error: error, }; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 17e5bf7f3..ae793d257 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -413,6 +413,8 @@ importers: packages/engine-multi/tmp/repo: {} + packages/lexicon: {} + packages/lightning-mock: dependencies: '@koa/router': @@ -559,6 +561,9 @@ importers: '@openfn/language-common': specifier: 2.0.0-rc3 version: 2.0.0-rc3 + 
'@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@types/mock-fs': specifier: ^4.13.1 version: 4.13.1 From c996c9883d75a935ceb4a02176263bc42b1eb30c Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 1 Feb 2024 18:58:02 +0000 Subject: [PATCH 035/128] runtime: huge refactor of runtime core API --- packages/runtime/src/execute/compile-plan.ts | 40 +- packages/runtime/src/execute/context.ts | 7 +- packages/runtime/src/execute/expression.ts | 32 +- packages/runtime/src/execute/job.ts | 18 +- packages/runtime/src/execute/plan.ts | 15 +- packages/runtime/src/runtime.ts | 96 +- packages/runtime/src/types.ts | 67 +- packages/runtime/src/util/default-state.ts | 1 + packages/runtime/src/util/index.ts | 19 + packages/runtime/src/util/validate-plan.ts | 17 +- packages/runtime/test/context.test.ts | 42 +- packages/runtime/test/errors.test.ts | 92 +- .../runtime/test/execute/compile-plan.test.ts | 367 +++--- .../runtime/test/execute/expression.test.ts | 31 +- packages/runtime/test/execute/job.test.ts | 7 +- packages/runtime/test/execute/plan.test.ts | 1046 ++++++++--------- packages/runtime/test/memory.test.ts | 10 +- packages/runtime/test/security.test.ts | 87 +- .../test/util/{regex.ts => regex.test.ts} | 0 .../runtime/test/util/validate-plan.test.ts | 131 +-- 20 files changed, 1085 insertions(+), 1040 deletions(-) create mode 100644 packages/runtime/src/util/default-state.ts create mode 100644 packages/runtime/src/util/index.ts rename packages/runtime/test/util/{regex.ts => regex.test.ts} (100%) diff --git a/packages/runtime/src/execute/compile-plan.ts b/packages/runtime/src/execute/compile-plan.ts index f5c7291c0..30673d696 100644 --- a/packages/runtime/src/execute/compile-plan.ts +++ b/packages/runtime/src/execute/compile-plan.ts @@ -2,16 +2,16 @@ import type { CompiledExecutionPlan, CompiledJobEdge, CompiledJobNode, - ExecutionPlan, - JobEdge, } from '../types'; import compileFunction from '../modules/compile-function'; import { conditionContext, 
Context } from './context'; +import { ExecutionPlan, StepEdge, Workflow } from '@openfn/lexicon'; +import { clone, defaultState } from '../util'; const compileEdges = ( from: string, - edges: string | Record, + edges: string | Record, context: Context ) => { if (typeof edges === 'string') { @@ -55,8 +55,8 @@ const compileEdges = ( // find the upstream job for a given job // Inefficient but fine for now (note that validation does something similar) // Note that right now we only support one upstream job -const findUpstream = (plan: ExecutionPlan, id: string) => { - for (const job of plan.jobs) { +const findUpstream = (workflow: Workflow, id: string) => { + for (const job of workflow.jobs) { if (job.next) if (typeof job.next === 'string') { if (job.next === id) { @@ -69,7 +69,9 @@ const findUpstream = (plan: ExecutionPlan, id: string) => { }; export default (plan: ExecutionPlan) => { + const { workflow, options = {} } = plan; let autoJobId = 0; + const generateJobId = () => `job-${++autoJobId}`; const context = conditionContext(); @@ -89,25 +91,25 @@ export default (plan: ExecutionPlan) => { } }; - // ensure ids before we start - for (const job of plan.jobs) { + for (const job of workflow.jobs) { if (!job.id) { job.id = generateJobId(); } } - const newPlan = { - jobs: {}, - start: plan.start, - initialState: plan.initialState, - } as Pick; + const newPlan: CompiledExecutionPlan = { + workflow: { + jobs: {}, + }, + options: { + ...options, + start: options.start ?? workflow.jobs[0]?.id!, + initialState: clone(options.initialState ?? 
defaultState), + }, + }; - for (const job of plan.jobs) { + for (const job of workflow.jobs) { const jobId = job.id!; - if (!newPlan.start) { - // Default the start job to the first - newPlan.start = jobId; - } const newJob: CompiledJobNode = { id: jobId, expression: job.expression, // TODO we should compile this here @@ -123,8 +125,8 @@ export default (plan: ExecutionPlan) => { newJob.next = compileEdges(jobId, job.next!, context); }); } - newJob.previous = findUpstream(plan, jobId); - newPlan.jobs[jobId] = newJob; + newJob.previous = findUpstream(workflow, jobId); + newPlan.workflow.jobs[jobId] = newJob; } if (errs.length) { diff --git a/packages/runtime/src/execute/context.ts b/packages/runtime/src/execute/context.ts index 585567199..afe45cc52 100644 --- a/packages/runtime/src/execute/context.ts +++ b/packages/runtime/src/execute/context.ts @@ -1,5 +1,5 @@ import vm from 'node:vm'; -import type { State } from '../types'; +import type { State } from '@openfn/lexicon'; import type { Options } from '../runtime'; const freezeAll = ( @@ -15,7 +15,10 @@ const freezeAll = ( // Build a safe and helpful execution context // This will be shared by all jobs -export default (state: State, options: Pick) => { +export default ( + state: State, + options: Pick +) => { const logger = options.jobLogger ?? 
console; const globals = options.globals || {}; const context = vm.createContext( diff --git a/packages/runtime/src/execute/expression.ts b/packages/runtime/src/execute/expression.ts index 324f611bb..fed13bfad 100644 --- a/packages/runtime/src/execute/expression.ts +++ b/packages/runtime/src/execute/expression.ts @@ -1,8 +1,9 @@ import { printDuration, Logger } from '@openfn/logger'; import stringify from 'fast-safe-stringify'; +import type { Operation, State, WorkflowOptions } from '@openfn/lexicon'; + import loadModule from '../modules/module-loader'; -import { Operation, JobModule, State, ExecutionContext } from '../types'; -import { Options, TIMEOUT } from '../runtime'; +import { Options, DEFAULT_TIMEOUT_MS } from '../runtime'; import buildContext, { Context } from './context'; import defaultExecute from '../util/execute'; import clone from '../util/clone'; @@ -16,25 +17,27 @@ import { assertRuntimeError, assertSecurityKill, } from '../errors'; +import type { JobModule, ExecutionContext } from '../types'; export type ExecutionErrorWrapper = { state: any; error: any; }; +// TODO don't send the whole context because it's a bit confusing - just the options maybe? export default ( ctx: ExecutionContext, expression: string | Operation[], - initialState: State + input: State ) => new Promise(async (resolve, reject) => { let duration = Date.now(); - const { logger, opts = {} } = ctx; + const { logger, plan, opts = {} } = ctx; try { - const timeout = opts.timeout ?? TIMEOUT; + const timeout = plan.options.timeout ?? 
DEFAULT_TIMEOUT_MS; // Setup an execution context - const context = buildContext(initialState, opts); + const context = buildContext(input, opts); const { operations, execute } = await prepareJob( expression, @@ -61,19 +64,19 @@ export default ( } // Note that any errors will be trapped by the containing Job - const result = await reducer(initialState); + const result = await reducer(input); clearTimeout(tid); logger.debug('Expression complete!'); duration = Date.now() - duration; - const finalState = prepareFinalState(opts, result, logger); + const finalState = prepareFinalState(plan.options, opts, result, logger); // return the final state resolve(finalState); } catch (e: any) { // whatever initial state looks like now, clean it and report it back - const finalState = prepareFinalState(opts, initialState, logger); + const finalState = prepareFinalState(plan.options, opts, input, logger); duration = Date.now() - duration; let finalError; try { @@ -150,11 +153,16 @@ const assignKeys = ( // TODO this is suboptimal and may be slow on large objects // (especially as the result get stringified again downstream) -const prepareFinalState = (opts: Options, state: any, logger: Logger) => { +const prepareFinalState = ( + options: WorkflowOptions, + opts: Options, // TODO remove this with strict mode + state: any, + logger: Logger +) => { if (state) { let statePropsToRemove; - if (opts.hasOwnProperty('statePropsToRemove')) { - ({ statePropsToRemove } = opts); + if (options.hasOwnProperty('statePropsToRemove')) { + ({ statePropsToRemove } = options); } else { // As a strict default, remove the configuration key // tbh this should happen higher up in the stack but it causes havoc in unit testing diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/job.ts index b5880a59d..2af6ad88a 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/job.ts @@ -1,16 +1,12 @@ // TODO hmm. 
I have a horrible feeling that the callbacks should go here // at least the resolvesrs -import executeExpression, { ExecutionErrorWrapper } from './expression'; +import type { State, StepId } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; +import executeExpression, { ExecutionErrorWrapper } from './expression'; import clone from '../util/clone'; import assembleState from '../util/assemble-state'; -import type { - CompiledJobNode, - ExecutionContext, - JobNodeID, - State, -} from '../types'; -import { Logger } from '@openfn/logger'; +import type { CompiledJobNode, ExecutionContext } from '../types'; import { EdgeConditionError } from '../errors'; import { NOTIFY_INIT_COMPLETE, @@ -85,8 +81,8 @@ const calculateNext = (job: CompiledJobNode, result: any, logger: Logger) => { const executeJob = async ( ctx: ExecutionContext, job: CompiledJobNode, - initialState: State = {} -): Promise<{ next: JobNodeID[]; state: any }> => { + input: State = {} +): Promise<{ next: StepId[]; state: any }> => { const { opts, notify, logger, report } = ctx; const duration = Date.now(); @@ -107,7 +103,7 @@ const executeJob = async ( ); const state = assembleState( - clone(initialState), + clone(input), configuration, globals, opts.strict diff --git a/packages/runtime/src/execute/plan.ts b/packages/runtime/src/execute/plan.ts index b4085d2e3..792800ae3 100644 --- a/packages/runtime/src/execute/plan.ts +++ b/packages/runtime/src/execute/plan.ts @@ -1,19 +1,21 @@ import type { Logger } from '@openfn/logger'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; + import executeJob from './job'; import compilePlan from './compile-plan'; -import type { ExecutionPlan } from '../types'; import type { Options } from '../runtime'; import validatePlan from '../util/validate-plan'; import createErrorReporter from '../util/log-error'; import { NOTIFY_STATE_LOAD } from '../events'; +import { CompiledExecutionPlan } from '../types'; const executePlan = async ( plan: 
ExecutionPlan, opts: Options, logger: Logger ) => { - let compiledPlan; + let compiledPlan: CompiledExecutionPlan; try { validatePlan(plan); compiledPlan = compilePlan(plan); @@ -24,7 +26,9 @@ const executePlan = async ( throw e; } - let queue: string[] = [opts.start || compiledPlan.start]; + const { workflow, options } = compiledPlan; + + let queue: string[] = [options.start]; const ctx = { plan: compiledPlan, @@ -34,13 +38,12 @@ const executePlan = async ( notify: opts.callbacks?.notify ?? (() => {}), }; - type State = any; // record of state returned by every job const stateHistory: Record = {}; // Record of state on lead nodes (nodes with no next) const leaves: Record = {}; - let { initialState } = compiledPlan; + let { initialState } = options; if (typeof initialState === 'string') { const id = initialState; const startTime = Date.now(); @@ -58,7 +61,7 @@ const executePlan = async ( // Right now this executes in series, even if jobs are parallelised while (queue.length) { const next = queue.shift()!; - const job = compiledPlan.jobs[next]; + const job = workflow.jobs[next]; const prevState = stateHistory[job.previous || ''] ?? 
initialState; diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 9eca3f845..a8f0b4a71 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -1,24 +1,17 @@ import { createMockLogger, Logger } from '@openfn/logger'; -import type { State } from '@openfn/lexicon'; -import type { - Operation, - ExecutionPlan, - JobNodeID, - ExecutionCallbacks, -} from './types'; +import type { ExecutionPlan } from '@openfn/lexicon'; +import type { ExecutionCallbacks } from './types'; import type { LinkerOptions } from './modules/linker'; import executePlan from './execute/plan'; -import clone from './util/clone'; -import parseRegex from './util/regex'; +import { parseRegex } from './util/index'; -export const TIMEOUT = 5 * 60 * 1000; // 5 minutes +export const DEFAULT_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes export type Options = { - start?: JobNodeID; logger?: Logger; jobLogger?: Logger; - timeout?: number; // this is timeout used per job, not per workflow + // TODO: deprecate in this work strict?: boolean; // Be strict about handling of state returned from jobs // Treat state as immutable (likely to break in legacy jobs) @@ -34,10 +27,8 @@ export type Options = { callbacks?: ExecutionCallbacks; // inject globals into the environment + // TODO leaving this here for now, but maybe its actually on the xplan? 
globals?: any; - - // all listed props will be removed from the state object at the end of a job - statePropsToRemove?: string[]; }; type RawOptions = Omit & { @@ -46,26 +37,48 @@ type RawOptions = Omit & { }; }; -const defaultState = { data: {}, configuration: {} }; - // Log nothing by default const defaultLogger = createMockLogger(); -// TODO doesn't really make sense to pass in a state object to an xplan, -// so maybe state becomes an option in the opts object -const run = ( - expressionOrXPlan: string | Operation[] | ExecutionPlan, - state?: State, - opts: RawOptions = {} -) => { +const loadPlanFromString = (expression: string, logger: Logger) => { + const plan: ExecutionPlan = { + workflow: { + jobs: [ + { + expression, + }, + ], + }, + options: {}, + }; + + logger.debug('Generated execution plan for incoming expression'); + logger.debug(plan); + + return plan; +}; + +const run = (xplan: ExecutionPlan | string, opts: RawOptions = {}) => { const logger = opts.logger || defaultLogger; + if (typeof xplan === 'string') { + xplan = loadPlanFromString(xplan, logger); + } + + if (!xplan.options) { + xplan.options = {}; + } + + const { options } = xplan; + + // TODO remove // Strict state handling by default if (!opts.hasOwnProperty('strict')) { opts.strict = true; } - if (!opts.hasOwnProperty('statePropsToRemove')) { - opts.statePropsToRemove = ['configuration']; + + if (!options.hasOwnProperty('statePropsToRemove')) { + options.statePropsToRemove = ['configuration']; } if (opts.linker?.whitelist) { opts.linker.whitelist = opts.linker.whitelist.map((w) => { @@ -76,35 +89,12 @@ const run = ( }); } - // TODO the plan doesn't have an id, should it be given one? - // Ditto the jobs? 
- let plan: ExecutionPlan; - if ( - typeof expressionOrXPlan == 'string' || - !expressionOrXPlan.hasOwnProperty('jobs') - ) { - // Build an execution plan for an incoming expression - plan = { - jobs: [ - { - expression: expressionOrXPlan, - }, - ], - } as ExecutionPlan; - logger.debug('Generated execution plan for incoming expression'); - // TODO how do we sanitise state.config? - logger.debug(plan); - } else { - plan = expressionOrXPlan as ExecutionPlan; - } - - if (state) { - plan.initialState = clone(state); - } else if (!plan.initialState) { - plan.initialState = defaultState; + // TODO change where initial state comes from (ie never from options) + if (!xplan.options.initialState) { + xplan.options.initialState = (options as any).intitialState; } - return executePlan(plan, opts as Options, logger); + return executePlan(xplan, opts as Options, logger); }; export default run; diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index 9395661ad..49b87623b 100644 --- a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -1,5 +1,10 @@ -// TMP just thinking through things -import { State } from '@openfn/lexicon'; +import { + State, + Operation, + Job, + StepId, + WorkflowOptions, +} from '@openfn/lexicon'; import { Logger } from '@openfn/logger'; import { Options } from './runtime'; @@ -13,42 +18,6 @@ import { NOTIFY_STATE_LOAD, } from './events'; -// export type ExecutionPlan = { -// id?: string; // UUID for this plan -// jobs: JobNode[]; -// start?: JobNodeID; -// initialState?: State | string; -// }; - -// export type JobNode = { -// id?: JobNodeID; - -// // The runtime itself will ignore the adaptor flag -// // The adaptor import should be compiled in by the compiler, and dependency managed by the runtime manager -// adaptor?: string; - -// expression?: string | Operation[]; // the code we actually want to execute. Can be a path. 
- -// configuration?: object | string; // credential object - -// // TODO strings aren't actually suppored here yet -// state?: Omit | string; // default state (globals) - -// next?: string | Record; -// previous?: JobNodeID; -// }; - -// export type JobEdge = -// | boolean -// | string -// | { -// condition?: string; // Javascript expression (function body, not function) -// label?: string; -// disabled?: boolean; -// }; - -export type JobNodeID = string; - export type CompiledJobEdge = | boolean | { @@ -56,16 +25,21 @@ export type CompiledJobEdge = disabled?: boolean; }; -export type CompiledJobNode = Omit & { - id: JobNodeID; - next?: Record; +export type CompiledJobNode = Omit & { + id: StepId; + next?: Record; }; +export type Lazy = string | T; + export type CompiledExecutionPlan = { - id?: string; - start: JobNodeID; - jobs: Record; - initialState?: State | string; + workflow: { + jobs: Record; + }; + options: WorkflowOptions & { + start: StepId; + initialState: Lazy; + }; }; export type JobModule = { @@ -79,7 +53,6 @@ type NotifyHandler = ( payload: NotifyEventsLookup[typeof event] ) => void; -// TODO difficulty: this is not the same as a vm execution context export type ExecutionContext = { plan: CompiledExecutionPlan; logger: Logger; @@ -143,7 +116,7 @@ export type NotifyEventsLookup = { }; export type ExecutionCallbacks = { - notify: NotifyHandler; + notify?: NotifyHandler; resolveState?: (stateId: string) => Promise; resolveCredential?: (credentialId: string) => Promise; }; diff --git a/packages/runtime/src/util/default-state.ts b/packages/runtime/src/util/default-state.ts new file mode 100644 index 000000000..4d4dc5450 --- /dev/null +++ b/packages/runtime/src/util/default-state.ts @@ -0,0 +1 @@ +export default { data: {}, configuration: {} }; diff --git a/packages/runtime/src/util/index.ts b/packages/runtime/src/util/index.ts new file mode 100644 index 000000000..1ad364095 --- /dev/null +++ b/packages/runtime/src/util/index.ts @@ -0,0 +1,19 @@ +import 
assembleState from './assemble-state'; +import clone from './clone'; +import defaultState from './default-state'; +import exec from './exec'; +import execute from './execute'; +import logError from './log-error'; +import parseRegex from './regex'; +import validatePlan from './validate-plan'; + +export { + assembleState, + clone, + defaultState, + exec, + execute, + logError, + parseRegex, + validatePlan, +}; diff --git a/packages/runtime/src/util/validate-plan.ts b/packages/runtime/src/util/validate-plan.ts index b1b058105..a700708e5 100644 --- a/packages/runtime/src/util/validate-plan.ts +++ b/packages/runtime/src/util/validate-plan.ts @@ -1,5 +1,5 @@ +import { ExecutionPlan, Job } from '@openfn/lexicon'; import { ValidationError } from '../errors'; -import { ExecutionPlan, JobNode } from '../types'; type ModelNode = { up: Record; @@ -20,16 +20,16 @@ export default (plan: ExecutionPlan) => { return true; }; -export const buildModel = (plan: ExecutionPlan) => { +export const buildModel = ({ workflow }: ExecutionPlan) => { const model: Model = {}; - const jobIdx = plan.jobs.reduce((obj, item) => { + const jobIdx = workflow.jobs.reduce((obj, item) => { if (item.id) { obj[item.id] = item; } // TODO warn if there's no id? It's usually fine (until it isn't) return obj; - }, {} as Record); + }, {} as Record); const ensureModel = (jobId: string) => { if (!model[jobId]) { @@ -48,7 +48,7 @@ export const buildModel = (plan: ExecutionPlan) => { } }; - for (const job of plan.jobs) { + for (const job of workflow.jobs) { let node = job.id ? 
ensureModel(job.id) : { up: {}, down: {} }; if (typeof job.next === 'string') { validateJob(job.next); @@ -71,9 +71,10 @@ export const buildModel = (plan: ExecutionPlan) => { }; const assertStart = (plan: ExecutionPlan) => { - if (typeof plan.start === 'string') { - if (!plan.jobs.find(({ id }) => id == plan.start)) { - throw new ValidationError(`Could not find start job: ${plan.start}`); + const { start } = plan.options; + if (typeof start === 'string') { + if (!plan.workflow.jobs.find(({ id }) => id == start)) { + throw new ValidationError(`Could not find start job: ${start}`); } } }; diff --git a/packages/runtime/test/context.test.ts b/packages/runtime/test/context.test.ts index 11909a604..0c33c81d1 100644 --- a/packages/runtime/test/context.test.ts +++ b/packages/runtime/test/context.test.ts @@ -2,21 +2,37 @@ import test from 'ava'; import run from '../src/runtime'; import { createMockLogger } from '@openfn/logger'; +import { State } from '@openfn/lexicon'; const createState = (data = {}) => ({ data, configuration: {} }); +const createPlan = (expression: string, initialState: State) => ({ + workflow: { + jobs: [ + { + expression, + }, + ], + }, + options: { + initialState, + }, +}); + test('makes parseInt available inside the job', async (t) => { - const job = ` + const expression = ` export default [ (s) => { s.data.count = parseInt(s.data.count); return s; } ];`; + const intialState = createState({ count: '22' }); + const plan = createPlan(expression, intialState); - const result = await run(job, createState({ count: '22' })); + const result = await run(plan); t.deepEqual(result.data, { count: 22 }); }); test('makes Set available inside the job', async (t) => { - const job = ` + const expression = ` export default [ (s) => { new Set(); // should not throw @@ -24,13 +40,16 @@ test('makes Set available inside the job', async (t) => { } ];`; - const result = await run(job, createState({ count: '33' })); + const state = createState({ count: '33' }); + const plan 
= createPlan(expression, state); + + const result = await run(plan); t.deepEqual(result.data, { count: '33' }); }); test("doesn't allow process inside the job", async (t) => { const logger = createMockLogger(undefined, { level: 'default' }); - const job = ` + const expression = ` export default [ (s) => { process.exit() @@ -38,9 +57,9 @@ test("doesn't allow process inside the job", async (t) => { } ];`; - const state = createState(); + const plan = createPlan(expression, createState()); - await t.throwsAsync(() => run(job, state, { logger }), { + await t.throwsAsync(() => run(plan, { logger }), { name: 'RuntimeCrash', message: 'ReferenceError: process is not defined', }); @@ -48,17 +67,14 @@ test("doesn't allow process inside the job", async (t) => { test("doesn't allow eval inside a job", async (t) => { const logger = createMockLogger(undefined, { level: 'default' }); - const job = ` + const expression = ` export default [ (state) => eval('ok') // should throw ];`; - const state = createState(); - await t.throwsAsync(() => run(job, state, { logger }), { + const plan = createPlan(expression, createState()); + await t.throwsAsync(() => run(plan, { logger }), { name: 'SecurityError', message: /Illegal eval statement detected/, }); }); - -// TODO exhaustive test of globals? 
-// TODO ensure an imported module can't access eval/process diff --git a/packages/runtime/test/errors.test.ts b/packages/runtime/test/errors.test.ts index 90a9d9d16..c1e360ab5 100644 --- a/packages/runtime/test/errors.test.ts +++ b/packages/runtime/test/errors.test.ts @@ -1,16 +1,28 @@ import test from 'ava'; import path from 'node:path'; +import type { WorkflowOptions } from '@openfn/lexicon'; + import run from '../src/runtime'; -// This is irrelevant now as state and credentials are preloaded -test.todo('lazy state & credential loading'); +const createPlan = (expression: string, options: WorkflowOptions = {}) => ({ + workflow: { + jobs: [ + { + expression, + }, + ], + }, + options, +}); test('crash on timeout', async (t) => { const expression = 'export default [(s) => new Promise((resolve) => {})]'; + const plan = createPlan(expression, { timeout: 1 }); + console.log(plan); let error; try { - await run(expression, {}, { timeout: 1 }); + await run(plan); } catch (e) { error = e; } @@ -72,24 +84,28 @@ test('crash on eval with SecurityError', async (t) => { }); test('crash on edge condition error with EdgeConditionError', async (t) => { - const workflow = { - jobs: [ - { - id: 'a', - next: { - b: { - // Will throw a reference error - condition: 'wibble', + const plan = { + workflow: { + jobs: [ + { + id: 'a', + expression: '.', + next: { + b: { + // Will throw a reference error + condition: 'wibble', + }, }, }, - }, - { id: 'b' }, - ], + { id: 'b', expression: '.' 
}, + ], + }, + options: {}, }; let error; try { - await run(workflow); + await run(plan); } catch (e) { error = e; } @@ -126,15 +142,11 @@ test('crash on blacklisted module', async (t) => { let error; try { - await run( - expression, - {}, - { - linker: { - whitelist: [/^@opennfn/], - }, - } - ); + await run(expression, { + linker: { + whitelist: [/^@opennfn/], + }, + }); } catch (e) { error = e; } @@ -202,17 +214,13 @@ test('fail on adaptor error (with throw new Error())', async (t) => { import { err } from 'x'; export default [(s) => err()]; `; - const result = await run( - expression, - {}, - { - linker: { - modules: { - x: { path: path.resolve('test/__modules__/test') }, - }, + const result = await run(expression, { + linker: { + modules: { + x: { path: path.resolve('test/__modules__/test') }, }, - } - ); + }, + }); const error = result.errors['job-1']; t.is(error.type, 'AdaptorError'); @@ -227,17 +235,13 @@ test('adaptor error with no stack trace will be a user error', async (t) => { import { err2 } from 'x'; export default [(s) => err2()]; `; - const result = await run( - expression, - {}, - { - linker: { - modules: { - x: { path: path.resolve('test/__modules__/test') }, - }, + const result = await run(expression, { + linker: { + modules: { + x: { path: path.resolve('test/__modules__/test') }, }, - } - ); + }, + }); const error = result.errors['job-1']; t.is(error.type, 'JobError'); diff --git a/packages/runtime/test/execute/compile-plan.test.ts b/packages/runtime/test/execute/compile-plan.test.ts index 23ef3518e..d3a384b8a 100644 --- a/packages/runtime/test/execute/compile-plan.test.ts +++ b/packages/runtime/test/execute/compile-plan.test.ts @@ -1,125 +1,200 @@ import test from 'ava'; -import { ExecutionPlan, JobEdge } from '../../src'; +import { ExecutionPlan, StepEdge } from '@openfn/lexicon'; import compilePlan from '../../src/execute/compile-plan'; const testPlan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: { b: true } 
}, - { id: 'b', expression: 'y' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y' }, + ], + }, + options: { + start: 'a', + }, }; -const planWithEdge = (edge: JobEdge) => - ({ - ...testPlan, - jobs: [{ id: 'a', next: { b: edge } }], - } as ExecutionPlan); +const planWithEdge = (edge: Partial) => ({ + workflow: { + jobs: [ + { + id: 'a', + expression: 'x', + next: { + b: edge, + }, + }, + { id: 'b', expression: 'y' }, + ], + }, + options: { + start: 'a', + }, +}); test('should preserve initial state as an object', (t) => { const state = { x: 123 }; const compiledPlan = compilePlan({ id: 'a', - initialState: state, - jobs: [], + workflow: { + jobs: [], + }, + options: { + initialState: state, + }, }); - t.deepEqual(state, compiledPlan.initialState); + t.deepEqual(state, compiledPlan.options.initialState); }); test('should preserve initial state a string', (t) => { const compiledPlan = compilePlan({ id: 'a', - initialState: 'abc', - jobs: [], + workflow: { + jobs: [], + }, + options: { + // @ts-ignore + initialState: 'abc', + }, + }); + + // @ts-ignore + t.is(compiledPlan.options.initialState, 'abc'); +}); + +test('should preserve the start option', (t) => { + const compiledPlan = compilePlan({ + id: 'a', + workflow: { + jobs: [{ id: 'a', expression: 'a' }], + }, + options: { + start: 'a', + }, + }); + + t.is(compiledPlan.options.start, 'a'); +}); + +test('should preserve arbitrary options', (t) => { + const compiledPlan = compilePlan({ + id: 'a', + workflow: { + jobs: [{ id: 'a', expression: 'a' }], + }, + options: { + // @ts-ignore + a: 1, + z: 2, + '-': 3, + }, + }); + + t.deepEqual(compiledPlan.options.start, { + a: 1, + z: 2, + '-': 3, }); - t.is(compiledPlan.initialState, 'abc'); }); test('should convert jobs to an object', (t) => { - const compiledPlan = compilePlan(testPlan); - t.truthy(compiledPlan.jobs.a); - t.is(compiledPlan.jobs.a.expression, 'x'); + const { workflow } = compilePlan(testPlan); + 
t.truthy(workflow.jobs.a); + t.is(workflow.jobs.a.expression, 'x'); - t.truthy(compiledPlan.jobs.b); - t.is(compiledPlan.jobs.b.expression, 'y'); + t.truthy(workflow.jobs.b); + t.is(workflow.jobs.b.expression, 'y'); }); test('should set previous job with 2 jobs', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); + const { workflow } = compilePlan(plan); + t.is(workflow.jobs.a.previous, undefined); + t.is(workflow.jobs.b.previous, 'a'); }); test('should set previous job with 2 jobs and shorthand syntax', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: 'b' }, - { id: 'b', expression: 'y' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'x', next: 'b' }, + { id: 'b', expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); + const { workflow } = compilePlan(plan); + t.is(workflow.jobs.a.previous, undefined); + t.is(workflow.jobs.b.previous, 'a'); }); test('should set previous job with 2 jobs and no start', (t) => { const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); + const { workflow } = compilePlan(plan); + t.is(workflow.jobs.a.previous, undefined); + t.is(workflow.jobs.b.previous, 
'a'); }); test('should set previous job with 3 jobs', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y', next: { c: true } }, - { id: 'c', expression: 'z' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y', next: { c: true } }, + { id: 'c', expression: 'z' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); - t.is(compiledPlan.jobs.c.previous, 'b'); + const { workflow } = compilePlan(plan); + t.is(workflow.jobs.a.previous, undefined); + t.is(workflow.jobs.b.previous, 'a'); + t.is(workflow.jobs.c.previous, 'b'); }); test('should set previous job with 3 jobs and shorthand syntax', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'c', expression: 'z' }, - { id: 'a', expression: 'x', next: 'b' }, - { id: 'b', expression: 'y', next: 'c' }, - ], + workflow: { + jobs: [ + { id: 'c', expression: 'z' }, + { id: 'a', expression: 'x', next: 'b' }, + { id: 'b', expression: 'y', next: 'c' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); - t.is(compiledPlan.jobs.c.previous, 'b'); + const { workflow } = compilePlan(plan); + t.is(workflow.jobs.a.previous, undefined); + t.is(workflow.jobs.b.previous, 'a'); + t.is(workflow.jobs.c.previous, 'b'); }); test('should auto generate ids for jobs', (t) => { const plan = { - start: 'a', - jobs: [{ expression: 'x' }, { expression: 'y' }], + workflow: { + jobs: [{ expression: 'x' }, { expression: 'y' }], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - const ids = Object.keys(compiledPlan.jobs); + const { workflow } = compilePlan(plan); + const ids = Object.keys(workflow.jobs); t.truthy(ids[0]); t.truthy(ids[1]); t.assert(ids[0] 
!== ids[1]); @@ -127,67 +202,82 @@ test('should auto generate ids for jobs', (t) => { test('should convert jobs to an object with auto ids', (t) => { const plan: ExecutionPlan = { - jobs: [ - // silly use case but it doens't matter - { expression: 'x' }, - { expression: 'y' }, - ], + workflow: { + jobs: [ + // silly use case but it doens't matter + { expression: 'x' }, + { expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.deepEqual(Object.keys(compiledPlan.jobs), ['job-1', 'job-2']); + const { workflow } = compilePlan(plan); + t.deepEqual(Object.keys(workflow.jobs), ['job-1', 'job-2']); }); test('should reset job ids for each call', (t) => { const plan: ExecutionPlan = { - jobs: [{ expression: 'x' }], + workflow: { + jobs: [{ expression: 'x' }], + }, + options: {}, }; const first = compilePlan(plan); - t.is(first.jobs['job-1'].expression, 'x'); + t.is(first.workflow.jobs['job-1'].expression, 'x'); const second = compilePlan(plan); - t.is(second.jobs['job-1'].expression, 'x'); + t.is(second.workflow.jobs['job-1'].expression, 'x'); }); test('should set the start to jobs[0]', (t) => { const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'x' }, - { id: 'b', expression: 'y' }, - { id: 'c', expression: 'z' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'x' }, + { id: 'b', expression: 'y' }, + { id: 'c', expression: 'z' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.start, 'a'); + const { options } = compilePlan(plan); + t.is(options.start, 'a'); }); test('should not override the start', (t) => { const plan: ExecutionPlan = { - start: 'c', - jobs: [ - { id: 'a', expression: 'x' }, - { id: 'b', expression: 'y' }, - { id: 'c', expression: 'z' }, - ], + options: { + start: 'c', + }, + workflow: { + jobs: [ + { id: 'a', expression: 'x' }, + { id: 'b', expression: 'y' }, + { id: 'c', expression: 'z' }, + ], + }, }; - const compiledPlan = compilePlan(plan); - 
t.is(compiledPlan.start, 'c'); + const { options } = compilePlan(plan); + t.is(options.start, 'c'); }); test('should compile a shorthand edge', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { - id: 'a', - expression: 'x', - next: 'y', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: 'x', + next: 'y', + }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); - t.deepEqual(compiledPlan.jobs.a.next!, { + t.deepEqual(workflow.jobs.a.next!, { y: true, }); }); @@ -198,69 +288,69 @@ test('should not recompile a functional edge', (t) => { condition: () => true, }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.jobs.a.next!.b.condition({}); t.true(result); }); test('should compile a truthy edge', (t) => { const plan = planWithEdge({ condition: 'true' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.jobs.a.next!.b.condition({}); t.true(result); }); test('should compile a string edge', (t) => { const plan = planWithEdge('true'); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition(); + const result = workflow.jobs.a.next!.b.condition(); t.true(result); }); test('should compile a falsy edge', (t) => { const plan = planWithEdge({ condition: 'false' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.jobs.a.next!.b.condition({}); t.false(result); }); test('should compile an edge with arithmetic', (t) => { const plan = planWithEdge({ condition: '1 + 1' }); - const compiledPlan = 
compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.jobs.a.next!.b.condition({}); t.is(result, 2); }); test('should compile an edge which uses state', (t) => { const plan = planWithEdge({ condition: '!state.hasOwnProperty("error")' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.jobs.a.next!.b.condition({}); t.true(result); }); test('condition cannot require', (t) => { const plan = planWithEdge({ condition: 'require("axios")' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { message: 'require is not defined', }); }); @@ -268,10 +358,10 @@ test('condition cannot require', (t) => { test('condition cannot access process', (t) => { const plan = planWithEdge({ condition: 'process.exit()' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { message: 'process is not defined', }); }); @@ -279,10 +369,10 @@ test('condition cannot access process', (t) => { test('condition cannot access process #2', (t) => { const plan = planWithEdge({ condition: '(() => process.exit())()' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { message: 'process is not defined', }); }); @@ -290,10 +380,10 @@ test('condition cannot access process #2', (t) => { test('condition cannot eval', (t) 
=> { const plan = planWithEdge({ condition: 'eval("process.exit()")' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { message: 'Code generation from strings disallowed for this context', }); }); @@ -310,25 +400,28 @@ test('throw for a syntax error on a job edge', (t) => { test('throw for multiple errors', (t) => { const plan = { - jobs: [ - { - id: 'a', - expression: 'x', - next: { - b: { - condition: '@£^!!', - }, - c: { - condition: '@£^!!', + workflow: { + jobs: [ + { + id: 'a', + expression: 'x', + next: { + b: { + condition: '@£^!!', + }, + c: { + condition: '@£^!!', + }, }, }, - }, - ], + ], + }, + options: {}, }; try { compilePlan(plan); - } catch (e) { + } catch (e: any) { // the message will have have one error per line const { message } = e; const lines = message.split('\n\n'); diff --git a/packages/runtime/test/execute/expression.test.ts b/packages/runtime/test/execute/expression.test.ts index 2258e43c2..a455a974e 100644 --- a/packages/runtime/test/execute/expression.test.ts +++ b/packages/runtime/test/execute/expression.test.ts @@ -1,8 +1,10 @@ import test from 'ava'; import { fn } from '@openfn/language-common'; import { createMockLogger } from '@openfn/logger'; +import type { State } from '@openfn/lexicon'; + import execute from '../../src/execute/expression'; -import type { State, Operation, ExecutionContext } from '../../src/types'; +import type { ExecutionContext } from '../../src/types'; type TestState = State & { data: { @@ -17,15 +19,18 @@ const createState = (data = {}) => ({ const logger = createMockLogger(undefined, { level: 'debug' }); -const createContext = (args = {}) => +const createContext = (args = {}, options = {}) => + // @ts-ignore ({ logger, - plan: {}, + plan: { + options, + }, opts: {}, notify: () => {}, report: () => {}, ...args, - } as 
unknown as ExecutionContext); + } as ExecutionContext); test.afterEach(() => { logger._reset(); @@ -38,7 +43,6 @@ test.afterEach(() => { test('run a live no-op job with one operation', async (t) => { const job = [(s: State) => s]; const state = createState(); - const context = createContext(); const result = await execute(context, job, state); @@ -108,7 +112,7 @@ test('configuration is removed from the result by default', async (t) => { test('statePropsToRemove removes multiple props from state', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = ['x', 'y']; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const result = await execute(context, job, { x: 1, y: 1, z: 1 }); t.deepEqual(result, { z: 1 }); @@ -118,7 +122,7 @@ test('statePropsToRemove logs to debug when a prop is removed', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = ['x']; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const result = await execute(context, job, { x: 1, y: 1, z: 1 }); t.deepEqual(result, { y: 1, z: 1 }); @@ -130,7 +134,7 @@ test('statePropsToRemove logs to debug when a prop is removed', async (t) => { test('no props are removed from state if an empty array is passed to statePropsToRemove', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = ['x', 'y']; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const state = { x: 1, configuration: 1 }; const result = await execute(context, job, state as any); @@ -140,7 +144,7 @@ test('no props are removed from state if an empty array is passed to statePropsT test('no props are removed from state if a falsy value is passed to statePropsToRemove', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = 
undefined; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const state = { x: 1, configuration: 1 }; const result = await execute(context, job, state as any); @@ -351,9 +355,12 @@ test('Throws after custom timeout', async (t) => { const job = `export default [() => new Promise((resolve) => setTimeout(resolve, 100))];`; - const context = createContext({ - opts: { jobLogger: logger, timeout: 10 }, - }); + const context = createContext( + { + opts: { jobLogger: logger }, + }, + { timeout: 10 } + ); const state = createState(); await t.throwsAsync(async () => execute(context, job, state), { message: 'Job took longer than 10ms to complete', diff --git a/packages/runtime/test/execute/job.test.ts b/packages/runtime/test/execute/job.test.ts index d1a36cc68..66cc29d13 100644 --- a/packages/runtime/test/execute/job.test.ts +++ b/packages/runtime/test/execute/job.test.ts @@ -8,7 +8,8 @@ import { } from '../../src'; import execute from '../../src/execute/job'; -import type { ExecutionContext, State } from '../../src/types'; +import type { ExecutionContext } from '../../src/types'; +import { State } from '@openfn/lexicon'; const createState = (data = {}) => ({ data: data, @@ -20,7 +21,9 @@ const logger = createMockLogger(undefined, { level: 'debug' }); const createContext = (args = {}) => ({ logger, - plan: {}, + plan: { + options: {}, + }, opts: {}, notify: () => {}, report: () => {}, diff --git a/packages/runtime/test/execute/plan.test.ts b/packages/runtime/test/execute/plan.test.ts index 1cdd96682..52ecd80b6 100644 --- a/packages/runtime/test/execute/plan.test.ts +++ b/packages/runtime/test/execute/plan.test.ts @@ -1,142 +1,113 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; -import { ExecutionPlan, JobNode } from '../../src/types'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + import execute from 
'./../../src/execute/plan'; +import { CompiledExecutionPlan } from '../../src'; let mockLogger = createMockLogger(undefined, { level: 'debug' }); +const createPlan = ( + jobs: Job[], + options: Partial = {} +): ExecutionPlan => ({ + workflow: { + jobs, + }, + options, +}); + +const createJob = ({ id, expression, next, state }: any): Job => ({ + id: id ?? 'job1', + expression: expression ?? 'export default [s => s]', + state, + next, +}); + test('throw for a circular job', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - jobs: [ - { - id: 'job1', - expression: 'export default [s => s]', - next: { job2: true }, - }, - { - id: 'job2', - expression: 'export default [s => s]', - next: { job1: true }, - }, - ], - }; + const plan = createPlan([ + createJob({ next: { job2: true } }), + createJob({ id: 'job2', next: { job1: true } }), + ]); const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); t.regex(e!.message, /circular dependency/i); }); test('throw for a job with multiple inputs', async (t) => { - // TODO maybe this isn't a good test - job1 and job2 both input to job3, but job2 never gets called - const plan: ExecutionPlan = { - start: 'job1', - jobs: [ - { - id: 'job1', - expression: 'export default [s => s]', - next: { job3: true }, - }, - { - id: 'job2', - expression: 'export default [s => s]', - next: { job3: true }, - }, - { - id: 'job3', - expression: 'export default [s => s]', - next: {}, - }, - ], - }; + const plan = createPlan([ + createJob({ next: { job3: true } }), + createJob({ id: 'job2', next: { job3: true } }), + createJob({ id: 'job3' }), + ]); + const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); t.regex(e!.message, /multiple dependencies/i); }); test('throw for a plan which references an undefined job', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - jobs: [ - { - id: 'job1', - expression: 'export default [s => s]', - next: { job3: true }, - }, - ], - }; + const plan = 
createPlan([createJob({ next: { job3: true } })]); + const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); t.regex(e!.message, /cannot find job/i); }); test('throw for an illegal edge condition', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: '.', - next: { - b: { - condition: '!!!', - }, + const plan = createPlan([ + createJob({ + next: { + job2: { + condition: '!!!', }, }, - { id: 'b' }, - ], - }; + }), + createJob({ id: 'job2' }), + ]); const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); - t.regex(e!.message, /failed to compile edge condition a->b/i); -}); - -test('throw for an edge condition', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'x', - next: { - b: { - condition: '!!!!', - }, - }, - }, - { id: 'b' }, - ], - }; - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); - t.regex(e!.message, /failed to compile edge condition/i); + t.regex(e!.message, /failed to compile edge condition job1->job2/i); }); test('execute a one-job execution plan with inline state', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [s => s.data.x]', - state: { data: { x: 22 } }, - }, - ], - }; + const plan = createPlan([ + createJob({ + expression: 'export default [s => s.data.x]', + state: { data: { x: 22 } }, + }), + ]); const result = (await execute(plan, {}, mockLogger)) as unknown as number; t.is(result, 22); }); test('execute a one-job execution plan with initial state', async (t) => { - const plan: ExecutionPlan = { - initialState: { - data: { x: 33 }, - }, - jobs: [ - { + const plan = createPlan( + [ + createJob({ expression: 'export default [s => s.data.x]', - }, + }), ], - }; + { + initialState: { + data: { x: 33 }, + }, + } + ); const result = (await execute(plan, {}, mockLogger)) as unknown as number; t.is(result, 33); }); test('lazy load initial state', async (t) => { - const plan: ExecutionPlan = { - initialState: 
's1', - jobs: [{ id: 'a', expression: 'export default [s => s]' }], - }; + const plan = createPlan( + [ + createJob({ + expression: 'export default [s => s]', + }), + ], + { + // @ts-ignore TODO tidy this up + initialState: 's1', + } + ); + const states = { s1: { data: { result: 42 } } }; const options = { callbacks: { @@ -148,9 +119,6 @@ test('lazy load initial state', async (t) => { t.deepEqual(result, states.s1); }); -test.todo('lazy load initial state with log'); -test.todo('lazy load initial state with notify'); - test('execute a one-job execution plan and notify init-start and init-complete', async (t) => { let notifications: Record = {}; @@ -158,14 +126,16 @@ test('execute a one-job execution plan and notify init-start and init-complete', data: { x: 33 }, }; - const plan: ExecutionPlan = { - initialState: state, - jobs: [ - { + const plan = createPlan( + [ + createJob({ expression: 'export default [s => s.data.x]', - }, + }), ], - }; + { + initialState: state, + } + ); const notify = (event: string, payload: any) => { if (notifications[event]) { @@ -184,139 +154,142 @@ test('execute a one-job execution plan and notify init-start and init-complete', }); test('execute a job with a simple truthy "precondition" or "trigger node"', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - next: { - job: { - condition: 'true', - }, + const plan = createPlan([ + createJob({ + next: { + job: { + condition: 'true', }, }, - { - id: 'job', - expression: 'export default [() => ({ data: { done: true } })]', - }, - ], - }; + }), + createJob({ + id: 'job', + expression: 'export default [() => ({ data: { done: true } })]', + }), + ]); + const result = await execute(plan, {}, mockLogger); t.true(result.data.done); }); test('do not execute a job with a simple falsy "precondition" or "trigger node"', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - next: { - job: { - condition: 'false', - }, + const plan = createPlan([ + createJob({ + next: { + job: { + 
condition: 'false', }, }, - { - id: 'job', - expression: 'export default [() => ({ data: { done: true } })]', - }, - ], - }; + }), + createJob({ + id: 'job', + expression: 'export default [() => ({ data: { done: true } })]', + }), + ]); + const result = await execute(plan, {}, mockLogger); t.falsy(result.data.done); }); test('execute a job with a valid "precondition" or "trigger node"', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 10 } }, - jobs: [ + const plan = createPlan( + [ + // @ts-ignore TODO make this a trigger node when we have the types { + id: 'a', next: { job: { - condition: 'state.data.x === 10', + condition: 'true', }, }, }, - { + createJob({ id: 'job', expression: 'export default [() => ({ data: { done: true } })]', - }, + }), ], - }; + { + initialState: { data: { x: 10 } }, + } + ); + const result = await execute(plan, {}, mockLogger); t.true(result.data.done); }); test('merge initial and inline state', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 33 } }, - jobs: [ - { + const plan = createPlan( + [ + createJob({ expression: 'export default [s => s]', state: { data: { y: 11 } }, - }, + }), ], - }; + { + initialState: { data: { x: 33 } }, + } + ); + const result = await execute(plan, {}, mockLogger); t.is(result.data.x, 33); t.is(result.data.y, 11); }); test('Initial state overrides inline data', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 34 } }, - jobs: [ - { + const plan = createPlan( + [ + createJob({ expression: 'export default [s => s]', - state: { data: { x: 11 } }, - }, + state: { data: { y: 11 } }, + }), ], - }; + { + initialState: { data: { x: 34 } }, + } + ); + const result = await execute(plan, {}, mockLogger); t.is(result.data.x, 34); }); test('Previous state overrides inline data', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - // This will return x as 5 - { - id: 'job1', - expression: 'export default [s => s]', - state: { data: { 
x: 5 } }, - next: { - job2: true, - }, - }, + const plan = createPlan([ + // This will return x as 5 + createJob({ + state: { data: { x: 5 } }, + next: { + job2: true, + }, + }), + // This will receive x as 5, prefer it to the default x as 88, and return it plus 1 + createJob({ + id: 'job2', + expression: 'export default [s => { s.data.x +=1 ; return s; }]', + state: { data: { x: 88 } }, + }), + ]); - // This will receive x as 5, prefer it to the default x as 88, and return it plus 1 - { - id: 'job2', - expression: 'export default [s => { s.data.x +=1 ; return s; }]', - state: { data: { x: 88 } }, - }, - ], - }; const result = await execute(plan, {}, mockLogger); t.is(result.data.x, 6); }); test('only allowed state is passed through in strict mode', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: - 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', - next: { - job2: true, - }, - }, + const plan = createPlan([ + createJob({ + expression: + 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', + next: { + job2: true, + }, + }), + createJob({ + id: 'job2', + // Throw if we receive unexpected stuff in state + expression: + 'export default [s => { if (s.x || s.y) { throw new Error() }; return s;}]', + }), + ]); - { - id: 'job2', - // Throw if we receive unexpected stuff in state - expression: - 'export default [s => { if (s.x || s.y) { throw new Error() }; return s;}]', - }, - ], - }; const result = await execute(plan, { strict: true }, mockLogger); t.deepEqual(result, { data: {}, @@ -328,57 +301,55 @@ test('Jobs only receive state from upstream jobs', async (t) => { const assert = (expr: string) => `if (!(${expr})) throw new Error('ASSERT FAIL')`; - const plan: ExecutionPlan = { - jobs: [ - { - id: 'start', - expression: 'export default [s => s]', - state: { data: { x: 1, y: 1 } }, - next: { - 'x-a': true, - 'y-a': true, - }, + const plan = createPlan([ + { + id: 'start', + expression: 'export default [s => 
s]', + state: { data: { x: 1, y: 1 } }, + next: { + 'x-a': true, + 'y-a': true, }, + }, - { - id: 'x-a', - expression: `export default [s => { - ${assert('s.data.x === 1')}; - ${assert('s.data.y === 1')}; - s.data.x += 1; - return s; - }]`, - next: { 'x-b': true }, - }, - { - id: 'x-b', - expression: `export default [s => { - ${assert('s.data.x === 2')}; - ${assert('s.data.y === 1')}; - return s; - }]`, - }, + { + id: 'x-a', + expression: `export default [s => { + ${assert('s.data.x === 1')}; + ${assert('s.data.y === 1')}; + s.data.x += 1; + return s; + }]`, + next: { 'x-b': true }, + }, + { + id: 'x-b', + expression: `export default [s => { + ${assert('s.data.x === 2')}; + ${assert('s.data.y === 1')}; + return s; + }]`, + }, - { - id: 'y-a', - expression: `export default [s => { - ${assert('s.data.x === 1')}; - ${assert('s.data.y === 1')}; - s.data.y += 1; - return s; - }]`, - next: { 'y-b': true }, - }, - { - id: 'y-b', - expression: `export default [s => { - ${assert('s.data.x === 1')}; - ${assert('s.data.y === 2')}; - return s; - }]`, - }, - ], - }; + { + id: 'y-a', + expression: `export default [s => { + ${assert('s.data.x === 1')}; + ${assert('s.data.y === 1')}; + s.data.y += 1; + return s; + }]`, + next: { 'y-b': true }, + }, + { + id: 'y-b', + expression: `export default [s => { + ${assert('s.data.x === 1')}; + ${assert('s.data.y === 2')}; + return s; + }]`, + }, + ]); const result = await execute(plan, {}, mockLogger); @@ -393,24 +364,22 @@ test('Jobs only receive state from upstream jobs', async (t) => { }); test('all state is passed through in non-strict mode', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: - 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', - next: { - job2: true, - }, - }, + const plan = createPlan([ + createJob({ + expression: + 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', + next: { + job2: true, + }, + }), + createJob({ + id: 'job2', + // Throw if we receive 
unexpected stuff in state + expression: + 'export default [s => { if (!s.x || !s.y || !s.references) { throw new Error() }; return s;}]', + }), + ]); - { - id: 'job2', - // Throw if we receive unexpected stuff in state - expression: - 'export default [s => { if (!s.x || !s.y || !s.references) { throw new Error() }; return s;}]', - }, - ], - }; const result = await execute(plan, { strict: false }, mockLogger); t.deepEqual(result, { data: {}, @@ -421,74 +390,67 @@ test('all state is passed through in non-strict mode', async (t) => { }); test('execute edge based on state in the condition', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [(s) => { s.data.x = 10; return s;}]', - next: { - job2: { condition: 'state.data.x === 10' }, - }, - }, - { - id: 'job2', - expression: 'export default [() => ({ data: { y: 20 } })]', + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [(s) => { s.data.x = 10; return s;}]', + next: { + job2: { condition: 'state.data.x === 10' }, }, - ], - }; + }, + { + id: 'job2', + expression: 'export default [() => ({ data: { y: 20 } })]', + }, + ]); const result = await execute(plan, {}, mockLogger); t.is(result.data?.y, 20); }); test('skip edge based on state in the condition ', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [s => { s.data.x = 10; return s;}]', - next: { - job2: { condition: 'false' }, - }, - }, - { - id: 'job2', - expression: 'export default [() => ({ y: 20 })]', + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [s => { s.data.x = 10; return s;}]', + next: { + job2: { condition: 'false' }, }, - ], - }; + }, + { + id: 'job2', + expression: 'export default [() => ({ y: 20 })]', + }, + ]); const result = await execute(plan, {}, mockLogger); t.is(result.data?.x, 10); }); test('do not traverse a disabled edge', async (t) 
=> { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - expression: 'export default [(s) => { s.data.x = 10; return s;}]', - next: { - job2: { - disabled: true, - condition: 'true', - }, + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [(s) => { s.data.x = 10; return s;}]', + next: { + job2: { + disabled: true, + condition: 'true', }, }, - { - id: 'job2', - expression: 'export default [() => ({ data: { x: 20 } })]', - }, - ], - }; + }, + { + id: 'job2', + expression: 'export default [() => ({ data: { x: 20 } })]', + }, + ]); const result = await execute(plan, {}, mockLogger); t.is(result.data?.x, 10); }); test('execute a two-job execution plan', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'job1', expression: 'export default [s => { s.data.x += 1; return s; } ]', @@ -499,15 +461,16 @@ test('execute a two-job execution plan', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - }; + { initialState: { data: { x: 0 } } } + ); + const result = await execute(plan, {}, mockLogger); t.is(result.data.x, 2); }); test('only execute one job in a two-job execution plan', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'job1', expression: 'export default [s => { s.data.x += 1; return s; } ]', @@ -518,15 +481,16 @@ test('only execute one job in a two-job execution plan', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - }; + { initialState: { data: { x: 0 } } } + ); + const result = await execute(plan, {}, mockLogger); t.is(result.data.x, 1); }); -test('execute a two-job execution plan with custom start in state', async (t) => { - const plan: ExecutionPlan = { - start: 'job2', - jobs: [ +test('execute a two-job execution plan with custom start', async (t) => { + const plan = createPlan( + [ { id: 
'job1', expression: 'export default [() => ({ data: { result: 11 } }) ]', @@ -537,36 +501,16 @@ test('execute a two-job execution plan with custom start in state', async (t) => next: { job1: true }, }, ], - }; - const result = await execute(plan, {}, mockLogger); - t.is(result.data.result, 11); -}); + { start: 'job2' } + ); -test('execute a two-job execution plan with custom start in options', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - initialState: { start: 'job2' }, - jobs: [ - { - id: 'job1', - expression: 'export default [() => ({ data: { result: 11 } }) ]', - }, - { - id: 'job2', - expression: 'export default [() => ({ data: { result: 1 } }) ]', - next: { job1: true }, - }, - ], - }; const result = await execute(plan, {}, mockLogger); t.is(result.data.result, 11); }); test('Return when there are no more edges', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'job1', expression: 'export default [s => { s.data.x += 1; return s; } ]', @@ -576,33 +520,35 @@ test('Return when there are no more edges', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - }; + { start: 'job1', initialState: { data: { x: 0 } } } + ); + const result = await execute(plan, {}, mockLogger); t.is(result.data?.x, 1); }); test('execute a 5 job execution plan', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - start: '1', - jobs: [], - } as ExecutionPlan; + const jobs = []; for (let i = 1; i < 6; i++) { - plan.jobs.push({ + jobs.push({ id: `${i}`, expression: 'export default [s => { s.data.x += 1; return s; } ]', next: i === 5 ? 
null : { [`${i + 1}`]: true }, - } as JobNode); + } as Job); } + + const plan = createPlan(jobs, { + initialState: { data: { x: 0 } }, + start: '1', + }); + const result = await execute(plan, {}, mockLogger); t.is(result.data.x, 5); }); test('execute multiple steps in "parallel"', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'start', expression: 'export default [s => s]', @@ -625,7 +571,9 @@ test('execute multiple steps in "parallel"', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - }; + { start: 'start', initialState: { data: { x: 0 } } } + ); + const result = await execute(plan, {}, mockLogger); t.deepEqual(result, { a: { data: { x: 1 } }, @@ -635,10 +583,8 @@ test('execute multiple steps in "parallel"', async (t) => { }); test('isolate state in "parallel" execution', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'start', expression: 'export default [s => s]', @@ -658,17 +604,16 @@ test('isolate state in "parallel" execution', async (t) => { 'export default [s => { if (s.data.b) { throw "e" }; s.data.c = true; return s }]', }, ], - }; + { start: 'start', initialState: { data: { x: 0 } } } + ); const result = await execute(plan, {}, mockLogger); t.falsy(result.errors); }); test('isolate state in "parallel" execution with deeper trees', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'start', expression: 'export default [s => s]', @@ -701,34 +646,32 @@ test('isolate state in "parallel" execution with deeper trees', async (t) => { 'export default [s => { if (s.data.c) { throw "e" }; s.data.b = true; return s }]', }, ], - }; + { start: 'start', initialState: { data: { x: 0 } } } + ); const result = await execute(plan, {}, 
mockLogger); t.falsy(result.errors); }); test('"parallel" execution with multiple leaves should write multiple results to state', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - jobs: [ - { - id: 'start', - expression: 'export default [s => s]', - next: { - 'job-b': true, - 'job-c': true, - }, + const plan = createPlan([ + { + id: 'start', + expression: 'export default [s => s]', + next: { + 'job-b': true, + 'job-c': true, }, - { - id: 'job-b', - expression: 'export default [s => { s.data.b = true; return s }]', - }, - { - id: 'job-c', - expression: 'export default [s => { s.data.c = true; return s }]', - }, - ], - }; + }, + { + id: 'job-b', + expression: 'export default [s => { s.data.b = true; return s }]', + }, + { + id: 'job-c', + expression: 'export default [s => { s.data.c = true; return s }]', + }, + ]); const result = await execute(plan, {}, mockLogger); // Each leaf should write to its own place on state @@ -747,15 +690,14 @@ test('"parallel" execution with multiple leaves should write multiple results to }); test('return an error in state', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - state: {}, - expression: 'export default [s => { throw Error("e")}]', - }, - ], - }; + const plan = createPlan([ + { + id: 'a', + state: {}, + expression: 'export default [s => { throw Error("e")}]', + }, + ]); + const result = await execute(plan, {}, mockLogger); t.truthy(result.errors); t.is(result.errors.a.message, 'e'); @@ -763,15 +705,14 @@ test('return an error in state', async (t) => { // Fix for https://github.com/OpenFn/kit/issues/317 test('handle non-standard error objects', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - state: {}, - expression: 'export default [s => { throw "wibble" }]', - }, - ], - }; + const plan = createPlan([ + { + id: 'a', + state: {}, + expression: 'export default [s => { throw "wibble" }]', + }, + ]); + const result = await execute(plan, {}, mockLogger); 
t.truthy(result.errors); const err = result.errors.a; @@ -780,64 +721,60 @@ test('handle non-standard error objects', async (t) => { }); test('keep executing after an error', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - state: {}, - expression: 'export default [s => { throw Error("e"); state.x = 20 }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: 'export default [() => ({ y: 20 })]', + const plan = createPlan([ + { + id: 'a', + state: {}, + expression: 'export default [s => { throw Error("e"); state.x = 20 }]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: 'export default [() => ({ y: 20 })]', + }, + ]); + const result = await execute(plan, {}, mockLogger); t.is(result.y, 20); t.falsy(result.x); }); test('simple on-error handler', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [s => { throw Error("e")}]', - next: { - job2: { condition: 'state.errors' }, - job3: { condition: '!state.errors' }, - }, - }, - { - id: 'job2', - expression: 'export default [() => ({ y: 20 })]', + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [s => { throw Error("e")}]', + next: { + job2: { condition: 'state.errors' }, + job3: { condition: '!state.errors' }, }, - { - id: 'job3', - expression: 'export default [() => ({ x: 20 })]', - }, - ], - }; + }, + { + id: 'job2', + expression: 'export default [() => ({ y: 20 })]', + }, + { + id: 'job3', + expression: 'export default [() => ({ x: 20 })]', + }, + ]); + const result = await execute(plan, {}, mockLogger); t.is(result.y, 20); t.falsy(result.x); }); test('log appopriately on error', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [s => { throw Error("e")}]', - }, - ], - }; + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [s => { throw Error("e")}]', + }, + 
]); const logger = createMockLogger(undefined, { level: 'debug' }); @@ -851,10 +788,10 @@ test('log appopriately on error', async (t) => { }); test('jobs do not share a local scope', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ + const plan = createPlan( + [ { + id: 'job1', // declare x in this expression's scope expression: 'const x = 10; export default [s => s];', next: { @@ -867,7 +804,8 @@ test('jobs do not share a local scope', async (t) => { expression: 'export default [s => { s.data.x = x; return s; }]', }, ], - }; + { initialState: { data: {} } } + ); await t.throwsAsync(() => execute(plan, {}, mockLogger), { message: 'ReferenceError: x is not defined', name: 'RuntimeCrash', @@ -875,21 +813,20 @@ test('jobs do not share a local scope', async (t) => { }); test('jobs do not share a global scope', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression: 'export default [s => { x = 10; return s; }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: 'export default [s => { s.data.x = x; return s; }]', + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { x = 10; return s; }]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: 'export default [s => { s.data.x = x; return s; }]', + }, + ]); + console.log(JSON.stringify(plan, null, 2)); await t.throwsAsync(() => execute(plan, {}, mockLogger), { message: 'ReferenceError: x is not defined', @@ -898,22 +835,20 @@ test('jobs do not share a global scope', async (t) => { }); test('jobs do not share a globalThis object', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression: 'export default [(s) => { globalThis.x = 10; return s; }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: - 'export default [(s) => { s.data.x = globalThis.x; return s; }]', + const plan = createPlan([ + { + id: 'job1', + expression: 'export 
default [(s) => { globalThis.x = 10; return s; }]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: + 'export default [(s) => { s.data.x = globalThis.x; return s; }]', + }, + ]); const result = await execute(plan, {}, mockLogger); t.deepEqual(result, { data: {} }); }); @@ -921,21 +856,19 @@ test('jobs do not share a globalThis object', async (t) => { // TODO this fails right now // https://github.com/OpenFn/kit/issues/213 test.skip('jobs cannot scribble on globals', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression: 'export default [s => { console.x = 10; return s; }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: 'export default [s => { s.data.x = console.x; return s; }]', + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { console.x = 10; return s; }]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: 'export default [s => { s.data.x = console.x; return s; }]', + }, + ]); const result = await execute(plan, {}, mockLogger); t.falsy(result.data.x); }); @@ -943,23 +876,21 @@ test.skip('jobs cannot scribble on globals', async (t) => { // TODO this fails right now // https://github.com/OpenFn/kit/issues/213 test.skip('jobs cannot scribble on adaptor functions', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - jobs: [ - { - expression: - 'import { fn } from "@openfn/language-common"; fn.x = 10; export default [s => s]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: - 'import { fn } from "@openfn/language-common"; export default [s => { s.data.x = fn.x; return s; }]', + const plan = createPlan([ + { + id: 'job1', + expression: + 'import { fn } from "@openfn/language-common"; fn.x = 10; export default [s => s]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: + 'import { fn } from "@openfn/language-common"; export default [s => { s.data.x = fn.x; return s; }]', + 
}, + ]); const options = { linker: { modules: { @@ -984,19 +915,17 @@ test('jobs can write circular references to state without blowing up downstream' return s; }] `; - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression, - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s => s)]', - }, - ], - }; + const plan = createPlan([ + { + id: 'job1', + expression, + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s => s)]', + }, + ]); const result = await execute(plan, {}, mockLogger); @@ -1020,22 +949,19 @@ test('jobs cannot pass circular references to each other', async (t) => { return s; }] `; - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression, - next: { b: true }, - }, - { - id: 'b', - expression: `export default [(s => { + const plan = createPlan([ + { + expression, + next: { b: true }, + }, + { + id: 'b', + expression: `export default [(s => { s.data.answer = s.data.ref.b.a; return s })]`, - }, - ], - }; + }, + ]); const result = await execute(plan, {}, mockLogger); @@ -1044,25 +970,22 @@ test('jobs cannot pass circular references to each other', async (t) => { }); test('jobs can write functions to state without blowing up downstream', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - next: { b: true }, - expression: `export default [(s) => { + const plan = createPlan([ + { + next: { b: true }, + expression: `export default [(s) => { s.data = { x: () => 22 } return s; }]`, - }, - { - id: 'b', - expression: 'export default [(s) => s]', - }, - ], - }; + }, + { + id: 'b', + expression: 'export default [(s) => s]', + }, + ]); const result = await execute(plan, {}, mockLogger); @@ -1071,27 +994,24 @@ test('jobs can write functions to state without blowing up downstream', async (t }); test('jobs cannot pass functions to each other', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - 
jobs: [ - { - next: { b: true }, - expression: `export default [(s) => { + const plan = createPlan([ + { + next: { b: true }, + expression: `export default [(s) => { s.data = { x: () => 22 } return s; }]`, - }, - { - id: 'b', - expression: `export default [ + }, + { + id: 'b', + expression: `export default [ (s) => { s.data.x(); return s; } ]`, - }, - ], - }; + }, + ]); const result = await execute(plan, {}, mockLogger); @@ -1101,14 +1021,12 @@ test('jobs cannot pass functions to each other', async (t) => { }); test('Plans log for each job start and end', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [s => s]', - }, - ], - }; + const plan = createPlan([ + { + id: 'a', + expression: 'export default [s => s]', + }, + ]); const logger = createMockLogger(undefined, { level: 'debug' }); await execute(plan, {}, logger); diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index 972482dbd..7b332c925 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -4,12 +4,9 @@ * */ import test from 'ava'; +import type { ExecutionPlan } from '@openfn/lexicon'; -import { - ExecutionPlan, - NOTIFY_JOB_COMPLETE, - NotifyJobCompletePayload, -} from '../src'; +import { NOTIFY_JOB_COMPLETE, NotifyJobCompletePayload } from '../src'; import callRuntime from '../src/runtime'; /** @@ -53,8 +50,7 @@ const run = async (t, workflow: ExecutionPlan) => { }; const state = await callRuntime( - workflow, - {}, + { workflow }, { strict: false, callbacks: { notify }, diff --git a/packages/runtime/test/security.test.ts b/packages/runtime/test/security.test.ts index caa8f1dd0..eab8bf3a0 100644 --- a/packages/runtime/test/security.test.ts +++ b/packages/runtime/test/security.test.ts @@ -1,13 +1,26 @@ // a suite of tests with various security concerns in mind import test from 'ava'; +import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan, State } from 
'@openfn/lexicon'; + import doRun from '../src/runtime'; -import { createMockLogger } from '@openfn/logger'; -import { ExecutionPlan } from '../src/types'; +const createPlan = (expression: string, state: State = {}) => ({ + workflow: { + jobs: [ + { + expression, + }, + ], + }, + options: { + initialState: state, + }, +}); // Disable strict mode for all these tests -const run = (job: any, state?: any, options: any = {}) => - doRun(job, state, { ...options, strict: false }); +const run = (plan: ExecutionPlan, options: any = {}) => + doRun(plan, { strict: false, ...options }); const logger = createMockLogger(undefined, { level: 'default' }); @@ -21,13 +34,15 @@ test.serial( const src = 'export default [(s) => s]'; const state = { - data: true, + data: {}, configuration: { password: 'secret', }, }; - const result: any = await run(src, state); - t.is(result.data, true); + const plan = createPlan(src, state); + + const result: any = await run(plan); + t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } ); @@ -38,13 +53,15 @@ test.serial( const src = 'export default [(s) => s]'; const state = { - data: true, + data: {}, configuration: { password: 'secret', }, }; - const result: any = await run(src, state, { strict: true }); - t.is(result.data, true); + + const plan = createPlan(src, state); + const result: any = await run(plan, { strict: true }); + t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } ); @@ -55,13 +72,15 @@ test.serial( const src = 'export default [(s) => s]'; const state = { - data: true, + data: {}, configuration: { password: 'secret', }, }; - const result: any = await run(src, state, { strict: false }); - t.is(result.data, true); + + const plan = createPlan(src, state); + const result: any = await run(plan, { strict: false }); + t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } ); @@ -72,14 +91,16 @@ test.serial( const src = 'export default [(s) => { throw "err" }]'; const state = { - data: 
true, + data: {}, configuration: { password: 'secret', }, }; - const result: any = await run(src, state, { strict: false }); + const plan = createPlan(src, state); + + const result: any = await run(plan, { strict: false }); t.truthy(result.errors); - t.is(result.data, true); + t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } ); @@ -99,14 +120,16 @@ test.serial('jobs should not have access to global scope', async (t) => { test.serial('jobs should be able to read global state', async (t) => { const src = 'export default [() => state.data.x]'; - const result: any = await run(src, { data: { x: 42 } }); // typings are a bit tricky + const plan = createPlan(src, { data: { x: 42 } }); + const result: any = await run(plan); t.is(result, 42); }); test.serial('jobs should be able to mutate global state', async (t) => { const src = 'export default [() => { state.x = 22; return state.x; }]'; - const result: any = await run(src, { data: { x: 42 } }); // typings are a bit tricky + const plan = createPlan(src, { data: { x: 42 } }); + const result: any = await run(plan); t.is(result, 22); }); @@ -198,20 +221,22 @@ test.serial( 'jobs in workflow cannot share data through globals (issue #213)', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [s => { console.x = 10; return s; }]', - next: { - b: true, + workflow: { + jobs: [ + { + id: 'a', + expression: 'export default [s => { console.x = 10; return s; }]', + next: { + b: true, + }, }, - }, - { - id: 'b', - expression: - 'export default [s => { s.data.x = console.x; return s; }]', - }, - ], + { + id: 'b', + expression: + 'export default [s => { s.data.x = console.x; return s; }]', + }, + ], + }, }; const result = await run(plan); diff --git a/packages/runtime/test/util/regex.ts b/packages/runtime/test/util/regex.test.ts similarity index 100% rename from packages/runtime/test/util/regex.ts rename to packages/runtime/test/util/regex.test.ts diff --git 
a/packages/runtime/test/util/validate-plan.test.ts b/packages/runtime/test/util/validate-plan.test.ts index 451940703..fcaf8df7f 100644 --- a/packages/runtime/test/util/validate-plan.test.ts +++ b/packages/runtime/test/util/validate-plan.test.ts @@ -1,19 +1,21 @@ import test from 'ava'; -import { ExecutionPlan } from '../../src'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; import validate, { buildModel } from '../../src/util/validate-plan'; +const job = (id: string, next?: Record) => + ({ + id, + next, + expression: '.', + } as Job); + test('builds a simple model', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - }, - ], + options: {}, + workflow: { + jobs: [job('a', { b: true }), job('b')], + }, }; const model = buildModel(plan); @@ -31,17 +33,10 @@ test('builds a simple model', (t) => { test('builds a more complex model', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - next: { c: true, a: true }, - }, - { id: 'c' }, - ], + options: {}, + workflow: { + jobs: [job('a', { b: true }), job('b', { c: true, a: true }), job('c')], + }, }; const model = buildModel(plan); @@ -63,16 +58,10 @@ test('builds a more complex model', (t) => { test('throws for a circular dependency', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - next: { a: true }, - }, - ], + options: {}, + workflow: { + jobs: [job('a', { b: true }), job('b', { a: true })], + }, }; t.throws(() => validate(plan), { @@ -82,20 +71,14 @@ test('throws for a circular dependency', (t) => { test('throws for an indirect circular dependency', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - next: { c: true }, - }, - { - id: 'c', - next: { a: true }, - }, - ], + options: {}, + workflow: { + jobs: [ + job('a', { b: true }), + job('b', { c: true }), + job('c', { a: true }), + ], 
+ }, }; t.throws(() => validate(plan), { @@ -105,22 +88,17 @@ test('throws for an indirect circular dependency', (t) => { test('throws for a multiple inputs', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true, c: true }, - }, - { - id: 'b', - next: { z: true }, - }, - { - id: 'c', - next: { z: true }, - }, - { id: 'z' }, - ], + options: {}, + workflow: { + jobs: [ + job('a', { b: true, c: true }), + job('b', { z: true }), + job('c', { z: true }), + job('z'), + ], + }, }; + t.throws(() => validate(plan), { message: 'Multiple dependencies detected for: z', }); @@ -128,12 +106,12 @@ test('throws for a multiple inputs', (t) => { test('throws for a an unknown job', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - next: { z: true }, - }, - ], + options: {}, + workflow: { + jobs: [job('next', { z: true })], + }, }; + t.throws(() => validate(plan), { message: 'Cannot find job: z', }); @@ -141,11 +119,15 @@ test('throws for a an unknown job', (t) => { test('throws for a an unknown job with shorthand syntax', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - next: 'z', - }, - ], + options: {}, + workflow: { + jobs: [ + { + next: 'z', + expression: '.', + }, + ], + }, }; t.throws(() => validate(plan), { message: 'Cannot find job: z', @@ -154,9 +136,14 @@ test('throws for a an unknown job with shorthand syntax', (t) => { test('throws for invalid string start', (t) => { const plan: ExecutionPlan = { - start: 'z', - jobs: [{ id: 'a' }], + options: { + start: 'z', + }, + workflow: { + jobs: [job('a')], + }, }; + t.throws(() => validate(plan), { message: 'Could not find start job: z', }); From cf61d6a23a134a41abededc7f37daae13a8191d2 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 09:28:12 +0000 Subject: [PATCH 036/128] runtime: more refactoring --- packages/lexicon/README.md | 2 +- packages/lexicon/core.d.ts | 23 +++- packages/runtime/src/runtime.ts | 2 + packages/runtime/test/runtime.test.ts | 162 
+++++++++++++++----------- 4 files changed, 113 insertions(+), 76 deletions(-) diff --git a/packages/lexicon/README.md b/packages/lexicon/README.md index 7fc062f9b..16e4ec4a9 100644 --- a/packages/lexicon/README.md +++ b/packages/lexicon/README.md @@ -1,4 +1,4 @@ -The lexicon is a central repositoty of key type and word definitions. +The lexicon (aka the OpenFunctionicon) is a central repositoty of key type and word definitions. It's a types repo and glossary at the same time. diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index 6351d0bdf..7825ffa91 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -3,11 +3,13 @@ type UUID = string; /** * An execution plan is a portable definition of a Work Order, * or, a unit of work to execute + * Should it include initial state? I don't really think so + * // BUT the plan sent from Lightning DOES include the initial state */ export type ExecutionPlan = { id?: UUID; // this would bet the run (nee attempt) id workflow: Workflow; - options: any; + options?: WorkflowOptions; }; /** @@ -15,8 +17,14 @@ export type ExecutionPlan = { */ export type Workflow = { id?: UUID; // unique id used to track this workflow. Could be autogenerated - name: string; // user-friendly name. CLI can derive this from file names - steps: Step[]; + + // TODO: make required + name?: string; // user-friendly name. CLI can derive this from file names + + // TODO - rename jobs to steps + // TODO - support Trigger types in steps + jobs: Array; + //steps: Array; }; /** @@ -27,7 +35,12 @@ export type WorkflowOptions = { timeout?: number; stepTimeout?: number; start?: StepId; - intialState?: State; + + statePropsToRemove?: string[]; + + // TODO let's rename this to input, because that's what it is! 
+ // also we need to remove it from here because it's not an option is it + initialState?: State; }; export type StepId = string; @@ -37,7 +50,7 @@ export type StepId = string; * (usually a job) */ export interface Step { - id: StepId; // is this really required? It could be generated + id?: StepId; // is this really required? It could be generated name?: string; // user-friendly name used in logging next?: string | Record; diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index a8f0b4a71..b1dbcb694 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -58,6 +58,8 @@ const loadPlanFromString = (expression: string, logger: Logger) => { return plan; }; +// TODO change to this: +// const run = (xplan: ExecutionPlan | string, input: State, opts: RawOptions = {}) => { const run = (xplan: ExecutionPlan | string, opts: RawOptions = {}) => { const logger = opts.logger || defaultLogger; diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index d4bb1888d..9782915a0 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -1,8 +1,9 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; + import { - ExecutionPlan, NOTIFY_INIT_COMPLETE, NOTIFY_JOB_COMPLETE, NOTIFY_JOB_ERROR, @@ -20,9 +21,11 @@ test('run simple expression', async (t) => { test('run a simple workflow', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { expression: 'export default [(s) => ({ data: { done: true } })]' }, - ], + workflow: { + jobs: [ + { expression: 'export default [(s) => ({ data: { done: true } })]' }, + ], + }, }; const result: any = await run(plan); @@ -43,10 +46,12 @@ test('run a workflow and notify major events', async (t) => { }; const plan: ExecutionPlan = { - jobs: [{ expression: 'export default [(s) => s]' }], + workflow: { + 
jobs: [{ expression: 'export default [(s) => s]' }], + }, }; - await run(plan, {}, { callbacks }); + await run(plan, { callbacks }); t.is(counts[NOTIFY_INIT_START], 1); t.is(counts[NOTIFY_INIT_COMPLETE], 1); @@ -70,12 +75,14 @@ test('notify job error even after fail', async (t) => { }; const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'export default [(s) => s.data.x = s.err.z ]' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'export default [(s) => s.data.x = s.err.z ]' }, + ], + }, }; - await run(plan, {}, { callbacks }); + await run(plan, { callbacks }); }); test('notify job error even after crash', async (t) => { @@ -94,11 +101,11 @@ test('notify job error even after crash', async (t) => { }; const plan: ExecutionPlan = { - jobs: [{ id: 'a', expression: 'export default [() => s]' }], + workflow: { jobs: [{ id: 'a', expression: 'export default [() => s]' }] }, }; try { - await run(plan, {}, { callbacks }); + await run(plan, { callbacks }); } catch (e) { // this will throw, it's fine // don't assert on it, I only wnat to assert in on-error @@ -107,12 +114,14 @@ test('notify job error even after crash', async (t) => { test('resolve a credential', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [(s) => s]', - configuration: 'ccc', - }, - ], + workflow: { + jobs: [ + { + expression: 'export default [(s) => s]', + configuration: 'ccc', + }, + ], + }, }; const options = { @@ -123,19 +132,21 @@ test('resolve a credential', async (t) => { }, }; - const result: any = await run(plan, {}, options); + const result: any = await run(plan, options); t.truthy(result); t.deepEqual(result.configuration, { password: 'password1' }); }); test('resolve initial state', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [(s) => s]', - state: 'abc', - }, - ], + workflow: { + jobs: [ + { + expression: 'export default [(s) => s]', + state: 'abc', + }, + ], + }, }; const options = { @@ 
-144,7 +155,7 @@ test('resolve initial state', async (t) => { }, }; - const result: any = await run(plan, {}, options); + const result: any = await run(plan, options); t.truthy(result); t.deepEqual(result.data, { foo: 'bar' }); }); @@ -163,13 +174,15 @@ test('run a workflow with two jobs and call callbacks', async (t) => { }; const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'export default [(s) => s]', next: { b: true } }, - { id: 'b', expression: 'export default [(s) => s]' }, - ], + workflow: { + jobs: [ + { id: 'a', expression: 'export default [(s) => s]', next: { b: true } }, + { id: 'b', expression: 'export default [(s) => s]' }, + ], + }, }; - await run(plan, {}, { callbacks }); + await run(plan, { callbacks }); t.is(counts['init-start'], 2); t.is(counts['init-complete'], 2); @@ -179,29 +192,34 @@ test('run a workflow with two jobs and call callbacks', async (t) => { test('run a workflow with state and parallel branching', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - expression: - 'export default [(s) => { s.data.count += 1; s.data.a = true; return s}]', - next: { - b: true as const, - c: true as const, + workflow: { + jobs: [ + { + expression: + 'export default [(s) => { s.data.count += 1; s.data.a = true; return s}]', + next: { + b: true as const, + c: true as const, + }, }, - }, - { - id: 'b', - expression: - 'export default [(s) => { s.data.count += 1; s.data.b = true; return s}]', - }, - { - id: 'c', - expression: - 'export default [(s) => { s.data.count += 1; s.data.c = true; return s}]', - }, - ], + { + id: 'b', + expression: + 'export default [(s) => { s.data.count += 1; s.data.b = true; return s}]', + }, + { + id: 'c', + expression: + 'export default [(s) => { s.data.count += 1; s.data.c = true; return s}]', + }, + ], + }, + options: { + initialState: { data: { count: 0 } }, + }, }; - const result: any = await run(plan, { data: { count: 0 } }); + const result: any = await run(plan); t.deepEqual(result, { b: { data: { 
@@ -220,29 +238,33 @@ test('run a workflow with state and parallel branching', async (t) => { }); }); +// TODO this test sort of shows why input state on the plan object is a bit funky +// running the same plan with two inputs is pretty clunky test('run a workflow with state and conditional branching', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [(s) => { s.data.a = true; return s}]', - next: { - b: { - condition: 'state.data.count > 0', - }, - c: { - condition: 'state.data.count == 0', + workflow: { + jobs: [ + { + expression: 'export default [(s) => { s.data.a = true; return s}]', + next: { + b: { + condition: 'state.data.count > 0', + }, + c: { + condition: 'state.data.count == 0', + }, }, }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.data.b = true; return s}]', - }, - { - id: 'c', - expression: 'export default [(s) => { s.data.c = true; return s}]', - }, - ], + { + id: 'b', + expression: 'export default [(s) => { s.data.b = true; return s}]', + }, + { + id: 'c', + expression: 'export default [(s) => { s.data.c = true; return s}]', + }, + ], + }, }; const result1: any = await run(plan, { data: { count: 10 } }); From da7c6090d497885c57b2a3ab148b5583bd19d604 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 10:23:35 +0000 Subject: [PATCH 037/128] runtime: take initial state out of the execution plan --- packages/lexicon/core.d.ts | 5 +- packages/runtime/src/execute/compile-plan.ts | 2 - packages/runtime/src/execute/plan.ts | 19 +- packages/runtime/src/runtime.ts | 23 +- packages/runtime/src/types.ts | 1 - .../runtime/test/execute/compile-plan.test.ts | 33 +- packages/runtime/test/execute/plan.test.ts | 292 +++++++++--------- 7 files changed, 164 insertions(+), 211 deletions(-) diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index 7825ffa91..164d0b338 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -1,5 +1,6 @@ type UUID = string; +export 
type Lazy = T | string; /** * An execution plan is a portable definition of a Work Order, * or, a unit of work to execute @@ -37,10 +38,6 @@ export type WorkflowOptions = { start?: StepId; statePropsToRemove?: string[]; - - // TODO let's rename this to input, because that's what it is! - // also we need to remove it from here because it's not an option is it - initialState?: State; }; export type StepId = string; diff --git a/packages/runtime/src/execute/compile-plan.ts b/packages/runtime/src/execute/compile-plan.ts index 30673d696..098491f50 100644 --- a/packages/runtime/src/execute/compile-plan.ts +++ b/packages/runtime/src/execute/compile-plan.ts @@ -7,7 +7,6 @@ import type { import compileFunction from '../modules/compile-function'; import { conditionContext, Context } from './context'; import { ExecutionPlan, StepEdge, Workflow } from '@openfn/lexicon'; -import { clone, defaultState } from '../util'; const compileEdges = ( from: string, @@ -104,7 +103,6 @@ export default (plan: ExecutionPlan) => { options: { ...options, start: options.start ?? workflow.jobs[0]?.id!, - initialState: clone(options.initialState ?? 
defaultState), }, }; diff --git a/packages/runtime/src/execute/plan.ts b/packages/runtime/src/execute/plan.ts index 792800ae3..d4496cfed 100644 --- a/packages/runtime/src/execute/plan.ts +++ b/packages/runtime/src/execute/plan.ts @@ -1,5 +1,5 @@ import type { Logger } from '@openfn/logger'; -import type { ExecutionPlan, State } from '@openfn/lexicon'; +import type { ExecutionPlan, State, Lazy } from '@openfn/lexicon'; import executeJob from './job'; import compilePlan from './compile-plan'; @@ -12,6 +12,7 @@ import { CompiledExecutionPlan } from '../types'; const executePlan = async ( plan: ExecutionPlan, + input: Lazy, opts: Options, logger: Logger ) => { @@ -40,22 +41,19 @@ const executePlan = async ( // record of state returned by every job const stateHistory: Record = {}; + // Record of state on lead nodes (nodes with no next) const leaves: Record = {}; - let { initialState } = options; - if (typeof initialState === 'string') { - const id = initialState; + if (typeof input === 'string') { + const id = input; const startTime = Date.now(); logger.debug(`fetching intial state ${id}`); - initialState = await opts.callbacks?.resolveState?.(id); - + input = await opts.callbacks?.resolveState?.(id); const duration = Date.now() - startTime; opts.callbacks?.notify?.(NOTIFY_STATE_LOAD, { duration, jobId: id }); logger.success(`loaded state for ${id} in ${duration}ms`); - - // TODO catch and re-throw } // Right now this executes in series, even if jobs are parallelised @@ -63,7 +61,7 @@ const executePlan = async ( const next = queue.shift()!; const job = workflow.jobs[next]; - const prevState = stateHistory[job.previous || ''] ?? initialState; + const prevState = stateHistory[job.previous || ''] ?? 
input; const result = await executeJob(ctx, job, prevState); stateHistory[next] = result.state; @@ -81,7 +79,8 @@ const executePlan = async ( if (Object.keys(leaves).length > 1) { return leaves; } - // Return a single value + + // Otherwise return a single value return Object.values(leaves)[0]; }; diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index b1dbcb694..44117a346 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -1,9 +1,9 @@ import { createMockLogger, Logger } from '@openfn/logger'; -import type { ExecutionPlan } from '@openfn/lexicon'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; import type { ExecutionCallbacks } from './types'; import type { LinkerOptions } from './modules/linker'; import executePlan from './execute/plan'; -import { parseRegex } from './util/index'; +import { defaultState, parseRegex, clone } from './util/index'; export const DEFAULT_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes @@ -58,9 +58,11 @@ const loadPlanFromString = (expression: string, logger: Logger) => { return plan; }; -// TODO change to this: -// const run = (xplan: ExecutionPlan | string, input: State, opts: RawOptions = {}) => { -const run = (xplan: ExecutionPlan | string, opts: RawOptions = {}) => { +const run = ( + xplan: ExecutionPlan | string, + input: State, + opts: RawOptions = {} +) => { const logger = opts.logger || defaultLogger; if (typeof xplan === 'string') { @@ -71,6 +73,10 @@ const run = (xplan: ExecutionPlan | string, opts: RawOptions = {}) => { xplan.options = {}; } + if (!input) { + input = clone(defaultState); + } + const { options } = xplan; // TODO remove @@ -91,12 +97,7 @@ const run = (xplan: ExecutionPlan | string, opts: RawOptions = {}) => { }); } - // TODO change where initial state comes from (ie never from options) - if (!xplan.options.initialState) { - xplan.options.initialState = (options as any).intitialState; - } - - return executePlan(xplan, opts as Options, 
logger); + return executePlan(xplan, input, opts as Options, logger); }; export default run; diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index 49b87623b..882b55590 100644 --- a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -38,7 +38,6 @@ export type CompiledExecutionPlan = { }; options: WorkflowOptions & { start: StepId; - initialState: Lazy; }; }; diff --git a/packages/runtime/test/execute/compile-plan.test.ts b/packages/runtime/test/execute/compile-plan.test.ts index d3a384b8a..23c1bcce3 100644 --- a/packages/runtime/test/execute/compile-plan.test.ts +++ b/packages/runtime/test/execute/compile-plan.test.ts @@ -33,36 +33,6 @@ const planWithEdge = (edge: Partial) => ({ }, }); -test('should preserve initial state as an object', (t) => { - const state = { x: 123 }; - const compiledPlan = compilePlan({ - id: 'a', - workflow: { - jobs: [], - }, - options: { - initialState: state, - }, - }); - t.deepEqual(state, compiledPlan.options.initialState); -}); - -test('should preserve initial state a string', (t) => { - const compiledPlan = compilePlan({ - id: 'a', - workflow: { - jobs: [], - }, - options: { - // @ts-ignore - initialState: 'abc', - }, - }); - - // @ts-ignore - t.is(compiledPlan.options.initialState, 'abc'); -}); - test('should preserve the start option', (t) => { const compiledPlan = compilePlan({ id: 'a', @@ -91,10 +61,11 @@ test('should preserve arbitrary options', (t) => { }, }); - t.deepEqual(compiledPlan.options.start, { + t.deepEqual(compiledPlan.options, { a: 1, z: 2, '-': 3, + start: 'a', }); }); diff --git a/packages/runtime/test/execute/plan.test.ts b/packages/runtime/test/execute/plan.test.ts index 52ecd80b6..c2f8fcd07 100644 --- a/packages/runtime/test/execute/plan.test.ts +++ b/packages/runtime/test/execute/plan.test.ts @@ -3,7 +3,7 @@ import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; import type { ExecutionPlan, Job } from '@openfn/lexicon'; -import execute from 
'./../../src/execute/plan'; +import executePlan from './../../src/execute/plan'; import { CompiledExecutionPlan } from '../../src'; let mockLogger = createMockLogger(undefined, { level: 'debug' }); @@ -30,7 +30,7 @@ test('throw for a circular job', async (t) => { createJob({ next: { job2: true } }), createJob({ id: 'job2', next: { job1: true } }), ]); - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /circular dependency/i); }); @@ -41,14 +41,14 @@ test('throw for a job with multiple inputs', async (t) => { createJob({ id: 'job3' }), ]); - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /multiple dependencies/i); }); test('throw for a plan which references an undefined job', async (t) => { const plan = createPlan([createJob({ next: { job3: true } })]); - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /cannot find job/i); }); @@ -63,7 +63,7 @@ test('throw for an illegal edge condition', async (t) => { }), createJob({ id: 'job2' }), ]); - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /failed to compile edge condition job1->job2/i); }); @@ -74,39 +74,38 @@ test('execute a one-job execution plan with inline state', async (t) => { state: { data: { x: 22 } }, }), ]); - const result = (await execute(plan, {}, mockLogger)) as unknown as number; + + const result: any = (await executePlan( + plan, + {}, + {}, + mockLogger + )) as unknown as number; t.is(result, 22); }); test('execute a one-job execution plan with initial state', async (t) => { - const plan = createPlan( - [ - createJob({ - expression: 'export 
default [s => s.data.x]', - }), - ], - { - initialState: { - data: { x: 33 }, - }, - } - ); - const result = (await execute(plan, {}, mockLogger)) as unknown as number; + const plan = createPlan([ + createJob({ + expression: 'export default [s => s.data.x]', + }), + ]); + const input = { + data: { x: 33 }, + }; + + const result: any = await executePlan(plan, input, {}, mockLogger); + t.is(result, 33); }); test('lazy load initial state', async (t) => { - const plan = createPlan( - [ - createJob({ - expression: 'export default [s => s]', - }), - ], - { - // @ts-ignore TODO tidy this up - initialState: 's1', - } - ); + const plan = createPlan([ + createJob({ + expression: 'export default [s => s]', + }), + ]); + const state = 's1'; const states = { s1: { data: { result: 42 } } }; const options = { @@ -115,7 +114,7 @@ test('lazy load initial state', async (t) => { }, }; - const result = await execute(plan, options, mockLogger); + const result: any = await executePlan(plan, state, options, mockLogger); t.deepEqual(result, states.s1); }); @@ -126,16 +125,11 @@ test('execute a one-job execution plan and notify init-start and init-complete', data: { x: 33 }, }; - const plan = createPlan( - [ - createJob({ - expression: 'export default [s => s.data.x]', - }), - ], - { - initialState: state, - } - ); + const plan = createPlan([ + createJob({ + expression: 'export default [s => s.data.x]', + }), + ]); const notify = (event: string, payload: any) => { if (notifications[event]) { @@ -146,7 +140,7 @@ test('execute a one-job execution plan and notify init-start and init-complete', const options = { callbacks: { notify } }; - await execute(plan, options, mockLogger); + await executePlan(plan, state, options, mockLogger); t.truthy(notifications['init-start']); t.truthy(notifications['init-complete']); @@ -168,7 +162,7 @@ test('execute a job with a simple truthy "precondition" or "trigger node"', asyn }), ]); - const result = await execute(plan, {}, mockLogger); + const result: any 
= await executePlan(plan, {}, {}, mockLogger); t.true(result.data.done); }); @@ -187,7 +181,7 @@ test('do not execute a job with a simple falsy "precondition" or "trigger node"' }), ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.falsy(result.data.done); }); @@ -213,42 +207,34 @@ test('execute a job with a valid "precondition" or "trigger node"', async (t) => } ); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.true(result.data.done); }); test('merge initial and inline state', async (t) => { - const plan = createPlan( - [ - createJob({ - expression: 'export default [s => s]', - state: { data: { y: 11 } }, - }), - ], - { - initialState: { data: { x: 33 } }, - } - ); + const plan = createPlan([ + createJob({ + expression: 'export default [s => s]', + state: { data: { y: 11 } }, + }), + ]); + const state = { data: { x: 33 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 33); t.is(result.data.y, 11); }); test('Initial state overrides inline data', async (t) => { - const plan = createPlan( - [ - createJob({ - expression: 'export default [s => s]', - state: { data: { y: 11 } }, - }), - ], - { - initialState: { data: { x: 34 } }, - } - ); + const plan = createPlan([ + createJob({ + expression: 'export default [s => s]', + state: { data: { y: 11 } }, + }), + ]); + const state = { data: { x: 34 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 34); }); @@ -269,7 +255,7 @@ test('Previous state overrides inline data', async (t) => { }), ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data.x, 6); }); @@ -290,7 +276,7 @@ test('only allowed 
state is passed through in strict mode', async (t) => { }), ]); - const result = await execute(plan, { strict: true }, mockLogger); + const result: any = await executePlan(plan, {}, { strict: true }, mockLogger); t.deepEqual(result, { data: {}, references: [], @@ -351,7 +337,7 @@ test('Jobs only receive state from upstream jobs', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); // explicit check that no assertion failed and wrote an error to state t.falsy(result.error); @@ -380,7 +366,12 @@ test('all state is passed through in non-strict mode', async (t) => { }), ]); - const result = await execute(plan, { strict: false }, mockLogger); + const result: any = await executePlan( + plan, + {}, + { strict: false }, + mockLogger + ); t.deepEqual(result, { data: {}, references: [], @@ -404,7 +395,7 @@ test('execute edge based on state in the condition', async (t) => { expression: 'export default [() => ({ data: { y: 20 } })]', }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data?.y, 20); }); @@ -423,7 +414,7 @@ test('skip edge based on state in the condition ', async (t) => { expression: 'export default [() => ({ y: 20 })]', }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data?.x, 10); }); @@ -444,47 +435,42 @@ test('do not traverse a disabled edge', async (t) => { expression: 'export default [() => ({ data: { x: 20 } })]', }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data?.x, 10); }); test('execute a two-job execution plan', async (t) => { - const plan = createPlan( - [ - { - id: 'job1', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - next: { job2: true }, - }, - { - id: 'job2', - 
expression: 'export default [s => { s.data.x += 1; return s; } ]', - }, - ], - { initialState: { data: { x: 0 } } } - ); + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { job2: true }, + }, + { + id: 'job2', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ]); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 2); }); test('only execute one job in a two-job execution plan', async (t) => { - const plan = createPlan( - [ - { - id: 'job1', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - next: { job2: false }, - }, - { - id: 'job2', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - }, - ], - { initialState: { data: { x: 0 } } } - ); - - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { job2: false }, + }, + { + id: 'job2', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ]); + const state = { data: { x: 0 } }; + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 1); }); @@ -504,7 +490,7 @@ test('execute a two-job execution plan with custom start', async (t) => { { start: 'job2' } ); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data.result, 11); }); @@ -520,10 +506,11 @@ test('Return when there are no more edges', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - { start: 'job1', initialState: { data: { x: 0 } } } + { start: 'job1' } ); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, 
mockLogger); t.is(result.data?.x, 1); }); @@ -538,11 +525,11 @@ test('execute a 5 job execution plan', async (t) => { } const plan = createPlan(jobs, { - initialState: { data: { x: 0 } }, start: '1', }); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 5); }); @@ -571,10 +558,11 @@ test('execute multiple steps in "parallel"', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - { start: 'start', initialState: { data: { x: 0 } } } + { start: 'start' } ); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.deepEqual(result, { a: { data: { x: 1 } }, b: { data: { x: 1 } }, @@ -604,10 +592,11 @@ test('isolate state in "parallel" execution', async (t) => { 'export default [s => { if (s.data.b) { throw "e" }; s.data.c = true; return s }]', }, ], - { start: 'start', initialState: { data: { x: 0 } } } + { start: 'start' } ); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.falsy(result.errors); }); @@ -646,10 +635,11 @@ test('isolate state in "parallel" execution with deeper trees', async (t) => { 'export default [s => { if (s.data.c) { throw "e" }; s.data.b = true; return s }]', }, ], - { start: 'start', initialState: { data: { x: 0 } } } + { start: 'start' } ); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.falsy(result.errors); }); @@ -673,7 +663,7 @@ test('"parallel" execution with multiple leaves should write multiple results to }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); // Each leaf 
should write to its own place on state t.deepEqual(result, { 'job-b': { @@ -698,7 +688,7 @@ test('return an error in state', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.truthy(result.errors); t.is(result.errors.a.message, 'e'); }); @@ -713,7 +703,7 @@ test('handle non-standard error objects', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.truthy(result.errors); const err = result.errors.a; t.is(err.type, 'JobError'); @@ -736,7 +726,7 @@ test('keep executing after an error', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.y, 20); t.falsy(result.x); }); @@ -762,7 +752,7 @@ test('simple on-error handler', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.y, 20); t.falsy(result.x); }); @@ -778,7 +768,7 @@ test('log appopriately on error', async (t) => { const logger = createMockLogger(undefined, { level: 'debug' }); - await execute(plan, {}, logger); + await executePlan(plan, {}, {}, logger); const err = logger._find('error', /failed job/i); t.truthy(err); t.regex(err!.message as string, /Failed job job1 after \d+ms/i); @@ -788,25 +778,22 @@ test('log appopriately on error', async (t) => { }); test('jobs do not share a local scope', async (t) => { - const plan = createPlan( - [ - { - id: 'job1', - // declare x in this expression's scope - expression: 'const x = 10; export default [s => s];', - next: { - b: true, - }, - }, - { - id: 'b', - // x should not defined here and this will throw - expression: 'export default [s => { s.data.x = x; return s; }]', + const plan = createPlan([ + { + id: 'job1', + // declare x in this expression's scope + expression: 'const x = 
10; export default [s => s];', + next: { + b: true, }, - ], - { initialState: { data: {} } } - ); - await t.throwsAsync(() => execute(plan, {}, mockLogger), { + }, + { + id: 'b', + // x should not defined here and this will throw + expression: 'export default [s => { s.data.x = x; return s; }]', + }, + ]); + await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger), { message: 'ReferenceError: x is not defined', name: 'RuntimeCrash', }); @@ -826,9 +813,8 @@ test('jobs do not share a global scope', async (t) => { expression: 'export default [s => { s.data.x = x; return s; }]', }, ]); - console.log(JSON.stringify(plan, null, 2)); - await t.throwsAsync(() => execute(plan, {}, mockLogger), { + await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger), { message: 'ReferenceError: x is not defined', name: 'RuntimeCrash', }); @@ -849,7 +835,8 @@ test('jobs do not share a globalThis object', async (t) => { 'export default [(s) => { s.data.x = globalThis.x; return s; }]', }, ]); - const result = await execute(plan, {}, mockLogger); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.deepEqual(result, { data: {} }); }); @@ -869,7 +856,8 @@ test.skip('jobs cannot scribble on globals', async (t) => { expression: 'export default [s => { s.data.x = console.x; return s; }]', }, ]); - const result = await execute(plan, {}, mockLogger); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.falsy(result.data.x); }); @@ -901,7 +889,7 @@ test.skip('jobs cannot scribble on adaptor functions', async (t) => { }, }; - const result = await execute(plan, options, mockLogger); + const result: any = await executePlan(plan, {}, options, mockLogger); t.falsy(result.data.x); }); @@ -927,7 +915,7 @@ test('jobs can write circular references to state without blowing up downstream' }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.notThrows(() => JSON.stringify(result)); 
t.deepEqual(result, { @@ -963,7 +951,7 @@ test('jobs cannot pass circular references to each other', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.notThrows(() => JSON.stringify(result)); t.is(result.data.answer, '[Circular]'); @@ -987,7 +975,7 @@ test('jobs can write functions to state without blowing up downstream', async (t }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.notThrows(() => JSON.stringify(result)); t.deepEqual(result, { data: {} }); @@ -1013,7 +1001,7 @@ test('jobs cannot pass functions to each other', async (t) => { }, ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); const error = result.errors.b; t.is(error.type, 'TypeError'); @@ -1028,7 +1016,7 @@ test('Plans log for each job start and end', async (t) => { }, ]); const logger = createMockLogger(undefined, { level: 'debug' }); - await execute(plan, {}, logger); + await executePlan(plan, {}, {}, logger); const start = logger._find('always', /starting job/i); t.is(start!.message, 'Starting job a'); From 5f3e75aace53220b0933c044b43df7e07ae10646 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 10:54:47 +0000 Subject: [PATCH 038/128] fix tests --- packages/runtime/test/context.test.ts | 28 +-- packages/runtime/test/errors.test.ts | 48 +++-- packages/runtime/test/runtime.test.ts | 253 ++++++++++++++----------- packages/runtime/test/security.test.ts | 35 +--- 4 files changed, 183 insertions(+), 181 deletions(-) diff --git a/packages/runtime/test/context.test.ts b/packages/runtime/test/context.test.ts index 0c33c81d1..4583837cb 100644 --- a/packages/runtime/test/context.test.ts +++ b/packages/runtime/test/context.test.ts @@ -6,28 +6,14 @@ import { State } from '@openfn/lexicon'; const createState = (data = {}) => ({ data, configuration: {} 
}); -const createPlan = (expression: string, initialState: State) => ({ - workflow: { - jobs: [ - { - expression, - }, - ], - }, - options: { - initialState, - }, -}); - test('makes parseInt available inside the job', async (t) => { const expression = ` export default [ (s) => { s.data.count = parseInt(s.data.count); return s; } ];`; - const intialState = createState({ count: '22' }); - const plan = createPlan(expression, intialState); + const input = createState({ count: '22' }); - const result = await run(plan); + const result = await run(expression, input); t.deepEqual(result.data, { count: 22 }); }); @@ -41,9 +27,8 @@ test('makes Set available inside the job', async (t) => { ];`; const state = createState({ count: '33' }); - const plan = createPlan(expression, state); - const result = await run(plan); + const result = await run(expression, state); t.deepEqual(result.data, { count: '33' }); }); @@ -57,9 +42,7 @@ test("doesn't allow process inside the job", async (t) => { } ];`; - const plan = createPlan(expression, createState()); - - await t.throwsAsync(() => run(plan, { logger }), { + await t.throwsAsync(() => run(expression, {}, { logger }), { name: 'RuntimeCrash', message: 'ReferenceError: process is not defined', }); @@ -72,8 +55,7 @@ test("doesn't allow eval inside a job", async (t) => { (state) => eval('ok') // should throw ];`; - const plan = createPlan(expression, createState()); - await t.throwsAsync(() => run(plan, { logger }), { + await t.throwsAsync(() => run(expression, {}, { logger }), { name: 'SecurityError', message: /Illegal eval statement detected/, }); diff --git a/packages/runtime/test/errors.test.ts b/packages/runtime/test/errors.test.ts index c1e360ab5..6a7b7af45 100644 --- a/packages/runtime/test/errors.test.ts +++ b/packages/runtime/test/errors.test.ts @@ -19,7 +19,6 @@ test('crash on timeout', async (t) => { const expression = 'export default [(s) => new Promise((resolve) => {})]'; const plan = createPlan(expression, { timeout: 1 }); - 
console.log(plan); let error; try { await run(plan); @@ -100,7 +99,6 @@ test('crash on edge condition error with EdgeConditionError', async (t) => { { id: 'b', expression: '.' }, ], }, - options: {}, }; let error; @@ -142,11 +140,15 @@ test('crash on blacklisted module', async (t) => { let error; try { - await run(expression, { - linker: { - whitelist: [/^@opennfn/], - }, - }); + await run( + expression, + {}, + { + linker: { + whitelist: [/^@opennfn/], + }, + } + ); } catch (e) { error = e; } @@ -214,13 +216,17 @@ test('fail on adaptor error (with throw new Error())', async (t) => { import { err } from 'x'; export default [(s) => err()]; `; - const result = await run(expression, { - linker: { - modules: { - x: { path: path.resolve('test/__modules__/test') }, + const result = await run( + expression, + {}, + { + linker: { + modules: { + x: { path: path.resolve('test/__modules__/test') }, + }, }, - }, - }); + } + ); const error = result.errors['job-1']; t.is(error.type, 'AdaptorError'); @@ -235,13 +241,17 @@ test('adaptor error with no stack trace will be a user error', async (t) => { import { err2 } from 'x'; export default [(s) => err2()]; `; - const result = await run(expression, { - linker: { - modules: { - x: { path: path.resolve('test/__modules__/test') }, + const result = await run( + expression, + {}, + { + linker: { + modules: { + x: { path: path.resolve('test/__modules__/test') }, + }, }, - }, - }); + } + ); const error = result.errors['job-1']; t.is(error.type, 'JobError'); diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index 9782915a0..5b5c64140 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -51,7 +51,7 @@ test('run a workflow and notify major events', async (t) => { }, }; - await run(plan, { callbacks }); + await run(plan, {}, { callbacks }); t.is(counts[NOTIFY_INIT_START], 1); t.is(counts[NOTIFY_INIT_COMPLETE], 1); @@ -82,7 +82,7 @@ test('notify job error 
even after fail', async (t) => { }, }; - await run(plan, { callbacks }); + await run(plan, {}, { callbacks }); }); test('notify job error even after crash', async (t) => { @@ -105,7 +105,7 @@ test('notify job error even after crash', async (t) => { }; try { - await run(plan, { callbacks }); + await run(plan, {}, { callbacks }); } catch (e) { // this will throw, it's fine // don't assert on it, I only wnat to assert in on-error @@ -122,17 +122,19 @@ test('resolve a credential', async (t) => { }, ], }, + options: { + statePropsToRemove: [], + }, }; const options = { strict: false, - statePropsToRemove: [], callbacks: { resolveCredential: async () => ({ password: 'password1' }), }, }; - const result: any = await run(plan, options); + const result: any = await run(plan, {}, options); t.truthy(result); t.deepEqual(result.configuration, { password: 'password1' }); }); @@ -155,7 +157,7 @@ test('resolve initial state', async (t) => { }, }; - const result: any = await run(plan, options); + const result: any = await run(plan, {}, options); t.truthy(result); t.deepEqual(result.data, { foo: 'bar' }); }); @@ -182,7 +184,7 @@ test('run a workflow with two jobs and call callbacks', async (t) => { }, }; - await run(plan, { callbacks }); + await run(plan, {}, { callbacks }); t.is(counts['init-start'], 2); t.is(counts['init-complete'], 2); @@ -214,12 +216,11 @@ test('run a workflow with state and parallel branching', async (t) => { }, ], }, - options: { - initialState: { data: { count: 0 } }, - }, }; - const result: any = await run(plan); + const state = { data: { count: 0 } }; + + const result: any = await run(plan, state); t.deepEqual(result, { b: { data: { @@ -282,40 +283,48 @@ test('run a workflow with state and conditional branching', async (t) => { test('run a workflow with initial state (data key) and optional start', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - // won't run - id: 'a', - expression: 'export default [(s) => { s.data.count +=1 ; return s}]', - 
next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.data.count +=1 ; return s}]', - next: { c: true }, - }, - { - id: 'c', - expression: 'export default [(s) => { s.data.count +=1 ; return s}]', - }, - ], + workflow: { + jobs: [ + { + // won't run + id: 'a', + expression: 'export default [(s) => { s.data.count +=1 ; return s}]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s) => { s.data.count +=1 ; return s}]', + next: { c: true }, + }, + { + id: 'c', + expression: 'export default [(s) => { s.data.count +=1 ; return s}]', + }, + ], + }, + options: { + start: 'b', + }, }; - const result: any = await run(plan, { data: { count: 10 } }, { start: 'b' }); + const result: any = await run(plan, { data: { count: 10 } }); t.is(result.data.count, 12); }); test('run a workflow with a trigger node', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - next: { b: { condition: 'state.data.age > 18 ' } }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.data.done = true ; return s}]', - }, - ], + workflow: { + jobs: [ + { + next: { b: { condition: 'state.data.age > 18 ' } }, + }, + { + id: 'b', + expression: + 'export default [(s) => { s.data.done = true ; return s}]', + }, + ], + }, }; const result: any = await run(plan, { data: { age: 28 } }); @@ -324,17 +333,19 @@ test('run a workflow with a trigger node', async (t) => { test('prefer initial state to inline state', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - state: { - data: { - x: 20, // this will be overriden by the incoming state - y: 20, // This will be untouched + workflow: { + jobs: [ + { + state: { + data: { + x: 20, // this will be overriden by the incoming state + y: 20, // This will be untouched + }, }, + expression: 'export default [(s) => s]', }, - expression: 'export default [(s) => s]', - }, - ], + ], + }, }; const result: any = await run(plan, { data: { x: 40 } }); @@ -344,11 +355,13 @@ test('prefer initial state to 
inline state', async (t) => { test('do not pass extraneous state in strict mode', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [() => ({ x: 1, data: {}} )]', - }, - ], + workflow: { + jobs: [ + { + expression: 'export default [() => ({ x: 1, data: {}} )]', + }, + ], + }, }; const result: any = await run(plan, {}, { strict: true }); @@ -359,11 +372,13 @@ test('do not pass extraneous state in strict mode', async (t) => { test('do pass extraneous state in non-strict mode', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [() => ({ x: 1, data: {}} )]', - }, - ], + workflow: { + jobs: [ + { + expression: 'export default [() => ({ x: 1, data: {}} )]', + }, + ], + }, }; const result: any = await run(plan, {}, { strict: false }); @@ -375,7 +390,9 @@ test('do pass extraneous state in non-strict mode', async (t) => { test('Allow a job to return undefined', async (t) => { const plan: ExecutionPlan = { - jobs: [{ expression: 'export default [() => {}]' }], + workflow: { + jobs: [{ expression: 'export default [() => {}]' }], + }, }; const result: any = await run(plan); @@ -384,17 +401,19 @@ test('Allow a job to return undefined', async (t) => { test('log errors, write to state, and continue', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [() => { throw new Error("test") }]', - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.x = 1; return s; }]', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: 'export default [() => { throw new Error("test") }]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s) => { s.x = 1; return s; }]', + }, + ], + }, }; const logger = createMockLogger(); @@ -410,12 +429,14 @@ test('log errors, write to state, and continue', async (t) => { test('log job code to the job logger', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - 
expression: 'export default [(s) => { console.log("hi"); return s;}]', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: 'export default [(s) => { console.log("hi"); return s;}]', + }, + ], + }, }; const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); @@ -430,13 +451,15 @@ test('log job code to the job logger', async (t) => { test('log and serialize an error to the job logger', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: - 'export default [(s) => { console.log(new Error("hi")); return s;}]', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: + 'export default [(s) => { console.log(new Error("hi")); return s;}]', + }, + ], + }, }; const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); @@ -455,17 +478,19 @@ test('log and serialize an error to the job logger', async (t) => { test('error reports can be overwritten', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [() => { throw new Error("test") }]', - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s) => ({ errors: 22 })]', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: 'export default [() => { throw new Error("test") }]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s) => ({ errors: 22 })]', + }, + ], + }, }; const logger = createMockLogger(); @@ -477,14 +502,16 @@ test('error reports can be overwritten', async (t) => { // This tracks current behaviour but I don't know if it's right test('stuff written to state before an error is preserved', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - data: { x: 0 }, - expression: - 'export default [(s) => { s.x = 1; throw new Error("test") }]', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + data: { x: 0 }, + expression: + 'export default [(s) => { s.x = 1; throw new Error("test") }]', + }, + ], + }, }; const logger = 
createMockLogger(); @@ -502,17 +529,19 @@ test('data can be an array (expression)', async (t) => { test('data can be an array (workflow)', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [() => ({ data: [1,2,3] })]', - next: 'b', - }, - { - id: 'b', - expression: 'export default [(s) => s]', - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: 'export default [() => ({ data: [1,2,3] })]', + next: 'b', + }, + { + id: 'b', + expression: 'export default [(s) => s]', + }, + ], + }, }; const result: any = await run(plan, {}, { strict: false }); diff --git a/packages/runtime/test/security.test.ts b/packages/runtime/test/security.test.ts index eab8bf3a0..1b7da2cd2 100644 --- a/packages/runtime/test/security.test.ts +++ b/packages/runtime/test/security.test.ts @@ -5,22 +5,9 @@ import type { ExecutionPlan, State } from '@openfn/lexicon'; import doRun from '../src/runtime'; -const createPlan = (expression: string, state: State = {}) => ({ - workflow: { - jobs: [ - { - expression, - }, - ], - }, - options: { - initialState: state, - }, -}); - // Disable strict mode for all these tests -const run = (plan: ExecutionPlan, options: any = {}) => - doRun(plan, { strict: false, ...options }); +const run = (plan: ExecutionPlan | string, state: State, options: any = {}) => + doRun(plan, state, { strict: false, ...options }); const logger = createMockLogger(undefined, { level: 'default' }); @@ -39,9 +26,8 @@ test.serial( password: 'secret', }, }; - const plan = createPlan(src, state); - const result: any = await run(plan); + const result: any = await run(src, state); t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } @@ -59,8 +45,7 @@ test.serial( }, }; - const plan = createPlan(src, state); - const result: any = await run(plan, { strict: true }); + const result: any = await run(src, state, { strict: true }); t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } @@ -78,8 +63,7 @@ 
test.serial( }, }; - const plan = createPlan(src, state); - const result: any = await run(plan, { strict: false }); + const result: any = await run(src, state, { strict: false }); t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } @@ -96,9 +80,8 @@ test.serial( password: 'secret', }, }; - const plan = createPlan(src, state); - const result: any = await run(plan, { strict: false }); + const result: any = await run(src, state, { strict: false }); t.truthy(result.errors); t.deepEqual(result.data, {}); t.is(result.configuration, undefined); @@ -120,16 +103,14 @@ test.serial('jobs should not have access to global scope', async (t) => { test.serial('jobs should be able to read global state', async (t) => { const src = 'export default [() => state.data.x]'; - const plan = createPlan(src, { data: { x: 42 } }); - const result: any = await run(plan); + const result: any = await run(src, { data: { x: 42 } }); t.is(result, 42); }); test.serial('jobs should be able to mutate global state', async (t) => { const src = 'export default [() => { state.x = 22; return state.x; }]'; - const plan = createPlan(src, { data: { x: 42 } }); - const result: any = await run(plan); + const result: any = await run(src, { data: { x: 42 } }); t.is(result, 22); }); From 5eae3ec29ffb2c27b774fcf631dc137ee41ff82a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 10:55:46 +0000 Subject: [PATCH 039/128] runtime: changeset --- .changeset/pretty-spoons-beam.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changeset/pretty-spoons-beam.md diff --git a/.changeset/pretty-spoons-beam.md b/.changeset/pretty-spoons-beam.md new file mode 100644 index 000000000..61d01d3f3 --- /dev/null +++ b/.changeset/pretty-spoons-beam.md @@ -0,0 +1,6 @@ +--- +'@openfn/runtime': major +--- + +Update main run() signature +Integrate with lexicon From fbdd43f26be60f2a734c80a593840fb6946b31a0 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 10:59:22 +0000 Subject: 
[PATCH 040/128] runtime: extra type tweakings --- packages/lexicon/core.d.ts | 2 +- packages/runtime/src/modules/module-loader.ts | 2 +- packages/runtime/src/runtime.ts | 4 ++-- packages/runtime/src/types.ts | 8 +------- packages/runtime/src/util/clone.ts | 2 +- packages/runtime/src/util/execute.ts | 2 +- 6 files changed, 7 insertions(+), 13 deletions(-) diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index 164d0b338..612fd1a3c 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -10,7 +10,7 @@ export type Lazy = T | string; export type ExecutionPlan = { id?: UUID; // this would bet the run (nee attempt) id workflow: Workflow; - options?: WorkflowOptions; + options: WorkflowOptions; }; /** diff --git a/packages/runtime/src/modules/module-loader.ts b/packages/runtime/src/modules/module-loader.ts index 5e87653bb..fa239b319 100644 --- a/packages/runtime/src/modules/module-loader.ts +++ b/packages/runtime/src/modules/module-loader.ts @@ -4,7 +4,7 @@ import vm, { Context } from './experimental-vm'; import mainLinker, { Linker, LinkerOptions } from './linker'; -import type { Operation } from '../types'; +import type { Operation } from '@openfn/lexicon'; import type { Logger } from '@openfn/logger'; type Options = LinkerOptions & { diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 44117a346..1523043d8 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -59,7 +59,7 @@ const loadPlanFromString = (expression: string, logger: Logger) => { }; const run = ( - xplan: ExecutionPlan | string, + xplan: Partial | string, input: State, opts: RawOptions = {} ) => { @@ -97,7 +97,7 @@ const run = ( }); } - return executePlan(xplan, input, opts as Options, logger); + return executePlan(xplan as ExecutionPlan, input, opts as Options, logger); }; export default run; diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index 882b55590..2fbb88d34 100644 --- 
a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -1,10 +1,4 @@ -import { - State, - Operation, - Job, - StepId, - WorkflowOptions, -} from '@openfn/lexicon'; +import { Operation, Job, StepId, WorkflowOptions } from '@openfn/lexicon'; import { Logger } from '@openfn/logger'; import { Options } from './runtime'; diff --git a/packages/runtime/src/util/clone.ts b/packages/runtime/src/util/clone.ts index d81320f4a..408f108a6 100644 --- a/packages/runtime/src/util/clone.ts +++ b/packages/runtime/src/util/clone.ts @@ -1,4 +1,4 @@ -import type { State } from '../types'; +import type { State } from '@openfn/lexicon'; // TODO I'm in the market for the best solution here - immer? deep-clone? // What should we do if functions are in the state? diff --git a/packages/runtime/src/util/execute.ts b/packages/runtime/src/util/execute.ts index bd2d6aaa5..7c5f03439 100644 --- a/packages/runtime/src/util/execute.ts +++ b/packages/runtime/src/util/execute.ts @@ -1,4 +1,4 @@ -import type { Operation, State } from '../types'; +import type { Operation, State } from '@openfn/lexicon'; // Standard execute factory export default (...operations: Operation[]): Operation => { From e9dacc4241be3c0f7234e5d046a2a9006967d7c1 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 11:01:03 +0000 Subject: [PATCH 041/128] runtime: readme --- packages/runtime/README.md | 44 ++++++++++++++++++++++---------------- 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/packages/runtime/README.md b/packages/runtime/README.md index 52723b6cd..7ae05271b 100644 --- a/packages/runtime/README.md +++ b/packages/runtime/README.md @@ -54,29 +54,35 @@ It is expected that that long-running runtimes will have some kind of purge func ## Execution Plans -The runtime can accept an Execution Plan (or workflow) as an input. +The runtime can accept an Execution Plan (or workflow) as an input. This defines a graph of of jobs (expressions) to run in sequence. 
Each node in the graph is a job, and contains a set of edges which tell the runtime what to execute next. The runtime will return the final state when there is nothing left to execute. -A workflow looks like this: -``` +An execution plan looks like this: + +```js { - start: 'a', - jobs: [{ - id: 'a', - expression: "source or path", - state: { /* default state */ }, - configuration: { /* credentials */ }, - next: { - 'b': true, // edge to another job - 'c': { condition: "state.data.age > 18", // conditional edge to another job - } - adaptor: "common", // it's complicated - }] + workflow: { + jobs: [{ + id: 'a', + expression: "source or path", + state: { /* default state */ }, + configuration: { /* credentials */ }, + next: { + 'b': true, // edge to another job + 'c': { condition: "state.data.age > 18", // conditional edge to another job + } + adaptor: "common", // it's complicated + }] + }, + options: { + start: 'a', + } } ``` + State and start node can be passed to the runtime as inputs. If no start node is provided, the first job in the jobs array will run first. @@ -88,9 +94,10 @@ The runtime itself does not use the `adaptor` key, as it expects jobs to be comp See src/types.ts for a full definition of an execution plan, and `test/runtime.test.ts` for examples. At the time of writing, exectuion plans have some restrictions: -* Jobs execute in series (but parallisation can be simulated) -* A job can only have one input node (`a -> z <- b` is not allowed) -* Jobs cannot have circular references (`a -> b -> a` is not allowed) + +- Jobs execute in series (but parallisation can be simulated) +- A job can only have one input node (`a -> z <- b` is not allowed) +- Jobs cannot have circular references (`a -> b -> a` is not allowed) Support for more complex plans will be introduced later. 
@@ -149,6 +156,7 @@ When a job calls `import` to import a dependent module, the runtime must resolve It does this through a `linker` function, which takes as arguments a package specifier and `vm` context, and an options object. It will load the module using a dynamic `import` and proxy the interface through a `vm.SyntheticModules`, usng the experimental `vm.SourceTextModule` API. Modules can be loaded from: + - An explicit path (pass as a dictionary of name: path strings into the options) - The current working repo (see below) - The current working node_modules (should we somehow disallow this?) From 372ae3d5e73883195ecda557fae9b15350e86dcf Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 12:25:57 +0000 Subject: [PATCH 042/128] runtime: jobs -> steps (mostly) there are cases where job is more accurate and useful --- packages/lexicon/core.d.ts | 95 ++++++-------- packages/runtime/src/execute/compile-plan.ts | 57 ++++---- packages/runtime/src/execute/plan.ts | 6 +- .../runtime/src/execute/{job.ts => step.ts} | 96 +++++++------- packages/runtime/src/runtime.ts | 4 +- packages/runtime/src/types.ts | 12 +- packages/runtime/src/util/validate-plan.ts | 10 +- packages/runtime/test/errors.test.ts | 4 +- .../runtime/test/execute/compile-plan.test.ts | 123 +++++++++--------- packages/runtime/test/execute/plan.test.ts | 61 +++++---- .../execute/{job.test.ts => step.test.ts} | 92 ++++++------- packages/runtime/test/memory.test.ts | 19 ++- packages/runtime/test/runtime.test.ts | 86 ++++++------ packages/runtime/test/security.test.ts | 2 +- .../runtime/test/util/validate-plan.test.ts | 16 +-- 15 files changed, 349 insertions(+), 334 deletions(-) rename packages/runtime/src/execute/{job.ts => step.ts} (70%) rename packages/runtime/test/execute/{job.test.ts => step.test.ts} (74%) diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index 612fd1a3c..27469b326 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -1,14 +1,9 @@ 
-type UUID = string; - -export type Lazy = T | string; /** * An execution plan is a portable definition of a Work Order, * or, a unit of work to execute - * Should it include initial state? I don't really think so - * // BUT the plan sent from Lightning DOES include the initial state */ export type ExecutionPlan = { - id?: UUID; // this would bet the run (nee attempt) id + id?: UUID; // this would be the run (nee attempt) id workflow: Workflow; options: WorkflowOptions; }; @@ -19,24 +14,48 @@ export type ExecutionPlan = { export type Workflow = { id?: UUID; // unique id used to track this workflow. Could be autogenerated - // TODO: make required - name?: string; // user-friendly name. CLI can derive this from file names + // TODO: make required (worker and cli may have to generate a name) + name?: string; - // TODO - rename jobs to steps - // TODO - support Trigger types in steps - jobs: Array; - //steps: Array; + steps: Array; }; +/** + * State is an object passed into a workflow and returned from a workflow + */ +export declare interface State { + // Core state props used by the runtime + configuration?: C; + data?: S; + errors?: Record; + + // Props added by common + references?: Array; + + // Props commonly used by other adaptors + index?: number; + response?: any; + query?: any; + + [other: string]: any; +} + +/** + * An operation function that runs in an Expression + */ +export declare interface Operation | State> { + (state: State): T; +} + /** * Options which can be set on a workflow as part of an execution plan */ export type WorkflowOptions = { - // Both numbers in minutes maybe + // TODO Both numbers in minutes maybe timeout?: number; stepTimeout?: number; - start?: StepId; + start?: StepId; statePropsToRemove?: string[]; }; @@ -47,7 +66,7 @@ export type StepId = string; * (usually a job) */ export interface Step { - id?: StepId; // is this really required? 
It could be generated + id?: StepId; name?: string; // user-friendly name used in logging next?: string | Record; @@ -56,6 +75,7 @@ export interface Step { /** * Not actually keen on the node/edge semantics here + * Maybe StepLink? */ export type StepEdge = | boolean @@ -93,30 +113,6 @@ export type Expression = string; */ export type CompiledExpression = Expression; -export declare interface State { - // Core state props - configuration?: C; - data?: S; - errors?: Record; - - // Added by common - references?: Array; - - // Typically used by other adaptors - index?: number; - response?: any; - query?: any; - - [other: string]: any; -} - -/** - * An operation function that runs in an Expression - */ -export declare interface Operation | State> { - (state: State): T; -} - export type ErrorReport = { type: string; // The name/type of error, ie Error, TypeError message: string; // simple human readable message @@ -129,23 +125,8 @@ export type ErrorReport = { data?: any; // General store for related error information }; -/* +// TODO standard shape of error object in our stack +type UUID = string; -run (workflow, options) - - -some options relate to the workflow: -- initial state -- start node -- timeout - -others are system settings -- linker (paths, whitelist) -- statePropsToRemove -- loggers -- sandbox rules -- callbacks (notify, lazy loaders) - -Some of those system things might just be defaults - although maybe it's cleaner for the engine to do that -*/ +export type Lazy = T | string; diff --git a/packages/runtime/src/execute/compile-plan.ts b/packages/runtime/src/execute/compile-plan.ts index 098491f50..ccb692f0c 100644 --- a/packages/runtime/src/execute/compile-plan.ts +++ b/packages/runtime/src/execute/compile-plan.ts @@ -1,12 +1,12 @@ import type { + CompiledEdge, CompiledExecutionPlan, - CompiledJobEdge, - CompiledJobNode, + CompiledStep, } from '../types'; import compileFunction from '../modules/compile-function'; import { conditionContext, Context } from 
'./context'; -import { ExecutionPlan, StepEdge, Workflow } from '@openfn/lexicon'; +import { ExecutionPlan, Job, StepEdge, Workflow } from '@openfn/lexicon'; const compileEdges = ( from: string, @@ -18,7 +18,7 @@ const compileEdges = ( } const errs = []; - const result = {} as Record; + const result = {} as Record; for (const edgeId in edges) { try { const edge = edges[edgeId]; @@ -33,7 +33,7 @@ const compileEdges = ( if (typeof edge.condition === 'string') { (newEdge as any).condition = compileFunction(edge.condition, context); } - result[edgeId] = newEdge as CompiledJobEdge; + result[edgeId] = newEdge as CompiledEdge; } } catch (e: any) { errs.push( @@ -55,7 +55,7 @@ const compileEdges = ( // Inefficient but fine for now (note that validation does something similar) // Note that right now we only support one upstream job const findUpstream = (workflow: Workflow, id: string) => { - for (const job of workflow.jobs) { + for (const job of workflow.steps) { if (job.next) if (typeof job.next === 'string') { if (job.next === id) { @@ -90,7 +90,7 @@ export default (plan: ExecutionPlan) => { } }; - for (const job of workflow.jobs) { + for (const job of workflow.steps) { if (!job.id) { job.id = generateJobId(); } @@ -98,33 +98,42 @@ export default (plan: ExecutionPlan) => { const newPlan: CompiledExecutionPlan = { workflow: { - jobs: {}, + steps: {}, }, options: { ...options, - start: options.start ?? workflow.jobs[0]?.id!, + start: options.start ?? 
workflow.steps[0]?.id!, }, }; - for (const job of workflow.jobs) { - const jobId = job.id!; - const newJob: CompiledJobNode = { - id: jobId, - expression: job.expression, // TODO we should compile this here + const maybeAssign = (a: any, b: any, keys: Array) => { + keys.forEach((key) => { + if (a.hasOwnProperty(key)) { + b[key] = a[key]; + } + }); + }; + + for (const step of workflow.steps) { + const stepId = step.id!; + const newStep: CompiledStep = { + id: stepId, }; - if (job.state) { - newJob.state = job.state; - } - if (job.configuration) { - newJob.configuration = job.configuration; - } - if (job.next) { + + maybeAssign(step, newStep, [ + 'expression', + 'state', + 'configuration', + 'name', + ]); + + if (step.next) { trapErrors(() => { - newJob.next = compileEdges(jobId, job.next!, context); + newStep.next = compileEdges(stepId, step.next!, context); }); } - newJob.previous = findUpstream(workflow, jobId); - newPlan.workflow.jobs[jobId] = newJob; + newStep.previous = findUpstream(workflow, stepId); + newPlan.workflow.steps[stepId] = newStep; } if (errs.length) { diff --git a/packages/runtime/src/execute/plan.ts b/packages/runtime/src/execute/plan.ts index d4496cfed..c2083c76f 100644 --- a/packages/runtime/src/execute/plan.ts +++ b/packages/runtime/src/execute/plan.ts @@ -1,7 +1,7 @@ import type { Logger } from '@openfn/logger'; import type { ExecutionPlan, State, Lazy } from '@openfn/lexicon'; -import executeJob from './job'; +import executeStep from './step'; import compilePlan from './compile-plan'; import type { Options } from '../runtime'; @@ -59,11 +59,11 @@ const executePlan = async ( // Right now this executes in series, even if jobs are parallelised while (queue.length) { const next = queue.shift()!; - const job = workflow.jobs[next]; + const job = workflow.steps[next]; const prevState = stateHistory[job.previous || ''] ?? 
input; - const result = await executeJob(ctx, job, prevState); + const result = await executeStep(ctx, job, prevState); stateHistory[next] = result.state; if (!result.next.length) { diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/step.ts similarity index 70% rename from packages/runtime/src/execute/job.ts rename to packages/runtime/src/execute/step.ts index 2af6ad88a..912cae9b9 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/step.ts @@ -1,12 +1,12 @@ // TODO hmm. I have a horrible feeling that the callbacks should go here // at least the resolvesrs -import type { State, StepId } from '@openfn/lexicon'; +import type { Job, State, StepId } from '@openfn/lexicon'; import type { Logger } from '@openfn/logger'; import executeExpression, { ExecutionErrorWrapper } from './expression'; import clone from '../util/clone'; import assembleState from '../util/assemble-state'; -import type { CompiledJobNode, ExecutionContext } from '../types'; +import type { CompiledStep, ExecutionContext } from '../types'; import { EdgeConditionError } from '../errors'; import { NOTIFY_INIT_COMPLETE, @@ -17,7 +17,7 @@ import { } from '../events'; const loadCredentials = async ( - job: CompiledJobNode, + job: Job, resolver: (id: string) => Promise ) => { if (typeof job.configuration === 'string') { @@ -28,10 +28,7 @@ const loadCredentials = async ( return job.configuration; }; -const loadState = async ( - job: CompiledJobNode, - resolver: (id: string) => Promise -) => { +const loadState = async (job: Job, resolver: (id: string) => Promise) => { if (typeof job.state === 'string') { // TODO let's log and notify something useful if we're lazy loading // TODO throw a controlled error if there's no resolver @@ -40,7 +37,7 @@ const loadState = async ( return job.state; }; -const calculateNext = (job: CompiledJobNode, result: any, logger: Logger) => { +const calculateNext = (job: CompiledStep, result: any, logger: Logger) => { const 
next: string[] = []; if (job.next) { for (const nextJobId in job.next) { @@ -78,50 +75,59 @@ const calculateNext = (job: CompiledJobNode, result: any, logger: Logger) => { // The job handler is responsible for preparing the job // and working out where to go next // it'll resolve credentials and state and notify how long init took -const executeJob = async ( +const executeStep = async ( ctx: ExecutionContext, - job: CompiledJobNode, + step: CompiledStep, input: State = {} ): Promise<{ next: StepId[]; state: any }> => { const { opts, notify, logger, report } = ctx; const duration = Date.now(); - const jobId = job.id; + const stepId = step.id; - notify(NOTIFY_INIT_START, { jobId }); + // The expression SHOULD return state, but COULD return anything + let result: any = input; + let next: string[] = []; + let didError = false; - // lazy load config and state - const configuration = await loadCredentials( - job, - opts.callbacks?.resolveCredential! // cheat - we need to handle the error case here - ); + if (step.expression) { + const job = step as Job; + const jobId = job.id!; + const jobName = job.name || job.id; - const globals = await loadState( - job, - opts.callbacks?.resolveState! // and here - ); + // The notify events only apply to jobs - not steps - so names don't need to be changed here + notify(NOTIFY_INIT_START, { jobId }); - const state = assembleState( - clone(input), - configuration, - globals, - opts.strict - ); + // lazy load config and state + const configuration = await loadCredentials( + job, + opts.callbacks?.resolveCredential! // cheat - we need to handle the error case here + ); - notify(NOTIFY_INIT_COMPLETE, { jobId, duration: Date.now() - duration }); + const globals = await loadState( + job, + opts.callbacks?.resolveState! 
// and here + ); - // We should by this point have validated the plan, so the job MUST exist + const state = assembleState( + clone(input), + configuration, + globals, + opts.strict + ); - const timerId = `job-${jobId}`; - logger.timer(timerId); - logger.always('Starting job', jobId); + notify(NOTIFY_INIT_COMPLETE, { + jobId, + duration: Date.now() - duration, + }); + + // We should by this point have validated the plan, so the step MUST exist + + const timerId = `step-${jobId}`; + logger.timer(timerId); + logger.always(`Starting step ${jobName}`); - // The expression SHOULD return state, but COULD return anything - let result: any = state; - let next: string[] = []; - let didError = false; - if (job.expression) { const startTime = Date.now(); try { // TODO include the upstream job? @@ -136,10 +142,10 @@ const executeJob = async ( result = state; const duration = logger.timer(timerId); - logger.error(`Failed job ${jobId} after ${duration}`); + logger.error(`Failed step ${jobName} after ${duration}`); report(state, jobId, error); - next = calculateNext(job, result, logger); + next = calculateNext(step, result, logger); notify(NOTIFY_JOB_ERROR, { duration: Date.now() - startTime, @@ -161,7 +167,7 @@ const executeJob = async ( if (!didError) { const humanDuration = logger.timer(timerId); - logger.success(`Completed job ${jobId} in ${humanDuration}`); + logger.success(`Completed step ${jobName} in ${humanDuration}`); // Take a memory snapshot // IMPORTANT: this runs _after_ the state object has been serialized @@ -175,10 +181,10 @@ const executeJob = async ( const humanJobMemory = Math.round(jobMemory / 1024 / 1024); const humanSystemMemory = Math.round(systemMemory / 1024 / 1024); logger.debug( - `Final memory usage: [job ${humanJobMemory}mb] [system ${humanSystemMemory}mb]` + `Final memory usage: [step ${humanJobMemory}mb] [system ${humanSystemMemory}mb]` ); - next = calculateNext(job, result, logger); + next = calculateNext(step, result, logger); 
notify(NOTIFY_JOB_COMPLETE, { duration: Date.now() - duration, state: result, @@ -192,16 +198,16 @@ const executeJob = async ( } } else { // calculate next for trigger nodes - next = calculateNext(job, result, logger); + next = calculateNext(step, result, logger); } if (next.length && !didError && !result) { logger.warn( - `WARNING: job ${jobId} did not return a state object. This may cause downstream jobs to fail.` + `WARNING: step ${stepId} did not return a state object. This may cause downstream jobs to fail.` ); } return { next, state: result }; }; -export default executeJob; +export default executeStep; diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 1523043d8..162d2420b 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -43,7 +43,7 @@ const defaultLogger = createMockLogger(); const loadPlanFromString = (expression: string, logger: Logger) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { expression, }, @@ -60,7 +60,7 @@ const loadPlanFromString = (expression: string, logger: Logger) => { const run = ( xplan: Partial | string, - input: State, + input?: State, opts: RawOptions = {} ) => { const logger = opts.logger || defaultLogger; diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index 2fbb88d34..a869cc73a 100644 --- a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -1,4 +1,4 @@ -import { Operation, Job, StepId, WorkflowOptions } from '@openfn/lexicon'; +import { Operation, StepId, WorkflowOptions, Step } from '@openfn/lexicon'; import { Logger } from '@openfn/logger'; import { Options } from './runtime'; @@ -12,23 +12,25 @@ import { NOTIFY_STATE_LOAD, } from './events'; -export type CompiledJobEdge = +export type CompiledEdge = | boolean | { condition?: Function; disabled?: boolean; }; -export type CompiledJobNode = Omit & { +export type CompiledStep = Omit & { id: StepId; - next?: Record; + next?: Record; + + [other: 
string]: any; }; export type Lazy = string | T; export type CompiledExecutionPlan = { workflow: { - jobs: Record; + steps: Record; }; options: WorkflowOptions & { start: StepId; diff --git a/packages/runtime/src/util/validate-plan.ts b/packages/runtime/src/util/validate-plan.ts index a700708e5..2dd86628d 100644 --- a/packages/runtime/src/util/validate-plan.ts +++ b/packages/runtime/src/util/validate-plan.ts @@ -1,4 +1,4 @@ -import { ExecutionPlan, Job } from '@openfn/lexicon'; +import { ExecutionPlan, Step } from '@openfn/lexicon'; import { ValidationError } from '../errors'; type ModelNode = { @@ -23,13 +23,13 @@ export default (plan: ExecutionPlan) => { export const buildModel = ({ workflow }: ExecutionPlan) => { const model: Model = {}; - const jobIdx = workflow.jobs.reduce((obj, item) => { + const jobIdx = workflow.steps.reduce((obj, item) => { if (item.id) { obj[item.id] = item; } // TODO warn if there's no id? It's usually fine (until it isn't) return obj; - }, {} as Record); + }, {} as Record); const ensureModel = (jobId: string) => { if (!model[jobId]) { @@ -48,7 +48,7 @@ export const buildModel = ({ workflow }: ExecutionPlan) => { } }; - for (const job of workflow.jobs) { + for (const job of workflow.steps) { let node = job.id ? 
ensureModel(job.id) : { up: {}, down: {} }; if (typeof job.next === 'string') { validateJob(job.next); @@ -73,7 +73,7 @@ export const buildModel = ({ workflow }: ExecutionPlan) => { const assertStart = (plan: ExecutionPlan) => { const { start } = plan.options; if (typeof start === 'string') { - if (!plan.workflow.jobs.find(({ id }) => id == start)) { + if (!plan.workflow.steps.find(({ id }) => id == start)) { throw new ValidationError(`Could not find start job: ${start}`); } } diff --git a/packages/runtime/test/errors.test.ts b/packages/runtime/test/errors.test.ts index 6a7b7af45..a18f3ba5a 100644 --- a/packages/runtime/test/errors.test.ts +++ b/packages/runtime/test/errors.test.ts @@ -6,7 +6,7 @@ import run from '../src/runtime'; const createPlan = (expression: string, options: WorkflowOptions = {}) => ({ workflow: { - jobs: [ + steps: [ { expression, }, @@ -85,7 +85,7 @@ test('crash on eval with SecurityError', async (t) => { test('crash on edge condition error with EdgeConditionError', async (t) => { const plan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: '.', diff --git a/packages/runtime/test/execute/compile-plan.test.ts b/packages/runtime/test/execute/compile-plan.test.ts index 23c1bcce3..ec99bd574 100644 --- a/packages/runtime/test/execute/compile-plan.test.ts +++ b/packages/runtime/test/execute/compile-plan.test.ts @@ -5,8 +5,8 @@ import compilePlan from '../../src/execute/compile-plan'; const testPlan: ExecutionPlan = { workflow: { - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, + steps: [ + { id: 'a', expression: 'x', name: 'a', next: { b: true } }, { id: 'b', expression: 'y' }, ], }, @@ -17,7 +17,7 @@ const testPlan: ExecutionPlan = { const planWithEdge = (edge: Partial) => ({ workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', @@ -37,7 +37,7 @@ test('should preserve the start option', (t) => { const compiledPlan = compilePlan({ id: 'a', workflow: { - jobs: [{ id: 'a', expression: 'a' }], + steps: [{ id: 'a', 
expression: 'a' }], }, options: { start: 'a', @@ -51,7 +51,7 @@ test('should preserve arbitrary options', (t) => { const compiledPlan = compilePlan({ id: 'a', workflow: { - jobs: [{ id: 'a', expression: 'a' }], + steps: [{ id: 'a', expression: 'a' }], }, options: { // @ts-ignore @@ -69,19 +69,24 @@ test('should preserve arbitrary options', (t) => { }); }); -test('should convert jobs to an object', (t) => { +test('should convert steps to an object', (t) => { const { workflow } = compilePlan(testPlan); - t.truthy(workflow.jobs.a); - t.is(workflow.jobs.a.expression, 'x'); + t.deepEqual(workflow.steps.a, { + id: 'a', + name: 'a', + expression: 'x', + next: { b: true }, + previous: undefined, + }); - t.truthy(workflow.jobs.b); - t.is(workflow.jobs.b.expression, 'y'); + t.truthy(workflow.steps.b); + t.is(workflow.steps.b.expression, 'y'); }); -test('should set previous job with 2 jobs', (t) => { +test('should set previous job with 2 steps', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', next: { b: true } }, { id: 'b', expression: 'y' }, ], @@ -89,14 +94,14 @@ test('should set previous job with 2 jobs', (t) => { options: {}, }; const { workflow } = compilePlan(plan); - t.is(workflow.jobs.a.previous, undefined); - t.is(workflow.jobs.b.previous, 'a'); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); }); -test('should set previous job with 2 jobs and shorthand syntax', (t) => { +test('should set previous job with 2 steps and shorthand syntax', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', next: 'b' }, { id: 'b', expression: 'y' }, ], @@ -104,14 +109,14 @@ test('should set previous job with 2 jobs and shorthand syntax', (t) => { options: {}, }; const { workflow } = compilePlan(plan); - t.is(workflow.jobs.a.previous, undefined); - t.is(workflow.jobs.b.previous, 'a'); + t.is(workflow.steps.a.previous, undefined); + 
t.is(workflow.steps.b.previous, 'a'); }); -test('should set previous job with 2 jobs and no start', (t) => { +test('should set previous job with 2 steps and no start', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', next: { b: true } }, { id: 'b', expression: 'y' }, ], @@ -119,14 +124,14 @@ test('should set previous job with 2 jobs and no start', (t) => { options: {}, }; const { workflow } = compilePlan(plan); - t.is(workflow.jobs.a.previous, undefined); - t.is(workflow.jobs.b.previous, 'a'); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); }); -test('should set previous job with 3 jobs', (t) => { +test('should set previous job with 3 steps', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', next: { b: true } }, { id: 'b', expression: 'y', next: { c: true } }, { id: 'c', expression: 'z' }, @@ -135,15 +140,15 @@ test('should set previous job with 3 jobs', (t) => { options: {}, }; const { workflow } = compilePlan(plan); - t.is(workflow.jobs.a.previous, undefined); - t.is(workflow.jobs.b.previous, 'a'); - t.is(workflow.jobs.c.previous, 'b'); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); + t.is(workflow.steps.c.previous, 'b'); }); -test('should set previous job with 3 jobs and shorthand syntax', (t) => { +test('should set previous job with 3 steps and shorthand syntax', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'c', expression: 'z' }, { id: 'a', expression: 'x', next: 'b' }, { id: 'b', expression: 'y', next: 'c' }, @@ -152,58 +157,54 @@ test('should set previous job with 3 jobs and shorthand syntax', (t) => { options: {}, }; const { workflow } = compilePlan(plan); - t.is(workflow.jobs.a.previous, undefined); - t.is(workflow.jobs.b.previous, 'a'); - t.is(workflow.jobs.c.previous, 'b'); + t.is(workflow.steps.a.previous, undefined); + 
t.is(workflow.steps.b.previous, 'a'); + t.is(workflow.steps.c.previous, 'b'); }); -test('should auto generate ids for jobs', (t) => { +test('should auto generate ids for steps', (t) => { const plan = { workflow: { - jobs: [{ expression: 'x' }, { expression: 'y' }], + steps: [{ expression: 'x' }, { expression: 'y' }], }, options: {}, }; const { workflow } = compilePlan(plan); - const ids = Object.keys(workflow.jobs); + const ids = Object.keys(workflow.steps); t.truthy(ids[0]); t.truthy(ids[1]); t.assert(ids[0] !== ids[1]); }); -test('should convert jobs to an object with auto ids', (t) => { +test('should convert steps to an object with auto ids', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ - // silly use case but it doens't matter - { expression: 'x' }, - { expression: 'y' }, - ], + steps: [{ expression: 'x' }, { expression: 'y' }], }, options: {}, }; const { workflow } = compilePlan(plan); - t.deepEqual(Object.keys(workflow.jobs), ['job-1', 'job-2']); + t.deepEqual(Object.keys(workflow.steps), ['job-1', 'job-2']); }); test('should reset job ids for each call', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [{ expression: 'x' }], + steps: [{ expression: 'x' }], }, options: {}, }; const first = compilePlan(plan); - t.is(first.workflow.jobs['job-1'].expression, 'x'); + t.is(first.workflow.steps['job-1'].expression, 'x'); const second = compilePlan(plan); - t.is(second.workflow.jobs['job-1'].expression, 'x'); + t.is(second.workflow.steps['job-1'].expression, 'x'); }); -test('should set the start to jobs[0]', (t) => { +test('should set the start to steps[0]', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x' }, { id: 'b', expression: 'y' }, { id: 'c', expression: 'z' }, @@ -221,7 +222,7 @@ test('should not override the start', (t) => { start: 'c', }, workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x' }, { id: 'b', expression: 'y' }, { id: 'c', expression: 'z' }, @@ -235,7 +236,7 @@ 
test('should not override the start', (t) => { test('should compile a shorthand edge', (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', @@ -248,7 +249,7 @@ test('should compile a shorthand edge', (t) => { const { workflow } = compilePlan(plan); - t.deepEqual(workflow.jobs.a.next!, { + t.deepEqual(workflow.steps.a.next!, { y: true, }); }); @@ -261,7 +262,7 @@ test('should not recompile a functional edge', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - const result = workflow.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.true(result); }); @@ -271,7 +272,7 @@ test('should compile a truthy edge', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - const result = workflow.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.true(result); }); @@ -281,7 +282,7 @@ test('should compile a string edge', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - const result = workflow.jobs.a.next!.b.condition(); + const result = workflow.steps.a.next!.b.condition(); t.true(result); }); @@ -291,7 +292,7 @@ test('should compile a falsy edge', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - const result = workflow.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.false(result); }); @@ -301,7 +302,7 @@ test('should compile an edge with arithmetic', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - const result = workflow.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.is(result, 2); }); @@ -311,7 +312,7 @@ test('should compile an edge which uses state', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - const result = workflow.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.true(result); }); @@ -321,7 +322,7 @@ test('condition cannot require', (t) => { 
const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'require is not defined', }); }); @@ -332,7 +333,7 @@ test('condition cannot access process', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'process is not defined', }); }); @@ -343,7 +344,7 @@ test('condition cannot access process #2', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'process is not defined', }); }); @@ -354,7 +355,7 @@ test('condition cannot eval', (t) => { const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => workflow.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'Code generation from strings disallowed for this context', }); }); @@ -372,7 +373,7 @@ test('throw for a syntax error on a job edge', (t) => { test('throw for multiple errors', (t) => { const plan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'x', diff --git a/packages/runtime/test/execute/plan.test.ts b/packages/runtime/test/execute/plan.test.ts index c2f8fcd07..b2fea96af 100644 --- a/packages/runtime/test/execute/plan.test.ts +++ b/packages/runtime/test/execute/plan.test.ts @@ -9,11 +9,11 @@ import { CompiledExecutionPlan } from '../../src'; let mockLogger = createMockLogger(undefined, { level: 'debug' }); const createPlan = ( - jobs: Job[], + steps: Job[], options: Partial = {} ): ExecutionPlan => ({ workflow: { - jobs, + steps, }, options, }); @@ -283,7 +283,7 @@ test('only allowed state is passed through in strict mode', async (t) => { }); }); -test('Jobs only 
receive state from upstream jobs', async (t) => { +test('steps only receive state from upstream steps', async (t) => { const assert = (expr: string) => `if (!(${expr})) throw new Error('ASSERT FAIL')`; @@ -515,16 +515,16 @@ test('Return when there are no more edges', async (t) => { }); test('execute a 5 job execution plan', async (t) => { - const jobs = []; + const steps = []; for (let i = 1; i < 6; i++) { - jobs.push({ + steps.push({ id: `${i}`, expression: 'export default [s => { s.data.x += 1; return s; } ]', next: i === 5 ? null : { [`${i + 1}`]: true }, } as Job); } - const plan = createPlan(jobs, { + const plan = createPlan(steps, { start: '1', }); const state = { data: { x: 0 } }; @@ -769,15 +769,15 @@ test('log appopriately on error', async (t) => { const logger = createMockLogger(undefined, { level: 'debug' }); await executePlan(plan, {}, {}, logger); - const err = logger._find('error', /failed job/i); + const err = logger._find('error', /failed step/i); t.truthy(err); - t.regex(err!.message as string, /Failed job job1 after \d+ms/i); + t.regex(err!.message as string, /Failed step job1 after \d+ms/i); t.truthy(logger._find('error', /JobError: e/)); t.truthy(logger._find('error', /Check state.errors.job1 for details/i)); }); -test('jobs do not share a local scope', async (t) => { +test('steps do not share a local scope', async (t) => { const plan = createPlan([ { id: 'job1', @@ -799,7 +799,7 @@ test('jobs do not share a local scope', async (t) => { }); }); -test('jobs do not share a global scope', async (t) => { +test('steps do not share a global scope', async (t) => { const plan = createPlan([ { id: 'job1', @@ -820,7 +820,7 @@ test('jobs do not share a global scope', async (t) => { }); }); -test('jobs do not share a globalThis object', async (t) => { +test('steps do not share a globalThis object', async (t) => { const plan = createPlan([ { id: 'job1', @@ -842,7 +842,7 @@ test('jobs do not share a globalThis object', async (t) => { // TODO this fails right 
now // https://github.com/OpenFn/kit/issues/213 -test.skip('jobs cannot scribble on globals', async (t) => { +test.skip('steps cannot scribble on globals', async (t) => { const plan = createPlan([ { id: 'job1', @@ -863,7 +863,7 @@ test.skip('jobs cannot scribble on globals', async (t) => { // TODO this fails right now // https://github.com/OpenFn/kit/issues/213 -test.skip('jobs cannot scribble on adaptor functions', async (t) => { +test.skip('steps cannot scribble on adaptor functions', async (t) => { const plan = createPlan([ { id: 'job1', @@ -893,7 +893,7 @@ test.skip('jobs cannot scribble on adaptor functions', async (t) => { t.falsy(result.data.x); }); -test('jobs can write circular references to state without blowing up downstream', async (t) => { +test('steps can write circular references to state without blowing up downstream', async (t) => { const expression = `export default [(s) => { const a = {}; const b = { a }; @@ -927,7 +927,7 @@ test('jobs can write circular references to state without blowing up downstream' }); }); -test('jobs cannot pass circular references to each other', async (t) => { +test('steps cannot pass circular references to each other', async (t) => { const expression = `export default [(s) => { const a = {}; const b = { a }; @@ -957,7 +957,7 @@ test('jobs cannot pass circular references to each other', async (t) => { t.is(result.data.answer, '[Circular]'); }); -test('jobs can write functions to state without blowing up downstream', async (t) => { +test('steps can write functions to state without blowing up downstream', async (t) => { const plan = createPlan([ { next: { b: true }, @@ -981,7 +981,7 @@ test('jobs can write functions to state without blowing up downstream', async (t t.deepEqual(result, { data: {} }); }); -test('jobs cannot pass functions to each other', async (t) => { +test('steps cannot pass functions to each other', async (t) => { const plan = createPlan([ { next: { b: true }, @@ -1008,7 +1008,7 @@ test('jobs cannot pass 
functions to each other', async (t) => { t.is(error.message, 'TypeError: s.data.x is not a function'); }); -test('Plans log for each job start and end', async (t) => { +test('Plans log step ids for each job start and end', async (t) => { const plan = createPlan([ { id: 'a', @@ -1017,10 +1017,27 @@ test('Plans log for each job start and end', async (t) => { ]); const logger = createMockLogger(undefined, { level: 'debug' }); await executePlan(plan, {}, {}, logger); + const start = logger._find('always', /starting step a/i); + t.is(start!.message, 'Starting step a'); - const start = logger._find('always', /starting job/i); - t.is(start!.message, 'Starting job a'); + const end = logger._find('success', /completed step a/i); + t.regex(end!.message as string, /Completed step a in \d+ms/); +}); + +test('Plans log step names for each job start and end', async (t) => { + const plan = createPlan([ + { + id: 'a', + name: 'do-the-thing', + expression: 'export default [s => s]', + }, + ]); + const logger = createMockLogger(undefined, { level: 'debug' }); + await executePlan(plan, {}, {}, logger); + + const start = logger._find('always', /starting step do-the-thing/i); + t.is(start!.message, 'Starting step do-the-thing'); - const end = logger._find('success', /completed job/i); - t.regex(end!.message as string, /Completed job a in \d+ms/); + const end = logger._find('success', /completed step do-the-thing/i); + t.regex(end!.message as string, /Completed step do-the-thing in \d+ms/); }); diff --git a/packages/runtime/test/execute/job.test.ts b/packages/runtime/test/execute/step.test.ts similarity index 74% rename from packages/runtime/test/execute/job.test.ts rename to packages/runtime/test/execute/step.test.ts index 66cc29d13..2fbd9205f 100644 --- a/packages/runtime/test/execute/job.test.ts +++ b/packages/runtime/test/execute/step.test.ts @@ -6,7 +6,7 @@ import { NOTIFY_JOB_ERROR, NOTIFY_JOB_START, } from '../../src'; -import execute from '../../src/execute/job'; +import execute 
from '../../src/execute/step'; import type { ExecutionContext } from '../../src/types'; import { State } from '@openfn/lexicon'; @@ -34,35 +34,35 @@ test.afterEach(() => { logger._reset(); }); -test.serial('resolve and return next for a simple job', async (t) => { - const job = { +test.serial('resolve and return next for a simple step', async (t) => { + const step = { id: 'j', expression: [(s: State) => s], next: { k: true, a: false }, }; const initialState = createState(); const context = createContext(); - const { next, state } = await execute(context, job, initialState); + const { next, state } = await execute(context, step, initialState); t.deepEqual(state, { data: {} }); t.deepEqual(next, ['k']); }); -test.serial('resolve and return next for a trigger-style job', async (t) => { - const job = { +test.serial('resolve and return next for a trigger-style step', async (t) => { + const step = { id: 'j', next: { k: true, a: false }, }; const initialState = createState(); const context = createContext(); - const { next, state } = await execute(context, job, initialState); + const { next, state } = await execute(context, step, initialState); t.deepEqual(state, initialState); t.deepEqual(next, ['k']); }); -test.serial('resolve and return next for a failed job', async (t) => { - const job = { +test.serial('resolve and return next for a failed step', async (t) => { + const step = { id: 'j', expression: [ () => { @@ -73,7 +73,7 @@ test.serial('resolve and return next for a failed job', async (t) => { }; const initialState = createState(); const context = createContext(); - const { next, state } = await execute(context, job, initialState); + const { next, state } = await execute(context, step, initialState); // Config should still be scrubbed from data t.deepEqual(state, { data: {} }); @@ -81,7 +81,7 @@ test.serial('resolve and return next for a failed job', async (t) => { }); test.serial(`notify ${NOTIFY_JOB_START}`, async (t) => { - const job = { + const step = { id: 'j', 
expression: [(s: State) => s], }; @@ -95,32 +95,32 @@ test.serial(`notify ${NOTIFY_JOB_START}`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial( - `don't notify ${NOTIFY_JOB_START} for trigger-style jobs`, + `don't notify ${NOTIFY_JOB_START} for trigger-style steps`, async (t) => { - const job = { + const step = { id: 'j', }; const state = createState(); const notify = (event: string, payload?: any) => { if (event === NOTIFY_JOB_START) { - t.fail('should not notify job-start for trigger nodes'); + t.fail('should not notify step-start for trigger nodes'); } }; const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); t.pass('all ok'); } ); test.serial(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { - const job = { + const step = { id: 'j', expression: [(s: State) => s], }; @@ -142,11 +142,11 @@ test.serial(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { - const job = { + const step = { id: 'j', expression: [(s: State) => s], next: { b: true, c: true }, @@ -168,26 +168,26 @@ test.serial(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial( - `don't notify ${NOTIFY_JOB_COMPLETE} for trigger-style jobs`, + `don't notify ${NOTIFY_JOB_COMPLETE} for trigger-style steps`, async (t) => { - const job = { + const step = { id: 'j', }; const state = createState(); const notify = (event: string) => { if (event === NOTIFY_JOB_COMPLETE) { - t.fail('should not notify job-start for trigger nodes'); + t.fail('should not notify step-start for trigger nodes'); 
} }; const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); t.pass('all ok'); } ); @@ -198,7 +198,7 @@ test.serial( // Promises will trigger an exception if you try to serialize them // If we don't return finalState in execute/expression, this test will fail const resultState = { x: new Promise((r) => r), y: 22 }; - const job = { + const step = { id: 'j', expression: [() => resultState], }; @@ -215,12 +215,12 @@ test.serial( const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); } ); test.serial(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { - const job = { + const step = { id: 'j', expression: [ () => { @@ -250,33 +250,33 @@ test.serial(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial('log duration of execution', async (t) => { - const job = { + const step = { id: 'y', expression: [(s: State) => s], }; const initialState = createState(); const context = createContext(); - await execute(context, job, initialState); + await execute(context, step, initialState); - const duration = logger._find('success', /completed job /i); + const duration = logger._find('success', /completed step /i); - t.regex(duration?.message, /completed job y in \d\d?ms/i); + t.regex(duration?.message, /completed step y in \d\d?ms/i); }); test.serial('log memory usage', async (t) => { - const job = { + const step = { id: 'z', expression: [(s: State) => s], }; const initialState = createState(); const context = createContext(); - await execute(context, job, initialState); + await execute(context, step, initialState); const memory = logger._find('debug', /final memory usage/i); @@ -285,8 +285,8 @@ test.serial('log memory usage', async (t) => { t.regex(memory?.message, /\d+mb(.+)\d+mb/i); }); -test.serial('warn if a 
non-leaf job does not return state', async (t) => { - const job = { +test.serial('warn if a non-leaf step does not return state', async (t) => { + const step = { id: 'k', expression: [(s: State) => {}], next: { l: true }, @@ -295,14 +295,14 @@ test.serial('warn if a non-leaf job does not return state', async (t) => { const context = createContext(); const state = createState(); - // @ts-ignore ts complains that the job does not return state - const result = await execute(context, job, state); + // @ts-ignore ts complains that the step does not return state + const result = await execute(context, step, state); const warn = logger._find('warn', /did not return a state object/); t.truthy(warn); }); -test.serial('do not warn if a leaf job does not return state', async (t) => { - const job = { +test.serial('do not warn if a leaf step does not return state', async (t) => { + const step = { id: 'k', expression: [(s: State) => {}], }; @@ -310,17 +310,17 @@ test.serial('do not warn if a leaf job does not return state', async (t) => { const context = createContext(); const state = createState(); - // @ts-ignore ts complains that the job does not return state - const result = await execute(context, job, state); + // @ts-ignore ts complains that the step does not return state + const result = await execute(context, step, state); const warn = logger._find('warn', /did not return a state object/); t.falsy(warn); }); test.serial( - 'do not warn a non-leaf job does not return state and there was an error', + 'do not warn a non-leaf step does not return state and there was an error', async (t) => { - const job = { + const step = { id: 'k', expression: [ (s: State) => { @@ -333,8 +333,8 @@ test.serial( const context = createContext(); const state = createState(); - // @ts-ignore ts complains that the job does not return state - const result = await execute(context, job, state); + // @ts-ignore ts complains that the step does not return state + const result = await execute(context, 
step, state); const warn = logger._find('warn', /did not return a state object/); t.falsy(warn); diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index 7b332c925..e0a10cbec 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -49,18 +49,15 @@ const run = async (t, workflow: ExecutionPlan) => { } }; - const state = await callRuntime( - { workflow }, - { - strict: false, - callbacks: { notify }, - globals: { - process: { - memoryUsage: () => process.memoryUsage(), - }, + const state = await callRuntime(workflow, { + strict: false, + callbacks: { notify }, + globals: { + process: { + memoryUsage: () => process.memoryUsage(), }, - } - ); + }, + }); return { state, mem }; }; diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index 5b5c64140..f19a494c7 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -1,7 +1,7 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; -import type { ExecutionPlan, State } from '@openfn/lexicon'; +import type { ExecutionPlan } from '@openfn/lexicon'; import { NOTIFY_INIT_COMPLETE, @@ -12,6 +12,8 @@ import { } from '../src'; import run from '../src/runtime'; +type ExecutionPlanNoOptions = Omit; + test('run simple expression', async (t) => { const expression = 'export default [(s) => {s.data.done = true; return s}]'; @@ -20,9 +22,9 @@ test('run simple expression', async (t) => { }); test('run a simple workflow', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { expression: 'export default [(s) => ({ data: { done: true } })]' }, ], }, @@ -45,9 +47,9 @@ test('run a workflow and notify major events', async (t) => { notify, }; - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [{ expression: 'export default [(s) => 
s]' }], + steps: [{ expression: 'export default [(s) => s]' }], }, }; @@ -74,9 +76,9 @@ test('notify job error even after fail', async (t) => { notify, }; - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [(s) => s.data.x = s.err.z ]' }, ], }, @@ -100,8 +102,8 @@ test('notify job error even after crash', async (t) => { notify, }; - const plan: ExecutionPlan = { - workflow: { jobs: [{ id: 'a', expression: 'export default [() => s]' }] }, + const plan: ExecutionPlanNoOptions = { + workflow: { steps: [{ id: 'a', expression: 'export default [() => s]' }] }, }; try { @@ -115,7 +117,7 @@ test('notify job error even after crash', async (t) => { test('resolve a credential', async (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { expression: 'export default [(s) => s]', configuration: 'ccc', @@ -140,9 +142,9 @@ test('resolve a credential', async (t) => { }); test('resolve initial state', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { expression: 'export default [(s) => s]', state: 'abc', @@ -175,9 +177,9 @@ test('run a workflow with two jobs and call callbacks', async (t) => { notify, }; - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [(s) => s]', next: { b: true } }, { id: 'b', expression: 'export default [(s) => s]' }, ], @@ -193,9 +195,9 @@ test('run a workflow with two jobs and call callbacks', async (t) => { }); test('run a workflow with state and parallel branching', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { expression: 'export default [(s) => { s.data.count += 1; s.data.a = true; return s}]', @@ -242,9 +244,9 @@ test('run a workflow with state and parallel branching', async (t) => { // TODO this test 
sort of shows why input state on the plan object is a bit funky // running the same plan with two inputs is pretty clunky test('run a workflow with state and conditional branching', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { expression: 'export default [(s) => { s.data.a = true; return s}]', next: { @@ -284,7 +286,7 @@ test('run a workflow with state and conditional branching', async (t) => { test('run a workflow with initial state (data key) and optional start', async (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { // won't run id: 'a', @@ -312,9 +314,9 @@ test('run a workflow with initial state (data key) and optional start', async (t }); test('run a workflow with a trigger node', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { next: { b: { condition: 'state.data.age > 18 ' } }, }, @@ -332,9 +334,9 @@ test('run a workflow with a trigger node', async (t) => { }); test('prefer initial state to inline state', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { state: { data: { @@ -354,9 +356,9 @@ test('prefer initial state to inline state', async (t) => { }); test('do not pass extraneous state in strict mode', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { expression: 'export default [() => ({ x: 1, data: {}} )]', }, @@ -371,9 +373,9 @@ test('do not pass extraneous state in strict mode', async (t) => { }); test('do pass extraneous state in non-strict mode', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { expression: 'export default [() => ({ x: 1, data: {}} )]', }, @@ -389,9 +391,9 @@ test('do pass extraneous state in non-strict mode', async (t) => { }); test('Allow a 
job to return undefined', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [{ expression: 'export default [() => {}]' }], + steps: [{ expression: 'export default [() => {}]' }], }, }; @@ -400,9 +402,9 @@ test('Allow a job to return undefined', async (t) => { }); test('log errors, write to state, and continue', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [() => { throw new Error("test") }]', @@ -424,13 +426,13 @@ test('log errors, write to state, and continue', async (t) => { t.is(result.errors.a.message, 'test'); t.is(result.errors.a.type, 'JobError'); - t.truthy(logger._find('error', /failed job a/i)); + t.truthy(logger._find('error', /failed step a/i)); }); test('log job code to the job logger', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [(s) => { console.log("hi"); return s;}]', @@ -450,9 +452,9 @@ test('log job code to the job logger', async (t) => { }); test('log and serialize an error to the job logger', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: @@ -477,9 +479,9 @@ test('log and serialize an error to the job logger', async (t) => { }); test('error reports can be overwritten', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [() => { throw new Error("test") }]', @@ -501,9 +503,9 @@ test('error reports can be overwritten', async (t) => { // This tracks current behaviour but I don't know if it's right test('stuff written to state before an error is preserved', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: 
[ + steps: [ { id: 'a', data: { x: 0 }, @@ -528,9 +530,9 @@ test('data can be an array (expression)', async (t) => { }); test('data can be an array (workflow)', async (t) => { - const plan: ExecutionPlan = { + const plan: ExecutionPlanNoOptions = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [() => ({ data: [1,2,3] })]', diff --git a/packages/runtime/test/security.test.ts b/packages/runtime/test/security.test.ts index 1b7da2cd2..02ab842f2 100644 --- a/packages/runtime/test/security.test.ts +++ b/packages/runtime/test/security.test.ts @@ -203,7 +203,7 @@ test.serial( async (t) => { const plan: ExecutionPlan = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: 'export default [s => { console.x = 10; return s; }]', diff --git a/packages/runtime/test/util/validate-plan.test.ts b/packages/runtime/test/util/validate-plan.test.ts index fcaf8df7f..1f0858d06 100644 --- a/packages/runtime/test/util/validate-plan.test.ts +++ b/packages/runtime/test/util/validate-plan.test.ts @@ -14,7 +14,7 @@ test('builds a simple model', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [job('a', { b: true }), job('b')], + steps: [job('a', { b: true }), job('b')], }, }; @@ -35,7 +35,7 @@ test('builds a more complex model', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [job('a', { b: true }), job('b', { c: true, a: true }), job('c')], + steps: [job('a', { b: true }), job('b', { c: true, a: true }), job('c')], }, }; @@ -60,7 +60,7 @@ test('throws for a circular dependency', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [job('a', { b: true }), job('b', { a: true })], + steps: [job('a', { b: true }), job('b', { a: true })], }, }; @@ -73,7 +73,7 @@ test('throws for an indirect circular dependency', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [ + steps: [ job('a', { b: true }), job('b', { c: true }), job('c', { a: true }), @@ -90,7 +90,7 @@ test('throws for a 
multiple inputs', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [ + steps: [ job('a', { b: true, c: true }), job('b', { z: true }), job('c', { z: true }), @@ -108,7 +108,7 @@ test('throws for a an unknown job', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [job('next', { z: true })], + steps: [job('next', { z: true })], }, }; @@ -121,7 +121,7 @@ test('throws for a an unknown job with shorthand syntax', (t) => { const plan: ExecutionPlan = { options: {}, workflow: { - jobs: [ + steps: [ { next: 'z', expression: '.', @@ -140,7 +140,7 @@ test('throws for invalid string start', (t) => { start: 'z', }, workflow: { - jobs: [job('a')], + steps: [job('a')], }, }; From a33362d0288b155f60c215ef449281a39ae555e0 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 2 Feb 2024 18:17:04 +0000 Subject: [PATCH 043/128] cli: start refactoring towrads new runtime API Done a big chunk of execute but still a way to go --- packages/cli/src/commands.ts | 16 +- packages/cli/src/compile/handler.ts | 17 +- packages/cli/src/docs/handler.ts | 2 +- packages/cli/src/execute/execute.ts | 49 ++--- .../src/execute/get-autoinstall-targets.ts | 29 +-- packages/cli/src/execute/handler.ts | 23 +- packages/cli/src/options.ts | 38 ++-- packages/cli/src/types.ts | 6 + packages/cli/src/util/expand-adaptors.ts | 26 ++- packages/cli/src/util/index.d.ts | 1 - packages/cli/src/util/index.ts | 6 + packages/cli/src/util/load-input.ts | 7 +- packages/cli/src/util/load-plan.ts | 170 +++++++++++++++ .../cli/src/util/map-adaptors-to-monorepo.ts | 38 ++-- .../execute/get-autoinstall-targets.test.ts | 198 ++++++------------ .../cli/test/execute/parse-adaptors.test.ts | 127 +++++------ .../cli/test/util/expand-adaptors.test.ts | 103 ++++----- packages/cli/test/util/load-input.test.ts | 15 +- packages/cli/test/util/load-plan.test.ts | 172 +++++++++++++++ .../util/map-adaptors-to-monorepo.test.ts | 26 +-- 20 files changed, 678 insertions(+), 391 deletions(-) delete mode 
100644 packages/cli/src/util/index.d.ts create mode 100644 packages/cli/src/util/index.ts create mode 100644 packages/cli/src/util/load-plan.ts create mode 100644 packages/cli/test/util/load-plan.test.ts diff --git a/packages/cli/src/commands.ts b/packages/cli/src/commands.ts index 328f0ce05..e43d743f2 100644 --- a/packages/cli/src/commands.ts +++ b/packages/cli/src/commands.ts @@ -12,6 +12,7 @@ import { clean, install, pwd, list } from './repo/handler'; import createLogger, { CLI, Logger } from './util/logger'; import mapAdaptorsToMonorepo, { MapAdaptorsToMonorepoOptions, + validateMonoRepo, } from './util/map-adaptors-to-monorepo'; import printVersions from './util/print-versions'; @@ -56,16 +57,23 @@ const parse = async (options: Opts, log?: Logger) => { await printVersions(logger, options); } - if (options.monorepoPath) { - if (options.monorepoPath === 'ERR') { + const { monorepoPath } = options; + if (monorepoPath) { + // TODO how does this occur? + if (monorepoPath === 'ERR') { logger.error( 'ERROR: --use-adaptors-monorepo was passed, but OPENFN_ADAPTORS_REPO env var is undefined' ); logger.error('Set OPENFN_ADAPTORS_REPO to a path pointing to the repo'); process.exit(9); // invalid argument } - await mapAdaptorsToMonorepo( - options as MapAdaptorsToMonorepoOptions, + + await validateMonoRepo(monorepoPath, logger); + logger.success(`Loading adaptors from monorepo at ${monorepoPath}`); + + options.adaptors = await mapAdaptorsToMonorepo( + monorepoPath, + options.adaptors, logger ); } diff --git a/packages/cli/src/compile/handler.ts b/packages/cli/src/compile/handler.ts index 2435ccd19..3e78eaa11 100644 --- a/packages/cli/src/compile/handler.ts +++ b/packages/cli/src/compile/handler.ts @@ -12,16 +12,17 @@ import mapAdaptorsToMonorepo, { const compileHandler = async (options: CompileOptions, logger: Logger) => { assertPath(options.path); + // TODO use loadPlan await loadInput(options, logger); - if (options.workflow) { - // expand shorthand adaptors in the 
workflow jobs - expandAdaptors(options); - await mapAdaptorsToMonorepo( - options as MapAdaptorsToMonorepoOptions, - logger - ); - } + // if (options.workflow) { + // // expand shorthand adaptors in the workflow jobs + // expandAdaptors(options); + // await mapAdaptorsToMonorepo( + // options as MapAdaptorsToMonorepoOptions, + // logger + // ); + // } let result = await compile(options, logger); if (options.workflow) { diff --git a/packages/cli/src/docs/handler.ts b/packages/cli/src/docs/handler.ts index 5be8d0792..fe304aabc 100644 --- a/packages/cli/src/docs/handler.ts +++ b/packages/cli/src/docs/handler.ts @@ -60,7 +60,7 @@ const docsHandler = async ( // does the adaptor have a version? If not, fetch the latest // (docgen won't do this for us) - const { adaptors } = expandAdaptors({ adaptors: [adaptor] }); + const { adaptors } = expandAdaptors([adaptor]); const [adaptorName] = adaptors!; let { name, version } = getNameAndVersion(adaptorName); if (!version) { diff --git a/packages/cli/src/execute/execute.ts b/packages/cli/src/execute/execute.ts index 487924ca5..64b8f0520 100644 --- a/packages/cli/src/execute/execute.ts +++ b/packages/cli/src/execute/execute.ts @@ -1,5 +1,7 @@ import run, { getNameAndVersion } from '@openfn/runtime'; -import type { ModuleInfo, ModuleInfoMap, ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { ModuleInfo, ModuleInfoMap } from '@openfn/runtime'; + import createLogger, { RUNTIME, JOB } from '../util/logger'; import { ExecuteOptions } from './command'; @@ -8,21 +10,21 @@ type ExtendedModuleInfo = ModuleInfo & { }; export default async ( - input: string | ExecutionPlan, - state: any, - opts: Omit + plan: ExecutionPlan, + input: any, + opts: ExecuteOptions ): Promise => { try { - const result = await run(input, state, { + const result = await run(plan, input, { strict: opts.strict, - start: opts.start, - timeout: opts.timeout, + // start: opts.start, + // timeout: 
opts.timeout, immutableState: opts.immutable, logger: createLogger(RUNTIME, opts), jobLogger: createLogger(JOB, opts), linker: { repo: opts.repoDir, - modules: parseAdaptors(opts), + modules: parseAdaptors(plan), }, }); return result; @@ -34,9 +36,7 @@ export default async ( }; // TODO we should throw if the adaptor strings are invalid for any reason -export function parseAdaptors( - opts: Partial> -) { +export function parseAdaptors(plan: ExecutionPlan) { const extractInfo = (specifier: string) => { const [module, path] = specifier.split('='); const { name, version } = getNameAndVersion(module); @@ -54,24 +54,15 @@ export function parseAdaptors( const adaptors: ModuleInfoMap = {}; - if (opts.adaptors) { - opts.adaptors.reduce((obj, exp) => { - const { name, ...maybeVersionAndPath } = extractInfo(exp); - obj[name] = { ...maybeVersionAndPath }; - return obj; - }, adaptors); - } - - if (opts.workflow) { - // TODO what if there are different versions of the same adaptor? - // This structure can't handle it - we'd need to build it for every job - Object.values(opts.workflow.jobs).forEach((job) => { - if (job.adaptor) { - const { name, ...maybeVersionAndPath } = extractInfo(job.adaptor); - adaptors[name] = { ...maybeVersionAndPath }; - } - }); - } + // TODO what if there are different versions of the same adaptor? 
+ // This structure can't handle it - we'd need to build it for every job + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + const { name, ...maybeVersionAndPath } = extractInfo(job.adaptor); + adaptors[name] = maybeVersionAndPath; + } + }); return adaptors; } diff --git a/packages/cli/src/execute/get-autoinstall-targets.ts b/packages/cli/src/execute/get-autoinstall-targets.ts index eead48820..79323a216 100644 --- a/packages/cli/src/execute/get-autoinstall-targets.ts +++ b/packages/cli/src/execute/get-autoinstall-targets.ts @@ -1,23 +1,14 @@ -import type { ExecuteOptions } from './command'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; -const getAutoinstallTargets = ( - options: Partial< - Pick - > -) => { - if (options.workflow) { - const adaptors = {} as Record; - Object.values(options.workflow.jobs).forEach((job) => { - if (job.adaptor) { - adaptors[job.adaptor] = true; - } - }); - return Object.keys(adaptors); - } - if (options.adaptors) { - return options.adaptors?.filter((a) => !/=/.test(a)); - } - return []; +const getAutoinstallTargets = (plan: ExecutionPlan) => { + const adaptors = {} as Record; + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + adaptors[job.adaptor] = true; + } + }); + return Object.keys(adaptors); }; export default getAutoinstallTargets; diff --git a/packages/cli/src/execute/handler.ts b/packages/cli/src/execute/handler.ts index aefb6894e..8b83af791 100644 --- a/packages/cli/src/execute/handler.ts +++ b/packages/cli/src/execute/handler.ts @@ -10,11 +10,7 @@ import { CompileOptions } from '../compile/command'; import { Logger, printDuration } from '../util/logger'; import loadState from '../util/load-state'; import validateAdaptors from '../util/validate-adaptors'; -import loadInput from '../util/load-input'; -import expandAdaptors from '../util/expand-adaptors'; -import mapAdaptorsToMonorepo, { - 
MapAdaptorsToMonorepoOptions, -} from '../util/map-adaptors-to-monorepo'; +import loadPlan from '../util/load-plan'; import assertPath from '../util/assert-path'; const executeHandler = async (options: ExecuteOptions, logger: Logger) => { @@ -22,23 +18,14 @@ const executeHandler = async (options: ExecuteOptions, logger: Logger) => { assertPath(options.path); await validateAdaptors(options, logger); - let input = await loadInput(options, logger); - - if (options.workflow) { - // expand shorthand adaptors in the workflow jobs - expandAdaptors(options); - await mapAdaptorsToMonorepo( - options as MapAdaptorsToMonorepoOptions, - logger - ); - } + let plan = await loadPlan(options, logger); const { repoDir, monorepoPath, autoinstall } = options; if (autoinstall) { if (monorepoPath) { logger.warn('Skipping auto-install as monorepo is being used'); } else { - const autoInstallTargets = getAutoinstallTargets(options); + const autoInstallTargets = getAutoinstallTargets(plan); if (autoInstallTargets.length) { logger.info('Auto-installing language adaptors'); await install({ packages: autoInstallTargets, repoDir }, logger); @@ -49,13 +36,13 @@ const executeHandler = async (options: ExecuteOptions, logger: Logger) => { const state = await loadState(options, logger); if (options.compile) { - input = await compile(options as CompileOptions, logger); + plan = await compile(options as CompileOptions, logger); } else { logger.info('Skipping compilation as noCompile is set'); } try { - const result = await execute(input!, state, options); + const result = await execute(plan, state, options); await serializeOutput(options, result, logger); const duration = printDuration(new Date().getTime() - start); if (result?.errors) { diff --git a/packages/cli/src/options.ts b/packages/cli/src/options.ts index a29be4c7f..a155780b2 100644 --- a/packages/cli/src/options.ts +++ b/packages/cli/src/options.ts @@ -1,13 +1,15 @@ import path from 'node:path'; - import yargs from 'yargs'; -import type { 
ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan } from '@openfn/lexicon'; + import type { CommandList } from './commands'; -import { CLIExecutionPlan } from './types'; +import { OldCLIWorkflow } from './types'; import { DEFAULT_REPO_DIR } from './constants'; -import doExpandAdaptors from './util/expand-adaptors'; -import ensureLogOpts from './util/ensure-log-opts'; -import { LogLevel } from './util'; +import { + expandAdaptors as doExpandAdaptors, + ensureLogOpts, + LogLevel, +} from './util'; // Central type definition for the main options // This represents the types coming out of yargs, @@ -37,6 +39,8 @@ export type Opts = { outputPath?: string; outputStdout?: boolean; packages?: string[]; + plan?: ExecutionPlan; + planPath?: string; projectPath?: string; repoDir?: string; skipAdaptorValidation?: boolean; @@ -48,9 +52,11 @@ export type Opts = { sanitize: 'none' | 'remove' | 'summarize' | 'obfuscate'; timeout?: number; // ms useAdaptorsMonorepo?: boolean; - workflow?: CLIExecutionPlan | ExecutionPlan; - workflowPath?: string; + workflow?: OldCLIWorkflow; projectId?: string; + + // deprecated + workflowPath?: string; }; // Definition of what Yargs returns (before ensure is called) @@ -97,8 +103,10 @@ export const adaptors: CLIOption = { opts.adaptors = []; } + // TODO this might be redundant now as load-plan should handle it + // maybe commands other than execute need it if (opts.expandAdaptors) { - doExpandAdaptors(opts); + opts.adaptors = doExpandAdaptors(opts.adaptors) as string[]; } // delete the aliases as they have not been expanded @@ -218,14 +226,12 @@ export const projectId: CLIOption = { hidden: true, }, ensure: (opts) => { - const projectId = opts.projectId; - //check that this is a uuid - return projectId; - }, + const projectId = opts.projectId; + //check that this is a uuid + return projectId; + }, }; - - // Input path covers jobPath and workflowPath export const inputPath: CLIOption = { name: 'input-path', @@ -235,7 +241,7 @@ 
export const inputPath: CLIOption = { ensure: (opts) => { const { path: basePath } = opts; if (basePath?.endsWith('.json')) { - opts.workflowPath = basePath; + opts.planPath = basePath; } else if (basePath?.endsWith('.js')) { opts.jobPath = basePath; } else { diff --git a/packages/cli/src/types.ts b/packages/cli/src/types.ts index 3c6b781e8..ed27ef8bc 100644 --- a/packages/cli/src/types.ts +++ b/packages/cli/src/types.ts @@ -2,6 +2,12 @@ // Ie config can be a string export type JobNodeID = string; +export type OldCLIWorkflow = { + id?: string; // UUID for this plan + start?: JobNodeID; + jobs: CLIJobNode[]; +}; + export type CLIExecutionPlan = { id?: string; // UUID for this plan start?: JobNodeID; diff --git a/packages/cli/src/util/expand-adaptors.ts b/packages/cli/src/util/expand-adaptors.ts index d60f1a0ab..ba90d6d66 100644 --- a/packages/cli/src/util/expand-adaptors.ts +++ b/packages/cli/src/util/expand-adaptors.ts @@ -1,4 +1,4 @@ -import { Opts } from '../options'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; const expand = (name: any) => { if (typeof name === 'string') { @@ -12,20 +12,18 @@ const expand = (name: any) => { return name; }; -export default (opts: Partial>) => { - const { adaptors, workflow } = opts; - - if (adaptors) { - opts.adaptors = adaptors?.map(expand); +export default (input: string[] | ExecutionPlan) => { + if (Array.isArray(input)) { + return input?.map(expand) as string[]; } - if (workflow) { - Object.values(workflow.jobs).forEach((job) => { - if (job.adaptor) { - job.adaptor = expand(job.adaptor); - } - }); - } + const plan = input as ExecutionPlan; + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + job.adaptor = expand(job.adaptor); + } + }); - return opts; + return plan; }; diff --git a/packages/cli/src/util/index.d.ts b/packages/cli/src/util/index.d.ts deleted file mode 100644 index 1ff09efd4..000000000 --- a/packages/cli/src/util/index.d.ts +++ /dev/null @@ -1 +0,0 
@@ -export * from './logger'; diff --git a/packages/cli/src/util/index.ts b/packages/cli/src/util/index.ts new file mode 100644 index 000000000..640967359 --- /dev/null +++ b/packages/cli/src/util/index.ts @@ -0,0 +1,6 @@ +import expandAdaptors from './expand-adaptors'; +import ensureLogOpts from './ensure-log-opts'; + +export * from './logger'; + +export { expandAdaptors, ensureLogOpts }; diff --git a/packages/cli/src/util/load-input.ts b/packages/cli/src/util/load-input.ts index da1e58f1b..f57b73e49 100644 --- a/packages/cli/src/util/load-input.ts +++ b/packages/cli/src/util/load-input.ts @@ -1,11 +1,12 @@ import path from 'node:path'; import fs from 'node:fs/promises'; import { isPath } from '@openfn/compiler'; +import type { ExecutionPlan } from '@openfn/lexicon'; import type { Logger } from '@openfn/logger'; -import type { Opts } from '../options'; -import { CLIExecutionPlan } from '../types'; -import { ExecutionPlan } from '@openfn/runtime'; + import abort from './abort'; +import type { CLIExecutionPlan } from '../types'; +import type { Opts } from '../options'; type LoadWorkflowOpts = Required< Pick diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts new file mode 100644 index 000000000..7e969fecd --- /dev/null +++ b/packages/cli/src/util/load-plan.ts @@ -0,0 +1,170 @@ +/* + * New entry point for loading up the input/execution plan + Note that htere's a lot of complexity from load input that I need to deal with here :( + */ +import fs from 'node:fs/promises'; +import path from 'node:path'; + +import abort from './abort'; + +import type { ExecutionPlan, Job, WorkflowOptions } from '@openfn/lexicon'; +import type { Opts } from '../options'; +import type { Logger } from './logger'; +import { OldCLIWorkflow } from '../types'; +import expandAdaptors from './expand-adaptors'; +import mapAdaptorsToMonorepo, { + MapAdaptorsToMonorepoOptions, +} from './map-adaptors-to-monorepo'; + +const loadPlan = async ( + options: Opts, + logger: 
Logger +): Promise => { + const { workflowPath, planPath, jobPath } = options; + + if (jobPath) { + return loadExpression(options, logger); + } + + const jsonPath = planPath || workflowPath; + // TODO if neither jobPath, planPath or workflowPath is set... what happens? + // I think the CLI will exit before we even get here + const json = await loadJson(jsonPath!, logger); + + if (json.workflow) { + return loadXPlan(json, options, logger); + } else { + return loadOldWorkflow(json, options, logger); + } +}; +export default loadPlan; + +// TODO this is way over simplified :( +// see load-input +const loadJson = async (workflowPath: string, logger: Logger): Promise => { + let text: string; + + try { + text = await fs.readFile(workflowPath, 'utf8'); + } catch (e) { + return abort( + logger, + 'Workflow not found', + undefined, + `Failed to load a workflow from ${workflowPath}` + ); + } + + let json: object; + try { + json = JSON.parse(text); + } catch (e: any) { + return abort( + logger, + 'Invalid JSON in workflow', + e, + `Check the syntax of the JSON at ${workflowPath}` + ); + } + + return json; +}; + +const maybeAssign = (a: any, b: any, keys: Array) => { + keys.forEach((key) => { + if (a.hasOwnProperty(key)) { + b[key] = a[key]; + } + }); +}; + +const loadExpression = async ( + options: Opts, + logger: Logger +): Promise => { + const jobPath = options.jobPath!; + + logger.debug(`Loading job from ${jobPath}`); + const expression = await fs.readFile(jobPath, 'utf8'); + const name = path.parse(jobPath).name; + + const step: Job = { expression }; + + // The adaptor should have been expanded nicely already, so we don't need todo much here + if (options.adaptors) { + const [adaptor] = options.adaptors; + if (adaptor) { + step.adaptor = adaptor; + } + } + + const wfOptions: WorkflowOptions = {}; + // TODO support state props to remove? 
+ maybeAssign(options, wfOptions, ['timeout']); + + const plan: ExecutionPlan = { + workflow: { + name, + steps: [step], + }, + options: wfOptions, + }; + // call loadXPlan now so that any options can be written + return loadXPlan(plan, options, logger); +}; + +const loadOldWorkflow = async ( + workflow: OldCLIWorkflow, + options: Opts, + logger: Logger +) => { + const plan: ExecutionPlan = { + workflow: { + steps: workflow.jobs, + }, + options: { + start: workflow.start, + }, + }; + + if (workflow.id) { + plan.id = workflow.id; + } + + try { + const name = path.parse(options.workflowPath!).name; + if (name) { + plan.workflow.name = name; + } + } catch (e) { + // do nothing + } + + // call loadXPlan now so that any options can be written + const final = await loadXPlan(plan, options, logger); + + // TODO this can be nicer + logger.warn('converted old workflow into execution plan'); + logger.warn(final); + + return final; +}; + +// TODO default the workflow name from the file name +const loadXPlan = async ( + plan: ExecutionPlan, + options: Opts, + logger: Logger +) => { + if (!plan.options) { + plan.options = {}; + } + + // expand shorthand adaptors in the workflow jobs + expandAdaptors(plan); + await mapAdaptorsToMonorepo(options.monorepoPath, plan, logger); + + // TODO: write any options from the user onto the potions object + + return plan; +}; diff --git a/packages/cli/src/util/map-adaptors-to-monorepo.ts b/packages/cli/src/util/map-adaptors-to-monorepo.ts index e4e33fce2..bf53d21c5 100644 --- a/packages/cli/src/util/map-adaptors-to-monorepo.ts +++ b/packages/cli/src/util/map-adaptors-to-monorepo.ts @@ -3,6 +3,8 @@ import path from 'node:path'; import assert from 'node:assert'; import { Logger } from '@openfn/logger'; import { getNameAndVersion } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + import type { Opts } from '../options'; export const validateMonoRepo = async (repoPath: string, log: Logger) => { @@ -32,6 +34,8 @@ 
export const updatePath = (adaptor: string, repoPath: string, log: Logger) => { } const shortName = name.replace('@openfn/language-', ''); const abspath = path.resolve(repoPath, 'packages', shortName); + + log.info(`Mapped adaptor ${name} to monorepo: ${abspath}`); return `${name}=${abspath}`; }; @@ -40,31 +44,27 @@ export type MapAdaptorsToMonorepoOptions = Pick< 'monorepoPath' | 'adaptors' | 'workflow' >; -// This will mutate options (adaptors, workflow) to support the monorepo const mapAdaptorsToMonorepo = async ( - options: MapAdaptorsToMonorepoOptions, + monorepoPath: string = '', + input: string[] | ExecutionPlan, log: Logger ) => { - const { adaptors, monorepoPath, workflow } = options; if (monorepoPath) { - await validateMonoRepo(monorepoPath, log); - log.success(`Loading adaptors from monorepo at ${monorepoPath}`); - if (adaptors) { - options.adaptors = adaptors.map((a) => { - const p = updatePath(a, monorepoPath, log); - log.info(`Mapped adaptor ${a} to monorepo: ${p.split('=')[1]}`); - return p; - }); - } - if (workflow) { - Object.values(workflow.jobs).forEach((job) => { - if (job.adaptor) { - job.adaptor = updatePath(job.adaptor, monorepoPath, log); - } - }); + if (Array.isArray(input)) { + const adaptors = input as string[]; + return adaptors.map((a) => updatePath(a, monorepoPath, log)); } + + const plan = input as ExecutionPlan; + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + job.adaptor = updatePath(job.adaptor, monorepoPath, log); + } + }); + + return plan; } - return options; }; export default mapAdaptorsToMonorepo; diff --git a/packages/cli/test/execute/get-autoinstall-targets.test.ts b/packages/cli/test/execute/get-autoinstall-targets.test.ts index 9dc275a6d..e5a4adaea 100644 --- a/packages/cli/test/execute/get-autoinstall-targets.test.ts +++ b/packages/cli/test/execute/get-autoinstall-targets.test.ts @@ -1,162 +1,82 @@ import test from 'ava'; import getAutoinstallTargets from 
'../../src/execute/get-autoinstall-targets'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; -test('return empty if an empty array is passed', (t) => { - const result = getAutoinstallTargets({ - adaptors: [], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('return 2 valid targets', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['a', 'b'], - }); - t.truthy(result); - t.is(result.length, 2); - t.deepEqual(result, ['a', 'b']); -}); - -test('return empty if a path is passed', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['a=a/b/c'], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('return 1 valid target', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['a=/some/path', 'b@1.2.3'], - }); - t.truthy(result); - t.is(result.length, 1); - t.deepEqual(result, ['b@1.2.3']); -}); - -test('return language common', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common'], - }); - t.truthy(result); - t.is(result.length, 1); - t.deepEqual(result, ['@openfn/language-common']); -}); - -test('return language common with specifier', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common@1.0.0'], - }); - t.truthy(result); - t.is(result.length, 1); - t.deepEqual(result, ['@openfn/language-common@1.0.0']); -}); - -test('reject language common with path', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common=/a/b/c'], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('reject language common with specifier and path', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common@1.0.0=/tmp/repo/common'], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('empty workflow', (t) => { - const result = getAutoinstallTargets({ +const getPlan = (steps: Job[]) => + ({ workflow: { - start: 'a', - jobs: {}, + steps, }, - }); + options: {}, + } as ExecutionPlan); 
+ +test('empty plan', (t) => { + const plan = getPlan([]); + const result = getAutoinstallTargets(plan); t.truthy(result); t.is(result.length, 0); }); -test('workflow with zero adaptors', (t) => { - const result = getAutoinstallTargets({ - workflow: { - start: 'a', - jobs: { - a: { - expression: 'fn()', - }, - }, +test('plan with zero adaptors', (t) => { + const plan = getPlan([ + { + expression: 'fn()', }, - }); + ]); + const result = getAutoinstallTargets(plan); t.truthy(result); t.is(result.length, 0); }); -test('workflow with multiple adaptors', (t) => { - const result = getAutoinstallTargets({ - workflow: { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-http', - expression: 'fn()', - }, - }, +test('plan with multiple adaptors', (t) => { + const plan = getPlan([ + { + adaptor: '@openfn/language-common', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-http', + expression: 'fn()', }, - }); + ]); + const result = getAutoinstallTargets(plan); t.is(result.length, 2); t.deepEqual(result, ['@openfn/language-common', '@openfn/language-http']); }); -test('workflow with duplicate adaptors', (t) => { - const result = getAutoinstallTargets({ - workflow: { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - }, +test('plan with duplicate adaptors', (t) => { + const plan = getPlan([ + { + adaptor: '@openfn/language-common', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-common', + expression: 'fn()', }, - }); + ]); + const result = getAutoinstallTargets(plan); t.is(result.length, 1); t.deepEqual(result, ['@openfn/language-common']); }); -test('workflow with one adaptor but different versions', (t) => { - const result = getAutoinstallTargets({ - adaptors: [], - workflow: { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common@1.0.0', - 
expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-common@2.0.0', - expression: 'fn()', - }, - c: { - adaptor: '@openfn/language-common@3.0.0', - expression: 'fn()', - }, - }, +test('plan with one adaptor but different versions', (t) => { + const plan = getPlan([ + { + adaptor: '@openfn/language-common@1.0.0', + expression: 'fn()', }, - }); + { + adaptor: '@openfn/language-common@2.0.0', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-common@3.0.0', + expression: 'fn()', + }, + ]); + const result = getAutoinstallTargets(plan); t.is(result.length, 3); t.deepEqual(result, [ '@openfn/language-common@1.0.0', @@ -164,3 +84,15 @@ test('workflow with one adaptor but different versions', (t) => { '@openfn/language-common@3.0.0', ]); }); + +test('do not return adaptors with a path', (t) => { + const plan = getPlan([ + { + expression: 'fn()', + adaptor: 'commoin=a/b/c', + }, + ]); + const result = getAutoinstallTargets(plan); + t.truthy(result); + t.is(result.length, 0); +}); diff --git a/packages/cli/test/execute/parse-adaptors.test.ts b/packages/cli/test/execute/parse-adaptors.test.ts index 46f2444dc..73a01715a 100644 --- a/packages/cli/test/execute/parse-adaptors.test.ts +++ b/packages/cli/test/execute/parse-adaptors.test.ts @@ -2,74 +2,83 @@ import test from 'ava'; import { parseAdaptors } from '../../src/execute/execute'; -test('parse a simple specifier', (t) => { - const adaptors = ['a']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.truthy(result.a); - t.falsy(Object.keys(result.a).length); -}); +// This is all useless now because we parse on an execution plan +// test('parse a simple specifier', (t) => { +// const adaptors = ['a']; +// const result = parseAdaptors({ adaptors }); +// t.assert(Object.keys(result).length === 1); +// t.truthy(result.a); +// t.falsy(Object.keys(result.a).length); +// }); -test('parse multiple specifiers', (t) => { - const adaptors = ['a', 'b']; - const result = 
parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 2); - t.truthy(result.a); - t.truthy(result.b); -}); +// test('parse multiple specifiers', (t) => { +// const adaptors = ['a', 'b']; +// const result = parseAdaptors({ adaptors }); +// t.assert(Object.keys(result).length === 2); +// t.truthy(result.a); +// t.truthy(result.b); +// }); -test('parse a specifier with a path', (t) => { - const adaptors = ['a=x']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.deepEqual(result.a, { path: 'x' }); -}); +// test('parse a specifier with a path', (t) => { +// const adaptors = ['a=x']; +// const result = parseAdaptors({ adaptors }); +// t.assert(Object.keys(result).length === 1); +// t.deepEqual(result.a, { path: 'x' }); +// }); -test('parse a specifier with a version', (t) => { - const adaptors = ['a@1']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.deepEqual(result.a, { version: '1' }); -}); +// test('parse a specifier with a version', (t) => { +// const adaptors = ['a@1']; +// const result = parseAdaptors({ adaptors }); +// t.assert(Object.keys(result).length === 1); +// t.deepEqual(result.a, { version: '1' }); +// }); -test('parse a specifier with a path and version', (t) => { - const adaptors = ['a@1=x']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.deepEqual(result.a, { path: 'x', version: '1' }); -}); +// test('parse a specifier with a path and version', (t) => { +// const adaptors = ['a@1=x']; +// const result = parseAdaptors({ adaptors }); +// t.assert(Object.keys(result).length === 1); +// t.deepEqual(result.a, { path: 'x', version: '1' }); +// }); -test('parse @openfn/language-common@1.0.0=~/repo/modules/common', (t) => { - const adaptors = ['@openfn/language-common@1.0.0=~/repo/modules/common']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - 
t.deepEqual(result, { - '@openfn/language-common': { - path: '~/repo/modules/common', - version: '1.0.0', - }, - }); -}); +// test('parse @openfn/language-common@1.0.0=~/repo/modules/common', (t) => { +// const adaptors = ['@openfn/language-common@1.0.0=~/repo/modules/common']; +// const result = parseAdaptors({ adaptors }); +// t.assert(Object.keys(result).length === 1); +// t.deepEqual(result, { +// '@openfn/language-common': { +// path: '~/repo/modules/common', +// version: '1.0.0', +// }, +// }); +// }); -test('parse workflow', (t) => { - const workflow = { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-http@1.0.0', - expression: 'fn()', - }, - c: { - adaptor: '@openfn/language-salesforce=a/b/c', - expression: 'fn()', +test('parse plan', (t) => { + const plan = { + options: { + start: 'a', + }, + workflow: { + // TODO oh no the workflow structure accepted by the CLI isa bit different! + // its an indexed object, rather than an array + // no its not. it comes in as an array. + // what is this structure? 
+ steps: [ + a: { + adaptor: '@openfn/language-common', + expression: 'fn()', + }, + b: { + adaptor: '@openfn/language-http@1.0.0', + expression: 'fn()', + }, + c: { + adaptor: '@openfn/language-salesforce=a/b/c', + expression: 'fn()', + }, }, }, }; - const result = parseAdaptors({ workflow }); + const result = parseAdaptors(plan); t.assert(Object.keys(result).length === 3); t.deepEqual(result, { '@openfn/language-common': {}, diff --git a/packages/cli/test/util/expand-adaptors.test.ts b/packages/cli/test/util/expand-adaptors.test.ts index fa0c19da7..23f1a006d 100644 --- a/packages/cli/test/util/expand-adaptors.test.ts +++ b/packages/cli/test/util/expand-adaptors.test.ts @@ -2,86 +2,91 @@ import test from 'ava'; import expandAdaptors from '../../src/util/expand-adaptors'; test('expands common', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common'] }); - t.is(adaptors![0], '@openfn/language-common'); + const adaptors = expandAdaptors(['common']) as string[]; + t.is(adaptors[0], '@openfn/language-common'); }); test('expands common with version', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common@1.0.0'] }); - t.is(adaptors![0], '@openfn/language-common@1.0.0'); + const adaptors = expandAdaptors(['common@1.0.0']) as string[]; + t.is(adaptors[0], '@openfn/language-common@1.0.0'); }); test('expands common with path', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common=a/b/c'] }); - t.is(adaptors![0], '@openfn/language-common=a/b/c'); + const adaptors = expandAdaptors(['common=a/b/c']) as string[]; + t.is(adaptors[0], '@openfn/language-common=a/b/c'); }); test('expands http and dhis2', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common', 'dhis2'] }); - const [a, b] = adaptors!; + const adaptors = expandAdaptors(['common', 'dhis2']) as string[]; + const [a, b] = adaptors; t.is(a, '@openfn/language-common'); t.is(b, '@openfn/language-dhis2'); }); test('expands nonsense', (t) => { - const { adaptors } = 
expandAdaptors({ adaptors: ['gn@25~A8fa1'] }); - t.is(adaptors![0], '@openfn/language-gn@25~A8fa1'); + const adaptors = expandAdaptors(['gn@25~A8fa1']) as string[]; + t.is(adaptors[0], '@openfn/language-gn@25~A8fa1'); }); test('does not expand a full adaptor name', (t) => { - const { adaptors } = expandAdaptors({ - adaptors: ['@openfn/language-common'], - }); - t.is(adaptors![0], '@openfn/language-common'); + const adaptors = expandAdaptors(['@openfn/language-common']) as string[]; + t.is(adaptors[0], '@openfn/language-common'); }); test('does not expand a full adaptor name with a path', (t) => { - const { adaptors } = expandAdaptors({ - adaptors: ['@openfn/language-common=a/b/c'], - }); - t.is(adaptors![0], '@openfn/language-common=a/b/c'); + const adaptors = expandAdaptors([ + '@openfn/language-common=a/b/c', + ]) as string[]; + t.is(adaptors[0], '@openfn/language-common=a/b/c'); }); test('does not expand a simple path', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['a/b'] }); - t.is(adaptors![0], 'a/b'); + const adaptors = expandAdaptors(['a/b']) as string[]; + t.is(adaptors[0], 'a/b'); }); test('does not expand an absolute path', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['/a/b/c'] }); - t.is(adaptors![0], '/a/b/c'); + const adaptors = expandAdaptors(['/a/b/c']) as string[]; + t.is(adaptors[0], '/a/b/c'); }); test('does not expand a js file', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['my-adaptor.js'] }); - t.is(adaptors![0], 'my-adaptor.js'); + const adaptors = expandAdaptors(['my-adaptor.js']) as string[]; + t.is(adaptors[0], 'my-adaptor.js'); }); -test('expands adaptors in a workflow', (t) => { - const workflow = { - start: 'a', - jobs: { - a: { - adaptor: 'common', - expression: 'fn()', - }, - b: { - adaptor: 'http@1.0.0', - expression: 'fn()', - }, - c: { - adaptor: 'salesforce=a/b/c', - expression: 'fn()', - }, - d: { - adaptor: 'a/b/c/my-adaptor.js', - expression: 'fn()', - }, +test('expands adaptors in an 
execution plan', (t) => { + const plan = { + workflow: { + steps: [ + { + id: 'a', + adaptor: 'common', + expression: 'fn()', + }, + { + id: 'b', + adaptor: 'http@1.0.0', + expression: 'fn()', + }, + { + id: 'c', + adaptor: 'salesforce=a/b/c', + expression: 'fn()', + }, + { + id: 'd', + adaptor: 'a/b/c/my-adaptor.js', + expression: 'fn()', + }, + ], }, + options: {}, }; - const newOpts = expandAdaptors({ workflow }); - t.is(newOpts.workflow!.jobs.a.adaptor, '@openfn/language-common'); - t.is(newOpts.workflow!.jobs.b.adaptor, '@openfn/language-http@1.0.0'); - t.is(newOpts.workflow!.jobs.c.adaptor, '@openfn/language-salesforce=a/b/c'); - t.is(newOpts.workflow!.jobs.d.adaptor, 'a/b/c/my-adaptor.js'); + expandAdaptors(plan); + const [a, b, c, d] = plan.workflow.steps; + t.is(a.adaptor, '@openfn/language-common'); + t.is(b.adaptor, '@openfn/language-http@1.0.0'); + t.is(c.adaptor, '@openfn/language-salesforce=a/b/c'); + t.is(d.adaptor, 'a/b/c/my-adaptor.js'); }); diff --git a/packages/cli/test/util/load-input.test.ts b/packages/cli/test/util/load-input.test.ts index 4ee819802..cd20ec1f5 100644 --- a/packages/cli/test/util/load-input.test.ts +++ b/packages/cli/test/util/load-input.test.ts @@ -1,18 +1,27 @@ import test from 'ava'; import mock from 'mock-fs'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; + import loadInput from '../../src/util/load-input'; -import { ExecutionPlan } from '@openfn/runtime'; const logger = createMockLogger(undefined, { level: 'debug' }); +// TODO add support for handling old versions here test.beforeEach(() => { mock({ 'test/job.js': 'x', - 'test/wf.json': JSON.stringify({ + 'test/wf-old.json': JSON.stringify({ start: 'a', jobs: [{ id: 'a', expression: 'x()' }], }), + 'test/wf.json': JSON.stringify({ + options: { start: 'a' }, + workflow: { + // TODO rename steps + jobs: [{ id: 'a', expression: 'x()' }], + }, + }), 'test/wf-err.json': '!!!', }); }); @@ -32,7 +41,7 @@ test.serial('do 
nothing if no path provided', async (t) => { test.serial('return the workflow if already set ', async (t) => { const opts = { - workflow: { start: 'x', jobs: [] }, + workflow: { options: { start: 'x' }, jobs: [] }, job: 'j', jobPath: 'test/job.js', }; diff --git a/packages/cli/test/util/load-plan.test.ts b/packages/cli/test/util/load-plan.test.ts new file mode 100644 index 000000000..773893725 --- /dev/null +++ b/packages/cli/test/util/load-plan.test.ts @@ -0,0 +1,172 @@ +import test from 'ava'; +import mock from 'mock-fs'; +import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + +import loadPlan from '../../src/util/load-plan'; +import { Opts } from '../../src/options'; + +const logger = createMockLogger(undefined, { level: 'debug' }); + +const sampleXPlan = { + options: { start: 'a' }, + workflow: { + steps: [{ id: 'a', expression: 'x()' }], + }, +}; + +const createPlan = (steps: Job[] = []) => ({ + workflow: { + steps, + }, + options: { + start: steps[0]?.id ?? 
'a', + }, +}); + +test.beforeEach(() => { + mock({ + 'test/job.js': 'x', + 'test/wf-old.json': JSON.stringify({ + start: 'a', + jobs: [{ id: 'a', expression: 'x()' }], + }), + 'test/wf.json': JSON.stringify(sampleXPlan), + 'test/wf-err.json': '!!!', + }); +}); + +test.afterEach(() => { + logger._reset(); + mock.restore(); +}); + +test.serial('expression: load a plan from an expression.js', async (t) => { + const opts = { + jobPath: 'test/job.js', + plan: {}, + }; + + const plan = await loadPlan(opts as Opts, logger); + + t.truthy(plan); + t.deepEqual(plan.options, {}); + t.is(plan.workflow.steps.length, 1); + t.is(plan.workflow.name, 'job'); + t.deepEqual(plan.workflow.steps[0], { + expression: 'x', + }); +}); + +test.serial('expression: set an adaptor on the plan', async (t) => { + const opts = { + jobPath: 'test/job.js', + // Note that adaptor expansion should have happened before loadPlan is called + adaptors: ['@openfn/language-common'], + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + const step = plan.workflow.steps[0] as Job; + + t.is(step.adaptor, '@openfn/language-common'); +}); + +test.serial('expression: set a timeout on the plan', async (t) => { + const opts = { + jobPath: 'test/job.js', + timeout: 111, + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + t.is(plan.options.timeout, 111); +}); + +test.todo('expression: load a plan from an expression.js and add options'); + +test.serial('xplan: load a plan from workflow path', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + plan: {}, + }; + + const plan = await loadPlan(opts as Opts, logger); + + t.truthy(plan); + t.deepEqual(plan, sampleXPlan); +}); + +test.serial('xplan: expand adaptors', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + adaptor: 'common@1.0.0', + }, + ]); + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const plan 
= await loadPlan(opts as Opts, logger); + t.truthy(plan); + + const step = plan.workflow.steps[0] as Job; + t.is(step.adaptor, '@openfn/language-common@1.0.0'); +}); + +test.serial('xplan: map to monorepo', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + plan: {}, + monorepoPath: '/repo/', + } as Partial; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + adaptor: 'common', + }, + ]); + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const plan = await loadPlan(opts as Opts, logger); + t.truthy(plan); + + const step = plan.workflow.steps[0] as Job; + t.is(step.adaptor, '@openfn/language-common=/repo/packages/common'); +}); + +test.todo('xplan: load a plan from a workflow path and add options'); + +test.serial('old-workflow: load a plan from workflow path', async (t) => { + const opts = { + workflowPath: 'test/wf-old.json', + plan: {}, + }; + + const plan = await loadPlan(opts as Opts, logger); + + t.deepEqual(plan.options, { + start: 'a', + }); + t.is(plan.workflow.steps.length, 1); + t.is(plan.workflow.name, 'wf-old'); + t.deepEqual(plan.workflow.steps[0], { + id: 'a', + expression: 'x()', + }); +}); + +test.todo('old-workflow: load a plan from a workflow path and add options'); diff --git a/packages/cli/test/util/map-adaptors-to-monorepo.test.ts b/packages/cli/test/util/map-adaptors-to-monorepo.test.ts index a5970ad01..3c6dd9a7d 100644 --- a/packages/cli/test/util/map-adaptors-to-monorepo.test.ts +++ b/packages/cli/test/util/map-adaptors-to-monorepo.test.ts @@ -7,6 +7,7 @@ import mapAdaptorsToMonorepo, { validateMonoRepo, updatePath, } from '../../src/util/map-adaptors-to-monorepo'; +import { ExecutionPlan } from '@openfn/lexicon'; const REPO_PATH = 'a/b/c'; const ABS_REPO_PATH = path.resolve(REPO_PATH); @@ -72,13 +73,8 @@ test.serial('mapAdaptorsToMonorepo: map adaptors', async (t) => { [`${REPO_PATH}/package.json`]: '{ "name": "adaptors" }', }); - const options = { - monorepoPath: REPO_PATH, - adaptors: ['common'], - 
}; - - const newOptions = await mapAdaptorsToMonorepo(options, logger); - t.deepEqual(newOptions.adaptors, [`common=${ABS_REPO_PATH}/packages/common`]); + const result = await mapAdaptorsToMonorepo(REPO_PATH, ['common'], logger); + t.deepEqual(result, [`common=${ABS_REPO_PATH}/packages/common`]); }); test.serial('mapAdaptorsToMonorepo: map workflow', async (t) => { @@ -86,23 +82,23 @@ test.serial('mapAdaptorsToMonorepo: map workflow', async (t) => { [`${REPO_PATH}/package.json`]: '{ "name": "adaptors" }', }); - const options = { - monorepoPath: REPO_PATH, + const plan: ExecutionPlan = { workflow: { - id: 'x', - jobs: [ + steps: [ { + expression: '.', adaptor: 'common', }, ], }, + options: {}, }; - const newOptions = await mapAdaptorsToMonorepo(options, logger); - t.deepEqual(newOptions.workflow, { - id: 'x', - jobs: [ + await mapAdaptorsToMonorepo(REPO_PATH, plan, logger); + t.deepEqual(plan.workflow, { + steps: [ { + expression: '.', adaptor: `common=${ABS_REPO_PATH}/packages/common`, }, ], From 925bd8fb2ba01747f731dde71c76e5f75fe593c0 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Sat, 3 Feb 2024 16:01:48 +0000 Subject: [PATCH 044/128] cli: basically get the CLI working again --- packages/cli/src/compile/compile.ts | 42 +++++++++++++++------------- packages/cli/src/compile/handler.ts | 31 +++++++------------- packages/cli/src/execute/command.ts | 6 ++-- packages/cli/src/execute/handler.ts | 5 ++-- packages/cli/src/util/load-plan.ts | 17 ++++++----- packages/compiler/src/compile.ts | 4 +-- packages/lexicon/core.d.ts | 38 ++++++++++++++----------- packages/runtime/src/execute/plan.ts | 2 +- 8 files changed, 71 insertions(+), 74 deletions(-) diff --git a/packages/cli/src/compile/compile.ts b/packages/cli/src/compile/compile.ts index d08b34eda..edbe252ba 100644 --- a/packages/cli/src/compile/compile.ts +++ b/packages/cli/src/compile/compile.ts @@ -1,27 +1,28 @@ import compile, { preloadAdaptorExports, Options } from '@openfn/compiler'; -import { getModulePath, 
ExecutionPlan } from '@openfn/runtime'; +import { getModulePath } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + import createLogger, { COMPILER, Logger } from '../util/logger'; import abort from '../util/abort'; import type { CompileOptions } from './command'; // Load and compile a job from a file, then return the result // This is designed to be re-used in different CLI steps -export default async (opts: CompileOptions, log: Logger) => { - log.debug('Compiling...'); - let job; - if (opts.workflow) { - // Note that the workflow will be loaded into an object by this point - job = compileWorkflow(opts.workflow as ExecutionPlan, opts, log); - } else { - job = await compileJob((opts.job || opts.jobPath) as string, opts, log); +export default async ( + planOrPath: ExecutionPlan | string, + opts: CompileOptions, + log: Logger +) => { + if (typeof planOrPath === 'string') { + const result = await compileJob(planOrPath as string, opts, log); + log.success(`Compiled expression from ${opts.jobPath}`); + return result; } - if (opts.jobPath) { - log.success(`Compiled from ${opts.jobPath}`); - } else { - log.success('Compilation complete'); - } - return job; + const compiledPlan = compileWorkflow(planOrPath as ExecutionPlan, opts, log); + log.success('Compiled all expressions in workflow'); + + return compiledPlan; }; const compileJob = async ( @@ -29,7 +30,7 @@ const compileJob = async ( opts: CompileOptions, log: Logger, jobName?: string -) => { +): Promise => { try { const compilerOptions: Options = await loadTransformOptions(opts, log); return compile(job, compilerOptions); @@ -40,16 +41,19 @@ const compileJob = async ( e, 'Check the syntax of the job expression:\n\n' + job ); + // This will never actully execute + return ''; } }; // Find every expression in the job and run the compiler on it const compileWorkflow = async ( - workflow: ExecutionPlan, + plan: ExecutionPlan, opts: CompileOptions, log: Logger ) => { - for (const job of 
workflow.jobs) { + for (const step of plan.workflow.steps) { + const job = step as Job; const jobOpts = { ...opts, }; @@ -65,7 +69,7 @@ const compileWorkflow = async ( ); } } - return workflow; + return plan; }; // TODO this is a bit of a temporary solution diff --git a/packages/cli/src/compile/handler.ts b/packages/cli/src/compile/handler.ts index 3e78eaa11..23f10bb7d 100644 --- a/packages/cli/src/compile/handler.ts +++ b/packages/cli/src/compile/handler.ts @@ -3,34 +3,23 @@ import type { CompileOptions } from './command'; import type { Logger } from '../util/logger'; import compile from './compile'; -import loadInput from '../util/load-input'; -import expandAdaptors from '../util/expand-adaptors'; +import loadPlan from '../util/load-plan'; import assertPath from '../util/assert-path'; -import mapAdaptorsToMonorepo, { - MapAdaptorsToMonorepoOptions, -} from '../util/map-adaptors-to-monorepo'; const compileHandler = async (options: CompileOptions, logger: Logger) => { assertPath(options.path); - // TODO use loadPlan - await loadInput(options, logger); - // if (options.workflow) { - // // expand shorthand adaptors in the workflow jobs - // expandAdaptors(options); - // await mapAdaptorsToMonorepo( - // options as MapAdaptorsToMonorepoOptions, - // logger - // ); - // } - - let result = await compile(options, logger); - if (options.workflow) { - result = JSON.stringify(result); + let result; + if (options.jobPath) { + result = await compile(options.jobPath, options, logger); + } else { + const plan = await loadPlan(options, logger); + result = await compile(plan, options, logger); + result = JSON.stringify(result, null, 2); } + if (options.outputStdout) { - logger.success('Compiled code:'); - logger.success('\n' + result); + logger.success('Result:\n\n' + result); } else { await writeFile(options.outputPath!, result as string); logger.success(`Compiled to ${options.outputPath}`); diff --git a/packages/cli/src/execute/command.ts b/packages/cli/src/execute/command.ts 
index 6183b82a5..60866ac67 100644 --- a/packages/cli/src/execute/command.ts +++ b/packages/cli/src/execute/command.ts @@ -62,9 +62,9 @@ const options = [ const executeCommand: yargs.CommandModule = { command: 'execute [path]', - describe: `Run an openfn job or workflow. Get more help by running openfn help. - \nExecute will run a job/workflow at the path and write the output state to disk (to ./state.json unless otherwise specified) - \nBy default only state.data will be returned fron a job. Include --no-strict to write the entire state object. + describe: `Run an openfn expression or workflow. Get more help by running openfn help. + \nExecute will run a expression/workflow at the path and write the output state to disk (to ./state.json unless otherwise specified) + \nBy default only state.data will be returned fron a expression. Include --no-strict to write the entire state object. \nRemember to include the adaptor name with -a. Auto install adaptors with the -i flag.`, aliases: ['$0'], handler: ensure('execute', options), diff --git a/packages/cli/src/execute/handler.ts b/packages/cli/src/execute/handler.ts index 8b83af791..d8dd4d7b7 100644 --- a/packages/cli/src/execute/handler.ts +++ b/packages/cli/src/execute/handler.ts @@ -1,3 +1,5 @@ +import type { ExecutionPlan } from '@openfn/lexicon'; + import type { ExecuteOptions } from './command'; import execute from './execute'; import serializeOutput from './serialize-output'; @@ -5,7 +7,6 @@ import getAutoinstallTargets from './get-autoinstall-targets'; import { install } from '../repo/handler'; import compile from '../compile/compile'; -import { CompileOptions } from '../compile/command'; import { Logger, printDuration } from '../util/logger'; import loadState from '../util/load-state'; @@ -36,7 +37,7 @@ const executeHandler = async (options: ExecuteOptions, logger: Logger) => { const state = await loadState(options, logger); if (options.compile) { - plan = await compile(options as CompileOptions, logger); + plan 
= (await compile(plan, options, logger)) as ExecutionPlan; } else { logger.info('Skipping compilation as noCompile is set'); } diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 7e969fecd..fd333d1be 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -12,12 +12,10 @@ import type { Opts } from '../options'; import type { Logger } from './logger'; import { OldCLIWorkflow } from '../types'; import expandAdaptors from './expand-adaptors'; -import mapAdaptorsToMonorepo, { - MapAdaptorsToMonorepoOptions, -} from './map-adaptors-to-monorepo'; +import mapAdaptorsToMonorepo from './map-adaptors-to-monorepo'; const loadPlan = async ( - options: Opts, + options: Pick, logger: Logger ): Promise => { const { workflowPath, planPath, jobPath } = options; @@ -37,6 +35,7 @@ const loadPlan = async ( return loadOldWorkflow(json, options, logger); } }; + export default loadPlan; // TODO this is way over simplified :( @@ -79,12 +78,12 @@ const maybeAssign = (a: any, b: any, keys: Array) => { }; const loadExpression = async ( - options: Opts, + options: Pick, logger: Logger ): Promise => { const jobPath = options.jobPath!; - logger.debug(`Loading job from ${jobPath}`); + logger.debug(`Loading expression from ${jobPath}`); const expression = await fs.readFile(jobPath, 'utf8'); const name = path.parse(jobPath).name; @@ -115,7 +114,7 @@ const loadExpression = async ( const loadOldWorkflow = async ( workflow: OldCLIWorkflow, - options: Opts, + options: Pick, logger: Logger ) => { const plan: ExecutionPlan = { @@ -153,7 +152,7 @@ const loadOldWorkflow = async ( // TODO default the workflow name from the file name const loadXPlan = async ( plan: ExecutionPlan, - options: Opts, + options: Pick, logger: Logger ) => { if (!plan.options) { @@ -164,7 +163,7 @@ const loadXPlan = async ( expandAdaptors(plan); await mapAdaptorsToMonorepo(options.monorepoPath, plan, logger); - // TODO: write any options from the user onto 
the potions object + // TODO: write any options from the user onto the options object return plan; }; diff --git a/packages/compiler/src/compile.ts b/packages/compiler/src/compile.ts index 9e37b192d..9e66d17d3 100644 --- a/packages/compiler/src/compile.ts +++ b/packages/compiler/src/compile.ts @@ -21,10 +21,10 @@ export default function compile(pathOrSource: string, options: Options = {}) { let source = pathOrSource; if (isPath(pathOrSource)) { - logger.debug('Starting compilation from file at', pathOrSource); + //logger.debug('Starting compilation from file at', pathOrSource); source = loadFile(pathOrSource); } else { - logger.debug('Starting compilation from string'); + //logger.debug('Starting compilation from string'); } const ast = parse(source); diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index 27469b326..f659928f4 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -20,6 +20,27 @@ export type Workflow = { steps: Array; }; +/** + * A type of Step which executes code + * This is some openfn expression plus metadata (adaptor, credentials) + */ +export interface Job extends Step { + adaptor?: string; + expression: Expression; + configuration?: object | string; + state?: Omit | string; +} + +/** + * A raw openfn-js script to be executed by the runtime + * + * Can be compiled as part of a job. + * + * The expression itself has no metadata. 
It likely needs + * an adaptor and input state to run + */ +export type Expression = string; + /** * State is an object passed into a workflow and returned from a workflow */ @@ -86,28 +107,11 @@ export type StepEdge = disabled?: boolean; }; -/** - * A type of Step which executes code - */ -export interface Job extends Step { - adaptor?: string; - expression: string; - configuration?: object | string; - state?: Omit | string; -} - /** * A no-op type of Step */ export interface Trigger extends Step {} -/** - * A raw openfn-js script to be executed by the runtime - * - * Can be compiled as part of a job - */ -export type Expression = string; - /** * An expression which has been compiled, and so includes import and export statements */ diff --git a/packages/runtime/src/execute/plan.ts b/packages/runtime/src/execute/plan.ts index c2083c76f..e61c0c19f 100644 --- a/packages/runtime/src/execute/plan.ts +++ b/packages/runtime/src/execute/plan.ts @@ -12,7 +12,7 @@ import { CompiledExecutionPlan } from '../types'; const executePlan = async ( plan: ExecutionPlan, - input: Lazy, + input: Lazy | undefined, opts: Options, logger: Logger ) => { From 0ac21f60389dca92a1795c7a0934ce64bf150ae7 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Sat, 3 Feb 2024 16:11:46 +0000 Subject: [PATCH 045/128] cli: types --- packages/cli/package.json | 1 + packages/cli/src/commands.ts | 5 ++--- packages/cli/src/util/map-adaptors-to-monorepo.ts | 7 ++++--- pnpm-lock.yaml | 3 +++ 4 files changed, 10 insertions(+), 6 deletions(-) diff --git a/packages/cli/package.json b/packages/cli/package.json index 67b286895..dac6b0207 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -34,6 +34,7 @@ "license": "ISC", "devDependencies": { "@openfn/language-common": "2.0.0-rc3", + "@openfn/lexicon": "workspace:^", "@types/mock-fs": "^4.13.1", "@types/node": "^18.15.13", "@types/rimraf": "^3.0.2", diff --git a/packages/cli/src/commands.ts b/packages/cli/src/commands.ts index e43d743f2..441213656 
100644 --- a/packages/cli/src/commands.ts +++ b/packages/cli/src/commands.ts @@ -11,7 +11,6 @@ import { clean, install, pwd, list } from './repo/handler'; import createLogger, { CLI, Logger } from './util/logger'; import mapAdaptorsToMonorepo, { - MapAdaptorsToMonorepoOptions, validateMonoRepo, } from './util/map-adaptors-to-monorepo'; import printVersions from './util/print-versions'; @@ -71,11 +70,11 @@ const parse = async (options: Opts, log?: Logger) => { await validateMonoRepo(monorepoPath, logger); logger.success(`Loading adaptors from monorepo at ${monorepoPath}`); - options.adaptors = await mapAdaptorsToMonorepo( + options.adaptors = mapAdaptorsToMonorepo( monorepoPath, options.adaptors, logger - ); + ) as string[]; } // TODO it would be nice to do this in the repoDir option, but diff --git a/packages/cli/src/util/map-adaptors-to-monorepo.ts b/packages/cli/src/util/map-adaptors-to-monorepo.ts index bf53d21c5..c72ca3548 100644 --- a/packages/cli/src/util/map-adaptors-to-monorepo.ts +++ b/packages/cli/src/util/map-adaptors-to-monorepo.ts @@ -44,11 +44,11 @@ export type MapAdaptorsToMonorepoOptions = Pick< 'monorepoPath' | 'adaptors' | 'workflow' >; -const mapAdaptorsToMonorepo = async ( +const mapAdaptorsToMonorepo = ( monorepoPath: string = '', - input: string[] | ExecutionPlan, + input: string[] | ExecutionPlan = [], log: Logger -) => { +): string[] | ExecutionPlan => { if (monorepoPath) { if (Array.isArray(input)) { const adaptors = input as string[]; @@ -65,6 +65,7 @@ const mapAdaptorsToMonorepo = async ( return plan; } + return input; }; export default mapAdaptorsToMonorepo; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index ae793d257..85cd8237a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -185,6 +185,9 @@ importers: '@openfn/language-common': specifier: 2.0.0-rc3 version: 2.0.0-rc3 + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@types/mock-fs': specifier: ^4.13.1 version: 4.13.1 From 
cb21bfc88e7d013696faec283ce5d13ca083e773 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Sat, 3 Feb 2024 17:01:29 +0000 Subject: [PATCH 046/128] cli: fix a bunch of tests, update workflow parsing --- .../src/execute/get-autoinstall-targets.ts | 3 +- packages/cli/src/execute/handler.ts | 1 - packages/cli/src/util/load-plan.ts | 90 +++++- packages/cli/test/execute/execute.test.ts | 286 +++++++++++------- .../execute/get-autoinstall-targets.test.ts | 2 +- 5 files changed, 256 insertions(+), 126 deletions(-) diff --git a/packages/cli/src/execute/get-autoinstall-targets.ts b/packages/cli/src/execute/get-autoinstall-targets.ts index 79323a216..677f41f50 100644 --- a/packages/cli/src/execute/get-autoinstall-targets.ts +++ b/packages/cli/src/execute/get-autoinstall-targets.ts @@ -4,7 +4,8 @@ const getAutoinstallTargets = (plan: ExecutionPlan) => { const adaptors = {} as Record; Object.values(plan.workflow.steps).forEach((step) => { const job = step as Job; - if (job.adaptor) { + // Do not autoinstall adaptors with a path + if (job.adaptor && !/=/.test(job.adaptor)) { adaptors[job.adaptor] = true; } }); diff --git a/packages/cli/src/execute/handler.ts b/packages/cli/src/execute/handler.ts index d8dd4d7b7..060a06c22 100644 --- a/packages/cli/src/execute/handler.ts +++ b/packages/cli/src/execute/handler.ts @@ -20,7 +20,6 @@ const executeHandler = async (options: ExecuteOptions, logger: Logger) => { await validateAdaptors(options, logger); let plan = await loadPlan(options, logger); - const { repoDir, monorepoPath, autoinstall } = options; if (autoinstall) { if (monorepoPath) { diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index fd333d1be..d46ae6235 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -4,18 +4,21 @@ */ import fs from 'node:fs/promises'; import path from 'node:path'; +import { isPath } from '@openfn/compiler'; import abort from './abort'; - +import expandAdaptors from 
'./expand-adaptors'; +import mapAdaptorsToMonorepo from './map-adaptors-to-monorepo'; import type { ExecutionPlan, Job, WorkflowOptions } from '@openfn/lexicon'; import type { Opts } from '../options'; import type { Logger } from './logger'; -import { OldCLIWorkflow } from '../types'; -import expandAdaptors from './expand-adaptors'; -import mapAdaptorsToMonorepo from './map-adaptors-to-monorepo'; +import type { OldCLIWorkflow } from '../types'; const loadPlan = async ( - options: Pick, + options: Pick< + Opts, + 'jobPath' | 'planPath' | 'workflowPath' | 'adaptors' | 'baseDir' + >, logger: Logger ): Promise => { const { workflowPath, planPath, jobPath } = options; @@ -25,10 +28,14 @@ const loadPlan = async ( } const jsonPath = planPath || workflowPath; + + if (!options.baseDir) { + options.baseDir = path.dirname(jsonPath!); + } + // TODO if neither jobPath, planPath or workflowPath is set... what happens? // I think the CLI will exit before we even get here const json = await loadJson(jsonPath!, logger); - if (json.workflow) { return loadXPlan(json, options, logger); } else { @@ -149,20 +156,89 @@ const loadOldWorkflow = async ( return final; }; +const fetchFile = async ( + jobId: string, + rootDir: string = '', + filePath: string, + log: Logger +) => { + try { + // Special handling for ~ feels like a necessary evil + const fullPath = filePath.startsWith('~') + ? 
filePath + : path.resolve(rootDir, filePath); + const result = await fs.readFile(fullPath, 'utf8'); + return result; + } catch (e) { + abort( + log, + `File not found for job ${jobId}: ${filePath}`, + undefined, + `This workflow references a file which cannot be found at ${filePath}\n\nPaths inside the workflow are relative to the workflow.json` + ); + + // should never get here + return '.'; + } +}; + +// TODO this is currently untested in load-plan +// (but covered a bit in execute tests) +const importExpressions = async ( + plan: ExecutionPlan, + rootDir: string, + log: Logger +) => { + let idx = 0; + for (const step of plan.workflow.steps) { + const job = step as Job; + if (!job.expression) { + continue; + } + idx += 1; + const expressionStr = + typeof job.expression === 'string' && job.expression?.trim(); + const configurationStr = + typeof job.configuration === 'string' && job.configuration?.trim(); + if (expressionStr && isPath(expressionStr)) { + job.expression = await fetchFile( + job.id || `${idx}`, + rootDir, + expressionStr, + log + ); + } + if (configurationStr && isPath(configurationStr)) { + const configString = await fetchFile( + job.id || `${idx}`, + rootDir, + configurationStr, + log + ); + job.configuration = JSON.parse(configString!); + } + } +}; + // TODO default the workflow name from the file name const loadXPlan = async ( plan: ExecutionPlan, - options: Pick, + options: Pick, logger: Logger ) => { if (!plan.options) { plan.options = {}; } + // Note that baseDir should be set up in the default function + await importExpressions(plan, options.baseDir!, logger); // expand shorthand adaptors in the workflow jobs expandAdaptors(plan); await mapAdaptorsToMonorepo(options.monorepoPath, plan, logger); + // TODO support state props to remove? 
+ maybeAssign(options, plan.options, ['timeout', 'start']); + // TODO: write any options from the user onto the options object return plan; diff --git a/packages/cli/test/execute/execute.test.ts b/packages/cli/test/execute/execute.test.ts index 59513bec7..9d078a818 100644 --- a/packages/cli/test/execute/execute.test.ts +++ b/packages/cli/test/execute/execute.test.ts @@ -33,234 +33,288 @@ const defaultOptions = { const fn = `const fn = (fn) => (s) => fn(s); `; -test.before(() => { +const mockFs = (files: Record) => { const pnpm = path.resolve('../../node_modules/.pnpm'); mock({ - '/repo/': mock.load(path.resolve('test/__repo__/'), {}), [pnpm]: mock.load(pnpm, {}), - '/exp.js': `${fn}fn(() => ({ data: 42 }));`, - '/config.json': JSON.stringify({ id: 'x' }), - '/workflow.json': JSON.stringify({ - jobs: [ - { - expression: `${fn}fn(() => ({ data: { count: 42 } }));`, - }, - ], - }), + '/repo/': mock.load(path.resolve('test/__repo__/'), {}), + ...files, }); -}); +}; test.after(() => mock.restore()); -test('run a job', async (t) => { +test.serial('run a simple job', async (t) => { const job = `${fn}fn(() => ({ data: 42 }));`; + + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, - job, + jobPath: '/job.js', }; + const result = await handler(options, logger); t.is(result.data, 42); }); -test('run a job with initial state', async (t) => { +test.serial('run a job with initial state', async (t) => { const job = `${fn}fn((state) => state);`; + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, - job, + jobPath: '/job.js', stateStdin: JSON.stringify({ data: { count: 10 } }), }; - const result = await handler(options, logger); - t.is(result.data.count, 10); -}); -test('run a workflow from a path', async (t) => { - const options = { - ...defaultOptions, - workflowPath: '/workflow.json', - }; const result = await handler(options, logger); - t.is(result.data.count, 42); + t.is(result.data.count, 10); }); -test('run a workflow', async (t) 
=> { +test.serial('run a workflow', async (t) => { const workflow = { - start: 'a', - jobs: [ - { - id: 'a', - expression: `${fn}fn(() => ({ data: { count: 42 } }));`, - next: { b: true }, - }, - { - id: 'b', - expression: `${fn}fn((state) => { state.data.count = state.data.count * 2; return state; });`, - }, - ], + options: { + start: 'a', + }, + workflow: { + steps: [ + { + id: 'a', + expression: `${fn}fn(() => ({ data: { count: 42 } }));`, + next: { b: true }, + }, + { + id: 'b', + expression: `${fn}fn((state) => { state.data.count = state.data.count * 2; return state; });`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.data.count, 84); }); -test('run a workflow with state', async (t) => { +test.serial('run a workflow with state', async (t) => { const workflow = { - start: 'a', - jobs: [ - { - id: 'a', - state: { data: { count: 1 } }, - expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, - next: { b: true }, - }, - { - id: 'b', - state: { data: { diff: 2 } }, - expression: `${fn}fn((state) => { state.data.count += state.data.diff; return state; });`, - }, - ], + workflow: { + steps: [ + { + id: 'a', + state: { data: { count: 1 } }, + expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, + next: { b: true }, + }, + { + id: 'b', + state: { data: { diff: 2 } }, + expression: `${fn}fn((state) => { state.data.count += state.data.diff; return state; });`, + }, + ], + }, }; + + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.data.count, 4); }); -test('run a workflow with initial state', async (t) => { +test.serial('run a workflow with initial state from stdin', async (t) => { const 
workflow = { - start: 'a', - jobs: [ - { - id: 'a', - expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, - next: { b: true }, - }, - { - id: 'b', - expression: `${fn}fn((state) => { state.data.count += 1; return state; });`, - }, - ], + workflow: { + jobs: [ + { + id: 'a', + expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, + next: { b: true }, + }, + { + id: 'b', + expression: `${fn}fn((state) => { state.data.count += 1; return state; });`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), }; const result = await handler(options, logger); t.is(result.data.count, 12); }); -test('run a workflow with an expression as a path', async (t) => { +test.serial('run a workflow with an expression as a path', async (t) => { const workflow = { - jobs: [ - { - expression: '/exp.js', - }, - ], + workflow: { + steps: [ + { + expression: '/exp.js', + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + '/exp.js': `${fn}fn(() => ({ data: 42 }));`, + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.data, 42); }); -test('run a workflow with config as a path', async (t) => { +test.serial('run a workflow with config as a path', async (t) => { const workflow = { - jobs: [ - { - configuration: '/config.json', - expression: `${fn}fn((state) => { state.cfg = state.configuration; return state; })`, - }, - ], + workflow: { + steps: [ + { + configuration: '/config.json', + expression: `${fn}fn((state) => { state.cfg = state.configuration; return state; })`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + '/config.json': JSON.stringify({ id: 'x' }), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: 
'/workflow.json', }; const result = await handler(options, logger); t.is(result.cfg.id, 'x'); }); -test('run a workflow from a start node', async (t) => { +test.serial('run a workflow from a start node', async (t) => { const workflow = { - jobs: [ - { - id: 'a', - expression: `${fn}fn((state) => ({ data: { result: 'a' }}))`, - }, - { - id: 'b', - expression: `${fn}fn((state) => ({ data: { result: 'b' }}))`, - }, - ], + workflow: { + steps: [ + { + id: 'a', + expression: `${fn}fn((state) => ({ data: { result: 'a' }}))`, + }, + { + id: 'b', + expression: `${fn}fn((state) => ({ data: { result: 'b' }}))`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', start: 'b', }; const result = await handler(options, logger); t.is(result.data.result, 'b'); }); -test('run a workflow with an adaptor (longform)', async (t) => { +test.serial('run a workflow with an adaptor (longform)', async (t) => { const workflow = { - jobs: [ - { - adaptor: '@openfn/language-common', - expression: `fn((state) => state);`, - }, - ], + workflow: { + steps: [ + { + adaptor: '@openfn/language-common', + expression: `fn((state) => state);`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), }; const result = await handler(options, logger); t.is(result.data.count, 10); }); -test('run a workflow with an adaptor (shortform)', async (t) => { +test.serial('run a workflow with an adaptor (shortform)', async (t) => { const workflow = { - jobs: [ - { - adaptor: 'common', - expression: `fn((state) => state);`, - }, - ], + workflow: { + steps: [ + { + adaptor: 'common', + expression: `fn((state) => state);`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - 
workflow, + workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), }; const result = await handler(options, logger); t.is(result.data.count, 10); }); -test('run a job without compilation', async (t) => { +test.serial('run a job without compilation', async (t) => { const job = `export default [() => ({ data: { count: 42 } })]`; + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, compile: false, - job, + jobPath: '/job.js', }; + const result = await handler(options, logger); t.is(result.data.count, 42); }); -test('run a job which does not return state', async (t) => { +test.serial('run a job which does not return state', async (t) => { const job = `${fn}fn(() => {});`; + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, - job, + jobPath: '/job.js', }; const result = await handler(options, logger); t.falsy(result); diff --git a/packages/cli/test/execute/get-autoinstall-targets.test.ts b/packages/cli/test/execute/get-autoinstall-targets.test.ts index e5a4adaea..33a29786b 100644 --- a/packages/cli/test/execute/get-autoinstall-targets.test.ts +++ b/packages/cli/test/execute/get-autoinstall-targets.test.ts @@ -89,7 +89,7 @@ test('do not return adaptors with a path', (t) => { const plan = getPlan([ { expression: 'fn()', - adaptor: 'commoin=a/b/c', + adaptor: 'common=a/b/c', }, ]); const result = getAutoinstallTargets(plan); From 03bbf16185bc128ad84ccc983bac42d63913833f Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 12:16:46 +0000 Subject: [PATCH 047/128] cli: fix execute and compile tests --- packages/cli/src/options.ts | 8 +- packages/cli/test/compile/compile.test.ts | 80 +++++++------ packages/cli/test/execute/execute.test.ts | 16 +-- .../cli/test/execute/parse-adaptors.test.ts | 110 +++++++++--------- packages/cli/test/util.ts | 18 +++ 5 files changed, 121 insertions(+), 111 deletions(-) create mode 100644 packages/cli/test/util.ts diff --git a/packages/cli/src/options.ts 
b/packages/cli/src/options.ts index a155780b2..e7aaa9d49 100644 --- a/packages/cli/src/options.ts +++ b/packages/cli/src/options.ts @@ -30,8 +30,7 @@ export type Opts = { force?: boolean; immutable?: boolean; ignoreImports?: boolean | string[]; - jobPath?: string; - job?: string; + jobPath?: string; // TODO rename to expressionPath log?: Record; logJson?: boolean; monorepoPath?: string; @@ -39,7 +38,6 @@ export type Opts = { outputPath?: string; outputStdout?: boolean; packages?: string[]; - plan?: ExecutionPlan; planPath?: string; projectPath?: string; repoDir?: string; @@ -52,11 +50,13 @@ export type Opts = { sanitize: 'none' | 'remove' | 'summarize' | 'obfuscate'; timeout?: number; // ms useAdaptorsMonorepo?: boolean; - workflow?: OldCLIWorkflow; projectId?: string; // deprecated workflowPath?: string; + job?: string; + plan?: ExecutionPlan; // TODO pretty sure this doesn't live on options + workflow?: OldCLIWorkflow; // TODO I don't think this should sit on options anymore? }; // Definition of what Yargs returns (before ensure is called) diff --git a/packages/cli/test/compile/compile.test.ts b/packages/cli/test/compile/compile.test.ts index 55b867860..fdc48c5f4 100644 --- a/packages/cli/test/compile/compile.test.ts +++ b/packages/cli/test/compile/compile.test.ts @@ -8,13 +8,14 @@ import compile, { resolveSpecifierPath, } from '../../src/compile/compile'; import { CompileOptions } from '../../src/compile/command'; -import { ExecutionPlan } from '@openfn/runtime'; +import { mockFs, resetMockFs } from '../util'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; const mockLog = createMockLogger(); -test.afterEach(() => { - mock.restore(); -}); +test.after(resetMockFs); + +const jobPath = '/job.js'; type TransformOptionsWithImports = { ['add-imports']: { @@ -26,67 +27,64 @@ type TransformOptionsWithImports = { }; }; +// TODO this isn't really used and is a bit of a quirky thing +// The compiler itself probably doesn't do any path parsing? 
+// Just compile a source string and return the result test('compile from source string', async (t) => { const job = 'x();'; - const opts = { - job, - } as CompileOptions; + const opts = {} as CompileOptions; - const result = await compile(opts, mockLog); + const result = await compile(job, opts, mockLog); const expected = 'export default [x()];'; t.is(result, expected); }); test.serial('compile from path', async (t) => { - const pnpm = path.resolve('../../node_modules/.pnpm'); - mock({ - [pnpm]: mock.load(pnpm, {}), - '/tmp/job.js': 'x();', + const job = 'x();'; + mockFs({ + [jobPath]: job, }); - const jobPath = '/tmp/job.js'; - const opts = { jobPath, } as CompileOptions; - const result = await compile(opts, mockLog); + const result = await compile(jobPath, opts, mockLog); const expected = 'export default [x()];'; t.is(result, expected); }); -test('compile from workflow', async (t) => { - const workflow = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x()' }, - { id: 'b', expression: 'x()' }, - ], - }; +test('compile from execution plan', async (t) => { + const plan = { + workflow: { + steps: [ + { id: 'a', expression: 'x()' }, + { id: 'b', expression: 'x()' }, + ], + }, + options: {}, + } as ExecutionPlan; - const opts = { - workflow, - } as CompileOptions; + const opts = {} as CompileOptions; - const result = (await compile(opts, mockLog)) as ExecutionPlan; + const result = (await compile(plan, opts, mockLog)) as ExecutionPlan; const expected = 'export default [x()];'; - t.is(result.jobs[0].expression, expected); - t.is(result.jobs[1].expression, expected); + const [a, b] = result.workflow.steps; + t.is((a as Job).expression, expected); + t.is((b as Job).expression, expected); }); test('throw an AbortError if a job is uncompilable', async (t) => { const job = 'a b'; - const opts = { - job, - } as CompileOptions; + const opts = {} as CompileOptions; const logger = createMockLogger(); - await t.throwsAsync(() => compile(opts, logger), { + await 
t.throwsAsync(() => compile(job, opts, logger), { message: 'Failed to compile job', }); @@ -95,18 +93,18 @@ test('throw an AbortError if a job is uncompilable', async (t) => { t.assert(logger._find('error', /critical error: aborting command/i)); }); -test('throw an AbortError if a workflow contains an uncompilable job', async (t) => { - const workflow = { - start: 'a', - jobs: [{ id: 'a', expression: 'x b' }], +test('throw an AbortError if an xplan contains an uncompilable job', async (t) => { + const plan: ExecutionPlan = { + workflow: { + steps: [{ id: 'a', expression: 'x b' }], + }, + options: {}, }; - const opts = { - workflow, - } as CompileOptions; + const opts = {} as CompileOptions; const logger = createMockLogger(); - await t.throwsAsync(() => compile(opts, logger), { + await t.throwsAsync(() => compile(plan, opts, logger), { message: 'Failed to compile job a', }); diff --git a/packages/cli/test/execute/execute.test.ts b/packages/cli/test/execute/execute.test.ts index 9d078a818..8931a0eb5 100644 --- a/packages/cli/test/execute/execute.test.ts +++ b/packages/cli/test/execute/execute.test.ts @@ -1,12 +1,11 @@ // bunch of unit tests on the execute function itself // so far this is only done in commands.test.ts, which has the cli overhead // I don't want any io or adaptor tests here, really just looking for the actual execute flow -import mock from 'mock-fs'; -import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; import test from 'ava'; import { ExecuteOptions } from '../../src/execute/command'; import handler from '../../src/execute/handler'; +import { mockFs, resetMockFs } from '../util'; // Why is this logging everywhere? 
const logger = createMockLogger(undefined, { level: 'none' }); @@ -33,16 +32,7 @@ const defaultOptions = { const fn = `const fn = (fn) => (s) => fn(s); `; -const mockFs = (files: Record) => { - const pnpm = path.resolve('../../node_modules/.pnpm'); - mock({ - [pnpm]: mock.load(pnpm, {}), - '/repo/': mock.load(path.resolve('test/__repo__/'), {}), - ...files, - }); -}; - -test.after(() => mock.restore()); +test.after(resetMockFs); test.serial('run a simple job', async (t) => { const job = `${fn}fn(() => ({ data: 42 }));`; @@ -141,7 +131,7 @@ test.serial('run a workflow with state', async (t) => { test.serial('run a workflow with initial state from stdin', async (t) => { const workflow = { workflow: { - jobs: [ + steps: [ { id: 'a', expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, diff --git a/packages/cli/test/execute/parse-adaptors.test.ts b/packages/cli/test/execute/parse-adaptors.test.ts index 73a01715a..cdbdf6753 100644 --- a/packages/cli/test/execute/parse-adaptors.test.ts +++ b/packages/cli/test/execute/parse-adaptors.test.ts @@ -1,81 +1,85 @@ import test from 'ava'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; import { parseAdaptors } from '../../src/execute/execute'; -// This is all useless now because we parse on an execution plan -// test('parse a simple specifier', (t) => { -// const adaptors = ['a']; -// const result = parseAdaptors({ adaptors }); -// t.assert(Object.keys(result).length === 1); -// t.truthy(result.a); -// t.falsy(Object.keys(result.a).length); -// }); +const createPlan = (adaptor: string): ExecutionPlan => ({ + workflow: { + steps: [ + { + adaptor, + expression: '.', + }, + ], + }, + options: {}, +}); + +test('parse a simple specifier with no path or version', (t) => { + const adaptor = 'a'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); + + t.deepEqual(result, { a: {} }); +}); + +test('parse a specifier with a path', (t) => { + const adaptor = 'a=x'; + const plan = 
createPlan(adaptor); + const result = parseAdaptors(plan); + + t.deepEqual(result, { a: { path: 'x' } }); +}); + +test('parse a specifier with a version', (t) => { + const adaptor = 'a@1'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); -// test('parse multiple specifiers', (t) => { -// const adaptors = ['a', 'b']; -// const result = parseAdaptors({ adaptors }); -// t.assert(Object.keys(result).length === 2); -// t.truthy(result.a); -// t.truthy(result.b); -// }); + t.deepEqual(result, { a: { version: '1' } }); +}); -// test('parse a specifier with a path', (t) => { -// const adaptors = ['a=x']; -// const result = parseAdaptors({ adaptors }); -// t.assert(Object.keys(result).length === 1); -// t.deepEqual(result.a, { path: 'x' }); -// }); +test('parse a specifier with a path and version', (t) => { + const adaptor = 'a@1=x'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); -// test('parse a specifier with a version', (t) => { -// const adaptors = ['a@1']; -// const result = parseAdaptors({ adaptors }); -// t.assert(Object.keys(result).length === 1); -// t.deepEqual(result.a, { version: '1' }); -// }); + t.deepEqual(result, { a: { path: 'x', version: '1' } }); +}); -// test('parse a specifier with a path and version', (t) => { -// const adaptors = ['a@1=x']; -// const result = parseAdaptors({ adaptors }); -// t.assert(Object.keys(result).length === 1); -// t.deepEqual(result.a, { path: 'x', version: '1' }); -// }); +test('parse @openfn/language-common@1.0.0=~/repo/modules/common', (t) => { + const adaptor = '@openfn/language-common@1.0.0=~/repo/modules/common'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); -// test('parse @openfn/language-common@1.0.0=~/repo/modules/common', (t) => { -// const adaptors = ['@openfn/language-common@1.0.0=~/repo/modules/common']; -// const result = parseAdaptors({ adaptors }); -// t.assert(Object.keys(result).length === 1); -// t.deepEqual(result, { -// 
'@openfn/language-common': { -// path: '~/repo/modules/common', -// version: '1.0.0', -// }, -// }); -// }); + t.deepEqual(result, { + '@openfn/language-common': { + path: '~/repo/modules/common', + version: '1.0.0', + }, + }); +}); -test('parse plan', (t) => { +test('parse plan with several steps', (t) => { const plan = { options: { start: 'a', }, workflow: { - // TODO oh no the workflow structure accepted by the CLI isa bit different! - // its an indexed object, rather than an array - // no its not. it comes in as an array. - // what is this structure? steps: [ - a: { + { adaptor: '@openfn/language-common', expression: 'fn()', }, - b: { + { adaptor: '@openfn/language-http@1.0.0', expression: 'fn()', }, - c: { + { adaptor: '@openfn/language-salesforce=a/b/c', expression: 'fn()', }, - }, + ], }, }; const result = parseAdaptors(plan); diff --git a/packages/cli/test/util.ts b/packages/cli/test/util.ts new file mode 100644 index 000000000..550720736 --- /dev/null +++ b/packages/cli/test/util.ts @@ -0,0 +1,18 @@ +/* + * test utils + */ +import mock from 'mock-fs'; +import path from 'node:path'; + +export const mockFs = (files: Record) => { + const pnpm = path.resolve('../../node_modules/.pnpm'); + mock({ + [pnpm]: mock.load(pnpm, {}), + '/repo/': mock.load(path.resolve('test/__repo__/'), {}), + ...files, + }); +}; + +export const resetMockFs = () => { + mock.restore(); +}; From 27e0608b91ca0b49236de09c2e272d3ba7573bc3 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 12:57:21 +0000 Subject: [PATCH 048/128] cli: more test fixes --- packages/cli/src/docs/handler.ts | 2 +- packages/cli/src/test/handler.ts | 66 +++++++++++++----------- packages/cli/src/util/expand-adaptors.ts | 14 +++-- packages/cli/test/docgen/handler.test.ts | 2 +- 4 files changed, 48 insertions(+), 36 deletions(-) diff --git a/packages/cli/src/docs/handler.ts b/packages/cli/src/docs/handler.ts index fe304aabc..a60acd5cc 100644 --- a/packages/cli/src/docs/handler.ts +++ 
b/packages/cli/src/docs/handler.ts @@ -60,7 +60,7 @@ const docsHandler = async ( // does the adaptor have a version? If not, fetch the latest // (docgen won't do this for us) - const { adaptors } = expandAdaptors([adaptor]); + const adaptors = expandAdaptors([adaptor]) as string[]; const [adaptorName] = adaptors!; let { name, version } = getNameAndVersion(adaptorName); if (!version) { diff --git a/packages/cli/src/test/handler.ts b/packages/cli/src/test/handler.ts index 8c2edde9e..52cb2cc32 100644 --- a/packages/cli/src/test/handler.ts +++ b/packages/cli/src/test/handler.ts @@ -1,3 +1,5 @@ +import type { ExecutionPlan } from '@openfn/lexicon'; + import { TestOptions } from './command'; import { createNullLogger, Logger } from '../util/logger'; import loadState from '../util/load-state'; @@ -6,44 +8,48 @@ import execute from '../execute/execute'; import { ExecuteOptions } from '../execute/command'; const testHandler = async (options: TestOptions, logger: Logger) => { - logger.log('Running test job...'); + logger.log('Running test workflow...'); const opts: Partial = { ...options }; // Preconfigure some options opts.compile = true; opts.adaptors = []; - opts.workflow = { - start: 'start', - jobs: [ - { - id: 'start', - state: { data: { defaultAnswer: 42 } }, - expression: - "const fn = () => (state) => { console.log('Starting computer...'); return state; }; fn()", - next: { - calculate: '!state.error', + const plan = { + options: { + start: 'start', + }, + workflow: { + steps: [ + { + id: 'start', + state: { data: { defaultAnswer: 42 } }, + expression: + "const fn = () => (state) => { console.log('Starting computer...'); return state; }; fn()", + next: { + calculate: '!state.error', + }, + }, + { + id: 'calculate', + expression: + "const fn = () => (state) => { console.log('Calculating to life, the universe, and everything..'); return state }; fn()", + next: { + result: true, + }, }, - }, - { - id: 'calculate', - expression: - "const fn = () => (state) => { 
console.log('Calculating to life, the universe, and everything..'); return state }; fn()", - next: { - result: true, + { + id: 'result', + expression: + 'const fn = () => (state) => ({ data: { answer: state.data.answer || state.data.defaultAnswer } }); fn()', }, - }, - { - id: 'result', - expression: - 'const fn = () => (state) => ({ data: { answer: state.data.answer || state.data.defaultAnswer } }); fn()', - }, - ], - }; + ], + }, + } as ExecutionPlan; logger.break(); - logger.info('Workflow object:'); - logger.info(JSON.stringify(opts.workflow, null, 2)); + logger.info('Execution plan:'); + logger.info(JSON.stringify(plan, null, 2)); logger.break(); if (!opts.stateStdin) { @@ -54,8 +60,8 @@ const testHandler = async (options: TestOptions, logger: Logger) => { } const state = await loadState(opts, createNullLogger()); - const code = await compile(opts, logger); - const result = await execute(code!, state, opts as ExecuteOptions); + const compiledPlan = (await compile(plan, opts, logger)) as ExecutionPlan; + const result = await execute(compiledPlan, state, opts as ExecuteOptions); logger.success(`Result: ${result.data.answer}`); return result; }; diff --git a/packages/cli/src/util/expand-adaptors.ts b/packages/cli/src/util/expand-adaptors.ts index ba90d6d66..45b952e9d 100644 --- a/packages/cli/src/util/expand-adaptors.ts +++ b/packages/cli/src/util/expand-adaptors.ts @@ -1,6 +1,6 @@ import { ExecutionPlan, Job } from '@openfn/lexicon'; -const expand = (name: any) => { +const expand = (name: string) => { if (typeof name === 'string') { const [left] = name.split('='); // don't expand adaptors which look like a path (or @openfn/language-) @@ -12,9 +12,15 @@ const expand = (name: any) => { return name; }; -export default (input: string[] | ExecutionPlan) => { +type ArrayOrPlan = T extends string[] ? string[] : ExecutionPlan; + +// TODO typings here aren't good,I can't get this to work! 
+// At least this looks nice externally +export default | ExecutionPlan>( + input: T +): ArrayOrPlan => { if (Array.isArray(input)) { - return input?.map(expand) as string[]; + return input?.map(expand) as any; } const plan = input as ExecutionPlan; @@ -25,5 +31,5 @@ export default (input: string[] | ExecutionPlan) => { } }); - return plan; + return plan as any; }; diff --git a/packages/cli/test/docgen/handler.test.ts b/packages/cli/test/docgen/handler.test.ts index 52cee0471..07e38c133 100644 --- a/packages/cli/test/docgen/handler.test.ts +++ b/packages/cli/test/docgen/handler.test.ts @@ -53,7 +53,7 @@ const options = { }; test.serial('generate mock docs', async (t) => { - const path = await docsHandler(options, logger, mockGen); + const path = (await docsHandler(options, logger, mockGen)) as string; t.is(path, `${DOCS_PATH}/${specifier}.json`); const docs = await loadJSON(path); From 9065e6adbdc7398e5fc4e0a317a14c3b55dcabfd Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 15:00:27 +0000 Subject: [PATCH 049/128] fix more cli tests --- packages/cli/src/util/load-input.ts | 3 + packages/cli/src/util/load-plan.ts | 20 ++- packages/cli/test/commands.test.ts | 97 +++++++++++--- packages/cli/test/integration.test.ts | 2 +- .../cli/test/options/ensure/inputPath.test.ts | 2 +- packages/cli/test/util/load-plan.test.ts | 122 ++++++++++++++++-- 6 files changed, 207 insertions(+), 39 deletions(-) diff --git a/packages/cli/src/util/load-input.ts b/packages/cli/src/util/load-input.ts index f57b73e49..9e6ce9b46 100644 --- a/packages/cli/src/util/load-input.ts +++ b/packages/cli/src/util/load-input.ts @@ -1,3 +1,6 @@ +// TODO remove this now +// Let's just port over any tests we want +// (acutally let's get all tests, including integration, passing first) import path from 'node:path'; import fs from 'node:fs/promises'; import { isPath } from '@openfn/compiler'; diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 
d46ae6235..18fcd8eb7 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -17,7 +17,12 @@ import type { OldCLIWorkflow } from '../types'; const loadPlan = async ( options: Pick< Opts, - 'jobPath' | 'planPath' | 'workflowPath' | 'adaptors' | 'baseDir' + | 'jobPath' + | 'planPath' + | 'workflowPath' + | 'adaptors' + | 'baseDir' + | 'expandAdaptors' >, logger: Logger ): Promise => { @@ -96,7 +101,7 @@ const loadExpression = async ( const step: Job = { expression }; - // The adaptor should have been expanded nicely already, so we don't need todo much here + // The adaptor should have been expanded nicely already, so we don't need intervene here if (options.adaptors) { const [adaptor] = options.adaptors; if (adaptor) { @@ -223,7 +228,7 @@ const importExpressions = async ( // TODO default the workflow name from the file name const loadXPlan = async ( plan: ExecutionPlan, - options: Pick, + options: Pick, logger: Logger ) => { if (!plan.options) { @@ -233,13 +238,14 @@ const loadXPlan = async ( // Note that baseDir should be set up in the default function await importExpressions(plan, options.baseDir!, logger); // expand shorthand adaptors in the workflow jobs - expandAdaptors(plan); + if (options.expandAdaptors) { + expandAdaptors(plan); + } await mapAdaptorsToMonorepo(options.monorepoPath, plan, logger); - // TODO support state props to remove? 
+ // Assign options form the CLI into the Xplan + // TODO support state props to remove maybeAssign(options, plan.options, ['timeout', 'start']); - // TODO: write any options from the user onto the options object - return plan; }; diff --git a/packages/cli/test/commands.test.ts b/packages/cli/test/commands.test.ts index 31706ce5b..09d8e924e 100644 --- a/packages/cli/test/commands.test.ts +++ b/packages/cli/test/commands.test.ts @@ -15,12 +15,12 @@ test.afterEach(() => { logger._reset(); }); -const JOB_EXPORT_42 = 'export default [() => ({ data: { count: 42 } })];'; -const JOB_TIMES_2 = +const EXPR_EXPORT_42 = 'export default [() => ({ data: { count: 42 } })];'; +const EXPR_TIMES_2 = 'export default [(state) => { state.data.count = state.data.count * 2; return state; }];'; -const JOB_MOCK_ADAPTOR = +const EXPR_MOCK_ADAPTOR = 'import { byTwo } from "times-two"; export default [byTwo];'; -const JOB_EXPORT_STATE = +const EXPR_EXPORT_STATE = "export default [() => ({ configuration: {}, data: {}, foo: 'bar' })];"; type RunOptions = { @@ -93,6 +93,65 @@ async function run(command: string, job: string, options: RunOptions = {}) { } } +test.serial('run an execution plan', async (t) => { + const plan = { + workflow: { + steps: [ + { + id: 'job1', + state: { data: { x: 0 } }, + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { job2: true }, + }, + { + id: 'job2', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ], + }, + }; + + const options = { + outputPath: 'output.json', + jobPath: 'wf.json', // just to fool the test + }; + + const result = await run('openfn wf.json', JSON.stringify(plan), options); + t.assert(result.data.x === 2); +}); + +test.serial('run an execution plan with start', async (t) => { + const state = JSON.stringify({ data: { x: 0 } }); + const plan = { + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { b: true }, + }, + { + id: 'b', + 
expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ], + }, + }; + + const options = { + outputPath: 'output.json', + jobPath: 'wf.json', // just to fool the test + }; + + const result = await run( + `openfn wf.json -S ${state} --start b`, + JSON.stringify(plan), + options + ); + + t.assert(result.data.x === 1); +}); + test.serial('print version information with version', async (t) => { await run('version', ''); @@ -119,7 +178,7 @@ test.serial('run test job with custom state', async (t) => { }); test.serial('run a job with defaults: openfn job.js', async (t) => { - const result = await run('openfn job.js', JOB_EXPORT_42); + const result = await run('openfn job.js', EXPR_EXPORT_42); t.assert(result.data.count === 42); }); @@ -216,7 +275,7 @@ test.serial.skip( const result = await run( 'openfn ~/openfn/jobs/the-question', - JOB_EXPORT_42, + EXPR_EXPORT_42, options ); t.assert(result === 42); @@ -237,7 +296,7 @@ test.serial( }; const result = await run( 'openfn job.js --output-path=/tmp/my-output.json', - JOB_EXPORT_42, + EXPR_EXPORT_42, options ); t.is(result.data.count, 42); @@ -256,7 +315,7 @@ test.serial( }; const result = await run( 'openfn job.js -o /tmp/my-output.json', - JOB_EXPORT_42, + EXPR_EXPORT_42, options ); t.is(result.data.count, 42); @@ -276,7 +335,7 @@ test.serial( const result = await run( 'openfn job.js --output-path=/tmp/my-output.json --strict', - JOB_EXPORT_STATE, + EXPR_EXPORT_STATE, options ); t.deepEqual(result, { data: {} }); @@ -296,7 +355,7 @@ test.serial( const result = await run( 'openfn job.js --output-path=/tmp/my-output.json --no-strict-output', - JOB_EXPORT_STATE, + EXPR_EXPORT_STATE, options ); t.deepEqual(result, { data: {}, foo: 'bar' }); @@ -320,7 +379,7 @@ test.serial( const result = await run( 'openfn job.js --output-path=/tmp/my-output.json --no-strict', - JOB_EXPORT_STATE, + EXPR_EXPORT_STATE, options ); t.deepEqual(result, { data: {}, foo: 'bar' }); @@ -344,7 +403,7 @@ test.serial( }; const result = 
await run( 'openfn job.js --state-path=/tmp/my-state.json', - JOB_TIMES_2, + EXPR_TIMES_2, options ); t.assert(result.data.count === 66); @@ -360,7 +419,7 @@ test.serial( }; const result = await run( 'openfn job.js -s /tmp/my-state.json', - JOB_TIMES_2, + EXPR_TIMES_2, options ); t.assert(result.data.count === 66); @@ -373,7 +432,7 @@ test.serial( const state = JSON.stringify({ data: { count: 11 } }); const result = await run( `openfn job.js --state-stdin=${state}`, - JOB_TIMES_2 + EXPR_TIMES_2 ); t.assert(result.data.count === 22); } @@ -383,7 +442,7 @@ test.serial( 'read state from stdin with alias: openfn job.js -S ', async (t) => { const state = JSON.stringify({ data: { count: 44 } }); - const result = await run(`openfn job.js -S ${state}`, JOB_TIMES_2); + const result = await run(`openfn job.js -S ${state}`, EXPR_TIMES_2); t.assert(result.data.count === 88); } ); @@ -394,7 +453,7 @@ test.serial( const state = JSON.stringify({ data: { count: 49.5 } }); const result = await run( `openfn --no-expand-adaptors -S ${state} --adaptor times-two=/modules/times-two`, - JOB_MOCK_ADAPTOR + EXPR_MOCK_ADAPTOR ); t.assert(result.data.count === 99); } @@ -406,7 +465,7 @@ test.serial( const state = JSON.stringify({ data: { count: 49.5 } }); const result = await run( `openfn --no-expand-adaptors -S ${state} --adaptors times-two=/modules/times-two`, - JOB_MOCK_ADAPTOR + EXPR_MOCK_ADAPTOR ); t.assert(result.data.count === 99); } @@ -418,7 +477,7 @@ test.serial( const state = JSON.stringify({ data: { count: 49.5 } }); const result = await run( `openfn --no-expand-adaptors -S ${state} -a times-two=/modules/times-two`, - JOB_MOCK_ADAPTOR + EXPR_MOCK_ADAPTOR ); t.assert(result.data.count === 99); } @@ -588,7 +647,7 @@ test.serial('compile a workflow: openfn compile wf.json to file', async (t) => { const output = await fs.readFile('out.json', 'utf8'); const result = JSON.parse(output); t.truthy(result); - t.is(result.jobs[0].expression, 'export default [x()];'); + 
t.is(result.workflow.steps[0].expression, 'export default [x()];'); }); test.serial('docs should print documentation with full names', async (t) => { diff --git a/packages/cli/test/integration.test.ts b/packages/cli/test/integration.test.ts index b4499cd1c..c20b68cf5 100644 --- a/packages/cli/test/integration.test.ts +++ b/packages/cli/test/integration.test.ts @@ -4,7 +4,7 @@ import { exec } from 'node:child_process'; test('openfn help', async (t) => { await new Promise((resolve) => { exec('pnpm openfn help', (error, stdout, stderr) => { - t.regex(stdout, /Run an openfn job/); + t.regex(stdout, /Run an openfn expression/); t.falsy(error); t.falsy(stderr); resolve(); diff --git a/packages/cli/test/options/ensure/inputPath.test.ts b/packages/cli/test/options/ensure/inputPath.test.ts index 8c7690c5b..a295b8ec8 100644 --- a/packages/cli/test/options/ensure/inputPath.test.ts +++ b/packages/cli/test/options/ensure/inputPath.test.ts @@ -31,7 +31,7 @@ test('sets jobPath to path/job.js if path is a folder (trailing slash)', (t) => t.is(opts.jobPath, '/jam/job.js'); }); -test('set workflowPath if path ends in json', (t) => { +test.skip('set workflowPath if path ends in json', (t) => { const opts = { path: 'workflow.json', } as Opts; diff --git a/packages/cli/test/util/load-plan.test.ts b/packages/cli/test/util/load-plan.test.ts index 773893725..2b1e7305e 100644 --- a/packages/cli/test/util/load-plan.test.ts +++ b/packages/cli/test/util/load-plan.test.ts @@ -72,9 +72,25 @@ test.serial('expression: set an adaptor on the plan', async (t) => { t.is(step.adaptor, '@openfn/language-common'); }); +test.serial('expression: do not expand adaptors', async (t) => { + const opts = { + jobPath: 'test/job.js', + expandAdaptors: false, + // Note that adaptor expansion should have happened before loadPlan is called + adaptors: ['common'], + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + const step = plan.workflow.steps[0] as Job; + + t.is(step.adaptor, 'common'); 
+}); + test.serial('expression: set a timeout on the plan', async (t) => { const opts = { jobPath: 'test/job.js', + expandAdaptors: true, timeout: 111, } as Partial; @@ -83,11 +99,21 @@ test.serial('expression: set a timeout on the plan', async (t) => { t.is(plan.options.timeout, 111); }); -test.todo('expression: load a plan from an expression.js and add options'); +test.serial('expression: set a start on the plan', async (t) => { + const opts = { + jobPath: 'test/job.js', + start: 'x', + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + t.is(plan.options.start, 'x'); +}); test.serial('xplan: load a plan from workflow path', async (t) => { const opts = { workflowPath: 'test/wf.json', + expandAdaptors: true, plan: {}, }; @@ -100,6 +126,7 @@ test.serial('xplan: load a plan from workflow path', async (t) => { test.serial('xplan: expand adaptors', async (t) => { const opts = { workflowPath: 'test/wf.json', + expandAdaptors: true, plan: {}, }; @@ -115,16 +142,93 @@ test.serial('xplan: expand adaptors', async (t) => { 'test/wf.json': JSON.stringify(plan), }); - const plan = await loadPlan(opts as Opts, logger); - t.truthy(plan); + const result = await loadPlan(opts as Opts, logger); + t.truthy(result); - const step = plan.workflow.steps[0] as Job; + const step = result.workflow.steps[0] as Job; t.is(step.adaptor, '@openfn/language-common@1.0.0'); }); +test.serial('xplan: do not expand adaptors', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + expandAdaptors: false, + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + adaptor: 'common@1.0.0', + }, + ]); + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const result = await loadPlan(opts as Opts, logger); + t.truthy(result); + + const step = result.workflow.steps[0] as Job; + t.is(step.adaptor, 'common@1.0.0'); +}); + +test.serial('xplan: set timeout from CLI', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + timeout: 666, + 
plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + }, + ]); + // The incoming option should overwrite this one + // @ts-ignore + plan.options.timeout = 1; + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const { options } = await loadPlan(opts as Opts, logger); + t.is(options.timeout, 666); +}); + +test.serial('xplan: set start from CLI', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + start: 'b', + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + }, + ]); + // The incoming option should overwrite this one + // @ts-ignore + plan.options.start = 'a'; + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const { options } = await loadPlan(opts as Opts, logger); + t.is(options.start, 'b'); +}); + test.serial('xplan: map to monorepo', async (t) => { const opts = { workflowPath: 'test/wf.json', + expandAdaptors: true, plan: {}, monorepoPath: '/repo/', } as Partial; @@ -141,15 +245,13 @@ test.serial('xplan: map to monorepo', async (t) => { 'test/wf.json': JSON.stringify(plan), }); - const plan = await loadPlan(opts as Opts, logger); - t.truthy(plan); + const result = await loadPlan(opts as Opts, logger); + t.truthy(result); - const step = plan.workflow.steps[0] as Job; + const step = result.workflow.steps[0] as Job; t.is(step.adaptor, '@openfn/language-common=/repo/packages/common'); }); -test.todo('xplan: load a plan from a workflow path and add options'); - test.serial('old-workflow: load a plan from workflow path', async (t) => { const opts = { workflowPath: 'test/wf-old.json', @@ -168,5 +270,3 @@ test.serial('old-workflow: load a plan from workflow path', async (t) => { expression: 'x()', }); }); - -test.todo('old-workflow: load a plan from a workflow path and add options'); From 161288e2abfebc07575d2d7e07273338844ad270 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 15:26:13 +0000 Subject: [PATCH 050/128] cli: fix integration tests --- 
integration-tests/cli/test/cli.test.ts | 2 +- integration-tests/cli/test/errors.test.ts | 7 ++- .../cli/test/execute-workflow.test.ts | 9 +++ .../test/fixtures/invalid-config-path.json | 3 +- integration-tests/cli/test/fixtures/plan.json | 19 +++++++ packages/cli/src/util/load-plan.ts | 55 +++++++++++-------- 6 files changed, 66 insertions(+), 29 deletions(-) create mode 100644 integration-tests/cli/test/fixtures/plan.json diff --git a/integration-tests/cli/test/cli.test.ts b/integration-tests/cli/test/cli.test.ts index 6d9b72d32..62ce56650 100644 --- a/integration-tests/cli/test/cli.test.ts +++ b/integration-tests/cli/test/cli.test.ts @@ -14,7 +14,7 @@ test.serial('openfn version', async (t) => { test.serial('openfn test', async (t) => { const { stdout } = await run(t.title); t.regex(stdout, /Versions:/); - t.regex(stdout, /Running test job.../); + t.regex(stdout, /Running test expression/); t.regex(stdout, /Result: 42/); }); diff --git a/integration-tests/cli/test/errors.test.ts b/integration-tests/cli/test/errors.test.ts index 1c3e66a43..410afbe56 100644 --- a/integration-tests/cli/test/errors.test.ts +++ b/integration-tests/cli/test/errors.test.ts @@ -2,19 +2,20 @@ import test from 'ava'; import path from 'node:path'; import run from '../src/run'; import { extractLogs, assertLog } from '../src/util'; +import { stderr } from 'node:process'; const jobsPath = path.resolve('test/fixtures'); // These are all errors that will stop the CLI from even running -test.serial('job not found', async (t) => { +test.serial('expression not found', async (t) => { const { stdout, err } = await run('openfn blah.js --log-json'); t.is(err.code, 1); const stdlogs = extractLogs(stdout); - assertLog(t, stdlogs, /job not found/i); - assertLog(t, stdlogs, /failed to load the job from blah.js/i); + assertLog(t, stdlogs, /expression not found/i); + assertLog(t, stdlogs, /failed to load the expression from blah.js/i); assertLog(t, stdlogs, /critical error: aborting command/i); }); diff 
--git a/integration-tests/cli/test/execute-workflow.test.ts b/integration-tests/cli/test/execute-workflow.test.ts index 57f53ee17..a2fa9230f 100644 --- a/integration-tests/cli/test/execute-workflow.test.ts +++ b/integration-tests/cli/test/execute-workflow.test.ts @@ -83,6 +83,15 @@ test.serial( } ); +// Run a new-style execution plan with custom start +test.serial(`openfn ${jobsPath}/plan.json -i`, async (t) => { + const { err } = await run(t.title); + t.falsy(err); + + const out = getJSON(); + t.deepEqual(out.data.userId, 1); +}); + test.serial(`openfn ${jobsPath}/wf-conditional.json`, async (t) => { const { err } = await run(t.title); t.falsy(err); diff --git a/integration-tests/cli/test/fixtures/invalid-config-path.json b/integration-tests/cli/test/fixtures/invalid-config-path.json index e3ed709a7..28e9ce4b1 100644 --- a/integration-tests/cli/test/fixtures/invalid-config-path.json +++ b/integration-tests/cli/test/fixtures/invalid-config-path.json @@ -1,7 +1,8 @@ { "jobs": [ { - "configuration": "does-not-exist.json" + "configuration": "does-not-exist.json", + "expression": "." 
} ] } diff --git a/integration-tests/cli/test/fixtures/plan.json b/integration-tests/cli/test/fixtures/plan.json new file mode 100644 index 000000000..bc21f7090 --- /dev/null +++ b/integration-tests/cli/test/fixtures/plan.json @@ -0,0 +1,19 @@ +{ + "options": { + "start": "b" + }, + "workflow": { + "steps": [ + { + "id": "a", + "adaptor": "common", + "expression": "fn((state) => { return state; });" + }, + { + "id": "b", + "adaptor": "http", + "expression": "get('https://jsonplaceholder.typicode.com/todos/1')" + } + ] + } +} diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 18fcd8eb7..6a4496717 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -50,8 +50,6 @@ const loadPlan = async ( export default loadPlan; -// TODO this is way over simplified :( -// see load-input const loadJson = async (workflowPath: string, logger: Logger): Promise => { let text: string; @@ -96,32 +94,41 @@ const loadExpression = async ( const jobPath = options.jobPath!; logger.debug(`Loading expression from ${jobPath}`); - const expression = await fs.readFile(jobPath, 'utf8'); - const name = path.parse(jobPath).name; + try { + const expression = await fs.readFile(jobPath, 'utf8'); + const name = path.parse(jobPath).name; - const step: Job = { expression }; + const step: Job = { expression }; - // The adaptor should have been expanded nicely already, so we don't need intervene here - if (options.adaptors) { - const [adaptor] = options.adaptors; - if (adaptor) { - step.adaptor = adaptor; + // The adaptor should have been expanded nicely already, so we don't need intervene here + if (options.adaptors) { + const [adaptor] = options.adaptors; + if (adaptor) { + step.adaptor = adaptor; + } } - } - const wfOptions: WorkflowOptions = {}; - // TODO support state props to remove? - maybeAssign(options, wfOptions, ['timeout']); + const wfOptions: WorkflowOptions = {}; + // TODO support state props to remove? 
+ maybeAssign(options, wfOptions, ['timeout']); - const plan: ExecutionPlan = { - workflow: { - name, - steps: [step], - }, - options: wfOptions, - }; - // call loadXPlan now so that any options can be written - return loadXPlan(plan, options, logger); + const plan: ExecutionPlan = { + workflow: { + name, + steps: [step], + }, + options: wfOptions, + }; + // call loadXPlan now so that any options can be written + return loadXPlan(plan, options, logger); + } catch (e) { + abort( + logger, + 'Expression not found', + undefined, + `Failed to load the expression from ${jobPath}` + ); + } }; const loadOldWorkflow = async ( @@ -220,6 +227,7 @@ const importExpressions = async ( configurationStr, log ); + console.log(configString); job.configuration = JSON.parse(configString!); } } @@ -234,7 +242,6 @@ const loadXPlan = async ( if (!plan.options) { plan.options = {}; } - // Note that baseDir should be set up in the default function await importExpressions(plan, options.baseDir!, logger); // expand shorthand adaptors in the workflow jobs From 0e22a0dfd86a5c5f41fa1644c56fd9df25c52030 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 15:51:18 +0000 Subject: [PATCH 051/128] cli: tidy --- packages/cli/src/compile/command.ts | 2 +- packages/cli/src/compile/compile.ts | 2 +- packages/cli/src/compile/handler.ts | 4 +- packages/cli/src/execute/command.ts | 5 +- packages/cli/src/options.ts | 13 +- packages/cli/src/util/load-input.ts | 148 -------- packages/cli/src/util/load-plan.ts | 20 +- packages/cli/test/commands.test.ts | 26 +- packages/cli/test/compile/compile.test.ts | 8 +- packages/cli/test/compile/options.test.ts | 4 +- packages/cli/test/execute/execute.test.ts | 9 +- packages/cli/test/execute/options.test.ts | 4 +- .../cli/test/options/ensure/inputPath.test.ts | 12 +- packages/cli/test/options/execute.test.ts | 4 +- packages/cli/test/util/load-input.test.ts | 331 ------------------ packages/cli/test/util/load-plan.test.ts | 10 +- 16 files changed, 58 
insertions(+), 544 deletions(-) delete mode 100644 packages/cli/src/util/load-input.ts delete mode 100644 packages/cli/test/util/load-input.test.ts diff --git a/packages/cli/src/compile/command.ts b/packages/cli/src/compile/command.ts index 8b8320b41..c9b1fdb21 100644 --- a/packages/cli/src/compile/command.ts +++ b/packages/cli/src/compile/command.ts @@ -9,7 +9,7 @@ export type CompileOptions = Pick< | 'command' | 'expandAdaptors' | 'ignoreImports' - | 'jobPath' + | 'expressionPath' | 'job' | 'logJson' | 'log' diff --git a/packages/cli/src/compile/compile.ts b/packages/cli/src/compile/compile.ts index edbe252ba..a2f8285f6 100644 --- a/packages/cli/src/compile/compile.ts +++ b/packages/cli/src/compile/compile.ts @@ -15,7 +15,7 @@ export default async ( ) => { if (typeof planOrPath === 'string') { const result = await compileJob(planOrPath as string, opts, log); - log.success(`Compiled expression from ${opts.jobPath}`); + log.success(`Compiled expression from ${opts.expressionPath}`); return result; } diff --git a/packages/cli/src/compile/handler.ts b/packages/cli/src/compile/handler.ts index 23f10bb7d..ac19752fb 100644 --- a/packages/cli/src/compile/handler.ts +++ b/packages/cli/src/compile/handler.ts @@ -10,8 +10,8 @@ const compileHandler = async (options: CompileOptions, logger: Logger) => { assertPath(options.path); let result; - if (options.jobPath) { - result = await compile(options.jobPath, options, logger); + if (options.expressionPath) { + result = await compile(options.expressionPath, options, logger); } else { const plan = await loadPlan(options, logger); result = await compile(plan, options, logger); diff --git a/packages/cli/src/execute/command.ts b/packages/cli/src/execute/command.ts index 60866ac67..257f52333 100644 --- a/packages/cli/src/execute/command.ts +++ b/packages/cli/src/execute/command.ts @@ -14,7 +14,7 @@ export type ExecuteOptions = Required< | 'expandAdaptors' | 'immutable' | 'ignoreImports' - | 'jobPath' + | 'expressionPath' | 'log' | 
'logJson' | 'outputPath' @@ -30,7 +30,6 @@ export type ExecuteOptions = Required< | 'timeout' | 'useAdaptorsMonorepo' | 'workflowPath' - | 'workflow' > > & Pick; @@ -89,7 +88,7 @@ const executeCommand: yargs.CommandModule = { ) .example( 'openfn compile job.js -a http', - 'Compile job.js with the http adaptor and print the code to stdout' + 'Compile the expression at job.js with the http adaptor and print the code to stdout' ), }; diff --git a/packages/cli/src/options.ts b/packages/cli/src/options.ts index e7aaa9d49..da2cc6a8e 100644 --- a/packages/cli/src/options.ts +++ b/packages/cli/src/options.ts @@ -1,9 +1,7 @@ import path from 'node:path'; import yargs from 'yargs'; -import type { ExecutionPlan } from '@openfn/lexicon'; import type { CommandList } from './commands'; -import { OldCLIWorkflow } from './types'; import { DEFAULT_REPO_DIR } from './constants'; import { expandAdaptors as doExpandAdaptors, @@ -30,7 +28,7 @@ export type Opts = { force?: boolean; immutable?: boolean; ignoreImports?: boolean | string[]; - jobPath?: string; // TODO rename to expressionPath + expressionPath?: string; log?: Record; logJson?: boolean; monorepoPath?: string; @@ -54,9 +52,6 @@ export type Opts = { // deprecated workflowPath?: string; - job?: string; - plan?: ExecutionPlan; // TODO pretty sure this doesn't live on options - workflow?: OldCLIWorkflow; // TODO I don't think this should sit on options anymore? 
}; // Definition of what Yargs returns (before ensure is called) @@ -232,7 +227,7 @@ export const projectId: CLIOption = { }, }; -// Input path covers jobPath and workflowPath +// Input path covers expressionPath and workflowPath export const inputPath: CLIOption = { name: 'input-path', yargs: { @@ -243,10 +238,10 @@ export const inputPath: CLIOption = { if (basePath?.endsWith('.json')) { opts.planPath = basePath; } else if (basePath?.endsWith('.js')) { - opts.jobPath = basePath; + opts.expressionPath = basePath; } else { const base = getBaseDir(opts); - setDefaultValue(opts, 'jobPath', path.join(base, 'job.js')); + setDefaultValue(opts, 'expressionPath', path.join(base, 'job.js')); } }, }; diff --git a/packages/cli/src/util/load-input.ts b/packages/cli/src/util/load-input.ts deleted file mode 100644 index 9e6ce9b46..000000000 --- a/packages/cli/src/util/load-input.ts +++ /dev/null @@ -1,148 +0,0 @@ -// TODO remove this now -// Let's just port over any tests we want -// (acutally let's get all tests, including integration, passing first) -import path from 'node:path'; -import fs from 'node:fs/promises'; -import { isPath } from '@openfn/compiler'; -import type { ExecutionPlan } from '@openfn/lexicon'; -import type { Logger } from '@openfn/logger'; - -import abort from './abort'; -import type { CLIExecutionPlan } from '../types'; -import type { Opts } from '../options'; - -type LoadWorkflowOpts = Required< - Pick ->; - -export default async ( - opts: Pick, - log: Logger -) => { - const { job, workflow, jobPath, workflowPath } = opts; - if (workflow || workflowPath) { - return loadWorkflow(opts as LoadWorkflowOpts, log); - } - - if (job) { - return job; - } - if (jobPath) { - try { - log.debug(`Loading job from ${jobPath}`); - opts.job = await fs.readFile(jobPath, 'utf8'); - return opts.job; - } catch (e: any) { - abort( - log, - 'Job not found', - undefined, - `Failed to load the job from ${jobPath}` - ); - } - } -}; - -const loadWorkflow = async (opts: 
LoadWorkflowOpts, log: Logger) => { - const { workflowPath, workflow } = opts; - - const readWorkflow = async () => { - try { - const text = await fs.readFile(workflowPath, 'utf8'); - return text; - } catch (e) { - abort( - log, - 'Workflow not found', - undefined, - `Failed to load a workflow from ${workflowPath}` - ); - } - }; - - const parseWorkflow = (contents: string) => { - try { - return JSON.parse(contents); - } catch (e: any) { - abort( - log, - 'Invalid JSON in workflow', - e, - `Check the syntax of the JSON at ${workflowPath}` - ); - } - }; - - const fetchWorkflowFile = async ( - jobId: string, - rootDir: string = '', - filePath: string - ) => { - try { - // Special handling for ~ feels like a necessary evil - const fullPath = filePath.startsWith('~') - ? filePath - : path.resolve(rootDir, filePath); - const result = await fs.readFile(fullPath, 'utf8'); - return result; - } catch (e) { - abort( - log, - `File not found for job ${jobId}: ${filePath}`, - undefined, - `This workflow references a file which cannot be found at ${filePath}\n\nPaths inside the workflow are relative to the workflow.json` - ); - } - }; - - log.debug(`Loading workflow from ${workflowPath}`); - try { - let wf: CLIExecutionPlan; - let rootDir = opts.baseDir; - if (workflowPath) { - let workflowRaw = await readWorkflow(); - wf = parseWorkflow(workflowRaw!); - if (!rootDir) { - // TODO this may not be neccessary, but keeping just in case - rootDir = path.dirname(workflowPath); - } - } else { - wf = workflow as CLIExecutionPlan; - } - - // TODO auto generate ids? 
- - // identify any expressions/configs that are paths, and load them in - // All paths are relative to the workflow itself - let idx = 0; - for (const job of wf.jobs) { - idx += 1; - const expressionStr = - typeof job.expression === 'string' && job.expression?.trim(); - const configurationStr = - typeof job.configuration === 'string' && job.configuration?.trim(); - if (expressionStr && isPath(expressionStr)) { - job.expression = await fetchWorkflowFile( - job.id || `${idx}`, - rootDir, - expressionStr - ); - } - if (configurationStr && isPath(configurationStr)) { - const configString = await fetchWorkflowFile( - job.id || `${idx}`, - rootDir, - configurationStr - ); - job.configuration = JSON.parse(configString!); - } - } - - opts.workflow = wf as ExecutionPlan; - log.debug('Workflow loaded!'); - return opts.workflow; - } catch (e) { - log.error(`Error loading workflow from ${workflowPath}`); - throw e; - } -}; diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 6a4496717..17bb0f7dd 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -17,7 +17,7 @@ import type { OldCLIWorkflow } from '../types'; const loadPlan = async ( options: Pick< Opts, - | 'jobPath' + | 'expressionPath' | 'planPath' | 'workflowPath' | 'adaptors' @@ -26,9 +26,9 @@ const loadPlan = async ( >, logger: Logger ): Promise => { - const { workflowPath, planPath, jobPath } = options; + const { workflowPath, planPath, expressionPath } = options; - if (jobPath) { + if (expressionPath) { return loadExpression(options, logger); } @@ -38,8 +38,6 @@ const loadPlan = async ( options.baseDir = path.dirname(jsonPath!); } - // TODO if neither jobPath, planPath or workflowPath is set... what happens? 
- // I think the CLI will exit before we even get here const json = await loadJson(jsonPath!, logger); if (json.workflow) { return loadXPlan(json, options, logger); @@ -88,15 +86,15 @@ const maybeAssign = (a: any, b: any, keys: Array) => { }; const loadExpression = async ( - options: Pick, + options: Pick, logger: Logger ): Promise => { - const jobPath = options.jobPath!; + const expressionPath = options.expressionPath!; - logger.debug(`Loading expression from ${jobPath}`); + logger.debug(`Loading expression from ${expressionPath}`); try { - const expression = await fs.readFile(jobPath, 'utf8'); - const name = path.parse(jobPath).name; + const expression = await fs.readFile(expressionPath, 'utf8'); + const name = path.parse(expressionPath).name; const step: Job = { expression }; @@ -126,7 +124,7 @@ const loadExpression = async ( logger, 'Expression not found', undefined, - `Failed to load the expression from ${jobPath}` + `Failed to load the expression from ${expressionPath}` ); } }; diff --git a/packages/cli/test/commands.test.ts b/packages/cli/test/commands.test.ts index 09d8e924e..89f271826 100644 --- a/packages/cli/test/commands.test.ts +++ b/packages/cli/test/commands.test.ts @@ -24,7 +24,7 @@ const EXPR_EXPORT_STATE = "export default [() => ({ configuration: {}, data: {}, foo: 'bar' })];"; type RunOptions = { - jobPath?: string; + expressionPath?: string; statePath?: string; outputPath?: string; state?: any; @@ -43,7 +43,7 @@ async function run(command: string, job: string, options: RunOptions = {}) { // A good reason to move all these into integration tests tbh! 
command = command.replace(/^openfn /, ''); - const jobPath = options.jobPath || 'job.js'; + const expressionPath = options.expressionPath || 'job.js'; const statePath = options.statePath || 'state.json'; const outputPath = options.outputPath || 'output.json'; const state = @@ -58,7 +58,7 @@ async function run(command: string, job: string, options: RunOptions = {}) { // Mock the file system in-memory if (!options.disableMock) { mock({ - [jobPath]: job, + [expressionPath]: job, [statePath]: state, [outputPath]: '{}', [pnpm]: mock.load(pnpm, {}), @@ -74,7 +74,7 @@ async function run(command: string, job: string, options: RunOptions = {}) { const opts = cmd.parse(command) as Opts; // Override some options after the command has been parsed - opts.path = jobPath; + opts.path = expressionPath; opts.repoDir = options.repoDir; opts.log = { default: 'none' }; @@ -113,7 +113,7 @@ test.serial('run an execution plan', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const result = await run('openfn wf.json', JSON.stringify(plan), options); @@ -140,7 +140,7 @@ test.serial('run an execution plan with start', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const result = await run( @@ -206,7 +206,7 @@ test.serial('run a workflow', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const result = await run('openfn wf.json', JSON.stringify(workflow), options); @@ -227,7 +227,7 @@ test.serial('run a workflow with config as an object', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const result = await run('openfn wf.json', 
JSON.stringify(workflow), options); t.deepEqual(result, { @@ -249,7 +249,7 @@ test.serial('run a workflow with config as a path', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test mockfs: { '/config.json': JSON.stringify({ y: 0 }), }, @@ -267,7 +267,7 @@ test.serial.skip( async (t) => { const options = { // set up the file system - jobPath: + expressionPath: '~/openfn/jobs/the-question/what-is-the-answer-to-life-the-universe-and-everything.js', outputPath: '~/openfn/jobs/the-question/output.json', statePath: '~/openfn/jobs/the-question/state.json', @@ -538,7 +538,7 @@ test.serial( const options = { outputPath: 'output.json', - jobPath: 'wf.json', + expressionPath: 'wf.json', repoDir: '/repo', }; @@ -607,7 +607,7 @@ test.serial( }); const result = await run('workflow.json -m', workflow, { - jobPath: 'workflow.json', + expressionPath: 'workflow.json', }); t.true(result.data.done); delete process.env.OPENFN_ADAPTORS_REPO; @@ -635,7 +635,7 @@ test.serial('compile a job: openfn compile job.js to file', async (t) => { test.serial('compile a workflow: openfn compile wf.json to file', async (t) => { const options = { outputPath: 'out.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const wf = JSON.stringify({ diff --git a/packages/cli/test/compile/compile.test.ts b/packages/cli/test/compile/compile.test.ts index fdc48c5f4..46f6520d1 100644 --- a/packages/cli/test/compile/compile.test.ts +++ b/packages/cli/test/compile/compile.test.ts @@ -15,7 +15,7 @@ const mockLog = createMockLogger(); test.after(resetMockFs); -const jobPath = '/job.js'; +const expressionPath = '/job.js'; type TransformOptionsWithImports = { ['add-imports']: { @@ -44,14 +44,14 @@ test('compile from source string', async (t) => { test.serial('compile from path', async (t) => { const job = 'x();'; mockFs({ - [jobPath]: job, + 
[expressionPath]: job, }); const opts = { - jobPath, + expressionPath, } as CompileOptions; - const result = await compile(jobPath, opts, mockLog); + const result = await compile(expressionPath, opts, mockLog); const expected = 'export default [x()];'; t.is(result, expected); diff --git a/packages/cli/test/compile/options.test.ts b/packages/cli/test/compile/options.test.ts index ed8a48390..e3d896f8d 100644 --- a/packages/cli/test/compile/options.test.ts +++ b/packages/cli/test/compile/options.test.ts @@ -13,7 +13,7 @@ test('correct default options', (t) => { t.deepEqual(options.adaptors, []); t.is(options.command, 'compile'); t.is(options.expandAdaptors, true); - t.is(options.jobPath, 'job.js'); + t.is(options.expressionPath, 'job.js'); t.falsy(options.logJson); // TODO this is undefined right now t.is(options.outputStdout, true); t.is(options.path, 'job.js'); @@ -52,7 +52,7 @@ test("don't expand adaptors if --no-expand-adaptors is set", (t) => { test('default job path', (t) => { const options = parse('compile /tmp/my-job/ --immutable'); t.is(options.path, '/tmp/my-job/'); - t.is(options.jobPath, '/tmp/my-job/job.js'); + t.is(options.expressionPath, '/tmp/my-job/job.js'); }); test('enable json logging', (t) => { diff --git a/packages/cli/test/execute/execute.test.ts b/packages/cli/test/execute/execute.test.ts index 8931a0eb5..a3e648b6b 100644 --- a/packages/cli/test/execute/execute.test.ts +++ b/packages/cli/test/execute/execute.test.ts @@ -43,7 +43,7 @@ test.serial('run a simple job', async (t) => { const options = { ...defaultOptions, - jobPath: '/job.js', + expressionPath: '/job.js', }; const result = await handler(options, logger); @@ -58,7 +58,7 @@ test.serial('run a job with initial state', async (t) => { const options = { ...defaultOptions, - jobPath: '/job.js', + expressionPath: '/job.js', stateStdin: JSON.stringify({ data: { count: 10 } }), }; @@ -275,6 +275,7 @@ test.serial('run a workflow with an adaptor (shortform)', async (t) => { ...defaultOptions, 
workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), + expandAdaptors: true, }; const result = await handler(options, logger); t.is(result.data.count, 10); @@ -289,7 +290,7 @@ test.serial('run a job without compilation', async (t) => { const options = { ...defaultOptions, compile: false, - jobPath: '/job.js', + expressionPath: '/job.js', }; const result = await handler(options, logger); @@ -304,7 +305,7 @@ test.serial('run a job which does not return state', async (t) => { const options = { ...defaultOptions, - jobPath: '/job.js', + expressionPath: '/job.js', }; const result = await handler(options, logger); t.falsy(result); diff --git a/packages/cli/test/execute/options.test.ts b/packages/cli/test/execute/options.test.ts index 968523921..b3c18aaf4 100644 --- a/packages/cli/test/execute/options.test.ts +++ b/packages/cli/test/execute/options.test.ts @@ -16,7 +16,7 @@ test('correct default options', (t) => { t.is(options.compile, true); t.is(options.expandAdaptors, true); t.is(options.immutable, false); - t.is(options.jobPath, 'job.js'); + t.is(options.expressionPath, 'job.js'); t.falsy(options.logJson); // TODO this is undefined right now t.is(options.outputPath, 'output.json'); t.is(options.outputStdout, false); @@ -79,7 +79,7 @@ test('enable immutability', (t) => { test('default job path', (t) => { const options = parse('execute /tmp/my-job/ --immutable'); t.is(options.path, '/tmp/my-job/'); - t.is(options.jobPath, '/tmp/my-job/job.js'); + t.is(options.expressionPath, '/tmp/my-job/job.js'); }); test('enable json logging', (t) => { diff --git a/packages/cli/test/options/ensure/inputPath.test.ts b/packages/cli/test/options/ensure/inputPath.test.ts index a295b8ec8..e62a827b7 100644 --- a/packages/cli/test/options/ensure/inputPath.test.ts +++ b/packages/cli/test/options/ensure/inputPath.test.ts @@ -1,34 +1,34 @@ import test from 'ava'; import { inputPath, Opts } from '../../../src/options'; -test('sets jobPath using path', (t) => { 
+test('sets expressionPath using path', (t) => { const opts = { path: 'jam.js', } as Opts; inputPath.ensure!(opts); - t.is(opts.jobPath, 'jam.js'); + t.is(opts.expressionPath, 'jam.js'); }); -test('sets jobPath to path/job.js if path is a folder', (t) => { +test('sets expressionPath to path/job.js if path is a folder', (t) => { const opts = { path: '/jam', } as Opts; inputPath.ensure!(opts); - t.is(opts.jobPath, '/jam/job.js'); + t.is(opts.expressionPath, '/jam/job.js'); }); -test('sets jobPath to path/job.js if path is a folder (trailing slash)', (t) => { +test('sets expressionPath to path/job.js if path is a folder (trailing slash)', (t) => { const opts = { path: '/jam/', } as Opts; inputPath.ensure!(opts); - t.is(opts.jobPath, '/jam/job.js'); + t.is(opts.expressionPath, '/jam/job.js'); }); test.skip('set workflowPath if path ends in json', (t) => { diff --git a/packages/cli/test/options/execute.test.ts b/packages/cli/test/options/execute.test.ts index 720e15906..e9950e22f 100644 --- a/packages/cli/test/options/execute.test.ts +++ b/packages/cli/test/options/execute.test.ts @@ -12,9 +12,9 @@ const cmd = yargs().command(execute); const parse = (command: string) => cmd.parse(command) as yargs.Arguments; -test("execute: jobPath'.'", (t) => { +test("execute: expressionPath'.'", (t) => { const options = parse('execute job.js'); - t.assert(options.jobPath === 'job.js'); + t.assert(options.expressionPath === 'job.js'); }); test('execute: default outputPath to ./output.json', (t) => { diff --git a/packages/cli/test/util/load-input.test.ts b/packages/cli/test/util/load-input.test.ts deleted file mode 100644 index cd20ec1f5..000000000 --- a/packages/cli/test/util/load-input.test.ts +++ /dev/null @@ -1,331 +0,0 @@ -import test from 'ava'; -import mock from 'mock-fs'; -import { createMockLogger } from '@openfn/logger'; -import type { ExecutionPlan } from '@openfn/lexicon'; - -import loadInput from '../../src/util/load-input'; - -const logger = createMockLogger(undefined, { 
level: 'debug' }); - -// TODO add support for handling old versions here -test.beforeEach(() => { - mock({ - 'test/job.js': 'x', - 'test/wf-old.json': JSON.stringify({ - start: 'a', - jobs: [{ id: 'a', expression: 'x()' }], - }), - 'test/wf.json': JSON.stringify({ - options: { start: 'a' }, - workflow: { - // TODO rename steps - jobs: [{ id: 'a', expression: 'x()' }], - }, - }), - 'test/wf-err.json': '!!!', - }); -}); - -test.afterEach(() => { - logger._reset(); - mock.restore(); -}); - -test.serial('do nothing if no path provided', async (t) => { - const opts = {}; - - const result = await loadInput(opts, logger); - t.falsy(result); - t.assert(Object.keys(opts).length === 0); -}); - -test.serial('return the workflow if already set ', async (t) => { - const opts = { - workflow: { options: { start: 'x' }, jobs: [] }, - job: 'j', - jobPath: 'test/job.js', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.truthy(result); - t.is(result.start, 'x'); -}); - -test.serial( - 'return the job if already set (and workflow is not)', - async (t) => { - const opts = { - job: 'j', - jobPath: 'test/job.js', - }; - - const result = await loadInput(opts, logger); - t.is(result, 'j'); - } -); - -test.serial('load a job from a path and return the result', async (t) => { - const opts = { - jobPath: 'test/job.js', - }; - - const result = await loadInput(opts, logger); - t.is(result, 'x'); -}); - -test.serial('load a job from a path and mutate opts', async (t) => { - const opts = { - jobPath: 'test/job.js', - job: '', - }; - - await loadInput(opts, logger); - t.is(opts.job, 'x'); -}); - -test.serial('abort if the job cannot be found', async (t) => { - const opts = { - jobPath: 'test/blah.js', - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /job not found/i)); - t.assert( - logger._find('always', /Failed to load the job from test\/blah.js/i) - ); - 
t.assert(logger._find('error', /critical error: aborting command/i)); -}); - -test.serial( - 'load a workflow from a path and return the result as JSON', - async (t) => { - const opts = { - workflowPath: 'test/wf.json', - }; - - const result = await loadInput(opts, logger); - t.is(result.start, 'a'); - } -); - -test.serial('abort if the workflow cannot be found', async (t) => { - const opts = { - workflowPath: 'test/blah.json', - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /workflow not found/i)); - t.assert( - logger._find('always', /Failed to load a workflow from test\/blah.json/i) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); -}); - -test.serial('abort if the workflow contains invalid json', async (t) => { - const opts = { - workflowPath: 'test/wf-err.json', - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /invalid json in workflow/i)); - t.assert( - logger._find('always', /check the syntax of the json at test\/wf-err.json/i) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); -}); - -test.serial('load a workflow from a path and mutate opts', async (t) => { - const opts = { - workflowPath: 'test/wf.json', - workflow: undefined, - }; - - await loadInput(opts, logger); - t.is((opts.workflow as any).start, 'a'); -}); - -test.serial('prefer workflow to job if both are somehow set', async (t) => { - const opts = { - jobPath: 'test/job.js', - workflowPath: 'test/wf.json', - }; - - const result = await loadInput(opts, logger); - t.is(result.start, 'a'); -}); - -test.serial('resolve workflow expression paths (filename)', async (t) => { - mock({ - '/test/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: 'job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, 
logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -test.serial( - 'resolve workflow expression paths (relative same dir)', - async (t) => { - mock({ - '/test/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: './job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); - } -); - -test.serial( - 'resolve workflow expression paths (relative different dir)', - async (t) => { - mock({ - '/jobs/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: '../jobs/job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); - } -); - -test.serial('resolve workflow expression paths (absolute)', async (t) => { - mock({ - '/job.js': 'x', - '/test/wf.json': JSON.stringify({ - start: 'a', - jobs: [{ expression: '/job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -test.serial('resolve workflow expression paths (home)', async (t) => { - mock({ - '~/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: '~/job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -test.serial('Load a workflow path with trailing spaces', async (t) => { - const opts = { - workflow: { jobs: [{ expression: 'test/job.js ' }] }, - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -// Less thorough testing on config because it goes through the same code -test.serial('resolve workflow config paths (home)', async (t) => { - 
const cfg = { id: 'x' }; - const cfgString = JSON.stringify(cfg); - mock({ - '~/config.json': cfgString, - '/config.json': cfgString, - '/test/config.json': cfgString, - '/test/wf.json': JSON.stringify({ - jobs: [ - { configuration: '/config.json' }, - { configuration: '~/config.json' }, - { configuration: 'config.json ' }, // trailing spaces! - { configuration: './config.json ' }, - ], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs.length, 4); - for (const job of result.jobs) { - t.deepEqual(job.configuration, cfg); - } -}); - -test.serial( - 'abort if a workflow expression path cannot be found', - async (t) => { - const opts = { - workflow: { start: 'x', jobs: [{ id: 'a', expression: 'err.js' }] }, - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /file not found for job a: err.js/i)); - t.assert( - logger._find( - 'always', - /This workflow references a file which cannot be found/i - ) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); - } -); - -test.serial( - 'abort if a workflow expression path cannot be found for an anonymous job', - async (t) => { - const opts = { - workflow: { - start: 'x', - jobs: [{ expression: 'jam()' }, { expression: 'err.js' }], - }, - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /file not found for job 2: err.js/i)); - t.assert( - logger._find( - 'always', - /This workflow references a file which cannot be found/i - ) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); - } -); diff --git a/packages/cli/test/util/load-plan.test.ts b/packages/cli/test/util/load-plan.test.ts index 2b1e7305e..4b20c01cf 100644 --- a/packages/cli/test/util/load-plan.test.ts +++ b/packages/cli/test/util/load-plan.test.ts @@ -43,7 
+43,7 @@ test.afterEach(() => { test.serial('expression: load a plan from an expression.js', async (t) => { const opts = { - jobPath: 'test/job.js', + expressionPath: 'test/job.js', plan: {}, }; @@ -60,7 +60,7 @@ test.serial('expression: load a plan from an expression.js', async (t) => { test.serial('expression: set an adaptor on the plan', async (t) => { const opts = { - jobPath: 'test/job.js', + expressionPath: 'test/job.js', // Note that adaptor expansion should have happened before loadPlan is called adaptors: ['@openfn/language-common'], } as Partial; @@ -74,7 +74,7 @@ test.serial('expression: set an adaptor on the plan', async (t) => { test.serial('expression: do not expand adaptors', async (t) => { const opts = { - jobPath: 'test/job.js', + expressionPath: 'test/job.js', expandAdaptors: false, // Note that adaptor expansion should have happened before loadPlan is called adaptors: ['common'], @@ -89,7 +89,7 @@ test.serial('expression: do not expand adaptors', async (t) => { test.serial('expression: set a timeout on the plan', async (t) => { const opts = { - jobPath: 'test/job.js', + expressionPath: 'test/job.js', expandAdaptors: true, timeout: 111, } as Partial; @@ -101,7 +101,7 @@ test.serial('expression: set a timeout on the plan', async (t) => { test.serial('expression: set a start on the plan', async (t) => { const opts = { - jobPath: 'test/job.js', + expressionPath: 'test/job.js', start: 'x', } as Partial; From 8dc174101853a5b74b26507845cb854afc5eb104 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 16:08:04 +0000 Subject: [PATCH 052/128] runtime: remove strict mode --- packages/runtime/src/execute/expression.ts | 8 +-- packages/runtime/src/execute/step.ts | 7 +- packages/runtime/src/runtime.ts | 9 --- packages/runtime/src/util/assemble-state.ts | 13 ++-- .../runtime/test/execute/expression.test.ts | 36 ++-------- packages/runtime/test/execute/plan.test.ts | 33 +-------- packages/runtime/test/memory.test.ts | 1 - 
packages/runtime/test/runtime.test.ts | 46 ++---------- packages/runtime/test/security.test.ts | 53 ++++---------- .../runtime/test/util/assemble-state.test.ts | 70 ++++--------------- 10 files changed, 47 insertions(+), 229 deletions(-) diff --git a/packages/runtime/src/execute/expression.ts b/packages/runtime/src/execute/expression.ts index fed13bfad..1c06fe35d 100644 --- a/packages/runtime/src/execute/expression.ts +++ b/packages/runtime/src/execute/expression.ts @@ -71,12 +71,12 @@ export default ( duration = Date.now() - duration; - const finalState = prepareFinalState(plan.options, opts, result, logger); + const finalState = prepareFinalState(plan.options, result, logger); // return the final state resolve(finalState); } catch (e: any) { // whatever initial state looks like now, clean it and report it back - const finalState = prepareFinalState(plan.options, opts, input, logger); + const finalState = prepareFinalState(plan.options, input, logger); duration = Date.now() - duration; let finalError; try { @@ -155,7 +155,6 @@ const assignKeys = ( // (especially as the result get stringified again downstream) const prepareFinalState = ( options: WorkflowOptions, - opts: Options, // TODO remove this with strict mode state: any, logger: Logger ) => { @@ -177,9 +176,6 @@ const prepareFinalState = ( } }); } - if (opts.strict) { - state = assignKeys(state, {}, ['data', 'error', 'references']); - } const cleanState = stringify(state); return JSON.parse(cleanState); } diff --git a/packages/runtime/src/execute/step.ts b/packages/runtime/src/execute/step.ts index 912cae9b9..2d211fa2e 100644 --- a/packages/runtime/src/execute/step.ts +++ b/packages/runtime/src/execute/step.ts @@ -110,12 +110,7 @@ const executeStep = async ( opts.callbacks?.resolveState! 
// and here ); - const state = assembleState( - clone(input), - configuration, - globals, - opts.strict - ); + const state = assembleState(clone(input), configuration, globals); notify(NOTIFY_INIT_COMPLETE, { jobId, diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 162d2420b..c13d62140 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -11,9 +11,6 @@ export type Options = { logger?: Logger; jobLogger?: Logger; - // TODO: deprecate in this work - strict?: boolean; // Be strict about handling of state returned from jobs - // Treat state as immutable (likely to break in legacy jobs) immutableState?: boolean; @@ -79,12 +76,6 @@ const run = ( const { options } = xplan; - // TODO remove - // Strict state handling by default - if (!opts.hasOwnProperty('strict')) { - opts.strict = true; - } - if (!options.hasOwnProperty('statePropsToRemove')) { options.statePropsToRemove = ['configuration']; } diff --git a/packages/runtime/src/util/assemble-state.ts b/packages/runtime/src/util/assemble-state.ts index 2f1f69204..84f5fc12e 100644 --- a/packages/runtime/src/util/assemble-state.ts +++ b/packages/runtime/src/util/assemble-state.ts @@ -13,15 +13,12 @@ const assembleData = (initialData: any, defaultData = {}) => { const assembleState = ( initialState: any = {}, // previous or initial state configuration = {}, - defaultState: any = {}, // This is default state provided by the job - strictState: boolean = true + defaultState: any = {} // This is default state provided by the job ) => { - const obj = strictState - ? 
{} - : { - ...defaultState, - ...initialState, - }; + const obj = { + ...defaultState, + ...initialState, + }; if (initialState.references) { obj.references = initialState.references; diff --git a/packages/runtime/test/execute/expression.test.ts b/packages/runtime/test/execute/expression.test.ts index a455a974e..d400259fb 100644 --- a/packages/runtime/test/execute/expression.test.ts +++ b/packages/runtime/test/execute/expression.test.ts @@ -1,7 +1,7 @@ import test from 'ava'; import { fn } from '@openfn/language-common'; import { createMockLogger } from '@openfn/logger'; -import type { State } from '@openfn/lexicon'; +import type { Operation, State } from '@openfn/lexicon'; import execute from '../../src/execute/expression'; import type { ExecutionContext } from '../../src/types'; @@ -151,41 +151,15 @@ test('no props are removed from state if a falsy value is passed to statePropsTo t.deepEqual(result, state); }); -test('config is removed from the result (strict)', async (t) => { +test('config is removed from the result', async (t) => { const job = [async (s: State) => s]; - const context = createContext({ opts: { strict: true } }); + const context = createContext({ opts: {} }); const result = await execute(context, job, { configuration: {} }); t.deepEqual(result, {}); }); -test('config is removed from the result (non-strict)', async (t) => { - const job = [async (s: State) => s]; - const context = createContext({ opts: { strict: false } }); - const result = await execute(context, job, { configuration: {} }); - t.deepEqual(result, {}); -}); - -test('output state is cleaned in strict mode', async (t) => { - const job = [ - async () => ({ - data: {}, - references: [], - configuration: {}, - x: true, - }), - ]; - - const context = createContext({ opts: { strict: true } }); - - const result = await execute(context, job, {}); - t.deepEqual(result, { - data: {}, - references: [], - }); -}); - -test('output state is left alone in non-strict mode', async (t) => { 
+test('output state is returned verbatim, apart from config', async (t) => { const state = { data: {}, references: [], @@ -194,7 +168,7 @@ test('output state is left alone in non-strict mode', async (t) => { }; const job = [async () => ({ ...state })]; - const context = createContext({ opts: { strict: false } }); + const context = createContext(); const result = await execute(context, job, {}); t.deepEqual(result, { diff --git a/packages/runtime/test/execute/plan.test.ts b/packages/runtime/test/execute/plan.test.ts index b2fea96af..f38261af6 100644 --- a/packages/runtime/test/execute/plan.test.ts +++ b/packages/runtime/test/execute/plan.test.ts @@ -259,30 +259,6 @@ test('Previous state overrides inline data', async (t) => { t.is(result.data.x, 6); }); -test('only allowed state is passed through in strict mode', async (t) => { - const plan = createPlan([ - createJob({ - expression: - 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', - next: { - job2: true, - }, - }), - createJob({ - id: 'job2', - // Throw if we receive unexpected stuff in state - expression: - 'export default [s => { if (s.x || s.y) { throw new Error() }; return s;}]', - }), - ]); - - const result: any = await executePlan(plan, {}, { strict: true }, mockLogger); - t.deepEqual(result, { - data: {}, - references: [], - }); -}); - test('steps only receive state from upstream steps', async (t) => { const assert = (expr: string) => `if (!(${expr})) throw new Error('ASSERT FAIL')`; @@ -349,7 +325,7 @@ test('steps only receive state from upstream steps', async (t) => { }); }); -test('all state is passed through in non-strict mode', async (t) => { +test('all state is passed through successive jobs', async (t) => { const plan = createPlan([ createJob({ expression: @@ -366,12 +342,7 @@ test('all state is passed through in non-strict mode', async (t) => { }), ]); - const result: any = await executePlan( - plan, - {}, - { strict: false }, - mockLogger - ); + const result: any = await 
executePlan(plan, {}, {}, mockLogger); t.deepEqual(result, { data: {}, references: [], diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts index e0a10cbec..9dd3b83f7 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -50,7 +50,6 @@ const run = async (t, workflow: ExecutionPlan) => { }; const state = await callRuntime(workflow, { - strict: false, callbacks: { notify }, globals: { process: { diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index f19a494c7..4eebb9d6e 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -130,7 +130,6 @@ test('resolve a credential', async (t) => { }; const options = { - strict: false, callbacks: { resolveCredential: async () => ({ password: 'password1' }), }, @@ -355,41 +354,6 @@ test('prefer initial state to inline state', async (t) => { t.is(result.data.y, 20); }); -test('do not pass extraneous state in strict mode', async (t) => { - const plan: ExecutionPlanNoOptions = { - workflow: { - steps: [ - { - expression: 'export default [() => ({ x: 1, data: {}} )]', - }, - ], - }, - }; - - const result: any = await run(plan, {}, { strict: true }); - t.deepEqual(result, { - data: {}, - }); -}); - -test('do pass extraneous state in non-strict mode', async (t) => { - const plan: ExecutionPlanNoOptions = { - workflow: { - steps: [ - { - expression: 'export default [() => ({ x: 1, data: {}} )]', - }, - ], - }, - }; - - const result: any = await run(plan, {}, { strict: false }); - t.deepEqual(result, { - x: 1, - data: {}, - }); -}); - test('Allow a job to return undefined', async (t) => { const plan: ExecutionPlanNoOptions = { workflow: { @@ -419,7 +383,7 @@ test('log errors, write to state, and continue', async (t) => { }; const logger = createMockLogger(); - const result: any = await run(plan, {}, { strict: false, logger }); + const result: any = await run(plan, {}, { logger }); 
t.is(result.x, 1); t.truthy(result.errors); @@ -496,7 +460,7 @@ test('error reports can be overwritten', async (t) => { }; const logger = createMockLogger(); - const result: any = await run(plan, {}, { strict: false, logger }); + const result: any = await run(plan, {}, { logger }); t.is(result.errors, 22); }); @@ -517,7 +481,7 @@ test('stuff written to state before an error is preserved', async (t) => { }; const logger = createMockLogger(); - const result: any = await run(plan, {}, { strict: false, logger }); + const result: any = await run(plan, {}, { logger }); t.is(result.x, 1); }); @@ -525,7 +489,7 @@ test('stuff written to state before an error is preserved', async (t) => { test('data can be an array (expression)', async (t) => { const expression = 'export default [() => ({ data: [1,2,3] })]'; - const result: any = await run(expression, {}, { strict: false }); + const result: any = await run(expression, {}, {}); t.deepEqual(result.data, [1, 2, 3]); }); @@ -546,7 +510,7 @@ test('data can be an array (workflow)', async (t) => { }, }; - const result: any = await run(plan, {}, { strict: false }); + const result: any = await run(plan, {}, {}); t.deepEqual(result.data, [1, 2, 3]); }); diff --git a/packages/runtime/test/security.test.ts b/packages/runtime/test/security.test.ts index 02ab842f2..57b9b6836 100644 --- a/packages/runtime/test/security.test.ts +++ b/packages/runtime/test/security.test.ts @@ -3,11 +3,7 @@ import test from 'ava'; import { createMockLogger } from '@openfn/logger'; import type { ExecutionPlan, State } from '@openfn/lexicon'; -import doRun from '../src/runtime'; - -// Disable strict mode for all these tests -const run = (plan: ExecutionPlan | string, state: State, options: any = {}) => - doRun(plan, state, { strict: false, ...options }); +import run from '../src/runtime'; const logger = createMockLogger(undefined, { level: 'default' }); @@ -33,41 +29,20 @@ test.serial( } ); -test.serial( - 'config should be scrubbed from the result state in 
strict mode', - async (t) => { - const src = 'export default [(s) => s]'; - - const state = { - data: {}, - configuration: { - password: 'secret', - }, - }; +test.serial('config should be scrubbed from the result state', async (t) => { + const src = 'export default [(s) => s]'; - const result: any = await run(src, state, { strict: true }); - t.deepEqual(result.data, {}); - t.is(result.configuration, undefined); - } -); + const state = { + data: {}, + configuration: { + password: 'secret', + }, + }; -test.serial( - 'config should be scrubbed from the result state in non-strict mode', - async (t) => { - const src = 'export default [(s) => s]'; - - const state = { - data: {}, - configuration: { - password: 'secret', - }, - }; - - const result: any = await run(src, state, { strict: false }); - t.deepEqual(result.data, {}); - t.is(result.configuration, undefined); - } -); + const result: any = await run(src, state, {}); + t.deepEqual(result.data, {}); + t.is(result.configuration, undefined); +}); test.serial( 'config should be scrubbed from the result state after error', @@ -81,7 +56,7 @@ test.serial( }, }; - const result: any = await run(src, state, { strict: false }); + const result: any = await run(src, state, {}); t.truthy(result.errors); t.deepEqual(result.data, {}); t.is(result.configuration, undefined); diff --git a/packages/runtime/test/util/assemble-state.test.ts b/packages/runtime/test/util/assemble-state.test.ts index 8cb87fd8d..eac478b93 100644 --- a/packages/runtime/test/util/assemble-state.test.ts +++ b/packages/runtime/test/util/assemble-state.test.ts @@ -1,13 +1,11 @@ import test from 'ava'; import assembleState from '../../src/util/assemble-state'; -// TODO: what if iniitial state or data is not an object? -// Is this an error? Maybe just in strict mode? 
- test('with no arguments, returns a basic state object', (t) => { const initial = undefined; const defaultState = undefined; const config = undefined; + const result = assembleState(initial, config, defaultState); t.deepEqual(result, { configuration: {}, @@ -15,34 +13,12 @@ test('with no arguments, returns a basic state object', (t) => { }); }); -test('strict: ignores initial state', (t) => { +test('includes initial state', (t) => { const initial = { x: 22 }; const defaultState = undefined; const config = undefined; - const result = assembleState(initial, config, defaultState, true); - t.deepEqual(result, { - configuration: {}, - data: {}, - }); -}); -test('strict: ignores initial state except references', (t) => { - const initial = { references: [] }; - const defaultState = undefined; - const config = undefined; - const result = assembleState(initial, config, defaultState, true); - t.deepEqual(result, { - references: [], - configuration: {}, - data: {}, - }); -}); - -test('non-strict: includes initial state', (t) => { - const initial = { x: 22 }; - const defaultState = undefined; - const config = undefined; - const result = assembleState(initial, config, defaultState, false); + const result = assembleState(initial, config, defaultState); t.deepEqual(result, { x: 22, configuration: {}, @@ -55,18 +31,14 @@ test('merges default and initial data objects', (t) => { const defaultState = { data: { y: 1 } }; const config = undefined; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: {}, data: { x: 1, y: 1, }, }); - - // Ensure the same behaviour in non-strict mode - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('Initial data is prioritised over default data', (t) => { @@ -74,16 +46,13 @@ test('Initial data is prioritised over default data', (t) => { const 
defaultState = { data: { x: 2 } }; const config = undefined; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: {}, data: { x: 1, }, }); - - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('Initial data does not have to be an object', (t) => { @@ -91,16 +60,11 @@ test('Initial data does not have to be an object', (t) => { const defaultState = { data: {} }; const config = undefined; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: {}, data: [1], }); - - // At this point I don't want any special handling for strict mode, - // see https://github.com/OpenFn/kit/issues/233 - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('merges default and initial config objects', (t) => { @@ -108,18 +72,14 @@ test('merges default and initial config objects', (t) => { const defaultState = undefined; const config = { y: 1 }; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: { x: 1, y: 1, }, data: {}, }); - - // Ensure the same behaviour in non-strict mode - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('configuration overrides initialState.configuration', (t) => { @@ -127,15 +87,11 @@ test('configuration overrides initialState.configuration', (t) => { const defaultState = undefined; const config = { x: 2 }; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + 
t.deepEqual(result, { configuration: { x: 2, }, data: {}, }); - - // Ensure the same behaviour in non-strict mode - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); From 152129d15d4e066cc9805a3ba78b11b34d7ddc45 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 16:13:41 +0000 Subject: [PATCH 053/128] remove strict mode --- .changeset/grumpy-pillows-tie.md | 7 +++ .changeset/pretty-spoons-beam.md | 1 + packages/cli/src/execute/command.ts | 4 -- packages/cli/src/execute/execute.ts | 3 -- packages/cli/src/execute/serialize-output.ts | 13 ++--- packages/cli/src/options.ts | 33 ------------ packages/cli/test/commands.test.ts | 48 +---------------- packages/cli/test/execute/options.test.ts | 11 ---- .../cli/test/options/ensure/strict.test.ts | 51 ------------------- 9 files changed, 13 insertions(+), 158 deletions(-) create mode 100644 .changeset/grumpy-pillows-tie.md delete mode 100644 packages/cli/test/options/ensure/strict.test.ts diff --git a/.changeset/grumpy-pillows-tie.md b/.changeset/grumpy-pillows-tie.md new file mode 100644 index 000000000..836dacac8 --- /dev/null +++ b/.changeset/grumpy-pillows-tie.md @@ -0,0 +1,7 @@ +--- +'@openfn/cli': major +--- + +Remove strict mode +Add support for execution plans +Update terminology diff --git a/.changeset/pretty-spoons-beam.md b/.changeset/pretty-spoons-beam.md index 61d01d3f3..49d6f215f 100644 --- a/.changeset/pretty-spoons-beam.md +++ b/.changeset/pretty-spoons-beam.md @@ -4,3 +4,4 @@ Update main run() signature Integrate with lexicon +Remove strict mode options diff --git a/packages/cli/src/execute/command.ts b/packages/cli/src/execute/command.ts index 257f52333..a18cdd40d 100644 --- a/packages/cli/src/execute/command.ts +++ b/packages/cli/src/execute/command.ts @@ -26,7 +26,6 @@ export type ExecuteOptions = Required< | 'statePath' | 'stateStdin' | 'sanitize' - | 'strict' | 'timeout' | 'useAdaptorsMonorepo' | 'workflowPath' @@ -53,8 +52,6 @@ 
const options = [ o.start, o.statePath, o.stateStdin, - o.strict, // order important - o.strictOutput, o.timeout, o.useAdaptorsMonorepo, ]; @@ -63,7 +60,6 @@ const executeCommand: yargs.CommandModule = { command: 'execute [path]', describe: `Run an openfn expression or workflow. Get more help by running openfn help. \nExecute will run a expression/workflow at the path and write the output state to disk (to ./state.json unless otherwise specified) - \nBy default only state.data will be returned fron a expression. Include --no-strict to write the entire state object. \nRemember to include the adaptor name with -a. Auto install adaptors with the -i flag.`, aliases: ['$0'], handler: ensure('execute', options), diff --git a/packages/cli/src/execute/execute.ts b/packages/cli/src/execute/execute.ts index 64b8f0520..9b4b4a576 100644 --- a/packages/cli/src/execute/execute.ts +++ b/packages/cli/src/execute/execute.ts @@ -16,9 +16,6 @@ export default async ( ): Promise => { try { const result = await run(plan, input, { - strict: opts.strict, - // start: opts.start, - // timeout: opts.timeout, immutableState: opts.immutable, logger: createLogger(RUNTIME, opts), jobLogger: createLogger(JOB, opts), diff --git a/packages/cli/src/execute/serialize-output.ts b/packages/cli/src/execute/serialize-output.ts index 040b43b5e..79f338f70 100644 --- a/packages/cli/src/execute/serialize-output.ts +++ b/packages/cli/src/execute/serialize-output.ts @@ -3,21 +3,14 @@ import { Logger } from '../util/logger'; import { Opts } from '../options'; const serializeOutput = async ( - options: Pick, + options: Pick, result: any, logger: Logger ) => { let output = result; if (output && (output.configuration || output.data)) { - if (options.strict) { - output = { data: output.data }; - if (result.errors) { - output.errors = result.errors; - } - } else { - const { configuration, ...rest } = result; - output = rest; - } + const { configuration, ...rest } = result; + output = rest; } if (output === 
undefined) { diff --git a/packages/cli/src/options.ts b/packages/cli/src/options.ts index da2cc6a8e..152aecb20 100644 --- a/packages/cli/src/options.ts +++ b/packages/cli/src/options.ts @@ -44,7 +44,6 @@ export type Opts = { start?: string; // workflow start node statePath?: string; stateStdin?: string; - strict?: boolean; // Strict state handling (only forward state.data). Defaults to true sanitize: 'none' | 'remove' | 'summarize' | 'obfuscate'; timeout?: number; // ms useAdaptorsMonorepo?: boolean; @@ -328,38 +327,6 @@ export const start: CLIOption = { }, }; -// Preserve this but hide it -export const strictOutput: CLIOption = { - name: 'no-strict-output', - yargs: { - deprecated: true, - hidden: true, - boolean: true, - }, - ensure: (opts: { strictOutput?: boolean; strict?: boolean }) => { - if (!opts.hasOwnProperty('strict')) { - // override strict not set - opts.strict = opts.strictOutput; - } - delete opts.strictOutput; - }, -}; - -export const strict: CLIOption = { - name: 'strict', - yargs: { - default: false, - boolean: true, - description: - 'Enables strict state handling, meaning only state.data is returned from a job.', - }, - ensure: (opts) => { - if (!opts.hasOwnProperty('strictOutput')) { - setDefaultValue(opts, 'strict', false); - } - }, -}; - export const skipAdaptorValidation: CLIOption = { name: 'skip-adaptor-validation', yargs: { diff --git a/packages/cli/test/commands.test.ts b/packages/cli/test/commands.test.ts index 89f271826..3d33427b4 100644 --- a/packages/cli/test/commands.test.ts +++ b/packages/cli/test/commands.test.ts @@ -327,58 +327,14 @@ test.serial( ); test.serial( - 'output to file with strict state: openfn job.js --output-path=/tmp/my-output.json --strict', + 'output to file removing configuration: openfn job.js --output-path=/tmp/my-output.json', async (t) => { const options = { outputPath: '/tmp/my-output.json', }; const result = await run( - 'openfn job.js --output-path=/tmp/my-output.json --strict', - EXPR_EXPORT_STATE, - 
options - ); - t.deepEqual(result, { data: {} }); - - const expectedFileContents = JSON.stringify({ data: {} }, null, 2); - const output = await fs.readFile('/tmp/my-output.json', 'utf8'); - t.is(output, expectedFileContents); - } -); - -test.serial( - 'output to file with non-strict state: openfn job.js --output-path=/tmp/my-output.json --no-strict-output', - async (t) => { - const options = { - outputPath: '/tmp/my-output.json', - }; - - const result = await run( - 'openfn job.js --output-path=/tmp/my-output.json --no-strict-output', - EXPR_EXPORT_STATE, - options - ); - t.deepEqual(result, { data: {}, foo: 'bar' }); - - const expectedFileContents = JSON.stringify( - { data: {}, foo: 'bar' }, - null, - 2 - ); - const output = await fs.readFile('/tmp/my-output.json', 'utf8'); - t.assert(output === expectedFileContents); - } -); - -test.serial( - 'output to file with non-strict state: openfn job.js --output-path=/tmp/my-output.json --no-strict', - async (t) => { - const options = { - outputPath: '/tmp/my-output.json', - }; - - const result = await run( - 'openfn job.js --output-path=/tmp/my-output.json --no-strict', + 'openfn job.js --output-path=/tmp/my-output.json', EXPR_EXPORT_STATE, options ); diff --git a/packages/cli/test/execute/options.test.ts b/packages/cli/test/execute/options.test.ts index b3c18aaf4..750303854 100644 --- a/packages/cli/test/execute/options.test.ts +++ b/packages/cli/test/execute/options.test.ts @@ -22,7 +22,6 @@ test('correct default options', (t) => { t.is(options.outputStdout, false); t.is(options.path, 'job.js'); t.is(options.skipAdaptorValidation, false); - t.is(options.strict, false); t.is(options.timeout, 300000); t.falsy(options.useAdaptorsMonorepo); }); @@ -87,16 +86,6 @@ test('enable json logging', (t) => { t.true(options.logJson); }); -test('disable strict output', (t) => { - const options = parse('execute job.js --no-strict'); - t.false(options.strict); -}); - -test('disable strict output (legacy)', (t) => { - const options = 
parse('execute job.js --no-strict-output'); - t.false(options.strict); -}); - test('set an output path (short)', (t) => { const options = parse('execute job.js -o /tmp/out.json'); t.is(options.outputPath, '/tmp/out.json'); diff --git a/packages/cli/test/options/ensure/strict.test.ts b/packages/cli/test/options/ensure/strict.test.ts deleted file mode 100644 index 7bdb5783e..000000000 --- a/packages/cli/test/options/ensure/strict.test.ts +++ /dev/null @@ -1,51 +0,0 @@ -import test from 'ava'; -import { strict, strictOutput, Opts } from '../../../src/options'; - -// Tests on legacy behaviour -test('strictOutput: true should set strict', (t) => { - const opts = { - strictOutput: true, - } as Opts; - strictOutput.ensure!(opts); - t.true(opts.strict); - // @ts-ignore - t.falsy(opts.strictOutput); -}); - -test('strictOutput: false should set strict', (t) => { - const opts = { - strictOutput: false, - } as Opts; - strictOutput.ensure!(opts); - t.false(opts.strict); - // @ts-ignore - t.falsy(opts.strictOutput); -}); - -test('strict should default to false', (t) => { - const opts = {} as Opts; - strict.ensure!(opts); - t.false(opts.strict); -}); - -test('strict can be set to true', (t) => { - const opts = { - strict: true, - } as Opts; - strict.ensure!(opts); - t.true(opts.strict); -}); - -test('strict overrides strictOutput', (t) => { - const opts = { - strictOutput: false, - strict: true, - } as Opts; - - // Note that the order of these two is important - strict.ensure!(opts); - strictOutput.ensure!(opts); - - t.true(opts.strict); - t.falsy(opts.strictOutput); -}); From ca6a60d6d15ccc86e155662b4f9589d96c89f33a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 16:32:33 +0000 Subject: [PATCH 054/128] cli: default workflow name to the file name --- packages/cli/src/util/load-plan.ts | 35 +++++++++++----------- packages/cli/src/util/validate-adaptors.ts | 6 ++-- packages/cli/test/util/load-plan.test.ts | 3 +- 3 files changed, 24 insertions(+), 20 deletions(-) diff 
--git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 17bb0f7dd..58c09a6f6 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -39,10 +39,11 @@ const loadPlan = async ( } const json = await loadJson(jsonPath!, logger); + const defaultName = path.parse(jsonPath!).name; if (json.workflow) { - return loadXPlan(json, options, logger); + return loadXPlan(json, options, logger, defaultName); } else { - return loadOldWorkflow(json, options, logger); + return loadOldWorkflow(json, options, logger, defaultName); } }; @@ -88,7 +89,7 @@ const maybeAssign = (a: any, b: any, keys: Array) => { const loadExpression = async ( options: Pick, logger: Logger -): Promise => { +): Promise => { const expressionPath = options.expressionPath!; logger.debug(`Loading expression from ${expressionPath}`); @@ -132,7 +133,8 @@ const loadExpression = async ( const loadOldWorkflow = async ( workflow: OldCLIWorkflow, options: Pick, - logger: Logger + logger: Logger, + defaultName: string = '' ) => { const plan: ExecutionPlan = { workflow: { @@ -147,20 +149,13 @@ const loadOldWorkflow = async ( plan.id = workflow.id; } - try { - const name = path.parse(options.workflowPath!).name; - if (name) { - plan.workflow.name = name; - } - } catch (e) { - // do nothing - } - // call loadXPlan now so that any options can be written - const final = await loadXPlan(plan, options, logger); + const final = await loadXPlan(plan, options, logger, defaultName); // TODO this can be nicer - logger.warn('converted old workflow into execution plan'); + logger.warn( + 'converted old workflow format into new execution plan format. 
See below for details' + ); logger.warn(final); return final; @@ -231,15 +226,19 @@ const importExpressions = async ( } }; -// TODO default the workflow name from the file name const loadXPlan = async ( plan: ExecutionPlan, options: Pick, - logger: Logger + logger: Logger, + defaultName: string = '' ) => { if (!plan.options) { plan.options = {}; } + + if (!plan.workflow.name && defaultName) { + plan.workflow.name = defaultName; + } // Note that baseDir should be set up in the default function await importExpressions(plan, options.baseDir!, logger); // expand shorthand adaptors in the workflow jobs @@ -252,5 +251,7 @@ const loadXPlan = async ( // TODO support state props to remove maybeAssign(options, plan.options, ['timeout', 'start']); + logger.info(`Loaded workflow ${plan.workflow.name ?? ''}`); + return plan; }; diff --git a/packages/cli/src/util/validate-adaptors.ts b/packages/cli/src/util/validate-adaptors.ts index e6b2666bc..d5126ec58 100644 --- a/packages/cli/src/util/validate-adaptors.ts +++ b/packages/cli/src/util/validate-adaptors.ts @@ -9,16 +9,18 @@ const validateAdaptors = async ( | 'autoinstall' | 'repoDir' | 'workflowPath' + | 'planPath' >, logger: Logger ) => { if (options.skipAdaptorValidation) { return; } + const isPlan = options.planPath || options.workflowPath; const hasDeclaredAdaptors = options.adaptors && options.adaptors.length > 0; - if (options.workflowPath && hasDeclaredAdaptors) { + if (isPlan && hasDeclaredAdaptors) { logger.error('ERROR: adaptor and workflow provided'); logger.error( 'This is probably not what you meant to do. A workflow should declare an adaptor for each job.' @@ -29,7 +31,7 @@ const validateAdaptors = async ( // If no adaptor is specified, pass a warning // (The runtime is happy to run without) // This can be overriden from options - if (!options.workflowPath && !hasDeclaredAdaptors) { + if (!isPlan && !hasDeclaredAdaptors) { logger.warn('WARNING: No adaptor provided!'); logger.warn( 'This job will probably fail. 
Pass an adaptor with the -a flag, eg:' diff --git a/packages/cli/test/util/load-plan.test.ts b/packages/cli/test/util/load-plan.test.ts index 4b20c01cf..caadaad71 100644 --- a/packages/cli/test/util/load-plan.test.ts +++ b/packages/cli/test/util/load-plan.test.ts @@ -1,7 +1,7 @@ import test from 'ava'; import mock from 'mock-fs'; import { createMockLogger } from '@openfn/logger'; -import type { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { Job } from '@openfn/lexicon'; import loadPlan from '../../src/util/load-plan'; import { Opts } from '../../src/options'; @@ -11,6 +11,7 @@ const logger = createMockLogger(undefined, { level: 'debug' }); const sampleXPlan = { options: { start: 'a' }, workflow: { + name: 'wf', steps: [{ id: 'a', expression: 'x()' }], }, }; From dc1dbc62d823b766e36a6f273f9bc1401b1d38bb Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 16:37:02 +0000 Subject: [PATCH 055/128] runtime: tweak log output --- packages/runtime/src/execute/expression.ts | 15 +-------------- packages/runtime/src/execute/plan.ts | 1 + packages/runtime/src/execute/step.ts | 2 +- packages/runtime/test/execute/expression.test.ts | 2 +- packages/runtime/test/execute/plan.test.ts | 4 ++-- 5 files changed, 6 insertions(+), 18 deletions(-) diff --git a/packages/runtime/src/execute/expression.ts b/packages/runtime/src/execute/expression.ts index 1c06fe35d..65c941ca3 100644 --- a/packages/runtime/src/execute/expression.ts +++ b/packages/runtime/src/execute/expression.ts @@ -109,7 +109,7 @@ export const wrapOperation = ( // TODO should we warn if an operation does not return state? 
// the trick is saying WHICH operation without source mapping const duration = printDuration(new Date().getTime() - start); - logger.info(`Operation ${name} complete in ${duration}`); + logger.debug(`Operation ${name} complete in ${duration}`); return result; }; }; @@ -138,19 +138,6 @@ const prepareJob = async ( } }; -const assignKeys = ( - source: Record, - target: Record, - keys: string[] -) => { - keys.forEach((k) => { - if (source.hasOwnProperty(k)) { - target[k] = source[k]; - } - }); - return target; -}; - // TODO this is suboptimal and may be slow on large objects // (especially as the result get stringified again downstream) const prepareFinalState = ( diff --git a/packages/runtime/src/execute/plan.ts b/packages/runtime/src/execute/plan.ts index e61c0c19f..ee32431fa 100644 --- a/packages/runtime/src/execute/plan.ts +++ b/packages/runtime/src/execute/plan.ts @@ -26,6 +26,7 @@ const executePlan = async ( logger.error('Aborting'); throw e; } + logger.info(`Executing ${plan.workflow.name || plan.id}`); const { workflow, options } = compiledPlan; diff --git a/packages/runtime/src/execute/step.ts b/packages/runtime/src/execute/step.ts index 2d211fa2e..47ee18168 100644 --- a/packages/runtime/src/execute/step.ts +++ b/packages/runtime/src/execute/step.ts @@ -121,7 +121,7 @@ const executeStep = async ( const timerId = `step-${jobId}`; logger.timer(timerId); - logger.always(`Starting step ${jobName}`); + logger.info(`Starting step ${jobName}`); const startTime = Date.now(); try { diff --git a/packages/runtime/test/execute/expression.test.ts b/packages/runtime/test/execute/expression.test.ts index d400259fb..d159f5266 100644 --- a/packages/runtime/test/execute/expression.test.ts +++ b/packages/runtime/test/execute/expression.test.ts @@ -351,6 +351,6 @@ test('Operations log on start and end', async (t) => { const start = logger._find('debug', /starting operation /i); t.truthy(start); - const end = logger._find('info', /operation 1 complete in \dms/i); + const end = 
logger._find('debug', /operation 1 complete in \dms/i); t.truthy(end); }); diff --git a/packages/runtime/test/execute/plan.test.ts b/packages/runtime/test/execute/plan.test.ts index f38261af6..c7657163f 100644 --- a/packages/runtime/test/execute/plan.test.ts +++ b/packages/runtime/test/execute/plan.test.ts @@ -988,7 +988,7 @@ test('Plans log step ids for each job start and end', async (t) => { ]); const logger = createMockLogger(undefined, { level: 'debug' }); await executePlan(plan, {}, {}, logger); - const start = logger._find('always', /starting step a/i); + const start = logger._find('info', /starting step a/i); t.is(start!.message, 'Starting step a'); const end = logger._find('success', /completed step a/i); @@ -1006,7 +1006,7 @@ test('Plans log step names for each job start and end', async (t) => { const logger = createMockLogger(undefined, { level: 'debug' }); await executePlan(plan, {}, {}, logger); - const start = logger._find('always', /starting step do-the-thing/i); + const start = logger._find('info', /starting step do-the-thing/i); t.is(start!.message, 'Starting step do-the-thing'); const end = logger._find('success', /completed step do-the-thing/i); From 7e71d5d3500d1ab1f590c8354264313e0db2bd3a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 16:43:58 +0000 Subject: [PATCH 056/128] cli: remove log --- packages/cli/src/util/load-plan.ts | 1 - 1 file changed, 1 deletion(-) diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 58c09a6f6..33fe0af8c 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -220,7 +220,6 @@ const importExpressions = async ( configurationStr, log ); - console.log(configString); job.configuration = JSON.parse(configString!); } } From 20b43e46e92ff6e582a29c3c7101c8f6cf5528ef Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Mon, 5 Feb 2024 16:49:27 +0000 Subject: [PATCH 057/128] cli: types --- packages/cli/src/compile/command.ts | 2 -- 
packages/cli/src/util/load-plan.ts | 5 ++++- packages/cli/src/util/map-adaptors-to-monorepo.ts | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/packages/cli/src/compile/command.ts b/packages/cli/src/compile/command.ts index c9b1fdb21..5b9957b45 100644 --- a/packages/cli/src/compile/command.ts +++ b/packages/cli/src/compile/command.ts @@ -10,7 +10,6 @@ export type CompileOptions = Pick< | 'expandAdaptors' | 'ignoreImports' | 'expressionPath' - | 'job' | 'logJson' | 'log' | 'outputPath' @@ -18,7 +17,6 @@ export type CompileOptions = Pick< | 'repoDir' | 'path' | 'useAdaptorsMonorepo' - | 'workflow' > & { repoDir?: string; }; diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 33fe0af8c..018626ac2 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -89,7 +89,7 @@ const maybeAssign = (a: any, b: any, keys: Array) => { const loadExpression = async ( options: Pick, logger: Logger -): Promise => { +): Promise => { const expressionPath = options.expressionPath!; logger.debug(`Loading expression from ${expressionPath}`); @@ -127,6 +127,9 @@ const loadExpression = async ( undefined, `Failed to load the expression from ${expressionPath}` ); + + // This will never execute + return {} as ExecutionPlan; } }; diff --git a/packages/cli/src/util/map-adaptors-to-monorepo.ts b/packages/cli/src/util/map-adaptors-to-monorepo.ts index c72ca3548..4e25d9876 100644 --- a/packages/cli/src/util/map-adaptors-to-monorepo.ts +++ b/packages/cli/src/util/map-adaptors-to-monorepo.ts @@ -41,7 +41,7 @@ export const updatePath = (adaptor: string, repoPath: string, log: Logger) => { export type MapAdaptorsToMonorepoOptions = Pick< Opts, - 'monorepoPath' | 'adaptors' | 'workflow' + 'monorepoPath' | 'adaptors' >; const mapAdaptorsToMonorepo = ( From 39001c277c352ba2b2b56aa270a4aff7d7e48859 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 6 Feb 2024 10:54:34 +0000 Subject: [PATCH 058/128] docs --- 
packages/cli/README.md | 85 +++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 35 deletions(-) diff --git a/packages/cli/README.md b/packages/cli/README.md index ca505dd0b..542860ae9 100644 --- a/packages/cli/README.md +++ b/packages/cli/README.md @@ -15,6 +15,7 @@ The CLI includes: - [Installation](#installation) - [Updating](#updating) +- [Terminology](#terminology) - [Migrating from devtools](#migrating-from-devtools) - [Basic Usage](#basic-usage) - [Advanced Usage](#advanced-usage) @@ -71,16 +72,25 @@ npm uninstall -g @openfn/cli And then re-installing. -## Migrating from devtools +## Terminology -If you're coming to the CLI from the old openfn devtools, here are a couple of key points to be aware of: +The CLI (and the wider OpenFn stack) has some very particular terminology -- The CLI has a shorter, sleeker syntax, so your command should be much shorter -- The CLI will automatically install adaptors for you (with full version control) +- An **Expression** is a string of Javascript (or Javascript-like code) written to be run in the CLI or Lightning. +- A **Job** is an expression plus some metadata required to run it - typically an adaptor and credentials. + The terms Job and Expression are often used interchangeably. +- A **Workflow** is a series of steps to be executed in sequence. Steps are usually Jobs (and so job and step are often used + interchangeably), but can be Triggers. +- An **Execution Plan** is a Workflow plus some options which inform how it should be executed (ie, start node, timeout). + The term "Execution plan" is mostly used internally and not exposed to users, and is usually interchangeable with Workflow. + +Note that an expression is not generally portable (ie, cannot run in other environments) unless it is compiled. +A compiled expression has imports and exports and, so long as packages are available, can run in a simple +JavaScript runtime. 
## Basic Usage -You're probably here to run jobs (expressions) or workflows, which the CLI makes easy: +You're probably here to run Workflows (or individual jobs), which the CLI makes easy: ``` openfn path/to/workflow.json @@ -91,7 +101,7 @@ If running a single job, you MUST specify which adaptor to use. Pass the `-i` flag to auto-install any required adaptors (it's safe to do this redundantly, although the run will be a little slower). -When the finished, the CLI will write the resulting state to disk. By default the CLI will create an `output.json` next to the job file. You can pass a path to output by passing `-o path/to/output.json` and state by adding `-s path/to/state.json`. You can use `-S` and `-O` to pass state through stdin and return the output through stdout. +When finished, the CLI will write the resulting state to disk. By default the CLI will create an `output.json` next to the job file. You can pass a path to output by passing `-o path/to/output.json` and state by adding `-s path/to/state.json`. You can use `-S` and `-O` to pass state through stdin and return the output through stdout. The CLI maintains a repo for auto-installed adaptors. Run `openfn repo list` to see where the repo is, and what's in it. Set the `OPENFN_REPO_DIR` env var to specify the repo folder. When autoinstalling, the CLI will check to see if a matching version is found in the repo. `openfn repo clean` will remove all adaptors from the repo. The repo also includes any documentation and metadata built with the CLI. 
@@ -103,14 +113,16 @@ You can pass `--log info` to get more feedback about what's happening, or `--log ## Advanced Usage -The CLI has a number of commands (the first argument after openfn) +The CLI has a number of commands (the first argument after `openfn`): - execute - run a job -- compile - compile a job to a .js file +- compile - compile a job to a .js file (prints to stdout by default) - docs - show documentation for an adaptor function - repo - manage the repo of installed modules - docgen - generate JSON documentation for an adaptor based on its typescript +For example, `openfn compile job.js -a common` will compile the code at `job.js` with the common adaptor. + If no command is specified, execute will run. To get more information about a command, including usage examples, run `openfn help`, ie, `openfn compile help`. @@ -253,38 +265,43 @@ Pass `--log-json` to the CLI to do this. You can also set the OPENFN_LOG_JSON en ## Workflows -As of v0.0.35 the CLI supports running workflows as well as jobs. - -A workflow is in execution plan for running several jobs in a sequence. It is defined as a JSON structure. +A workflow is an execution plan for running several jobs in a sequence. It is defined as a JSON structure. To see an example workflow, run the test command with `openfn test`. 
-A workflow has a structure like this (better documentation is coming soon): +A workflow has a structure like this: ```json { - "start": "a", // optionally specify the start node (defaults to jobs[0]) - "jobs": [ - { - "id": "a", - "expression": "fn((state) => state)", // code or a path - "adaptor": "@openfn/language-common@1.75", // specifiy the adaptor to use (version optional) - "data": {}, // optionally pre-populate the data object (this will be overriden by keys in in previous state) - "configuration": {}, // Use this to pass credentials - "next": { - // This object defines which jobs to call next - // All edges returning true will run - // If there are no next edges, the workflow will end - "b": true, - "c": { - "condition": "!state.error" // Note that this is an expression, not a function + "workflow": { + "name": "my-workflow", // human readable name used in logging + "steps": [ + { + "name": "a", // human readable name used in logging + "expression": "fn((state) => state)", // code or a path to an expression.js file + "adaptor": "@openfn/language-common@1.7.5", // specifiy the adaptor to use (version optional) + "data": {}, // optionally pre-populate the data object (this will be overriden by keys in in previous state) + "configuration": {}, // Use this to pass credentials + "next": { + // This object defines which jobs to call next + // All edges returning true will run + // If there are no next edges, the workflow will end + "b": true, + "c": { + "condition": "!state.error" // Note that this is a strict Javascript expression, not a function, and has no adaptor support + } } } - } - ] + ] + }, + "options": { + "start": "a" // optionally specify the start node (defaults to steps[0]) + } } ``` +See `packages/lexicon` for type definitions (the workflow format is covered by the `ExecutionPlan` type)/ + ## Compilation The CLI will compile your job code into regular Javascript. 
It does a number of things to make your code robust and portable: @@ -298,8 +315,6 @@ The result of this is a lightweight, modern JS module. It can be executed in any The CLI uses openfn's own runtime to execute jobs in a safe environment. -All jobs which work against `@openfn/core` will work in the new CLI and runtime environment (note: although this is a work in progress and we are actively looking for help to test this!). - If you want to see how the compiler is changing your job, run `openfn compile path/to/job -a ` to return the compiled code to stdout. Add `-o path/to/output.js` to save the result to disk. ## Contributing @@ -355,10 +370,10 @@ export OPENFN_ADAPTORS_REPO=~/repo/openfn/adaptors ### Contributing changes -Open a PR at https://github.com/openfn/kit. Include a changeset and a description of your change. - -See the root readme for more details about changests, +Include a changeset and a description of your change. Run this command and follow the interactive prompt (it's really easy, promise!) ``` - +pnpm changeset ``` + +Commit the changeset files and open a PR at https://github.com/openfn/kit. 
From abc0307c7a577f45c058e47614e99e3fd4d23e42 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 6 Feb 2024 10:54:57 +0000 Subject: [PATCH 059/128] deploy: adjust logging --- .changeset/old-planes-sort.md | 5 +++++ packages/deploy/src/index.ts | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) create mode 100644 .changeset/old-planes-sort.md diff --git a/.changeset/old-planes-sort.md b/.changeset/old-planes-sort.md new file mode 100644 index 000000000..7d0a644c3 --- /dev/null +++ b/.changeset/old-planes-sort.md @@ -0,0 +1,5 @@ +--- +'@openfn/deploy': patch +--- + +Log the result to success (not always) diff --git a/packages/deploy/src/index.ts b/packages/deploy/src/index.ts index 1695f2bd8..ed77619f1 100644 --- a/packages/deploy/src/index.ts +++ b/packages/deploy/src/index.ts @@ -164,7 +164,7 @@ export async function deploy(config: DeployConfig, logger: Logger) { await writeState(config, deployedState); - logger.always('Deployed.'); + logger.success('Deployed'); return true; } From b37ae0f6950ac926d895d143d5739a26dd63175d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 6 Feb 2024 11:04:16 +0000 Subject: [PATCH 060/128] engine: update types --- packages/engine-multi/package.json | 1 + packages/engine-multi/src/api/autoinstall.ts | 11 +- packages/engine-multi/src/api/compile.ts | 15 ++- .../src/api/preload-credentials.ts | 7 +- packages/engine-multi/src/engine.ts | 6 +- packages/engine-multi/src/types.ts | 2 +- .../engine-multi/src/util/create-state.ts | 2 +- .../engine-multi/src/worker/thread/helpers.ts | 3 +- .../engine-multi/src/worker/thread/run.ts | 4 +- pnpm-lock.yaml | 114 ++++++++++++++++-- 10 files changed, 130 insertions(+), 35 deletions(-) diff --git a/packages/engine-multi/package.json b/packages/engine-multi/package.json index abf9e212a..ed7cc0a7c 100644 --- a/packages/engine-multi/package.json +++ b/packages/engine-multi/package.json @@ -16,6 +16,7 @@ "dependencies": { "@openfn/compiler": "workspace:*", "@openfn/language-common": "2.0.0-rc3", + 
"@openfn/lexicon": "workspace:^", "@openfn/logger": "workspace:*", "@openfn/runtime": "workspace:*", "fast-safe-stringify": "^2.1.1" diff --git a/packages/engine-multi/src/api/autoinstall.ts b/packages/engine-multi/src/api/autoinstall.ts index 769c25b36..534a5747f 100644 --- a/packages/engine-multi/src/api/autoinstall.ts +++ b/packages/engine-multi/src/api/autoinstall.ts @@ -1,16 +1,15 @@ import { - ExecutionPlan, ensureRepo, getAliasedName, getNameAndVersion, loadRepoPkg, } from '@openfn/runtime'; import { install as runtimeInstall } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; import { AUTOINSTALL_COMPLETE, AUTOINSTALL_ERROR } from '../events'; import { AutoinstallError } from '../errors'; - -import type { Logger } from '@openfn/logger'; import type { ExecutionContext } from '../types'; // none of these options should be on the plan actually @@ -206,9 +205,9 @@ const isInstalled = async ( export const identifyAdaptors = (plan: ExecutionPlan): Set => { const adaptors = new Set(); - plan.jobs - .filter((job) => job.adaptor) - .forEach((job) => adaptors.add(job.adaptor!)); + plan.workflow.steps + .filter((job) => (job as Job).adaptor) + .forEach((job) => adaptors.add((job as Job).adaptor!)); return adaptors; }; diff --git a/packages/engine-multi/src/api/compile.ts b/packages/engine-multi/src/api/compile.ts index 92830d893..1b465ae54 100644 --- a/packages/engine-multi/src/api/compile.ts +++ b/packages/engine-multi/src/api/compile.ts @@ -1,12 +1,10 @@ -// This function will compile a workflow -// Later we'll add an in-memory cache to prevent the same job -// being compiled twice - -import type { Logger } from '@openfn/logger'; import compile, { preloadAdaptorExports, Options } from '@openfn/compiler'; import { getModulePath } from '@openfn/runtime'; -import { ExecutionContext } from '../types'; +import type { Job } from '@openfn/lexicon'; +import type { Logger } from 
'@openfn/logger'; + import { CompileError } from '../errors'; +import type { ExecutionContext } from '../types'; // TODO this compiler is going to change anyway to run just in time // the runtime will have an onCompile hook @@ -15,8 +13,9 @@ export default async (context: ExecutionContext) => { const { logger, state, options } = context; const { repoDir, noCompile } = options; - if (!noCompile && state.plan?.jobs?.length) { - for (const job of state.plan.jobs) { + if (!noCompile && state.plan?.workflow.steps?.length) { + for (const step of state.plan.workflow.steps) { + const job = step as Job; if (job.expression) { try { job.expression = await compileJob( diff --git a/packages/engine-multi/src/api/preload-credentials.ts b/packages/engine-multi/src/api/preload-credentials.ts index fb9545ff7..08726a313 100644 --- a/packages/engine-multi/src/api/preload-credentials.ts +++ b/packages/engine-multi/src/api/preload-credentials.ts @@ -1,12 +1,13 @@ -import { CompiledExecutionPlan } from '@openfn/runtime'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; export default async ( - plan: CompiledExecutionPlan, + plan: ExecutionPlan, loader: (id: string) => Promise ) => { const loaders: Promise[] = []; - Object.values(plan.jobs).forEach((job) => { + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; if (typeof job.configuration === 'string') { loaders.push( new Promise(async (resolve) => { diff --git a/packages/engine-multi/src/engine.ts b/packages/engine-multi/src/engine.ts index d5162cb2f..d908199bd 100644 --- a/packages/engine-multi/src/engine.ts +++ b/packages/engine-multi/src/engine.ts @@ -1,7 +1,9 @@ import { EventEmitter } from 'node:events'; import path from 'node:path'; import { fileURLToPath } from 'node:url'; -import type { ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan } from '@openfn/lexicon'; +import type { Logger, SanitizePolicies } from '@openfn/logger'; + import { JOB_COMPLETE, JOB_START, @@ -15,10 
+17,8 @@ import execute from './api/execute'; import validateWorker from './api/validate-worker'; import ExecutionContext from './classes/ExecutionContext'; -import type { SanitizePolicies } from '@openfn/logger'; import type { LazyResolvers } from './api'; import type { EngineAPI, EventHandler, WorkflowState } from './types'; -import type { Logger } from '@openfn/logger'; import type { AutoinstallOptions } from './api/autoinstall'; const DEFAULT_RUN_TIMEOUT = 1000 * 60 * 10; // ms diff --git a/packages/engine-multi/src/types.ts b/packages/engine-multi/src/types.ts index 819b3473f..21d7997d8 100644 --- a/packages/engine-multi/src/types.ts +++ b/packages/engine-multi/src/types.ts @@ -1,5 +1,5 @@ import type { Logger, SanitizePolicies } from '@openfn/logger'; -import type { ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan } from '@openfn/lexicon'; import type { EventEmitter } from 'node:events'; import type { ExternalEvents, EventMap } from './events'; diff --git a/packages/engine-multi/src/util/create-state.ts b/packages/engine-multi/src/util/create-state.ts index 7e1c538aa..5e3c98ce5 100644 --- a/packages/engine-multi/src/util/create-state.ts +++ b/packages/engine-multi/src/util/create-state.ts @@ -1,4 +1,4 @@ -import { ExecutionPlan } from '@openfn/runtime'; +import { ExecutionPlan } from '@openfn/lexicon'; import { WorkflowState } from '../types'; export default (plan: ExecutionPlan, options = {}): WorkflowState => ({ diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index cb8a2d417..ad7eecf1f 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -4,14 +4,13 @@ import process from 'node:process'; import stringify from 'fast-safe-stringify'; import createLogger, { SanitizePolicies } from '@openfn/logger'; +import type { JSONLog } from '@openfn/logger'; import * as workerEvents from '../events'; import { 
HANDLED_EXIT_CODE } from '../../events'; import { ExecutionError, ExitError } from '../../errors'; - import { publish } from './runtime'; import serializeError from '../../util/serialize-error'; -import { JSONLog } from '@openfn/logger'; export const createLoggers = ( workflowId: string, diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index b6af70c87..30b92a9c5 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -1,9 +1,9 @@ // This is the run command that will be executed inside the worker thread // Most of the heavy lifting is actually handled by execute import run from '@openfn/runtime'; -import type { ExecutionPlan } from '@openfn/runtime'; -import type { SanitizePolicies } from '@openfn/logger'; import type { NotifyEvents } from '@openfn/runtime'; +import type { ExecutionPlan } from '@openfn/lexicon'; +import type { SanitizePolicies } from '@openfn/logger'; import { register, publish } from './runtime'; import { execute, createLoggers } from './helpers'; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 85cd8237a..693c42c4d 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -104,6 +104,15 @@ importers: specifier: ^3.0.2 version: 3.0.2 + integration-tests/cli/repo: + dependencies: + '@openfn/language-common_1.12.0': + specifier: npm:@openfn/language-common@^1.12.0 + version: /@openfn/language-common@1.12.0 + is-array_1.0.1: + specifier: npm:is-array@^1.0.1 + version: /is-array@1.0.1 + integration-tests/worker: dependencies: '@openfn/engine-multi': @@ -380,6 +389,9 @@ importers: '@openfn/language-common': specifier: 2.0.0-rc3 version: 2.0.0-rc3 + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@openfn/logger': specifier: workspace:* version: link:../logger @@ -1337,6 +1349,11 @@ packages: heap: 0.2.7 dev: false + /@fastify/busboy@2.1.0: + resolution: {integrity: 
sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==} + engines: {node: '>=14'} + dev: false + /@inquirer/checkbox@1.3.5: resolution: {integrity: sha512-ZznkPU+8XgNICKkqaoYENa0vTw9jeToEHYyG5gUKpGmY+4PqPTsvLpSisOt9sukLkYzPRkpSCHREgJLqbCG3Fw==} engines: {node: '>=14.18.0'} @@ -1593,6 +1610,22 @@ packages: semver: 7.5.4 dev: true + /@openfn/language-common@1.12.0: + resolution: {integrity: sha512-JQjJpRNdwG5LMmAIO7P7HLgtHYS0UssoibAhMJOpoHk5/kFLDpH3tywpp40Pai33NMzgofxb5gb0MZTgoEk3fw==} + dependencies: + ajv: 8.12.0 + axios: 1.1.3 + csv-parse: 5.5.3 + csvtojson: 2.0.10 + date-fns: 2.30.0 + http-status-codes: 2.3.0 + jsonpath-plus: 4.0.0 + lodash: 4.17.21 + undici: 5.28.3 + transitivePeerDependencies: + - debug + dev: false + /@openfn/language-common@1.7.5: resolution: {integrity: sha512-QivV3v5Oq5fb4QMopzyqUUh+UGHaFXBdsGr6RCmu6bFnGXdJdcQ7GpGpW5hKNq29CkmE23L/qAna1OLr4rP/0w==} dependencies: @@ -2042,6 +2075,15 @@ packages: clean-stack: 4.2.0 indent-string: 5.0.0 + /ajv@8.12.0: + resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js: 4.4.1 + dev: false + /ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} @@ -2202,7 +2244,6 @@ packages: /asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} - dev: true /atob@2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} @@ -2350,7 +2391,6 @@ packages: proxy-from-env: 1.1.0 transitivePeerDependencies: - debug - dev: true /b4a@1.6.1: resolution: {integrity: sha512-AsKjNhz72yxteo/0EtQEiwkMUgk/tGmycXlbG4g3Ard2/ULtNLUykGOkeK0egmN27h0xMAhb76jYccW+XTBExA==} @@ 
-2431,6 +2471,10 @@ packages: readable-stream: 4.2.0 dev: true + /bluebird@3.7.2: + resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==} + dev: false + /blueimp-md5@2.19.0: resolution: {integrity: sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==} @@ -2823,7 +2867,6 @@ packages: engines: {node: '>= 0.8'} dependencies: delayed-stream: 1.0.0 - dev: true /commander@4.1.1: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} @@ -2966,6 +3009,10 @@ packages: resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} dev: true + /csv-parse@5.5.3: + resolution: {integrity: sha512-v0KW6C0qlZzoGjk6u5tLmVfyZxNgPGXZsWTXshpAgKVGmGXzaVWGdlCFxNx5iuzcXT/oJN1HHM9DZKwtAtYa+A==} + dev: false + /csv-stringify@5.6.5: resolution: {integrity: sha512-PjiQ659aQ+fUTQqSrd1XEDnOr52jh30RBurfzkscaE2tPaFsDH5wOAHJiw8XAHphRknCwMUE9KRayc4K/NbO8A==} dev: true @@ -2980,6 +3027,16 @@ packages: stream-transform: 2.1.3 dev: true + /csvtojson@2.0.10: + resolution: {integrity: sha512-lUWFxGKyhraKCW8Qghz6Z0f2l/PqB1W3AO0HKJzGIQ5JRSlR651ekJDiGJbBT4sRNNv5ddnSGVEnsxP9XRCVpQ==} + engines: {node: '>=4.0.0'} + hasBin: true + dependencies: + bluebird: 3.7.2 + lodash: 4.17.21 + strip-bom: 2.0.0 + dev: false + /currently-unhandled@0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -3120,7 +3177,6 @@ packages: /delayed-stream@1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} - dev: true /delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} @@ -3946,6 +4002,10 @@ packages: - 
supports-color dev: true + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: false + /fast-diff@1.3.0: resolution: {integrity: sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==} @@ -4092,7 +4152,6 @@ packages: peerDependenciesMeta: debug: optional: true - dev: true /for-in@1.0.2: resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} @@ -4123,7 +4182,6 @@ packages: asynckit: 0.4.0 combined-stream: 1.0.8 mime-types: 2.1.35 - dev: true /fragment-cache@0.2.1: resolution: {integrity: sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==} @@ -4499,6 +4557,10 @@ packages: - supports-color dev: true + /http-status-codes@2.3.0: + resolution: {integrity: sha512-RJ8XvFvpPM/Dmc5SV+dC4y5PCeOhT3x1Hq0NU3rjGeg5a/CqlhZ7uudknPwZFz4aeAXDcbAyaeP7GAo9lvngtA==} + dev: false + /https-proxy-agent@5.0.1: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} @@ -4634,6 +4696,10 @@ packages: kind-of: 6.0.3 dev: true + /is-array@1.0.1: + resolution: {integrity: sha512-gxiZ+y/u67AzpeFmAmo4CbtME/bs7J2C++su5zQzvQyaxUqVzkh69DI+jN+KZuSO6JaH6TIIU6M6LhqxMjxEpw==} + dev: false + /is-arrayish@0.2.1: resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} dev: true @@ -4899,6 +4965,10 @@ packages: resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==} engines: {node: '>=12'} + /is-utf8@0.2.1: + resolution: {integrity: sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==} + dev: false + /is-weakref@1.0.2: resolution: {integrity: 
sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} dependencies: @@ -4992,6 +5062,10 @@ packages: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} dev: true + /json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + dev: false + /jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} optionalDependencies: @@ -5006,7 +5080,6 @@ packages: /jsonpath-plus@4.0.0: resolution: {integrity: sha512-e0Jtg4KAzDJKKwzbLaUtinCn0RZseWBVRTRGihSpvFlM3wTR7ExSp+PTdeTsDrLNJUe7L7JYJe8mblHX5SCT6A==} engines: {node: '>=10.0'} - dev: true /jsonpath@1.1.1: resolution: {integrity: sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==} @@ -6313,7 +6386,6 @@ packages: /proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} - dev: true /proxy-middleware@0.15.0: resolution: {integrity: sha512-EGCG8SeoIRVMhsqHQUdDigB2i7qU7fCsWASwn54+nPutYO8n4q6EiwMzyfWlC+dzRFExP+kvcnDFdBDHoZBU7Q==} @@ -6346,7 +6418,6 @@ packages: /punycode@2.3.0: resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} engines: {node: '>=6'} - dev: true /qs@6.11.2: resolution: {integrity: sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==} @@ -6552,6 +6623,11 @@ packages: resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} engines: {node: '>=0.10.0'} + /require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + dev: 
false + /require-main-filename@2.0.0: resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} dev: true @@ -7075,6 +7151,13 @@ packages: dependencies: ansi-regex: 6.0.1 + /strip-bom@2.0.0: + resolution: {integrity: sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==} + engines: {node: '>=0.10.0'} + dependencies: + is-utf8: 0.2.1 + dev: false + /strip-bom@3.0.0: resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} engines: {node: '>=4'} @@ -7628,6 +7711,13 @@ packages: resolution: {integrity: sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==} dev: true + /undici@5.28.3: + resolution: {integrity: sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.1.0 + dev: false + /union-value@1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -7678,6 +7768,12 @@ packages: engines: {node: '>=4'} dev: true + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.0 + dev: false + /urix@0.1.0: resolution: {integrity: sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==} deprecated: Please see https://github.com/lydell/urix#deprecated From bb35156c1f396bbb2f5a39a23c0ab814a65c15d6 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 6 Feb 2024 16:29:55 +0000 Subject: [PATCH 061/128] engine: update names and types This is 90% of the basic rename done. 
Tests may even pass --- packages/engine-multi/src/api/autoinstall.ts | 3 +- packages/engine-multi/src/api/compile.ts | 2 +- packages/engine-multi/src/api/execute.ts | 6 +- packages/engine-multi/src/api/lifecycle.ts | 2 +- .../src/classes/ExecutionContext.ts | 15 +- packages/engine-multi/src/engine.ts | 31 +++- packages/engine-multi/src/test/util.ts | 39 +++-- .../engine-multi/src/test/worker-functions.ts | 5 +- packages/engine-multi/src/types.ts | 28 +--- .../engine-multi/src/util/create-state.ts | 13 +- packages/engine-multi/src/worker/events.ts | 2 - packages/engine-multi/src/worker/pool.ts | 9 +- .../engine-multi/src/worker/thread/helpers.ts | 2 +- .../src/worker/thread/mock-run.ts | 17 ++- .../engine-multi/src/worker/thread/run.ts | 6 +- packages/engine-multi/test/api.test.ts | 47 +++--- .../engine-multi/test/api/autoinstall.test.ts | 81 ++++++---- .../engine-multi/test/api/call-worker.test.ts | 14 +- .../engine-multi/test/api/execute.test.ts | 103 ++++++++----- .../engine-multi/test/api/lifecycle.test.ts | 46 ++++-- .../test/api/preload-credentials.test.ts | 71 +++++---- packages/engine-multi/test/engine.test.ts | 143 +++++++++--------- packages/engine-multi/test/errors.test.ts | 121 ++++++++------- .../engine-multi/test/integration.test.ts | 74 +++++---- packages/engine-multi/test/security.test.ts | 6 +- .../test/worker/mock-worker.test.ts | 28 ++-- .../engine-multi/test/worker/pool.test.ts | 14 +- packages/engine-multi/tsconfig.json | 2 +- 28 files changed, 535 insertions(+), 395 deletions(-) diff --git a/packages/engine-multi/src/api/autoinstall.ts b/packages/engine-multi/src/api/autoinstall.ts index 534a5747f..a20113630 100644 --- a/packages/engine-multi/src/api/autoinstall.ts +++ b/packages/engine-multi/src/api/autoinstall.ts @@ -10,7 +10,7 @@ import type { Logger } from '@openfn/logger'; import { AUTOINSTALL_COMPLETE, AUTOINSTALL_ERROR } from '../events'; import { AutoinstallError } from '../errors'; -import type { ExecutionContext } from '../types'; 
+import ExecutionContext from '../classes/ExecutionContext'; // none of these options should be on the plan actually export type AutoinstallOptions = { @@ -139,6 +139,7 @@ const autoinstall = async (context: ExecutionContext): Promise => { // Write the adaptor version to the context // This is a reasonably accurate, but not totally bulletproof, report + // @ts-ignore context.versions[name] = v; paths[name] = { diff --git a/packages/engine-multi/src/api/compile.ts b/packages/engine-multi/src/api/compile.ts index 1b465ae54..c47660adf 100644 --- a/packages/engine-multi/src/api/compile.ts +++ b/packages/engine-multi/src/api/compile.ts @@ -4,7 +4,7 @@ import type { Job } from '@openfn/lexicon'; import type { Logger } from '@openfn/logger'; import { CompileError } from '../errors'; -import type { ExecutionContext } from '../types'; +import type ExecutionContext from '../classes/ExecutionContext'; // TODO this compiler is going to change anyway to run just in time // the runtime will have an onCompile hook diff --git a/packages/engine-multi/src/api/execute.ts b/packages/engine-multi/src/api/execute.ts index c35085581..933090142 100644 --- a/packages/engine-multi/src/api/execute.ts +++ b/packages/engine-multi/src/api/execute.ts @@ -1,7 +1,7 @@ import { timestamp } from '@openfn/logger'; import * as workerEvents from '../worker/events'; -import type { ExecutionContext } from '../types'; +import type ExecutionContext from '../classes/ExecutionContext'; import autoinstall from './autoinstall'; import compile from './compile'; import { @@ -115,11 +115,9 @@ const execute = async (context: ExecutionContext) => { error(context, { workflowId: state.plan.id, error: evt.error }); }, }; - - // TODO in the new world order, what sorts of errors are being caught here? 
return callWorker( 'run', - [state.plan, runOptions], + [state.plan, state.input || {}, runOptions || {}], events, workerOptions ).catch((e: any) => { diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index 68dcae76a..2c8f84d01 100644 --- a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -1,7 +1,7 @@ // here's here things get a bit complex event wise import * as externalEvents from '../events'; import * as internalEvents from '../worker/events'; -import { ExecutionContext } from '../types'; +import type ExecutionContext from '../classes/ExecutionContext'; export const workflowStart = ( context: ExecutionContext, diff --git a/packages/engine-multi/src/classes/ExecutionContext.ts b/packages/engine-multi/src/classes/ExecutionContext.ts index cf340407e..0e7c70480 100644 --- a/packages/engine-multi/src/classes/ExecutionContext.ts +++ b/packages/engine-multi/src/classes/ExecutionContext.ts @@ -1,13 +1,15 @@ import { EventEmitter } from 'node:events'; +import type { Logger } from '@openfn/logger'; +import loadVersions from '../util/load-versions'; import type { WorkflowState, CallWorker, ExecutionContextConstructor, ExecutionContextOptions, + Versions, } from '../types'; -import type { Logger } from '@openfn/logger'; -import loadVersions from '../util/load-versions'; +import type { ExternalEvents, EventMap } from '../events'; /** * The ExeuctionContext class wraps an event emitter with some useful context @@ -22,7 +24,7 @@ export default class ExecutionContext extends EventEmitter { logger: Logger; callWorker: CallWorker; options: ExecutionContextOptions; - versions = {}; + versions: Versions; constructor({ state, @@ -40,8 +42,11 @@ export default class ExecutionContext extends EventEmitter { // override emit to add the workflowId to all events // @ts-ignore - emit(event: string, payload: any) { - payload.workflowId = this.state.id; + emit( + event: T, + payload: Omit + ): 
boolean { + (payload as EventMap[T]).workflowId = this.state.id; return super.emit(event, payload); } } diff --git a/packages/engine-multi/src/engine.ts b/packages/engine-multi/src/engine.ts index d908199bd..6214c969d 100644 --- a/packages/engine-multi/src/engine.ts +++ b/packages/engine-multi/src/engine.ts @@ -1,7 +1,7 @@ import { EventEmitter } from 'node:events'; import path from 'node:path'; import { fileURLToPath } from 'node:url'; -import type { ExecutionPlan } from '@openfn/lexicon'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; import type { Logger, SanitizePolicies } from '@openfn/logger'; import { @@ -18,7 +18,12 @@ import validateWorker from './api/validate-worker'; import ExecutionContext from './classes/ExecutionContext'; import type { LazyResolvers } from './api'; -import type { EngineAPI, EventHandler, WorkflowState } from './types'; +import type { + EngineAPI, + EventHandler, + RuntimeEngine, + WorkflowState, +} from './types'; import type { AutoinstallOptions } from './api/autoinstall'; const DEFAULT_RUN_TIMEOUT = 1000 * 60 * 10; // ms @@ -82,11 +87,19 @@ export type ExecuteOptions = { sanitize?: SanitizePolicies; }; +export type InternalEngine = RuntimeEngine & { + // TODONot a very good type definition, but it calms the tests down + [other: string]: any; +}; + // This creates the internal API // tbh this is actually the engine, right, this is where stuff happens // the api file is more about the public api I think // TOOD options MUST have a logger -const createEngine = async (options: EngineOptions, workerPath?: string) => { +const createEngine = async ( + options: EngineOptions, + workerPath?: string +): Promise => { const states: Record = {}; const contexts: Record = {}; const deferredListeners: Record[]> = {}; @@ -130,9 +143,9 @@ const createEngine = async (options: EngineOptions, workerPath?: string) => { // create, register and return a state object // should it also load the initial data clip? // when does that happen? 
No, that's inside execute - const registerWorkflow = (plan: ExecutionPlan) => { + const registerWorkflow = (plan: ExecutionPlan, input: State) => { // TODO throw if already registered? - const state = createState(plan); + const state = createState(plan, input); states[state.id] = state; return state; }; @@ -144,13 +157,17 @@ const createEngine = async (options: EngineOptions, workerPath?: string) => { // TODO too much logic in this execute function, needs farming out // I don't mind having a wrapper here but it must be super thin // TODO maybe engine options is too broad? - const executeWrapper = (plan: ExecutionPlan, opts: ExecuteOptions = {}) => { + const executeWrapper = ( + plan: ExecutionPlan, + input: State, + opts: ExecuteOptions = {} + ) => { options.logger!.debug('executing plan ', plan?.id ?? ''); const workflowId = plan.id!; // TODO throw if plan is invalid // Wait, don't throw because the server will die // Maybe return null instead - const state = registerWorkflow(plan); + const state = registerWorkflow(plan, input); const context = new ExecutionContext({ state, diff --git a/packages/engine-multi/src/test/util.ts b/packages/engine-multi/src/test/util.ts index 0777af17a..494c24e27 100644 --- a/packages/engine-multi/src/test/util.ts +++ b/packages/engine-multi/src/test/util.ts @@ -1,15 +1,26 @@ -export const createPlan = (job = {}) => ({ - id: 'wf-1', - jobs: [ - { - id: 'j1', - adaptor: 'common', // not used - credential: {}, // not used - data: {}, // Used if no expression - expression: '(s) => ({ data: { answer: s.data?.input || 42 } })', - _delay: 1, // only used in the mock - - ...job, +import { ExecutionPlan } from '@openfn/lexicon'; + +export const createPlan = (job = {}) => + ({ + id: 'wf-1', + workflow: { + steps: [ + { + id: 'j1', + adaptor: 'common', // not used + configuration: {}, // not used + expression: '(s) => ({ data: { answer: s.data?.input || 42 } })', + + // TODO is this actually used? Should I get rid? 
Underscore + // @ts-ignore + data: {}, // Used if no expression + + // @ts-ignore + _delay: 1, // only used in the mock + + ...job, + }, + ], }, - ], -}); + options: {}, + } as ExecutionPlan); diff --git a/packages/engine-multi/src/test/worker-functions.ts b/packages/engine-multi/src/test/worker-functions.ts index 0c516e07e..f562edbcb 100644 --- a/packages/engine-multi/src/test/worker-functions.ts +++ b/packages/engine-multi/src/test/worker-functions.ts @@ -2,6 +2,7 @@ import path from 'node:path'; import { register, publish, threadId } from '../worker/thread/runtime'; import { increment } from './counter.js'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; const tasks = { test: async (result = 42) => { @@ -25,13 +26,13 @@ const tasks = { processId: async () => process.pid, // very very simple intepretation of a run function // Most tests should use the mock-worker instead - run: async (plan: any, _adaptorPaths: any) => { + run: async (plan: ExecutionPlan, _input: any, _adaptorPaths: any) => { const workflowId = plan.id; publish('worker:workflow-start', { workflowId, }); try { - const [job] = plan.jobs; + const [job] = plan.workflow.steps as Job[]; const result = eval(job.expression); publish('worker:workflow-complete', { workflowId, diff --git a/packages/engine-multi/src/types.ts b/packages/engine-multi/src/types.ts index 21d7997d8..082443885 100644 --- a/packages/engine-multi/src/types.ts +++ b/packages/engine-multi/src/types.ts @@ -1,8 +1,7 @@ import type { Logger, SanitizePolicies } from '@openfn/logger'; -import type { ExecutionPlan } from '@openfn/lexicon'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; import type { EventEmitter } from 'node:events'; -import type { ExternalEvents, EventMap } from './events'; import type { EngineOptions } from './engine'; import type { ExecOpts } from './worker/pool'; @@ -23,9 +22,11 @@ export type WorkflowState = { startTime?: number; duration?: number; error?: string; - result?: any; // State + 
result?: State; + + // Ok this changes quite a bit huh plan: ExecutionPlan; // this doesn't include options - options: any; // TODO this is wf specific options, like logging policy + input: State; }; export type CallWorker = ( @@ -46,27 +47,13 @@ export type ExecutionContextOptions = EngineOptions & { sanitize?: SanitizePolicies; }; -export interface ExecutionContext extends EventEmitter { - constructor(args: ExecutionContextConstructor): ExecutionContext; - options: EngineOptions; - state: WorkflowState; - logger: Logger; - callWorker: CallWorker; - versions: Versions; - - emit( - event: T, - payload: Omit - ): boolean; -} - export interface EngineAPI extends EventEmitter { callWorker: CallWorker; closeWorkers: (instant?: boolean) => void; } export interface RuntimeEngine { - version: string; + version?: string; options: EngineOptions; @@ -75,14 +62,13 @@ export interface RuntimeEngine { execute( plan: ExecutionPlan, + input: State, options?: Partial ): Pick; destroy(): void; on: (evt: string, fn: (...args: any[]) => void) => void; - - // TODO my want some maintenance APIs, like getStatus. 
idk } export type Versions = { diff --git a/packages/engine-multi/src/util/create-state.ts b/packages/engine-multi/src/util/create-state.ts index 5e3c98ce5..3175c92cb 100644 --- a/packages/engine-multi/src/util/create-state.ts +++ b/packages/engine-multi/src/util/create-state.ts @@ -1,22 +1,15 @@ -import { ExecutionPlan } from '@openfn/lexicon'; +import { ExecutionPlan, State } from '@openfn/lexicon'; import { WorkflowState } from '../types'; -export default (plan: ExecutionPlan, options = {}): WorkflowState => ({ +export default (plan: ExecutionPlan, input: State): WorkflowState => ({ id: plan.id!, status: 'pending', plan, + input, threadId: undefined, startTime: undefined, duration: undefined, error: undefined, result: undefined, - - // this is wf-specific options - // but they should be on context, rather than state - options, - // options: { - // ...options, - // repoDir, - // }, }); diff --git a/packages/engine-multi/src/worker/events.ts b/packages/engine-multi/src/worker/events.ts index 698df06eb..eabd8876a 100644 --- a/packages/engine-multi/src/worker/events.ts +++ b/packages/engine-multi/src/worker/events.ts @@ -3,7 +3,6 @@ */ import { JSONLog } from '@openfn/logger'; -import { Versions } from '../types'; // events used by the internal thread runtime @@ -45,7 +44,6 @@ export interface WorkflowCompleteEvent extends InternalEvent { export interface JobStartEvent extends InternalEvent { jobId: string; - versions: Versions; } export interface JobCompleteEvent extends InternalEvent { diff --git a/packages/engine-multi/src/worker/pool.ts b/packages/engine-multi/src/worker/pool.ts index 74b699259..5e94f05b7 100644 --- a/packages/engine-multi/src/worker/pool.ts +++ b/packages/engine-multi/src/worker/pool.ts @@ -120,13 +120,17 @@ function createPool(script: string, options: PoolOptions = {}, logger: Logger) { } }; - const exec = (task: string, args: any[] = [], opts: ExecOpts = {}) => { + const exec = ( + task: string, + args: any[] = [], + opts: ExecOpts = {} + ): 
Promise => { // TODO Throw if destroyed if (destroyed) { throw new Error('Worker destroyed'); } - const promise = new Promise(async (resolve, reject) => { + const promise = new Promise(async (resolve, reject) => { // TODO what should we do if a process in the pool dies, perhaps due to OOM? const onExit = async (code: number) => { if (code !== HANDLED_EXIT_CODE) { @@ -194,7 +198,6 @@ function createPool(script: string, options: PoolOptions = {}, logger: Logger) { } try { - logger.debug(`pool: Running task "${task}" in worker ${worker.pid}`); worker.send({ type: ENGINE_RUN_TASK, task, diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index ad7eecf1f..fb3e4d9ee 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -66,7 +66,7 @@ export const createLoggers = ( // Execute wrapper function export const execute = async ( workflowId: string, - executeFn: () => Promise + executeFn: () => Promise | undefined ) => { const handleError = (err: any) => { publish(workerEvents.ERROR, { diff --git a/packages/engine-multi/src/worker/thread/mock-run.ts b/packages/engine-multi/src/worker/thread/mock-run.ts index c6b29b0d8..194ee5478 100644 --- a/packages/engine-multi/src/worker/thread/mock-run.ts +++ b/packages/engine-multi/src/worker/thread/mock-run.ts @@ -10,6 +10,7 @@ import { register, publish } from './runtime'; import { execute, createLoggers } from './helpers'; import * as workerEvents from '../events'; +import { State } from '@openfn/lexicon'; type MockJob = { id?: string; @@ -25,13 +26,19 @@ type MockJob = { type MockExecutionPlan = { id: string; - jobs: MockJob[]; + workflow: { + steps: MockJob[]; + }; }; // This is a fake runtime handler which will return a fixed value, throw, and // optionally delay -function mockRun(plan: MockExecutionPlan) { - const [job] = plan.jobs; +function mockRun(plan: MockExecutionPlan, input: State, _options = 
{}) { + if (!input) { + throw new Error('no input passed to state'); + } + + const [job] = plan.workflow.steps; const { jobLogger } = createLoggers(plan.id!, 'none', publish); const workflowId = plan.id; return new Promise((resolve) => { @@ -79,6 +86,6 @@ function mockRun(plan: MockExecutionPlan) { } register({ - run: async (plan: MockExecutionPlan, _options?: any) => - execute(plan.id, () => mockRun(plan)), + run: async (plan: MockExecutionPlan, input: State, _options?: any) => + execute(plan.id, () => mockRun(plan, input)), }); diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index 30b92a9c5..dacdfe66e 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -2,7 +2,7 @@ // Most of the heavy lifting is actually handled by execute import run from '@openfn/runtime'; import type { NotifyEvents } from '@openfn/runtime'; -import type { ExecutionPlan } from '@openfn/lexicon'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; import type { SanitizePolicies } from '@openfn/logger'; import { register, publish } from './runtime'; @@ -26,7 +26,7 @@ const eventMap = { }; register({ - run: (plan: ExecutionPlan, runOptions: RunOptions) => { + run: (plan: ExecutionPlan, input: State, runOptions: RunOptions) => { const { adaptorPaths, whitelist, sanitize, statePropsToRemove } = runOptions; const { logger, jobLogger, adaptorLogger } = createLoggers( @@ -73,6 +73,6 @@ register({ }, }; - return execute(plan.id!, () => run(plan, undefined, options)); + return execute(plan.id!, () => run(plan, input, options)); }, }); diff --git a/packages/engine-multi/test/api.test.ts b/packages/engine-multi/test/api.test.ts index f6fd843ef..a797e76dc 100644 --- a/packages/engine-multi/test/api.test.ts +++ b/packages/engine-multi/test/api.test.ts @@ -1,9 +1,10 @@ import test from 'ava'; -import createAPI from '../src/api'; import { createMockLogger } from 
'@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; +import createAPI from '../src/api'; import pkg from '../package.json' assert { type: 'json' }; -import { RuntimeEngine } from '../src/types'; +import type { RuntimeEngine } from '../src/types'; // thes are tests on the public api functions generally // so these are very high level tests and don't allow mock workers or anything @@ -97,17 +98,21 @@ test.serial( }, }); - const plan = { + const plan: ExecutionPlan = { id: 'a', - jobs: [ - { - expression: 'export default [s => s]', - // with no adaptor it shouldn't try to autoinstall - }, - ], + workflow: { + steps: [ + { + expression: 'export default [s => s]', + // with no adaptor it shouldn't try to autoinstall + }, + ], + }, + options: {}, }; - const listener = api.execute(plan); + const state = { x: 1 }; + const listener = api.execute(plan, state); listener.on('workflow-complete', () => { t.pass('workflow completed'); done(); @@ -126,18 +131,22 @@ test.serial('should listen to workflow-complete', async (t) => { }, }); - const plan = { + const plan: ExecutionPlan = { id: 'a', - jobs: [ - { - expression: 'export default [s => s]', - // with no adaptor it shouldn't try to autoinstall - }, - ], + workflow: { + steps: [ + { + expression: 'export default [s => s]', + // with no adaptor it shouldn't try to autoinstall + }, + ], + }, + options: {}, }; + const state = { x: 1 }; + api.execute(plan, state); - api.execute(plan); - api.listen(plan.id, { + api.listen(plan.id!, { 'workflow-complete': () => { t.pass('workflow completed'); done(); diff --git a/packages/engine-multi/test/api/autoinstall.test.ts b/packages/engine-multi/test/api/autoinstall.test.ts index 75e3464d9..defd732bd 100644 --- a/packages/engine-multi/test/api/autoinstall.test.ts +++ b/packages/engine-multi/test/api/autoinstall.test.ts @@ -1,7 +1,11 @@ import test from 'ava'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; 
-import autoinstall, { identifyAdaptors } from '../../src/api/autoinstall'; +import autoinstall, { + AutoinstallOptions, + identifyAdaptors, +} from '../../src/api/autoinstall'; import { AUTOINSTALL_COMPLETE, AUTOINSTALL_ERROR } from '../../src/events'; import ExecutionContext from '../../src/classes/ExecutionContext'; import whitelist from '../../src/whitelist'; @@ -16,7 +20,7 @@ const mockIsInstalled = (pkg: PackageJson) => async (specifier: string) => { return pkg.dependencies.hasOwnProperty(alias); }; -const mockHandleInstall = async (specifier: string): Promise => +const mockHandleInstall = async (_specifier: string): Promise => new Promise((r) => r()).then(); const logger = createMockLogger(); @@ -27,18 +31,23 @@ const wait = (duration = 10) => }); const createContext = ( - autoinstallOpts?, - jobs?: any[], + autoinstallOpts?: AutoinstallOptions, + jobs?: Partial[], customWhitelist?: RegExp[] ) => new ExecutionContext({ state: { id: 'x', status: 'pending', - options: {}, plan: { - jobs: jobs || [{ adaptor: '@openfn/language-common@1.0.0' }], + workflow: { + steps: jobs || [ + { adaptor: '@openfn/language-common@1.0.0', expression: '.' 
}, + ], + }, + options: {}, }, + input: {}, }, logger, // @ts-ignore @@ -47,6 +56,8 @@ const createContext = ( logger, whitelist: customWhitelist || whitelist, repoDir: 'tmp/repo', + + // @ts-ignore autoinstall: autoinstallOpts || { handleInstall: mockHandleInstall, handleIsInstalled: mockIsInstalled, @@ -104,18 +115,24 @@ test('mock install: should return async', async (t) => { }); test('identifyAdaptors: pick out adaptors and remove duplicates', (t) => { - const plan = { - jobs: [ - { - adaptor: 'common@1.0.0', - }, - { - adaptor: 'common@1.0.0', - }, - { - adaptor: 'common@1.0.1', - }, - ], + const plan: ExecutionPlan = { + workflow: { + steps: [ + { + adaptor: 'common@1.0.0', + expression: '.', + }, + { + adaptor: 'common@1.0.0', + expression: '.', + }, + { + adaptor: 'common@1.0.1', + expression: '.', + }, + ], + }, + options: {}, }; const adaptors = identifyAdaptors(plan); t.true(adaptors.size === 2); @@ -160,9 +177,9 @@ test.serial( async (t) => { let callCount = 0; - const installed = {}; + const installed: Record = {}; - const mockInstall = (name) => + const mockInstall = (name: string) => new Promise((resolve) => { installed[name] = true; callCount++; @@ -172,7 +189,7 @@ test.serial( const options = { skipRepoValidation: true, handleInstall: mockInstall, - handleIsInstalled: async (name) => name in installed, + handleIsInstalled: async (name: string) => name in installed, }; const context = createContext(options); @@ -184,11 +201,11 @@ test.serial( ); test.serial('autoinstall: install in sequence', async (t) => { - const installed = {}; + const installed: Record = {}; - const states = {}; + const states: Record = {}; - const mockInstall = (name) => + const mockInstall = (name: string) => new Promise((resolve) => { // Each time install is called, // record the time the call was made @@ -205,7 +222,7 @@ test.serial('autoinstall: install in sequence', async (t) => { skipRepoValidation: true, handleInstall: mockInstall, handleIsInstalled: false, - }; + } as 
any; const c1 = createContext(options, [{ adaptor: '@openfn/language-common@1' }]); const c2 = createContext(options, [{ adaptor: '@openfn/language-common@2' }]); @@ -354,7 +371,7 @@ test.serial('autoinstall: support custom whitelist', async (t) => { }); test.serial('autoinstall: emit an event on completion', async (t) => { - let event; + let event: any; const jobs = [ { adaptor: '@openfn/language-common@1.0.0', @@ -366,7 +383,7 @@ test.serial('autoinstall: emit an event on completion', async (t) => { skipRepoValidation: true, handleInstall: async () => new Promise((done) => setTimeout(done, 50)), handleIsInstalled: async () => false, - }; + } as any; const context = createContext(autoinstallOpts, jobs); context.on(AUTOINSTALL_COMPLETE, (evt) => { @@ -416,14 +433,14 @@ test.serial('autoinstall: throw on error twice if pending', async (t) => { const autoinstallOpts = { handleInstall: mockInstall, handleIsInstalled: mockIsInstalled, - }; + } as any; const context = createContext(autoinstallOpts); autoinstall(context).catch(assertCatches); autoinstall(context).catch(assertCatches); - function assertCatches(e) { + function assertCatches(e: any) { t.is(e.name, 'AutoinstallError'); errCount += 1; if (errCount === 2) { @@ -436,7 +453,7 @@ test.serial('autoinstall: throw on error twice if pending', async (t) => { }); test.serial('autoinstall: emit on error', async (t) => { - let evt; + let evt: any; const mockIsInstalled = async () => false; const mockInstall = async () => { throw new Error('err'); @@ -478,7 +495,7 @@ test.serial('autoinstall: throw twice in a row', async (t) => { const autoinstallOpts = { handleInstall: mockInstall, handleIsInstalled: mockIsInstalled, - }; + } as any; const context = createContext(autoinstallOpts); await t.throwsAsync(() => autoinstall(context), { @@ -503,6 +520,7 @@ test('write versions to context', async (t) => { await autoinstall(context); + // @ts-ignore t.is(context.versions['@openfn/language-common'], '1.0.0'); }); @@ -515,5 +533,6 
@@ test("write versions to context even if we don't install", async (t) => { await autoinstall(context); + // @ts-ignore t.is(context.versions['@openfn/language-common'], '1.0.0'); }); diff --git a/packages/engine-multi/test/api/call-worker.test.ts b/packages/engine-multi/test/api/call-worker.test.ts index 314314527..c5608e05e 100644 --- a/packages/engine-multi/test/api/call-worker.test.ts +++ b/packages/engine-multi/test/api/call-worker.test.ts @@ -40,7 +40,7 @@ test.serial('callWorker should return a custom result', async (t) => { }); test.serial('callWorker should trigger an event callback', async (t) => { - const onCallback = ({ result }) => { + const onCallback = ({ result }: any) => { t.is(result, 11); }; @@ -69,7 +69,7 @@ test.serial( } ); - const onCallback = (evt) => { + const onCallback = () => { t.pass('all ok'); }; @@ -81,13 +81,13 @@ test.serial('callWorker should execute in one process', async (t) => { const ids: number[] = []; await engine.callWorker('test', [], { - 'test-message': ({ processId }) => { + 'test-message': ({ processId }: any) => { ids.push(processId); }, }); await engine.callWorker('test', [], { - 'test-message': ({ processId }) => { + 'test-message': ({ processId }: any) => { ids.push(processId); }, }); @@ -100,13 +100,13 @@ test.serial('callWorker should execute in two different threads', async (t) => { const ids: number[] = []; await engine.callWorker('test', [], { - 'test-message': ({ threadId }) => { + 'test-message': ({ threadId }: any) => { ids.push(threadId); }, }); await engine.callWorker('test', [], { - 'test-message': ({ threadId }) => { + 'test-message': ({ threadId }: any) => { ids.push(threadId); }, }); @@ -167,8 +167,6 @@ test.serial( test.serial( 'By default, worker thread cannot access parent env if env not set (with options arg)', async (t) => { - const defaultAPI = {} as EngineAPI; - const { callWorker, closeWorkers } = initWorkers( workerPath, { maxWorkers: 1 }, diff --git 
a/packages/engine-multi/test/api/execute.test.ts b/packages/engine-multi/test/api/execute.test.ts index deda81d22..e1a1bfe57 100644 --- a/packages/engine-multi/test/api/execute.test.ts +++ b/packages/engine-multi/test/api/execute.test.ts @@ -1,8 +1,9 @@ import path from 'node:path'; import test from 'ava'; +import { createMockLogger } from '@openfn/logger'; + import initWorkers from '../../src/api/call-worker'; import execute from '../../src/api/execute'; -import { createMockLogger } from '@openfn/logger'; import { JOB_COMPLETE, JOB_START, @@ -13,20 +14,27 @@ import { } from '../../src/events'; import ExecutionContext from '../../src/classes/ExecutionContext'; -import type { RTEOptions } from '../../src/api'; -import type { WorkflowState } from '../../src/types'; -import { ExecuteOptions } from '../../src/engine'; +import type { ExecutionContextOptions, WorkflowState } from '../../src/types'; +import type { ExecuteOptions, EngineOptions } from '../../src/engine'; const workerPath = path.resolve('dist/test/mock-run.js'); -const createContext = ({ state, options }) => { +const createContext = ({ + state, + options, +}: { + state: Partial; + options: Partial; +}) => { const logger = createMockLogger(); const { callWorker } = initWorkers(workerPath, {}, logger); const ctx = new ExecutionContext({ + // @ts-ignore state: state || { workflowId: 'x' }, logger, callWorker, + // @ts-ignore options, }); @@ -37,12 +45,15 @@ const createContext = ({ state, options }) => { const plan = { id: 'x', - jobs: [ - { - id: 'j', - expression: '() => 22', - }, - ], + workflow: { + steps: [ + { + id: 'j', + expression: '() => 22', + }, + ], + }, + options: {}, }; const options = { @@ -51,13 +62,13 @@ const options = { handleInstall: async () => {}, handleIsInstalled: async () => false, }, -} as RTEOptions; +} as Partial; test.serial('execute should run a job and return the result', async (t) => { const state = { id: 'x', plan, - } as WorkflowState; + } as Partial; const context = 
createContext({ state, options }); @@ -80,7 +91,7 @@ test.serial('should emit a workflow-start event', async (t) => { await execute(context); // No need to do a deep test of the event payload here - t.is(workflowStart.workflowId, 'x'); + t.is(workflowStart!.workflowId!, 'x'); }); test.serial('should emit a log event with the memory limit', async (t) => { @@ -89,7 +100,7 @@ test.serial('should emit a log event with the memory limit', async (t) => { plan, } as WorkflowState; - const logs = []; + const logs: any[] = []; const context = createContext({ state, @@ -122,8 +133,8 @@ test.serial('should emit a workflow-complete event', async (t) => { await execute(context); - t.is(workflowComplete.workflowId, 'x'); - t.is(workflowComplete.state, 22); + t.is(workflowComplete!.workflowId, 'x'); + t.is(workflowComplete!.state, 22); }); test.serial('should emit a job-start event', async (t) => { @@ -132,7 +143,7 @@ test.serial('should emit a job-start event', async (t) => { plan, } as WorkflowState; - let event; + let event: any; const context = createContext({ state, options }); @@ -152,7 +163,7 @@ test.serial('should emit a job-complete event', async (t) => { plan, } as WorkflowState; - let event; + let event: any; const context = createContext({ state, options }); @@ -166,19 +177,22 @@ test.serial('should emit a job-complete event', async (t) => { }); test.serial('should emit a log event', async (t) => { - let workflowLog; + let workflowLog: any; const plan = { id: 'y', - jobs: [ - { - expression: '() => { console.log("hi"); return 33 }', - }, - ], + workflow: { + steps: [ + { + expression: '() => { console.log("hi"); return 33 }', + }, + ], + }, + options: {}, }; const state = { id: 'y', plan, - } as WorkflowState; + } as Partial; const context = createContext({ state, options }); context.once(WORKFLOW_LOG, (evt) => (workflowLog = evt)); @@ -191,14 +205,16 @@ test.serial('should emit a log event', async (t) => { }); test.serial('log events are timestamped in hr time', async 
(t) => { - let workflowLog; + let workflowLog: any; const plan = { id: 'y', - jobs: [ - { - expression: '() => { console.log("hi"); return 33 }', - }, - ], + workflow: { + steps: [ + { + expression: '() => { console.log("hi"); return 33 }', + }, + ], + }, }; const state = { id: 'y', @@ -220,11 +236,13 @@ test.serial('should emit error on timeout', async (t) => { const state = { id: 'zz', plan: { - jobs: [ - { - expression: '() => { while(true) {} }', - }, - ], + workflow: { + steps: [ + { + expression: '() => { while(true) {} }', + }, + ], + }, }, } as WorkflowState; @@ -233,7 +251,7 @@ test.serial('should emit error on timeout', async (t) => { runTimeoutMs: 10, }; - let event; + let event: any; const context = createContext({ state, options: wfOptions }); @@ -280,7 +298,9 @@ test.serial('should emit CompileError if compilation fails', async (t) => { const state = { id: 'baa', plan: { - jobs: [{ id: 'j', expression: 'la la la' }], + workflow: { + steps: [{ id: 'j', expression: 'la la la' }], + }, }, } as WorkflowState; const context = createContext({ state, options: {} }); @@ -299,7 +319,7 @@ test.serial('should emit CompileError if compilation fails', async (t) => { }); test.serial('should stringify the whitelist array', async (t) => { - let passedOptions; + let passedOptions: any; const state = { id: 'x', @@ -312,8 +332,9 @@ test.serial('should stringify the whitelist array', async (t) => { }; const context = createContext({ state, options: opts }); + // @ts-ignore context.callWorker = (_command, args) => { - passedOptions = args[1]; + passedOptions = args[2]; }; await execute(context); diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index b6c0566a2..c5892d890 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -12,12 +12,15 @@ import { } from '../../src/api/lifecycle'; import { WorkflowState } from '../../src/types'; import 
ExecutionContext from '../../src/classes/ExecutionContext'; +import * as w from '../../src/worker/events'; const createContext = (workflowId: string, state?: any) => new ExecutionContext({ state: state || { id: workflowId }, logger: createMockLogger(), + // @ts-ignore callWorker: () => {}, + // @ts-ignore options: {}, }); @@ -26,10 +29,17 @@ test(`workflowStart: emits ${e.WORKFLOW_START}`, (t) => { const workflowId = 'a'; const context = createContext(workflowId); - const event = { workflowId, threadId: '123' }; + const event: w.WorkflowStartEvent = { + type: w.WORKFLOW_START, + workflowId, + threadId: '123', + }; context.on(e.WORKFLOW_START, (evt) => { - t.deepEqual(evt, event); + t.deepEqual(evt, { + workflowId, + threadId: '123', + }); done(); }); @@ -41,7 +51,11 @@ test('onWorkflowStart: updates state', (t) => { const workflowId = 'a'; const context = createContext(workflowId); - const event = { workflowId, threadId: '123' }; + const event: w.WorkflowStartEvent = { + type: w.WORKFLOW_START, + workflowId, + threadId: '123', + }; workflowStart(context, event); @@ -66,7 +80,12 @@ test(`workflowComplete: emits ${e.WORKFLOW_COMPLETE}`, (t) => { } as WorkflowState; const context = createContext(workflowId, state); - const event = { workflowId, state: result, threadId: '1' }; + const event: w.WorkflowCompleteEvent = { + type: w.WORKFLOW_COMPLETE, + workflowId, + state: result, + threadId: '1', + }; context.on(e.WORKFLOW_COMPLETE, (evt) => { t.is(evt.workflowId, workflowId); @@ -88,7 +107,12 @@ test('workflowComplete: updates state', (t) => { startTime: Date.now() - 1000, } as WorkflowState; const context = createContext(workflowId, state); - const event = { workflowId, state: result, threadId: '1' }; + const event: w.WorkflowCompleteEvent = { + type: w.WORKFLOW_COMPLETE, + workflowId, + state: result, + threadId: '1', + }; workflowComplete(context, event); @@ -108,7 +132,8 @@ test(`job-start: emits ${e.JOB_START}`, (t) => { const context = createContext(workflowId, 
state); - const event = { + const event: w.JobStartEvent = { + type: w.JOB_START, workflowId, threadId: '1', jobId: 'j', @@ -136,7 +161,8 @@ test(`job-complete: emits ${e.JOB_COMPLETE}`, (t) => { const context = createContext(workflowId, state); - const event = { + const event: w.JobCompleteEvent = { + type: w.JOB_COMPLETE, workflowId, threadId: '1', jobId: 'j', @@ -167,14 +193,15 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { const context = createContext(workflowId); - const event = { + const event: w.LogEvent = { + type: w.LOG, workflowId, threadId: 'a', log: { level: 'info', name: 'job', message: JSON.stringify(['oh hai']), - time: Date.now() - 100, + time: (Date.now() - 100).toString(), }, }; @@ -206,6 +233,7 @@ test(`error: emits ${e.WORKFLOW_ERROR}`, (t) => { const err = new Error('test'); + // @ts-ignore error(context, { error: err }); }); }); diff --git a/packages/engine-multi/test/api/preload-credentials.test.ts b/packages/engine-multi/test/api/preload-credentials.test.ts index e31c04191..1a822fd71 100644 --- a/packages/engine-multi/test/api/preload-credentials.test.ts +++ b/packages/engine-multi/test/api/preload-credentials.test.ts @@ -1,6 +1,7 @@ import test from 'ava'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; + import preloadCredentials from '../../src/api/preload-credentials'; -import { CompiledExecutionPlan } from '@openfn/runtime'; // Not very good test coverage test('handle a plan with no credentials', async (t) => { @@ -13,18 +14,21 @@ test('handle a plan with no credentials', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: '.', - }, - { - expression: '.', - }, - { - expression: '.', - }, - ], - } as unknown as CompiledExecutionPlan; + workflow: { + steps: [ + { + expression: '.', + }, + { + expression: '.', + }, + { + expression: '.', + }, + ], + }, + options: {}, + } as ExecutionPlan; const planCopy = JSON.parse(JSON.stringify(plan)); const result = await preloadCredentials(plan, loader); @@ -43,26 +47,29 @@ 
test('handle a plan with credentials', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: '.', - configuration: 'a', - }, - { - expression: '.', - configuration: 'b', - }, - { - expression: '.', - configuration: 'c', - }, - ], - } as unknown as CompiledExecutionPlan; + workflow: { + steps: [ + { + expression: '.', + configuration: 'a', + }, + { + expression: '.', + configuration: 'b', + }, + { + expression: '.', + configuration: 'c', + }, + ], + }, + options: {}, + } as ExecutionPlan; - const result = await preloadCredentials(plan, loader); + await preloadCredentials(plan, loader); t.is(timesCalled, 3); - t.is(plan.jobs[0].configuration, 'loaded-a'); - t.is(plan.jobs[1].configuration, 'loaded-b'); - t.is(plan.jobs[2].configuration, 'loaded-c'); + t.is((plan.workflow.steps[0] as Job).configuration, 'loaded-a'); + t.is((plan.workflow.steps[1] as Job).configuration, 'loaded-b'); + t.is((plan.workflow.steps[2] as Job).configuration, 'loaded-c'); }); diff --git a/packages/engine-multi/test/engine.test.ts b/packages/engine-multi/test/engine.test.ts index ec8c2c062..00da5c28d 100644 --- a/packages/engine-multi/test/engine.test.ts +++ b/packages/engine-multi/test/engine.test.ts @@ -1,12 +1,10 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; -import createEngine, { ExecuteOptions } from '../src/engine'; +import createEngine, { ExecuteOptions, InternalEngine } from '../src/engine'; import * as e from '../src/events'; -import { ExecutionPlan } from '@openfn/runtime'; - -// TOOD this becomes low level tests on the internal engine api const logger = createMockLogger('', { level: 'debug' }); @@ -23,7 +21,19 @@ const options = { }, }; -let engine; +const createPlan = (expression: string = '.', id = 'a') => ({ + id, + workflow: { + steps: [ + { + expression, + }, + ], + }, + options: {}, +}); + +let engine: InternalEngine; test.afterEach(async () => { 
logger._reset(); @@ -82,22 +92,17 @@ test.serial( const p = path.resolve('dist/test/worker-functions.js'); engine = await createEngine(options, p); - const plan = { - id: 'a', - jobs: [ - { - expression: '22', - }, - ], - }; + const plan = createPlan('22'); - engine.execute(plan).on(e.WORKFLOW_COMPLETE, ({ state, threadId }) => { - t.is(state, 22); - t.truthy(threadId); // proves (sort of) that this has run in a worker + engine + .execute(plan, {}) + .on(e.WORKFLOW_COMPLETE, ({ state, threadId }) => { + t.is(state, 22); + t.truthy(threadId); // proves (sort of) that this has run in a worker - // Apparently engine.destroy won't resolve if we return immediately - setTimeout(done, 1); - }); + // Apparently engine.destroy won't resolve if we return immediately + setTimeout(done, 1); + }); }); } ); @@ -107,16 +112,9 @@ test.serial('execute does not return internal state stuff', async (t) => { const p = path.resolve('dist/test/worker-functions.js'); engine = await createEngine(options, p); - const plan = { - id: 'a', - jobs: [ - { - expression: '22', - }, - ], - }; + const plan = createPlan(); - const result = engine.execute(plan, {}); + const result: any = engine.execute(plan, {}); // Execute returns an event listener t.truthy(result.on); t.truthy(result.once); @@ -132,8 +130,7 @@ test.serial('execute does not return internal state stuff', async (t) => { t.falsy(result['callWorker']); t.falsy(result['options']); - done(); - // TODO is this still running? Does it matter? 
+ result.then(done); }); }); @@ -142,17 +139,13 @@ test.serial('listen to workflow-complete', async (t) => { const p = path.resolve('dist/test/worker-functions.js'); engine = await createEngine(options, p); - const plan = { - id: 'a', - jobs: [ - { - expression: '33', - }, - ], - }; + const plan = createPlan('33'); engine.listen(plan.id, { - [e.WORKFLOW_COMPLETE]: ({ state, threadId }) => { + [e.WORKFLOW_COMPLETE]: ({ + state, + threadId, + }: e.WorkflowCompletePayload) => { t.is(state, 33); t.truthy(threadId); // proves (sort of) that this has run in a worker @@ -160,7 +153,7 @@ test.serial('listen to workflow-complete', async (t) => { setTimeout(done, 1); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); }); @@ -171,22 +164,25 @@ test.serial('call listen before execute', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: '34', - }, - ], + workflow: { + steps: [ + { + expression: '34', + }, + ], + }, + options: {}, }; engine.listen(plan.id, { - [e.WORKFLOW_COMPLETE]: ({ state }) => { + [e.WORKFLOW_COMPLETE]: ({ state }: e.WorkflowCompletePayload) => { t.is(state, 34); // Apparently engine.destroy won't resolve if we return immediately setTimeout(done, 1); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); }); @@ -197,21 +193,24 @@ test.serial('catch and emit errors', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: 'throw new Error("test")', - }, - ], + workflow: { + steps: [ + { + expression: 'throw new Error("test")', + }, + ], + }, + options: {}, }; engine.listen(plan.id, { - [e.WORKFLOW_ERROR]: ({ message }) => { + [e.WORKFLOW_ERROR]: ({ message }: e.WorkflowErrorPayload) => { t.is(message, 'test'); done(); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); }); @@ -224,26 +223,31 @@ test.serial( const plan = { id: 'a', - jobs: [ - { - expression: 'while(true) {}', - }, - ], + workflow: { + steps: [ + { + expression: 'while(true) {}', + }, + ], + }, + options: {}, }; + // TODO Now then - 
this doesn't seem right + // the timeout should be on the xplan const opts: ExecuteOptions = { runTimeoutMs: 10, }; engine.listen(plan.id, { - [e.WORKFLOW_ERROR]: ({ message, type }) => { + [e.WORKFLOW_ERROR]: ({ message, type }: e.WorkflowErrorPayload) => { t.is(type, 'TimeoutError'); t.regex(message, /failed to return within 10ms/); done(); }, }); - engine.execute(plan, opts); + engine.execute(plan, {}, opts); }); } ); @@ -263,22 +267,25 @@ test.serial( const plan = { id: 'a', - jobs: [ - { - expression: 'while(true) {}', - }, - ], + workflow: { + steps: [ + { + expression: 'while(true) {}', + }, + ], + }, + options: {}, }; engine.listen(plan.id, { - [e.WORKFLOW_ERROR]: ({ message, type }) => { + [e.WORKFLOW_ERROR]: ({ message, type }: e.WorkflowErrorPayload) => { t.is(type, 'TimeoutError'); t.regex(message, /failed to return within 22ms/); done(); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); } ); diff --git a/packages/engine-multi/test/errors.test.ts b/packages/engine-multi/test/errors.test.ts index e9202584b..714d64ce6 100644 --- a/packages/engine-multi/test/errors.test.ts +++ b/packages/engine-multi/test/errors.test.ts @@ -1,11 +1,12 @@ import test from 'ava'; import path from 'node:path'; +import { createMockLogger } from '@openfn/logger'; import createEngine, { EngineOptions } from '../src/engine'; -import { createMockLogger } from '@openfn/logger'; import { WORKFLOW_ERROR } from '../src/events'; +import type { RuntimeEngine } from '../src/types'; -let engine; +let engine: RuntimeEngine; test.before(async () => { const logger = createMockLogger('', { level: 'debug' }); @@ -30,16 +31,19 @@ test.serial('syntax error: missing bracket', (t) => { return new Promise((done) => { const plan = { id: 'a', - jobs: [ - { - id: 'x', - // This is subtle syntax error - expression: 'fn((s) => { return s )', - }, - ], + workflow: { + steps: [ + { + id: 'x', + // This is subtle syntax error + expression: 'fn((s) => { return s )', + }, + ], + }, + options: 
{}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'CompileError'); // compilation happens in the main thread t.is(evt.threadId, '-'); @@ -53,16 +57,19 @@ test.serial('syntax error: illegal throw', (t) => { return new Promise((done) => { const plan = { id: 'b', - jobs: [ - { - id: 'z', - // This is also subtle syntax error - expression: 'fn(() => throw "e")', - }, - ], + workflow: { + steps: [ + { + id: 'z', + // This is also subtle syntax error + expression: 'fn(() => throw "e")', + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'CompileError'); // compilation happens in the main thread t.is(evt.threadId, '-'); @@ -75,21 +82,24 @@ test.serial('syntax error: illegal throw', (t) => { test.serial('thread oom error', (t) => { return new Promise((done) => { const plan = { - id: 'a', - jobs: [ - { - expression: `export default [(s) => { + id: 'c', + workflow: { + steps: [ + { + expression: `export default [(s) => { s.a = []; while(true) { s.a.push(new Array(1e6).fill("oom")); } return s; }]`, - }, - ], + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'OOMError'); t.is(evt.severity, 'kill'); t.is(evt.message, 'Run exceeded maximum memory usage'); @@ -102,21 +112,24 @@ test.serial('thread oom error', (t) => { test.serial.skip('vm oom error', (t) => { return new Promise((done) => { const plan = { - id: 'b', - jobs: [ - { - expression: `export default [(s) => { + id: 'd', + workflow: { + steps: [ + { + expression: `export default [(s) => { s.a = []; while(true) { s.a.push(new Array(1e8).fill("oom")); } return s; }]`, - }, - ], + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { 
t.is(evt.type, 'OOMError'); t.is(evt.severity, 'kill'); t.is(evt.message, 'Run exceeded maximum memory usage'); @@ -131,21 +144,24 @@ test.serial.skip('vm oom error', (t) => { test.serial.skip('execution error from async code', (t) => { return new Promise((done) => { const plan = { - id: 'a', - jobs: [ - { - // this error will throw within the promise, and so before the job completes - // But REALLY naughty code could throw after the job has finished - // In which case it'll be ignored - // Also note that the wrapping promise will never resolve - expression: `export default [(s) => new Promise((r) => { + id: 'e', + workflow: { + steps: [ + { + // this error will throw within the promise, and so before the job completes + // But REALLY naughty code could throw after the job has finished + // In which case it'll be ignored + // Also note that the wrapping promise will never resolve + expression: `export default [(s) => new Promise((r) => { setTimeout(() => { throw new Error(\"e1324\"); r() }, 10) })]`, - }, - ], + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'ExecutionError'); t.is(evt.severity, 'crash'); @@ -157,16 +173,19 @@ test.serial.skip('execution error from async code', (t) => { test.serial('emit a crash error on process.exit()', (t) => { return new Promise((done) => { const plan = { - id: 'z', - jobs: [ - { - adaptor: '@openfn/helper@1.0.0', - expression: 'export default [exit()]', - }, - ], + id: 'f', + workflow: { + steps: [ + { + adaptor: '@openfn/helper@1.0.0', + expression: 'export default [exit()]', + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'ExitError'); t.is(evt.severity, 'crash'); t.is(evt.message, 'Process exited with code: 42'); diff --git a/packages/engine-multi/test/integration.test.ts 
b/packages/engine-multi/test/integration.test.ts index fed31f5b5..e0f209cca 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -1,10 +1,15 @@ import test from 'ava'; import path from 'node:path'; -import createAPI from '../src/api'; import { createMockLogger } from '@openfn/logger'; +import createAPI from '../src/api'; +import type { RuntimeEngine } from '../src'; +import { ExecutionPlan } from '@openfn/lexicon'; + const logger = createMockLogger(); -let api; +let api: RuntimeEngine; + +const emptyState = {}; test.afterEach(() => { logger._reset(); @@ -23,15 +28,19 @@ const withFn = `function fn(f) { return (s) => f(s) } let idgen = 0; -const createPlan = (jobs?: any[]) => ({ - id: `${++idgen}`, - jobs: jobs || [ - { - id: 'j1', - expression: 'export default [s => s]', +const createPlan = (jobs?: any[]) => + ({ + id: `${++idgen}`, + workflow: { + steps: jobs || [ + { + id: 'j1', + expression: 'export default [s => s]', + }, + ], }, - ], -}); + options: {}, + } as ExecutionPlan); test.serial('trigger workflow-start', (t) => { return new Promise(async (done) => { @@ -44,7 +53,7 @@ test.serial('trigger workflow-start', (t) => { const plan = createPlan(); - api.execute(plan).on('workflow-start', (evt) => { + api.execute(plan, emptyState).on('workflow-start', (evt) => { t.is(evt.workflowId, plan.id); t.truthy(evt.threadId); t.pass('workflow started'); @@ -64,7 +73,7 @@ test.serial('trigger job-start', (t) => { const plan = createPlan(); - api.execute(plan).on('job-start', (e) => { + api.execute(plan, emptyState).on('job-start', (e) => { t.is(e.workflowId, '2'); t.is(e.jobId, 'j1'); t.truthy(e.threadId); @@ -86,7 +95,7 @@ test.serial('trigger job-complete', (t) => { const plan = createPlan(); - api.execute(plan).on('job-complete', (evt) => { + api.execute(plan, emptyState).on('job-complete', (evt) => { t.deepEqual(evt.next, []); t.log('duration:', evt.duration); // Very lenient duration test - this often 
comes in around 200ms in CI @@ -115,7 +124,7 @@ test.serial('trigger workflow-complete', (t) => { const plan = createPlan(); - api.execute(plan).on('workflow-complete', (evt) => { + api.execute(plan, emptyState).on('workflow-complete', (evt) => { t.falsy(evt.state.errors); t.is(evt.workflowId, plan.id); @@ -142,7 +151,7 @@ test.serial('trigger workflow-log for job logs', (t) => { let didLog = false; - api.execute(plan).on('workflow-log', (evt) => { + api.execute(plan, emptyState).on('workflow-log', (evt) => { if (evt.name === 'JOB') { didLog = true; t.deepEqual(evt.message, JSON.stringify(['hola'])); @@ -150,7 +159,7 @@ test.serial('trigger workflow-log for job logs', (t) => { } }); - api.execute(plan).on('workflow-complete', (evt) => { + api.execute(plan, emptyState).on('workflow-complete', (evt) => { t.true(didLog); t.falsy(evt.state.errors); done(); @@ -170,7 +179,7 @@ test.serial('log errors', (t) => { }, ]); - api.execute(plan).on('workflow-log', (evt) => { + api.execute(plan, emptyState).on('workflow-log', (evt) => { if (evt.name === 'JOB') { t.log(evt); t.deepEqual( @@ -186,7 +195,7 @@ test.serial('log errors', (t) => { } }); - api.execute(plan).on('workflow-complete', (evt) => { + api.execute(plan, emptyState).on('workflow-complete', () => { done(); }); }); @@ -208,7 +217,7 @@ test.serial('trigger workflow-log for adaptor logs', (t) => { }, ]); - api.execute(plan).on('workflow-log', (evt) => { + api.execute(plan, emptyState).on('workflow-log', (evt) => { if (evt.name === 'ADA') { t.deepEqual(evt.message, JSON.stringify(['hola'])); t.pass('workflow logged'); @@ -230,7 +239,7 @@ test.serial('compile and run', (t) => { }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state.data, 42); done(); }); @@ -249,7 +258,7 @@ test.serial('run without error if no state is returned', (t) => { }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + 
api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.falsy(state); // Ensure there are no error logs @@ -272,7 +281,7 @@ test.serial('errors get nicely serialized', (t) => { }, ]); - api.execute(plan).on('job-error', (evt) => { + api.execute(plan, emptyState).on('job-error', (evt) => { t.is(evt.error.type, 'TypeError'); t.is(evt.error.severity, 'fail'); t.is( @@ -284,7 +293,9 @@ test.serial('errors get nicely serialized', (t) => { }); }); -test.serial( +// TODO I need to get options working before I can fix this one +// statePropsToRemove needs to be fed through to the actual runtime on the option key +test.serial.skip( 'execute should remove the configuration and response keys', (t) => { return new Promise(async (done) => { @@ -299,7 +310,7 @@ test.serial( }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state, { a: 1 }); done(); }); @@ -307,7 +318,8 @@ test.serial( } ); -test.serial('use custom state-props-to-remove', (t) => { +// TODO ditto +test.serial.skip('use custom state-props-to-remove', (t) => { return new Promise(async (done) => { api = await createAPI({ logger, @@ -321,7 +333,7 @@ test.serial('use custom state-props-to-remove', (t) => { }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state, { configuration: {}, response: {} }); done(); }); @@ -354,7 +366,7 @@ test.serial('evaluate conditional edges', (t) => { const plan = createPlan(jobs); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state.data, 'b'); done(); }); @@ -411,17 +423,15 @@ test.serial('accept initial state', (t) => { const plan = createPlan(); - // important! 
The runtime must use both x and y as initial state - // if we run the runtime in strict mode, x will be ignored - plan.initialState = { + const input = { x: 1, data: { y: 1, }, }; - api.execute(plan).on('workflow-complete', ({ state }) => { - t.deepEqual(state, plan.initialState); + api.execute(plan, input).on('workflow-complete', ({ state }) => { + t.deepEqual(state, input); done(); }); }); diff --git a/packages/engine-multi/test/security.test.ts b/packages/engine-multi/test/security.test.ts index 45b42634d..8c760d1f0 100644 --- a/packages/engine-multi/test/security.test.ts +++ b/packages/engine-multi/test/security.test.ts @@ -8,7 +8,7 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; -import createEngine from '../src/engine'; +import createEngine, { InternalEngine } from '../src/engine'; const logger = createMockLogger('', { level: 'debug' }); @@ -20,7 +20,7 @@ const options = { maxWorkers: 1, }; -let engine; +let engine: InternalEngine; test.before(async () => { engine = await createEngine( @@ -43,11 +43,13 @@ test.serial('parent env is hidden from sandbox', async (t) => { }); test.serial('sandbox does not share a global scope', async (t) => { + // @ts-ignore t.is(global.x, undefined); // Set a global inside the first task await engine.callWorker('setGlobalX', [9]); + // @ts-ignore // (this should not affect us outside) t.is(global.x, undefined); diff --git a/packages/engine-multi/test/worker/mock-worker.test.ts b/packages/engine-multi/test/worker/mock-worker.test.ts index 679f663a1..2947ae0c0 100644 --- a/packages/engine-multi/test/worker/mock-worker.test.ts +++ b/packages/engine-multi/test/worker/mock-worker.test.ts @@ -26,7 +26,7 @@ const workers = createPool( test('execute a mock plan inside a worker thread', async (t) => { const plan = createPlan(); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 42 } 
}); }); @@ -35,7 +35,7 @@ test('execute a mock plan with data', async (t) => { id: 'j2', data: { input: 44 }, }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 44 } }); }); @@ -44,7 +44,7 @@ test('execute a mock plan with an expression', async (t) => { id: 'j2', expression: '() => ({ data: { answer: 46 } })', }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 46 } }); }); @@ -54,7 +54,7 @@ test('execute a mock plan with an expression which uses state', async (t) => { data: { input: 2 }, expression: '(s) => ({ data: { answer: s.data.input * 2 } })', }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 4 } }); }); @@ -68,7 +68,7 @@ test('execute a mock plan with a promise expression', async (t) => { }, 1); })`, }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 46 } }); }); @@ -78,16 +78,16 @@ test('expression state overrides data', async (t) => { data: { answer: 44 }, expression: '() => ({ data: { agent: "007" } })', }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { agent: '007' } }); }); test('write an exception to state', async (t) => { const plan = createPlan({ id: 'j2', - expression: 'ƸӜƷ', // it's a butterfly, obviously (and mmore importantly, invalid JSON) + expression: 'ƸӜƷ', // it's a butterfly, obviously (and more importantly, invalid JSON) }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.truthy(result.data); t.truthy(result.error); }); @@ -98,7 +98,7 @@ test('execute a mock plan with delay', async (t) => { id: 
'j1', _delay: 50, }); - await workers.exec('run', [plan]); + await workers.exec('run', [plan, {}]); const elapsed = new Date().getTime() - start; t.log(elapsed); t.assert(elapsed > 40); @@ -108,7 +108,7 @@ test('Publish workflow-start event', async (t) => { const plan = createPlan(); plan.id = 'xx'; let didFire = false; - await workers.exec('run', [plan], { + await workers.exec('run', [plan, {}], { on: ({ type }) => { if (type === e.WORKFLOW_START) { didFire = true; @@ -122,7 +122,7 @@ test('Publish workflow-complete event with state', async (t) => { const plan = createPlan(); let didFire = false; let state; - await workers.exec('run', [plan], { + await workers.exec('run', [plan, {}], { on: ({ type, ...args }) => { if (type === e.WORKFLOW_COMPLETE) { didFire = true; @@ -142,9 +142,9 @@ test('Publish a job log event', async (t) => { }`, }); let didFire = false; - let log; + let log: any; let id; - await workers.exec('run', [plan], { + await workers.exec('run', [plan, {}], { on: ({ workflowId, type, log: _log }) => { if (type === e.LOG) { didFire = true; @@ -154,7 +154,7 @@ test('Publish a job log event', async (t) => { }, }); t.true(didFire); - t.is(id, plan.id); + t.is(id, plan.id as any); t.is(log.level, 'info'); t.is(log.name, 'JOB'); diff --git a/packages/engine-multi/test/worker/pool.test.ts b/packages/engine-multi/test/worker/pool.test.ts index ab679efe9..aa74d6a6f 100644 --- a/packages/engine-multi/test/worker/pool.test.ts +++ b/packages/engine-multi/test/worker/pool.test.ts @@ -56,7 +56,7 @@ test.serial( async (t) => { const pool = createPool(workerPath, { maxWorkers: 1 }, logger); - const ids = {}; + const ids: Record = {}; const saveProcessId = (id: string) => { if (!ids[id]) { @@ -98,8 +98,8 @@ test('Remove a worker from the pool and release it when finished', async (t) => return p.then(() => { t.is(pool._pool.length, 5); - // the first thing in the queue should be a worker - t.true(pool[0] !== false); + // the last thing in the queue should be a worker + 
t.true(pool._pool[4] !== false); }); }); @@ -168,7 +168,7 @@ test('throw if the task throws', async (t) => { try { await pool.exec('throw', []); - } catch (e) { + } catch (e: any) { // NB e is not an error isntance t.is(e.message, 'test_error'); } @@ -179,7 +179,7 @@ test('throw if memory limit is exceeded', async (t) => { try { await pool.exec('blowMemory', [], { memoryLimitMb: 100 }); - } catch (e) { + } catch (e: any) { t.is(e.message, 'Run exceeded maximum memory usage'); t.is(e.name, 'OOMError'); } @@ -398,13 +398,13 @@ test('events should disconnect between executions', (t) => { return new Promise(async (done) => { const pool = createPool(workerPath, { capacity: 1 }, logger); - const counts = { + const counts: Record = { a: 0, b: 0, c: 0, }; - const on = (event) => { + const on = (event: { type: string; result: number }) => { if (event.type === 'test-message') { counts[event.result] += 1; } diff --git a/packages/engine-multi/tsconfig.json b/packages/engine-multi/tsconfig.json index b3d766fc1..fda756656 100644 --- a/packages/engine-multi/tsconfig.json +++ b/packages/engine-multi/tsconfig.json @@ -1,4 +1,4 @@ { "extends": "../../tsconfig.common", - "include": ["src/**/*.ts"] + "include": ["src/**/*.ts", "test/**/*.ts"] } From e3a249ecdae4880b93f806ab0318961c1eeea65b Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 6 Feb 2024 16:55:29 +0000 Subject: [PATCH 062/128] runtime: make statePropsToRemove a system option, rather than workflow specific If a workflow wants to remove props, it'll add an fn block --- packages/lexicon/core.d.ts | 2 - packages/runtime/src/execute/expression.ts | 40 ++++++++++--------- packages/runtime/src/runtime.ts | 9 ++--- .../runtime/test/execute/expression.test.ts | 16 ++++---- packages/runtime/test/runtime.test.ts | 6 +-- 5 files changed, 34 insertions(+), 39 deletions(-) diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index f659928f4..a67dcef48 100644 --- a/packages/lexicon/core.d.ts +++
b/packages/lexicon/core.d.ts @@ -75,9 +75,7 @@ export type WorkflowOptions = { // TODO Both numbers in minutes maybe timeout?: number; stepTimeout?: number; - start?: StepId; - statePropsToRemove?: string[]; }; export type StepId = string; diff --git a/packages/runtime/src/execute/expression.ts b/packages/runtime/src/execute/expression.ts index 65c941ca3..f2f4bc20a 100644 --- a/packages/runtime/src/execute/expression.ts +++ b/packages/runtime/src/execute/expression.ts @@ -1,6 +1,6 @@ import { printDuration, Logger } from '@openfn/logger'; import stringify from 'fast-safe-stringify'; -import type { Operation, State, WorkflowOptions } from '@openfn/lexicon'; +import type { Operation, State } from '@openfn/lexicon'; import loadModule from '../modules/module-loader'; import { Options, DEFAULT_TIMEOUT_MS } from '../runtime'; @@ -34,7 +34,7 @@ export default ( let duration = Date.now(); const { logger, plan, opts = {} } = ctx; try { - const timeout = plan.options.timeout ?? DEFAULT_TIMEOUT_MS; + const timeout = plan.options?.timeout ?? 
DEFAULT_TIMEOUT_MS; // Setup an execution context const context = buildContext(input, opts); @@ -71,12 +71,20 @@ export default ( duration = Date.now() - duration; - const finalState = prepareFinalState(plan.options, result, logger); + const finalState = prepareFinalState( + result, + logger, + opts.statePropsToRemove + ); // return the final state resolve(finalState); } catch (e: any) { // whatever initial state looks like now, clean it and report it back - const finalState = prepareFinalState(plan.options, input, logger); + const finalState = prepareFinalState( + input, + logger, + opts.statePropsToRemove + ); duration = Date.now() - duration; let finalError; try { @@ -141,28 +149,24 @@ const prepareJob = async ( // TODO this is suboptimal and may be slow on large objects // (especially as the result get stringified again downstream) const prepareFinalState = ( - options: WorkflowOptions, state: any, - logger: Logger + logger: Logger, + statePropsToRemove?: string[] ) => { if (state) { - let statePropsToRemove; - if (options.hasOwnProperty('statePropsToRemove')) { - ({ statePropsToRemove } = options); - } else { + if (!statePropsToRemove) { // As a strict default, remove the configuration key // tbh this should happen higher up in the stack but it causes havoc in unit testing statePropsToRemove = ['configuration']; } - if (statePropsToRemove && statePropsToRemove.forEach) { - statePropsToRemove.forEach((prop) => { - if (state.hasOwnProperty(prop)) { - delete state[prop]; - logger.debug(`Removed ${prop} from final state`); - } - }); - } + statePropsToRemove.forEach((prop) => { + if (state.hasOwnProperty(prop)) { + delete state[prop]; + logger.debug(`Removed ${prop} from final state`); + } + }); + const cleanState = stringify(state); return JSON.parse(cleanState); } diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index c13d62140..1389bf0c1 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -26,6 
+26,9 @@ export type Options = { // inject globals into the environment // TODO leaving this here for now, but maybe its actually on the xplan? globals?: any; + + // TODO state props to remove is a system-level option, not a workflow level one + statePropsToRemove?: string[]; }; type RawOptions = Omit & { @@ -74,11 +77,6 @@ const run = ( input = clone(defaultState); } - const { options } = xplan; - - if (!options.hasOwnProperty('statePropsToRemove')) { - options.statePropsToRemove = ['configuration']; - } if (opts.linker?.whitelist) { opts.linker.whitelist = opts.linker.whitelist.map((w) => { if (typeof w === 'string') { @@ -87,7 +85,6 @@ const run = ( return w; }); } - return executePlan(xplan as ExecutionPlan, input, opts as Options, logger); }; diff --git a/packages/runtime/test/execute/expression.test.ts b/packages/runtime/test/execute/expression.test.ts index d159f5266..5b14567e4 100644 --- a/packages/runtime/test/execute/expression.test.ts +++ b/packages/runtime/test/execute/expression.test.ts @@ -23,10 +23,10 @@ const createContext = (args = {}, options = {}) => // @ts-ignore ({ logger, - plan: { - options, + plan: {}, + opts: { + ...options, }, - opts: {}, notify: () => {}, report: () => {}, ...args, @@ -329,12 +329,10 @@ test('Throws after custom timeout', async (t) => { const job = `export default [() => new Promise((resolve) => setTimeout(resolve, 100))];`; - const context = createContext( - { - opts: { jobLogger: logger }, - }, - { timeout: 10 } - ); + const context = createContext({ + plan: { options: { timeout: 10 } }, + opts: { jobLogger: logger }, + }); const state = createState(); await t.throwsAsync(async () => execute(context, job, state), { message: 'Job took longer than 10ms to complete', diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index 4eebb9d6e..e7f8af39e 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -115,7 +115,7 @@ test('notify job error even 
after crash', async (t) => { }); test('resolve a credential', async (t) => { - const plan: ExecutionPlan = { + const plan: Partial = { workflow: { steps: [ { @@ -124,12 +124,10 @@ test('resolve a credential', async (t) => { }, ], }, - options: { - statePropsToRemove: [], - }, }; const options = { + statePropsToRemove: [], callbacks: { resolveCredential: async () => ({ password: 'password1' }), }, From 81ce00e467fa22a7f308163ce2369b9eaa912a7e Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 6 Feb 2024 17:07:22 +0000 Subject: [PATCH 063/128] engine: restore statePropsToRemove tests --- packages/engine-multi/src/test/worker-functions.ts | 1 + packages/engine-multi/src/worker/thread/run.ts | 1 - packages/engine-multi/test/engine.test.ts | 2 +- packages/engine-multi/test/integration.test.ts | 7 ++----- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/packages/engine-multi/src/test/worker-functions.ts b/packages/engine-multi/src/test/worker-functions.ts index f562edbcb..a10b92a90 100644 --- a/packages/engine-multi/src/test/worker-functions.ts +++ b/packages/engine-multi/src/test/worker-functions.ts @@ -27,6 +27,7 @@ const tasks = { // very very simple intepretation of a run function // Most tests should use the mock-worker instead run: async (plan: ExecutionPlan, _input: any, _adaptorPaths: any) => { + console.log(' >> RUN'); const workflowId = plan.id; publish('worker:workflow-start', { workflowId, diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index dacdfe66e..9dd3585d4 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -15,7 +15,6 @@ type RunOptions = { whitelist?: RegExp[]; sanitize: SanitizePolicies; statePropsToRemove?: string[]; - // TODO timeout }; const eventMap = { diff --git a/packages/engine-multi/test/engine.test.ts b/packages/engine-multi/test/engine.test.ts index 00da5c28d..f7b7757a4 100644 --- 
a/packages/engine-multi/test/engine.test.ts +++ b/packages/engine-multi/test/engine.test.ts @@ -130,7 +130,7 @@ test.serial('execute does not return internal state stuff', async (t) => { t.falsy(result['callWorker']); t.falsy(result['options']); - result.then(done); + done(); }); }); diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index e0f209cca..5a1c80051 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -293,9 +293,7 @@ test.serial('errors get nicely serialized', (t) => { }); }); -// TODO I need to get options working before I can fix this one -// statePropsToRemove needs to be fed through to the actual runtime on the option key -test.serial.skip( +test.serial( 'execute should remove the configuration and response keys', (t) => { return new Promise(async (done) => { @@ -318,8 +316,7 @@ test.serial.skip( } ); -// TODO ditto -test.serial.skip('use custom state-props-to-remove', (t) => { +test.serial('use custom state-props-to-remove', (t) => { return new Promise(async (done) => { api = await createAPI({ logger, From 15898e9db6a04d26e901942a91be14e4a86bf395 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 7 Feb 2024 11:03:53 +0000 Subject: [PATCH 064/128] mock: update to lexicon --- packages/lexicon/lightning.d.ts | 132 ++++++++++++++++++ packages/lexicon/package.json | 9 +- packages/lightning-mock/package.json | 1 + packages/lightning-mock/src/api-dev.ts | 13 +- packages/lightning-mock/src/api-sockets.ts | 46 +++--- packages/lightning-mock/src/server.ts | 6 +- packages/lightning-mock/src/types.ts | 107 +------------- .../test/channels/claim.test.ts | 10 +- .../lightning-mock/test/channels/run.test.ts | 30 ++-- .../lightning-mock/test/events/log.test.ts | 8 +- .../test/events/run-complete.test.ts | 4 +- .../test/events/run-start.test.ts | 4 +- .../test/events/step-complete.test.ts | 10 +- .../test/events/step-start.test.ts | 4 +- 
packages/lightning-mock/test/server.test.ts | 12 +- .../lightning-mock/test/socket-server.test.ts | 13 +- packages/lightning-mock/test/util.ts | 2 +- packages/lightning-mock/tsconfig.json | 2 +- pnpm-lock.yaml | 3 + 19 files changed, 226 insertions(+), 190 deletions(-) diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index 499e0192b..ae5247ffd 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -1,3 +1,135 @@ /** * Type definitions for Lightning and Worker interfaces + * + * This is the lightning-worker contract + * + * It is helpful to have these in the lexicon to avoid a circular dependency between lightning and the worker + * It's also kinda nice that the contract isn't in the worker itself, it's on neutral ground */ +// An run object returned by Lightning +export type Run = { + id: string; + dataclip_id: string; + starting_node_id: string; + + triggers: Node[]; + jobs: Node[]; + edges: Edge[]; + + options?: Record; // TODO type the expected options +}; + +// TODO rename to step +// maybe also split into jobs and triggers +export type Node = { + id: string; + body?: string; + adaptor?: string; + credential?: any; // TODO tighten this up, string or object + type?: 'webhook' | 'cron'; // trigger only + state?: any; // Initial state / defaults +}; + +export interface Edge { + id: string; + source_job_id?: string; + source_trigger_id?: string; + target_job_id: string; + name?: string; + condition?: string; + error_path?: boolean; + errors?: any; +} + +export type DataClip = object; + +export type Credential = object; + +export type ExitReasonStrings = + | 'success' + | 'fail' + | 'crash' + | 'kill' + | 'cancel' + | 'exception'; + +export type CONNECT = 'socket:connect'; + +// client left or joined a channel +export type CHANNEL_JOIN = 'socket:channel-join'; +export type CHANNEL_LEAVE = 'socket:channel-leave'; + +// Queue Channel + +// This is the event name +export type CLAIM = 'claim'; + +// This is 
the payload in the message sent to lightning +export type ClaimPayload = { demand?: number }; + +// This is the response from lightning +export type ClaimReply = { runs: Array }; +export type ClaimRun = { id: string; token: string }; + +// Run channel + +export type GET_PLAN = 'fetch:plan'; +export type GET_CREDENTIAL = 'fetch:credential'; +export type GET_DATACLIP = 'fetch:dataclip'; +export type RUN_START = 'run:start'; +export type RUN_COMPLETE = 'run:complete'; +export type RUN_LOG = 'run:log'; +export type STEP_START = 'step:start'; +export type STEP_COMPLETE = 'step:complete'; + +export type ExitReason = { + reason: ExitReasonStrings; + error_message: string | null; + error_type: string | null; +}; + +export type GetPlanPayload = void; // no payload +export type GetPlanReply = Run; + +export type GetCredentialPayload = { id: string }; +// credential in-line, no wrapper, arbitrary data +export type GetCredentialReply = {}; + +export type GetDataclipPayload = { id: string }; +export type GetDataClipReply = Uint8Array; // represents a json string Run + +export type RunStartPayload = void; // no payload +export type RunStartReply = {}; // no payload + +export type RunCompletePayload = ExitReason & { + final_dataclip_id?: string; // TODO this will be removed soon +}; +export type RunCompleteReply = undefined; + +export type RunLogPayload = { + message: Array; + timestamp: string; + run_id: string; + level?: string; + source?: string; // namespace + job_id?: string; + step_id?: string; +}; +export type RunLogReply = void; + +export type StepStartPayload = { + job_id: string; + step_id: string; + run_id?: string; + input_dataclip_id?: string; +}; +export type StepStartReply = void; + +export type StepCompletePayload = ExitReason & { + run_id?: string; + job_id: string; + step_id: string; + output_dataclip?: string; + output_dataclip_id?: string; +}; +export type StepCompleteReply = void; diff --git a/packages/lexicon/package.json b/packages/lexicon/package.json 
index 55e20d513..20ea6cbc8 100644 --- a/packages/lexicon/package.json +++ b/packages/lexicon/package.json @@ -3,5 +3,12 @@ "version": "1.0.0", "description": "Central repo of names and type definitions", "author": "Open Function Group ", - "license": "ISC" + "license": "ISC", + "exports": { + "lightning": { + "import": { + "types": "./lightning.d/.ts" + } + } + } } diff --git a/packages/lightning-mock/package.json b/packages/lightning-mock/package.json index bae475e51..150e22e5e 100644 --- a/packages/lightning-mock/package.json +++ b/packages/lightning-mock/package.json @@ -18,6 +18,7 @@ "dependencies": { "@koa/router": "^12.0.0", "@openfn/engine-multi": "workspace:*", + "@openfn/lexicon": "workspace:^", "@openfn/logger": "workspace:*", "@openfn/runtime": "workspace:*", "@types/koa-logger": "^3.1.2", diff --git a/packages/lightning-mock/src/api-dev.ts b/packages/lightning-mock/src/api-dev.ts index 2ac6bd23b..ea494ee23 100644 --- a/packages/lightning-mock/src/api-dev.ts +++ b/packages/lightning-mock/src/api-dev.ts @@ -2,19 +2,14 @@ * This module sets up a bunch of dev-only APIs * These are not intended to be reflected in Lightning itself */ +import crypto from 'node:crypto'; import Router from '@koa/router'; import { Logger } from '@openfn/logger'; -import crypto from 'node:crypto'; -import { RUN_COMPLETE } from './events'; +import type { Run, RunCompletePayload } from '@openfn/lexicon/lightning'; import { ServerState } from './server'; - -import type { - RunCompletePayload, - Run, - DevServer, - LightningEvents, -} from './types'; +import { RUN_COMPLETE } from './events'; +import type { DevServer, LightningEvents } from './types'; type Api = { startRun(runId: string): void; diff --git a/packages/lightning-mock/src/api-sockets.ts b/packages/lightning-mock/src/api-sockets.ts index 2aa013a55..816a19813 100644 --- a/packages/lightning-mock/src/api-sockets.ts +++ b/packages/lightning-mock/src/api-sockets.ts @@ -1,26 +1,6 @@ import { WebSocketServer } from 'ws'; import 
createLogger, { LogLevel, Logger } from '@openfn/logger'; import type { Server } from 'http'; - -import createPheonixMockSocketServer, { - DevSocket, - PhoenixEvent, - PhoenixEventStatus, -} from './socket-server'; -import { - RUN_COMPLETE, - RUN_LOG, - RUN_START, - CLAIM, - GET_PLAN, - GET_CREDENTIAL, - GET_DATACLIP, - STEP_COMPLETE, - STEP_START, -} from './events'; -import { extractRunId, stringify } from './util'; - -import type { ServerState } from './server'; import type { RunStartPayload, RunStartReply, @@ -41,7 +21,26 @@ import type { StepCompleteReply, StepStartPayload, StepStartReply, -} from './types'; +} from '@openfn/lexicon/lightning'; + +import createPheonixMockSocketServer, { + DevSocket, + PhoenixEvent, + PhoenixEventStatus, +} from './socket-server'; +import { + RUN_COMPLETE, + RUN_LOG, + RUN_START, + CLAIM, + GET_PLAN, + GET_CREDENTIAL, + GET_DATACLIP, + STEP_COMPLETE, + STEP_START, +} from './events'; +import { extractRunId, stringify } from './util'; +import type { ServerState } from './server'; // dumb cloning id // just an idea for unit tests @@ -232,10 +231,7 @@ const createSocketAPI = ( let payload = { status: 'ok' as PhoenixEventStatus, }; - if ( - !state.pending[runId] || - state.pending[runId].status !== 'started' - ) { + if (!state.pending[runId] || state.pending[runId].status !== 'started') { payload = { status: 'error', }; diff --git a/packages/lightning-mock/src/server.ts b/packages/lightning-mock/src/server.ts index 8191c23f9..928019a3b 100644 --- a/packages/lightning-mock/src/server.ts +++ b/packages/lightning-mock/src/server.ts @@ -10,10 +10,10 @@ import createLogger, { import createWebSocketAPI from './api-sockets'; import createDevAPI from './api-dev'; +import type { StepId } from '@openfn/lexicon'; +import type { RunLogPayload, Run } from '@openfn/lexicon/lightning'; +import type { DevServer } from './types'; -import type { RunLogPayload, Run, DevServer } from './types'; - -type StepId = string; type JobId = string; export type 
RunState = { diff --git a/packages/lightning-mock/src/types.ts b/packages/lightning-mock/src/types.ts index ce9a492b3..571ba3191 100644 --- a/packages/lightning-mock/src/types.ts +++ b/packages/lightning-mock/src/types.ts @@ -1,43 +1,9 @@ import Koa from 'koa'; +import type { Run, DataClip, Credential } from '@openfn/lexicon/lightning'; import type { ServerState } from './server'; -export type Node = { - id: string; - body?: string; - adaptor?: string; - credential?: any; // TODO tighten this up, string or object - type?: 'webhook' | 'cron'; // trigger only - state?: any; // Initial state / defaults -}; - -export interface Edge { - id: string; - source_job_id?: string; - source_trigger_id?: string; - target_job_id: string; - name?: string; - condition?: string; - error_path?: boolean; - errors?: any; -} - -// An run object returned by Lightning -export type Run = { - id: string; - dataclip_id: string; - starting_node_id: string; - - triggers: Node[]; - jobs: Node[]; - edges: Edge[]; - - options?: Record; // TODO type the expected options -}; - export type LightningEvents = 'log' | 'run-complete'; -export type DataClip = any; - export type DevServer = Koa & { state: ServerState; addCredential(id: string, cred: Credential): void; @@ -63,74 +29,3 @@ export type DevServer = Koa & { startRun(id: string): any; waitForResult(runId: string): Promise; }; - -/** - * These are duplicated from the worker and subject to drift! 
- * We cannot import them directly because it creates a circular build dependency mock <-> worker - * We cannot declare an internal private types module because the generated dts will try to import from it - * - * The list of types is small enough right now that this is just about manageable - **/ -export type ExitReasonStrings = - | 'success' - | 'fail' - | 'crash' - | 'kill' - | 'cancel' - | 'exception'; - -export type ExitReason = { - reason: ExitReasonStrings; - error_message: string | null; - error_type: string | null; -}; - -export type ClaimPayload = { demand?: number }; -export type ClaimReply = { runs: Array }; -export type ClaimRun = { id: string; token: string }; - -export type GetPlanPayload = void; // no payload -export type GetPlanReply = Run; - -export type GetCredentialPayload = { id: string }; -// credential in-line, no wrapper, arbitrary data -export type GetCredentialReply = {}; - -export type GetDataclipPayload = { id: string }; -export type GetDataClipReply = Uint8Array; // represents a json string Run - -export type RunStartPayload = void; // no payload -export type RunStartReply = {}; // no payload - -export type RunCompletePayload = ExitReason & { - final_dataclip_id?: string; // TODO this will be removed soon -}; -export type RunCompleteReply = undefined; - -export type RunLogPayload = { - message: Array; - timestamp: string; - run_id: string; - level?: string; - source?: string; // namespace - job_id?: string; - step_id?: string; -}; -export type RunLogReply = void; - -export type StepStartPayload = { - job_id: string; - step_id: string; - run_id?: string; - input_dataclip_id?: string; -}; -export type StepStartReply = void; - -export type StepCompletePayload = ExitReason & { - run_id?: string; - job_id: string; - step_id: string; - output_dataclip?: string; - output_dataclip_id?: string; -}; -export type StepCompleteReply = void; diff --git a/packages/lightning-mock/test/channels/claim.test.ts 
b/packages/lightning-mock/test/channels/claim.test.ts index f0c4fd6f8..54befecdb 100644 --- a/packages/lightning-mock/test/channels/claim.test.ts +++ b/packages/lightning-mock/test/channels/claim.test.ts @@ -8,8 +8,8 @@ const port = 4444; type Channel = any; -let server; -let client; +let server: any; +let client: any; test.before(async () => ({ server, client } = await setup(port))); @@ -31,7 +31,7 @@ const join = (channelName: string, params: any = {}): Promise => .receive('ok', () => { done(channel); }) - .receive('error', (err) => { + .receive('error', (err: any) => { // err will be the response message on the payload (ie, invalid_token, invalid_run_id etc) reject(new Error(err)); }); @@ -46,7 +46,7 @@ test.serial( const channel = await join('worker:queue'); // response is an array of run ids - channel.push(CLAIM).receive('ok', (response) => { + channel.push(CLAIM).receive('ok', (response: any) => { const { runs } = response; t.assert(Array.isArray(runs)); t.is(runs.length, 0); @@ -67,7 +67,7 @@ test.serial( const channel = await join('worker:queue'); // response is an array of run ids - channel.push(CLAIM).receive('ok', (response) => { + channel.push(CLAIM).receive('ok', (response: any) => { const { runs } = response; t.truthy(runs); t.is(runs.length, 1); diff --git a/packages/lightning-mock/test/channels/run.test.ts b/packages/lightning-mock/test/channels/run.test.ts index d01889c3d..5bfe6dd41 100644 --- a/packages/lightning-mock/test/channels/run.test.ts +++ b/packages/lightning-mock/test/channels/run.test.ts @@ -1,4 +1,10 @@ import test from 'ava'; +import type { + Run, + RunCompletePayload, + Credential, + DataClip, +} from '@openfn/lexicon/lightning'; import { setup } from '../util'; import { runs, credentials, dataclips } from '../data'; @@ -9,16 +15,14 @@ import { GET_DATACLIP, } from '../../src/events'; -import { RunCompletePayload } from '@openfn/ws-worker'; - const enc = new TextDecoder('utf-8'); type Channel = any; const port = 7777; -let server; 
-let client; +let server: any; +let client: any; // Set up a lightning server and a phoenix socket client before each test test.before(async () => ({ server, client } = await setup(port))); @@ -41,7 +45,7 @@ const join = (channelName: string, params: any = {}): Promise => .receive('ok', () => { done(channel); }) - .receive('error', (err) => { + .receive('error', (err: any) => { // err will be the response message on the payload (ie, invalid_token, invalid_run_id etc) reject(new Error(err)); }); @@ -72,7 +76,7 @@ test.serial('get run data through the run channel', async (t) => { server.startRun(run1.id); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_PLAN).receive('ok', (run) => { + channel.push(GET_PLAN).receive('ok', (run: Run) => { t.deepEqual(run, run1); done(); }); @@ -126,10 +130,12 @@ test.serial('get credential through the run channel', async (t) => { server.addCredential('a', credentials['a']); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_CREDENTIAL, { id: 'a' }).receive('ok', (result) => { - t.deepEqual(result, credentials['a']); - done(); - }); + channel + .push(GET_CREDENTIAL, { id: 'a' }) + .receive('ok', (result: Credential) => { + t.deepEqual(result, credentials['a']); + done(); + }); }); }); @@ -139,7 +145,7 @@ test.serial('get dataclip through the run channel', async (t) => { server.addDataclip('d', dataclips['d']); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_DATACLIP, { id: 'd' }).receive('ok', (result) => { + channel.push(GET_DATACLIP, { id: 'd' }).receive('ok', (result: any) => { const str = enc.decode(new Uint8Array(result)); const dataclip = JSON.parse(str); t.deepEqual(dataclip, dataclips['d']); @@ -159,7 +165,7 @@ test.serial( server.startRun(run1.id); server.addDataclip('result', result); - server.waitForResult(run1.id).then((dataclip) => { + server.waitForResult(run1.id).then((dataclip: DataClip) => { t.deepEqual(result, 
dataclip); done(); }); diff --git a/packages/lightning-mock/test/events/log.test.ts b/packages/lightning-mock/test/events/log.test.ts index 99b326011..f57d020b4 100644 --- a/packages/lightning-mock/test/events/log.test.ts +++ b/packages/lightning-mock/test/events/log.test.ts @@ -3,8 +3,8 @@ import { RUN_LOG } from '../../src/events'; import { join, setup, createRun } from '../util'; -let server; -let client; +let server: any; +let client: any; const port = 5501; @@ -26,7 +26,7 @@ test.serial('acknowledge valid message (run log)', async (t) => { const channel = await join(client, run.id); - channel.push(RUN_LOG, event).receive('ok', (evt) => { + channel.push(RUN_LOG, event).receive('ok', () => { t.pass('event acknowledged'); done(); }); @@ -50,7 +50,7 @@ test.serial('acknowledge valid message (job log)', async (t) => { const channel = await join(client, run.id); - channel.push(RUN_LOG, event).receive('ok', (evt) => { + channel.push(RUN_LOG, event).receive('ok', () => { t.pass('event acknowledged'); done(); }); diff --git a/packages/lightning-mock/test/events/run-complete.test.ts b/packages/lightning-mock/test/events/run-complete.test.ts index 42ef2f878..9f00fb575 100644 --- a/packages/lightning-mock/test/events/run-complete.test.ts +++ b/packages/lightning-mock/test/events/run-complete.test.ts @@ -2,8 +2,8 @@ import test from 'ava'; import { join, setup, createRun } from '../util'; import { RUN_COMPLETE } from '../../src/events'; -let server; -let client; +let server: any; +let client: any; const port = 5501; diff --git a/packages/lightning-mock/test/events/run-start.test.ts b/packages/lightning-mock/test/events/run-start.test.ts index 30781d7e4..51419f0ab 100644 --- a/packages/lightning-mock/test/events/run-start.test.ts +++ b/packages/lightning-mock/test/events/run-start.test.ts @@ -2,8 +2,8 @@ import test from 'ava'; import { join, setup, createRun } from '../util'; import { RUN_START } from '../../src/events'; -let server; -let client; +let server: any; +let 
client: any; const port = 5500; diff --git a/packages/lightning-mock/test/events/step-complete.test.ts b/packages/lightning-mock/test/events/step-complete.test.ts index 5422b0671..a23d48d62 100644 --- a/packages/lightning-mock/test/events/step-complete.test.ts +++ b/packages/lightning-mock/test/events/step-complete.test.ts @@ -3,8 +3,8 @@ import { STEP_COMPLETE } from '../../src/events'; import { join, setup, createRun } from '../util'; -let server; -let client; +let server: any; +let client: any; const port = 5501; @@ -24,7 +24,7 @@ test.serial('acknowledge valid message', async (t) => { const channel = await join(client, run.id); - channel.push(STEP_COMPLETE, event).receive('ok', (evt) => { + channel.push(STEP_COMPLETE, event).receive('ok', () => { t.pass('event acknowledged'); done(); }); @@ -88,7 +88,7 @@ test.serial('error if no output dataclip', async (t) => { }; const channel = await join(client, run.id); - channel.push(STEP_COMPLETE, event).receive('error', (e) => { + channel.push(STEP_COMPLETE, event).receive('error', (e: any) => { t.is(e.toString(), 'no output_dataclip'); done(); }); @@ -108,7 +108,7 @@ test.serial('error if no output dataclip_id', async (t) => { }; const channel = await join(client, run.id); - channel.push(STEP_COMPLETE, event).receive('error', (e) => { + channel.push(STEP_COMPLETE, event).receive('error', (e: any) => { t.is(e.toString(), 'no output_dataclip_id'); done(); }); diff --git a/packages/lightning-mock/test/events/step-start.test.ts b/packages/lightning-mock/test/events/step-start.test.ts index f870ba9b7..3f1924905 100644 --- a/packages/lightning-mock/test/events/step-start.test.ts +++ b/packages/lightning-mock/test/events/step-start.test.ts @@ -2,8 +2,8 @@ import test from 'ava'; import { STEP_START } from '../../src/events'; import { join, setup, createRun } from '../util'; -let server; -let client; +let server: any; +let client: any; const port = 5501; diff --git a/packages/lightning-mock/test/server.test.ts 
b/packages/lightning-mock/test/server.test.ts index ee73bd3b5..5ef9fa120 100644 --- a/packages/lightning-mock/test/server.test.ts +++ b/packages/lightning-mock/test/server.test.ts @@ -2,12 +2,12 @@ import test from 'ava'; import { Socket } from 'phoenix'; import { WebSocket } from 'ws'; +import type { Run } from '@openfn/lexicon/lightning'; -import { createRun, setup } from './util'; -import type { Run } from '../src/types'; +import { setup } from './util'; -let server; -let client; +let server: any; +let client: any; const port = 3333; @@ -82,10 +82,10 @@ test.serial('reject ws connections without a token', (t) => { }); test.serial('respond to channel join requests', (t) => { - return new Promise(async (done, reject) => { + return new Promise(async (done) => { const channel = client.channel('x', {}); - channel.join().receive('ok', (res) => { + channel.join().receive('ok', (res: any) => { t.is(res, 'ok'); done(); }); diff --git a/packages/lightning-mock/test/socket-server.test.ts b/packages/lightning-mock/test/socket-server.test.ts index d0fc34e0c..c21dd6a9f 100644 --- a/packages/lightning-mock/test/socket-server.test.ts +++ b/packages/lightning-mock/test/socket-server.test.ts @@ -4,9 +4,9 @@ import { Socket } from 'phoenix'; import { WebSocket } from 'ws'; import createSocketServer from '../src/socket-server'; -let socket; -let server; -let messages; +let socket: any; +let server: any; +let messages: any; const wait = (duration = 10) => new Promise((resolve) => { @@ -19,6 +19,7 @@ test.beforeEach( messages = []; // @ts-ignore I don't care about missing server options here server = createSocketServer({ + // @ts-ignore state: { events: new EventEmitter(), }, @@ -48,13 +49,13 @@ test.serial('respond to connection join requests', async (t) => { channel .join() - .receive('ok', (resp) => { + .receive('ok', (resp: any) => { t.is(resp, 'ok'); channel.push('hello'); resolve(); }) - .receive('error', (e) => { + .receive('error', (e: any) => { console.log(e); }); }); @@ 
-64,7 +65,7 @@ test.serial('send a message', async (t) => { return new Promise((resolve) => { const channel = socket.channel('x', {}); - server.listenToChannel('x', (_ws, { payload, event }) => { + server.listenToChannel('x', (_ws: any, { payload, event }: any) => { t.is(event, 'hello'); t.deepEqual(payload, { x: 1 }); diff --git a/packages/lightning-mock/test/util.ts b/packages/lightning-mock/test/util.ts index 937ebf369..cabe11b1f 100644 --- a/packages/lightning-mock/test/util.ts +++ b/packages/lightning-mock/test/util.ts @@ -33,7 +33,7 @@ export const join = (client: any, runId: string): Promise => .receive('ok', () => { done(channel); }) - .receive('error', (err) => { + .receive('error', (err: any) => { reject(new Error(err)); }); }); diff --git a/packages/lightning-mock/tsconfig.json b/packages/lightning-mock/tsconfig.json index ba1452256..8906c56a5 100644 --- a/packages/lightning-mock/tsconfig.json +++ b/packages/lightning-mock/tsconfig.json @@ -1,6 +1,6 @@ { "extends": "../../tsconfig.common", - "include": ["src/**/*.ts"], + "include": ["src/**/*.ts", "test/**/*.ts"], "compilerOptions": { "module": "ESNext" } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 693c42c4d..60a1d46ec 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -438,6 +438,9 @@ importers: '@openfn/engine-multi': specifier: workspace:* version: link:../engine-multi + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@openfn/logger': specifier: workspace:* version: link:../logger From 81765589382ba040f506185aeb3296ec3c3088ff Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 7 Feb 2024 13:02:01 +0000 Subject: [PATCH 065/128] worker: start mapping to lexicon. 
Handled run-> plan conversion --- packages/ws-worker/package.json | 1 + packages/ws-worker/src/api/claim.ts | 3 +- packages/ws-worker/src/api/execute.ts | 37 +-- packages/ws-worker/src/events.ts | 86 +++---- packages/ws-worker/src/types.d.ts | 75 ------ packages/ws-worker/src/util/convert-run.ts | 107 ++++++--- .../ws-worker/test/util/convert-run.test.ts | 227 ++++++++++-------- 7 files changed, 253 insertions(+), 283 deletions(-) diff --git a/packages/ws-worker/package.json b/packages/ws-worker/package.json index 670a14606..b70a1fc73 100644 --- a/packages/ws-worker/package.json +++ b/packages/ws-worker/package.json @@ -22,6 +22,7 @@ "dependencies": { "@koa/router": "^12.0.0", "@openfn/engine-multi": "workspace:*", + "@openfn/lexicon": "workspace:^", "@openfn/logger": "workspace:*", "@openfn/runtime": "workspace:*", "@types/koa-logger": "^3.1.2", diff --git a/packages/ws-worker/src/api/claim.ts b/packages/ws-worker/src/api/claim.ts index 18d2e68d0..a54d7ebda 100644 --- a/packages/ws-worker/src/api/claim.ts +++ b/packages/ws-worker/src/api/claim.ts @@ -1,5 +1,6 @@ import { Logger, createMockLogger } from '@openfn/logger'; -import { CLAIM, ClaimPayload, ClaimReply } from '../events'; +import { ClaimPayload, ClaimReply } from '@openfn/lexicon/lightning'; +import { CLAIM } from '../events'; import type { ServerApp } from '../server'; diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 9ce817bf4..7511bcd77 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -1,32 +1,36 @@ +import type { ExecutionPlan } from '@openfn/lexicon'; +import type { + RunLogPayload, + RunStartPayload, + RunOptions, +} from '@openfn/lexicon/lightning'; +import type { Logger } from '@openfn/logger'; +import type { + RuntimeEngine, + Resolvers, + WorkflowStartPayload, +} from '@openfn/engine-multi'; + +import { + getWithReply, + createRunState, + throttle as createThrottle, +} from '../util'; import { 
RUN_COMPLETE, RUN_LOG, - RunLogPayload, RUN_START, - RunStartPayload, - GET_CREDENTIAL, GET_DATACLIP, STEP_COMPLETE, STEP_START, + GET_CREDENTIAL, } from '../events'; -import { - getWithReply, - createRunState, - throttle as createThrottle, -} from '../util'; import handleStepComplete from '../events/step-complete'; import handleStepStart from '../events/step-start'; import handleRunComplete from '../events/run-complete'; import handleRunError from '../events/run-error'; -import type { RunOptions, Channel, RunState, JSONLog } from '../types'; -import type { Logger } from '@openfn/logger'; -import type { - RuntimeEngine, - Resolvers, - WorkflowStartPayload, -} from '@openfn/engine-multi'; -import type { ExecutionPlan } from '@openfn/runtime'; +import type { Channel, RunState, JSONLog } from '../types'; const enc = new TextDecoder('utf-8'); @@ -130,6 +134,7 @@ export function execute( .then(async () => { // TODO we need to remove this from here and let the runtime take care of it through // the resolver. 
See https://github.com/OpenFn/kit/issues/403 + // TODO come back and work out how initial state will work if (typeof plan.initialState === 'string') { logger.debug('loading dataclip', plan.initialState); plan.initialState = await loadDataclip(channel, plan.initialState); diff --git a/packages/ws-worker/src/events.ts b/packages/ws-worker/src/events.ts index 89cdeffca..fc157d5d8 100644 --- a/packages/ws-worker/src/events.ts +++ b/packages/ws-worker/src/events.ts @@ -1,69 +1,43 @@ -import { Run, ExitReason } from './types'; +import * as l from '@openfn/lexicon/lightning'; // These are worker-lightning events, used in the websocket - export const CLAIM = 'claim'; - -export type ClaimPayload = { demand?: number }; -export type ClaimReply = { runs: Array }; -export type ClaimRun = { id: string; token: string }; - export const GET_PLAN = 'fetch:plan'; -export type GetPlanPayload = void; // no payload -export type GetPlanReply = Run; - -export const GET_CREDENTIAL = 'fetch:credential'; -export type GetCredentialPayload = { id: string }; -// credential in-line, no wrapper, arbitrary data -export type GetCredentialReply = {}; - export const GET_DATACLIP = 'fetch:dataclip'; -export type GetDataclipPayload = { id: string }; -export type GetDataClipReply = Uint8Array; // represents a json string Run - -export const RUN_START = 'run:start'; // runId, timestamp -export type RunStartPayload = void; // no payload -export type RunStartReply = {}; // no payload +export const GET_CREDENTIAL = 'fetch:credential'; +export const RUN_START = 'run:start'; +export const RUN_COMPLETE = 'run:complete'; +export const RUN_LOG = 'run:log'; +export const STEP_START = 'step:start'; +export const STEP_COMPLETE = 'step:complete'; +export const INTERNAL_RUN_COMPLETE = 'server:run-complete'; -export const RUN_COMPLETE = 'run:complete'; // runId, timestamp, result, stats -export type RunCompletePayload = ExitReason & { - final_dataclip_id?: string; // TODO this will be removed soon +export type 
QueueEvents = { + [CLAIM]: l.ClaimPayload; }; -export type RunCompleteReply = undefined; -export const RUN_LOG = 'run:log'; // level, namespace (job,runtime,adaptor), message, time -export type RunLogPayload = { - message: Array; - timestamp: string; - run_id: string; - level?: string; - source?: string; // namespace - job_id?: string; - step_id?: string; +export type QueueEventReplies = { + [CLAIM]: l.ClaimReply; }; -export type RunLogReply = void; -export const STEP_START = 'step:start'; -export type StepStartPayload = { - job_id: string; - step_id: string; - run_id?: string; - input_dataclip_id?: string; - versions: Record; +export type RunEvents = { + [GET_PLAN]: l.GetPlanPayload; + [GET_CREDENTIAL]: l.GetCredentialPayload; + [GET_DATACLIP]: l.GetDataclipPayload; + [RUN_START]: l.RunStartPayload; + [RUN_COMPLETE]: l.RunCompletePayload; + [RUN_LOG]: l.RunLogPayload; + [STEP_START]: l.StepStartPayload; + [STEP_COMPLETE]: l.StepCompletePayload; }; -export type StepStartReply = void; -export const STEP_COMPLETE = 'step:complete'; -export type StepCompletePayload = ExitReason & { - run_id?: string; - job_id: string; - step_id: string; - output_dataclip?: string; - output_dataclip_id?: string; +export type RunReplies = { + [GET_PLAN]: l.GetPlanReply; + [GET_CREDENTIAL]: l.GetCredentialReply; + [GET_DATACLIP]: l.GetDataClipReply; + [RUN_START]: l.RunStartReply; + [RUN_COMPLETE]: l.RunCompleteReply; + [RUN_LOG]: l.RunLogReply; + [STEP_START]: l.StepStartReply; + [STEP_COMPLETE]: l.StepCompleteReply; }; -export type StepCompleteReply = void; - -// These are internal server events -// Explicitly (and awkwardly) namespaced to avoid confusion - -export const INTERNAL_RUN_COMPLETE = 'server:run-complete'; diff --git a/packages/ws-worker/src/types.d.ts b/packages/ws-worker/src/types.d.ts index 8cc0709dd..2a296d1b0 100644 --- a/packages/ws-worker/src/types.d.ts +++ b/packages/ws-worker/src/types.d.ts @@ -4,81 +4,6 @@ import type { ExecutionPlan } from '@openfn/runtime'; 
export { Socket }; -export type Credential = Record; - -export type State = { - data: { - [key: string]: any; - }; - configuration?: { - [key: string]: any; - }; - errors?: { - [jobId: string]: { - type: string; - message: string; - }; - }; - - // technically there should be nothing here - [key: string]: any; -}; - -export type ExitReasonStrings = - | 'success' - | 'fail' - | 'crash' - | 'kill' - | 'cancel' - | 'exception'; - -export type ExitReason = { - reason: ExitReasonStrings; - error_message: string | null; - error_type: string | null; -}; - -export type Node = { - id: string; - body?: string; - adaptor?: string; - credential?: object; - credential_id?: string; - type?: 'webhook' | 'cron'; // trigger only - state?: any; // Initial state / defaults -}; - -export interface Edge { - id: string; - source_job_id?: string; - source_trigger_id?: string; - target_job_id: string; - name?: string; - condition?: string; - error_path?: boolean; - errors?: any; - enabled?: boolean; -} - -// An run object returned by Lightning -export type Run = { - id: string; - dataclip_id: string; - starting_node_id: string; - - triggers: Node[]; - jobs: Node[]; - edges: Edge[]; - - options?: RunOptions; -}; - -export type RunOptions = { - runTimeoutMs?: number; - - sanitize?: SanitizePolicies; -}; - // Internal server state for each run export type RunState = { activeStep?: string; diff --git a/packages/ws-worker/src/util/convert-run.ts b/packages/ws-worker/src/util/convert-run.ts index 35b200de2..edb0ee6a8 100644 --- a/packages/ws-worker/src/util/convert-run.ts +++ b/packages/ws-worker/src/util/convert-run.ts @@ -1,11 +1,16 @@ import crypto from 'node:crypto'; import type { - JobNode, - JobNodeID, - JobEdge, + Step, + StepId, ExecutionPlan, -} from '@openfn/runtime'; -import { Run, RunOptions, Edge } from '../types'; + State, + Job, + Trigger, + StepEdge, + WorkflowOptions, + Lazy, +} from '@openfn/lexicon'; +import { Run, RunOptions, Edge } from '@openfn/lexicon/lightning'; export 
const conditions: Record string | null> = { @@ -33,36 +38,44 @@ const mapTriggerEdgeCondition = (edge: Edge) => { return condition; }; -const mapOptions = (options: RunOptions): RunOptions => { - return options; +const mapOptions = ( + runOptions: RunOptions = {}, + workflowOptions: WorkflowOptions = {} +): WorkflowOptions => { + if (runOptions?.runTimeoutMs) { + workflowOptions.timeout = runOptions?.runTimeoutMs; + } + if (runOptions?.sanitize) { + workflowOptions.sanitize = runOptions?.sanitize; + } + return workflowOptions; }; export default ( run: Run -): { plan: ExecutionPlan; options: RunOptions } => { - const options = run.options || {}; +): { plan: ExecutionPlan; options: WorkflowOptions; input: Lazy } => { + const opts: WorkflowOptions = {}; + const plan: Partial = { id: run.id, }; + let initialState; if (run.dataclip_id) { - // This is tricky - we're assining a string to the XPlan - // which is fine becuase it'll be handled later - // I guess we need a new type for now? Like a lazy XPlan - // @ts-ignore - plan.initialState = run.dataclip_id; + initialState = run.dataclip_id; } + if (run.starting_node_id) { - plan.start = run.starting_node_id; + opts.start = run.starting_node_id; } - const nodes: Record = {}; + const nodes: Record = {}; - const edges = run.edges ?? []; + const edges: Edge[] = run.edges ?? []; // We don't really care about triggers, it's mostly just a empty node if (run.triggers?.length) { - run.triggers.forEach((trigger) => { + run.triggers.forEach((trigger: Trigger) => { const id = trigger.id || 'trigger'; nodes[id] = { @@ -72,13 +85,16 @@ export default ( // TODO do we need to support multiple edges here? 
Likely const connectedEdges = edges.filter((e) => e.source_trigger_id === id); if (connectedEdges.length) { - nodes[id].next = connectedEdges.reduce((obj, edge) => { - if (edge.enabled !== false) { - // @ts-ignore - obj[edge.target_job_id] = mapTriggerEdgeCondition(edge); - } - return obj; - }, {}); + nodes[id].next = connectedEdges.reduce( + (obj: Partial, edge) => { + if (edge.enabled !== false) { + // @ts-ignore + obj[edge.target_job_id] = mapTriggerEdgeCondition(edge); + } + return obj; + }, + {} + ); } else { // TODO what if the edge isn't found? } @@ -86,25 +102,27 @@ export default ( } if (run.jobs?.length) { - run.jobs.forEach((job) => { - const id = job.id || crypto.randomUUID(); - - nodes[id] = { + run.jobs.forEach((step) => { + const id = step.id || crypto.randomUUID(); + const job: Job = { id, - configuration: job.credential || job.credential_id, - expression: job.body, - adaptor: job.adaptor, + configuration: step.credential || step.credential_id, + expression: step.body!, + adaptor: step.adaptor, }; - if (job.state) { - // TODO this is likely to change - nodes[id].state = job.state; + if (step.name) { + job.name = step.name; + } + + if (step.state) { + job.state = step.state; } const next = edges .filter((e) => e.source_job_id === id) .reduce((obj, edge) => { - const newEdge: JobEdge = {}; + const newEdge: StepEdge = {}; const condition = mapEdgeCondition(edge); if (condition) { @@ -117,18 +135,27 @@ export default ( ? 
newEdge : true; return obj; - }, {} as Record); + }, {} as Record); if (Object.keys(next).length) { - nodes[id].next = next; + job.next = next; } + + nodes[id] = job; }); } - plan.jobs = Object.values(nodes); + plan.workflow = { + steps: Object.values(nodes), + }; + + if (run.name) { + plan.workflow.name = run.name; + } return { plan: plan as ExecutionPlan, - options: mapOptions(options), + options: mapOptions(run.options, opts), + input: initialState || {}, }; }; diff --git a/packages/ws-worker/test/util/convert-run.test.ts b/packages/ws-worker/test/util/convert-run.test.ts index 3cfd58ec3..ac042de93 100644 --- a/packages/ws-worker/test/util/convert-run.test.ts +++ b/packages/ws-worker/test/util/convert-run.test.ts @@ -1,6 +1,12 @@ import test from 'ava'; +import type { Run, Node } from '@openfn/lexicon/lightning'; import convertRun, { conditions } from '../../src/util/convert-run'; -import { Run, Node } from '../../src/types'; +import { + ConditionalStepEdge, + Job, + StepEdge, + StepEdgeObj, +} from '@openfn/lexicon'; // Creates a lightning node (job or trigger) const createNode = (props = {}) => @@ -52,7 +58,28 @@ test('convert a single job', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [createJob()], + workflow: { + steps: [createJob()], + }, + }); +}); + +test('convert a single job with names', (t) => { + const run: Partial = { + id: 'w', + name: 'my-workflow', + jobs: [createNode({ name: 'my-job' })], + triggers: [], + edges: [], + }; + const { plan } = convertRun(run as Run); + + t.deepEqual(plan, { + id: 'w', + workflow: { + name: 'my-workflow', + steps: [createJob({ name: 'my-job' })], + }, }); }); @@ -71,9 +98,14 @@ test('convert a single job with options', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [createJob()], + workflow: { + steps: [createJob()], + }, + }); + t.deepEqual(options, { + timeout: 10, + sanitize: 'obfuscate', }); - t.deepEqual(options, run.options); }); // Note idk how lightningg will handle state/defaults on a job @@ -89,7 +121,9 @@ 
test('convert a single job with data', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [createJob({ state: { data: { x: 22 } } })], + workflow: { + steps: [createJob({ state: { data: { x: 22 } } })], + }, }); t.deepEqual(options, {}); }); @@ -102,36 +136,32 @@ test('Accept a partial run object', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [], + workflow: { + steps: [], + }, }); t.deepEqual(options, {}); }); -test('handle dataclip_id', (t) => { +test('handle dataclip_id as input', (t) => { const run: Partial = { id: 'w', dataclip_id: 'xyz', }; - const { plan } = convertRun(run as Run); + const { input } = convertRun(run as Run); - t.deepEqual(plan, { - id: 'w', - initialState: 'xyz', - jobs: [], - }); + t.deepEqual(input, 'xyz'); }); -test('handle starting_node_id', (t) => { +test('handle starting_node_id as options', (t) => { const run: Partial = { id: 'w', starting_node_id: 'j1', }; - const { plan } = convertRun(run as Run); + const { options } = convertRun(run as Run); - t.deepEqual(plan, { - id: 'w', + t.deepEqual(options, { start: 'j1', - jobs: [], }); }); @@ -146,11 +176,13 @@ test('convert a single trigger', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - }, - ], + workflow: { + steps: [ + { + id: 't', + }, + ], + }, }); }); @@ -166,7 +198,9 @@ test('ignore a single edge', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [], + workflow: { + steps: [], + }, }); }); @@ -187,15 +221,17 @@ test('convert a single trigger with an edge', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - next: { - a: true, + workflow: { + steps: [ + { + id: 't', + next: { + a: true, + }, }, - }, - createJob(), - ], + createJob(), + ], + }, }); }); @@ -221,17 +257,19 @@ test('convert a single trigger with two edges', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - next: { - a: true, - b: true, + workflow: { + steps: [ + { + id: 't', + next: { + a: true, + b: true, + }, }, - }, - createJob({ id: 'a' }), - createJob({ id: 'b' }), - ], + 
createJob({ id: 'a' }), + createJob({ id: 'b' }), + ], + }, }); }); @@ -253,13 +291,15 @@ test('convert a disabled trigger', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - next: {}, - }, - createJob({ id: 'a' }), - ], + workflow: { + steps: [ + { + id: 't', + next: {}, + }, + createJob({ id: 'a' }), + ], + }, }); }); @@ -274,7 +314,12 @@ test('convert two linked jobs', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [createJob({ id: 'a', next: { b: true } }), createJob({ id: 'b' })], + workflow: { + steps: [ + createJob({ id: 'a', next: { b: true } }), + createJob({ id: 'b' }), + ], + }, }); }); @@ -294,11 +339,13 @@ test('convert a job with two upstream jobs', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - createJob({ id: 'a', next: { x: true } }), - createJob({ id: 'b', next: { x: true } }), - createJob({ id: 'x' }), - ], + workflow: { + steps: [ + createJob({ id: 'a', next: { x: true } }), + createJob({ id: 'b', next: { x: true } }), + createJob({ id: 'x' }), + ], + }, }); }); @@ -314,10 +361,12 @@ test('convert two linked jobs with an edge condition', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - createJob({ id: 'a', next: { b: { condition } } }), - createJob({ id: 'b' }), - ], + workflow: { + steps: [ + createJob({ id: 'a', next: { b: { condition } } }), + createJob({ id: 'b' }), + ], + }, }); }); @@ -332,10 +381,12 @@ test('convert two linked jobs with a disabled edge', (t) => { t.deepEqual(plan, { id: 'w', - jobs: [ - createJob({ id: 'a', next: { b: { disabled: true } } }), - createJob({ id: 'b' }), - ], + workflow: { + steps: [ + createJob({ id: 'a', next: { b: { disabled: true } } }), + createJob({ id: 'b' }), + ], + }, }); }); @@ -446,12 +497,12 @@ test('convert edge condition on_job_success', (t) => { }; const { plan } = convertRun(run as Run); - const [job] = plan.jobs; - - t.truthy(job.next?.b); - t.is(job.next.b.condition, conditions.on_job_success('a')); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as 
Record; - t.true(testEdgeCondition(job.next.b.condition, {})); + t.truthy(edge.b); + t.is(edge.b.condition, conditions.on_job_success('a')!); + t.true(testEdgeCondition(edge.b.condition, {})); }); test('convert edge condition on_job_failure', (t) => { @@ -463,14 +514,14 @@ test('convert edge condition on_job_failure', (t) => { }; const { plan } = convertRun(run as Run); - const [job] = plan.jobs; - - t.truthy(job.next?.b); - t.is(job.next.b.condition, conditions.on_job_failure('a')); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; + t.truthy(edge.b); + t.is(edge.b.condition, conditions.on_job_failure('a')!); // Check that this is valid js t.true( - testEdgeCondition(job.next.b.condition, { + testEdgeCondition(edge.b.condition, { errors: { a: {} }, }) ); @@ -485,13 +536,13 @@ test('convert edge condition on_job_success with a funky id', (t) => { edges: [createEdge(id_a, 'b', { condition: 'on_job_success' })], }; const { plan } = convertRun(run as Run); - const [job] = plan.jobs; - - t.truthy(job.next?.b); - t.is(job.next.b.condition, conditions.on_job_success(id_a)); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; + t.truthy(edge.b); + t.is(edge.b.condition, conditions.on_job_success(id_a)!); // Check that this is valid js - t.true(testEdgeCondition(job.next.b.condition, {})); + t.true(testEdgeCondition(edge.b.condition, {})); }); test('convert edge condition always', (t) => { @@ -503,21 +554,7 @@ test('convert edge condition always', (t) => { }; const { plan } = convertRun(run as Run); - const [job] = plan.jobs; - - t.false(job.next.b.hasOwnProperty('condition')); -}); - -test('convert random options', (t) => { - const run: Partial = { - id: 'w', - options: { - a: 1, - b: 2, - c: 3, - }, - }; - const { options } = convertRun(run as Run); - - t.deepEqual(options, { a: 1, b: 2, c: 3 }); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; + 
t.false(edge.b.hasOwnProperty('condition')); }); From 6b5583bf793d02d8bd793fe7a2110fa1575683a7 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 7 Feb 2024 15:13:16 +0000 Subject: [PATCH 066/128] worker: typings --- packages/lexicon/core.d.ts | 20 +-- packages/lexicon/lightning.d.ts | 30 ++++- packages/lexicon/package.json | 3 + packages/ws-worker/src/api/execute.ts | 19 +-- packages/ws-worker/src/api/reasons.ts | 19 ++- packages/ws-worker/src/channels/run.ts | 18 +-- packages/ws-worker/src/events/run-complete.ts | 3 +- packages/ws-worker/src/events/run-error.ts | 6 +- .../ws-worker/src/events/step-complete.ts | 6 +- packages/ws-worker/src/events/step-start.ts | 10 +- packages/ws-worker/src/mock/resolvers.ts | 3 +- packages/ws-worker/src/mock/runtime-engine.ts | 9 +- packages/ws-worker/src/server.ts | 6 +- packages/ws-worker/src/types.d.ts | 4 +- .../ws-worker/src/util/create-run-state.ts | 24 ++-- .../ws-worker/src/util/log-final-reason.ts | 2 +- .../test/util/create-run-state.test.ts | 117 +++++++++--------- pnpm-lock.yaml | 9 +- 18 files changed, 174 insertions(+), 134 deletions(-) diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index a67dcef48..17a79e59b 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -1,3 +1,5 @@ +import { SanitizePolicies } from '@openfn/logger'; + /** * An execution plan is a portable definition of a Work Order, * or, a unit of work to execute @@ -76,6 +78,9 @@ export type WorkflowOptions = { timeout?: number; stepTimeout?: number; start?: StepId; + + // TODO not supported yet I don't think? + sanitize?: SanitizePolicies; }; export type StepId = string; @@ -96,14 +101,13 @@ export interface Step { * Not actually keen on the node/edge semantics here * Maybe StepLink? 
*/ -export type StepEdge = - | boolean - | string - | { - condition?: string; // Javascript expression (function body, not function) - label?: string; - disabled?: boolean; - }; +export type StepEdge = boolean | string | ConditionalStepEdge; + +export type ConditionalStepEdge = { + condition?: string; // Javascript expression (function body, not function) + label?: string; + disabled?: boolean; +}; /** * A no-op type of Step diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index ae5247ffd..2a14282be 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -1,3 +1,7 @@ +import type { SanitizePolicies } from '@openfn/logger'; + +type StepId = string; + /** * Type definitions for Lightning and Worker interfaces * @@ -9,6 +13,7 @@ // An run object returned by Lightning export type Run = { id: string; + name?: string; dataclip_id: string; starting_node_id: string; @@ -16,18 +21,32 @@ export type Run = { jobs: Node[]; edges: Edge[]; - options?: Record; // TODO type the expected options + options?: RunOptions; +}; + +/** + * These are options that can be sent to the worker with an execution plan + * They broadly map to the Workflow Options that are fed straight into the runtime + * and saved to the plan itself + * (although at the time of writing timeout is handled by the worker, not the runtime) + */ +export type RunOptions = { + runTimeoutMs?: number; + sanitize?: SanitizePolicies; + start?: StepId; }; // TODO rename to step // maybe also split into jobs and triggers export type Node = { id: string; + name?: string; body?: string; adaptor?: string; - credential?: any; // TODO tighten this up, string or object + credential?: any; + credential_id?: string; type?: 'webhook' | 'cron'; // trigger only - state?: any; // Initial state / defaults + state?: State; }; export interface Edge { @@ -39,11 +58,12 @@ export interface Edge { condition?: string; error_path?: boolean; errors?: any; + enabled?: boolean; } -export 
type DataClip = object; +export type DataClip = Record; -export type Credential = object; +export type Credential = Record; export type ExitReasonStrings = | 'success' diff --git a/packages/lexicon/package.json b/packages/lexicon/package.json index 20ea6cbc8..b4589f210 100644 --- a/packages/lexicon/package.json +++ b/packages/lexicon/package.json @@ -10,5 +10,8 @@ "types": "./lightning.d/.ts" } } + }, + "devDependencies": { + "@openfn/logger": "workspace:^" } } diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 7511bcd77..d06e51275 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -1,4 +1,4 @@ -import type { ExecutionPlan } from '@openfn/lexicon'; +import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; import type { RunLogPayload, RunStartPayload, @@ -62,12 +62,13 @@ export function execute( engine: RuntimeEngine, logger: Logger, plan: ExecutionPlan, + input: Lazy, options: RunOptions = {}, onFinish = (_result: any) => {} ) { logger.info('executing ', plan.id); - const state = createRunState(plan, options); + const state = createRunState(plan, input); const context: Context = { channel, state, logger, engine, onFinish }; @@ -135,18 +136,18 @@ export function execute( // TODO we need to remove this from here and let the runtime take care of it through // the resolver. 
See https://github.com/OpenFn/kit/issues/403 // TODO come back and work out how initial state will work - if (typeof plan.initialState === 'string') { - logger.debug('loading dataclip', plan.initialState); - plan.initialState = await loadDataclip(channel, plan.initialState); + if (typeof input === 'string') { + logger.debug('loading dataclip', input); + const loadedInput = await loadDataclip(channel, input); logger.success('dataclip loaded'); - logger.debug(plan.initialState); + return loadedInput; } - return plan; + return input; }) // Execute (which we have to wrap in a promise chain to handle initial state) - .then(() => { + .then((input: State) => { try { - engine.execute(plan, { resolvers, ...options }); + engine.execute(plan, input, { resolvers, ...options }); } catch (e: any) { // TODO what if there's an error? handleRunError(context, { diff --git a/packages/ws-worker/src/api/reasons.ts b/packages/ws-worker/src/api/reasons.ts index 73fbd0661..37411a020 100644 --- a/packages/ws-worker/src/api/reasons.ts +++ b/packages/ws-worker/src/api/reasons.ts @@ -1,11 +1,6 @@ -import type { - ExitReason, - ExitReasonStrings, - State, - RunState, -} from '../types'; - -import type { JobNode } from '@openfn/runtime'; +import { State, Step } from '@openfn/lexicon'; +import { ExitReason, ExitReasonStrings } from '@openfn/lexicon/lightning'; +import type { RunState } from '../types'; // This takes the result state and error from the job const calculateJobExitReason = ( @@ -30,7 +25,7 @@ const calculateJobExitReason = ( }; // It has next jobs, but they weren't executed -const isLeafNode = (state: RunState, job: JobNode) => { +const isLeafNode = (state: RunState, job: Step) => { // A node is a leaf if: // It has no `next` jobs at all if (!job.next || Object.keys(job.next).length == 0) { @@ -47,11 +42,11 @@ const calculateRunExitReason = (state: RunState): ExitReason => { // basically becomes the exit reason // So If we get here, we basically just need to look to see if there's a 
fail on a leaf node // (we ignore fails on non-leaf nodes) - const leafJobReasons: ExitReason[] = state.plan.jobs - .filter((job: JobNode) => isLeafNode(state, job)) + const leafJobReasons: ExitReason[] = state.plan.workflow.steps + .filter((job) => isLeafNode(state, job)) // TODO what if somehow there is no exit reason for a job? // This implies some kind of exception error, no? - .map(({ id }: JobNode) => state.reasons[id!]); + .map(({ id }) => state.reasons[id!]); const fail = leafJobReasons.find((r) => r && r.reason === 'fail'); if (fail) { diff --git a/packages/ws-worker/src/channels/run.ts b/packages/ws-worker/src/channels/run.ts index f7a1dffdd..4cac9e7ab 100644 --- a/packages/ws-worker/src/channels/run.ts +++ b/packages/ws-worker/src/channels/run.ts @@ -1,11 +1,12 @@ -import convertRun from '../util/convert-run'; -import { getWithReply } from '../util'; -import { Run, RunOptions, Channel, Socket } from '../types'; -import { ExecutionPlan } from '@openfn/runtime'; -import { GET_PLAN, GetPlanReply } from '../events'; - +import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; +import type { GetPlanReply, Run, RunOptions } from '@openfn/lexicon/lightning'; import type { Logger } from '@openfn/logger'; +import { getWithReply } from '../util'; +import convertRun from '../util/convert-run'; +import { GET_PLAN } from '../events'; +import type { Channel, Socket } from '../types'; + // TODO what happens if this channel join fails? 
// Lightning could vanish, channel could error on its side, or auth could be wrong // We don't have a good feedback mechanism yet - worker:queue is the only channel @@ -21,6 +22,7 @@ const joinRunChannel = ( channel: Channel; plan: ExecutionPlan; options: RunOptions; + input: Lazy; }>((resolve, reject) => { // TMP - lightning seems to be sending two responses to me // just for now, I'm gonna gate the handling here @@ -36,9 +38,9 @@ const joinRunChannel = ( if (!didReceiveOk) { didReceiveOk = true; logger.success(`connected to ${channelName}`, e); - const { plan, options } = await loadRun(channel); + const { plan, options, input } = await loadRun(channel); logger.debug('converted run as execution plan:', plan); - resolve({ channel, plan, options }); + resolve({ channel, plan, options, input }); } }) .receive('error', (err: any) => { diff --git a/packages/ws-worker/src/events/run-complete.ts b/packages/ws-worker/src/events/run-complete.ts index 1554fb569..75c52f351 100644 --- a/packages/ws-worker/src/events/run-complete.ts +++ b/packages/ws-worker/src/events/run-complete.ts @@ -1,6 +1,7 @@ import type { WorkflowCompletePayload } from '@openfn/engine-multi'; +import type { RunCompletePayload } from '@openfn/lexicon/lightning'; -import { RUN_COMPLETE, RunCompletePayload } from '../events'; +import { RUN_COMPLETE } from '../events'; import { calculateRunExitReason } from '../api/reasons'; import { sendEvent, Context } from '../api/execute'; import logFinalReason from '../util/log-final-reason'; diff --git a/packages/ws-worker/src/events/run-error.ts b/packages/ws-worker/src/events/run-error.ts index 7f8375c64..d37feb5ca 100644 --- a/packages/ws-worker/src/events/run-error.ts +++ b/packages/ws-worker/src/events/run-error.ts @@ -1,8 +1,8 @@ -import { calculateJobExitReason } from '../api/reasons'; - +import type { RunCompletePayload } from '@openfn/lexicon/lightning'; import type { WorkflowErrorPayload } from '@openfn/engine-multi'; -import { RUN_COMPLETE, 
RunCompletePayload } from '../events'; +import { calculateJobExitReason } from '../api/reasons'; +import { RUN_COMPLETE } from '../events'; import { sendEvent, Context, onJobError } from '../api/execute'; import logFinalReason from '../util/log-final-reason'; diff --git a/packages/ws-worker/src/events/step-complete.ts b/packages/ws-worker/src/events/step-complete.ts index 5400dc897..51c5bfe8d 100644 --- a/packages/ws-worker/src/events/step-complete.ts +++ b/packages/ws-worker/src/events/step-complete.ts @@ -1,12 +1,12 @@ import crypto from 'node:crypto'; +import type { StepCompletePayload } from '@openfn/lexicon/lightning'; +import type { JobCompletePayload } from '@openfn/engine-multi'; -import { STEP_COMPLETE, StepCompletePayload } from '../events'; +import { STEP_COMPLETE } from '../events'; import { stringify } from '../util'; import { calculateJobExitReason } from '../api/reasons'; import { sendEvent, Context } from '../api/execute'; -import type { JobCompletePayload } from '@openfn/engine-multi'; - export default function onStepComplete( { channel, state }: Context, event: JobCompletePayload, diff --git a/packages/ws-worker/src/events/step-start.ts b/packages/ws-worker/src/events/step-start.ts index 9703fb0e5..561652431 100644 --- a/packages/ws-worker/src/events/step-start.ts +++ b/packages/ws-worker/src/events/step-start.ts @@ -1,9 +1,11 @@ import crypto from 'node:crypto'; -import { JobStartPayload } from '@openfn/engine-multi'; import { timestamp } from '@openfn/logger'; +import { JobStartPayload } from '@openfn/engine-multi'; +import type { Job } from '@openfn/lexicon'; +import type { StepStartPayload } from '@openfn/lexicon/lightning'; import pkg from '../../package.json' assert { type: 'json' }; -import { STEP_START, StepStartPayload } from '../events'; +import { STEP_START } from '../events'; import { sendEvent, Context, onJobLog } from '../api/execute'; import calculateVersionString from '../util/versions'; @@ -20,7 +22,9 @@ export default async 
function onStepStart( state.activeStep = crypto.randomUUID(); state.activeJob = event.jobId; - const job = state.plan.jobs.find(({ id }) => id === event.jobId); + const job = state.plan.workflow.steps.find( + ({ id }) => id === event.jobId + ) as Job; const input_dataclip_id = state.inputDataclips[event.jobId]; diff --git a/packages/ws-worker/src/mock/resolvers.ts b/packages/ws-worker/src/mock/resolvers.ts index 489107e95..25ad81559 100644 --- a/packages/ws-worker/src/mock/resolvers.ts +++ b/packages/ws-worker/src/mock/resolvers.ts @@ -1,4 +1,5 @@ -import type { State, Credential } from '../types'; +import type { State } from '@openfn/lexicon'; +import type { Credential } from '@openfn/lexicon/lightning'; import { Resolvers } from '@openfn/engine-multi'; const mockResolveCredential = (_credId: string) => diff --git a/packages/ws-worker/src/mock/runtime-engine.ts b/packages/ws-worker/src/mock/runtime-engine.ts index b8f2741e5..f1bc22468 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ b/packages/ws-worker/src/mock/runtime-engine.ts @@ -1,7 +1,8 @@ import { EventEmitter } from 'node:events'; import crypto from 'node:crypto'; -import run, { ExecutionPlan } from '@openfn/runtime'; +import run from '@openfn/runtime'; import * as engine from '@openfn/engine-multi'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; import mockResolvers from './resolvers'; @@ -79,12 +80,14 @@ async function createMock() { resolvers: mockResolvers, } ) => { - const { id, jobs } = xplan; + const { id } = xplan; + const { steps } = xplan.workflow; activeWorkflows[id!] 
= true; const threadId = crypto.randomUUID(); - for (const job of jobs) { + for (const step of steps) { + const job = step as Job; if (typeof job.configuration === 'string') { // Call the crendtial callback, but don't do anything with it job.configuration = await options.resolvers?.credential?.( diff --git a/packages/ws-worker/src/server.ts b/packages/ws-worker/src/server.ts index 6616dd4ef..6bd6fb217 100644 --- a/packages/ws-worker/src/server.ts +++ b/packages/ws-worker/src/server.ts @@ -5,8 +5,8 @@ import koaLogger from 'koa-logger'; import Router from '@koa/router'; import { humanId } from 'human-id'; import { createMockLogger, Logger } from '@openfn/logger'; - -import { INTERNAL_RUN_COMPLETE, ClaimRun } from './events'; +import { ClaimRun } from '@openfn/lexicon/lightning'; +import { INTERNAL_RUN_COMPLETE } from './events'; import destroy from './api/destroy'; import startWorkloop from './api/workloop'; import claim from './api/claim'; @@ -162,6 +162,7 @@ function createServer(engine: RuntimeEngine, options: ServerOptions = {}) { channel: runChannel, plan, options, + input, } = await joinRunChannel(app.socket, token, id, logger); // Callback to be triggered when the work is done (including errors) @@ -176,6 +177,7 @@ function createServer(engine: RuntimeEngine, options: ServerOptions = {}) { engine, logger, plan, + input, options, onFinish ); diff --git a/packages/ws-worker/src/types.d.ts b/packages/ws-worker/src/types.d.ts index 2a296d1b0..a9d09fbeb 100644 --- a/packages/ws-worker/src/types.d.ts +++ b/packages/ws-worker/src/types.d.ts @@ -1,6 +1,6 @@ import { SanitizePolicies } from '@openfn/logger'; +import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; import type { Channel as PhxChannel } from 'phoenix'; -import type { ExecutionPlan } from '@openfn/runtime'; export { Socket }; @@ -9,7 +9,7 @@ export type RunState = { activeStep?: string; activeJob?: string; plan: ExecutionPlan; - options: RunOptions; + input: Lazy; dataclips: Record; // For 
each run, map the input ids // TODO better name maybe? diff --git a/packages/ws-worker/src/util/create-run-state.ts b/packages/ws-worker/src/util/create-run-state.ts index b9134e73a..7227da833 100644 --- a/packages/ws-worker/src/util/create-run-state.ts +++ b/packages/ws-worker/src/util/create-run-state.ts @@ -1,20 +1,17 @@ -import type { ExecutionPlan } from '@openfn/runtime'; -import type { RunOptions, RunState } from '../types'; +import type { ExecutionPlan, Job, Lazy, State } from '@openfn/lexicon'; +import type { RunState } from '../types'; -export default ( - plan: ExecutionPlan, - options: RunOptions = {} -): RunState => { +export default (plan: ExecutionPlan, input?: Lazy): RunState => { const state = { - plan, lastDataclipId: '', dataclips: {}, inputDataclips: {}, reasons: {}, - options, + plan, + input, } as RunState; - if (typeof plan.initialState === 'string') { + if (typeof input === 'string') { // We need to initialise inputDataclips so that the first run // has its inputDataclip set properly // Difficulty: the starting node is a trigger and NOT a run @@ -22,9 +19,10 @@ export default ( // and set the input state on THAT // find the first job - let startNode = plan.jobs[0]; - if (plan.start) { - startNode = plan.jobs.find(({ id }) => id === plan.start)!; + const jobs = plan.workflow.steps as Job[]; + let startNode = jobs[0]; + if (plan.options.start) { + startNode = jobs.find(({ id }) => id === plan.options.start)!; } // TODO throw with validation error of some kind if this node could not be found @@ -40,7 +38,7 @@ export default ( // For any runs downstream of the initial state, // Set up the input dataclip initialRuns.forEach((id) => { - state.inputDataclips[id] = plan.initialState as string; + state.inputDataclips[id] = input; }); } else { // what if initial state is an object? 
diff --git a/packages/ws-worker/src/util/log-final-reason.ts b/packages/ws-worker/src/util/log-final-reason.ts index aaa37c492..4a2f83981 100644 --- a/packages/ws-worker/src/util/log-final-reason.ts +++ b/packages/ws-worker/src/util/log-final-reason.ts @@ -1,6 +1,6 @@ import { timestamp } from '@openfn/logger'; +import { ExitReason } from '@openfn/lexicon/lightning'; import { Context, onJobLog } from '../api/execute'; -import { ExitReason } from '../types'; export default async (context: Context, reason: ExitReason) => { const time = (timestamp() - BigInt(10e6)).toString(); diff --git a/packages/ws-worker/test/util/create-run-state.test.ts b/packages/ws-worker/test/util/create-run-state.test.ts index 9bf15e960..7424ee957 100644 --- a/packages/ws-worker/test/util/create-run-state.test.ts +++ b/packages/ws-worker/test/util/create-run-state.test.ts @@ -1,101 +1,100 @@ import test from 'ava'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; import { createRunState } from '../../src/util'; +const createPlan = (jobs: Partial[]) => + ({ + workflow: { + steps: jobs.map((j) => ({ expression: '.', ...j })), + }, + options: {}, + } as ExecutionPlan); + test('create run', (t) => { - const options = { timeout: 666 }; - const plan = { jobs: [{ id: 'a' }] }; - const run = createRunState(plan, options); + const plan = createPlan([{ id: 'a' }]); + const input = undefined; + + const run = createRunState(plan, input); t.deepEqual(run.plan, plan); t.deepEqual(run.lastDataclipId, ''); t.deepEqual(run.dataclips, {}); t.deepEqual(run.inputDataclips, {}); t.deepEqual(run.reasons, {}); - t.deepEqual(run.options, options); }); test('Set initial input dataclip if no explicit start and first job is a step', (t) => { - const plan = { initialState: 'x', jobs: [{ id: 'a', expression: '.' 
}] }; - const run = createRunState(plan); + const plan = createPlan([{ id: 'a' }]); + const input = 'x'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 'x' }); }); test('Set initial input dataclip if the explicit start is a step', (t) => { - const plan = { - initialState: 'x', - start: 'a', - jobs: [ - { id: 'b', expression: '.' }, - { id: 'a', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([{ id: 'a' }, { id: 'b' }]); + plan.options.start = 'a'; + const input = 'x'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 'x' }); }); test('Set initial input dataclip if the start is a trigger (simple)', (t) => { - const plan = { - initialState: 's', - start: 't', - jobs: [ - { id: 't', next: { a: true } }, - { id: 'a', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([{ id: 't', next: { a: true } }, { id: 'a' }]); + plan.options.start = 'a'; + const input = 's'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 's' }); }); test('Set initial input dataclip if the start is a trigger (complex)', (t) => { - const plan = { - initialState: 's', - start: 't', - jobs: [ - { id: 'a', expression: '.' }, - { id: 'b', expression: '.' }, - { id: 'c', expression: '.' }, - { id: 'd', expression: '.' }, - { id: 't', next: { c: true } }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([ + { id: 'a' }, + { id: 'b' }, + { id: 'c' }, + { id: 'd' }, + { id: 't', next: { c: true }, expression: undefined }, + ]); + plan.options.start = 't'; + const input = 's'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { c: 's' }); }); test('Set initial input dataclip with a trigger as implicit start', (t) => { - const plan = { - initialState: 's', - jobs: [ - { id: 't', next: { c: true } }, - { id: 'a', expression: '.' }, - { id: 'b', expression: '.' 
}, - { id: 'c', expression: '.' }, - { id: 'd', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([ + { id: 't', next: { c: true }, expression: undefined }, + { id: 'a', expression: '.' }, + { id: 'b', expression: '.' }, + { id: 'c', expression: '.' }, + { id: 'd', expression: '.' }, + ]); + const input = 's'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { c: 's' }); }); test('Set initial input dataclip with a trigger with multiple downstream jobs', (t) => { - const plan = { - initialState: 's', - start: 't', - jobs: [ - { id: 'a', expression: '.' }, - { id: 'b', expression: '.' }, - { id: 't', next: { a: true, b: true, c: true } }, - { id: 'c', expression: '.' }, - { id: 'd', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([ + { id: 'a' }, + { id: 'b' }, + { id: 't', next: { a: true, b: true, c: true }, expression: undefined }, + { id: 'c' }, + { id: 'd' }, + ]); + plan.options.start = 't'; + const input = 's'; + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 's', b: 's', c: 's' }); }); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 60a1d46ec..2108be315 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -428,7 +428,11 @@ importers: packages/engine-multi/tmp/repo: {} - packages/lexicon: {} + packages/lexicon: + devDependencies: + '@openfn/logger': + specifier: workspace:^ + version: link:../logger packages/lightning-mock: dependencies: @@ -618,6 +622,9 @@ importers: '@openfn/engine-multi': specifier: workspace:* version: link:../engine-multi + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@openfn/logger': specifier: workspace:* version: link:../logger From f4b54afb7804512b09c743ea79727bf411a64b85 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 7 Feb 2024 16:52:09 +0000 Subject: [PATCH 067/128] worker: fix all tests --- packages/ws-worker/src/mock/runtime-engine.ts | 22 +- 
packages/ws-worker/src/util/convert-run.ts | 37 ++-- packages/ws-worker/test/api/execute.test.ts | 15 +- packages/ws-worker/test/channels/run.test.ts | 20 +- .../test/events/run-complete.test.ts | 9 +- .../test/events/step-complete.test.ts | 14 +- .../ws-worker/test/events/step-start.test.ts | 37 ++-- packages/ws-worker/test/lightning.test.ts | 145 ++++++------ .../test/mock/runtime-engine.test.ts | 206 +++++++++--------- packages/ws-worker/test/reasons.test.ts | 17 +- packages/ws-worker/test/util.ts | 9 +- .../ws-worker/test/util/convert-run.test.ts | 20 +- 12 files changed, 276 insertions(+), 275 deletions(-) diff --git a/packages/ws-worker/src/mock/runtime-engine.ts b/packages/ws-worker/src/mock/runtime-engine.ts index f1bc22468..eddd866e2 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ b/packages/ws-worker/src/mock/runtime-engine.ts @@ -2,7 +2,7 @@ import { EventEmitter } from 'node:events'; import crypto from 'node:crypto'; import run from '@openfn/runtime'; import * as engine from '@openfn/engine-multi'; -import type { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { ExecutionPlan, Job, State } from '@openfn/lexicon'; import mockResolvers from './resolvers'; @@ -14,23 +14,6 @@ export type EngineEvent = | typeof engine.WORKFLOW_LOG | typeof engine.WORKFLOW_START; -export type WorkflowStartEvent = { - workflowId: string; - threadId: string; -}; - -export type WorkflowCompleteEvent = { - workflowId: string; - error?: any; // hmm maybe not - threadId: string; -}; - -export type WorkflowErrorEvent = { - workflowId: string; - threadId: string; - message: string; -}; - // this is basically a fake adaptor // these functions will be injected into scope const helpers = { @@ -76,6 +59,7 @@ async function createMock() { const execute = async ( xplan: ExecutionPlan, + input: State, options: { resolvers?: engine.Resolvers; throw?: boolean } = { resolvers: mockResolvers, } @@ -137,7 +121,7 @@ async function createMock() { dispatch('workflow-start', 
{ workflowId: id, threadId: threadId }); try { - await run(xplan, undefined, opts as any); + await run(xplan, input, opts as any); dispatch('workflow-complete', { workflowId: id, threadId: threadId }); } catch (e: any) { dispatch('workflow-error', { diff --git a/packages/ws-worker/src/util/convert-run.ts b/packages/ws-worker/src/util/convert-run.ts index edb0ee6a8..d15ba35b3 100644 --- a/packages/ws-worker/src/util/convert-run.ts +++ b/packages/ws-worker/src/util/convert-run.ts @@ -10,7 +10,8 @@ import type { WorkflowOptions, Lazy, } from '@openfn/lexicon'; -import { Run, RunOptions, Edge } from '@openfn/lexicon/lightning'; +import { Run, Edge } from '@openfn/lexicon/lightning'; +import { ExecuteOptions } from '@openfn/engine-multi'; export const conditions: Record string | null> = { @@ -38,26 +39,26 @@ const mapTriggerEdgeCondition = (edge: Edge) => { return condition; }; -const mapOptions = ( - runOptions: RunOptions = {}, - workflowOptions: WorkflowOptions = {} -): WorkflowOptions => { - if (runOptions?.runTimeoutMs) { - workflowOptions.timeout = runOptions?.runTimeoutMs; - } - if (runOptions?.sanitize) { - workflowOptions.sanitize = runOptions?.sanitize; - } - return workflowOptions; -}; - export default ( run: Run -): { plan: ExecutionPlan; options: WorkflowOptions; input: Lazy } => { - const opts: WorkflowOptions = {}; +): { plan: ExecutionPlan; options: ExecuteOptions; input: Lazy } => { + // Some options get mapped straight through to the runtime's workflow options + // TODO or maybe not? Maybe they're all sent to the engine instead? 
+ const runtimeOpts: Omit = {}; + + // But some need to get passed down into the engine's options + const engineOpts: ExecuteOptions = {}; + + if (run.options?.runTimeoutMs) { + engineOpts.runTimeoutMs = run.options.runTimeoutMs; + } + if (run.options?.sanitize) { + engineOpts.sanitize = run.options.sanitize; + } const plan: Partial = { id: run.id, + options: runtimeOpts, }; let initialState; @@ -66,7 +67,7 @@ export default ( } if (run.starting_node_id) { - opts.start = run.starting_node_id; + runtimeOpts.start = run.starting_node_id; } const nodes: Record = {}; @@ -155,7 +156,7 @@ export default ( return { plan: plan as ExecutionPlan, - options: mapOptions(run.options, opts), + options: engineOpts, input: initialState || {}, }; }; diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 3d23375ba..69b59e3c0 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -275,17 +275,20 @@ test('execute should pass the final result to onFinish', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: 'fn(() => ({ done: true }))', - }, - ], + workflow: { + steps: [ + { + expression: 'fn(() => ({ done: true }))', + }, + ], + }, }; const options = {}; + const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { + execute(channel, engine, logger, plan, input, options, (result) => { t.deepEqual(result.state, { done: true }); done(); }); diff --git a/packages/ws-worker/test/channels/run.test.ts b/packages/ws-worker/test/channels/run.test.ts index e5e580bb7..f66dc7a3f 100644 --- a/packages/ws-worker/test/channels/run.test.ts +++ b/packages/ws-worker/test/channels/run.test.ts @@ -36,14 +36,16 @@ test('loadRun should return an execution plan and options', async (t) => { const { plan, options } = await loadRun(channel); t.like(plan, { id: 'run-1', - jobs: [ - { - id: 'job-1', - configuration: 'a', - expression: 
'fn(a => a)', - adaptor: '@openfn/language-common@1.0.0', - }, - ], + workflow: { + steps: [ + { + id: 'job-1', + configuration: 'a', + expression: 'fn(a => a)', + adaptor: '@openfn/language-common@1.0.0', + }, + ], + }, }); t.is(options.sanitize, 'obfuscate'); t.is(options.runTimeoutMs, 10); @@ -70,7 +72,7 @@ test('should join an run channel with a token', async (t) => { ); t.truthy(channel); - t.deepEqual(plan, { id: 'a', jobs: [] }); + t.deepEqual(plan, { id: 'a', workflow: { steps: [] }, options: {} }); t.deepEqual(options, { runTimeoutMs: 10 }); }); diff --git a/packages/ws-worker/test/events/run-complete.test.ts b/packages/ws-worker/test/events/run-complete.test.ts index 8ca730082..957ef4454 100644 --- a/packages/ws-worker/test/events/run-complete.test.ts +++ b/packages/ws-worker/test/events/run-complete.test.ts @@ -4,10 +4,11 @@ import handleRunComplete from '../../src/events/run-complete'; import { mockChannel } from '../../src/mock/sockets'; import { RUN_COMPLETE, RUN_LOG } from '../../src/events'; import { createRunState } from '../../src/util'; +import { createPlan } from '../util'; test('should send an run:complete event', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [] }; + const plan = createPlan(); const state = createRunState(plan); state.dataclips = { @@ -30,7 +31,7 @@ test('should send an run:complete event', async (t) => { test('should call onFinish with final dataclip', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [] }; + const plan = createPlan(); const state = createRunState(plan); state.dataclips = { @@ -58,7 +59,7 @@ test('should call onFinish with final dataclip', async (t) => { test('should send a reason log and return reason for success', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [] }; + const plan = createPlan(); const state = createRunState(plan); state.dataclips = { @@ -98,7 +99,7 @@ test('should send a reason log and 
return reason for success', async (t) => { test('should send a reason log and return reason for fail', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [{ id: 'x' }] }; + const plan = createPlan({ id: 'x', expression: '.' }); const state = createRunState(plan); state.dataclips = { diff --git a/packages/ws-worker/test/events/step-complete.test.ts b/packages/ws-worker/test/events/step-complete.test.ts index f12d1a47a..6effc4a44 100644 --- a/packages/ws-worker/test/events/step-complete.test.ts +++ b/packages/ws-worker/test/events/step-complete.test.ts @@ -5,10 +5,10 @@ import { mockChannel } from '../../src/mock/sockets'; import { createRunState } from '../../src/util'; import { STEP_COMPLETE } from '../../src/events'; -import type { ExecutionPlan } from '@openfn/runtime'; +import { createPlan } from '../util'; test('clear the step id and active job on state', async (t) => { - const plan = { id: 'run-1' }; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -28,7 +28,7 @@ test('clear the step id and active job on state', async (t) => { test('setup input mappings on on state', async (t) => { let lightningEvent; - const plan = { id: 'run-1' }; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -50,7 +50,7 @@ test('setup input mappings on on state', async (t) => { }); test('save the dataclip to state', async (t) => { - const plan = { id: 'run-1' } as ExecutionPlan; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -70,7 +70,7 @@ test('save the dataclip to state', async (t) => { }); test('write a reason to state', async (t) => { - const plan = { id: 'run-1' } as ExecutionPlan; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -95,7 +95,7 @@ test('write a reason to state', async (t) => { }); test('generate an exit reason: success', async (t) => { - const plan = { id: 'run-1' } as 
ExecutionPlan; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -119,7 +119,7 @@ test('generate an exit reason: success', async (t) => { }); test('send a step:complete event', async (t) => { - const plan = { id: 'run-1' }; + const plan = createPlan(); const jobId = 'job-1'; const result = { x: 10 }; diff --git a/packages/ws-worker/test/events/step-start.test.ts b/packages/ws-worker/test/events/step-start.test.ts index 9cebbdc94..85f2d869e 100644 --- a/packages/ws-worker/test/events/step-start.test.ts +++ b/packages/ws-worker/test/events/step-start.test.ts @@ -10,7 +10,7 @@ import { RUN_LOG, STEP_START } from '../../src/events'; import pkg from '../../package.json' assert { type: 'json' }; test('set a step id and active job on state', async (t) => { - const plan = { id: 'run-1', jobs: [{ id: 'job-1' }] }; + const plan = { id: 'run-1', workflow: { steps: [{ id: 'job-1' }] } }; const jobId = 'job-1'; const state = createRunState(plan); @@ -29,22 +29,25 @@ test('set a step id and active job on state', async (t) => { test('send a step:start event', async (t) => { const plan = { id: 'run-1', - initialState: 'abc', - jobs: [ - { id: 'job-1', expression: '.' }, - { id: 'job-2', expression: '.' }, - ], + workflow: { + steps: [ + { id: 'job-1', expression: '.' }, + { id: 'job-2', expression: '.' }, + ], + }, + options: {}, }; + const input = 'abc'; const jobId = 'job-1'; - const state = createRunState(plan); + const state = createRunState(plan, input); state.activeJob = jobId; state.activeStep = 'b'; const channel = mockChannel({ [STEP_START]: (evt) => { t.is(evt.job_id, jobId); - t.is(evt.input_dataclip_id, plan.initialState); + t.is(evt.input_dataclip_id, input); t.truthy(evt.step_id); return true; }, @@ -57,9 +60,12 @@ test('send a step:start event', async (t) => { test('step:start event should include versions', async (t) => { const plan = { id: 'run-1', - initialState: 'abc', - jobs: [{ id: 'job-1', expression: '.' 
}], + workflow: { + steps: [{ id: 'job-1', expression: '.' }], + }, + options: {}, }; + const input = 'abc'; const jobId = 'job-1'; const versions = { @@ -76,7 +82,7 @@ test('step:start event should include versions', async (t) => { versions, }; - const state = createRunState(plan); + const state = createRunState(plan, input); state.activeJob = jobId; state.activeStep = 'b'; @@ -98,9 +104,12 @@ test('also logs the version number', async (t) => { let logEvent; const plan = { id: 'run-1', - initialState: 'abc', - jobs: [{ id: 'job-1', expression: '.' }], + workflow: { + steps: [{ id: 'job-1', expression: '.' }], + }, + options: {}, }; + const input = 'abc'; const jobId = 'job-1'; const versions = { @@ -117,7 +126,7 @@ test('also logs the version number', async (t) => { versions, }; - const state = createRunState(plan); + const state = createRunState(plan, input); state.activeJob = jobId; state.activeStep = 'b'; diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 337e1847a..d2cb941b8 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -88,7 +88,7 @@ test.serial( ); test.serial( - 'should run an run which returns an expression as JSON', + 'should run a run which returns an expression as JSON', async (t) => { return new Promise((done) => { const run = { @@ -110,7 +110,7 @@ test.serial( } ); -test.serial('should run an run which returns intial state', async (t) => { +test.serial('should run a run which returns initial state', async (t) => { return new Promise((done) => { lng.addDataclip('x', { data: 66, @@ -160,29 +160,26 @@ test.todo(`events: lightning should receive a ${e.RUN_START} event`); // for each event we can see a copy of the server state // (if that helps anything?) 
-test.serial( - `events: lightning should receive a ${e.GET_PLAN} event`, - (t) => { - return new Promise((done) => { - const run = getRun(); - - let didCallEvent = false; - lng.onSocketEvent(e.GET_PLAN, run.id, ({ payload }) => { - // This doesn't test that the correct run gets sent back - // We'd have to add an event to the engine for that - // (not a bad idea) - didCallEvent = true; - }); +test.serial(`events: lightning should receive a ${e.GET_PLAN} event`, (t) => { + return new Promise((done) => { + const run = getRun(); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { - t.true(didCallEvent); - done(); - }); + let didCallEvent = false; + lng.onSocketEvent(e.GET_PLAN, run.id, ({ payload }) => { + // This doesn't test that the correct run gets sent back + // We'd have to add an event to the engine for that + // (not a bad idea) + didCallEvent = true; + }); - lng.enqueueRun(run); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + t.true(didCallEvent); + done(); }); - } -); + + lng.enqueueRun(run); + }); +}); test.serial( `events: lightning should receive a ${e.GET_CREDENTIAL} event`, @@ -313,37 +310,34 @@ test.serial( } ); -test.serial( - `events: lightning should receive a ${e.RUN_LOG} event`, - (t) => { - return new Promise((done) => { - const run = { - id: 'run-1', - jobs: [ - { - body: 'fn((s) => { console.log("x"); return s })', - }, - ], - }; - - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { - const log = payload; +test.serial(`events: lightning should receive a ${e.RUN_LOG} event`, (t) => { + return new Promise((done) => { + const run = { + id: 'run-1', + jobs: [ + { + body: 'fn((s) => { console.log("x"); return s })', + }, + ], + }; - t.is(log.level, 'info'); - t.truthy(log.run_id); - t.truthy(log.step_id); - t.truthy(log.message); - t.deepEqual(log.message, ['x']); - }); + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { + const log = payload; - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { - done(); - }); + 
t.is(log.level, 'info'); + t.truthy(log.run_id); + t.truthy(log.step_id); + t.truthy(log.message); + t.deepEqual(log.message, ['x']); + }); - lng.enqueueRun(run); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + done(); }); - } -); + + lng.enqueueRun(run); + }); +}); // Skipping because this is flaky at microsecond resolution // See branch hrtime-send-nanoseconds-to-lightning where this should be more robust @@ -417,37 +411,34 @@ test.serial( } ); -test.serial( - 'should register and de-register runs to the server', - async (t) => { - return new Promise((done) => { - const run = { - id: 'run-1', - jobs: [ - { - body: 'fn(() => ({ count: 122 }))', - }, - ], - }; - - worker.on(e.RUN_START, () => { - t.truthy(worker.workflows[run.id]); - }); +test.serial('should register and de-register runs to the server', async (t) => { + return new Promise((done) => { + const run = { + id: 'run-1', + jobs: [ + { + body: 'fn(() => ({ count: 122 }))', + }, + ], + }; - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { - t.truthy(worker.workflows[run.id]); - // Tidyup is done AFTER lightning receives the event - // This timeout is crude but should work - setTimeout(() => { - t.falsy(worker.workflows[run.id]); - done(); - }, 10); - }); + worker.on(e.RUN_START, () => { + t.truthy(worker.workflows[run.id]); + }); - lng.enqueueRun(run); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + t.truthy(worker.workflows[run.id]); + // Tidyup is done AFTER lightning receives the event + // This timeout is crude but should work + setTimeout(() => { + t.falsy(worker.workflows[run.id]); + done(); + }, 10); }); - } -); + + lng.enqueueRun(run); + }); +}); // TODO this is a server test // What I am testing here is that the first job completes diff --git a/packages/ws-worker/test/mock/runtime-engine.test.ts b/packages/ws-worker/test/mock/runtime-engine.test.ts index bfb9eba63..d9e7bc8c5 100644 --- a/packages/ws-worker/test/mock/runtime-engine.test.ts +++ 
b/packages/ws-worker/test/mock/runtime-engine.test.ts @@ -1,22 +1,26 @@ import test from 'ava'; -import create, { +import type { ExecutionPlan } from '@openfn/lexicon'; + +import type { JobCompleteEvent, JobStartEvent, WorkflowCompleteEvent, WorkflowStartEvent, -} from '../../src/mock/runtime-engine'; -import type { ExecutionPlan } from '@openfn/runtime'; -import { waitForEvent, clone } from '../util'; +} from '@openfn/engine-multi'; +import create from '../../src/mock/runtime-engine'; +import { waitForEvent, clone, createPlan } from '../util'; const sampleWorkflow = { id: 'w1', - jobs: [ - { - id: 'j1', - adaptor: 'common@1.0.0', - expression: 'fn(() => ({ data: { x: 10 } }))', - }, - ], + workflow: { + steps: [ + { + id: 'j1', + adaptor: 'common@1.0.0', + expression: 'fn(() => ({ data: { x: 10 } }))', + }, + ], + }, } as ExecutionPlan; let engine; @@ -25,20 +29,20 @@ test.before(async () => { engine = await create(); }); -test('getStatus() should should have no active workflows', async (t) => { +test.serial('getStatus() should should have no active workflows', async (t) => { const { active } = engine.getStatus(); t.is(active, 0); }); -test('Dispatch start events for a new workflow', async (t) => { +test.serial('Dispatch start events for a new workflow', async (t) => { engine.execute(sampleWorkflow); const evt = await waitForEvent(engine, 'workflow-start'); t.truthy(evt); t.is(evt.workflowId, 'w1'); }); -test('getStatus should report one active workflow', async (t) => { +test.serial('getStatus should report one active workflow', async (t) => { engine.execute(sampleWorkflow); const { active } = engine.getStatus(); @@ -46,7 +50,7 @@ test('getStatus should report one active workflow', async (t) => { t.is(active, 1); }); -test('Dispatch complete events when a workflow completes', async (t) => { +test.serial('Dispatch complete events when a workflow completes', async (t) => { engine.execute(sampleWorkflow); const evt = await waitForEvent( engine, @@ -57,7 +61,7 @@ 
test('Dispatch complete events when a workflow completes', async (t) => { t.truthy(evt.threadId); }); -test('Dispatch start events for a job', async (t) => { +test.serial('Dispatch start events for a job', async (t) => { engine.execute(sampleWorkflow); const evt = await waitForEvent(engine, 'job-start'); t.truthy(evt); @@ -65,7 +69,7 @@ test('Dispatch start events for a job', async (t) => { t.is(evt.jobId, 'j1'); }); -test('Dispatch complete events for a job', async (t) => { +test.serial('Dispatch complete events for a job', async (t) => { engine.execute(sampleWorkflow); const evt = await waitForEvent(engine, 'job-complete'); t.truthy(evt); @@ -74,36 +78,26 @@ test('Dispatch complete events for a job', async (t) => { t.deepEqual(evt.state, { data: { x: 10 } }); }); -test('Dispatch error event for a crash', async (t) => { - const wf = { - id: 'xyz', - jobs: [ - { - id: 'j1', - adaptor: 'common@1.0.0', - expression: 'fn(() => ( @~!"@£!4 )', - }, - ], - } as ExecutionPlan; +test.serial('Dispatch error event for a crash', async (t) => { + const wf = createPlan({ + id: 'j1', + adaptor: 'common@1.0.0', + expression: 'fn(() => ( @~!"@£!4 )', + }); engine.execute(wf); const evt = await waitForEvent(engine, 'workflow-error'); - t.is(evt.workflowId, 'xyz'); + t.is(evt.workflowId, wf.id); t.is(evt.type, 'RuntimeCrash'); t.regex(evt.message, /invalid or unexpected token/i); }); -test('wait function', async (t) => { - const wf = { - id: 'w1', - jobs: [ - { - id: 'j1', - expression: 'wait(100)', - }, - ], - } as ExecutionPlan; +test.serial('wait function', async (t) => { + const wf = createPlan({ + id: 'j1', + expression: 'wait(100)', + }); engine.execute(wf); const start = Date.now(); @@ -113,24 +107,28 @@ test('wait function', async (t) => { t.true(end > 90); }); -test('resolve credential before job-start if credential is a string', async (t) => { - const wf = clone(sampleWorkflow); - wf.jobs[0].configuration = 'x'; +test.serial( + 'resolve credential before job-start if 
credential is a string', + async (t) => { + const wf = clone(sampleWorkflow); + wf.id = t.title; + wf.workflow.steps[0].configuration = 'x'; - let didCallCredentials; - const credential = async (_id) => { - didCallCredentials = true; - return {}; - }; + let didCallCredentials; + const credential = async (_id) => { + didCallCredentials = true; + return {}; + }; - // @ts-ignore - engine.execute(wf, { resolvers: { credential } }); + // @ts-ignore + engine.execute(wf, {}, { resolvers: { credential } }); - await waitForEvent(engine, 'job-start'); - t.true(didCallCredentials); -}); + await waitForEvent(engine, 'job-start'); + t.true(didCallCredentials); + } +); -test('listen to events', async (t) => { +test.serial('listen to events', async (t) => { const called = { 'job-start': false, 'job-complete': false, @@ -139,27 +137,22 @@ test('listen to events', async (t) => { 'workflow-complete': false, }; - const wf = { - id: 'wibble', - jobs: [ - { - id: 'j1', - adaptor: 'common@1.0.0', - expression: 'export default [() => { console.log("x"); }]', - }, - ], - } as ExecutionPlan; + const wf = createPlan({ + id: 'j1', + adaptor: 'common@1.0.0', + expression: 'export default [() => { console.log("x"); }]', + }); engine.listen(wf.id, { 'job-start': ({ workflowId, jobId }) => { called['job-start'] = true; t.is(workflowId, wf.id); - t.is(jobId, wf.jobs[0].id); + t.is(jobId, wf.workflow.steps[0].id); }, 'job-complete': ({ workflowId, jobId }) => { called['job-complete'] = true; t.is(workflowId, wf.id); - t.is(jobId, wf.jobs[0].id); + t.is(jobId, wf.workflow.steps[0].id); // TODO includes state? 
}, 'workflow-log': ({ workflowId, message }) => { @@ -182,7 +175,7 @@ test('listen to events', async (t) => { t.assert(Object.values(called).every((v) => v === true)); }); -test('only listen to events for the correct workflow', async (t) => { +test.serial('only listen to events for the correct workflow', async (t) => { engine.listen('bobby mcgee', { 'workflow-start': ({ workflowId }) => { throw new Error('should not have called this!!'); @@ -194,9 +187,10 @@ test('only listen to events for the correct workflow', async (t) => { t.pass(); }); -test('log events should stringify a string message', async (t) => { +test.serial('log events should stringify a string message', async (t) => { const wf = clone(sampleWorkflow); - wf.jobs[0].expression = + wf.id = t.title; + wf.workflow.steps[0].expression = 'fn((s) => {console.log("haul away joe"); return s; })'; engine.listen(wf.id, { @@ -211,9 +205,11 @@ test('log events should stringify a string message', async (t) => { await waitForEvent(engine, 'workflow-complete'); }); -test('log events should stringify an object message', async (t) => { +test.serial('log events should stringify an object message', async (t) => { const wf = clone(sampleWorkflow); - wf.jobs[0].expression = 'fn((s) => {console.log({ x: 22 }); return s; })'; + wf.id = t.title; + wf.workflow.steps[0].expression = + 'fn((s) => {console.log({ x: 22 }); return s; })'; engine.listen(wf.id, { 'workflow-log': ({ message }) => { @@ -227,50 +223,48 @@ test('log events should stringify an object message', async (t) => { await waitForEvent(engine, 'workflow-complete'); }); -test('do nothing for a job if no expression and adaptor (trigger node)', async (t) => { - const workflow = { - id: 'w1', - jobs: [ - { - id: 'j1', - adaptor: '@openfn/language-common@1.0.0', - }, - ], - } as ExecutionPlan; +test.serial( + 'do nothing for a job if no expression and adaptor (trigger node)', + async (t) => { + // @ts-ignore + const workflow = createPlan({ + id: 'j1', + adaptor: 
'@openfn/language-common@1.0.0', + }); - let didCallEvent = false; + let didCallEvent = false; - engine.listen(workflow.id, { - 'job-start': () => { - didCallEvent = true; - }, - 'job-complete': () => { - didCallEvent = true; - }, - 'workflow-log': () => { - // this can be called - }, - 'workflow-start': () => { - // ditto - }, - 'workflow-complete': () => { - // ditto - }, - }); + engine.listen(workflow.id, { + 'job-start': () => { + didCallEvent = true; + }, + 'job-complete': () => { + didCallEvent = true; + }, + 'workflow-log': () => { + // this can be called + }, + 'workflow-start': () => { + // ditto + }, + 'workflow-complete': () => { + // ditto + }, + }); - engine.execute(workflow); - await waitForEvent(engine, 'workflow-complete'); + engine.execute(workflow); + await waitForEvent(engine, 'workflow-complete'); - t.false(didCallEvent); -}); + t.false(didCallEvent); + } +); -test('timeout', async (t) => { +test.skip('timeout', async (t) => { const wf = clone(sampleWorkflow); - wf.jobs[0].expression = 'wait(1000)'; - // wf.options = { timeout: 10 }; + wf.workflow.steps[0].expression = 'wait(1000)'; // @ts-ignore - engine.execute(wf, { timeout: 10 }); + engine.execute(wf, {}, { timeout: 10 }); const evt = await waitForEvent( engine, diff --git a/packages/ws-worker/test/reasons.test.ts b/packages/ws-worker/test/reasons.test.ts index 3ceecfb1a..b392e1caa 100644 --- a/packages/ws-worker/test/reasons.test.ts +++ b/packages/ws-worker/test/reasons.test.ts @@ -1,11 +1,11 @@ import test from 'ava'; import createRTE from '@openfn/engine-multi'; import { createMockLogger } from '@openfn/logger'; +import type { ExitReason } from '@openfn/lexicon/lightning'; import { createPlan } from './util'; import { execute as doExecute } from '../src/api/execute'; import { mockChannel } from '../src/mock/sockets'; - import { STEP_START, STEP_COMPLETE, @@ -13,7 +13,6 @@ import { RUN_START, RUN_COMPLETE, } from '../src/events'; -import { ExitReason } from '../src/types'; let engine; let 
logger; @@ -39,7 +38,7 @@ test.before(async () => { test.after(async () => engine.destroy()); // Wrap up an execute call, capture the on complete state -const execute = async (plan, options = {}) => +const execute = async (plan, input = {}, options = {}) => new Promise<{ reason: ExitReason; state: any }>((done) => { // Ignore all channel events // In these test we assume that the correct messages are sent to the channel @@ -55,8 +54,7 @@ const execute = async (plan, options = {}) => done(result); }; - // @ts-ignore - doExecute(channel, engine, logger, plan, options, onFinish); + doExecute(channel, engine, logger, plan, input, options, onFinish); }); test('success', async (t) => { @@ -65,9 +63,9 @@ test('success', async (t) => { expression: '(s) => s', }); - plan.initialState = { data: { result: 42 } }; + const input = { data: { result: 42 } }; - const { reason } = await execute(plan); + const { reason } = await execute(plan, input); t.is(reason.reason, 'success'); }); @@ -165,10 +163,11 @@ test('fail: error in the first job, with downstream job that is not run', async { id: 'a', expression: 'export default [(s) => {throw "abort!"}]', - next: { b: true }, + next: { b: '!state.errors' }, }, { id: 'b', + expression: 'export default [(s) => s]', } ); @@ -231,7 +230,7 @@ test('kill: timeout', async (t) => { runTimeoutMs: 100, }; - const { reason } = await execute(plan, options); + const { reason } = await execute(plan, {}, options); t.is(reason.reason, 'kill'); t.is(reason.error_type, 'TimeoutError'); t.is(reason.error_message, 'Workflow failed to return within 100ms'); diff --git a/packages/ws-worker/test/util.ts b/packages/ws-worker/test/util.ts index fe663d009..aea7ffae7 100644 --- a/packages/ws-worker/test/util.ts +++ b/packages/ws-worker/test/util.ts @@ -1,4 +1,4 @@ -import { ExecutionPlan } from '@openfn/runtime'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; import crypto from 'node:crypto'; export const wait = (fn, maxRuns = 100) => @@ -33,10 +33,13 @@ 
export const sleep = (delay = 100) => setTimeout(resolve, delay); }); -export const createPlan = (...jobs) => +export const createPlan = (...steps: Job[]) => ({ id: crypto.randomUUID(), - jobs: [...jobs], + workflow: { + steps, + }, + options: {}, } as ExecutionPlan); export const createEdge = (from: string, to: string) => ({ diff --git a/packages/ws-worker/test/util/convert-run.test.ts b/packages/ws-worker/test/util/convert-run.test.ts index ac042de93..887b4d622 100644 --- a/packages/ws-worker/test/util/convert-run.test.ts +++ b/packages/ws-worker/test/util/convert-run.test.ts @@ -58,6 +58,7 @@ test('convert a single job', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [createJob()], }, @@ -76,6 +77,7 @@ test('convert a single job with names', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { name: 'my-workflow', steps: [createJob({ name: 'my-job' })], @@ -98,12 +100,13 @@ test('convert a single job with options', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [createJob()], }, }); t.deepEqual(options, { - timeout: 10, + runTimeoutMs: 10, sanitize: 'obfuscate', }); }); @@ -121,6 +124,7 @@ test('convert a single job with data', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [createJob({ state: { data: { x: 22 } } })], }, @@ -136,6 +140,7 @@ test('Accept a partial run object', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [], }, @@ -158,9 +163,9 @@ test('handle starting_node_id as options', (t) => { id: 'w', starting_node_id: 'j1', }; - const { options } = convertRun(run as Run); + const { plan } = convertRun(run as Run); - t.deepEqual(options, { + t.deepEqual(plan.options, { start: 'j1', }); }); @@ -176,6 +181,7 @@ test('convert a single trigger', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ { @@ -198,6 +204,7 @@ test('ignore a single edge', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [], }, @@ 
-221,6 +228,7 @@ test('convert a single trigger with an edge', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ { @@ -257,6 +265,7 @@ test('convert a single trigger with two edges', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ { @@ -291,6 +300,7 @@ test('convert a disabled trigger', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ { @@ -314,6 +324,7 @@ test('convert two linked jobs', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ createJob({ id: 'a', next: { b: true } }), @@ -339,6 +350,7 @@ test('convert a job with two upstream jobs', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ createJob({ id: 'a', next: { x: true } }), @@ -361,6 +373,7 @@ test('convert two linked jobs with an edge condition', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ createJob({ id: 'a', next: { b: { condition } } }), @@ -381,6 +394,7 @@ test('convert two linked jobs with a disabled edge', (t) => { t.deepEqual(plan, { id: 'w', + options: {}, workflow: { steps: [ createJob({ id: 'a', next: { b: { disabled: true } } }), From 487c172009c3d8d2afb3f6569a07c1ed3acc431c Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 7 Feb 2024 17:53:50 +0000 Subject: [PATCH 068/128] engine: types --- packages/engine-multi/src/engine.ts | 12 +++--------- packages/engine-multi/src/types.ts | 8 ++++++++ packages/engine-multi/test/api/execute.test.ts | 8 ++++++-- packages/engine-multi/test/engine.test.ts | 3 ++- 4 files changed, 19 insertions(+), 12 deletions(-) diff --git a/packages/engine-multi/src/engine.ts b/packages/engine-multi/src/engine.ts index 6214c969d..991501997 100644 --- a/packages/engine-multi/src/engine.ts +++ b/packages/engine-multi/src/engine.ts @@ -2,7 +2,7 @@ import { EventEmitter } from 'node:events'; import path from 'node:path'; import { fileURLToPath } from 'node:url'; import type { ExecutionPlan, State } from '@openfn/lexicon'; 
-import type { Logger, SanitizePolicies } from '@openfn/logger'; +import type { Logger } from '@openfn/logger'; import { JOB_COMPLETE, @@ -21,6 +21,7 @@ import type { LazyResolvers } from './api'; import type { EngineAPI, EventHandler, + ExecuteOptions, RuntimeEngine, WorkflowState, } from './types'; @@ -75,18 +76,11 @@ export type EngineOptions = { repoDir: string; resolvers?: LazyResolvers; runtimelogger?: Logger; - runTimeoutMs?: number; + runTimeoutMs?: number; // default timeout statePropsToRemove?: string[]; whitelist?: RegExp[]; }; -export type ExecuteOptions = { - memoryLimitMb?: number; - resolvers?: LazyResolvers; - runTimeoutMs?: number; - sanitize?: SanitizePolicies; -}; - export type InternalEngine = RuntimeEngine & { // TODONot a very good type definition, but it calms the tests down [other: string]: any; diff --git a/packages/engine-multi/src/types.ts b/packages/engine-multi/src/types.ts index 082443885..bc69b7445 100644 --- a/packages/engine-multi/src/types.ts +++ b/packages/engine-multi/src/types.ts @@ -4,6 +4,7 @@ import type { EventEmitter } from 'node:events'; import type { EngineOptions } from './engine'; import type { ExecOpts } from './worker/pool'; +import { LazyResolvers } from './api'; export type Resolver = (id: string) => Promise; @@ -43,6 +44,13 @@ export type ExecutionContextConstructor = { options: ExecutionContextOptions; }; +export type ExecuteOptions = { + memoryLimitMb?: number; + resolvers?: LazyResolvers; + runTimeoutMs?: number; + sanitize?: SanitizePolicies; +}; + export type ExecutionContextOptions = EngineOptions & { sanitize?: SanitizePolicies; }; diff --git a/packages/engine-multi/test/api/execute.test.ts b/packages/engine-multi/test/api/execute.test.ts index e1a1bfe57..9b46e2a74 100644 --- a/packages/engine-multi/test/api/execute.test.ts +++ b/packages/engine-multi/test/api/execute.test.ts @@ -14,8 +14,12 @@ import { } from '../../src/events'; import ExecutionContext from '../../src/classes/ExecutionContext'; -import type 
{ ExecutionContextOptions, WorkflowState } from '../../src/types'; -import type { ExecuteOptions, EngineOptions } from '../../src/engine'; +import type { + ExecuteOptions, + ExecutionContextOptions, + WorkflowState, +} from '../../src/types'; +import type { EngineOptions } from '../../src/engine'; const workerPath = path.resolve('dist/test/mock-run.js'); diff --git a/packages/engine-multi/test/engine.test.ts b/packages/engine-multi/test/engine.test.ts index f7b7757a4..d93c85f62 100644 --- a/packages/engine-multi/test/engine.test.ts +++ b/packages/engine-multi/test/engine.test.ts @@ -3,8 +3,9 @@ import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; import type { ExecutionPlan } from '@openfn/lexicon'; -import createEngine, { ExecuteOptions, InternalEngine } from '../src/engine'; +import createEngine, { InternalEngine } from '../src/engine'; import * as e from '../src/events'; +import type { ExecuteOptions } from '../src/types'; const logger = createMockLogger('', { level: 'debug' }); From 93869197c127593392294f5d40906df26c7bb210 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 09:35:04 +0000 Subject: [PATCH 069/128] worker: fix cheeky test somehow missed it last time --- packages/ws-worker/test/api/execute.test.ts | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 69b59e3c0..07e51f957 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -302,14 +302,17 @@ test('execute should return a context object', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: 'fn(() => ({ done: true }))', - }, - ], + workflow: { + steps: [ + { + expression: 'fn(() => ({ done: true }))', + }, + ], + }, }; const options = {}; + const input = {}; return new Promise((done) => { const context = execute( @@ -317,13 +320,13 @@ test('execute should return a 
context object', async (t) => { engine, logger, plan, + input, options, (result) => { done(); } ); t.truthy(context.state); - t.deepEqual(context.state.options, options); t.deepEqual(context.channel, channel); t.deepEqual(context.logger, logger); }); From 6e2db1b18fabc98a1a83858991e8361a444b6b2a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 09:43:05 +0000 Subject: [PATCH 070/128] tests: fix cli tests --- integration-tests/cli/test/cli.test.ts | 2 +- .../cli/test/execute-workflow.test.ts | 32 +----------- .../cli/test/fixtures/circular.json | 26 +++++----- .../test/fixtures/invalid-config-path.json | 14 +++--- .../cli/test/fixtures/invalid-exp-path.json | 12 +++-- .../cli/test/fixtures/invalid-start.json | 18 ++++--- .../cli/test/fixtures/invalid-syntax.json | 12 +++-- .../cli/test/fixtures/multiple-inputs.json | 34 +++++++------ .../cli/test/fixtures/wf-array.json | 40 ++++++++------- .../cli/test/fixtures/wf-conditional.json | 50 ++++++++++--------- .../cli/test/fixtures/wf-count.json | 26 +++++----- .../cli/test/fixtures/wf-errors.json | 44 ++++++++-------- .../cli/test/fixtures/wf-simple.json | 14 +++--- .../cli/test/fixtures/wf-strict.json | 30 +++++------ 14 files changed, 175 insertions(+), 179 deletions(-) diff --git a/integration-tests/cli/test/cli.test.ts b/integration-tests/cli/test/cli.test.ts index 62ce56650..05778ff2d 100644 --- a/integration-tests/cli/test/cli.test.ts +++ b/integration-tests/cli/test/cli.test.ts @@ -14,7 +14,7 @@ test.serial('openfn version', async (t) => { test.serial('openfn test', async (t) => { const { stdout } = await run(t.title); t.regex(stdout, /Versions:/); - t.regex(stdout, /Running test expression/); + t.regex(stdout, /Running test workflow/); t.regex(stdout, /Result: 42/); }); diff --git a/integration-tests/cli/test/execute-workflow.test.ts b/integration-tests/cli/test/execute-workflow.test.ts index a2fa9230f..2162652e8 100644 --- a/integration-tests/cli/test/execute-workflow.test.ts +++ 
b/integration-tests/cli/test/execute-workflow.test.ts @@ -133,36 +133,6 @@ test.serial( } ); -test.serial(`openfn ${jobsPath}/wf-strict.json --strict`, async (t) => { - const { err } = await run(t.title); - t.falsy(err); - - const out = getJSON(); - t.deepEqual(out, { - data: { - name: 'jane', - }, - }); -}); - -test.serial(`openfn ${jobsPath}/wf-strict.json --no-strict`, async (t) => { - const { err } = await run(t.title); - t.falsy(err); - - const out = getJSON(); - t.deepEqual(out, { - x: 22, - data: { - name: 'jane', - }, - references: [ - { - name: 'bob', - }, - ], - }); -}); - test.serial( `openfn ${jobsPath}/wf-errors.json -S "{ \\"data\\": { \\"number\\": 2 } }"`, async (t) => { @@ -181,7 +151,7 @@ test.serial( test.serial( `openfn ${jobsPath}/wf-errors.json -S "{ \\"data\\": { \\"number\\": 32 } }"`, async (t) => { - const { err } = await run(t.title); + const { stdout, err } = await run(t.title); t.falsy(err); const out = getJSON(); diff --git a/integration-tests/cli/test/fixtures/circular.json b/integration-tests/cli/test/fixtures/circular.json index 2b3077d7a..d209b2a85 100644 --- a/integration-tests/cli/test/fixtures/circular.json +++ b/integration-tests/cli/test/fixtures/circular.json @@ -1,14 +1,16 @@ { - "jobs": [ - { - "id": "a", - "expression": "x", - "next": { "b": true } - }, - { - "id": "b", - "expression": "x", - "next": { "a": true } - } - ] + "workflow": { + "steps": [ + { + "id": "a", + "expression": "x", + "next": { "b": true } + }, + { + "id": "b", + "expression": "x", + "next": { "a": true } + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-config-path.json b/integration-tests/cli/test/fixtures/invalid-config-path.json index 28e9ce4b1..1a343436e 100644 --- a/integration-tests/cli/test/fixtures/invalid-config-path.json +++ b/integration-tests/cli/test/fixtures/invalid-config-path.json @@ -1,8 +1,10 @@ { - "jobs": [ - { - "configuration": "does-not-exist.json", - "expression": "." 
- } - ] + "workflow": { + "steps": [ + { + "configuration": "does-not-exist.json", + "expression": "." + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-exp-path.json b/integration-tests/cli/test/fixtures/invalid-exp-path.json index 7cff3440c..6ce8c42ac 100644 --- a/integration-tests/cli/test/fixtures/invalid-exp-path.json +++ b/integration-tests/cli/test/fixtures/invalid-exp-path.json @@ -1,7 +1,9 @@ { - "jobs": [ - { - "expression": "does-not-exist.js" - } - ] + "workflow": { + "steps": [ + { + "expression": "does-not-exist.js" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-start.json b/integration-tests/cli/test/fixtures/invalid-start.json index 6fc284da5..13f0f9ee1 100644 --- a/integration-tests/cli/test/fixtures/invalid-start.json +++ b/integration-tests/cli/test/fixtures/invalid-start.json @@ -1,9 +1,13 @@ { - "start": "nope", - "jobs": [ - { - "id": "x", - "expression": "fn((state) => state)" - } - ] + "options": { + "start": "nope" + }, + "workflow": { + "steps": [ + { + "id": "x", + "expression": "fn((state) => state)" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-syntax.json b/integration-tests/cli/test/fixtures/invalid-syntax.json index 7028961f2..651f73f93 100644 --- a/integration-tests/cli/test/fixtures/invalid-syntax.json +++ b/integration-tests/cli/test/fixtures/invalid-syntax.json @@ -1,7 +1,9 @@ { - "jobs": [ - { - "expression": "invalid.js" - } - ] + "workflow": { + "steps": [ + { + "expression": "invalid.js" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/multiple-inputs.json b/integration-tests/cli/test/fixtures/multiple-inputs.json index 25c28dd9b..59a33a755 100644 --- a/integration-tests/cli/test/fixtures/multiple-inputs.json +++ b/integration-tests/cli/test/fixtures/multiple-inputs.json @@ -1,18 +1,20 @@ { - "jobs": [ - { - "id": "a", - "expression": "x", - "next": { "b": true, "c": true } - }, - { - "id": "b", - "expression": "x", - "next": { "c": true } - 
}, - { - "id": "c", - "expression": "x" - } - ] + "workflow": { + "steps": [ + { + "id": "a", + "expression": "x", + "next": { "b": true, "c": true } + }, + { + "id": "b", + "expression": "x", + "next": { "c": true } + }, + { + "id": "c", + "expression": "x" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-array.json b/integration-tests/cli/test/fixtures/wf-array.json index 763d15457..76b9decaf 100644 --- a/integration-tests/cli/test/fixtures/wf-array.json +++ b/integration-tests/cli/test/fixtures/wf-array.json @@ -1,21 +1,23 @@ { - "jobs": [ - { - "id": "a", - "adaptor": "common", - "expression": "fn((state) => { if (!state.data.items) { state.data.items = []; } return state; });", - "next": { "b": true } - }, - { - "id": "b", - "adaptor": "common", - "expression": "fn((state) => { state.data.items.push('b'); return state; });", - "next": { "c": true } - }, - { - "id": "c", - "adaptor": "common", - "expression": "fn((state) => { state.data.items.push('c'); return state; });" - } - ] + "workflow": { + "steps": [ + { + "id": "a", + "adaptor": "common", + "expression": "fn((state) => { if (!state.data.items) { state.data.items = []; } return state; });", + "next": { "b": true } + }, + { + "id": "b", + "adaptor": "common", + "expression": "fn((state) => { state.data.items.push('b'); return state; });", + "next": { "c": true } + }, + { + "id": "c", + "adaptor": "common", + "expression": "fn((state) => { state.data.items.push('c'); return state; });" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-conditional.json b/integration-tests/cli/test/fixtures/wf-conditional.json index 203f7ce41..4aa758ab5 100644 --- a/integration-tests/cli/test/fixtures/wf-conditional.json +++ b/integration-tests/cli/test/fixtures/wf-conditional.json @@ -1,29 +1,31 @@ { - "start": "start", - "jobs": [ - { - "id": "start", - "state": { - "data": { - "number": 1 + "options": { "start": "start" }, + "workflow": { + "steps": [ + { + "id": "start", + "state": { + 
"data": { + "number": 1 + } + }, + "adaptor": "common", + "expression": "fn((state) => state);", + "next": { + "small": { "condition": "state.data.number < 10" }, + "large": { "condition": "state.data.number >= 10" } } }, - "adaptor": "common", - "expression": "fn((state) => state);", - "next": { - "small": { "condition": "state.data.number < 10" }, - "large": { "condition": "state.data.number >= 10" } + { + "id": "small", + "adaptor": "common", + "expression": "fn((state) => { state.data.result = \"small\"; return state; });" + }, + { + "id": "large", + "adaptor": "common", + "expression": "fn((state) => { state.data.result = \"large\"; return state; });" } - }, - { - "id": "small", - "adaptor": "common", - "expression": "fn((state) => { state.data.result = \"small\"; return state; });" - }, - { - "id": "large", - "adaptor": "common", - "expression": "fn((state) => { state.data.result = \"large\"; return state; });" - } - ] + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-count.json b/integration-tests/cli/test/fixtures/wf-count.json index e20e7a604..5d8e50c71 100644 --- a/integration-tests/cli/test/fixtures/wf-count.json +++ b/integration-tests/cli/test/fixtures/wf-count.json @@ -1,14 +1,16 @@ { - "jobs": [ - { - "adaptor": "common", - "expression": "fn((state) => (state.data.count ? state : { data: { count: 21 } }));", - "next": { "b": true } - }, - { - "id": "b", - "adaptor": "common", - "expression": "fn((state) => { state.data.count = state.data.count * 2; return state; });" - } - ] + "workflow": { + "steps": [ + { + "adaptor": "common", + "expression": "fn((state) => (state.data.count ? 
state : { data: { count: 21 } }));", + "next": { "b": true } + }, + { + "id": "b", + "adaptor": "common", + "expression": "fn((state) => { state.data.count = state.data.count * 2; return state; });" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-errors.json b/integration-tests/cli/test/fixtures/wf-errors.json index 6464479d5..354feeab7 100644 --- a/integration-tests/cli/test/fixtures/wf-errors.json +++ b/integration-tests/cli/test/fixtures/wf-errors.json @@ -1,24 +1,26 @@ { - "start": "start", - "jobs": [ - { - "id": "start", - "adaptor": "common", - "expression": "fn((state) => { if (state.data.number > 10) { throw new Error('abort') }; return state; });", - "next": { - "increment": { "condition": "!state.errors" }, - "do nothing": { "condition": "state.errors" } + "options": { "start": "start" }, + "workflow": { + "steps": [ + { + "id": "start", + "adaptor": "common", + "expression": "fn((state) => { if (state.data.number > 10) { throw new Error('abort') }; return state; });", + "next": { + "increment": { "condition": "!state.errors" }, + "do nothing": { "condition": "state.errors" } + } + }, + { + "id": "increment", + "adaptor": "common", + "expression": "fn((state) => { state.data.number += 1; return state; });" + }, + { + "id": "do nothing", + "adaptor": "common", + "expression": "fn((state) => state);" } - }, - { - "id": "increment", - "adaptor": "common", - "expression": "fn((state) => { state.data.number += 1; return state; });" - }, - { - "id": "do nothing", - "adaptor": "common", - "expression": "fn((state) => state);" - } - ] + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-simple.json b/integration-tests/cli/test/fixtures/wf-simple.json index dfd904068..07caaa188 100644 --- a/integration-tests/cli/test/fixtures/wf-simple.json +++ b/integration-tests/cli/test/fixtures/wf-simple.json @@ -1,8 +1,10 @@ { - "jobs": [ - { - "adaptor": "common", - "expression": "simple.js" - } - ] + "workflow": { + "steps": [ + { + "adaptor": 
"common", + "expression": "simple.js" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-strict.json b/integration-tests/cli/test/fixtures/wf-strict.json index 7461a276a..370afd61f 100644 --- a/integration-tests/cli/test/fixtures/wf-strict.json +++ b/integration-tests/cli/test/fixtures/wf-strict.json @@ -1,17 +1,19 @@ { - "jobs": [ - { - "id": "a", - "adaptor": "common", - "expression": "fn((state) => ({ x: 22, data: { name: 'bob' }, references: [] }));", - "next": { - "b": true + "workflow": { + "steps": [ + { + "id": "a", + "adaptor": "common", + "expression": "fn((state) => ({ x: 22, data: { name: 'bob' }, references: [] }));", + "next": { + "b": true + } + }, + { + "id": "b", + "adaptor": "common", + "expression": "fn(state => composeNextState(state, { name: 'jane' }));" } - }, - { - "id": "b", - "adaptor": "common", - "expression": "fn(state => composeNextState(state, { name: 'jane' }));" - } - ] + ] + } } From 03db316e72c9027ad48d1e0c087921d92b494c79 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 09:50:23 +0000 Subject: [PATCH 071/128] worker: update test --- integration-tests/worker/test/integration.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 399968c81..ae83b4331 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -157,7 +157,7 @@ test("Don't send job logs to stdout", (t) => { // But it SHOULD log engine stuff const runtimeLog = jsonLogs.find( - (l) => l.name === 'R/T' && l.message[0].match(/completed job/i) + (l) => l.name === 'R/T' && l.message[0].match(/completed step/i) ); t.truthy(runtimeLog); done(); From 48ff58892db094f04c5ff74d0176f29ea7736fe8 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 09:57:40 +0000 Subject: [PATCH 072/128] package lock --- pnpm-lock.yaml | 111 
++++--------------------------------------------- 1 file changed, 9 insertions(+), 102 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 2108be315..19c497887 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -104,15 +104,6 @@ importers: specifier: ^3.0.2 version: 3.0.2 - integration-tests/cli/repo: - dependencies: - '@openfn/language-common_1.12.0': - specifier: npm:@openfn/language-common@^1.12.0 - version: /@openfn/language-common@1.12.0 - is-array_1.0.1: - specifier: npm:is-array@^1.0.1 - version: /is-array@1.0.1 - integration-tests/worker: dependencies: '@openfn/engine-multi': @@ -1359,11 +1350,6 @@ packages: heap: 0.2.7 dev: false - /@fastify/busboy@2.1.0: - resolution: {integrity: sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==} - engines: {node: '>=14'} - dev: false - /@inquirer/checkbox@1.3.5: resolution: {integrity: sha512-ZznkPU+8XgNICKkqaoYENa0vTw9jeToEHYyG5gUKpGmY+4PqPTsvLpSisOt9sukLkYzPRkpSCHREgJLqbCG3Fw==} engines: {node: '>=14.18.0'} @@ -1620,22 +1606,6 @@ packages: semver: 7.5.4 dev: true - /@openfn/language-common@1.12.0: - resolution: {integrity: sha512-JQjJpRNdwG5LMmAIO7P7HLgtHYS0UssoibAhMJOpoHk5/kFLDpH3tywpp40Pai33NMzgofxb5gb0MZTgoEk3fw==} - dependencies: - ajv: 8.12.0 - axios: 1.1.3 - csv-parse: 5.5.3 - csvtojson: 2.0.10 - date-fns: 2.30.0 - http-status-codes: 2.3.0 - jsonpath-plus: 4.0.0 - lodash: 4.17.21 - undici: 5.28.3 - transitivePeerDependencies: - - debug - dev: false - /@openfn/language-common@1.7.5: resolution: {integrity: sha512-QivV3v5Oq5fb4QMopzyqUUh+UGHaFXBdsGr6RCmu6bFnGXdJdcQ7GpGpW5hKNq29CkmE23L/qAna1OLr4rP/0w==} dependencies: @@ -2085,15 +2055,6 @@ packages: clean-stack: 4.2.0 indent-string: 5.0.0 - /ajv@8.12.0: - resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} - dependencies: - fast-deep-equal: 3.1.3 - json-schema-traverse: 1.0.0 - require-from-string: 2.0.2 - uri-js: 4.4.1 - dev: false - 
/ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} @@ -2254,6 +2215,7 @@ packages: /asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + dev: true /atob@2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} @@ -2401,6 +2363,7 @@ packages: proxy-from-env: 1.1.0 transitivePeerDependencies: - debug + dev: true /b4a@1.6.1: resolution: {integrity: sha512-AsKjNhz72yxteo/0EtQEiwkMUgk/tGmycXlbG4g3Ard2/ULtNLUykGOkeK0egmN27h0xMAhb76jYccW+XTBExA==} @@ -2481,10 +2444,6 @@ packages: readable-stream: 4.2.0 dev: true - /bluebird@3.7.2: - resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==} - dev: false - /blueimp-md5@2.19.0: resolution: {integrity: sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==} @@ -2877,6 +2836,7 @@ packages: engines: {node: '>= 0.8'} dependencies: delayed-stream: 1.0.0 + dev: true /commander@4.1.1: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} @@ -3019,10 +2979,6 @@ packages: resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} dev: true - /csv-parse@5.5.3: - resolution: {integrity: sha512-v0KW6C0qlZzoGjk6u5tLmVfyZxNgPGXZsWTXshpAgKVGmGXzaVWGdlCFxNx5iuzcXT/oJN1HHM9DZKwtAtYa+A==} - dev: false - /csv-stringify@5.6.5: resolution: {integrity: sha512-PjiQ659aQ+fUTQqSrd1XEDnOr52jh30RBurfzkscaE2tPaFsDH5wOAHJiw8XAHphRknCwMUE9KRayc4K/NbO8A==} dev: true @@ -3037,16 +2993,6 @@ packages: stream-transform: 2.1.3 dev: true - /csvtojson@2.0.10: - resolution: {integrity: 
sha512-lUWFxGKyhraKCW8Qghz6Z0f2l/PqB1W3AO0HKJzGIQ5JRSlR651ekJDiGJbBT4sRNNv5ddnSGVEnsxP9XRCVpQ==} - engines: {node: '>=4.0.0'} - hasBin: true - dependencies: - bluebird: 3.7.2 - lodash: 4.17.21 - strip-bom: 2.0.0 - dev: false - /currently-unhandled@0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -3187,6 +3133,7 @@ packages: /delayed-stream@1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} + dev: true /delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} @@ -4012,10 +3959,6 @@ packages: - supports-color dev: true - /fast-deep-equal@3.1.3: - resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} - dev: false - /fast-diff@1.3.0: resolution: {integrity: sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==} @@ -4162,6 +4105,7 @@ packages: peerDependenciesMeta: debug: optional: true + dev: true /for-in@1.0.2: resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} @@ -4192,6 +4136,7 @@ packages: asynckit: 0.4.0 combined-stream: 1.0.8 mime-types: 2.1.35 + dev: true /fragment-cache@0.2.1: resolution: {integrity: sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==} @@ -4567,10 +4512,6 @@ packages: - supports-color dev: true - /http-status-codes@2.3.0: - resolution: {integrity: sha512-RJ8XvFvpPM/Dmc5SV+dC4y5PCeOhT3x1Hq0NU3rjGeg5a/CqlhZ7uudknPwZFz4aeAXDcbAyaeP7GAo9lvngtA==} - dev: false - /https-proxy-agent@5.0.1: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} @@ -4706,10 
+4647,6 @@ packages: kind-of: 6.0.3 dev: true - /is-array@1.0.1: - resolution: {integrity: sha512-gxiZ+y/u67AzpeFmAmo4CbtME/bs7J2C++su5zQzvQyaxUqVzkh69DI+jN+KZuSO6JaH6TIIU6M6LhqxMjxEpw==} - dev: false - /is-arrayish@0.2.1: resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} dev: true @@ -4975,10 +4912,6 @@ packages: resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==} engines: {node: '>=12'} - /is-utf8@0.2.1: - resolution: {integrity: sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==} - dev: false - /is-weakref@1.0.2: resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} dependencies: @@ -5072,10 +5005,6 @@ packages: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} dev: true - /json-schema-traverse@1.0.0: - resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} - dev: false - /jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} optionalDependencies: @@ -5090,6 +5019,7 @@ packages: /jsonpath-plus@4.0.0: resolution: {integrity: sha512-e0Jtg4KAzDJKKwzbLaUtinCn0RZseWBVRTRGihSpvFlM3wTR7ExSp+PTdeTsDrLNJUe7L7JYJe8mblHX5SCT6A==} engines: {node: '>=10.0'} + dev: true /jsonpath@1.1.1: resolution: {integrity: sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==} @@ -6396,6 +6326,7 @@ packages: /proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: true /proxy-middleware@0.15.0: resolution: {integrity: 
sha512-EGCG8SeoIRVMhsqHQUdDigB2i7qU7fCsWASwn54+nPutYO8n4q6EiwMzyfWlC+dzRFExP+kvcnDFdBDHoZBU7Q==} @@ -6428,6 +6359,7 @@ packages: /punycode@2.3.0: resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} engines: {node: '>=6'} + dev: true /qs@6.11.2: resolution: {integrity: sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==} @@ -6633,11 +6565,6 @@ packages: resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} engines: {node: '>=0.10.0'} - /require-from-string@2.0.2: - resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} - engines: {node: '>=0.10.0'} - dev: false - /require-main-filename@2.0.0: resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} dev: true @@ -7161,13 +7088,6 @@ packages: dependencies: ansi-regex: 6.0.1 - /strip-bom@2.0.0: - resolution: {integrity: sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==} - engines: {node: '>=0.10.0'} - dependencies: - is-utf8: 0.2.1 - dev: false - /strip-bom@3.0.0: resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} engines: {node: '>=4'} @@ -7721,13 +7641,6 @@ packages: resolution: {integrity: sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==} dev: true - /undici@5.28.3: - resolution: {integrity: sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==} - engines: {node: '>=14.0'} - dependencies: - '@fastify/busboy': 2.1.0 - dev: false - /union-value@1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -7778,12 
+7691,6 @@ packages: engines: {node: '>=4'} dev: true - /uri-js@4.4.1: - resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} - dependencies: - punycode: 2.3.0 - dev: false - /urix@0.1.0: resolution: {integrity: sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==} deprecated: Please see https://github.com/lydell/urix#deprecated From b0694fd6ce6ad8570c7fe5cc801d85f36515d06d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 09:59:26 +0000 Subject: [PATCH 073/128] tests: update test --- integration-tests/cli/test/execute-workflow.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/integration-tests/cli/test/execute-workflow.test.ts b/integration-tests/cli/test/execute-workflow.test.ts index 2162652e8..464729ad6 100644 --- a/integration-tests/cli/test/execute-workflow.test.ts +++ b/integration-tests/cli/test/execute-workflow.test.ts @@ -168,7 +168,7 @@ test.serial( severity: 'fail', source: 'runtime', }, - jobId: 'start', + stepId: 'start', message: 'abort', type: 'JobError', }, From ac8acd60f76a801e4efa74dd1c0cf15646a7a500 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 10:11:03 +0000 Subject: [PATCH 074/128] changesets and housekeeping --- .changeset/grumpy-pillows-tie.md | 11 ++++++++--- .changeset/pretty-spoons-beam.md | 8 +++++--- .changeset/ten-dingos-explode.md | 6 ++++++ packages/cli/src/util/load-plan.ts | 9 +-------- 4 files changed, 20 insertions(+), 14 deletions(-) create mode 100644 .changeset/ten-dingos-explode.md diff --git a/.changeset/grumpy-pillows-tie.md b/.changeset/grumpy-pillows-tie.md index 836dacac8..c4b59e4db 100644 --- a/.changeset/grumpy-pillows-tie.md +++ b/.changeset/grumpy-pillows-tie.md @@ -2,6 +2,11 @@ '@openfn/cli': major --- -Remove strict mode -Add support for execution plans -Update terminology +The 1.0 Release of the CLI updates the language and input of the CLI to match 
the nomenclature of Lightning. + +See the readme for details of the new terminology. + +- Add support for execution plans +- Deprecate old workflow format (old workflows are supported and will be automatically converted into the new "execution plans") +- Update terminology across the codebase and docs +- Remove strict mode diff --git a/.changeset/pretty-spoons-beam.md b/.changeset/pretty-spoons-beam.md index 49d6f215f..64ab0fd77 100644 --- a/.changeset/pretty-spoons-beam.md +++ b/.changeset/pretty-spoons-beam.md @@ -2,6 +2,8 @@ '@openfn/runtime': major --- -Update main run() signature -Integrate with lexicon -Remove strict mode options +The 1.0 release of the runtime updates the signatures and language of the runtime to match Lightning. It also includes some housekeeping. + +- Update main run() signature +- Remove strict mode options +- Integrate with lexicon diff --git a/.changeset/ten-dingos-explode.md b/.changeset/ten-dingos-explode.md new file mode 100644 index 000000000..703d5a20b --- /dev/null +++ b/.changeset/ten-dingos-explode.md @@ -0,0 +1,6 @@ +--- +'@openfn/engine-multi': major +'@openfn/ws-worker': major +--- + +The 1.0 release updates the language and input of the CLI to match the nomenclature of Lightning. 
diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts index 018626ac2..490fadede 100644 --- a/packages/cli/src/util/load-plan.ts +++ b/packages/cli/src/util/load-plan.ts @@ -1,7 +1,3 @@ -/* - * New entry point for loading up the input/execution plan - Note that htere's a lot of complexity from load input that I need to deal with here :( - */ import fs from 'node:fs/promises'; import path from 'node:path'; import { isPath } from '@openfn/compiler'; @@ -155,10 +151,7 @@ const loadOldWorkflow = async ( // call loadXPlan now so that any options can be written const final = await loadXPlan(plan, options, logger, defaultName); - // TODO this can be nicer - logger.warn( - 'converted old workflow format into new execution plan format. See below for details' - ); + logger.warn('Converted workflow into new format:'); logger.warn(final); return final; From eef617cdb29a31f4284e874ebc0de1ccde98c990 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 10:20:27 +0000 Subject: [PATCH 075/128] more housekeeping --- packages/engine-multi/src/engine.ts | 2 +- packages/engine-multi/src/test/worker-functions.ts | 1 - packages/runtime/src/runtime.ts | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/packages/engine-multi/src/engine.ts b/packages/engine-multi/src/engine.ts index 991501997..ad868a40e 100644 --- a/packages/engine-multi/src/engine.ts +++ b/packages/engine-multi/src/engine.ts @@ -82,7 +82,7 @@ export type EngineOptions = { }; export type InternalEngine = RuntimeEngine & { - // TODONot a very good type definition, but it calms the tests down + // TODO Not a very good type definition, but it calms the tests down [other: string]: any; }; diff --git a/packages/engine-multi/src/test/worker-functions.ts b/packages/engine-multi/src/test/worker-functions.ts index a10b92a90..f562edbcb 100644 --- a/packages/engine-multi/src/test/worker-functions.ts +++ b/packages/engine-multi/src/test/worker-functions.ts @@ -27,7 +27,6 @@ const 
tasks = { // very very simple intepretation of a run function // Most tests should use the mock-worker instead run: async (plan: ExecutionPlan, _input: any, _adaptorPaths: any) => { - console.log(' >> RUN'); const workflowId = plan.id; publish('worker:workflow-start', { workflowId, diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 1389bf0c1..4c8e3a892 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -27,7 +27,6 @@ export type Options = { // TODO leaving this here for now, but maybe its actually on the xplan? globals?: any; - // TODO state props to remove is a system-level option, not a workflow level one statePropsToRemove?: string[]; }; From 92ee5258647bb89cdeaad7e5cb4b00d76258a580 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 11:02:06 +0000 Subject: [PATCH 076/128] engine: tweak test --- .../engine-multi/test/integration.test.ts | 39 ++++++++++--------- 1 file changed, 20 insertions(+), 19 deletions(-) diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index 5a1c80051..378123eaa 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -179,25 +179,26 @@ test.serial('log errors', (t) => { }, ]); - api.execute(plan, emptyState).on('workflow-log', (evt) => { - if (evt.name === 'JOB') { - t.log(evt); - t.deepEqual( - evt.message, - JSON.stringify([ - { - name: 'Error', - message: 'hola', - }, - ]) - ); - t.pass('workflow logged'); - } - }); - - api.execute(plan, emptyState).on('workflow-complete', () => { - done(); - }); + api + .execute(plan, emptyState) + .on('workflow-log', (evt) => { + if (evt.name === 'JOB') { + t.log(evt); + t.deepEqual( + evt.message, + JSON.stringify([ + { + name: 'Error', + message: 'hola', + }, + ]) + ); + t.pass('workflow logged'); + } + }) + .on('workflow-complete', () => { + done(); + }); }); }); From 
ba7ac084212506caf0e1b4eb4546f06409ca032e Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 11:25:49 +0000 Subject: [PATCH 077/128] runtime: tweak error messages --- integration-tests/cli/test/execute-workflow.test.ts | 6 +++--- packages/lexicon/core.d.ts | 1 - packages/runtime/src/util/log-error.ts | 11 +++++------ 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/integration-tests/cli/test/execute-workflow.test.ts b/integration-tests/cli/test/execute-workflow.test.ts index 464729ad6..550128c7c 100644 --- a/integration-tests/cli/test/execute-workflow.test.ts +++ b/integration-tests/cli/test/execute-workflow.test.ts @@ -148,10 +148,10 @@ test.serial( } ); -test.serial( - `openfn ${jobsPath}/wf-errors.json -S "{ \\"data\\": { \\"number\\": 32 } }"`, +test.serial.only( + `openfn ${jobsPath}/wf-errors.json -iS "{ \\"data\\": { \\"number\\": 32 } }"`, async (t) => { - const { stdout, err } = await run(t.title); + const { err } = await run(t.title); t.falsy(err); const out = getJSON(); diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts index 17a79e59b..ec21ec13a 100644 --- a/packages/lexicon/core.d.ts +++ b/packages/lexicon/core.d.ts @@ -123,7 +123,6 @@ export type ErrorReport = { type: string; // The name/type of error, ie Error, TypeError message: string; // simple human readable message stepId: StepId; // ID of the associated job - jobId?: StepId; // deprecated error: Error; // the original underlying error object code?: string; // The error code, if any (found on node errors) diff --git a/packages/runtime/src/util/log-error.ts b/packages/runtime/src/util/log-error.ts index bbf6ce61f..7c23e5021 100644 --- a/packages/runtime/src/util/log-error.ts +++ b/packages/runtime/src/util/log-error.ts @@ -3,7 +3,7 @@ import type { State, ErrorReport, StepId } from '@openfn/lexicon'; export type ErrorReporter = ( state: State, - jobId: StepId, + stepId: StepId, error: NodeJS.ErrnoException & { severity?: string; handled?: boolean; @@ 
-16,11 +16,10 @@ export type ErrorReporter = ( // Because we're taking closer control of errors // we should be able to report more simply const createErrorReporter = (logger: Logger): ErrorReporter => { - return (state, jobId, error) => { + return (state, stepId, error) => { const report: ErrorReport = { type: error.subtype || error.type || error.name, - jobId, - stepId: jobId, + stepId, message: error.message, error: error, }; @@ -46,13 +45,13 @@ const createErrorReporter = (logger: Logger): ErrorReporter => { } if (error.severity === 'fail') { - logger.error(`Check state.errors.${jobId} for details.`); + logger.error(`Check state.errors.${stepId} for details.`); if (!state.errors) { state.errors = {}; } - state.errors[jobId] = report; + state.errors[stepId] = report; } return report as ErrorReport; From a5958c8376512de6b0bf5485c8d3bfacde54bad3 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 12:29:36 +0000 Subject: [PATCH 078/128] worker: stricter type checkign on tests --- packages/ws-worker/src/mock/sockets.ts | 4 +- packages/ws-worker/test/api/destroy.test.ts | 71 ++++++------ packages/ws-worker/test/api/execute.test.ts | 102 ++++++++++-------- packages/ws-worker/test/api/reasons.test.ts | 14 +-- packages/ws-worker/test/api/workloop.test.ts | 17 ++- .../test/channels/worker-queue.test.ts | 33 +++--- .../test/events/run-complete.test.ts | 22 ++-- .../ws-worker/test/events/run-error.test.ts | 10 +- .../test/events/step-complete.test.ts | 16 +-- .../ws-worker/test/events/step-start.test.ts | 16 +-- packages/ws-worker/test/lightning.test.ts | 78 +++++++------- .../test/mock/runtime-engine.test.ts | 57 +++++----- packages/ws-worker/test/reasons.test.ts | 9 +- packages/ws-worker/test/server.test.ts | 2 +- packages/ws-worker/test/util.ts | 29 ++--- .../ws-worker/test/util/convert-run.test.ts | 39 +++---- packages/ws-worker/test/util/throttle.test.ts | 2 +- packages/ws-worker/tsconfig.json | 2 +- 18 files changed, 269 insertions(+), 254 deletions(-) 
diff --git a/packages/ws-worker/src/mock/sockets.ts b/packages/ws-worker/src/mock/sockets.ts index 942e7c7f7..599c755dd 100644 --- a/packages/ws-worker/src/mock/sockets.ts +++ b/packages/ws-worker/src/mock/sockets.ts @@ -1,7 +1,9 @@ type EventHandler = (evt?: any) => void; // Mock websocket implementations -export const mockChannel = (callbacks: Record = {}) => { +export const mockChannel = ( + callbacks: Record = {} +): any => { const c = { on: (event: string, fn: EventHandler) => { // TODO support multiple callbacks diff --git a/packages/ws-worker/test/api/destroy.test.ts b/packages/ws-worker/test/api/destroy.test.ts index 8fa2c26ea..be12fa887 100644 --- a/packages/ws-worker/test/api/destroy.test.ts +++ b/packages/ws-worker/test/api/destroy.test.ts @@ -1,20 +1,20 @@ import test from 'ava'; import crypto from 'node:crypto'; - import createLightningServer from '@openfn/lightning-mock'; +import { createMockLogger } from '@openfn/logger'; +import { Run } from '@openfn/lexicon/lightning'; + import createWorker from '../../src/server'; import createMockRTE from '../../src/mock/runtime-engine'; - import destroy from '../../src/api/destroy'; -import { createMockLogger } from '@openfn/logger'; -import { Run } from '../../src/types'; const workerPort = 9876; const lightningPort = workerPort + 1; const logger = createMockLogger(); const lightning = createLightningServer({ port: lightningPort }); -let worker; + +let worker: any; test.beforeEach(async () => { const engine = await createMockRTE(); @@ -120,46 +120,43 @@ test.serial('destroy a worker while one run is active', async (t) => { }); }); -test.serial( - 'destroy a worker while multiple runs are active', - async (t) => { - return new Promise((done) => { - let completeCount = 0; - let startCount = 0; - - const doDestroy = async () => { - await destroy(worker, logger); +test.serial('destroy a worker while multiple runs are active', async (t) => { + return new Promise((done) => { + let completeCount = 0; + let startCount 
= 0; - // Ensure all three runs completed - t.is(completeCount, 3); + const doDestroy = async () => { + await destroy(worker, logger); - // should not respond to get - t.false(await ping()); - // should not be claiming - t.false(await waitForClaim()); + // Ensure all three runs completed + t.is(completeCount, 3); - done(); - }; + // should not respond to get + t.false(await ping()); + // should not be claiming + t.false(await waitForClaim()); - lightning.on('run:start', () => { - startCount++; + done(); + }; - // Let all three workflows start before we kill the server - if (startCount === 3) { - doDestroy(); - } - }); + lightning.on('run:start', () => { + startCount++; - lightning.on('run:complete', () => { - completeCount++; - }); + // Let all three workflows start before we kill the server + if (startCount === 3) { + doDestroy(); + } + }); - lightning.enqueueRun(createRun()); - lightning.enqueueRun(createRun()); - lightning.enqueueRun(createRun()); + lightning.on('run:complete', () => { + completeCount++; }); - } -); + + lightning.enqueueRun(createRun()); + lightning.enqueueRun(createRun()); + lightning.enqueueRun(createRun()); + }); +}); test("don't claim after destroy", (t) => { return new Promise((done) => { diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 07e51f957..dd4961fec 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -1,5 +1,6 @@ import test from 'ava'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; import { STEP_START, @@ -23,8 +24,7 @@ import createMockRTE from '../../src/mock/runtime-engine'; import { mockChannel } from '../../src/mock/sockets'; import { stringify, createRunState } from '../../src/util'; -import type { ExecutionPlan } from '@openfn/runtime'; -import type { Run, RunState, JSONLog } from '../../src/types'; +import type { RunState, JSONLog } from 
'../../src/types'; const enc = new TextEncoder(); @@ -98,7 +98,7 @@ test('jobLog should should send a log event outside a run', async (t) => { }, }); - await onJobLog({ channel, state }, log); + await onJobLog({ channel, state } as any, log); }); test('jobLog should should send a log event inside a run', async (t) => { @@ -131,13 +131,13 @@ test('jobLog should should send a log event inside a run', async (t) => { }, }); - await onJobLog({ channel, state }, log); + await onJobLog({ channel, state } as any, log); }); test('jobError should trigger step:complete with a reason', async (t) => { - let stepCompleteEvent; + let stepCompleteEvent: any; - const state = createRunState({ id: 'run-23' } as Run); + const state = createRunState({ id: 'run-23' } as ExecutionPlan); state.activeJob = 'job-1'; state.activeStep = 'b'; @@ -153,7 +153,7 @@ test('jobError should trigger step:complete with a reason', async (t) => { error: { message: 'nope', severity: 'kill', type: 'TEST' }, state: exitState, }; - await onJobError({ channel, state }, event); + await onJobError({ channel, state } as any, event); t.is(stepCompleteEvent.reason, 'kill'); t.is(stepCompleteEvent.error_message, 'nope'); @@ -162,9 +162,9 @@ test('jobError should trigger step:complete with a reason', async (t) => { }); test('jobError should trigger step:complete with a reason and default state', async (t) => { - let stepCompleteEvent; + let stepCompleteEvent: any; - const state = createRunState({ id: 'run-23' } as Run); + const state = createRunState({ id: 'run-23' } as ExecutionPlan); const channel = mockChannel({ [STEP_COMPLETE]: (evt) => { @@ -176,7 +176,7 @@ test('jobError should trigger step:complete with a reason and default state', as const event = { error: { message: 'nope', severity: 'kill', type: 'TEST' }, }; - await onJobError({ channel, state }, event); + await onJobError({ channel, state } as any, event); t.deepEqual(stepCompleteEvent.output_dataclip, '{}'); }); @@ -188,6 +188,7 @@ test('workflowStart 
should send an empty run:start event', async (t) => { }, }); + // @ts-ignore await onWorkflowStart({ channel }); }); @@ -282,7 +283,7 @@ test('execute should pass the final result to onFinish', async (t) => { }, ], }, - }; + } as ExecutionPlan; const options = {}; const input = {}; @@ -309,7 +310,7 @@ test('execute should return a context object', async (t) => { }, ], }, - }; + } as ExecutionPlan; const options = {}; const input = {}; @@ -322,7 +323,7 @@ test('execute should return a context object', async (t) => { plan, input, options, - (result) => { + () => { done(); } ); @@ -349,18 +350,21 @@ test('execute should lazy-load a credential', async (t) => { const plan = { id: 'a', - jobs: [ - { - configuration: 'abc', - expression: 'fn(() => ({ done: true }))', - }, - ], - }; + workflow: { + steps: [ + { + configuration: 'abc', + expression: 'fn(() => ({ done: true }))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { + execute(channel, engine, logger, plan, input, options, (result) => { t.true(didCallCredentials); done(); }); @@ -369,34 +373,35 @@ test('execute should lazy-load a credential', async (t) => { test('execute should lazy-load initial state', async (t) => { const logger = createMockLogger(); - let didCallState = false; + let didLoadState = false; const channel = mockChannel({ ...mockEventHandlers, [GET_DATACLIP]: (id) => { t.truthy(id); - didCallState = true; + didLoadState = true; return toArrayBuffer({}); }, }); const engine = await createMockRTE(); - const plan: Partial = { + const plan = { id: 'a', - // @ts-ignore - initialState: 'abc', - jobs: [ - { - expression: 'fn(() => ({ done: true }))', - }, - ], - }; + workflow: { + steps: [ + { + expression: 'fn(() => ({ done: true }))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = 'abc'; return new Promise((done) => { - execute(channel, engine, logger, plan, 
options, (result) => { - t.true(didCallState); + execute(channel, engine, logger, plan, input, options, () => { + t.true(didLoadState); done(); }); }); @@ -406,10 +411,10 @@ test('execute should call all events on the socket', async (t) => { const logger = createMockLogger(); const engine = await createMockRTE(); - const events = {}; + const events: Record = {}; - const toEventMap = (obj, evt: string) => { - obj[evt] = (e) => { + const toEventMap = (obj: any, evt: string) => { + obj[evt] = (e: any) => { events[evt] = e || true; }; return obj; @@ -430,20 +435,23 @@ test('execute should call all events on the socket', async (t) => { const plan = { id: 'run-1', - jobs: [ - { - id: 'trigger', - configuration: 'a', - adaptor: '@openfn/language-common@1.0.0', - expression: 'fn(() => console.log("x"))', - }, - ], - }; + workflow: { + steps: [ + { + id: 'trigger', + configuration: 'a', + adaptor: '@openfn/language-common@1.0.0', + expression: 'fn(() => console.log("x"))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { + execute(channel, engine, logger, plan, input, options, () => { // Check that events were passed to the socket // This is deliberately crude t.assert(allEvents.every((e) => events[e])); diff --git a/packages/ws-worker/test/api/reasons.test.ts b/packages/ws-worker/test/api/reasons.test.ts index d2d81ef2c..1e753c88c 100644 --- a/packages/ws-worker/test/api/reasons.test.ts +++ b/packages/ws-worker/test/api/reasons.test.ts @@ -4,7 +4,7 @@ import { calculateJobExitReason } from '../../src/api/reasons'; test('success', (t) => { const jobId = 'a'; - const state = {}; + const state: any = {}; const error = undefined; const r = calculateJobExitReason(jobId, state, error); @@ -15,7 +15,7 @@ test('success', (t) => { test('still success if a prior job has errors', (t) => { const jobId = 'a'; - const state = { + const state: any = { errors: { b: { 
type: 'RuntimeError', @@ -34,7 +34,7 @@ test('still success if a prior job has errors', (t) => { test('fail', (t) => { const jobId = 'a'; - const state = { + const state: any = { errors: { a: { type: 'RuntimeError', @@ -52,7 +52,7 @@ test('fail', (t) => { test('crash', (t) => { const jobId = 'a'; - const state = {}; + const state: any = {}; const error = new RuntimeCrash(new ReferenceError('x is not defined')); const r = calculateJobExitReason(jobId, state, error); @@ -63,7 +63,7 @@ test('crash', (t) => { test('crash has priority over fail', (t) => { const jobId = 'a'; - const state = { + const state: any = { errors: { b: { type: 'RuntimeError', @@ -83,7 +83,7 @@ test('crash has priority over fail', (t) => { // But it should not stop us calculating a reason test('success if no state is passed', (t) => { const jobId = 'a'; - const state = undefined; + const state: any = undefined; const error = undefined; const r = calculateJobExitReason(jobId, state, error); @@ -94,7 +94,7 @@ test('success if no state is passed', (t) => { test('success if boolean state is passed', (t) => { const jobId = 'a'; - const state = true; + const state: any = true; const error = undefined; const r = calculateJobExitReason(jobId, state, error); diff --git a/packages/ws-worker/test/api/workloop.test.ts b/packages/ws-worker/test/api/workloop.test.ts index a1772ee01..0b8d3dc24 100644 --- a/packages/ws-worker/test/api/workloop.test.ts +++ b/packages/ws-worker/test/api/workloop.test.ts @@ -1,13 +1,12 @@ import test from 'ava'; +import { createMockLogger } from '@openfn/logger'; import { sleep } from '../util'; - import { mockChannel } from '../../src/mock/sockets'; import startWorkloop from '../../src/api/workloop'; import { CLAIM } from '../../src/events'; -import { createMockLogger } from '@openfn/logger'; -let cancel; +let cancel: any; const logger = createMockLogger(); @@ -17,7 +16,7 @@ test.afterEach(() => { test('workloop can be cancelled', async (t) => { let count = 0; - let cancel; + let 
cancel: any; const app = { queueChannel: mockChannel({ [CLAIM]: () => { @@ -29,7 +28,7 @@ test('workloop can be cancelled', async (t) => { execute: () => {}, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); await sleep(100); // A quirk of how cancel works is that the loop will be called a few times @@ -51,7 +50,7 @@ test('workloop sends the runs:claim event', (t) => { }), execute: () => {}, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); }); }); @@ -73,7 +72,7 @@ test('workloop sends the runs:claim event several times ', (t) => { }), execute: () => {}, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); }); }); @@ -88,13 +87,13 @@ test('workloop calls execute if runs:claim returns runs', (t) => { runs: [{ id: 'a', token: 'x.y.z' }], }), }), - execute: (run) => { + execute: (run: any) => { t.deepEqual(run, { id: 'a', token: 'x.y.z' }); t.pass(); done(); }, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); }); }); diff --git a/packages/ws-worker/test/channels/worker-queue.test.ts b/packages/ws-worker/test/channels/worker-queue.test.ts index 53997e5d5..4dd66b066 100644 --- a/packages/ws-worker/test/channels/worker-queue.test.ts +++ b/packages/ws-worker/test/channels/worker-queue.test.ts @@ -8,7 +8,7 @@ const logger = createMockLogger(); test('should connect', async (t) => { return new Promise((done) => { - connectToWorkerQueue('www', 'a', 'secret', logger, mockSocket).on( + connectToWorkerQueue('www', 'a', 'secret', logger, mockSocket as any).on( 'connect', ({ socket, channel }) => { t.truthy(socket); @@ -28,7 +28,7 @@ test('should connect with an auth token', async (t) => { const secret = 'xyz'; const encodedSecret = new TextEncoder().encode(secret); - function createSocket(endpoint, options) { + function createSocket(endpoint: string, options: any) { const socket = 
mockSocket(endpoint, {}, async () => { const { token } = options.params; @@ -38,17 +38,20 @@ test('should connect with an auth token', async (t) => { return socket; } - connectToWorkerQueue('www', workerId, secret, logger, createSocket).on( - 'connect', - ({ socket, channel }) => { - t.truthy(socket); - t.truthy(socket.connect); - t.truthy(channel); - t.truthy(channel.join); - t.pass('connected'); - done(); - } - ); + connectToWorkerQueue( + 'www', + workerId, + secret, + logger, + createSocket as any + ).on('connect', ({ socket, channel }) => { + t.truthy(socket); + t.truthy(socket.connect); + t.truthy(channel); + t.truthy(channel.join); + t.pass('connected'); + done(); + }); }); }); @@ -58,7 +61,7 @@ test('should fail to connect with an invalid auth token', async (t) => { const secret = 'xyz'; const encodedSecret = new TextEncoder().encode(secret); - function createSocket(endpoint, options) { + function createSocket(endpoint: string, options: any) { const socket = mockSocket(endpoint, {}, async () => { const { token } = options.params; @@ -77,7 +80,7 @@ test('should fail to connect with an invalid auth token', async (t) => { workerId, 'wrong-secret!', logger, - createSocket + createSocket as any ).on('error', (e) => { t.is(e, 'auth_fail'); t.pass('error thrown'); diff --git a/packages/ws-worker/test/events/run-complete.test.ts b/packages/ws-worker/test/events/run-complete.test.ts index 957ef4454..dedbfa3aa 100644 --- a/packages/ws-worker/test/events/run-complete.test.ts +++ b/packages/ws-worker/test/events/run-complete.test.ts @@ -25,7 +25,7 @@ test('should send an run:complete event', async (t) => { const event = {}; - const context = { channel, state, onFinish: () => {} }; + const context: any = { channel, state, onFinish: () => {} }; await handleRunComplete(context, event); }); @@ -44,10 +44,10 @@ test('should call onFinish with final dataclip', async (t) => { [RUN_COMPLETE]: () => true, }); - const context = { + const context: any = { channel, state, - 
onFinish: ({ state: finalState }) => { + onFinish: ({ state: finalState }: any) => { t.deepEqual(result, finalState); }, }; @@ -67,8 +67,8 @@ test('should send a reason log and return reason for success', async (t) => { }; state.lastDataclipId = 'x'; - let logEvent; - let completeEvent; + let logEvent: any; + let completeEvent: any; const channel = mockChannel({ [RUN_LOG]: (e) => { @@ -79,10 +79,10 @@ test('should send a reason log and return reason for success', async (t) => { }, }); - const context = { + const context: any = { channel, state, - onFinish: ({ state: finalState }) => { + onFinish: ({ state: finalState }: any) => { t.deepEqual(result, finalState); }, }; @@ -114,8 +114,8 @@ test('should send a reason log and return reason for fail', async (t) => { }, }; - let logEvent; - let completeEvent; + let logEvent: any; + let completeEvent: any; const channel = mockChannel({ [RUN_LOG]: (e) => { @@ -126,10 +126,10 @@ test('should send a reason log and return reason for fail', async (t) => { }, }); - const context = { + const context: any = { channel, state, - onFinish: ({ state: finalState }) => { + onFinish: ({ state: finalState }: any) => { t.deepEqual(result, finalState); }, }; diff --git a/packages/ws-worker/test/events/run-error.test.ts b/packages/ws-worker/test/events/run-error.test.ts index 2583d2257..90ad86b0d 100644 --- a/packages/ws-worker/test/events/run-error.test.ts +++ b/packages/ws-worker/test/events/run-error.test.ts @@ -5,7 +5,7 @@ import { mockChannel } from '../../src/mock/sockets'; import { RUN_COMPLETE, RUN_LOG, STEP_COMPLETE } from '../../src/events'; import { createRunState } from '../../src/util'; -const plan = { id: 'run-1', jobs: [] }; +const plan = { id: 'run-1', workflow: { steps: [] }, options: {} }; test('runError should trigger runComplete with a reason', async (t) => { const jobId = 'job-1'; @@ -33,7 +33,7 @@ test('runError should trigger runComplete with a reason', async (t) => { const context = { channel, state, onFinish: () => 
{} }; - await onRunError(context, event); + await onRunError(context as any, event); }); test('workflow error should send reason to onFinish', async (t) => { @@ -67,7 +67,7 @@ test('workflow error should send reason to onFinish', async (t) => { }, }; - await onRunError(context, event); + await onRunError(context as any, event); }); test('runError should not call job complete if the job is not active', async (t) => { @@ -100,7 +100,7 @@ test('runError should not call job complete if the job is not active', async (t) }, }; - await onRunError(context, event); + await onRunError(context as any, event); }); test('runError should log the reason', async (t) => { @@ -133,6 +133,6 @@ test('runError should log the reason', async (t) => { const context = { channel, state, onFinish: () => {} }; - await onRunError(context, event); + await onRunError(context as any, event); t.is(logEvent.message[0], 'Run complete with status: crash\nErr: it crashed'); }); diff --git a/packages/ws-worker/test/events/step-complete.test.ts b/packages/ws-worker/test/events/step-complete.test.ts index 6effc4a44..ac3928c3d 100644 --- a/packages/ws-worker/test/events/step-complete.test.ts +++ b/packages/ws-worker/test/events/step-complete.test.ts @@ -20,14 +20,14 @@ test('clear the step id and active job on state', async (t) => { }); const event = { state: { x: 10 } }; - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); t.falsy(state.activeJob); t.falsy(state.activeStep); }); test('setup input mappings on on state', async (t) => { - let lightningEvent; + let lightningEvent: any; const plan = createPlan(); const jobId = 'job-1'; @@ -42,7 +42,7 @@ test('setup input mappings on on state', async (t) => { }); const engineEvent = { state: { x: 10 }, next: ['job-2'] }; - await handleStepStart({ channel, state }, engineEvent); + await handleStepStart({ channel, state } as any, engineEvent); t.deepEqual(state.inputDataclips, { ['job-2']: 
lightningEvent.output_dataclip_id, @@ -62,7 +62,7 @@ test('save the dataclip to state', async (t) => { }); const event = { state: { x: 10 } }; - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); t.is(Object.keys(state.dataclips).length, 1); const [dataclip] = Object.values(state.dataclips); @@ -84,7 +84,7 @@ test('write a reason to state', async (t) => { }); const event = { state: { x: 10 } }; - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); t.is(Object.keys(state.reasons).length, 1); t.deepEqual(state.reasons[jobId], { @@ -102,7 +102,7 @@ test('generate an exit reason: success', async (t) => { state.activeJob = jobId; state.activeStep = 'b'; - let event; + let event: any; const channel = mockChannel({ [STEP_COMPLETE]: (e) => { @@ -110,7 +110,7 @@ test('generate an exit reason: success', async (t) => { }, }); - await handleStepStart({ channel, state }, { state: { x: 10 } }); + await handleStepStart({ channel, state } as any, { state: { x: 10 } }); t.truthy(event); t.is(event.reason, 'success'); @@ -146,5 +146,5 @@ test('send a step:complete event', async (t) => { duration: 61, threadId: 'abc', }; - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); }); diff --git a/packages/ws-worker/test/events/step-start.test.ts b/packages/ws-worker/test/events/step-start.test.ts index 85f2d869e..183c3693f 100644 --- a/packages/ws-worker/test/events/step-start.test.ts +++ b/packages/ws-worker/test/events/step-start.test.ts @@ -10,7 +10,11 @@ import { RUN_LOG, STEP_START } from '../../src/events'; import pkg from '../../package.json' assert { type: 'json' }; test('set a step id and active job on state', async (t) => { - const plan = { id: 'run-1', workflow: { steps: [{ id: 'job-1' }] } }; + const plan = { + id: 'run-1', + workflow: { steps: [{ id: 'job-1' }] }, + options: {}, + }; const jobId = 'job-1'; 
const state = createRunState(plan); @@ -20,7 +24,7 @@ test('set a step id and active job on state', async (t) => { [RUN_LOG]: (x) => x, }); - await handleStepStart({ channel, state }, { jobId }); + await handleStepStart({ channel, state } as any, { jobId }); t.is(state.activeJob, jobId); t.truthy(state.activeStep); @@ -54,7 +58,7 @@ test('send a step:start event', async (t) => { [RUN_LOG]: () => true, }); - await handleStepStart({ channel, state }, { jobId }); + await handleStepStart({ channel, state } as any, { jobId }); }); test('step:start event should include versions', async (t) => { @@ -97,11 +101,11 @@ test('step:start event should include versions', async (t) => { [RUN_LOG]: () => true, }); - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); }); test('also logs the version number', async (t) => { - let logEvent; + let logEvent: any; const plan = { id: 'run-1', workflow: { @@ -140,7 +144,7 @@ test('also logs the version number', async (t) => { }, }); - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); t.truthy(logEvent); t.is(logEvent.level, 'info'); diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index d2cb941b8..0d06858b0 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -11,8 +11,8 @@ import createWorkerServer from '../src/server'; import createMockRTE from '../src/mock/runtime-engine'; import * as e from '../src/events'; -let lng; -let worker; +let lng: any; +let worker: any; const urls = { worker: 'http://localhost:4567', @@ -51,7 +51,7 @@ const getRun = (ext = {}, jobs?: any) => ({ test.serial(`events: lightning should respond to a ${e.CLAIM} event`, (t) => { return new Promise((done) => { - lng.on(e.CLAIM, (evt) => { + lng.on(e.CLAIM, (evt: any) => { const response = evt.payload; t.deepEqual(response, []); done(); @@ -64,9 +64,9 
@@ test.serial( (t) => { return new Promise((done) => { const run = getRun(); - let response; + let response: any; - lng.on(e.CLAIM, ({ payload }) => { + lng.on(e.CLAIM, ({ payload }: any) => { if (payload.length) { response = payload[0]; } @@ -100,7 +100,7 @@ test.serial( ], }; - lng.waitForResult(run.id).then((result) => { + lng.waitForResult(run.id).then((result: any) => { t.deepEqual(result, { count: 122 }); done(); }); @@ -126,7 +126,7 @@ test.serial('should run a run which returns initial state', async (t) => { ], }; - lng.waitForResult(run.id).then((result) => { + lng.waitForResult(run.id).then((result: any) => { t.deepEqual(result, { data: 66 }); done(); }); @@ -142,7 +142,7 @@ test.serial( (t) => { return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { const { final_dataclip_id } = evt.payload; t.assert(typeof final_dataclip_id === 'string'); t.pass('run complete event received'); @@ -165,14 +165,14 @@ test.serial(`events: lightning should receive a ${e.GET_PLAN} event`, (t) => { const run = getRun(); let didCallEvent = false; - lng.onSocketEvent(e.GET_PLAN, run.id, ({ payload }) => { + lng.onSocketEvent(e.GET_PLAN, run.id, () => { // This doesn't test that the correct run gets sent back // We'd have to add an event to the engine for that // (not a bad idea) didCallEvent = true; }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { t.true(didCallEvent); done(); }); @@ -221,7 +221,7 @@ test.serial( }); let didCallEvent = false; - lng.onSocketEvent(e.GET_DATACLIP, run.id, ({ payload }) => { + lng.onSocketEvent(e.GET_DATACLIP, run.id, ({ payload }: any) => { // payload is the incoming/request payload - this tells us which dataclip // the worker is asking for // Note that it doesn't tell us much about what is returned @@ -244,13 +244,13 @@ test.serial(`events: lightning should receive 
a ${e.STEP_START} event`, (t) => { return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.STEP_START, run.id, ({ payload }) => { + lng.onSocketEvent(e.STEP_START, run.id, ({ payload }: any) => { t.is(payload.job_id, 'j'); t.truthy(payload.step_id); t.pass('called run start'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -264,7 +264,7 @@ test.serial( return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }) => { + lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }: any) => { t.is(payload.job_id, 'j'); t.truthy(payload.step_id); t.truthy(payload.output_dataclip); @@ -275,7 +275,7 @@ test.serial( t.pass('called run complete'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { done(); }); @@ -296,12 +296,12 @@ test.serial( }, ]); - lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }) => { + lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }: any) => { t.is(payload.reason, 'fail'); t.pass('called step complete'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, ({ payload }: any) => { done(); }); @@ -321,7 +321,7 @@ test.serial(`events: lightning should receive a ${e.RUN_LOG} event`, (t) => { ], }; - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }: any) => { const log = payload; t.is(log.level, 'info'); @@ -331,7 +331,7 @@ test.serial(`events: lightning should receive a ${e.RUN_LOG} event`, (t) => { t.deepEqual(log.message, ['x']); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -360,13 +360,13 @@ test.serial.skip(`events: logs should have increasing timestamps`, (t) => { lng.onSocketEvent( e.RUN_LOG, run.id, - ({ payload }) => { + ({ 
payload }: any) => { history.push(BigInt(payload.timestamp)); }, false ); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { t.log(history); let last = BigInt(0); @@ -401,7 +401,7 @@ test.serial( return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { t.pass('called run:complete'); done(); }); @@ -426,7 +426,7 @@ test.serial('should register and de-register runs to the server', async (t) => { t.truthy(worker.workflows[run.id]); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { t.truthy(worker.workflows[run.id]); // Tidyup is done AFTER lightning receives the event // This timeout is crude but should work @@ -460,10 +460,10 @@ test.skip('should not claim while at capacity', async (t) => { id: 'run-2', }; - let run1Start; + let run1Start: any; // When the first run starts, we should only have run 1 in progress - lng.onSocketEvent(e.RUN_START, run1.id, (evt) => { + lng.onSocketEvent(e.RUN_START, run1.id, () => { run1Start = Date.now(); t.truthy(worker.workflows[run1.id]); @@ -471,7 +471,7 @@ test.skip('should not claim while at capacity', async (t) => { }); // When the second run starts, we should only have run 2 in progress - lng.onSocketEvent(e.RUN_START, run2.id, (evt) => { + lng.onSocketEvent(e.RUN_START, run2.id, () => { const duration = Date.now() - run1Start; t.true(duration > 490); @@ -481,7 +481,7 @@ test.skip('should not claim while at capacity', async (t) => { // also, the now date should be around 500 ms after the first start }); - lng.onSocketEvent(e.RUN_COMPLETE, run2.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run2.id, () => { done(); }); @@ -497,9 +497,9 @@ test.serial('should pass the right dataclip when running in parallel', (t) => { body: `fn((s) => { s.data.${id} = true; return s; })`, }); - const outputDataclipIds = {}; - 
const inputDataclipIds = {}; - const outputs = {}; + const outputDataclipIds: any = {}; + const inputDataclipIds: any = {}; + const outputs: any = {}; const a = { id: 'a', body: 'fn(() => ({ data: { a: true } }))', @@ -526,7 +526,7 @@ test.serial('should pass the right dataclip when running in parallel', (t) => { const unsub2 = lng.onSocketEvent( e.STEP_START, run.id, - ({ payload }) => { + ({ payload }: any) => { inputDataclipIds[payload.job_id] = payload.input_dataclip_id; }, false @@ -536,14 +536,14 @@ test.serial('should pass the right dataclip when running in parallel', (t) => { const unsub1 = lng.onSocketEvent( e.STEP_COMPLETE, run.id, - ({ payload }) => { + ({ payload }: any) => { outputDataclipIds[payload.job_id] = payload.output_dataclip_id; outputs[payload.job_id] = JSON.parse(payload.output_dataclip); }, false ); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { unsub1(); unsub2(); @@ -594,13 +594,13 @@ test.serial( const unsub = lng.onSocketEvent( e.STEP_COMPLETE, run.id, - (evt) => { + (evt: any) => { results[evt.payload.job_id] = JSON.parse(evt.payload.output_dataclip); }, false ); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { t.is(evt.payload.reason, 'success'); // What we REALLY care about is that the b-c edge condition @@ -620,7 +620,7 @@ test.serial( test.serial(`worker should send a success reason in the logs`, (t) => { return new Promise((done) => { - let log; + let log: any; const run = { id: 'run-1', @@ -631,7 +631,7 @@ test.serial(`worker should send a success reason in the logs`, (t) => { ], }; - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }: any) => { if (payload.message[0].match(/Run complete with status: success/)) { log = payload.message[0]; } @@ -648,7 +648,7 @@ test.serial(`worker should send a success reason in the logs`, (t) => { test.serial(`worker 
should send a fail reason in the logs`, (t) => { return new Promise((done) => { - let log; + let log: any; const run = { id: 'run-1', @@ -659,7 +659,7 @@ test.serial(`worker should send a fail reason in the logs`, (t) => { ], }; - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }: any) => { if (payload.message[0].match(/Run complete with status: fail/)) { log = payload.message[0]; } diff --git a/packages/ws-worker/test/mock/runtime-engine.test.ts b/packages/ws-worker/test/mock/runtime-engine.test.ts index d9e7bc8c5..38b65c43f 100644 --- a/packages/ws-worker/test/mock/runtime-engine.test.ts +++ b/packages/ws-worker/test/mock/runtime-engine.test.ts @@ -2,10 +2,10 @@ import test from 'ava'; import type { ExecutionPlan } from '@openfn/lexicon'; import type { - JobCompleteEvent, - JobStartEvent, - WorkflowCompleteEvent, - WorkflowStartEvent, + JobCompletePayload, + JobStartePayload, + WorkflowCompletePayload, + WorkflowStartPayload, } from '@openfn/engine-multi'; import create from '../../src/mock/runtime-engine'; import { waitForEvent, clone, createPlan } from '../util'; @@ -23,7 +23,7 @@ const sampleWorkflow = { }, } as ExecutionPlan; -let engine; +let engine: any; test.before(async () => { engine = await create(); @@ -37,7 +37,10 @@ test.serial('getStatus() should should have no active workflows', async (t) => { test.serial('Dispatch start events for a new workflow', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'workflow-start'); + const evt = await waitForEvent( + engine, + 'workflow-start' + ); t.truthy(evt); t.is(evt.workflowId, 'w1'); }); @@ -52,7 +55,7 @@ test.serial('getStatus should report one active workflow', async (t) => { test.serial('Dispatch complete events when a workflow completes', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent( + const evt = await waitForEvent( engine, 'workflow-complete' ); @@ -63,7 +66,7 @@ 
test.serial('Dispatch complete events when a workflow completes', async (t) => { test.serial('Dispatch start events for a job', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'job-start'); + const evt = await waitForEvent(engine, 'job-start'); t.truthy(evt); t.is(evt.workflowId, 'w1'); t.is(evt.jobId, 'j1'); @@ -71,7 +74,7 @@ test.serial('Dispatch start events for a job', async (t) => { test.serial('Dispatch complete events for a job', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'job-complete'); + const evt = await waitForEvent(engine, 'job-complete'); t.truthy(evt); t.is(evt.workflowId, 'w1'); t.is(evt.jobId, 'j1'); @@ -86,7 +89,7 @@ test.serial('Dispatch error event for a crash', async (t) => { }); engine.execute(wf); - const evt = await waitForEvent(engine, 'workflow-error'); + const evt = await waitForEvent(engine, 'workflow-error'); t.is(evt.workflowId, wf.id); t.is(evt.type, 'RuntimeCrash'); @@ -101,7 +104,7 @@ test.serial('wait function', async (t) => { engine.execute(wf); const start = Date.now(); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); const end = Date.now() - start; t.true(end > 90); @@ -115,7 +118,7 @@ test.serial( wf.workflow.steps[0].configuration = 'x'; let didCallCredentials; - const credential = async (_id) => { + const credential = async () => { didCallCredentials = true; return {}; }; @@ -123,7 +126,7 @@ test.serial( // @ts-ignore engine.execute(wf, {}, { resolvers: { credential } }); - await waitForEvent(engine, 'job-start'); + await waitForEvent(engine, 'job-start'); t.true(didCallCredentials); } ); @@ -144,46 +147,46 @@ test.serial('listen to events', async (t) => { }); engine.listen(wf.id, { - 'job-start': ({ workflowId, jobId }) => { + 'job-start': ({ workflowId, jobId }: any) => { called['job-start'] = true; t.is(workflowId, wf.id); t.is(jobId, wf.workflow.steps[0].id); }, - 'job-complete': ({ 
workflowId, jobId }) => { + 'job-complete': ({ workflowId, jobId }: any) => { called['job-complete'] = true; t.is(workflowId, wf.id); t.is(jobId, wf.workflow.steps[0].id); // TODO includes state? }, - 'workflow-log': ({ workflowId, message }) => { + 'workflow-log': ({ workflowId, message }: any) => { called['workflow-log'] = true; t.is(workflowId, wf.id); t.truthy(message); }, - 'workflow-start': ({ workflowId }) => { + 'workflow-start': ({ workflowId }: any) => { called['workflow-start'] = true; t.is(workflowId, wf.id); }, - 'workflow-complete': ({ workflowId }) => { + 'workflow-complete': ({ workflowId }: any) => { called['workflow-complete'] = true; t.is(workflowId, wf.id); }, }); engine.execute(wf); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); t.assert(Object.values(called).every((v) => v === true)); }); test.serial('only listen to events for the correct workflow', async (t) => { engine.listen('bobby mcgee', { - 'workflow-start': ({ workflowId }) => { + 'workflow-start': () => { throw new Error('should not have called this!!'); }, }); engine.execute(sampleWorkflow); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); t.pass(); }); @@ -194,7 +197,7 @@ test.serial('log events should stringify a string message', async (t) => { 'fn((s) => {console.log("haul away joe"); return s; })'; engine.listen(wf.id, { - 'workflow-log': ({ message }) => { + 'workflow-log': ({ message }: any) => { t.is(typeof message, 'string'); const result = JSON.parse(message); t.deepEqual(result, ['haul away joe']); @@ -202,7 +205,7 @@ test.serial('log events should stringify a string message', async (t) => { }); engine.execute(wf); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); }); test.serial('log events should stringify an object message', async (t) => { @@ -212,7 +215,7 @@ test.serial('log events should stringify an object 
message', async (t) => { 'fn((s) => {console.log({ x: 22 }); return s; })'; engine.listen(wf.id, { - 'workflow-log': ({ message }) => { + 'workflow-log': ({ message }: any) => { t.is(typeof message, 'string'); const result = JSON.parse(message); t.deepEqual(result, [{ x: 22 }]); @@ -220,7 +223,7 @@ test.serial('log events should stringify an object message', async (t) => { }); engine.execute(wf); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); }); test.serial( @@ -253,7 +256,7 @@ test.serial( }); engine.execute(workflow); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); t.false(didCallEvent); } @@ -266,7 +269,7 @@ test.skip('timeout', async (t) => { // @ts-ignore engine.execute(wf, {}, { timeout: 10 }); - const evt = await waitForEvent( + const evt = await waitForEvent( engine, 'workflow-error' ); diff --git a/packages/ws-worker/test/reasons.test.ts b/packages/ws-worker/test/reasons.test.ts index b392e1caa..c93d4da8f 100644 --- a/packages/ws-worker/test/reasons.test.ts +++ b/packages/ws-worker/test/reasons.test.ts @@ -13,9 +13,10 @@ import { RUN_START, RUN_COMPLETE, } from '../src/events'; +import { ExecutionPlan } from '@openfn/lexicon'; -let engine; -let logger; +let engine: any; +let logger: any; test.before(async () => { logger = createMockLogger(); @@ -38,7 +39,7 @@ test.before(async () => { test.after(async () => engine.destroy()); // Wrap up an execute call, capture the on complete state -const execute = async (plan, input = {}, options = {}) => +const execute = async (plan: ExecutionPlan, input = {}, options = {}) => new Promise<{ reason: ExitReason; state: any }>((done) => { // Ignore all channel events // In these test we assume that the correct messages are sent to the channel @@ -50,7 +51,7 @@ const execute = async (plan, input = {}, options = {}) => [RUN_COMPLETE]: async () => true, }); - const onFinish = (result) => { + const onFinish = (result: 
any) => { done(result); }; diff --git a/packages/ws-worker/test/server.test.ts b/packages/ws-worker/test/server.test.ts index c6eb919b1..1ac45a6cc 100644 --- a/packages/ws-worker/test/server.test.ts +++ b/packages/ws-worker/test/server.test.ts @@ -4,7 +4,7 @@ import createWorkerServer from '../src/server'; test.before(async () => { const engine = await createMockRTE(); - createWorkerServer(engine, { + createWorkerServer(engine as any, { port: 2323, secret: 'abc', maxWorkflows: 1, diff --git a/packages/ws-worker/test/util.ts b/packages/ws-worker/test/util.ts index aea7ffae7..df70a3c99 100644 --- a/packages/ws-worker/test/util.ts +++ b/packages/ws-worker/test/util.ts @@ -1,7 +1,8 @@ import { ExecutionPlan, Job } from '@openfn/lexicon'; +import { Edge, Node } from '@openfn/lexicon/lightning'; import crypto from 'node:crypto'; -export const wait = (fn, maxRuns = 100) => +export const wait = (fn: () => any, maxRuns = 100) => new Promise((resolve) => { let count = 0; let ival = setInterval(() => { @@ -19,11 +20,11 @@ export const wait = (fn, maxRuns = 100) => }, 100); }); -export const clone = (obj) => JSON.parse(JSON.stringify(obj)); +export const clone = (obj: any) => JSON.parse(JSON.stringify(obj)); -export const waitForEvent = (engine, eventName) => +export const waitForEvent = (engine: any, eventName: string) => new Promise((resolve) => { - engine.once(eventName, (e) => { + engine.once(eventName, (e: any) => { resolve(e); }); }); @@ -42,16 +43,18 @@ export const createPlan = (...steps: Job[]) => options: {}, } as ExecutionPlan); -export const createEdge = (from: string, to: string) => ({ - id: `${from}-${to}`, - source_job_id: from, - target_job_id: to, -}); +export const createEdge = (from: string, to: string) => + ({ + id: `${from}-${to}`, + source_job_id: from, + target_job_id: to, + } as Edge); -export const createJob = (body?: string, id?: string) => ({ - id: id || crypto.randomUUID(), - body: body || `fn((s) => s)`, -}); +export const createJob = (body?: 
string, id?: string) => + ({ + id: id || crypto.randomUUID(), + body: body || `fn((s) => s)`, + } as Node); export const createRun = (jobs = [], edges = [], triggers = []) => ({ id: crypto.randomUUID(), diff --git a/packages/ws-worker/test/util/convert-run.test.ts b/packages/ws-worker/test/util/convert-run.test.ts index 887b4d622..c593e8852 100644 --- a/packages/ws-worker/test/util/convert-run.test.ts +++ b/packages/ws-worker/test/util/convert-run.test.ts @@ -1,12 +1,7 @@ import test from 'ava'; import type { Run, Node } from '@openfn/lexicon/lightning'; import convertRun, { conditions } from '../../src/util/convert-run'; -import { - ConditionalStepEdge, - Job, - StepEdge, - StepEdgeObj, -} from '@openfn/lexicon'; +import { ConditionalStepEdge, Job } from '@openfn/lexicon'; // Creates a lightning node (job or trigger) const createNode = (props = {}) => @@ -18,7 +13,7 @@ const createNode = (props = {}) => ...props, } as Node); -const createEdge = (from, to, props = {}) => ({ +const createEdge = (from: string, to: string, props = {}) => ({ id: `${from}-${to}`, source_job_id: from, target_job_id: to, @@ -42,7 +37,7 @@ const createJob = (props = {}) => ({ ...props, }); -const testEdgeCondition = (expr, state) => { +const testEdgeCondition = (expr: string, state: any) => { const fn = new Function('state', 'return ' + expr); return fn(state); }; @@ -408,7 +403,7 @@ test('on_job_success condition: return true if no errors', (t) => { const condition = conditions.on_job_success('a'); const state = {}; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -419,7 +414,7 @@ test('on_job_success condition: return true if state is undefined', (t) => { const condition = conditions.on_job_success('a'); const state = undefined; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -434,7 +429,7 @@ test('on_job_success 
condition: return true if unconnected upstream errors', (t) }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -449,7 +444,7 @@ test('on_job_success condition: return false if the upstream job errored', (t) = }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -464,7 +459,7 @@ test('on_job_failure condition: return true if error immediately upstream', (t) }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -479,7 +474,7 @@ test('on_job_failure condition: return false if unrelated error upstream', (t) = }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -488,7 +483,7 @@ test('on_job_failure condition: return false if no errors', (t) => { const condition = conditions.on_job_failure('a'); const state = {}; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -497,7 +492,7 @@ test('on_job_failure condition: return false if state is undefined', (t) => { const condition = conditions.on_job_failure('a'); const state = undefined; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -515,8 +510,8 @@ test('convert edge condition on_job_success', (t) => { const edge = job.next as Record; t.truthy(edge.b); - t.is(edge.b.condition, conditions.on_job_success('a')!); - t.true(testEdgeCondition(edge.b.condition, {})); + t.is(edge.b.condition!, conditions.on_job_success('a')!); + t.true(testEdgeCondition(edge.b.condition!, {})); }); test('convert edge condition on_job_failure', (t) => { @@ -532,10 +527,10 @@ test('convert edge condition 
on_job_failure', (t) => { const edge = job.next as Record; t.truthy(edge.b); - t.is(edge.b.condition, conditions.on_job_failure('a')!); + t.is(edge.b.condition!, conditions.on_job_failure('a')!); // Check that this is valid js t.true( - testEdgeCondition(edge.b.condition, { + testEdgeCondition(edge.b.condition!, { errors: { a: {} }, }) ); @@ -554,9 +549,9 @@ test('convert edge condition on_job_success with a funky id', (t) => { const edge = job.next as Record; t.truthy(edge.b); - t.is(edge.b.condition, conditions.on_job_success(id_a)!); + t.is(edge.b.condition!, conditions.on_job_success(id_a)!); // Check that this is valid js - t.true(testEdgeCondition(edge.b.condition, {})); + t.true(testEdgeCondition(edge.b.condition!, {})); }); test('convert edge condition always', (t) => { diff --git a/packages/ws-worker/test/util/throttle.test.ts b/packages/ws-worker/test/util/throttle.test.ts index f865f2aca..10260abe8 100644 --- a/packages/ws-worker/test/util/throttle.test.ts +++ b/packages/ws-worker/test/util/throttle.test.ts @@ -88,7 +88,7 @@ test('return in order', async (t) => { const results: string[] = []; - const fn = (name: string, delay: number) => + const fn = (name: string) => new Promise((resolve) => { setTimeout(() => { results.push(name); diff --git a/packages/ws-worker/tsconfig.json b/packages/ws-worker/tsconfig.json index 3be5c53e0..834d5af09 100644 --- a/packages/ws-worker/tsconfig.json +++ b/packages/ws-worker/tsconfig.json @@ -1,6 +1,6 @@ { "extends": "../../tsconfig.common", - "include": ["src/**/*.ts", "test/mock/data.ts", "src/channels/runs"], + "include": ["src/**/*.ts", "test/**/*.ts", "src/channels/runs"], "compilerOptions": { "module": "ESNext" } From 0e742c84f0c4fcccc0509b857e4ef1abacd1c9ea Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 12:30:52 +0000 Subject: [PATCH 079/128] fix test --- packages/ws-worker/test/api/execute.test.ts | 3 +- pnpm-lock.yaml | 104 ++++++++++++++++++-- 2 files changed, 97 insertions(+), 10 deletions(-) 
diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index dd4961fec..e869012a8 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -364,7 +364,7 @@ test('execute should lazy-load a credential', async (t) => { const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, input, options, (result) => { + execute(channel, engine, logger, plan, input, options, () => { t.true(didCallCredentials); done(); }); @@ -394,6 +394,7 @@ test('execute should lazy-load initial state', async (t) => { }, ], }, + options: {}, } as ExecutionPlan; const options = {}; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 19c497887..179639c57 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -104,6 +104,12 @@ importers: specifier: ^3.0.2 version: 3.0.2 + integration-tests/cli/repo: + dependencies: + '@openfn/language-common_1.12.0': + specifier: npm:@openfn/language-common@^1.12.0 + version: /@openfn/language-common@1.12.0 + integration-tests/worker: dependencies: '@openfn/engine-multi': @@ -1350,6 +1356,11 @@ packages: heap: 0.2.7 dev: false + /@fastify/busboy@2.1.0: + resolution: {integrity: sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==} + engines: {node: '>=14'} + dev: false + /@inquirer/checkbox@1.3.5: resolution: {integrity: sha512-ZznkPU+8XgNICKkqaoYENa0vTw9jeToEHYyG5gUKpGmY+4PqPTsvLpSisOt9sukLkYzPRkpSCHREgJLqbCG3Fw==} engines: {node: '>=14.18.0'} @@ -1606,6 +1617,22 @@ packages: semver: 7.5.4 dev: true + /@openfn/language-common@1.12.0: + resolution: {integrity: sha512-JQjJpRNdwG5LMmAIO7P7HLgtHYS0UssoibAhMJOpoHk5/kFLDpH3tywpp40Pai33NMzgofxb5gb0MZTgoEk3fw==} + dependencies: + ajv: 8.12.0 + axios: 1.1.3 + csv-parse: 5.5.3 + csvtojson: 2.0.10 + date-fns: 2.30.0 + http-status-codes: 2.3.0 + jsonpath-plus: 4.0.0 + lodash: 4.17.21 + undici: 5.28.3 + transitivePeerDependencies: + - debug + dev: false + 
/@openfn/language-common@1.7.5: resolution: {integrity: sha512-QivV3v5Oq5fb4QMopzyqUUh+UGHaFXBdsGr6RCmu6bFnGXdJdcQ7GpGpW5hKNq29CkmE23L/qAna1OLr4rP/0w==} dependencies: @@ -2055,6 +2082,15 @@ packages: clean-stack: 4.2.0 indent-string: 5.0.0 + /ajv@8.12.0: + resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js: 4.4.1 + dev: false + /ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} @@ -2215,7 +2251,6 @@ packages: /asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} - dev: true /atob@2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} @@ -2363,7 +2398,6 @@ packages: proxy-from-env: 1.1.0 transitivePeerDependencies: - debug - dev: true /b4a@1.6.1: resolution: {integrity: sha512-AsKjNhz72yxteo/0EtQEiwkMUgk/tGmycXlbG4g3Ard2/ULtNLUykGOkeK0egmN27h0xMAhb76jYccW+XTBExA==} @@ -2444,6 +2478,10 @@ packages: readable-stream: 4.2.0 dev: true + /bluebird@3.7.2: + resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==} + dev: false + /blueimp-md5@2.19.0: resolution: {integrity: sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==} @@ -2836,7 +2874,6 @@ packages: engines: {node: '>= 0.8'} dependencies: delayed-stream: 1.0.0 - dev: true /commander@4.1.1: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} @@ -2979,6 +3016,10 @@ packages: resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} dev: true + 
/csv-parse@5.5.3: + resolution: {integrity: sha512-v0KW6C0qlZzoGjk6u5tLmVfyZxNgPGXZsWTXshpAgKVGmGXzaVWGdlCFxNx5iuzcXT/oJN1HHM9DZKwtAtYa+A==} + dev: false + /csv-stringify@5.6.5: resolution: {integrity: sha512-PjiQ659aQ+fUTQqSrd1XEDnOr52jh30RBurfzkscaE2tPaFsDH5wOAHJiw8XAHphRknCwMUE9KRayc4K/NbO8A==} dev: true @@ -2993,6 +3034,16 @@ packages: stream-transform: 2.1.3 dev: true + /csvtojson@2.0.10: + resolution: {integrity: sha512-lUWFxGKyhraKCW8Qghz6Z0f2l/PqB1W3AO0HKJzGIQ5JRSlR651ekJDiGJbBT4sRNNv5ddnSGVEnsxP9XRCVpQ==} + engines: {node: '>=4.0.0'} + hasBin: true + dependencies: + bluebird: 3.7.2 + lodash: 4.17.21 + strip-bom: 2.0.0 + dev: false + /currently-unhandled@0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -3133,7 +3184,6 @@ packages: /delayed-stream@1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} - dev: true /delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} @@ -3959,6 +4009,10 @@ packages: - supports-color dev: true + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: false + /fast-diff@1.3.0: resolution: {integrity: sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==} @@ -4105,7 +4159,6 @@ packages: peerDependenciesMeta: debug: optional: true - dev: true /for-in@1.0.2: resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} @@ -4136,7 +4189,6 @@ packages: asynckit: 0.4.0 combined-stream: 1.0.8 mime-types: 2.1.35 - dev: true /fragment-cache@0.2.1: resolution: {integrity: 
sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==} @@ -4512,6 +4564,10 @@ packages: - supports-color dev: true + /http-status-codes@2.3.0: + resolution: {integrity: sha512-RJ8XvFvpPM/Dmc5SV+dC4y5PCeOhT3x1Hq0NU3rjGeg5a/CqlhZ7uudknPwZFz4aeAXDcbAyaeP7GAo9lvngtA==} + dev: false + /https-proxy-agent@5.0.1: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} @@ -4912,6 +4968,10 @@ packages: resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==} engines: {node: '>=12'} + /is-utf8@0.2.1: + resolution: {integrity: sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==} + dev: false + /is-weakref@1.0.2: resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} dependencies: @@ -5005,6 +5065,10 @@ packages: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} dev: true + /json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + dev: false + /jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} optionalDependencies: @@ -5019,7 +5083,6 @@ packages: /jsonpath-plus@4.0.0: resolution: {integrity: sha512-e0Jtg4KAzDJKKwzbLaUtinCn0RZseWBVRTRGihSpvFlM3wTR7ExSp+PTdeTsDrLNJUe7L7JYJe8mblHX5SCT6A==} engines: {node: '>=10.0'} - dev: true /jsonpath@1.1.1: resolution: {integrity: sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==} @@ -6326,7 +6389,6 @@ packages: /proxy-from-env@1.1.0: resolution: {integrity: 
sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} - dev: true /proxy-middleware@0.15.0: resolution: {integrity: sha512-EGCG8SeoIRVMhsqHQUdDigB2i7qU7fCsWASwn54+nPutYO8n4q6EiwMzyfWlC+dzRFExP+kvcnDFdBDHoZBU7Q==} @@ -6359,7 +6421,6 @@ packages: /punycode@2.3.0: resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} engines: {node: '>=6'} - dev: true /qs@6.11.2: resolution: {integrity: sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==} @@ -6565,6 +6626,11 @@ packages: resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} engines: {node: '>=0.10.0'} + /require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + dev: false + /require-main-filename@2.0.0: resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} dev: true @@ -7088,6 +7154,13 @@ packages: dependencies: ansi-regex: 6.0.1 + /strip-bom@2.0.0: + resolution: {integrity: sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==} + engines: {node: '>=0.10.0'} + dependencies: + is-utf8: 0.2.1 + dev: false + /strip-bom@3.0.0: resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} engines: {node: '>=4'} @@ -7641,6 +7714,13 @@ packages: resolution: {integrity: sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==} dev: true + /undici@5.28.3: + resolution: {integrity: sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.1.0 + dev: false + /union-value@1.0.1: 
resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -7691,6 +7771,12 @@ packages: engines: {node: '>=4'} dev: true + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.0 + dev: false + /urix@0.1.0: resolution: {integrity: sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==} deprecated: Please see https://github.com/lydell/urix#deprecated From 68fb396c645cdd10436ae2251ff29571a5d6cdb9 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 12:49:17 +0000 Subject: [PATCH 080/128] typing in worker tests --- packages/lexicon/lightning.d.ts | 6 ++++ packages/ws-worker/src/mock/runtime-engine.ts | 3 +- packages/ws-worker/test/api/destroy.test.ts | 2 +- packages/ws-worker/test/api/execute.test.ts | 2 +- packages/ws-worker/test/api/workloop.test.ts | 6 ---- .../test/events/run-complete.test.ts | 8 ++--- .../ws-worker/test/events/run-error.test.ts | 23 ++++++------ .../test/events/step-complete.test.ts | 36 +++++++++++-------- .../ws-worker/test/events/step-start.test.ts | 6 ++-- packages/ws-worker/test/lightning.test.ts | 8 ++--- .../test/mock/runtime-engine.test.ts | 18 ++++++---- packages/ws-worker/test/mock/sockets.test.ts | 2 +- 12 files changed, 67 insertions(+), 53 deletions(-) diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index 2a14282be..33c103167 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -151,5 +151,11 @@ export type StepCompletePayload = ExitReason & { step_id: string; output_dataclip?: string; output_dataclip_id?: string; + thread_id?: string; + mem: { + job: number; + system: number; + }; + duration: number; }; export type StepCompleteReply = void; diff --git a/packages/ws-worker/src/mock/runtime-engine.ts 
b/packages/ws-worker/src/mock/runtime-engine.ts index eddd866e2..f96541056 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ b/packages/ws-worker/src/mock/runtime-engine.ts @@ -5,6 +5,7 @@ import * as engine from '@openfn/engine-multi'; import type { ExecutionPlan, Job, State } from '@openfn/lexicon'; import mockResolvers from './resolvers'; +import { RuntimeEngine } from '@openfn/engine-multi'; export type EngineEvent = | typeof engine.JOB_COMPLETE @@ -155,7 +156,7 @@ async function createMock() { getStatus, listen, destroy, - }; + } as unknown as RuntimeEngine; } export default createMock; diff --git a/packages/ws-worker/test/api/destroy.test.ts b/packages/ws-worker/test/api/destroy.test.ts index be12fa887..ef5fc4d13 100644 --- a/packages/ws-worker/test/api/destroy.test.ts +++ b/packages/ws-worker/test/api/destroy.test.ts @@ -17,7 +17,7 @@ const lightning = createLightningServer({ port: lightningPort }); let worker: any; test.beforeEach(async () => { - const engine = await createMockRTE(); + const engine: any = await createMockRTE(); worker = createWorker(engine, { logger, diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index e869012a8..b1148424b 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -54,7 +54,7 @@ test('send event should resolve when the event is acknowledged', async (t) => { test('send event should throw if an event errors', async (t) => { const channel = mockChannel({ - throw: (x) => { + throw: () => { throw new Error('err'); }, }); diff --git a/packages/ws-worker/test/api/workloop.test.ts b/packages/ws-worker/test/api/workloop.test.ts index 0b8d3dc24..a1ce6df1c 100644 --- a/packages/ws-worker/test/api/workloop.test.ts +++ b/packages/ws-worker/test/api/workloop.test.ts @@ -16,7 +16,6 @@ test.afterEach(() => { test('workloop can be cancelled', async (t) => { let count = 0; - let cancel: any; const app = { queueChannel: 
mockChannel({ [CLAIM]: () => { @@ -37,8 +36,6 @@ test('workloop can be cancelled', async (t) => { test('workloop sends the runs:claim event', (t) => { return new Promise((done) => { - let cancel; - const app = { workflows: {}, queueChannel: mockChannel({ @@ -56,7 +53,6 @@ test('workloop sends the runs:claim event', (t) => { test('workloop sends the runs:claim event several times ', (t) => { return new Promise((done) => { - let cancel; let count = 0; const app = { workflows: {}, @@ -78,8 +74,6 @@ test('workloop sends the runs:claim event several times ', (t) => { test('workloop calls execute if runs:claim returns runs', (t) => { return new Promise((done) => { - let cancel; - const app = { workflows: {}, queueChannel: mockChannel({ diff --git a/packages/ws-worker/test/events/run-complete.test.ts b/packages/ws-worker/test/events/run-complete.test.ts index dedbfa3aa..9220ee28a 100644 --- a/packages/ws-worker/test/events/run-complete.test.ts +++ b/packages/ws-worker/test/events/run-complete.test.ts @@ -23,7 +23,7 @@ test('should send an run:complete event', async (t) => { }, }); - const event = {}; + const event: any = {}; const context: any = { channel, state, onFinish: () => {} }; await handleRunComplete(context, event); @@ -52,7 +52,7 @@ test('should call onFinish with final dataclip', async (t) => { }, }; - const event = { state: result }; + const event: any = { state: result }; await handleRunComplete(context, event); }); @@ -87,7 +87,7 @@ test('should send a reason log and return reason for success', async (t) => { }, }; - const event = { state: result }; + const event: any = { state: result }; await handleRunComplete(context, event); @@ -134,7 +134,7 @@ test('should send a reason log and return reason for fail', async (t) => { }, }; - const event = { state: result }; + const event: any = { state: result }; await handleRunComplete(context, event); diff --git a/packages/ws-worker/test/events/run-error.test.ts b/packages/ws-worker/test/events/run-error.test.ts index 
90ad86b0d..34404327c 100644 --- a/packages/ws-worker/test/events/run-error.test.ts +++ b/packages/ws-worker/test/events/run-error.test.ts @@ -25,7 +25,7 @@ test('runError should trigger runComplete with a reason', async (t) => { [RUN_COMPLETE]: () => true, }); - const event = { + const event: any = { severity: 'crash', type: 'Err', message: 'it crashed', @@ -46,11 +46,11 @@ test('workflow error should send reason to onFinish', async (t) => { const channel = mockChannel({ [RUN_LOG]: () => true, - [STEP_COMPLETE]: (evt) => true, + [STEP_COMPLETE]: () => true, [RUN_COMPLETE]: () => true, }); - const event = { + const event: any = { error: { severity: 'crash', type: 'Err', @@ -62,7 +62,7 @@ test('workflow error should send reason to onFinish', async (t) => { const context = { channel, state, - onFinish: (evt) => { + onFinish: (evt: any) => { t.is(evt.reason.reason, 'crash'); }, }; @@ -76,14 +76,14 @@ test('runError should not call job complete if the job is not active', async (t) const channel = mockChannel({ [RUN_LOG]: () => true, - [STEP_COMPLETE]: (evt) => { + [STEP_COMPLETE]: () => { t.fail('should not call!'); return true; }, [RUN_COMPLETE]: () => true, }); - const event = { + const event: any = { error: { severity: 'crash', type: 'Err', @@ -108,26 +108,29 @@ test('runError should log the reason', async (t) => { const state = createRunState({ id: 'run-1', - jobs: [{ id: 'job-1' }], + workflow: { + steps: [{ id: 'job-1' }], + }, + options: {}, }); state.lastDataclipId = 'x'; state.activeStep = 'b'; state.activeJob = jobId; - const event = { + const event: any = { severity: 'crash', type: 'Err', message: 'it crashed', }; state.reasons['x'] = event; - let logEvent; + let logEvent: any; const channel = mockChannel({ [RUN_LOG]: (e) => { logEvent = e; }, - [STEP_COMPLETE]: (evt) => true, + [STEP_COMPLETE]: () => true, [RUN_COMPLETE]: () => true, }); diff --git a/packages/ws-worker/test/events/step-complete.test.ts b/packages/ws-worker/test/events/step-complete.test.ts 
index ac3928c3d..8ba8a8e3d 100644 --- a/packages/ws-worker/test/events/step-complete.test.ts +++ b/packages/ws-worker/test/events/step-complete.test.ts @@ -1,11 +1,12 @@ import test from 'ava'; -import handleStepStart from '../../src/events/step-complete'; +import type { StepCompletePayload } from '@openfn/lexicon/lightning'; +import handleStepComplete from '../../src/events/step-complete'; import { mockChannel } from '../../src/mock/sockets'; import { createRunState } from '../../src/util'; import { STEP_COMPLETE } from '../../src/events'; - import { createPlan } from '../util'; +import { JobCompletePayload } from '@openfn/engine-multi'; test('clear the step id and active job on state', async (t) => { const plan = createPlan(); @@ -19,8 +20,8 @@ test('clear the step id and active job on state', async (t) => { [STEP_COMPLETE]: () => true, }); - const event = { state: { x: 10 } }; - await handleStepStart({ channel, state } as any, event); + const event = { state: { x: 10 } } as any; + await handleStepComplete({ channel, state } as any, event); t.falsy(state.activeJob); t.falsy(state.activeStep); @@ -41,8 +42,8 @@ test('setup input mappings on on state', async (t) => { }, }); - const engineEvent = { state: { x: 10 }, next: ['job-2'] }; - await handleStepStart({ channel, state } as any, engineEvent); + const engineEvent = { state: { x: 10 }, next: ['job-2'] } as any; + await handleStepComplete({ channel, state } as any, engineEvent); t.deepEqual(state.inputDataclips, { ['job-2']: lightningEvent.output_dataclip_id, @@ -61,8 +62,8 @@ test('save the dataclip to state', async (t) => { [STEP_COMPLETE]: () => true, }); - const event = { state: { x: 10 } }; - await handleStepStart({ channel, state } as any, event); + const event = { state: { x: 10 } } as any; + await handleStepComplete({ channel, state } as any, event); t.is(Object.keys(state.dataclips).length, 1); const [dataclip] = Object.values(state.dataclips); @@ -83,8 +84,8 @@ test('write a reason to state', async (t) 
=> { [STEP_COMPLETE]: () => true, }); - const event = { state: { x: 10 } }; - await handleStepStart({ channel, state } as any, event); + const event = { state: { x: 10 } } as any; + await handleStepComplete({ channel, state } as any, event); t.is(Object.keys(state.reasons).length, 1); t.deepEqual(state.reasons[jobId], { @@ -110,7 +111,10 @@ test('generate an exit reason: success', async (t) => { }, }); - await handleStepStart({ channel, state } as any, { state: { x: 10 } }); + await handleStepComplete( + { channel, state } as any, + { state: { x: 10 } } as any + ); t.truthy(event); t.is(event.reason, 'success'); @@ -128,7 +132,7 @@ test('send a step:complete event', async (t) => { state.activeStep = 'b'; const channel = mockChannel({ - [STEP_COMPLETE]: (evt) => { + [STEP_COMPLETE]: (evt: StepCompletePayload) => { t.is(evt.job_id, jobId); t.truthy(evt.step_id); t.truthy(evt.output_dataclip_id); @@ -140,11 +144,13 @@ test('send a step:complete event', async (t) => { }); const event = { + jobId, + workflowId: plan.id, state: result, next: ['a'], mem: { job: 1, system: 10 }, duration: 61, - threadId: 'abc', - }; - await handleStepStart({ channel, state } as any, event); + thread_id: 'abc', + } as JobCompletePayload; + await handleStepComplete({ channel, state } as any, event); }); diff --git a/packages/ws-worker/test/events/step-start.test.ts b/packages/ws-worker/test/events/step-start.test.ts index 183c3693f..e97b69a61 100644 --- a/packages/ws-worker/test/events/step-start.test.ts +++ b/packages/ws-worker/test/events/step-start.test.ts @@ -24,7 +24,7 @@ test('set a step id and active job on state', async (t) => { [RUN_LOG]: (x) => x, }); - await handleStepStart({ channel, state } as any, { jobId }); + await handleStepStart({ channel, state } as any, { jobId } as any); t.is(state.activeJob, jobId); t.truthy(state.activeStep); @@ -58,7 +58,7 @@ test('send a step:start event', async (t) => { [RUN_LOG]: () => true, }); - await handleStepStart({ channel, state } as any, { 
jobId }); + await handleStepStart({ channel, state } as any, { jobId } as any); }); test('step:start event should include versions', async (t) => { @@ -135,7 +135,7 @@ test('also logs the version number', async (t) => { state.activeStep = 'b'; const channel = mockChannel({ - [STEP_START]: (evt) => true, + [STEP_START]: () => true, [RUN_LOG]: (evt) => { if (evt.source === 'VER') { logEvent = evt; diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 0d06858b0..3a36a98e1 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -172,7 +172,7 @@ test.serial(`events: lightning should receive a ${e.GET_PLAN} event`, (t) => { didCallEvent = true; }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { t.true(didCallEvent); done(); }); @@ -275,7 +275,7 @@ test.serial( t.pass('called run complete'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -301,7 +301,7 @@ test.serial( t.pass('called step complete'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, ({ payload }: any) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -586,7 +586,7 @@ test.serial( const bc = createEdge('b', 'c'); bc.condition = 'on_job_success'; - const run = createRun([a, b, c], [ab, bc]); + const run = createRun([a, b, c] as any, [ab, bc] as any); const results: Record = {}; diff --git a/packages/ws-worker/test/mock/runtime-engine.test.ts b/packages/ws-worker/test/mock/runtime-engine.test.ts index 38b65c43f..d60c5b2ab 100644 --- a/packages/ws-worker/test/mock/runtime-engine.test.ts +++ b/packages/ws-worker/test/mock/runtime-engine.test.ts @@ -3,12 +3,13 @@ import type { ExecutionPlan } from '@openfn/lexicon'; import type { JobCompletePayload, - JobStartePayload, + JobStartPayload, WorkflowCompletePayload, WorkflowStartPayload, } from 
'@openfn/engine-multi'; import create from '../../src/mock/runtime-engine'; import { waitForEvent, clone, createPlan } from '../util'; +import { WorkflowErrorPayload } from '@openfn/engine-multi'; const sampleWorkflow = { id: 'w1', @@ -66,7 +67,7 @@ test.serial('Dispatch complete events when a workflow completes', async (t) => { test.serial('Dispatch start events for a job', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'job-start'); + const evt = await waitForEvent(engine, 'job-start'); t.truthy(evt); t.is(evt.workflowId, 'w1'); t.is(evt.jobId, 'j1'); @@ -89,9 +90,12 @@ test.serial('Dispatch error event for a crash', async (t) => { }); engine.execute(wf); - const evt = await waitForEvent(engine, 'workflow-error'); + const evt = await waitForEvent( + engine, + 'workflow-error' + ); - t.is(evt.workflowId, wf.id); + t.is(evt.workflowId, wf.id!); t.is(evt.type, 'RuntimeCrash'); t.regex(evt.message, /invalid or unexpected token/i); }); @@ -104,7 +108,7 @@ test.serial('wait function', async (t) => { engine.execute(wf); const start = Date.now(); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); const end = Date.now() - start; t.true(end > 90); @@ -126,7 +130,7 @@ test.serial( // @ts-ignore engine.execute(wf, {}, { resolvers: { credential } }); - await waitForEvent(engine, 'job-start'); + await waitForEvent(engine, 'job-start'); t.true(didCallCredentials); } ); @@ -269,7 +273,7 @@ test.skip('timeout', async (t) => { // @ts-ignore engine.execute(wf, {}, { timeout: 10 }); - const evt = await waitForEvent( + const evt = await waitForEvent( engine, 'workflow-error' ); diff --git a/packages/ws-worker/test/mock/sockets.test.ts b/packages/ws-worker/test/mock/sockets.test.ts index 46ad6aa4c..2312d9582 100644 --- a/packages/ws-worker/test/mock/sockets.test.ts +++ b/packages/ws-worker/test/mock/sockets.test.ts @@ -68,7 +68,7 @@ test('mock channel: invoke the ok handler with the callback 
result', (t) => { }, }); - channel.push('ping', 'abc').receive('ok', (evt) => { + channel.push('ping', 'abc').receive('ok', (evt: any) => { t.is(evt, 'pong!'); t.pass(); done(); From 81c4b345a8ad7625fba50acf1dc605b1b8977c83 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 12:53:21 +0000 Subject: [PATCH 081/128] worker: update channel mock --- packages/ws-worker/src/mock/sockets.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/ws-worker/src/mock/sockets.ts b/packages/ws-worker/src/mock/sockets.ts index 599c755dd..172d0b6a5 100644 --- a/packages/ws-worker/src/mock/sockets.ts +++ b/packages/ws-worker/src/mock/sockets.ts @@ -63,6 +63,7 @@ export const mockChannel = ( }; return receive; }, + leave: () => {}, }; return c; }; From f2a530bb893443cf1a0e38108e03c4e2b86a470c Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 13:05:41 +0000 Subject: [PATCH 082/128] lexicon: docs --- packages/lexicon/README.md | 39 +++++++++++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/packages/lexicon/README.md b/packages/lexicon/README.md index 16e4ec4a9..baa736f5f 100644 --- a/packages/lexicon/README.md +++ b/packages/lexicon/README.md @@ -2,4 +2,41 @@ The lexicon (aka the OpenFunctionicon) is a central repositoty of key type and w It's a types repo and glossary at the same time. -TODO: should it also capture constants for cross-package events? +## Overview + +The OpenFunction stack is built on the concepts of Workflows, Runs, Jobs and Expressions (and more). Some of these terms can be used interchangable, or used differently in certain contexts. + +Here are the key concepts + +- An **Expression** is a string of Javascript (or Javascript-like code) written to be run in the CLI or Lightning. +- A **Job** is an expression plus some metadata required to run it - typically an adaptor and credentials. + The terms Job and Expression are often used interchangeably. 
+- A **Workflow** is a series of steps to be executed in sequence. Steps are usually Jobs (and so job and step are often used + interchangeably), but can be Triggers. +- An **Execution Plan** is a Workflow plus some options which inform how it should be executed (ie, start node, timeout). + +The term "Execution plan" is mostly used internally and not exposed to users, and is usually interchangeable with Workflow. + +You can find formal type definition of these and more in `src/core.d.ts`. + +Lightning also introduces it's own terminolgy as it is standalone application and has features that the runtime itself does not. + +In Lightning, a Step can be a Job or a Trigger. Jobs are connected by Paths (also known sometimes as Edges), which may be conditional. + +You can find lightning-specific typings in `src/lightning.d.ts` + +## Usage + +This repo only contains type definitions. It is unlikely to be of use outside the repo - although users are free to import and use it. + +To use the core types, simply import what you need: + +``` +import { ExecutionPlan } from '@openfn/lexicon +``` + +To use the lightning types, use `@openfn/lexicon/lightning` + +``` +import { Run } from '@openfn/lexicon/lightning +``` From e9287c0fb7190de9c5e4a5e333d51956eea5d295 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 15:08:22 +0000 Subject: [PATCH 083/128] Run -> LightningPlan --- packages/lexicon/lightning.d.ts | 33 ++++++-- packages/lightning-mock/src/api-dev.ts | 9 +- packages/lightning-mock/src/server.ts | 4 +- packages/lightning-mock/src/types.ts | 12 ++- .../lightning-mock/test/channels/run.test.ts | 4 +- packages/lightning-mock/test/server.test.ts | 4 +- packages/ws-worker/src/api/execute.ts | 4 +- packages/ws-worker/src/channels/run.ts | 12 ++- ...nvert-run.ts => convert-lightning-plan.ts} | 4 +- packages/ws-worker/src/util/index.ts | 2 +- packages/ws-worker/test/api/destroy.test.ts | 4 +- ...test.ts => convert-lightning-plan.test.ts} | 84 +++++++++---------- 12 files 
changed, 103 insertions(+), 73 deletions(-) rename packages/ws-worker/src/util/{convert-run.ts => convert-lightning-plan.ts} (97%) rename packages/ws-worker/test/util/{convert-run.test.ts => convert-lightning-plan.test.ts} (84%) diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index 33c103167..5f4104a8b 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -1,4 +1,5 @@ import type { SanitizePolicies } from '@openfn/logger'; +import { State } from './core'; type StepId = string; @@ -10,8 +11,15 @@ type StepId = string; * It is helpful to have these in the lexicon to avoid a circular dependency between lightning and the worker * It's also kinda nice that the contract isn't in the worker itself, it's on neutral ground */ -// An run object returned by Lightning -export type Run = { + +/** + * An execution plan representing a Ligyhtning 'Run'. + * This represents the execution of a workflow. + * + * The data stucture that Lightning sends is converted by the Worker into + * a runtime ExecutionPlan (as found in Core) + */ +export type LightningPlan = { id: string; name?: string; dataclip_id: string; @@ -21,7 +29,7 @@ export type Run = { jobs: Node[]; edges: Edge[]; - options?: RunOptions; + options?: LightningPlanOptions; }; /** @@ -30,14 +38,19 @@ export type Run = { * and saved to the plan itself * (although at the time of writing timeout is handled by the worker, not the runtime) */ -export type RunOptions = { +export type LightningPlanOptions = { runTimeoutMs?: number; sanitize?: SanitizePolicies; start?: StepId; }; -// TODO rename to step -// maybe also split into jobs and triggers +/** + * This is a Job or Trigger node in a Lightning plan, + * AKA a Step. 
+ * + * Sticking with the Node/Edge semantics to help distinguish the + * Lightning and runtime typings + */ export type Node = { id: string; name?: string; @@ -49,6 +62,12 @@ export type Node = { state?: State; }; +/** + * This is a Path (or link) between two Jobs in a Plan. + * + * Sticking with the Node/Edge semantics to help distinguish the + * Lightning and runtime typings + */ export interface Edge { id: string; source_job_id?: string; @@ -109,7 +128,7 @@ export type ExitReason = { }; export type GetPlanPayload = void; // no payload -export type GetPlanReply = Run; +export type GetPlanReply = LightningPlan; export type GetCredentialPayload = { id: string }; // credential in-line, no wrapper, arbitrary data diff --git a/packages/lightning-mock/src/api-dev.ts b/packages/lightning-mock/src/api-dev.ts index ea494ee23..c9477781a 100644 --- a/packages/lightning-mock/src/api-dev.ts +++ b/packages/lightning-mock/src/api-dev.ts @@ -5,7 +5,10 @@ import crypto from 'node:crypto'; import Router from '@koa/router'; import { Logger } from '@openfn/logger'; -import type { Run, RunCompletePayload } from '@openfn/lexicon/lightning'; +import type { + LightningPlan, + RunCompletePayload, +} from '@openfn/lexicon/lightning'; import { ServerState } from './server'; import { RUN_COMPLETE } from './events'; @@ -36,7 +39,7 @@ const setupDevAPI = ( app.getDataclip = (id: string) => state.dataclips[id]; - app.enqueueRun = (run: Run, workerId = 'rte') => { + app.enqueueRun = (run: LightningPlan, workerId = 'rte') => { state.runs[run.id] = run; state.results[run.id] = { workerId, // TODO @@ -135,7 +138,7 @@ const setupRestAPI = (app: DevServer, state: ServerState, logger: Logger) => { const router = new Router(); router.post('/run', (ctx) => { - const run = ctx.request.body as Run; + const run = ctx.request.body as LightningPlan; if (!run) { ctx.response.status = 400; diff --git a/packages/lightning-mock/src/server.ts b/packages/lightning-mock/src/server.ts index 928019a3b..c6ceac0db 
100644 --- a/packages/lightning-mock/src/server.ts +++ b/packages/lightning-mock/src/server.ts @@ -11,7 +11,7 @@ import createLogger, { import createWebSocketAPI from './api-sockets'; import createDevAPI from './api-dev'; import type { StepId } from '@openfn/lexicon'; -import type { RunLogPayload, Run } from '@openfn/lexicon/lightning'; +import type { RunLogPayload, LightningPlan } from '@openfn/lexicon/lightning'; import type { DevServer } from './types'; type JobId = string; @@ -29,7 +29,7 @@ export type ServerState = { credentials: Record; // list of runs by id - runs: Record; + runs: Record; // list of dataclips by id dataclips: Record; diff --git a/packages/lightning-mock/src/types.ts b/packages/lightning-mock/src/types.ts index 571ba3191..b3613f986 100644 --- a/packages/lightning-mock/src/types.ts +++ b/packages/lightning-mock/src/types.ts @@ -1,5 +1,9 @@ import Koa from 'koa'; -import type { Run, DataClip, Credential } from '@openfn/lexicon/lightning'; +import type { + LightningPlan, + DataClip, + Credential, +} from '@openfn/lexicon/lightning'; import type { ServerState } from './server'; export type LightningEvents = 'log' | 'run-complete'; @@ -8,9 +12,9 @@ export type DevServer = Koa & { state: ServerState; addCredential(id: string, cred: Credential): void; addDataclip(id: string, data: DataClip): void; - enqueueRun(run: Run): void; + enqueueRun(run: LightningPlan): void; destroy: () => void; - getRun(id: string): Run; + getRun(id: string): LightningPlan; getCredential(id: string): Credential; getDataclip(id: string): DataClip; getQueueLength(): number; @@ -23,7 +27,7 @@ export type DevServer = Koa & { runId: string, fn: (evt: any) => void ): void; - registerRun(run: Run): void; + registerRun(run: LightningPlan): void; removeAllListeners(): void; reset(): void; startRun(id: string): any; diff --git a/packages/lightning-mock/test/channels/run.test.ts b/packages/lightning-mock/test/channels/run.test.ts index 5bfe6dd41..59b9239f6 100644 --- 
a/packages/lightning-mock/test/channels/run.test.ts +++ b/packages/lightning-mock/test/channels/run.test.ts @@ -1,6 +1,6 @@ import test from 'ava'; import type { - Run, + LightningPlan, RunCompletePayload, Credential, DataClip, @@ -76,7 +76,7 @@ test.serial('get run data through the run channel', async (t) => { server.startRun(run1.id); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_PLAN).receive('ok', (run: Run) => { + channel.push(GET_PLAN).receive('ok', (run: LightningPlan) => { t.deepEqual(run, run1); done(); }); diff --git a/packages/lightning-mock/test/server.test.ts b/packages/lightning-mock/test/server.test.ts index 5ef9fa120..5b0e9a3c1 100644 --- a/packages/lightning-mock/test/server.test.ts +++ b/packages/lightning-mock/test/server.test.ts @@ -2,7 +2,7 @@ import test from 'ava'; import { Socket } from 'phoenix'; import { WebSocket } from 'ws'; -import type { Run } from '@openfn/lexicon/lightning'; +import type { LightningPlan } from '@openfn/lexicon/lightning'; import { setup } from './util'; @@ -22,7 +22,7 @@ test.serial('should setup an run at /POST /run', async (t) => { t.is(Object.keys(state.runs).length, 0); t.is(Object.keys(state.runs).length, 0); - const run: Run = { + const run: LightningPlan = { id: 'a', dataclip_id: 'a', starting_node_id: 'j', diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index d06e51275..14a9640e6 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -2,7 +2,7 @@ import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; import type { RunLogPayload, RunStartPayload, - RunOptions, + LightningPlanOptions, } from '@openfn/lexicon/lightning'; import type { Logger } from '@openfn/logger'; import type { @@ -63,7 +63,7 @@ export function execute( logger: Logger, plan: ExecutionPlan, input: Lazy, - options: RunOptions = {}, + options: LightningPlanOptions = {}, onFinish = (_result: any) => {} ) { 
logger.info('executing ', plan.id); diff --git a/packages/ws-worker/src/channels/run.ts b/packages/ws-worker/src/channels/run.ts index 4cac9e7ab..104fba82d 100644 --- a/packages/ws-worker/src/channels/run.ts +++ b/packages/ws-worker/src/channels/run.ts @@ -1,9 +1,13 @@ import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; -import type { GetPlanReply, Run, RunOptions } from '@openfn/lexicon/lightning'; +import type { + GetPlanReply, + LightningPlan, + LightningPlanOptions, +} from '@openfn/lexicon/lightning'; import type { Logger } from '@openfn/logger'; import { getWithReply } from '../util'; -import convertRun from '../util/convert-run'; +import convertRun from '../util/convert-lightning-plan'; import { GET_PLAN } from '../events'; import type { Channel, Socket } from '../types'; @@ -21,7 +25,7 @@ const joinRunChannel = ( return new Promise<{ channel: Channel; plan: ExecutionPlan; - options: RunOptions; + options: LightningPlanOptions; input: Lazy; }>((resolve, reject) => { // TMP - lightning seems to be sending two responses to me @@ -56,5 +60,5 @@ export async function loadRun(channel: Channel) { // first we get the run body through the socket const runBody = await getWithReply(channel, GET_PLAN); // then we generate the execution plan - return convertRun(runBody as Run); + return convertRun(runBody as LightningPlan); } diff --git a/packages/ws-worker/src/util/convert-run.ts b/packages/ws-worker/src/util/convert-lightning-plan.ts similarity index 97% rename from packages/ws-worker/src/util/convert-run.ts rename to packages/ws-worker/src/util/convert-lightning-plan.ts index d15ba35b3..df2427a3d 100644 --- a/packages/ws-worker/src/util/convert-run.ts +++ b/packages/ws-worker/src/util/convert-lightning-plan.ts @@ -10,7 +10,7 @@ import type { WorkflowOptions, Lazy, } from '@openfn/lexicon'; -import { Run, Edge } from '@openfn/lexicon/lightning'; +import { LightningPlan, Edge } from '@openfn/lexicon/lightning'; import { ExecuteOptions } from 
'@openfn/engine-multi'; export const conditions: Record string | null> = @@ -40,7 +40,7 @@ const mapTriggerEdgeCondition = (edge: Edge) => { }; export default ( - run: Run + run: LightningPlan ): { plan: ExecutionPlan; options: ExecuteOptions; input: Lazy } => { // Some options get mapped straight through to the runtime's workflow options // TODO or maybe not? Maybe they're all sent to the engine instead? diff --git a/packages/ws-worker/src/util/index.ts b/packages/ws-worker/src/util/index.ts index 6c9b2b0e3..776d274e5 100644 --- a/packages/ws-worker/src/util/index.ts +++ b/packages/ws-worker/src/util/index.ts @@ -1,4 +1,4 @@ -import convertRun from './convert-run'; +import convertRun from './convert-lightning-plan'; import tryWithBackoff from './try-with-backoff'; import getWithReply from './get-with-reply'; import stringify from './stringify'; diff --git a/packages/ws-worker/test/api/destroy.test.ts b/packages/ws-worker/test/api/destroy.test.ts index ef5fc4d13..2b47105c1 100644 --- a/packages/ws-worker/test/api/destroy.test.ts +++ b/packages/ws-worker/test/api/destroy.test.ts @@ -2,7 +2,7 @@ import test from 'ava'; import crypto from 'node:crypto'; import createLightningServer from '@openfn/lightning-mock'; import { createMockLogger } from '@openfn/logger'; -import { Run } from '@openfn/lexicon/lightning'; +import { LightningPlan } from '@openfn/lexicon/lightning'; import createWorker from '../../src/server'; import createMockRTE from '../../src/mock/runtime-engine'; @@ -40,7 +40,7 @@ const createRun = () => body: `wait(${500 + Math.random() * 1000})`, }, ], - } as Run); + } as LightningPlan); const waitForClaim = (timeout: number = 1000) => new Promise((resolve) => { diff --git a/packages/ws-worker/test/util/convert-run.test.ts b/packages/ws-worker/test/util/convert-lightning-plan.test.ts similarity index 84% rename from packages/ws-worker/test/util/convert-run.test.ts rename to packages/ws-worker/test/util/convert-lightning-plan.test.ts index 
c593e8852..dbed7a503 100644 --- a/packages/ws-worker/test/util/convert-run.test.ts +++ b/packages/ws-worker/test/util/convert-lightning-plan.test.ts @@ -1,6 +1,6 @@ import test from 'ava'; -import type { Run, Node } from '@openfn/lexicon/lightning'; -import convertRun, { conditions } from '../../src/util/convert-run'; +import type { LightningPlan, Node } from '@openfn/lexicon/lightning'; +import convertPlan, { conditions } from '../../src/util/convert-lightning-plan'; import { ConditionalStepEdge, Job } from '@openfn/lexicon'; // Creates a lightning node (job or trigger) @@ -43,13 +43,13 @@ const testEdgeCondition = (expr: string, state: any) => { }; test('convert a single job', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode()], triggers: [], edges: [], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -61,14 +61,14 @@ test('convert a single job', (t) => { }); test('convert a single job with names', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', name: 'my-workflow', jobs: [createNode({ name: 'my-job' })], triggers: [], edges: [], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -81,7 +81,7 @@ test('convert a single job with names', (t) => { }); test('convert a single job with options', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode()], triggers: [], @@ -91,7 +91,7 @@ test('convert a single job with options', (t) => { runTimeoutMs: 10, }, }; - const { plan, options } = convertRun(run as Run); + const { plan, options } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -109,13 +109,13 @@ test('convert a single job with options', (t) => { // Note idk how lightningg will handle state/defaults on a job // but this is what we'll do right now test('convert a single job with data', (t) => { - 
const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ state: { data: { x: 22 } } })], triggers: [], edges: [], }; - const { plan, options } = convertRun(run as Run); + const { plan, options } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -128,10 +128,10 @@ test('convert a single job with data', (t) => { }); test('Accept a partial run object', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', }; - const { plan, options } = convertRun(run as Run); + const { plan, options } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -144,21 +144,21 @@ test('Accept a partial run object', (t) => { }); test('handle dataclip_id as input', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', dataclip_id: 'xyz', }; - const { input } = convertRun(run as Run); + const { input } = convertPlan(run as LightningPlan); t.deepEqual(input, 'xyz'); }); test('handle starting_node_id as options', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', starting_node_id: 'j1', }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan.options, { start: 'j1', @@ -166,13 +166,13 @@ test('handle starting_node_id as options', (t) => { }); test('convert a single trigger', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [], edges: [], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -189,13 +189,13 @@ test('convert a single trigger', (t) => { // This exhibits current behaviour. 
This should never happen though test('ignore a single edge', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [], triggers: [], edges: [createEdge('a', 'b')], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -207,7 +207,7 @@ test('ignore a single edge', (t) => { }); test('convert a single trigger with an edge', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [createNode()], @@ -219,7 +219,7 @@ test('convert a single trigger with an edge', (t) => { }, ], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -239,7 +239,7 @@ test('convert a single trigger with an edge', (t) => { }); test('convert a single trigger with two edges', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], @@ -256,7 +256,7 @@ test('convert a single trigger with two edges', (t) => { }, ], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -278,7 +278,7 @@ test('convert a single trigger with two edges', (t) => { }); test('convert a disabled trigger', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [createNode({ id: 'a' })], @@ -291,7 +291,7 @@ test('convert a disabled trigger', (t) => { }, ], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -309,13 +309,13 @@ test('convert a disabled trigger', (t) => { }); test('convert two linked jobs', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b')], }; - const { plan } = convertRun(run 
as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -331,7 +331,7 @@ test('convert two linked jobs', (t) => { // This isn't supported by the runtime, but it'll survive the conversion test('convert a job with two upstream jobs', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [ createNode({ id: 'a' }), @@ -341,7 +341,7 @@ test('convert a job with two upstream jobs', (t) => { triggers: [], edges: [createEdge('a', 'x'), createEdge('b', 'x')], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -358,13 +358,13 @@ test('convert a job with two upstream jobs', (t) => { test('convert two linked jobs with an edge condition', (t) => { const condition = 'state.age > 10'; - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -379,13 +379,13 @@ test('convert two linked jobs with an edge condition', (t) => { }); test('convert two linked jobs with a disabled edge', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { enabled: false })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', @@ -498,13 +498,13 @@ test('on_job_failure condition: return false if state is undefined', (t) => { }); test('convert edge condition on_job_success', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition: 'on_job_success' })], }; - const { plan } = convertRun(run as Run); + const { 
plan } = convertPlan(run as LightningPlan); const [job] = plan.workflow.steps as Job[]; const edge = job.next as Record; @@ -515,13 +515,13 @@ test('convert edge condition on_job_success', (t) => { }); test('convert edge condition on_job_failure', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition: 'on_job_failure' })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); const [job] = plan.workflow.steps as Job[]; const edge = job.next as Record; @@ -538,13 +538,13 @@ test('convert edge condition on_job_failure', (t) => { test('convert edge condition on_job_success with a funky id', (t) => { const id_a = 'a-b-c@ # {} !£'; - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: id_a }), createNode({ id: 'b' })], triggers: [], edges: [createEdge(id_a, 'b', { condition: 'on_job_success' })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); const [job] = plan.workflow.steps as Job[]; const edge = job.next as Record; @@ -555,13 +555,13 @@ test('convert edge condition on_job_success with a funky id', (t) => { }); test('convert edge condition always', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition: 'always' })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); const [job] = plan.workflow.steps as Job[]; const edge = job.next as Record; From 2bdb92fd921fcf0b96e42bcf20dd24f8a5fe87ee Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 8 Feb 2024 15:40:59 +0000 Subject: [PATCH 084/128] version bumps for logger and mock --- .changeset/two-pugs-burn.md | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changeset/two-pugs-burn.md diff 
--git a/.changeset/two-pugs-burn.md b/.changeset/two-pugs-burn.md new file mode 100644 index 000000000..080b1307b --- /dev/null +++ b/.changeset/two-pugs-burn.md @@ -0,0 +1,6 @@ +--- +'@openfn/lightning-mock': major +'@openfn/logger': major +--- + +Symbolic 1.0 version release From bb2fc32d2a6849f60fd61f0e06604580846aab36 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 11:03:43 +0000 Subject: [PATCH 085/128] mock: return error if dataclip not found --- packages/lightning-mock/src/api-sockets.ts | 20 ++++++++++++------- .../lightning-mock/test/channels/run.test.ts | 17 ++++++++++++++++ 2 files changed, 30 insertions(+), 7 deletions(-) diff --git a/packages/lightning-mock/src/api-sockets.ts b/packages/lightning-mock/src/api-sockets.ts index 816a19813..10ab917e8 100644 --- a/packages/lightning-mock/src/api-sockets.ts +++ b/packages/lightning-mock/src/api-sockets.ts @@ -263,8 +263,6 @@ const createSocketAPI = ( }); } - // TODO this mock function is broken in the phoenix package update - // (I am not TOO worried, the actual integration works fine) function getDataclip( state: ServerState, ws: DevSocket, @@ -273,11 +271,19 @@ const createSocketAPI = ( const { ref, topic, join_ref } = evt; const dataclip = state.dataclips[evt.payload.id]; - // Send the data as an ArrayBuffer (our stringify function will do this) - const payload = { - status: 'ok', - response: enc.encode(stringify(dataclip)), - }; + let payload; + if (dataclip) { + payload = { + status: 'ok', + response: enc.encode(stringify(dataclip)), + }; + } else { + // TODO I think this is actually tidier than what lightning does...
+ payload = { + status: 'error', + response: 'not_found', + }; + } ws.reply({ ref, diff --git a/packages/lightning-mock/test/channels/run.test.ts b/packages/lightning-mock/test/channels/run.test.ts index 59b9239f6..d7eff9902 100644 --- a/packages/lightning-mock/test/channels/run.test.ts +++ b/packages/lightning-mock/test/channels/run.test.ts @@ -154,6 +154,23 @@ test.serial('get dataclip through the run channel', async (t) => { }); }); +test.serial( + 'get dataclip should throw if the dataclip does not exist', + async (t) => { + return new Promise(async (done) => { + server.startRun(run1.id); + + const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); + channel + .push(GET_DATACLIP, { id: 'd' }) + .receive('error', (result: any) => { + t.is(result, 'not_found'); + done(); + }); + }); + } +); + // TODO test that all events are proxied out to server.on test.serial( From 33e72f85b7c8766e1693c54587d5dd90ace1c945 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 11:12:00 +0000 Subject: [PATCH 086/128] worker: better handling of dataclip errors --- packages/lexicon/lightning.d.ts | 2 + packages/ws-worker/src/api/execute.ts | 52 +++++++++++-------- packages/ws-worker/src/util/get-with-reply.ts | 17 ++++-- packages/ws-worker/test/lightning.test.ts | 32 ++++++++++++ 4 files changed, 77 insertions(+), 26 deletions(-) diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index 5f4104a8b..114211166 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -84,6 +84,8 @@ export type DataClip = Record; export type Credential = Record; +// TODO export reason strings from this repo +// and explain what each reason means export type ExitReasonStrings = | 'success' | 'fail' diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 14a9640e6..d8f0d9bf5 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -130,34 +130,44 @@ 
export function execute( // dataclip: (id: string) => loadDataclip(channel, id), } as Resolvers; - Promise.resolve() + setTimeout(async () => { + let loadedInput = input; + // Optionally resolve initial state - .then(async () => { - // TODO we need to remove this from here and let the runtime take care of it through - // the resolver. See https://github.com/OpenFn/kit/issues/403 - // TODO come back and work out how initial state will work - if (typeof input === 'string') { - logger.debug('loading dataclip', input); - const loadedInput = await loadDataclip(channel, input); + // TODO we need to remove this from here and let the runtime take care of it through + // the resolver. See https://github.com/OpenFn/kit/issues/403 + // TODO come back and work out how initial state will work + if (typeof input === 'string') { + logger.debug('loading dataclip', input); + + try { + loadedInput = await loadDataclip(channel, input); logger.success('dataclip loaded'); return loadedInput; - } - return input; - }) - // Execute (which we have to wrap in a promise chain to handle initial state) - .then((input: State) => { - try { - engine.execute(plan, input, { resolvers, ...options }); } catch (e: any) { - // TODO what if there's an error? - handleRunError(context, { + // abort with error + return handleRunError(context, { workflowId: plan.id!, - message: e.message, - type: e.type, - severity: e.severity, + message: `Failed to load dataclip ${input}${ + e.message ? 
`: ${e.message}` : '' + }`, + type: 'DataClipError', + severity: 'exception', }); } - }); + } + + try { + engine.execute(plan, loadedInput as State, { resolvers, ...options }); + } catch (e: any) { + handleRunError(context, { + workflowId: plan.id!, + message: e.message, + type: e.type, + severity: e.severity, + }); + } + }); return context; } diff --git a/packages/ws-worker/src/util/get-with-reply.ts b/packages/ws-worker/src/util/get-with-reply.ts index ac6b0c778..5da1ad7e5 100644 --- a/packages/ws-worker/src/util/get-with-reply.ts +++ b/packages/ws-worker/src/util/get-with-reply.ts @@ -1,9 +1,16 @@ import { Channel } from '../types'; export default (channel: Channel, event: string, payload?: any) => - new Promise((resolve) => { - channel.push(event, payload).receive('ok', (evt: any) => { - resolve(evt); - }); - // TODO handle errors and timeouts too + new Promise((resolve, reject) => { + channel + .push(event, payload) + .receive('ok', (evt: any) => { + resolve(evt); + }) + .receive('error', (e: any) => { + reject(e); + }) + .receive('timeout', (e: any) => { + reject(e); + }); }); diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 3a36a98e1..55c4202ac 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -10,6 +10,7 @@ import { createRun, createEdge, createJob } from './util'; import createWorkerServer from '../src/server'; import createMockRTE from '../src/mock/runtime-engine'; import * as e from '../src/events'; +import { RunCompletePayload } from '@openfn/lexicon/lightning'; let lng: any; let worker: any; @@ -240,6 +241,37 @@ test.serial( } ); +test.serial( + `events: worker should send an error if ${e.GET_DATACLIP} references a non-existant dataclip`, + (t) => { + return new Promise((done) => { + const run = getRun({ + dataclip_id: 'xyz', + }); + // Do not load the dataclip into lightning + + let didCallEvent = false; + lng.onSocketEvent(e.GET_DATACLIP, 
run.id, () => { + didCallEvent = true; + }); + + lng.onSocketEvent( + e.RUN_COMPLETE, + run.id, + ({ payload }: { payload: RunCompletePayload }) => { + t.true(didCallEvent); + t.is(payload.reason, 'exception'); + t.is(payload.error_type, 'DataClipError'); + t.regex(payload.error_message!, /Failed to load dataclip abc/); + done(); + } + ); + + lng.enqueueRun(run); + }); + } +); + test.serial(`events: lightning should receive a ${e.STEP_START} event`, (t) => { return new Promise((done) => { const run = getRun(); From 1da18fb5f520945949dbdd2e0c3ad3ec5beefe7a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 11:42:49 +0000 Subject: [PATCH 087/128] lightning-mock: fix test --- packages/lightning-mock/test/channels/run.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/lightning-mock/test/channels/run.test.ts b/packages/lightning-mock/test/channels/run.test.ts index d7eff9902..8fcffc244 100644 --- a/packages/lightning-mock/test/channels/run.test.ts +++ b/packages/lightning-mock/test/channels/run.test.ts @@ -162,7 +162,7 @@ test.serial( const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); channel - .push(GET_DATACLIP, { id: 'd' }) + .push(GET_DATACLIP, { id: 'x' }) .receive('error', (result: any) => { t.is(result, 'not_found'); done(); From a97eb2651270ee7e1a4a8e94187767f5b44d6ad3 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 11:43:15 +0000 Subject: [PATCH 088/128] worker: changeset --- .changeset/three-shrimps-approve.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/three-shrimps-approve.md diff --git a/.changeset/three-shrimps-approve.md b/.changeset/three-shrimps-approve.md new file mode 100644 index 000000000..9086eb90b --- /dev/null +++ b/.changeset/three-shrimps-approve.md @@ -0,0 +1,5 @@ +--- +'@openfn/ws-worker': patch +--- + +Better error handling for invalid dataclips From 21365ea01dde0665482dfcdf7e2b539eb041bdf2 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: 
Fri, 9 Feb 2024 11:55:06 +0000 Subject: [PATCH 089/128] worker: fix test Don't return the loaded dataclip after the refactor --- packages/ws-worker/src/api/execute.ts | 2 -- packages/ws-worker/test/api/execute.test.ts | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index d8f0d9bf5..a86139168 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -143,7 +143,6 @@ export function execute( try { loadedInput = await loadDataclip(channel, input); logger.success('dataclip loaded'); - return loadedInput; } catch (e: any) { // abort with error return handleRunError(context, { @@ -156,7 +155,6 @@ export function execute( }); } } - try { engine.execute(plan, loadedInput as State, { resolvers, ...options }); } catch (e: any) { diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index b1148424b..4c74bfb65 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -378,6 +378,7 @@ test('execute should lazy-load initial state', async (t) => { const channel = mockChannel({ ...mockEventHandlers, [GET_DATACLIP]: (id) => { + console.log('> GET DATACLIP'); t.truthy(id); didLoadState = true; return toArrayBuffer({}); From 14b217887593772b023901c90e970f8b3078ccd7 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 12:08:19 +0000 Subject: [PATCH 090/128] worker: fix test again --- packages/ws-worker/test/lightning.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 55c4202ac..15edea4e1 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -262,7 +262,7 @@ test.serial( t.true(didCallEvent); t.is(payload.reason, 'exception'); t.is(payload.error_type, 'DataClipError'); - 
t.regex(payload.error_message!, /Failed to load dataclip abc/); + t.regex(payload.error_message!, /Failed to load dataclip xyz/); done(); } ); From 86dd668e57e3654e4bbcb8b4303b0f674021e945 Mon Sep 17 00:00:00 2001 From: josephjclark Date: Fri, 9 Feb 2024 14:37:26 +0000 Subject: [PATCH 091/128] Backend renaming (1.0 version bumps plus the lexicon) (#585) * lexicon: start building a central lexicon of definitions * runtime: huge refactor of runtime core API * runtime: more refactoring * runtime: take initial state out of the execution plan * fix tests * runtime: changeset * runtime: extra type tweakings * runtime: readme * runtime: jobs -> steps (mostly) there are cases where job is more accurate and useful * cli: start refactoring towrads new runtime API Done a big chunk of execute but still a way to go * cli: basically get the CLI working again * cli: types * cli: fix a bunch of tests, update workflow parsing * cli: fix execute and compile tests * cli: more test fixes * fix more cli tests * cli: fix integration tests * cli: tidy * runtime: remove strict mode * remove strict mode * cli: default workflow name to the file name * runtime: tweak log output * cli: remove log * cli: types * docs * deploy: adjust logging * engine: update types * engine: update names and types This is 90% of the basic rename done. Tests may even pass * runtime: male statePropsToRemove a system options, rather than workflow specific If a workflow wants to remove props, it'll add an fn bock * engine: restore statePropsToRemove tests * mock: update to lexicon * worker: start mapping to lexicon. 
Handled run-> plan conversion * worker: typings * worker: fix all tests * engine: types * worker: fix cheeky test somehow missed it last time * tests: fix cli tests * worker: update test * package lock * tests: update test * changesets and housekeeping * more housekeeping * engine: tweak test * runtime: tweak error messages * worker: stricter type checkign on tests * fix test * typing in worker tests * worker: update channel mock * lexicon: docs * Run -> LightningPlan * version bumps for logger and mock * Send worker versions (#593) * worker: send worker and API versions to Lightning * lexicon: fix API_VERSION export --- .changeset/grumpy-pillows-tie.md | 12 + .changeset/old-planes-sort.md | 5 + .changeset/pretty-spoons-beam.md | 9 + .changeset/ten-dingos-explode.md | 6 + .changeset/two-pugs-burn.md | 6 + integration-tests/cli/test/cli.test.ts | 2 +- integration-tests/cli/test/errors.test.ts | 7 +- .../cli/test/execute-workflow.test.ts | 45 +- .../cli/test/fixtures/circular.json | 26 +- .../test/fixtures/invalid-config-path.json | 13 +- .../cli/test/fixtures/invalid-exp-path.json | 12 +- .../cli/test/fixtures/invalid-start.json | 18 +- .../cli/test/fixtures/invalid-syntax.json | 12 +- .../cli/test/fixtures/multiple-inputs.json | 34 +- integration-tests/cli/test/fixtures/plan.json | 19 + .../cli/test/fixtures/wf-array.json | 40 +- .../cli/test/fixtures/wf-conditional.json | 50 +- .../cli/test/fixtures/wf-count.json | 26 +- .../cli/test/fixtures/wf-errors.json | 44 +- .../cli/test/fixtures/wf-simple.json | 14 +- .../cli/test/fixtures/wf-strict.json | 30 +- .../worker/test/integration.test.ts | 2 +- packages/cli/README.md | 85 +- packages/cli/package.json | 1 + packages/cli/src/commands.ts | 19 +- packages/cli/src/compile/command.ts | 4 +- packages/cli/src/compile/compile.ts | 42 +- packages/cli/src/compile/handler.ts | 28 +- packages/cli/src/docs/handler.ts | 2 +- packages/cli/src/execute/command.ts | 13 +- packages/cli/src/execute/execute.ts | 48 +- 
.../src/execute/get-autoinstall-targets.ts | 30 +- packages/cli/src/execute/handler.ts | 27 +- packages/cli/src/execute/serialize-output.ts | 13 +- packages/cli/src/options.ts | 76 +- packages/cli/src/test/handler.ts | 66 +- packages/cli/src/types.ts | 6 + packages/cli/src/util/expand-adaptors.ts | 32 +- packages/cli/src/util/index.d.ts | 1 - packages/cli/src/util/index.ts | 6 + packages/cli/src/util/load-input.ts | 144 -- packages/cli/src/util/load-plan.ts | 252 ++++ .../cli/src/util/map-adaptors-to-monorepo.ts | 45 +- packages/cli/src/util/validate-adaptors.ts | 6 +- packages/cli/test/commands.test.ts | 163 ++- packages/cli/test/compile/compile.test.ts | 82 +- packages/cli/test/compile/options.test.ts | 4 +- packages/cli/test/docgen/handler.test.ts | 2 +- packages/cli/test/execute/execute.test.ts | 293 ++-- .../execute/get-autoinstall-targets.test.ts | 198 +-- packages/cli/test/execute/options.test.ts | 15 +- .../cli/test/execute/parse-adaptors.test.ts | 101 +- packages/cli/test/integration.test.ts | 2 +- .../cli/test/options/ensure/inputPath.test.ts | 14 +- .../cli/test/options/ensure/strict.test.ts | 51 - packages/cli/test/options/execute.test.ts | 4 +- packages/cli/test/util.ts | 18 + .../cli/test/util/expand-adaptors.test.ts | 103 +- packages/cli/test/util/load-input.test.ts | 322 ----- packages/cli/test/util/load-plan.test.ts | 273 ++++ .../util/map-adaptors-to-monorepo.test.ts | 26 +- packages/compiler/src/compile.ts | 4 +- packages/deploy/src/index.ts | 2 +- packages/engine-multi/package.json | 1 + packages/engine-multi/src/api/autoinstall.ts | 14 +- packages/engine-multi/src/api/compile.ts | 15 +- packages/engine-multi/src/api/execute.ts | 6 +- packages/engine-multi/src/api/lifecycle.ts | 2 +- .../src/api/preload-credentials.ts | 7 +- .../src/classes/ExecutionContext.ts | 15 +- packages/engine-multi/src/engine.ts | 41 +- packages/engine-multi/src/test/util.ts | 39 +- .../engine-multi/src/test/worker-functions.ts | 5 +- packages/engine-multi/src/types.ts | 
36 +- .../engine-multi/src/util/create-state.ts | 13 +- packages/engine-multi/src/worker/events.ts | 2 - packages/engine-multi/src/worker/pool.ts | 9 +- .../engine-multi/src/worker/thread/helpers.ts | 5 +- .../src/worker/thread/mock-run.ts | 17 +- .../engine-multi/src/worker/thread/run.ts | 9 +- packages/engine-multi/test/api.test.ts | 47 +- .../engine-multi/test/api/autoinstall.test.ts | 81 +- .../engine-multi/test/api/call-worker.test.ts | 14 +- .../engine-multi/test/api/execute.test.ts | 107 +- .../engine-multi/test/api/lifecycle.test.ts | 46 +- .../test/api/preload-credentials.test.ts | 71 +- packages/engine-multi/test/engine.test.ts | 142 +- packages/engine-multi/test/errors.test.ts | 121 +- .../engine-multi/test/integration.test.ts | 102 +- packages/engine-multi/test/security.test.ts | 6 +- .../test/worker/mock-worker.test.ts | 28 +- .../engine-multi/test/worker/pool.test.ts | 14 +- packages/engine-multi/tsconfig.json | 2 +- packages/lexicon/README.md | 42 + packages/lexicon/core.d.ts | 137 ++ packages/lexicon/index.d.ts | 2 + packages/lexicon/index.js | 1 + packages/lexicon/lightning.d.ts | 182 +++ packages/lexicon/lightning.js | 6 + packages/lexicon/package.json | 26 + packages/lightning-mock/package.json | 1 + packages/lightning-mock/src/api-dev.ts | 20 +- packages/lightning-mock/src/api-sockets.ts | 46 +- packages/lightning-mock/src/server.ts | 8 +- packages/lightning-mock/src/types.ts | 117 +- .../test/channels/claim.test.ts | 10 +- .../lightning-mock/test/channels/run.test.ts | 30 +- .../lightning-mock/test/events/log.test.ts | 8 +- .../test/events/run-complete.test.ts | 4 +- .../test/events/run-start.test.ts | 4 +- .../test/events/step-complete.test.ts | 10 +- .../test/events/step-start.test.ts | 4 +- packages/lightning-mock/test/server.test.ts | 14 +- .../lightning-mock/test/socket-server.test.ts | 13 +- packages/lightning-mock/test/util.ts | 2 +- packages/lightning-mock/tsconfig.json | 2 +- packages/runtime/README.md | 44 +- 
packages/runtime/package.json | 1 + packages/runtime/src/execute/compile-plan.ts | 81 +- packages/runtime/src/execute/context.ts | 7 +- packages/runtime/src/execute/expression.ts | 73 +- packages/runtime/src/execute/plan.ts | 35 +- .../runtime/src/execute/{job.ts => step.ts} | 103 +- packages/runtime/src/modules/module-loader.ts | 2 +- packages/runtime/src/runtime.ts | 93 +- packages/runtime/src/types.ts | 104 +- packages/runtime/src/util/assemble-state.ts | 13 +- packages/runtime/src/util/clone.ts | 2 +- packages/runtime/src/util/default-state.ts | 1 + packages/runtime/src/util/execute.ts | 2 +- packages/runtime/src/util/index.ts | 19 + packages/runtime/src/util/log-error.ts | 12 +- packages/runtime/src/util/validate-plan.ts | 17 +- packages/runtime/test/context.test.ts | 26 +- packages/runtime/test/errors.test.ts | 44 +- .../runtime/test/execute/compile-plan.test.ts | 363 +++-- .../runtime/test/execute/expression.test.ts | 61 +- packages/runtime/test/execute/plan.test.ts | 1248 ++++++++--------- .../execute/{job.test.ts => step.test.ts} | 99 +- packages/runtime/test/memory.test.ts | 26 +- packages/runtime/test/runtime.test.ts | 437 +++--- packages/runtime/test/security.test.ts | 95 +- .../runtime/test/util/assemble-state.test.ts | 70 +- .../test/util/{regex.ts => regex.test.ts} | 0 .../runtime/test/util/validate-plan.test.ts | 131 +- packages/ws-worker/package.json | 1 + packages/ws-worker/src/api/claim.ts | 3 +- packages/ws-worker/src/api/execute.ts | 56 +- packages/ws-worker/src/api/reasons.ts | 19 +- packages/ws-worker/src/channels/run.ts | 26 +- .../ws-worker/src/channels/worker-queue.ts | 16 +- packages/ws-worker/src/events.ts | 86 +- packages/ws-worker/src/events/run-complete.ts | 3 +- packages/ws-worker/src/events/run-error.ts | 6 +- .../ws-worker/src/events/step-complete.ts | 6 +- packages/ws-worker/src/events/step-start.ts | 10 +- packages/ws-worker/src/mock/resolvers.ts | 3 +- packages/ws-worker/src/mock/runtime-engine.ts | 32 +- 
packages/ws-worker/src/mock/sockets.ts | 5 +- packages/ws-worker/src/server.ts | 6 +- packages/ws-worker/src/types.d.ts | 79 +- ...nvert-run.ts => convert-lightning-plan.ts} | 114 +- .../ws-worker/src/util/create-run-state.ts | 24 +- packages/ws-worker/src/util/index.ts | 2 +- .../ws-worker/src/util/log-final-reason.ts | 2 +- packages/ws-worker/test/api/destroy.test.ts | 75 +- packages/ws-worker/test/api/execute.test.ts | 135 +- packages/ws-worker/test/api/reasons.test.ts | 14 +- packages/ws-worker/test/api/workloop.test.ts | 21 +- packages/ws-worker/test/channels/run.test.ts | 20 +- .../test/channels/worker-queue.test.ts | 57 +- .../test/events/run-complete.test.ts | 39 +- .../ws-worker/test/events/run-error.test.ts | 33 +- .../test/events/step-complete.test.ts | 54 +- .../ws-worker/test/events/step-start.test.ts | 53 +- packages/ws-worker/test/lightning.test.ts | 215 ++- .../test/mock/runtime-engine.test.ts | 261 ++-- packages/ws-worker/test/mock/sockets.test.ts | 2 +- packages/ws-worker/test/reasons.test.ts | 24 +- packages/ws-worker/test/server.test.ts | 2 +- packages/ws-worker/test/util.ts | 38 +- ...test.ts => convert-lightning-plan.test.ts} | 330 +++-- .../test/util/create-run-state.test.ts | 117 +- packages/ws-worker/test/util/throttle.test.ts | 2 +- packages/ws-worker/tsconfig.json | 2 +- pnpm-lock.yaml | 125 +- 186 files changed, 5275 insertions(+), 4813 deletions(-) create mode 100644 .changeset/grumpy-pillows-tie.md create mode 100644 .changeset/old-planes-sort.md create mode 100644 .changeset/pretty-spoons-beam.md create mode 100644 .changeset/ten-dingos-explode.md create mode 100644 .changeset/two-pugs-burn.md create mode 100644 integration-tests/cli/test/fixtures/plan.json delete mode 100644 packages/cli/src/util/index.d.ts create mode 100644 packages/cli/src/util/index.ts delete mode 100644 packages/cli/src/util/load-input.ts create mode 100644 packages/cli/src/util/load-plan.ts delete mode 100644 packages/cli/test/options/ensure/strict.test.ts 
create mode 100644 packages/cli/test/util.ts delete mode 100644 packages/cli/test/util/load-input.test.ts create mode 100644 packages/cli/test/util/load-plan.test.ts create mode 100644 packages/lexicon/README.md create mode 100644 packages/lexicon/core.d.ts create mode 100644 packages/lexicon/index.d.ts create mode 100644 packages/lexicon/index.js create mode 100644 packages/lexicon/lightning.d.ts create mode 100644 packages/lexicon/lightning.js create mode 100644 packages/lexicon/package.json rename packages/runtime/src/execute/{job.ts => step.ts} (69%) create mode 100644 packages/runtime/src/util/default-state.ts create mode 100644 packages/runtime/src/util/index.ts rename packages/runtime/test/execute/{job.test.ts => step.test.ts} (73%) rename packages/runtime/test/util/{regex.ts => regex.test.ts} (100%) rename packages/ws-worker/src/util/{convert-run.ts => convert-lightning-plan.ts} (52%) rename packages/ws-worker/test/util/{convert-run.test.ts => convert-lightning-plan.test.ts} (56%) diff --git a/.changeset/grumpy-pillows-tie.md b/.changeset/grumpy-pillows-tie.md new file mode 100644 index 000000000..c4b59e4db --- /dev/null +++ b/.changeset/grumpy-pillows-tie.md @@ -0,0 +1,12 @@ +--- +'@openfn/cli': major +--- + +The 1.0 Release of the CLI updates the language and input of the CLI to match the nomenclature of Lightning. + +See the readme for details of the new terminology. 
+ +- Add support for execution plans +- Deprecate old workflow format (old workflows are supported and will be automatically converted into the new "execution plans") +- Update terminology across the codebase and docs +- Remove strict mode diff --git a/.changeset/old-planes-sort.md b/.changeset/old-planes-sort.md new file mode 100644 index 000000000..7d0a644c3 --- /dev/null +++ b/.changeset/old-planes-sort.md @@ -0,0 +1,5 @@ +--- +'@openfn/deploy': patch +--- + +Log the result to success (not always) diff --git a/.changeset/pretty-spoons-beam.md b/.changeset/pretty-spoons-beam.md new file mode 100644 index 000000000..64ab0fd77 --- /dev/null +++ b/.changeset/pretty-spoons-beam.md @@ -0,0 +1,9 @@ +--- +'@openfn/runtime': major +--- + +The 1.0 release of the runtime updates the signatures and language of the runtime to match Lightning. It also includes some housekeeping. + +- Update main run() signature +- Remove strict mode options +- Integrate with lexicon diff --git a/.changeset/ten-dingos-explode.md b/.changeset/ten-dingos-explode.md new file mode 100644 index 000000000..703d5a20b --- /dev/null +++ b/.changeset/ten-dingos-explode.md @@ -0,0 +1,6 @@ +--- +'@openfn/engine-multi': major +'@openfn/ws-worker': major +--- + +The 1.0 release updates the language and input of the CLI to match the nomenclature of Lightning. 
diff --git a/.changeset/two-pugs-burn.md b/.changeset/two-pugs-burn.md new file mode 100644 index 000000000..080b1307b --- /dev/null +++ b/.changeset/two-pugs-burn.md @@ -0,0 +1,6 @@ +--- +'@openfn/lightning-mock': major +'@openfn/logger': major +--- + +Symbolic 1.0 version release diff --git a/integration-tests/cli/test/cli.test.ts b/integration-tests/cli/test/cli.test.ts index 6d9b72d32..05778ff2d 100644 --- a/integration-tests/cli/test/cli.test.ts +++ b/integration-tests/cli/test/cli.test.ts @@ -14,7 +14,7 @@ test.serial('openfn version', async (t) => { test.serial('openfn test', async (t) => { const { stdout } = await run(t.title); t.regex(stdout, /Versions:/); - t.regex(stdout, /Running test job.../); + t.regex(stdout, /Running test workflow/); t.regex(stdout, /Result: 42/); }); diff --git a/integration-tests/cli/test/errors.test.ts b/integration-tests/cli/test/errors.test.ts index 1c3e66a43..410afbe56 100644 --- a/integration-tests/cli/test/errors.test.ts +++ b/integration-tests/cli/test/errors.test.ts @@ -2,19 +2,20 @@ import test from 'ava'; import path from 'node:path'; import run from '../src/run'; import { extractLogs, assertLog } from '../src/util'; +import { stderr } from 'node:process'; const jobsPath = path.resolve('test/fixtures'); // These are all errors that will stop the CLI from even running -test.serial('job not found', async (t) => { +test.serial('expression not found', async (t) => { const { stdout, err } = await run('openfn blah.js --log-json'); t.is(err.code, 1); const stdlogs = extractLogs(stdout); - assertLog(t, stdlogs, /job not found/i); - assertLog(t, stdlogs, /failed to load the job from blah.js/i); + assertLog(t, stdlogs, /expression not found/i); + assertLog(t, stdlogs, /failed to load the expression from blah.js/i); assertLog(t, stdlogs, /critical error: aborting command/i); }); diff --git a/integration-tests/cli/test/execute-workflow.test.ts b/integration-tests/cli/test/execute-workflow.test.ts index 57f53ee17..550128c7c 100644 
--- a/integration-tests/cli/test/execute-workflow.test.ts +++ b/integration-tests/cli/test/execute-workflow.test.ts @@ -83,6 +83,15 @@ test.serial( } ); +// Run a new-style execution plan with custom start +test.serial(`openfn ${jobsPath}/plan.json -i`, async (t) => { + const { err } = await run(t.title); + t.falsy(err); + + const out = getJSON(); + t.deepEqual(out.data.userId, 1); +}); + test.serial(`openfn ${jobsPath}/wf-conditional.json`, async (t) => { const { err } = await run(t.title); t.falsy(err); @@ -124,36 +133,6 @@ test.serial( } ); -test.serial(`openfn ${jobsPath}/wf-strict.json --strict`, async (t) => { - const { err } = await run(t.title); - t.falsy(err); - - const out = getJSON(); - t.deepEqual(out, { - data: { - name: 'jane', - }, - }); -}); - -test.serial(`openfn ${jobsPath}/wf-strict.json --no-strict`, async (t) => { - const { err } = await run(t.title); - t.falsy(err); - - const out = getJSON(); - t.deepEqual(out, { - x: 22, - data: { - name: 'jane', - }, - references: [ - { - name: 'bob', - }, - ], - }); -}); - test.serial( `openfn ${jobsPath}/wf-errors.json -S "{ \\"data\\": { \\"number\\": 2 } }"`, async (t) => { @@ -169,8 +148,8 @@ test.serial( } ); -test.serial( - `openfn ${jobsPath}/wf-errors.json -S "{ \\"data\\": { \\"number\\": 32 } }"`, +test.serial.only( + `openfn ${jobsPath}/wf-errors.json -iS "{ \\"data\\": { \\"number\\": 32 } }"`, async (t) => { const { err } = await run(t.title); t.falsy(err); @@ -189,7 +168,7 @@ test.serial( severity: 'fail', source: 'runtime', }, - jobId: 'start', + stepId: 'start', message: 'abort', type: 'JobError', }, diff --git a/integration-tests/cli/test/fixtures/circular.json b/integration-tests/cli/test/fixtures/circular.json index 2b3077d7a..d209b2a85 100644 --- a/integration-tests/cli/test/fixtures/circular.json +++ b/integration-tests/cli/test/fixtures/circular.json @@ -1,14 +1,16 @@ { - "jobs": [ - { - "id": "a", - "expression": "x", - "next": { "b": true } - }, - { - "id": "b", - "expression": "x", 
- "next": { "a": true } - } - ] + "workflow": { + "steps": [ + { + "id": "a", + "expression": "x", + "next": { "b": true } + }, + { + "id": "b", + "expression": "x", + "next": { "a": true } + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-config-path.json b/integration-tests/cli/test/fixtures/invalid-config-path.json index e3ed709a7..1a343436e 100644 --- a/integration-tests/cli/test/fixtures/invalid-config-path.json +++ b/integration-tests/cli/test/fixtures/invalid-config-path.json @@ -1,7 +1,10 @@ { - "jobs": [ - { - "configuration": "does-not-exist.json" - } - ] + "workflow": { + "steps": [ + { + "configuration": "does-not-exist.json", + "expression": "." + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-exp-path.json b/integration-tests/cli/test/fixtures/invalid-exp-path.json index 7cff3440c..6ce8c42ac 100644 --- a/integration-tests/cli/test/fixtures/invalid-exp-path.json +++ b/integration-tests/cli/test/fixtures/invalid-exp-path.json @@ -1,7 +1,9 @@ { - "jobs": [ - { - "expression": "does-not-exist.js" - } - ] + "workflow": { + "steps": [ + { + "expression": "does-not-exist.js" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-start.json b/integration-tests/cli/test/fixtures/invalid-start.json index 6fc284da5..13f0f9ee1 100644 --- a/integration-tests/cli/test/fixtures/invalid-start.json +++ b/integration-tests/cli/test/fixtures/invalid-start.json @@ -1,9 +1,13 @@ { - "start": "nope", - "jobs": [ - { - "id": "x", - "expression": "fn((state) => state)" - } - ] + "options": { + "start": "nope" + }, + "workflow": { + "steps": [ + { + "id": "x", + "expression": "fn((state) => state)" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/invalid-syntax.json b/integration-tests/cli/test/fixtures/invalid-syntax.json index 7028961f2..651f73f93 100644 --- a/integration-tests/cli/test/fixtures/invalid-syntax.json +++ b/integration-tests/cli/test/fixtures/invalid-syntax.json @@ -1,7 +1,9 @@ { - 
"jobs": [ - { - "expression": "invalid.js" - } - ] + "workflow": { + "steps": [ + { + "expression": "invalid.js" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/multiple-inputs.json b/integration-tests/cli/test/fixtures/multiple-inputs.json index 25c28dd9b..59a33a755 100644 --- a/integration-tests/cli/test/fixtures/multiple-inputs.json +++ b/integration-tests/cli/test/fixtures/multiple-inputs.json @@ -1,18 +1,20 @@ { - "jobs": [ - { - "id": "a", - "expression": "x", - "next": { "b": true, "c": true } - }, - { - "id": "b", - "expression": "x", - "next": { "c": true } - }, - { - "id": "c", - "expression": "x" - } - ] + "workflow": { + "steps": [ + { + "id": "a", + "expression": "x", + "next": { "b": true, "c": true } + }, + { + "id": "b", + "expression": "x", + "next": { "c": true } + }, + { + "id": "c", + "expression": "x" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/plan.json b/integration-tests/cli/test/fixtures/plan.json new file mode 100644 index 000000000..bc21f7090 --- /dev/null +++ b/integration-tests/cli/test/fixtures/plan.json @@ -0,0 +1,19 @@ +{ + "options": { + "start": "b" + }, + "workflow": { + "steps": [ + { + "id": "a", + "adaptor": "common", + "expression": "fn((state) => { return state; });" + }, + { + "id": "b", + "adaptor": "http", + "expression": "get('https://jsonplaceholder.typicode.com/todos/1')" + } + ] + } +} diff --git a/integration-tests/cli/test/fixtures/wf-array.json b/integration-tests/cli/test/fixtures/wf-array.json index 763d15457..76b9decaf 100644 --- a/integration-tests/cli/test/fixtures/wf-array.json +++ b/integration-tests/cli/test/fixtures/wf-array.json @@ -1,21 +1,23 @@ { - "jobs": [ - { - "id": "a", - "adaptor": "common", - "expression": "fn((state) => { if (!state.data.items) { state.data.items = []; } return state; });", - "next": { "b": true } - }, - { - "id": "b", - "adaptor": "common", - "expression": "fn((state) => { state.data.items.push('b'); return state; });", - "next": { "c": true } - 
}, - { - "id": "c", - "adaptor": "common", - "expression": "fn((state) => { state.data.items.push('c'); return state; });" - } - ] + "workflow": { + "steps": [ + { + "id": "a", + "adaptor": "common", + "expression": "fn((state) => { if (!state.data.items) { state.data.items = []; } return state; });", + "next": { "b": true } + }, + { + "id": "b", + "adaptor": "common", + "expression": "fn((state) => { state.data.items.push('b'); return state; });", + "next": { "c": true } + }, + { + "id": "c", + "adaptor": "common", + "expression": "fn((state) => { state.data.items.push('c'); return state; });" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-conditional.json b/integration-tests/cli/test/fixtures/wf-conditional.json index 203f7ce41..4aa758ab5 100644 --- a/integration-tests/cli/test/fixtures/wf-conditional.json +++ b/integration-tests/cli/test/fixtures/wf-conditional.json @@ -1,29 +1,31 @@ { - "start": "start", - "jobs": [ - { - "id": "start", - "state": { - "data": { - "number": 1 + "options": { "start": "start" }, + "workflow": { + "steps": [ + { + "id": "start", + "state": { + "data": { + "number": 1 + } + }, + "adaptor": "common", + "expression": "fn((state) => state);", + "next": { + "small": { "condition": "state.data.number < 10" }, + "large": { "condition": "state.data.number >= 10" } } }, - "adaptor": "common", - "expression": "fn((state) => state);", - "next": { - "small": { "condition": "state.data.number < 10" }, - "large": { "condition": "state.data.number >= 10" } + { + "id": "small", + "adaptor": "common", + "expression": "fn((state) => { state.data.result = \"small\"; return state; });" + }, + { + "id": "large", + "adaptor": "common", + "expression": "fn((state) => { state.data.result = \"large\"; return state; });" } - }, - { - "id": "small", - "adaptor": "common", - "expression": "fn((state) => { state.data.result = \"small\"; return state; });" - }, - { - "id": "large", - "adaptor": "common", - "expression": "fn((state) => { 
state.data.result = \"large\"; return state; });" - } - ] + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-count.json b/integration-tests/cli/test/fixtures/wf-count.json index e20e7a604..5d8e50c71 100644 --- a/integration-tests/cli/test/fixtures/wf-count.json +++ b/integration-tests/cli/test/fixtures/wf-count.json @@ -1,14 +1,16 @@ { - "jobs": [ - { - "adaptor": "common", - "expression": "fn((state) => (state.data.count ? state : { data: { count: 21 } }));", - "next": { "b": true } - }, - { - "id": "b", - "adaptor": "common", - "expression": "fn((state) => { state.data.count = state.data.count * 2; return state; });" - } - ] + "workflow": { + "steps": [ + { + "adaptor": "common", + "expression": "fn((state) => (state.data.count ? state : { data: { count: 21 } }));", + "next": { "b": true } + }, + { + "id": "b", + "adaptor": "common", + "expression": "fn((state) => { state.data.count = state.data.count * 2; return state; });" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-errors.json b/integration-tests/cli/test/fixtures/wf-errors.json index 6464479d5..354feeab7 100644 --- a/integration-tests/cli/test/fixtures/wf-errors.json +++ b/integration-tests/cli/test/fixtures/wf-errors.json @@ -1,24 +1,26 @@ { - "start": "start", - "jobs": [ - { - "id": "start", - "adaptor": "common", - "expression": "fn((state) => { if (state.data.number > 10) { throw new Error('abort') }; return state; });", - "next": { - "increment": { "condition": "!state.errors" }, - "do nothing": { "condition": "state.errors" } + "options": { "start": "start" }, + "workflow": { + "steps": [ + { + "id": "start", + "adaptor": "common", + "expression": "fn((state) => { if (state.data.number > 10) { throw new Error('abort') }; return state; });", + "next": { + "increment": { "condition": "!state.errors" }, + "do nothing": { "condition": "state.errors" } + } + }, + { + "id": "increment", + "adaptor": "common", + "expression": "fn((state) => { state.data.number += 1; return 
state; });" + }, + { + "id": "do nothing", + "adaptor": "common", + "expression": "fn((state) => state);" } - }, - { - "id": "increment", - "adaptor": "common", - "expression": "fn((state) => { state.data.number += 1; return state; });" - }, - { - "id": "do nothing", - "adaptor": "common", - "expression": "fn((state) => state);" - } - ] + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-simple.json b/integration-tests/cli/test/fixtures/wf-simple.json index dfd904068..07caaa188 100644 --- a/integration-tests/cli/test/fixtures/wf-simple.json +++ b/integration-tests/cli/test/fixtures/wf-simple.json @@ -1,8 +1,10 @@ { - "jobs": [ - { - "adaptor": "common", - "expression": "simple.js" - } - ] + "workflow": { + "steps": [ + { + "adaptor": "common", + "expression": "simple.js" + } + ] + } } diff --git a/integration-tests/cli/test/fixtures/wf-strict.json b/integration-tests/cli/test/fixtures/wf-strict.json index 7461a276a..370afd61f 100644 --- a/integration-tests/cli/test/fixtures/wf-strict.json +++ b/integration-tests/cli/test/fixtures/wf-strict.json @@ -1,17 +1,19 @@ { - "jobs": [ - { - "id": "a", - "adaptor": "common", - "expression": "fn((state) => ({ x: 22, data: { name: 'bob' }, references: [] }));", - "next": { - "b": true + "workflow": { + "steps": [ + { + "id": "a", + "adaptor": "common", + "expression": "fn((state) => ({ x: 22, data: { name: 'bob' }, references: [] }));", + "next": { + "b": true + } + }, + { + "id": "b", + "adaptor": "common", + "expression": "fn(state => composeNextState(state, { name: 'jane' }));" } - }, - { - "id": "b", - "adaptor": "common", - "expression": "fn(state => composeNextState(state, { name: 'jane' }));" - } - ] + ] + } } diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 399968c81..ae83b4331 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -157,7 +157,7 @@ test("Don't send job 
logs to stdout", (t) => { // But it SHOULD log engine stuff const runtimeLog = jsonLogs.find( - (l) => l.name === 'R/T' && l.message[0].match(/completed job/i) + (l) => l.name === 'R/T' && l.message[0].match(/completed step/i) ); t.truthy(runtimeLog); done(); diff --git a/packages/cli/README.md b/packages/cli/README.md index ca505dd0b..542860ae9 100644 --- a/packages/cli/README.md +++ b/packages/cli/README.md @@ -15,6 +15,7 @@ The CLI includes: - [Installation](#installation) - [Updating](#updating) +- [Terminology](#terminology) - [Migrating from devtools](#migrating-from-devtools) - [Basic Usage](#basic-usage) - [Advanced Usage](#advanced-usage) @@ -71,16 +72,25 @@ npm uninstall -g @openfn/cli And then re-installing. -## Migrating from devtools +## Terminology -If you're coming to the CLI from the old openfn devtools, here are a couple of key points to be aware of: +The CLI (and the wider OpenFn stack) has some very particular terminology -- The CLI has a shorter, sleeker syntax, so your command should be much shorter -- The CLI will automatically install adaptors for you (with full version control) +- An **Expression** is a string of Javascript (or Javascript-like code) written to be run in the CLI or Lightning. +- A **Job** is an expression plus some metadata required to run it - typically an adaptor and credentials. + The terms Job and Expression are often used interchangeably. +- A **Workflow** is a series of steps to be executed in sequence. Steps are usually Jobs (and so job and step are often used + interchangeably), but can be Triggers. +- An **Execution Plan** is a Workflow plus some options which inform how it should be executed (ie, start node, timeout). + The term "Execution plan" is mostly used internally and not exposed to users, and is usually interchangeable with Workflow. + +Note that an expression is not generally portable (ie, cannot run in other environments) unless it is compiled. 
+A compiled expression has imports and exports and, so long as packages are available, can run in a simple +JavaScript runtime. ## Basic Usage -You're probably here to run jobs (expressions) or workflows, which the CLI makes easy: +You're probably here to run Workflows (or individual jobs), which the CLI makes easy: ``` openfn path/to/workflow.json @@ -91,7 +101,7 @@ If running a single job, you MUST specify which adaptor to use. Pass the `-i` flag to auto-install any required adaptors (it's safe to do this redundantly, although the run will be a little slower). -When the finished, the CLI will write the resulting state to disk. By default the CLI will create an `output.json` next to the job file. You can pass a path to output by passing `-o path/to/output.json` and state by adding `-s path/to/state.json`. You can use `-S` and `-O` to pass state through stdin and return the output through stdout. +When finished, the CLI will write the resulting state to disk. By default the CLI will create an `output.json` next to the job file. You can pass a path to output by passing `-o path/to/output.json` and state by adding `-s path/to/state.json`. You can use `-S` and `-O` to pass state through stdin and return the output through stdout. The CLI maintains a repo for auto-installed adaptors. Run `openfn repo list` to see where the repo is, and what's in it. Set the `OPENFN_REPO_DIR` env var to specify the repo folder. When autoinstalling, the CLI will check to see if a matching version is found in the repo. `openfn repo clean` will remove all adaptors from the repo. The repo also includes any documentation and metadata built with the CLI. 
@@ -103,14 +113,16 @@ You can pass `--log info` to get more feedback about what's happening, or `--log ## Advanced Usage -The CLI has a number of commands (the first argument after openfn) +The CLI has a number of commands (the first argument after `openfn`): - execute - run a job -- compile - compile a job to a .js file +- compile - compile a job to a .js file (prints to stdout by default) - docs - show documentation for an adaptor function - repo - manage the repo of installed modules - docgen - generate JSON documentation for an adaptor based on its typescript +For example, `openfn compile job.js -a common` will compile the code at `job.js` with the common adaptor. + If no command is specified, execute will run. To get more information about a command, including usage examples, run `openfn help`, ie, `openfn compile help`. @@ -253,38 +265,43 @@ Pass `--log-json` to the CLI to do this. You can also set the OPENFN_LOG_JSON en ## Workflows -As of v0.0.35 the CLI supports running workflows as well as jobs. - -A workflow is in execution plan for running several jobs in a sequence. It is defined as a JSON structure. +A workflow is an execution plan for running several jobs in a sequence. It is defined as a JSON structure. To see an example workflow, run the test command with `openfn test`. 
-A workflow has a structure like this (better documentation is coming soon):
+A workflow has a structure like this:
 
 ```json
 {
-  "start": "a", // optionally specify the start node (defaults to jobs[0])
-  "jobs": [
-    {
-      "id": "a",
-      "expression": "fn((state) => state)", // code or a path
-      "adaptor": "@openfn/language-common@1.75", // specifiy the adaptor to use (version optional)
-      "data": {}, // optionally pre-populate the data object (this will be overriden by keys in in previous state)
-      "configuration": {}, // Use this to pass credentials
-      "next": {
-        // This object defines which jobs to call next
-        // All edges returning true will run
-        // If there are no next edges, the workflow will end
-        "b": true,
-        "c": {
-          "condition": "!state.error" // Note that this is an expression, not a function
+  "workflow": {
+    "name": "my-workflow", // human readable name used in logging
+    "steps": [
+      {
+        "name": "a", // human readable name used in logging
+        "expression": "fn((state) => state)", // code or a path to an expression.js file
+        "adaptor": "@openfn/language-common@1.7.5", // specify the adaptor to use (version optional)
+        "data": {}, // optionally pre-populate the data object (this will be overridden by keys in previous state)
+        "configuration": {}, // Use this to pass credentials
+        "next": {
+          // This object defines which jobs to call next
+          // All edges returning true will run
+          // If there are no next edges, the workflow will end
+          "b": true,
+          "c": {
+            "condition": "!state.error" // Note that this is a strict Javascript expression, not a function, and has no adaptor support
+          }
         }
       }
-    }
-  ]
+    ]
+  },
+  "options": {
+    "start": "a" // optionally specify the start node (defaults to steps[0])
+  }
 }
 ```
 
+See `packages/lexicon` for type definitions (the workflow format is covered by the `ExecutionPlan` type).
+
 ## Compilation
 
 The CLI will compile your job code into regular Javascript. 
It does a number of things to make your code robust and portable: @@ -298,8 +315,6 @@ The result of this is a lightweight, modern JS module. It can be executed in any The CLI uses openfn's own runtime to execute jobs in a safe environment. -All jobs which work against `@openfn/core` will work in the new CLI and runtime environment (note: although this is a work in progress and we are actively looking for help to test this!). - If you want to see how the compiler is changing your job, run `openfn compile path/to/job -a ` to return the compiled code to stdout. Add `-o path/to/output.js` to save the result to disk. ## Contributing @@ -355,10 +370,10 @@ export OPENFN_ADAPTORS_REPO=~/repo/openfn/adaptors ### Contributing changes -Open a PR at https://github.com/openfn/kit. Include a changeset and a description of your change. - -See the root readme for more details about changests, +Include a changeset and a description of your change. Run this command and follow the interactive prompt (it's really easy, promise!) ``` - +pnpm changeset ``` + +Commit the changeset files and open a PR at https://github.com/openfn/kit. 
diff --git a/packages/cli/package.json b/packages/cli/package.json index 67b286895..dac6b0207 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -34,6 +34,7 @@ "license": "ISC", "devDependencies": { "@openfn/language-common": "2.0.0-rc3", + "@openfn/lexicon": "workspace:^", "@types/mock-fs": "^4.13.1", "@types/node": "^18.15.13", "@types/rimraf": "^3.0.2", diff --git a/packages/cli/src/commands.ts b/packages/cli/src/commands.ts index 328f0ce05..441213656 100644 --- a/packages/cli/src/commands.ts +++ b/packages/cli/src/commands.ts @@ -11,7 +11,7 @@ import { clean, install, pwd, list } from './repo/handler'; import createLogger, { CLI, Logger } from './util/logger'; import mapAdaptorsToMonorepo, { - MapAdaptorsToMonorepoOptions, + validateMonoRepo, } from './util/map-adaptors-to-monorepo'; import printVersions from './util/print-versions'; @@ -56,18 +56,25 @@ const parse = async (options: Opts, log?: Logger) => { await printVersions(logger, options); } - if (options.monorepoPath) { - if (options.monorepoPath === 'ERR') { + const { monorepoPath } = options; + if (monorepoPath) { + // TODO how does this occur? 
+ if (monorepoPath === 'ERR') { logger.error( 'ERROR: --use-adaptors-monorepo was passed, but OPENFN_ADAPTORS_REPO env var is undefined' ); logger.error('Set OPENFN_ADAPTORS_REPO to a path pointing to the repo'); process.exit(9); // invalid argument } - await mapAdaptorsToMonorepo( - options as MapAdaptorsToMonorepoOptions, + + await validateMonoRepo(monorepoPath, logger); + logger.success(`Loading adaptors from monorepo at ${monorepoPath}`); + + options.adaptors = mapAdaptorsToMonorepo( + monorepoPath, + options.adaptors, logger - ); + ) as string[]; } // TODO it would be nice to do this in the repoDir option, but diff --git a/packages/cli/src/compile/command.ts b/packages/cli/src/compile/command.ts index 8b8320b41..5b9957b45 100644 --- a/packages/cli/src/compile/command.ts +++ b/packages/cli/src/compile/command.ts @@ -9,8 +9,7 @@ export type CompileOptions = Pick< | 'command' | 'expandAdaptors' | 'ignoreImports' - | 'jobPath' - | 'job' + | 'expressionPath' | 'logJson' | 'log' | 'outputPath' @@ -18,7 +17,6 @@ export type CompileOptions = Pick< | 'repoDir' | 'path' | 'useAdaptorsMonorepo' - | 'workflow' > & { repoDir?: string; }; diff --git a/packages/cli/src/compile/compile.ts b/packages/cli/src/compile/compile.ts index d08b34eda..a2f8285f6 100644 --- a/packages/cli/src/compile/compile.ts +++ b/packages/cli/src/compile/compile.ts @@ -1,27 +1,28 @@ import compile, { preloadAdaptorExports, Options } from '@openfn/compiler'; -import { getModulePath, ExecutionPlan } from '@openfn/runtime'; +import { getModulePath } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + import createLogger, { COMPILER, Logger } from '../util/logger'; import abort from '../util/abort'; import type { CompileOptions } from './command'; // Load and compile a job from a file, then return the result // This is designed to be re-used in different CLI steps -export default async (opts: CompileOptions, log: Logger) => { - log.debug('Compiling...'); - let job; - if 
(opts.workflow) { - // Note that the workflow will be loaded into an object by this point - job = compileWorkflow(opts.workflow as ExecutionPlan, opts, log); - } else { - job = await compileJob((opts.job || opts.jobPath) as string, opts, log); +export default async ( + planOrPath: ExecutionPlan | string, + opts: CompileOptions, + log: Logger +) => { + if (typeof planOrPath === 'string') { + const result = await compileJob(planOrPath as string, opts, log); + log.success(`Compiled expression from ${opts.expressionPath}`); + return result; } - if (opts.jobPath) { - log.success(`Compiled from ${opts.jobPath}`); - } else { - log.success('Compilation complete'); - } - return job; + const compiledPlan = compileWorkflow(planOrPath as ExecutionPlan, opts, log); + log.success('Compiled all expressions in workflow'); + + return compiledPlan; }; const compileJob = async ( @@ -29,7 +30,7 @@ const compileJob = async ( opts: CompileOptions, log: Logger, jobName?: string -) => { +): Promise => { try { const compilerOptions: Options = await loadTransformOptions(opts, log); return compile(job, compilerOptions); @@ -40,16 +41,19 @@ const compileJob = async ( e, 'Check the syntax of the job expression:\n\n' + job ); + // This will never actully execute + return ''; } }; // Find every expression in the job and run the compiler on it const compileWorkflow = async ( - workflow: ExecutionPlan, + plan: ExecutionPlan, opts: CompileOptions, log: Logger ) => { - for (const job of workflow.jobs) { + for (const step of plan.workflow.steps) { + const job = step as Job; const jobOpts = { ...opts, }; @@ -65,7 +69,7 @@ const compileWorkflow = async ( ); } } - return workflow; + return plan; }; // TODO this is a bit of a temporary solution diff --git a/packages/cli/src/compile/handler.ts b/packages/cli/src/compile/handler.ts index 2435ccd19..ac19752fb 100644 --- a/packages/cli/src/compile/handler.ts +++ b/packages/cli/src/compile/handler.ts @@ -3,33 +3,23 @@ import type { CompileOptions } from 
'./command'; import type { Logger } from '../util/logger'; import compile from './compile'; -import loadInput from '../util/load-input'; -import expandAdaptors from '../util/expand-adaptors'; +import loadPlan from '../util/load-plan'; import assertPath from '../util/assert-path'; -import mapAdaptorsToMonorepo, { - MapAdaptorsToMonorepoOptions, -} from '../util/map-adaptors-to-monorepo'; const compileHandler = async (options: CompileOptions, logger: Logger) => { assertPath(options.path); - await loadInput(options, logger); - if (options.workflow) { - // expand shorthand adaptors in the workflow jobs - expandAdaptors(options); - await mapAdaptorsToMonorepo( - options as MapAdaptorsToMonorepoOptions, - logger - ); + let result; + if (options.expressionPath) { + result = await compile(options.expressionPath, options, logger); + } else { + const plan = await loadPlan(options, logger); + result = await compile(plan, options, logger); + result = JSON.stringify(result, null, 2); } - let result = await compile(options, logger); - if (options.workflow) { - result = JSON.stringify(result); - } if (options.outputStdout) { - logger.success('Compiled code:'); - logger.success('\n' + result); + logger.success('Result:\n\n' + result); } else { await writeFile(options.outputPath!, result as string); logger.success(`Compiled to ${options.outputPath}`); diff --git a/packages/cli/src/docs/handler.ts b/packages/cli/src/docs/handler.ts index 5be8d0792..a60acd5cc 100644 --- a/packages/cli/src/docs/handler.ts +++ b/packages/cli/src/docs/handler.ts @@ -60,7 +60,7 @@ const docsHandler = async ( // does the adaptor have a version? 
If not, fetch the latest // (docgen won't do this for us) - const { adaptors } = expandAdaptors({ adaptors: [adaptor] }); + const adaptors = expandAdaptors([adaptor]) as string[]; const [adaptorName] = adaptors!; let { name, version } = getNameAndVersion(adaptorName); if (!version) { diff --git a/packages/cli/src/execute/command.ts b/packages/cli/src/execute/command.ts index 6183b82a5..a18cdd40d 100644 --- a/packages/cli/src/execute/command.ts +++ b/packages/cli/src/execute/command.ts @@ -14,7 +14,7 @@ export type ExecuteOptions = Required< | 'expandAdaptors' | 'immutable' | 'ignoreImports' - | 'jobPath' + | 'expressionPath' | 'log' | 'logJson' | 'outputPath' @@ -26,11 +26,9 @@ export type ExecuteOptions = Required< | 'statePath' | 'stateStdin' | 'sanitize' - | 'strict' | 'timeout' | 'useAdaptorsMonorepo' | 'workflowPath' - | 'workflow' > > & Pick; @@ -54,17 +52,14 @@ const options = [ o.start, o.statePath, o.stateStdin, - o.strict, // order important - o.strictOutput, o.timeout, o.useAdaptorsMonorepo, ]; const executeCommand: yargs.CommandModule = { command: 'execute [path]', - describe: `Run an openfn job or workflow. Get more help by running openfn help. - \nExecute will run a job/workflow at the path and write the output state to disk (to ./state.json unless otherwise specified) - \nBy default only state.data will be returned fron a job. Include --no-strict to write the entire state object. + describe: `Run an openfn expression or workflow. Get more help by running openfn help. + \nExecute will run a expression/workflow at the path and write the output state to disk (to ./state.json unless otherwise specified) \nRemember to include the adaptor name with -a. 
Auto install adaptors with the -i flag.`, aliases: ['$0'], handler: ensure('execute', options), @@ -89,7 +84,7 @@ const executeCommand: yargs.CommandModule = { ) .example( 'openfn compile job.js -a http', - 'Compile job.js with the http adaptor and print the code to stdout' + 'Compile the expression at job.js with the http adaptor and print the code to stdout' ), }; diff --git a/packages/cli/src/execute/execute.ts b/packages/cli/src/execute/execute.ts index 487924ca5..9b4b4a576 100644 --- a/packages/cli/src/execute/execute.ts +++ b/packages/cli/src/execute/execute.ts @@ -1,5 +1,7 @@ import run, { getNameAndVersion } from '@openfn/runtime'; -import type { ModuleInfo, ModuleInfoMap, ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { ModuleInfo, ModuleInfoMap } from '@openfn/runtime'; + import createLogger, { RUNTIME, JOB } from '../util/logger'; import { ExecuteOptions } from './command'; @@ -8,21 +10,18 @@ type ExtendedModuleInfo = ModuleInfo & { }; export default async ( - input: string | ExecutionPlan, - state: any, - opts: Omit + plan: ExecutionPlan, + input: any, + opts: ExecuteOptions ): Promise => { try { - const result = await run(input, state, { - strict: opts.strict, - start: opts.start, - timeout: opts.timeout, + const result = await run(plan, input, { immutableState: opts.immutable, logger: createLogger(RUNTIME, opts), jobLogger: createLogger(JOB, opts), linker: { repo: opts.repoDir, - modules: parseAdaptors(opts), + modules: parseAdaptors(plan), }, }); return result; @@ -34,9 +33,7 @@ export default async ( }; // TODO we should throw if the adaptor strings are invalid for any reason -export function parseAdaptors( - opts: Partial> -) { +export function parseAdaptors(plan: ExecutionPlan) { const extractInfo = (specifier: string) => { const [module, path] = specifier.split('='); const { name, version } = getNameAndVersion(module); @@ -54,24 +51,15 @@ export function parseAdaptors( const 
adaptors: ModuleInfoMap = {}; - if (opts.adaptors) { - opts.adaptors.reduce((obj, exp) => { - const { name, ...maybeVersionAndPath } = extractInfo(exp); - obj[name] = { ...maybeVersionAndPath }; - return obj; - }, adaptors); - } - - if (opts.workflow) { - // TODO what if there are different versions of the same adaptor? - // This structure can't handle it - we'd need to build it for every job - Object.values(opts.workflow.jobs).forEach((job) => { - if (job.adaptor) { - const { name, ...maybeVersionAndPath } = extractInfo(job.adaptor); - adaptors[name] = { ...maybeVersionAndPath }; - } - }); - } + // TODO what if there are different versions of the same adaptor? + // This structure can't handle it - we'd need to build it for every job + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + const { name, ...maybeVersionAndPath } = extractInfo(job.adaptor); + adaptors[name] = maybeVersionAndPath; + } + }); return adaptors; } diff --git a/packages/cli/src/execute/get-autoinstall-targets.ts b/packages/cli/src/execute/get-autoinstall-targets.ts index eead48820..677f41f50 100644 --- a/packages/cli/src/execute/get-autoinstall-targets.ts +++ b/packages/cli/src/execute/get-autoinstall-targets.ts @@ -1,23 +1,15 @@ -import type { ExecuteOptions } from './command'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; -const getAutoinstallTargets = ( - options: Partial< - Pick - > -) => { - if (options.workflow) { - const adaptors = {} as Record; - Object.values(options.workflow.jobs).forEach((job) => { - if (job.adaptor) { - adaptors[job.adaptor] = true; - } - }); - return Object.keys(adaptors); - } - if (options.adaptors) { - return options.adaptors?.filter((a) => !/=/.test(a)); - } - return []; +const getAutoinstallTargets = (plan: ExecutionPlan) => { + const adaptors = {} as Record; + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + // Do not autoinstall adaptors with a path + if 
(job.adaptor && !/=/.test(job.adaptor)) { + adaptors[job.adaptor] = true; + } + }); + return Object.keys(adaptors); }; export default getAutoinstallTargets; diff --git a/packages/cli/src/execute/handler.ts b/packages/cli/src/execute/handler.ts index aefb6894e..060a06c22 100644 --- a/packages/cli/src/execute/handler.ts +++ b/packages/cli/src/execute/handler.ts @@ -1,3 +1,5 @@ +import type { ExecutionPlan } from '@openfn/lexicon'; + import type { ExecuteOptions } from './command'; import execute from './execute'; import serializeOutput from './serialize-output'; @@ -5,16 +7,11 @@ import getAutoinstallTargets from './get-autoinstall-targets'; import { install } from '../repo/handler'; import compile from '../compile/compile'; -import { CompileOptions } from '../compile/command'; import { Logger, printDuration } from '../util/logger'; import loadState from '../util/load-state'; import validateAdaptors from '../util/validate-adaptors'; -import loadInput from '../util/load-input'; -import expandAdaptors from '../util/expand-adaptors'; -import mapAdaptorsToMonorepo, { - MapAdaptorsToMonorepoOptions, -} from '../util/map-adaptors-to-monorepo'; +import loadPlan from '../util/load-plan'; import assertPath from '../util/assert-path'; const executeHandler = async (options: ExecuteOptions, logger: Logger) => { @@ -22,23 +19,13 @@ const executeHandler = async (options: ExecuteOptions, logger: Logger) => { assertPath(options.path); await validateAdaptors(options, logger); - let input = await loadInput(options, logger); - - if (options.workflow) { - // expand shorthand adaptors in the workflow jobs - expandAdaptors(options); - await mapAdaptorsToMonorepo( - options as MapAdaptorsToMonorepoOptions, - logger - ); - } - + let plan = await loadPlan(options, logger); const { repoDir, monorepoPath, autoinstall } = options; if (autoinstall) { if (monorepoPath) { logger.warn('Skipping auto-install as monorepo is being used'); } else { - const autoInstallTargets = 
getAutoinstallTargets(options); + const autoInstallTargets = getAutoinstallTargets(plan); if (autoInstallTargets.length) { logger.info('Auto-installing language adaptors'); await install({ packages: autoInstallTargets, repoDir }, logger); @@ -49,13 +36,13 @@ const executeHandler = async (options: ExecuteOptions, logger: Logger) => { const state = await loadState(options, logger); if (options.compile) { - input = await compile(options as CompileOptions, logger); + plan = (await compile(plan, options, logger)) as ExecutionPlan; } else { logger.info('Skipping compilation as noCompile is set'); } try { - const result = await execute(input!, state, options); + const result = await execute(plan, state, options); await serializeOutput(options, result, logger); const duration = printDuration(new Date().getTime() - start); if (result?.errors) { diff --git a/packages/cli/src/execute/serialize-output.ts b/packages/cli/src/execute/serialize-output.ts index 040b43b5e..79f338f70 100644 --- a/packages/cli/src/execute/serialize-output.ts +++ b/packages/cli/src/execute/serialize-output.ts @@ -3,21 +3,14 @@ import { Logger } from '../util/logger'; import { Opts } from '../options'; const serializeOutput = async ( - options: Pick, + options: Pick, result: any, logger: Logger ) => { let output = result; if (output && (output.configuration || output.data)) { - if (options.strict) { - output = { data: output.data }; - if (result.errors) { - output.errors = result.errors; - } - } else { - const { configuration, ...rest } = result; - output = rest; - } + const { configuration, ...rest } = result; + output = rest; } if (output === undefined) { diff --git a/packages/cli/src/options.ts b/packages/cli/src/options.ts index a29be4c7f..152aecb20 100644 --- a/packages/cli/src/options.ts +++ b/packages/cli/src/options.ts @@ -1,13 +1,13 @@ import path from 'node:path'; - import yargs from 'yargs'; -import type { ExecutionPlan } from '@openfn/runtime'; + import type { CommandList } from 
'./commands'; -import { CLIExecutionPlan } from './types'; import { DEFAULT_REPO_DIR } from './constants'; -import doExpandAdaptors from './util/expand-adaptors'; -import ensureLogOpts from './util/ensure-log-opts'; -import { LogLevel } from './util'; +import { + expandAdaptors as doExpandAdaptors, + ensureLogOpts, + LogLevel, +} from './util'; // Central type definition for the main options // This represents the types coming out of yargs, @@ -28,8 +28,7 @@ export type Opts = { force?: boolean; immutable?: boolean; ignoreImports?: boolean | string[]; - jobPath?: string; - job?: string; + expressionPath?: string; log?: Record; logJson?: boolean; monorepoPath?: string; @@ -37,6 +36,7 @@ export type Opts = { outputPath?: string; outputStdout?: boolean; packages?: string[]; + planPath?: string; projectPath?: string; repoDir?: string; skipAdaptorValidation?: boolean; @@ -44,13 +44,13 @@ export type Opts = { start?: string; // workflow start node statePath?: string; stateStdin?: string; - strict?: boolean; // Strict state handling (only forward state.data). 
Defaults to true sanitize: 'none' | 'remove' | 'summarize' | 'obfuscate'; timeout?: number; // ms useAdaptorsMonorepo?: boolean; - workflow?: CLIExecutionPlan | ExecutionPlan; - workflowPath?: string; projectId?: string; + + // deprecated + workflowPath?: string; }; // Definition of what Yargs returns (before ensure is called) @@ -97,8 +97,10 @@ export const adaptors: CLIOption = { opts.adaptors = []; } + // TODO this might be redundant now as load-plan should handle it + // maybe commands other than execute need it if (opts.expandAdaptors) { - doExpandAdaptors(opts); + opts.adaptors = doExpandAdaptors(opts.adaptors) as string[]; } // delete the aliases as they have not been expanded @@ -218,15 +220,13 @@ export const projectId: CLIOption = { hidden: true, }, ensure: (opts) => { - const projectId = opts.projectId; - //check that this is a uuid - return projectId; - }, + const projectId = opts.projectId; + //check that this is a uuid + return projectId; + }, }; - - -// Input path covers jobPath and workflowPath +// Input path covers expressionPath and workflowPath export const inputPath: CLIOption = { name: 'input-path', yargs: { @@ -235,12 +235,12 @@ export const inputPath: CLIOption = { ensure: (opts) => { const { path: basePath } = opts; if (basePath?.endsWith('.json')) { - opts.workflowPath = basePath; + opts.planPath = basePath; } else if (basePath?.endsWith('.js')) { - opts.jobPath = basePath; + opts.expressionPath = basePath; } else { const base = getBaseDir(opts); - setDefaultValue(opts, 'jobPath', path.join(base, 'job.js')); + setDefaultValue(opts, 'expressionPath', path.join(base, 'job.js')); } }, }; @@ -327,38 +327,6 @@ export const start: CLIOption = { }, }; -// Preserve this but hide it -export const strictOutput: CLIOption = { - name: 'no-strict-output', - yargs: { - deprecated: true, - hidden: true, - boolean: true, - }, - ensure: (opts: { strictOutput?: boolean; strict?: boolean }) => { - if (!opts.hasOwnProperty('strict')) { - // override strict not 
set - opts.strict = opts.strictOutput; - } - delete opts.strictOutput; - }, -}; - -export const strict: CLIOption = { - name: 'strict', - yargs: { - default: false, - boolean: true, - description: - 'Enables strict state handling, meaning only state.data is returned from a job.', - }, - ensure: (opts) => { - if (!opts.hasOwnProperty('strictOutput')) { - setDefaultValue(opts, 'strict', false); - } - }, -}; - export const skipAdaptorValidation: CLIOption = { name: 'skip-adaptor-validation', yargs: { diff --git a/packages/cli/src/test/handler.ts b/packages/cli/src/test/handler.ts index 8c2edde9e..52cb2cc32 100644 --- a/packages/cli/src/test/handler.ts +++ b/packages/cli/src/test/handler.ts @@ -1,3 +1,5 @@ +import type { ExecutionPlan } from '@openfn/lexicon'; + import { TestOptions } from './command'; import { createNullLogger, Logger } from '../util/logger'; import loadState from '../util/load-state'; @@ -6,44 +8,48 @@ import execute from '../execute/execute'; import { ExecuteOptions } from '../execute/command'; const testHandler = async (options: TestOptions, logger: Logger) => { - logger.log('Running test job...'); + logger.log('Running test workflow...'); const opts: Partial = { ...options }; // Preconfigure some options opts.compile = true; opts.adaptors = []; - opts.workflow = { - start: 'start', - jobs: [ - { - id: 'start', - state: { data: { defaultAnswer: 42 } }, - expression: - "const fn = () => (state) => { console.log('Starting computer...'); return state; }; fn()", - next: { - calculate: '!state.error', + const plan = { + options: { + start: 'start', + }, + workflow: { + steps: [ + { + id: 'start', + state: { data: { defaultAnswer: 42 } }, + expression: + "const fn = () => (state) => { console.log('Starting computer...'); return state; }; fn()", + next: { + calculate: '!state.error', + }, + }, + { + id: 'calculate', + expression: + "const fn = () => (state) => { console.log('Calculating to life, the universe, and everything..'); return state }; fn()", + 
next: { + result: true, + }, }, - }, - { - id: 'calculate', - expression: - "const fn = () => (state) => { console.log('Calculating to life, the universe, and everything..'); return state }; fn()", - next: { - result: true, + { + id: 'result', + expression: + 'const fn = () => (state) => ({ data: { answer: state.data.answer || state.data.defaultAnswer } }); fn()', }, - }, - { - id: 'result', - expression: - 'const fn = () => (state) => ({ data: { answer: state.data.answer || state.data.defaultAnswer } }); fn()', - }, - ], - }; + ], + }, + } as ExecutionPlan; logger.break(); - logger.info('Workflow object:'); - logger.info(JSON.stringify(opts.workflow, null, 2)); + logger.info('Execution plan:'); + logger.info(JSON.stringify(plan, null, 2)); logger.break(); if (!opts.stateStdin) { @@ -54,8 +60,8 @@ const testHandler = async (options: TestOptions, logger: Logger) => { } const state = await loadState(opts, createNullLogger()); - const code = await compile(opts, logger); - const result = await execute(code!, state, opts as ExecuteOptions); + const compiledPlan = (await compile(plan, opts, logger)) as ExecutionPlan; + const result = await execute(compiledPlan, state, opts as ExecuteOptions); logger.success(`Result: ${result.data.answer}`); return result; }; diff --git a/packages/cli/src/types.ts b/packages/cli/src/types.ts index 3c6b781e8..ed27ef8bc 100644 --- a/packages/cli/src/types.ts +++ b/packages/cli/src/types.ts @@ -2,6 +2,12 @@ // Ie config can be a string export type JobNodeID = string; +export type OldCLIWorkflow = { + id?: string; // UUID for this plan + start?: JobNodeID; + jobs: CLIJobNode[]; +}; + export type CLIExecutionPlan = { id?: string; // UUID for this plan start?: JobNodeID; diff --git a/packages/cli/src/util/expand-adaptors.ts b/packages/cli/src/util/expand-adaptors.ts index d60f1a0ab..45b952e9d 100644 --- a/packages/cli/src/util/expand-adaptors.ts +++ b/packages/cli/src/util/expand-adaptors.ts @@ -1,6 +1,6 @@ -import { Opts } from '../options'; 
+import { ExecutionPlan, Job } from '@openfn/lexicon'; -const expand = (name: any) => { +const expand = (name: string) => { if (typeof name === 'string') { const [left] = name.split('='); // don't expand adaptors which look like a path (or @openfn/language-) @@ -12,20 +12,24 @@ const expand = (name: any) => { return name; }; -export default (opts: Partial>) => { - const { adaptors, workflow } = opts; +type ArrayOrPlan = T extends string[] ? string[] : ExecutionPlan; - if (adaptors) { - opts.adaptors = adaptors?.map(expand); +// TODO typings here aren't good,I can't get this to work! +// At least this looks nice externally +export default | ExecutionPlan>( + input: T +): ArrayOrPlan => { + if (Array.isArray(input)) { + return input?.map(expand) as any; } - if (workflow) { - Object.values(workflow.jobs).forEach((job) => { - if (job.adaptor) { - job.adaptor = expand(job.adaptor); - } - }); - } + const plan = input as ExecutionPlan; + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + job.adaptor = expand(job.adaptor); + } + }); - return opts; + return plan as any; }; diff --git a/packages/cli/src/util/index.d.ts b/packages/cli/src/util/index.d.ts deleted file mode 100644 index 1ff09efd4..000000000 --- a/packages/cli/src/util/index.d.ts +++ /dev/null @@ -1 +0,0 @@ -export * from './logger'; diff --git a/packages/cli/src/util/index.ts b/packages/cli/src/util/index.ts new file mode 100644 index 000000000..640967359 --- /dev/null +++ b/packages/cli/src/util/index.ts @@ -0,0 +1,6 @@ +import expandAdaptors from './expand-adaptors'; +import ensureLogOpts from './ensure-log-opts'; + +export * from './logger'; + +export { expandAdaptors, ensureLogOpts }; diff --git a/packages/cli/src/util/load-input.ts b/packages/cli/src/util/load-input.ts deleted file mode 100644 index da1e58f1b..000000000 --- a/packages/cli/src/util/load-input.ts +++ /dev/null @@ -1,144 +0,0 @@ -import path from 'node:path'; -import fs from 
'node:fs/promises'; -import { isPath } from '@openfn/compiler'; -import type { Logger } from '@openfn/logger'; -import type { Opts } from '../options'; -import { CLIExecutionPlan } from '../types'; -import { ExecutionPlan } from '@openfn/runtime'; -import abort from './abort'; - -type LoadWorkflowOpts = Required< - Pick ->; - -export default async ( - opts: Pick, - log: Logger -) => { - const { job, workflow, jobPath, workflowPath } = opts; - if (workflow || workflowPath) { - return loadWorkflow(opts as LoadWorkflowOpts, log); - } - - if (job) { - return job; - } - if (jobPath) { - try { - log.debug(`Loading job from ${jobPath}`); - opts.job = await fs.readFile(jobPath, 'utf8'); - return opts.job; - } catch (e: any) { - abort( - log, - 'Job not found', - undefined, - `Failed to load the job from ${jobPath}` - ); - } - } -}; - -const loadWorkflow = async (opts: LoadWorkflowOpts, log: Logger) => { - const { workflowPath, workflow } = opts; - - const readWorkflow = async () => { - try { - const text = await fs.readFile(workflowPath, 'utf8'); - return text; - } catch (e) { - abort( - log, - 'Workflow not found', - undefined, - `Failed to load a workflow from ${workflowPath}` - ); - } - }; - - const parseWorkflow = (contents: string) => { - try { - return JSON.parse(contents); - } catch (e: any) { - abort( - log, - 'Invalid JSON in workflow', - e, - `Check the syntax of the JSON at ${workflowPath}` - ); - } - }; - - const fetchWorkflowFile = async ( - jobId: string, - rootDir: string = '', - filePath: string - ) => { - try { - // Special handling for ~ feels like a necessary evil - const fullPath = filePath.startsWith('~') - ? 
filePath - : path.resolve(rootDir, filePath); - const result = await fs.readFile(fullPath, 'utf8'); - return result; - } catch (e) { - abort( - log, - `File not found for job ${jobId}: ${filePath}`, - undefined, - `This workflow references a file which cannot be found at ${filePath}\n\nPaths inside the workflow are relative to the workflow.json` - ); - } - }; - - log.debug(`Loading workflow from ${workflowPath}`); - try { - let wf: CLIExecutionPlan; - let rootDir = opts.baseDir; - if (workflowPath) { - let workflowRaw = await readWorkflow(); - wf = parseWorkflow(workflowRaw!); - if (!rootDir) { - // TODO this may not be neccessary, but keeping just in case - rootDir = path.dirname(workflowPath); - } - } else { - wf = workflow as CLIExecutionPlan; - } - - // TODO auto generate ids? - - // identify any expressions/configs that are paths, and load them in - // All paths are relative to the workflow itself - let idx = 0; - for (const job of wf.jobs) { - idx += 1; - const expressionStr = - typeof job.expression === 'string' && job.expression?.trim(); - const configurationStr = - typeof job.configuration === 'string' && job.configuration?.trim(); - if (expressionStr && isPath(expressionStr)) { - job.expression = await fetchWorkflowFile( - job.id || `${idx}`, - rootDir, - expressionStr - ); - } - if (configurationStr && isPath(configurationStr)) { - const configString = await fetchWorkflowFile( - job.id || `${idx}`, - rootDir, - configurationStr - ); - job.configuration = JSON.parse(configString!); - } - } - - opts.workflow = wf as ExecutionPlan; - log.debug('Workflow loaded!'); - return opts.workflow; - } catch (e) { - log.error(`Error loading workflow from ${workflowPath}`); - throw e; - } -}; diff --git a/packages/cli/src/util/load-plan.ts b/packages/cli/src/util/load-plan.ts new file mode 100644 index 000000000..490fadede --- /dev/null +++ b/packages/cli/src/util/load-plan.ts @@ -0,0 +1,252 @@ +import fs from 'node:fs/promises'; +import path from 'node:path'; +import 
{ isPath } from '@openfn/compiler'; + +import abort from './abort'; +import expandAdaptors from './expand-adaptors'; +import mapAdaptorsToMonorepo from './map-adaptors-to-monorepo'; +import type { ExecutionPlan, Job, WorkflowOptions } from '@openfn/lexicon'; +import type { Opts } from '../options'; +import type { Logger } from './logger'; +import type { OldCLIWorkflow } from '../types'; + +const loadPlan = async ( + options: Pick< + Opts, + | 'expressionPath' + | 'planPath' + | 'workflowPath' + | 'adaptors' + | 'baseDir' + | 'expandAdaptors' + >, + logger: Logger +): Promise => { + const { workflowPath, planPath, expressionPath } = options; + + if (expressionPath) { + return loadExpression(options, logger); + } + + const jsonPath = planPath || workflowPath; + + if (!options.baseDir) { + options.baseDir = path.dirname(jsonPath!); + } + + const json = await loadJson(jsonPath!, logger); + const defaultName = path.parse(jsonPath!).name; + if (json.workflow) { + return loadXPlan(json, options, logger, defaultName); + } else { + return loadOldWorkflow(json, options, logger, defaultName); + } +}; + +export default loadPlan; + +const loadJson = async (workflowPath: string, logger: Logger): Promise => { + let text: string; + + try { + text = await fs.readFile(workflowPath, 'utf8'); + } catch (e) { + return abort( + logger, + 'Workflow not found', + undefined, + `Failed to load a workflow from ${workflowPath}` + ); + } + + let json: object; + try { + json = JSON.parse(text); + } catch (e: any) { + return abort( + logger, + 'Invalid JSON in workflow', + e, + `Check the syntax of the JSON at ${workflowPath}` + ); + } + + return json; +}; + +const maybeAssign = (a: any, b: any, keys: Array) => { + keys.forEach((key) => { + if (a.hasOwnProperty(key)) { + b[key] = a[key]; + } + }); +}; + +const loadExpression = async ( + options: Pick, + logger: Logger +): Promise => { + const expressionPath = options.expressionPath!; + + logger.debug(`Loading expression from ${expressionPath}`); 
+ try { + const expression = await fs.readFile(expressionPath, 'utf8'); + const name = path.parse(expressionPath).name; + + const step: Job = { expression }; + + // The adaptor should have been expanded nicely already, so we don't need intervene here + if (options.adaptors) { + const [adaptor] = options.adaptors; + if (adaptor) { + step.adaptor = adaptor; + } + } + + const wfOptions: WorkflowOptions = {}; + // TODO support state props to remove? + maybeAssign(options, wfOptions, ['timeout']); + + const plan: ExecutionPlan = { + workflow: { + name, + steps: [step], + }, + options: wfOptions, + }; + // call loadXPlan now so that any options can be written + return loadXPlan(plan, options, logger); + } catch (e) { + abort( + logger, + 'Expression not found', + undefined, + `Failed to load the expression from ${expressionPath}` + ); + + // This will never execute + return {} as ExecutionPlan; + } +}; + +const loadOldWorkflow = async ( + workflow: OldCLIWorkflow, + options: Pick, + logger: Logger, + defaultName: string = '' +) => { + const plan: ExecutionPlan = { + workflow: { + steps: workflow.jobs, + }, + options: { + start: workflow.start, + }, + }; + + if (workflow.id) { + plan.id = workflow.id; + } + + // call loadXPlan now so that any options can be written + const final = await loadXPlan(plan, options, logger, defaultName); + + logger.warn('Converted workflow into new format:'); + logger.warn(final); + + return final; +}; + +const fetchFile = async ( + jobId: string, + rootDir: string = '', + filePath: string, + log: Logger +) => { + try { + // Special handling for ~ feels like a necessary evil + const fullPath = filePath.startsWith('~') + ? 
filePath + : path.resolve(rootDir, filePath); + const result = await fs.readFile(fullPath, 'utf8'); + return result; + } catch (e) { + abort( + log, + `File not found for job ${jobId}: ${filePath}`, + undefined, + `This workflow references a file which cannot be found at ${filePath}\n\nPaths inside the workflow are relative to the workflow.json` + ); + + // should never get here + return '.'; + } +}; + +// TODO this is currently untested in load-plan +// (but covered a bit in execute tests) +const importExpressions = async ( + plan: ExecutionPlan, + rootDir: string, + log: Logger +) => { + let idx = 0; + for (const step of plan.workflow.steps) { + const job = step as Job; + if (!job.expression) { + continue; + } + idx += 1; + const expressionStr = + typeof job.expression === 'string' && job.expression?.trim(); + const configurationStr = + typeof job.configuration === 'string' && job.configuration?.trim(); + if (expressionStr && isPath(expressionStr)) { + job.expression = await fetchFile( + job.id || `${idx}`, + rootDir, + expressionStr, + log + ); + } + if (configurationStr && isPath(configurationStr)) { + const configString = await fetchFile( + job.id || `${idx}`, + rootDir, + configurationStr, + log + ); + job.configuration = JSON.parse(configString!); + } + } +}; + +const loadXPlan = async ( + plan: ExecutionPlan, + options: Pick, + logger: Logger, + defaultName: string = '' +) => { + if (!plan.options) { + plan.options = {}; + } + + if (!plan.workflow.name && defaultName) { + plan.workflow.name = defaultName; + } + // Note that baseDir should be set up in the default function + await importExpressions(plan, options.baseDir!, logger); + // expand shorthand adaptors in the workflow jobs + if (options.expandAdaptors) { + expandAdaptors(plan); + } + await mapAdaptorsToMonorepo(options.monorepoPath, plan, logger); + + // Assign options form the CLI into the Xplan + // TODO support state props to remove + maybeAssign(options, plan.options, ['timeout', 'start']); + + 
logger.info(`Loaded workflow ${plan.workflow.name ?? ''}`); + + return plan; +}; diff --git a/packages/cli/src/util/map-adaptors-to-monorepo.ts b/packages/cli/src/util/map-adaptors-to-monorepo.ts index e4e33fce2..4e25d9876 100644 --- a/packages/cli/src/util/map-adaptors-to-monorepo.ts +++ b/packages/cli/src/util/map-adaptors-to-monorepo.ts @@ -3,6 +3,8 @@ import path from 'node:path'; import assert from 'node:assert'; import { Logger } from '@openfn/logger'; import { getNameAndVersion } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + import type { Opts } from '../options'; export const validateMonoRepo = async (repoPath: string, log: Logger) => { @@ -32,39 +34,38 @@ export const updatePath = (adaptor: string, repoPath: string, log: Logger) => { } const shortName = name.replace('@openfn/language-', ''); const abspath = path.resolve(repoPath, 'packages', shortName); + + log.info(`Mapped adaptor ${name} to monorepo: ${abspath}`); return `${name}=${abspath}`; }; export type MapAdaptorsToMonorepoOptions = Pick< Opts, - 'monorepoPath' | 'adaptors' | 'workflow' + 'monorepoPath' | 'adaptors' >; -// This will mutate options (adaptors, workflow) to support the monorepo -const mapAdaptorsToMonorepo = async ( - options: MapAdaptorsToMonorepoOptions, +const mapAdaptorsToMonorepo = ( + monorepoPath: string = '', + input: string[] | ExecutionPlan = [], log: Logger -) => { - const { adaptors, monorepoPath, workflow } = options; +): string[] | ExecutionPlan => { if (monorepoPath) { - await validateMonoRepo(monorepoPath, log); - log.success(`Loading adaptors from monorepo at ${monorepoPath}`); - if (adaptors) { - options.adaptors = adaptors.map((a) => { - const p = updatePath(a, monorepoPath, log); - log.info(`Mapped adaptor ${a} to monorepo: ${p.split('=')[1]}`); - return p; - }); - } - if (workflow) { - Object.values(workflow.jobs).forEach((job) => { - if (job.adaptor) { - job.adaptor = updatePath(job.adaptor, monorepoPath, log); - } - }); + 
if (Array.isArray(input)) { + const adaptors = input as string[]; + return adaptors.map((a) => updatePath(a, monorepoPath, log)); } + + const plan = input as ExecutionPlan; + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; + if (job.adaptor) { + job.adaptor = updatePath(job.adaptor, monorepoPath, log); + } + }); + + return plan; } - return options; + return input; }; export default mapAdaptorsToMonorepo; diff --git a/packages/cli/src/util/validate-adaptors.ts b/packages/cli/src/util/validate-adaptors.ts index e6b2666bc..d5126ec58 100644 --- a/packages/cli/src/util/validate-adaptors.ts +++ b/packages/cli/src/util/validate-adaptors.ts @@ -9,16 +9,18 @@ const validateAdaptors = async ( | 'autoinstall' | 'repoDir' | 'workflowPath' + | 'planPath' >, logger: Logger ) => { if (options.skipAdaptorValidation) { return; } + const isPlan = options.planPath || options.workflowPath; const hasDeclaredAdaptors = options.adaptors && options.adaptors.length > 0; - if (options.workflowPath && hasDeclaredAdaptors) { + if (isPlan && hasDeclaredAdaptors) { logger.error('ERROR: adaptor and workflow provided'); logger.error( 'This is probably not what you meant to do. A workflow should declare an adaptor for each job.' @@ -29,7 +31,7 @@ const validateAdaptors = async ( // If no adaptor is specified, pass a warning // (The runtime is happy to run without) // This can be overriden from options - if (!options.workflowPath && !hasDeclaredAdaptors) { + if (!isPlan && !hasDeclaredAdaptors) { logger.warn('WARNING: No adaptor provided!'); logger.warn( 'This job will probably fail. 
Pass an adaptor with the -a flag, eg:' diff --git a/packages/cli/test/commands.test.ts b/packages/cli/test/commands.test.ts index 31706ce5b..3d33427b4 100644 --- a/packages/cli/test/commands.test.ts +++ b/packages/cli/test/commands.test.ts @@ -15,16 +15,16 @@ test.afterEach(() => { logger._reset(); }); -const JOB_EXPORT_42 = 'export default [() => ({ data: { count: 42 } })];'; -const JOB_TIMES_2 = +const EXPR_EXPORT_42 = 'export default [() => ({ data: { count: 42 } })];'; +const EXPR_TIMES_2 = 'export default [(state) => { state.data.count = state.data.count * 2; return state; }];'; -const JOB_MOCK_ADAPTOR = +const EXPR_MOCK_ADAPTOR = 'import { byTwo } from "times-two"; export default [byTwo];'; -const JOB_EXPORT_STATE = +const EXPR_EXPORT_STATE = "export default [() => ({ configuration: {}, data: {}, foo: 'bar' })];"; type RunOptions = { - jobPath?: string; + expressionPath?: string; statePath?: string; outputPath?: string; state?: any; @@ -43,7 +43,7 @@ async function run(command: string, job: string, options: RunOptions = {}) { // A good reason to move all these into integration tests tbh! 
command = command.replace(/^openfn /, ''); - const jobPath = options.jobPath || 'job.js'; + const expressionPath = options.expressionPath || 'job.js'; const statePath = options.statePath || 'state.json'; const outputPath = options.outputPath || 'output.json'; const state = @@ -58,7 +58,7 @@ async function run(command: string, job: string, options: RunOptions = {}) { // Mock the file system in-memory if (!options.disableMock) { mock({ - [jobPath]: job, + [expressionPath]: job, [statePath]: state, [outputPath]: '{}', [pnpm]: mock.load(pnpm, {}), @@ -74,7 +74,7 @@ async function run(command: string, job: string, options: RunOptions = {}) { const opts = cmd.parse(command) as Opts; // Override some options after the command has been parsed - opts.path = jobPath; + opts.path = expressionPath; opts.repoDir = options.repoDir; opts.log = { default: 'none' }; @@ -93,6 +93,65 @@ async function run(command: string, job: string, options: RunOptions = {}) { } } +test.serial('run an execution plan', async (t) => { + const plan = { + workflow: { + steps: [ + { + id: 'job1', + state: { data: { x: 0 } }, + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { job2: true }, + }, + { + id: 'job2', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ], + }, + }; + + const options = { + outputPath: 'output.json', + expressionPath: 'wf.json', // just to fool the test + }; + + const result = await run('openfn wf.json', JSON.stringify(plan), options); + t.assert(result.data.x === 2); +}); + +test.serial('run an execution plan with start', async (t) => { + const state = JSON.stringify({ data: { x: 0 } }); + const plan = { + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ], + }, + }; + + const options = { + outputPath: 'output.json', + expressionPath: 'wf.json', // just 
to fool the test + }; + + const result = await run( + `openfn wf.json -S ${state} --start b`, + JSON.stringify(plan), + options + ); + + t.assert(result.data.x === 1); +}); + test.serial('print version information with version', async (t) => { await run('version', ''); @@ -119,7 +178,7 @@ test.serial('run test job with custom state', async (t) => { }); test.serial('run a job with defaults: openfn job.js', async (t) => { - const result = await run('openfn job.js', JOB_EXPORT_42); + const result = await run('openfn job.js', EXPR_EXPORT_42); t.assert(result.data.count === 42); }); @@ -147,7 +206,7 @@ test.serial('run a workflow', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const result = await run('openfn wf.json', JSON.stringify(workflow), options); @@ -168,7 +227,7 @@ test.serial('run a workflow with config as an object', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const result = await run('openfn wf.json', JSON.stringify(workflow), options); t.deepEqual(result, { @@ -190,7 +249,7 @@ test.serial('run a workflow with config as a path', async (t) => { const options = { outputPath: 'output.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test mockfs: { '/config.json': JSON.stringify({ y: 0 }), }, @@ -208,7 +267,7 @@ test.serial.skip( async (t) => { const options = { // set up the file system - jobPath: + expressionPath: '~/openfn/jobs/the-question/what-is-the-answer-to-life-the-universe-and-everything.js', outputPath: '~/openfn/jobs/the-question/output.json', statePath: '~/openfn/jobs/the-question/state.json', @@ -216,7 +275,7 @@ test.serial.skip( const result = await run( 'openfn ~/openfn/jobs/the-question', - JOB_EXPORT_42, + EXPR_EXPORT_42, options ); t.assert(result === 42); 
@@ -237,7 +296,7 @@ test.serial( }; const result = await run( 'openfn job.js --output-path=/tmp/my-output.json', - JOB_EXPORT_42, + EXPR_EXPORT_42, options ); t.is(result.data.count, 42); @@ -256,7 +315,7 @@ test.serial( }; const result = await run( 'openfn job.js -o /tmp/my-output.json', - JOB_EXPORT_42, + EXPR_EXPORT_42, options ); t.is(result.data.count, 42); @@ -268,59 +327,15 @@ test.serial( ); test.serial( - 'output to file with strict state: openfn job.js --output-path=/tmp/my-output.json --strict', + 'output to file removing configuration: openfn job.js --output-path=/tmp/my-output.json', async (t) => { const options = { outputPath: '/tmp/my-output.json', }; const result = await run( - 'openfn job.js --output-path=/tmp/my-output.json --strict', - JOB_EXPORT_STATE, - options - ); - t.deepEqual(result, { data: {} }); - - const expectedFileContents = JSON.stringify({ data: {} }, null, 2); - const output = await fs.readFile('/tmp/my-output.json', 'utf8'); - t.is(output, expectedFileContents); - } -); - -test.serial( - 'output to file with non-strict state: openfn job.js --output-path=/tmp/my-output.json --no-strict-output', - async (t) => { - const options = { - outputPath: '/tmp/my-output.json', - }; - - const result = await run( - 'openfn job.js --output-path=/tmp/my-output.json --no-strict-output', - JOB_EXPORT_STATE, - options - ); - t.deepEqual(result, { data: {}, foo: 'bar' }); - - const expectedFileContents = JSON.stringify( - { data: {}, foo: 'bar' }, - null, - 2 - ); - const output = await fs.readFile('/tmp/my-output.json', 'utf8'); - t.assert(output === expectedFileContents); - } -); - -test.serial( - 'output to file with non-strict state: openfn job.js --output-path=/tmp/my-output.json --no-strict', - async (t) => { - const options = { - outputPath: '/tmp/my-output.json', - }; - - const result = await run( - 'openfn job.js --output-path=/tmp/my-output.json --no-strict', - JOB_EXPORT_STATE, + 'openfn job.js --output-path=/tmp/my-output.json', + 
EXPR_EXPORT_STATE, options ); t.deepEqual(result, { data: {}, foo: 'bar' }); @@ -344,7 +359,7 @@ test.serial( }; const result = await run( 'openfn job.js --state-path=/tmp/my-state.json', - JOB_TIMES_2, + EXPR_TIMES_2, options ); t.assert(result.data.count === 66); @@ -360,7 +375,7 @@ test.serial( }; const result = await run( 'openfn job.js -s /tmp/my-state.json', - JOB_TIMES_2, + EXPR_TIMES_2, options ); t.assert(result.data.count === 66); @@ -373,7 +388,7 @@ test.serial( const state = JSON.stringify({ data: { count: 11 } }); const result = await run( `openfn job.js --state-stdin=${state}`, - JOB_TIMES_2 + EXPR_TIMES_2 ); t.assert(result.data.count === 22); } @@ -383,7 +398,7 @@ test.serial( 'read state from stdin with alias: openfn job.js -S ', async (t) => { const state = JSON.stringify({ data: { count: 44 } }); - const result = await run(`openfn job.js -S ${state}`, JOB_TIMES_2); + const result = await run(`openfn job.js -S ${state}`, EXPR_TIMES_2); t.assert(result.data.count === 88); } ); @@ -394,7 +409,7 @@ test.serial( const state = JSON.stringify({ data: { count: 49.5 } }); const result = await run( `openfn --no-expand-adaptors -S ${state} --adaptor times-two=/modules/times-two`, - JOB_MOCK_ADAPTOR + EXPR_MOCK_ADAPTOR ); t.assert(result.data.count === 99); } @@ -406,7 +421,7 @@ test.serial( const state = JSON.stringify({ data: { count: 49.5 } }); const result = await run( `openfn --no-expand-adaptors -S ${state} --adaptors times-two=/modules/times-two`, - JOB_MOCK_ADAPTOR + EXPR_MOCK_ADAPTOR ); t.assert(result.data.count === 99); } @@ -418,7 +433,7 @@ test.serial( const state = JSON.stringify({ data: { count: 49.5 } }); const result = await run( `openfn --no-expand-adaptors -S ${state} -a times-two=/modules/times-two`, - JOB_MOCK_ADAPTOR + EXPR_MOCK_ADAPTOR ); t.assert(result.data.count === 99); } @@ -479,7 +494,7 @@ test.serial( const options = { outputPath: 'output.json', - jobPath: 'wf.json', + expressionPath: 'wf.json', repoDir: '/repo', }; @@ -548,7 
+563,7 @@ test.serial( }); const result = await run('workflow.json -m', workflow, { - jobPath: 'workflow.json', + expressionPath: 'workflow.json', }); t.true(result.data.done); delete process.env.OPENFN_ADAPTORS_REPO; @@ -576,7 +591,7 @@ test.serial('compile a job: openfn compile job.js to file', async (t) => { test.serial('compile a workflow: openfn compile wf.json to file', async (t) => { const options = { outputPath: 'out.json', - jobPath: 'wf.json', // just to fool the test + expressionPath: 'wf.json', // just to fool the test }; const wf = JSON.stringify({ @@ -588,7 +603,7 @@ test.serial('compile a workflow: openfn compile wf.json to file', async (t) => { const output = await fs.readFile('out.json', 'utf8'); const result = JSON.parse(output); t.truthy(result); - t.is(result.jobs[0].expression, 'export default [x()];'); + t.is(result.workflow.steps[0].expression, 'export default [x()];'); }); test.serial('docs should print documentation with full names', async (t) => { diff --git a/packages/cli/test/compile/compile.test.ts b/packages/cli/test/compile/compile.test.ts index 55b867860..46f6520d1 100644 --- a/packages/cli/test/compile/compile.test.ts +++ b/packages/cli/test/compile/compile.test.ts @@ -8,13 +8,14 @@ import compile, { resolveSpecifierPath, } from '../../src/compile/compile'; import { CompileOptions } from '../../src/compile/command'; -import { ExecutionPlan } from '@openfn/runtime'; +import { mockFs, resetMockFs } from '../util'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; const mockLog = createMockLogger(); -test.afterEach(() => { - mock.restore(); -}); +test.after(resetMockFs); + +const expressionPath = '/job.js'; type TransformOptionsWithImports = { ['add-imports']: { @@ -26,67 +27,64 @@ type TransformOptionsWithImports = { }; }; +// TODO this isn't really used and is a bit of a quirky thing +// The compiler itself probably doesn't do any path parsing? 
+// Just compile a source string and return the result test('compile from source string', async (t) => { const job = 'x();'; - const opts = { - job, - } as CompileOptions; + const opts = {} as CompileOptions; - const result = await compile(opts, mockLog); + const result = await compile(job, opts, mockLog); const expected = 'export default [x()];'; t.is(result, expected); }); test.serial('compile from path', async (t) => { - const pnpm = path.resolve('../../node_modules/.pnpm'); - mock({ - [pnpm]: mock.load(pnpm, {}), - '/tmp/job.js': 'x();', + const job = 'x();'; + mockFs({ + [expressionPath]: job, }); - const jobPath = '/tmp/job.js'; - const opts = { - jobPath, + expressionPath, } as CompileOptions; - const result = await compile(opts, mockLog); + const result = await compile(expressionPath, opts, mockLog); const expected = 'export default [x()];'; t.is(result, expected); }); -test('compile from workflow', async (t) => { - const workflow = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x()' }, - { id: 'b', expression: 'x()' }, - ], - }; +test('compile from execution plan', async (t) => { + const plan = { + workflow: { + steps: [ + { id: 'a', expression: 'x()' }, + { id: 'b', expression: 'x()' }, + ], + }, + options: {}, + } as ExecutionPlan; - const opts = { - workflow, - } as CompileOptions; + const opts = {} as CompileOptions; - const result = (await compile(opts, mockLog)) as ExecutionPlan; + const result = (await compile(plan, opts, mockLog)) as ExecutionPlan; const expected = 'export default [x()];'; - t.is(result.jobs[0].expression, expected); - t.is(result.jobs[1].expression, expected); + const [a, b] = result.workflow.steps; + t.is((a as Job).expression, expected); + t.is((b as Job).expression, expected); }); test('throw an AbortError if a job is uncompilable', async (t) => { const job = 'a b'; - const opts = { - job, - } as CompileOptions; + const opts = {} as CompileOptions; const logger = createMockLogger(); - await t.throwsAsync(() => 
compile(opts, logger), { + await t.throwsAsync(() => compile(job, opts, logger), { message: 'Failed to compile job', }); @@ -95,18 +93,18 @@ test('throw an AbortError if a job is uncompilable', async (t) => { t.assert(logger._find('error', /critical error: aborting command/i)); }); -test('throw an AbortError if a workflow contains an uncompilable job', async (t) => { - const workflow = { - start: 'a', - jobs: [{ id: 'a', expression: 'x b' }], +test('throw an AbortError if an xplan contains an uncompilable job', async (t) => { + const plan: ExecutionPlan = { + workflow: { + steps: [{ id: 'a', expression: 'x b' }], + }, + options: {}, }; - const opts = { - workflow, - } as CompileOptions; + const opts = {} as CompileOptions; const logger = createMockLogger(); - await t.throwsAsync(() => compile(opts, logger), { + await t.throwsAsync(() => compile(plan, opts, logger), { message: 'Failed to compile job a', }); diff --git a/packages/cli/test/compile/options.test.ts b/packages/cli/test/compile/options.test.ts index ed8a48390..e3d896f8d 100644 --- a/packages/cli/test/compile/options.test.ts +++ b/packages/cli/test/compile/options.test.ts @@ -13,7 +13,7 @@ test('correct default options', (t) => { t.deepEqual(options.adaptors, []); t.is(options.command, 'compile'); t.is(options.expandAdaptors, true); - t.is(options.jobPath, 'job.js'); + t.is(options.expressionPath, 'job.js'); t.falsy(options.logJson); // TODO this is undefined right now t.is(options.outputStdout, true); t.is(options.path, 'job.js'); @@ -52,7 +52,7 @@ test("don't expand adaptors if --no-expand-adaptors is set", (t) => { test('default job path', (t) => { const options = parse('compile /tmp/my-job/ --immutable'); t.is(options.path, '/tmp/my-job/'); - t.is(options.jobPath, '/tmp/my-job/job.js'); + t.is(options.expressionPath, '/tmp/my-job/job.js'); }); test('enable json logging', (t) => { diff --git a/packages/cli/test/docgen/handler.test.ts b/packages/cli/test/docgen/handler.test.ts index 52cee0471..07e38c133 
100644 --- a/packages/cli/test/docgen/handler.test.ts +++ b/packages/cli/test/docgen/handler.test.ts @@ -53,7 +53,7 @@ const options = { }; test.serial('generate mock docs', async (t) => { - const path = await docsHandler(options, logger, mockGen); + const path = (await docsHandler(options, logger, mockGen)) as string; t.is(path, `${DOCS_PATH}/${specifier}.json`); const docs = await loadJSON(path); diff --git a/packages/cli/test/execute/execute.test.ts b/packages/cli/test/execute/execute.test.ts index 59513bec7..a3e648b6b 100644 --- a/packages/cli/test/execute/execute.test.ts +++ b/packages/cli/test/execute/execute.test.ts @@ -1,12 +1,11 @@ // bunch of unit tests on the execute function itself // so far this is only done in commands.test.ts, which has the cli overhead // I don't want any io or adaptor tests here, really just looking for the actual execute flow -import mock from 'mock-fs'; -import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; import test from 'ava'; import { ExecuteOptions } from '../../src/execute/command'; import handler from '../../src/execute/handler'; +import { mockFs, resetMockFs } from '../util'; // Why is this logging everywhere? 
const logger = createMockLogger(undefined, { level: 'none' }); @@ -33,234 +32,280 @@ const defaultOptions = { const fn = `const fn = (fn) => (s) => fn(s); `; -test.before(() => { - const pnpm = path.resolve('../../node_modules/.pnpm'); - mock({ - '/repo/': mock.load(path.resolve('test/__repo__/'), {}), - [pnpm]: mock.load(pnpm, {}), - '/exp.js': `${fn}fn(() => ({ data: 42 }));`, - '/config.json': JSON.stringify({ id: 'x' }), - '/workflow.json': JSON.stringify({ - jobs: [ - { - expression: `${fn}fn(() => ({ data: { count: 42 } }));`, - }, - ], - }), - }); -}); - -test.after(() => mock.restore()); +test.after(resetMockFs); -test('run a job', async (t) => { +test.serial('run a simple job', async (t) => { const job = `${fn}fn(() => ({ data: 42 }));`; + + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, - job, + expressionPath: '/job.js', }; + const result = await handler(options, logger); t.is(result.data, 42); }); -test('run a job with initial state', async (t) => { +test.serial('run a job with initial state', async (t) => { const job = `${fn}fn((state) => state);`; + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, - job, + expressionPath: '/job.js', stateStdin: JSON.stringify({ data: { count: 10 } }), }; - const result = await handler(options, logger); - t.is(result.data.count, 10); -}); -test('run a workflow from a path', async (t) => { - const options = { - ...defaultOptions, - workflowPath: '/workflow.json', - }; const result = await handler(options, logger); - t.is(result.data.count, 42); + t.is(result.data.count, 10); }); -test('run a workflow', async (t) => { +test.serial('run a workflow', async (t) => { const workflow = { - start: 'a', - jobs: [ - { - id: 'a', - expression: `${fn}fn(() => ({ data: { count: 42 } }));`, - next: { b: true }, - }, - { - id: 'b', - expression: `${fn}fn((state) => { state.data.count = state.data.count * 2; return state; });`, - }, - ], + options: { + start: 'a', + }, + workflow: { + 
steps: [ + { + id: 'a', + expression: `${fn}fn(() => ({ data: { count: 42 } }));`, + next: { b: true }, + }, + { + id: 'b', + expression: `${fn}fn((state) => { state.data.count = state.data.count * 2; return state; });`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.data.count, 84); }); -test('run a workflow with state', async (t) => { +test.serial('run a workflow with state', async (t) => { const workflow = { - start: 'a', - jobs: [ - { - id: 'a', - state: { data: { count: 1 } }, - expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, - next: { b: true }, - }, - { - id: 'b', - state: { data: { diff: 2 } }, - expression: `${fn}fn((state) => { state.data.count += state.data.diff; return state; });`, - }, - ], + workflow: { + steps: [ + { + id: 'a', + state: { data: { count: 1 } }, + expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, + next: { b: true }, + }, + { + id: 'b', + state: { data: { diff: 2 } }, + expression: `${fn}fn((state) => { state.data.count += state.data.diff; return state; });`, + }, + ], + }, }; + + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.data.count, 4); }); -test('run a workflow with initial state', async (t) => { +test.serial('run a workflow with initial state from stdin', async (t) => { const workflow = { - start: 'a', - jobs: [ - { - id: 'a', - expression: `${fn}fn((state) => { state.data.count += 1; return state;});`, - next: { b: true }, - }, - { - id: 'b', - expression: `${fn}fn((state) => { state.data.count += 1; return state; });`, - }, - ], + workflow: { + steps: [ + { + id: 'a', + expression: `${fn}fn((state) => { state.data.count += 1; return 
state;});`, + next: { b: true }, + }, + { + id: 'b', + expression: `${fn}fn((state) => { state.data.count += 1; return state; });`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), }; const result = await handler(options, logger); t.is(result.data.count, 12); }); -test('run a workflow with an expression as a path', async (t) => { +test.serial('run a workflow with an expression as a path', async (t) => { const workflow = { - jobs: [ - { - expression: '/exp.js', - }, - ], + workflow: { + steps: [ + { + expression: '/exp.js', + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + '/exp.js': `${fn}fn(() => ({ data: 42 }));`, + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.data, 42); }); -test('run a workflow with config as a path', async (t) => { +test.serial('run a workflow with config as a path', async (t) => { const workflow = { - jobs: [ - { - configuration: '/config.json', - expression: `${fn}fn((state) => { state.cfg = state.configuration; return state; })`, - }, - ], + workflow: { + steps: [ + { + configuration: '/config.json', + expression: `${fn}fn((state) => { state.cfg = state.configuration; return state; })`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + '/config.json': JSON.stringify({ id: 'x' }), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', }; const result = await handler(options, logger); t.is(result.cfg.id, 'x'); }); -test('run a workflow from a start node', async (t) => { +test.serial('run a workflow from a start node', async (t) => { const workflow = { - jobs: [ - { - id: 'a', - expression: `${fn}fn((state) => ({ data: { result: 'a' }}))`, - }, - { - id: 'b', - expression: 
`${fn}fn((state) => ({ data: { result: 'b' }}))`, - }, - ], + workflow: { + steps: [ + { + id: 'a', + expression: `${fn}fn((state) => ({ data: { result: 'a' }}))`, + }, + { + id: 'b', + expression: `${fn}fn((state) => ({ data: { result: 'b' }}))`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', start: 'b', }; const result = await handler(options, logger); t.is(result.data.result, 'b'); }); -test('run a workflow with an adaptor (longform)', async (t) => { +test.serial('run a workflow with an adaptor (longform)', async (t) => { const workflow = { - jobs: [ - { - adaptor: '@openfn/language-common', - expression: `fn((state) => state);`, - }, - ], + workflow: { + steps: [ + { + adaptor: '@openfn/language-common', + expression: `fn((state) => state);`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), }; const result = await handler(options, logger); t.is(result.data.count, 10); }); -test('run a workflow with an adaptor (shortform)', async (t) => { +test.serial('run a workflow with an adaptor (shortform)', async (t) => { const workflow = { - jobs: [ - { - adaptor: 'common', - expression: `fn((state) => state);`, - }, - ], + workflow: { + steps: [ + { + adaptor: 'common', + expression: `fn((state) => state);`, + }, + ], + }, }; + mockFs({ + '/workflow.json': JSON.stringify(workflow), + }); + const options = { ...defaultOptions, - workflow, + workflowPath: '/workflow.json', stateStdin: JSON.stringify({ data: { count: 10 } }), + expandAdaptors: true, }; const result = await handler(options, logger); t.is(result.data.count, 10); }); -test('run a job without compilation', async (t) => { +test.serial('run a job without compilation', async (t) => { const job = `export default [() => ({ data: { 
count: 42 } })]`; + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, compile: false, - job, + expressionPath: '/job.js', }; + const result = await handler(options, logger); t.is(result.data.count, 42); }); -test('run a job which does not return state', async (t) => { +test.serial('run a job which does not return state', async (t) => { const job = `${fn}fn(() => {});`; + mockFs({ + '/job.js': job, + }); + const options = { ...defaultOptions, - job, + expressionPath: '/job.js', }; const result = await handler(options, logger); t.falsy(result); diff --git a/packages/cli/test/execute/get-autoinstall-targets.test.ts b/packages/cli/test/execute/get-autoinstall-targets.test.ts index 9dc275a6d..33a29786b 100644 --- a/packages/cli/test/execute/get-autoinstall-targets.test.ts +++ b/packages/cli/test/execute/get-autoinstall-targets.test.ts @@ -1,162 +1,82 @@ import test from 'ava'; import getAutoinstallTargets from '../../src/execute/get-autoinstall-targets'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; -test('return empty if an empty array is passed', (t) => { - const result = getAutoinstallTargets({ - adaptors: [], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('return 2 valid targets', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['a', 'b'], - }); - t.truthy(result); - t.is(result.length, 2); - t.deepEqual(result, ['a', 'b']); -}); - -test('return empty if a path is passed', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['a=a/b/c'], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('return 1 valid target', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['a=/some/path', 'b@1.2.3'], - }); - t.truthy(result); - t.is(result.length, 1); - t.deepEqual(result, ['b@1.2.3']); -}); - -test('return language common', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common'], - }); - t.truthy(result); - t.is(result.length, 1); - 
t.deepEqual(result, ['@openfn/language-common']); -}); - -test('return language common with specifier', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common@1.0.0'], - }); - t.truthy(result); - t.is(result.length, 1); - t.deepEqual(result, ['@openfn/language-common@1.0.0']); -}); - -test('reject language common with path', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common=/a/b/c'], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('reject language common with specifier and path', (t) => { - const result = getAutoinstallTargets({ - adaptors: ['@openfn/language-common@1.0.0=/tmp/repo/common'], - }); - t.truthy(result); - t.is(result.length, 0); -}); - -test('empty workflow', (t) => { - const result = getAutoinstallTargets({ +const getPlan = (steps: Job[]) => + ({ workflow: { - start: 'a', - jobs: {}, + steps, }, - }); + options: {}, + } as ExecutionPlan); + +test('empty plan', (t) => { + const plan = getPlan([]); + const result = getAutoinstallTargets(plan); t.truthy(result); t.is(result.length, 0); }); -test('workflow with zero adaptors', (t) => { - const result = getAutoinstallTargets({ - workflow: { - start: 'a', - jobs: { - a: { - expression: 'fn()', - }, - }, +test('plan with zero adaptors', (t) => { + const plan = getPlan([ + { + expression: 'fn()', }, - }); + ]); + const result = getAutoinstallTargets(plan); t.truthy(result); t.is(result.length, 0); }); -test('workflow with multiple adaptors', (t) => { - const result = getAutoinstallTargets({ - workflow: { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-http', - expression: 'fn()', - }, - }, +test('plan with multiple adaptors', (t) => { + const plan = getPlan([ + { + adaptor: '@openfn/language-common', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-http', + expression: 'fn()', }, - }); + ]); + const result = 
getAutoinstallTargets(plan); t.is(result.length, 2); t.deepEqual(result, ['@openfn/language-common', '@openfn/language-http']); }); -test('workflow with duplicate adaptors', (t) => { - const result = getAutoinstallTargets({ - workflow: { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - }, +test('plan with duplicate adaptors', (t) => { + const plan = getPlan([ + { + adaptor: '@openfn/language-common', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-common', + expression: 'fn()', }, - }); + ]); + const result = getAutoinstallTargets(plan); t.is(result.length, 1); t.deepEqual(result, ['@openfn/language-common']); }); -test('workflow with one adaptor but different versions', (t) => { - const result = getAutoinstallTargets({ - adaptors: [], - workflow: { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common@1.0.0', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-common@2.0.0', - expression: 'fn()', - }, - c: { - adaptor: '@openfn/language-common@3.0.0', - expression: 'fn()', - }, - }, +test('plan with one adaptor but different versions', (t) => { + const plan = getPlan([ + { + adaptor: '@openfn/language-common@1.0.0', + expression: 'fn()', }, - }); + { + adaptor: '@openfn/language-common@2.0.0', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-common@3.0.0', + expression: 'fn()', + }, + ]); + const result = getAutoinstallTargets(plan); t.is(result.length, 3); t.deepEqual(result, [ '@openfn/language-common@1.0.0', @@ -164,3 +84,15 @@ test('workflow with one adaptor but different versions', (t) => { '@openfn/language-common@3.0.0', ]); }); + +test('do not return adaptors with a path', (t) => { + const plan = getPlan([ + { + expression: 'fn()', + adaptor: 'common=a/b/c', + }, + ]); + const result = getAutoinstallTargets(plan); + t.truthy(result); + t.is(result.length, 0); +}); diff --git 
a/packages/cli/test/execute/options.test.ts b/packages/cli/test/execute/options.test.ts index 968523921..750303854 100644 --- a/packages/cli/test/execute/options.test.ts +++ b/packages/cli/test/execute/options.test.ts @@ -16,13 +16,12 @@ test('correct default options', (t) => { t.is(options.compile, true); t.is(options.expandAdaptors, true); t.is(options.immutable, false); - t.is(options.jobPath, 'job.js'); + t.is(options.expressionPath, 'job.js'); t.falsy(options.logJson); // TODO this is undefined right now t.is(options.outputPath, 'output.json'); t.is(options.outputStdout, false); t.is(options.path, 'job.js'); t.is(options.skipAdaptorValidation, false); - t.is(options.strict, false); t.is(options.timeout, 300000); t.falsy(options.useAdaptorsMonorepo); }); @@ -79,7 +78,7 @@ test('enable immutability', (t) => { test('default job path', (t) => { const options = parse('execute /tmp/my-job/ --immutable'); t.is(options.path, '/tmp/my-job/'); - t.is(options.jobPath, '/tmp/my-job/job.js'); + t.is(options.expressionPath, '/tmp/my-job/job.js'); }); test('enable json logging', (t) => { @@ -87,16 +86,6 @@ test('enable json logging', (t) => { t.true(options.logJson); }); -test('disable strict output', (t) => { - const options = parse('execute job.js --no-strict'); - t.false(options.strict); -}); - -test('disable strict output (legacy)', (t) => { - const options = parse('execute job.js --no-strict-output'); - t.false(options.strict); -}); - test('set an output path (short)', (t) => { const options = parse('execute job.js -o /tmp/out.json'); t.is(options.outputPath, '/tmp/out.json'); diff --git a/packages/cli/test/execute/parse-adaptors.test.ts b/packages/cli/test/execute/parse-adaptors.test.ts index 46f2444dc..cdbdf6753 100644 --- a/packages/cli/test/execute/parse-adaptors.test.ts +++ b/packages/cli/test/execute/parse-adaptors.test.ts @@ -1,48 +1,57 @@ import test from 'ava'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; import { parseAdaptors } from 
'../../src/execute/execute'; -test('parse a simple specifier', (t) => { - const adaptors = ['a']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.truthy(result.a); - t.falsy(Object.keys(result.a).length); +const createPlan = (adaptor: string): ExecutionPlan => ({ + workflow: { + steps: [ + { + adaptor, + expression: '.', + }, + ], + }, + options: {}, }); -test('parse multiple specifiers', (t) => { - const adaptors = ['a', 'b']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 2); - t.truthy(result.a); - t.truthy(result.b); +test('parse a simple specifier with no path or version', (t) => { + const adaptor = 'a'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); + + t.deepEqual(result, { a: {} }); }); test('parse a specifier with a path', (t) => { - const adaptors = ['a=x']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.deepEqual(result.a, { path: 'x' }); + const adaptor = 'a=x'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); + + t.deepEqual(result, { a: { path: 'x' } }); }); test('parse a specifier with a version', (t) => { - const adaptors = ['a@1']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.deepEqual(result.a, { version: '1' }); + const adaptor = 'a@1'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); + + t.deepEqual(result, { a: { version: '1' } }); }); test('parse a specifier with a path and version', (t) => { - const adaptors = ['a@1=x']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); - t.deepEqual(result.a, { path: 'x', version: '1' }); + const adaptor = 'a@1=x'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); + + t.deepEqual(result, { a: { path: 'x', version: '1' } }); }); test('parse 
@openfn/language-common@1.0.0=~/repo/modules/common', (t) => { - const adaptors = ['@openfn/language-common@1.0.0=~/repo/modules/common']; - const result = parseAdaptors({ adaptors }); - t.assert(Object.keys(result).length === 1); + const adaptor = '@openfn/language-common@1.0.0=~/repo/modules/common'; + const plan = createPlan(adaptor); + const result = parseAdaptors(plan); + t.deepEqual(result, { '@openfn/language-common': { path: '~/repo/modules/common', @@ -51,25 +60,29 @@ test('parse @openfn/language-common@1.0.0=~/repo/modules/common', (t) => { }); }); -test('parse workflow', (t) => { - const workflow = { - start: 'a', - jobs: { - a: { - adaptor: '@openfn/language-common', - expression: 'fn()', - }, - b: { - adaptor: '@openfn/language-http@1.0.0', - expression: 'fn()', - }, - c: { - adaptor: '@openfn/language-salesforce=a/b/c', - expression: 'fn()', - }, +test('parse plan with several steps', (t) => { + const plan = { + options: { + start: 'a', + }, + workflow: { + steps: [ + { + adaptor: '@openfn/language-common', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-http@1.0.0', + expression: 'fn()', + }, + { + adaptor: '@openfn/language-salesforce=a/b/c', + expression: 'fn()', + }, + ], }, }; - const result = parseAdaptors({ workflow }); + const result = parseAdaptors(plan); t.assert(Object.keys(result).length === 3); t.deepEqual(result, { '@openfn/language-common': {}, diff --git a/packages/cli/test/integration.test.ts b/packages/cli/test/integration.test.ts index b4499cd1c..c20b68cf5 100644 --- a/packages/cli/test/integration.test.ts +++ b/packages/cli/test/integration.test.ts @@ -4,7 +4,7 @@ import { exec } from 'node:child_process'; test('openfn help', async (t) => { await new Promise((resolve) => { exec('pnpm openfn help', (error, stdout, stderr) => { - t.regex(stdout, /Run an openfn job/); + t.regex(stdout, /Run an openfn expression/); t.falsy(error); t.falsy(stderr); resolve(); diff --git a/packages/cli/test/options/ensure/inputPath.test.ts 
b/packages/cli/test/options/ensure/inputPath.test.ts index 8c7690c5b..e62a827b7 100644 --- a/packages/cli/test/options/ensure/inputPath.test.ts +++ b/packages/cli/test/options/ensure/inputPath.test.ts @@ -1,37 +1,37 @@ import test from 'ava'; import { inputPath, Opts } from '../../../src/options'; -test('sets jobPath using path', (t) => { +test('sets expressionPath using path', (t) => { const opts = { path: 'jam.js', } as Opts; inputPath.ensure!(opts); - t.is(opts.jobPath, 'jam.js'); + t.is(opts.expressionPath, 'jam.js'); }); -test('sets jobPath to path/job.js if path is a folder', (t) => { +test('sets expressionPath to path/job.js if path is a folder', (t) => { const opts = { path: '/jam', } as Opts; inputPath.ensure!(opts); - t.is(opts.jobPath, '/jam/job.js'); + t.is(opts.expressionPath, '/jam/job.js'); }); -test('sets jobPath to path/job.js if path is a folder (trailing slash)', (t) => { +test('sets expressionPath to path/job.js if path is a folder (trailing slash)', (t) => { const opts = { path: '/jam/', } as Opts; inputPath.ensure!(opts); - t.is(opts.jobPath, '/jam/job.js'); + t.is(opts.expressionPath, '/jam/job.js'); }); -test('set workflowPath if path ends in json', (t) => { +test.skip('set workflowPath if path ends in json', (t) => { const opts = { path: 'workflow.json', } as Opts; diff --git a/packages/cli/test/options/ensure/strict.test.ts b/packages/cli/test/options/ensure/strict.test.ts deleted file mode 100644 index 7bdb5783e..000000000 --- a/packages/cli/test/options/ensure/strict.test.ts +++ /dev/null @@ -1,51 +0,0 @@ -import test from 'ava'; -import { strict, strictOutput, Opts } from '../../../src/options'; - -// Tests on legacy behaviour -test('strictOutput: true should set strict', (t) => { - const opts = { - strictOutput: true, - } as Opts; - strictOutput.ensure!(opts); - t.true(opts.strict); - // @ts-ignore - t.falsy(opts.strictOutput); -}); - -test('strictOutput: false should set strict', (t) => { - const opts = { - strictOutput: false, - } as 
Opts; - strictOutput.ensure!(opts); - t.false(opts.strict); - // @ts-ignore - t.falsy(opts.strictOutput); -}); - -test('strict should default to false', (t) => { - const opts = {} as Opts; - strict.ensure!(opts); - t.false(opts.strict); -}); - -test('strict can be set to true', (t) => { - const opts = { - strict: true, - } as Opts; - strict.ensure!(opts); - t.true(opts.strict); -}); - -test('strict overrides strictOutput', (t) => { - const opts = { - strictOutput: false, - strict: true, - } as Opts; - - // Note that the order of these two is important - strict.ensure!(opts); - strictOutput.ensure!(opts); - - t.true(opts.strict); - t.falsy(opts.strictOutput); -}); diff --git a/packages/cli/test/options/execute.test.ts b/packages/cli/test/options/execute.test.ts index 720e15906..e9950e22f 100644 --- a/packages/cli/test/options/execute.test.ts +++ b/packages/cli/test/options/execute.test.ts @@ -12,9 +12,9 @@ const cmd = yargs().command(execute); const parse = (command: string) => cmd.parse(command) as yargs.Arguments; -test("execute: jobPath'.'", (t) => { +test("execute: expressionPath'.'", (t) => { const options = parse('execute job.js'); - t.assert(options.jobPath === 'job.js'); + t.assert(options.expressionPath === 'job.js'); }); test('execute: default outputPath to ./output.json', (t) => { diff --git a/packages/cli/test/util.ts b/packages/cli/test/util.ts new file mode 100644 index 000000000..550720736 --- /dev/null +++ b/packages/cli/test/util.ts @@ -0,0 +1,18 @@ +/* + * test utils + */ +import mock from 'mock-fs'; +import path from 'node:path'; + +export const mockFs = (files: Record) => { + const pnpm = path.resolve('../../node_modules/.pnpm'); + mock({ + [pnpm]: mock.load(pnpm, {}), + '/repo/': mock.load(path.resolve('test/__repo__/'), {}), + ...files, + }); +}; + +export const resetMockFs = () => { + mock.restore(); +}; diff --git a/packages/cli/test/util/expand-adaptors.test.ts b/packages/cli/test/util/expand-adaptors.test.ts index fa0c19da7..23f1a006d 
100644 --- a/packages/cli/test/util/expand-adaptors.test.ts +++ b/packages/cli/test/util/expand-adaptors.test.ts @@ -2,86 +2,91 @@ import test from 'ava'; import expandAdaptors from '../../src/util/expand-adaptors'; test('expands common', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common'] }); - t.is(adaptors![0], '@openfn/language-common'); + const adaptors = expandAdaptors(['common']) as string[]; + t.is(adaptors[0], '@openfn/language-common'); }); test('expands common with version', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common@1.0.0'] }); - t.is(adaptors![0], '@openfn/language-common@1.0.0'); + const adaptors = expandAdaptors(['common@1.0.0']) as string[]; + t.is(adaptors[0], '@openfn/language-common@1.0.0'); }); test('expands common with path', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common=a/b/c'] }); - t.is(adaptors![0], '@openfn/language-common=a/b/c'); + const adaptors = expandAdaptors(['common=a/b/c']) as string[]; + t.is(adaptors[0], '@openfn/language-common=a/b/c'); }); test('expands http and dhis2', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['common', 'dhis2'] }); - const [a, b] = adaptors!; + const adaptors = expandAdaptors(['common', 'dhis2']) as string[]; + const [a, b] = adaptors; t.is(a, '@openfn/language-common'); t.is(b, '@openfn/language-dhis2'); }); test('expands nonsense', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['gn@25~A8fa1'] }); - t.is(adaptors![0], '@openfn/language-gn@25~A8fa1'); + const adaptors = expandAdaptors(['gn@25~A8fa1']) as string[]; + t.is(adaptors[0], '@openfn/language-gn@25~A8fa1'); }); test('does not expand a full adaptor name', (t) => { - const { adaptors } = expandAdaptors({ - adaptors: ['@openfn/language-common'], - }); - t.is(adaptors![0], '@openfn/language-common'); + const adaptors = expandAdaptors(['@openfn/language-common']) as string[]; + t.is(adaptors[0], '@openfn/language-common'); }); test('does not expand a full 
adaptor name with a path', (t) => { - const { adaptors } = expandAdaptors({ - adaptors: ['@openfn/language-common=a/b/c'], - }); - t.is(adaptors![0], '@openfn/language-common=a/b/c'); + const adaptors = expandAdaptors([ + '@openfn/language-common=a/b/c', + ]) as string[]; + t.is(adaptors[0], '@openfn/language-common=a/b/c'); }); test('does not expand a simple path', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['a/b'] }); - t.is(adaptors![0], 'a/b'); + const adaptors = expandAdaptors(['a/b']) as string[]; + t.is(adaptors[0], 'a/b'); }); test('does not expand an absolute path', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['/a/b/c'] }); - t.is(adaptors![0], '/a/b/c'); + const adaptors = expandAdaptors(['/a/b/c']) as string[]; + t.is(adaptors[0], '/a/b/c'); }); test('does not expand a js file', (t) => { - const { adaptors } = expandAdaptors({ adaptors: ['my-adaptor.js'] }); - t.is(adaptors![0], 'my-adaptor.js'); + const adaptors = expandAdaptors(['my-adaptor.js']) as string[]; + t.is(adaptors[0], 'my-adaptor.js'); }); -test('expands adaptors in a workflow', (t) => { - const workflow = { - start: 'a', - jobs: { - a: { - adaptor: 'common', - expression: 'fn()', - }, - b: { - adaptor: 'http@1.0.0', - expression: 'fn()', - }, - c: { - adaptor: 'salesforce=a/b/c', - expression: 'fn()', - }, - d: { - adaptor: 'a/b/c/my-adaptor.js', - expression: 'fn()', - }, +test('expands adaptors in an execution plan', (t) => { + const plan = { + workflow: { + steps: [ + { + id: 'a', + adaptor: 'common', + expression: 'fn()', + }, + { + id: 'b', + adaptor: 'http@1.0.0', + expression: 'fn()', + }, + { + id: 'c', + adaptor: 'salesforce=a/b/c', + expression: 'fn()', + }, + { + id: 'd', + adaptor: 'a/b/c/my-adaptor.js', + expression: 'fn()', + }, + ], }, + options: {}, }; - const newOpts = expandAdaptors({ workflow }); - t.is(newOpts.workflow!.jobs.a.adaptor, '@openfn/language-common'); - t.is(newOpts.workflow!.jobs.b.adaptor, '@openfn/language-http@1.0.0'); - 
t.is(newOpts.workflow!.jobs.c.adaptor, '@openfn/language-salesforce=a/b/c'); - t.is(newOpts.workflow!.jobs.d.adaptor, 'a/b/c/my-adaptor.js'); + expandAdaptors(plan); + const [a, b, c, d] = plan.workflow.steps; + t.is(a.adaptor, '@openfn/language-common'); + t.is(b.adaptor, '@openfn/language-http@1.0.0'); + t.is(c.adaptor, '@openfn/language-salesforce=a/b/c'); + t.is(d.adaptor, 'a/b/c/my-adaptor.js'); }); diff --git a/packages/cli/test/util/load-input.test.ts b/packages/cli/test/util/load-input.test.ts deleted file mode 100644 index 4ee819802..000000000 --- a/packages/cli/test/util/load-input.test.ts +++ /dev/null @@ -1,322 +0,0 @@ -import test from 'ava'; -import mock from 'mock-fs'; -import { createMockLogger } from '@openfn/logger'; -import loadInput from '../../src/util/load-input'; -import { ExecutionPlan } from '@openfn/runtime'; - -const logger = createMockLogger(undefined, { level: 'debug' }); - -test.beforeEach(() => { - mock({ - 'test/job.js': 'x', - 'test/wf.json': JSON.stringify({ - start: 'a', - jobs: [{ id: 'a', expression: 'x()' }], - }), - 'test/wf-err.json': '!!!', - }); -}); - -test.afterEach(() => { - logger._reset(); - mock.restore(); -}); - -test.serial('do nothing if no path provided', async (t) => { - const opts = {}; - - const result = await loadInput(opts, logger); - t.falsy(result); - t.assert(Object.keys(opts).length === 0); -}); - -test.serial('return the workflow if already set ', async (t) => { - const opts = { - workflow: { start: 'x', jobs: [] }, - job: 'j', - jobPath: 'test/job.js', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.truthy(result); - t.is(result.start, 'x'); -}); - -test.serial( - 'return the job if already set (and workflow is not)', - async (t) => { - const opts = { - job: 'j', - jobPath: 'test/job.js', - }; - - const result = await loadInput(opts, logger); - t.is(result, 'j'); - } -); - -test.serial('load a job from a path and return the result', async (t) => { - const opts = { - 
jobPath: 'test/job.js', - }; - - const result = await loadInput(opts, logger); - t.is(result, 'x'); -}); - -test.serial('load a job from a path and mutate opts', async (t) => { - const opts = { - jobPath: 'test/job.js', - job: '', - }; - - await loadInput(opts, logger); - t.is(opts.job, 'x'); -}); - -test.serial('abort if the job cannot be found', async (t) => { - const opts = { - jobPath: 'test/blah.js', - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /job not found/i)); - t.assert( - logger._find('always', /Failed to load the job from test\/blah.js/i) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); -}); - -test.serial( - 'load a workflow from a path and return the result as JSON', - async (t) => { - const opts = { - workflowPath: 'test/wf.json', - }; - - const result = await loadInput(opts, logger); - t.is(result.start, 'a'); - } -); - -test.serial('abort if the workflow cannot be found', async (t) => { - const opts = { - workflowPath: 'test/blah.json', - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /workflow not found/i)); - t.assert( - logger._find('always', /Failed to load a workflow from test\/blah.json/i) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); -}); - -test.serial('abort if the workflow contains invalid json', async (t) => { - const opts = { - workflowPath: 'test/wf-err.json', - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /invalid json in workflow/i)); - t.assert( - logger._find('always', /check the syntax of the json at test\/wf-err.json/i) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); -}); - -test.serial('load a workflow from a path and mutate opts', async (t) => { - const opts = { - workflowPath: 
'test/wf.json', - workflow: undefined, - }; - - await loadInput(opts, logger); - t.is((opts.workflow as any).start, 'a'); -}); - -test.serial('prefer workflow to job if both are somehow set', async (t) => { - const opts = { - jobPath: 'test/job.js', - workflowPath: 'test/wf.json', - }; - - const result = await loadInput(opts, logger); - t.is(result.start, 'a'); -}); - -test.serial('resolve workflow expression paths (filename)', async (t) => { - mock({ - '/test/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: 'job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -test.serial( - 'resolve workflow expression paths (relative same dir)', - async (t) => { - mock({ - '/test/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: './job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); - } -); - -test.serial( - 'resolve workflow expression paths (relative different dir)', - async (t) => { - mock({ - '/jobs/job.js': 'x', - '/test/wf.json': JSON.stringify({ - jobs: [{ expression: '../jobs/job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); - } -); - -test.serial('resolve workflow expression paths (absolute)', async (t) => { - mock({ - '/job.js': 'x', - '/test/wf.json': JSON.stringify({ - start: 'a', - jobs: [{ expression: '/job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -test.serial('resolve workflow expression paths (home)', async (t) => { - mock({ - '~/job.js': 'x', - 
'/test/wf.json': JSON.stringify({ - jobs: [{ expression: '~/job.js' }], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -test.serial('Load a workflow path with trailing spaces', async (t) => { - const opts = { - workflow: { jobs: [{ expression: 'test/job.js ' }] }, - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs[0].expression, 'x'); -}); - -// Less thorough testing on config because it goes through the same code -test.serial('resolve workflow config paths (home)', async (t) => { - const cfg = { id: 'x' }; - const cfgString = JSON.stringify(cfg); - mock({ - '~/config.json': cfgString, - '/config.json': cfgString, - '/test/config.json': cfgString, - '/test/wf.json': JSON.stringify({ - jobs: [ - { configuration: '/config.json' }, - { configuration: '~/config.json' }, - { configuration: 'config.json ' }, // trailing spaces! 
- { configuration: './config.json ' }, - ], - }), - }); - - const opts = { - workflowPath: '/test/wf.json', - }; - - const result = (await loadInput(opts, logger)) as ExecutionPlan; - t.is(result.jobs.length, 4); - for (const job of result.jobs) { - t.deepEqual(job.configuration, cfg); - } -}); - -test.serial( - 'abort if a workflow expression path cannot be found', - async (t) => { - const opts = { - workflow: { start: 'x', jobs: [{ id: 'a', expression: 'err.js' }] }, - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /file not found for job a: err.js/i)); - t.assert( - logger._find( - 'always', - /This workflow references a file which cannot be found/i - ) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); - } -); - -test.serial( - 'abort if a workflow expression path cannot be found for an anonymous job', - async (t) => { - const opts = { - workflow: { - start: 'x', - jobs: [{ expression: 'jam()' }, { expression: 'err.js' }], - }, - }; - - const logger = createMockLogger(); - await t.throwsAsync(() => loadInput(opts, logger)); - - t.assert(logger._find('error', /file not found for job 2: err.js/i)); - t.assert( - logger._find( - 'always', - /This workflow references a file which cannot be found/i - ) - ); - t.assert(logger._find('error', /critical error: aborting command/i)); - } -); diff --git a/packages/cli/test/util/load-plan.test.ts b/packages/cli/test/util/load-plan.test.ts new file mode 100644 index 000000000..caadaad71 --- /dev/null +++ b/packages/cli/test/util/load-plan.test.ts @@ -0,0 +1,273 @@ +import test from 'ava'; +import mock from 'mock-fs'; +import { createMockLogger } from '@openfn/logger'; +import type { Job } from '@openfn/lexicon'; + +import loadPlan from '../../src/util/load-plan'; +import { Opts } from '../../src/options'; + +const logger = createMockLogger(undefined, { level: 'debug' }); + +const sampleXPlan = { + options: { start: 
'a' }, + workflow: { + name: 'wf', + steps: [{ id: 'a', expression: 'x()' }], + }, +}; + +const createPlan = (steps: Job[] = []) => ({ + workflow: { + steps, + }, + options: { + start: steps[0]?.id ?? 'a', + }, +}); + +test.beforeEach(() => { + mock({ + 'test/job.js': 'x', + 'test/wf-old.json': JSON.stringify({ + start: 'a', + jobs: [{ id: 'a', expression: 'x()' }], + }), + 'test/wf.json': JSON.stringify(sampleXPlan), + 'test/wf-err.json': '!!!', + }); +}); + +test.afterEach(() => { + logger._reset(); + mock.restore(); +}); + +test.serial('expression: load a plan from an expression.js', async (t) => { + const opts = { + expressionPath: 'test/job.js', + plan: {}, + }; + + const plan = await loadPlan(opts as Opts, logger); + + t.truthy(plan); + t.deepEqual(plan.options, {}); + t.is(plan.workflow.steps.length, 1); + t.is(plan.workflow.name, 'job'); + t.deepEqual(plan.workflow.steps[0], { + expression: 'x', + }); +}); + +test.serial('expression: set an adaptor on the plan', async (t) => { + const opts = { + expressionPath: 'test/job.js', + // Note that adaptor expansion should have happened before loadPlan is called + adaptors: ['@openfn/language-common'], + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + const step = plan.workflow.steps[0] as Job; + + t.is(step.adaptor, '@openfn/language-common'); +}); + +test.serial('expression: do not expand adaptors', async (t) => { + const opts = { + expressionPath: 'test/job.js', + expandAdaptors: false, + // Note that adaptor expansion should have happened before loadPlan is called + adaptors: ['common'], + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + const step = plan.workflow.steps[0] as Job; + + t.is(step.adaptor, 'common'); +}); + +test.serial('expression: set a timeout on the plan', async (t) => { + const opts = { + expressionPath: 'test/job.js', + expandAdaptors: true, + timeout: 111, + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + 
t.is(plan.options.timeout, 111); +}); + +test.serial('expression: set a start on the plan', async (t) => { + const opts = { + expressionPath: 'test/job.js', + start: 'x', + } as Partial; + + const plan = await loadPlan(opts as Opts, logger); + + t.is(plan.options.start, 'x'); +}); + +test.serial('xplan: load a plan from workflow path', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + expandAdaptors: true, + plan: {}, + }; + + const plan = await loadPlan(opts as Opts, logger); + + t.truthy(plan); + t.deepEqual(plan, sampleXPlan); +}); + +test.serial('xplan: expand adaptors', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + expandAdaptors: true, + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + adaptor: 'common@1.0.0', + }, + ]); + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const result = await loadPlan(opts as Opts, logger); + t.truthy(result); + + const step = result.workflow.steps[0] as Job; + t.is(step.adaptor, '@openfn/language-common@1.0.0'); +}); + +test.serial('xplan: do not expand adaptors', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + expandAdaptors: false, + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + adaptor: 'common@1.0.0', + }, + ]); + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const result = await loadPlan(opts as Opts, logger); + t.truthy(result); + + const step = result.workflow.steps[0] as Job; + t.is(step.adaptor, 'common@1.0.0'); +}); + +test.serial('xplan: set timeout from CLI', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + timeout: 666, + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + }, + ]); + // The incoming option should overwrite this one + // @ts-ignore + plan.options.timeout = 1; + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const { options } = await loadPlan(opts as Opts, logger); + t.is(options.timeout, 666); +}); 
+ +test.serial('xplan: set start from CLI', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + start: 'b', + plan: {}, + }; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + }, + ]); + // The incoming option should overwrite this one + // @ts-ignore + plan.options.start = 'a'; + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const { options } = await loadPlan(opts as Opts, logger); + t.is(options.start, 'b'); +}); + +test.serial('xplan: map to monorepo', async (t) => { + const opts = { + workflowPath: 'test/wf.json', + expandAdaptors: true, + plan: {}, + monorepoPath: '/repo/', + } as Partial; + + const plan = createPlan([ + { + id: 'a', + expression: '.', + adaptor: 'common', + }, + ]); + + mock({ + 'test/wf.json': JSON.stringify(plan), + }); + + const result = await loadPlan(opts as Opts, logger); + t.truthy(result); + + const step = result.workflow.steps[0] as Job; + t.is(step.adaptor, '@openfn/language-common=/repo/packages/common'); +}); + +test.serial('old-workflow: load a plan from workflow path', async (t) => { + const opts = { + workflowPath: 'test/wf-old.json', + plan: {}, + }; + + const plan = await loadPlan(opts as Opts, logger); + + t.deepEqual(plan.options, { + start: 'a', + }); + t.is(plan.workflow.steps.length, 1); + t.is(plan.workflow.name, 'wf-old'); + t.deepEqual(plan.workflow.steps[0], { + id: 'a', + expression: 'x()', + }); +}); diff --git a/packages/cli/test/util/map-adaptors-to-monorepo.test.ts b/packages/cli/test/util/map-adaptors-to-monorepo.test.ts index a5970ad01..3c6dd9a7d 100644 --- a/packages/cli/test/util/map-adaptors-to-monorepo.test.ts +++ b/packages/cli/test/util/map-adaptors-to-monorepo.test.ts @@ -7,6 +7,7 @@ import mapAdaptorsToMonorepo, { validateMonoRepo, updatePath, } from '../../src/util/map-adaptors-to-monorepo'; +import { ExecutionPlan } from '@openfn/lexicon'; const REPO_PATH = 'a/b/c'; const ABS_REPO_PATH = path.resolve(REPO_PATH); @@ -72,13 +73,8 @@ 
test.serial('mapAdaptorsToMonorepo: map adaptors', async (t) => { [`${REPO_PATH}/package.json`]: '{ "name": "adaptors" }', }); - const options = { - monorepoPath: REPO_PATH, - adaptors: ['common'], - }; - - const newOptions = await mapAdaptorsToMonorepo(options, logger); - t.deepEqual(newOptions.adaptors, [`common=${ABS_REPO_PATH}/packages/common`]); + const result = await mapAdaptorsToMonorepo(REPO_PATH, ['common'], logger); + t.deepEqual(result, [`common=${ABS_REPO_PATH}/packages/common`]); }); test.serial('mapAdaptorsToMonorepo: map workflow', async (t) => { @@ -86,23 +82,23 @@ test.serial('mapAdaptorsToMonorepo: map workflow', async (t) => { [`${REPO_PATH}/package.json`]: '{ "name": "adaptors" }', }); - const options = { - monorepoPath: REPO_PATH, + const plan: ExecutionPlan = { workflow: { - id: 'x', - jobs: [ + steps: [ { + expression: '.', adaptor: 'common', }, ], }, + options: {}, }; - const newOptions = await mapAdaptorsToMonorepo(options, logger); - t.deepEqual(newOptions.workflow, { - id: 'x', - jobs: [ + await mapAdaptorsToMonorepo(REPO_PATH, plan, logger); + t.deepEqual(plan.workflow, { + steps: [ { + expression: '.', adaptor: `common=${ABS_REPO_PATH}/packages/common`, }, ], diff --git a/packages/compiler/src/compile.ts b/packages/compiler/src/compile.ts index 9e37b192d..9e66d17d3 100644 --- a/packages/compiler/src/compile.ts +++ b/packages/compiler/src/compile.ts @@ -21,10 +21,10 @@ export default function compile(pathOrSource: string, options: Options = {}) { let source = pathOrSource; if (isPath(pathOrSource)) { - logger.debug('Starting compilation from file at', pathOrSource); + //logger.debug('Starting compilation from file at', pathOrSource); source = loadFile(pathOrSource); } else { - logger.debug('Starting compilation from string'); + //logger.debug('Starting compilation from string'); } const ast = parse(source); diff --git a/packages/deploy/src/index.ts b/packages/deploy/src/index.ts index 1695f2bd8..ed77619f1 100644 --- 
a/packages/deploy/src/index.ts +++ b/packages/deploy/src/index.ts @@ -164,7 +164,7 @@ export async function deploy(config: DeployConfig, logger: Logger) { await writeState(config, deployedState); - logger.always('Deployed.'); + logger.success('Deployed'); return true; } diff --git a/packages/engine-multi/package.json b/packages/engine-multi/package.json index abf9e212a..ed7cc0a7c 100644 --- a/packages/engine-multi/package.json +++ b/packages/engine-multi/package.json @@ -16,6 +16,7 @@ "dependencies": { "@openfn/compiler": "workspace:*", "@openfn/language-common": "2.0.0-rc3", + "@openfn/lexicon": "workspace:^", "@openfn/logger": "workspace:*", "@openfn/runtime": "workspace:*", "fast-safe-stringify": "^2.1.1" diff --git a/packages/engine-multi/src/api/autoinstall.ts b/packages/engine-multi/src/api/autoinstall.ts index 769c25b36..a20113630 100644 --- a/packages/engine-multi/src/api/autoinstall.ts +++ b/packages/engine-multi/src/api/autoinstall.ts @@ -1,17 +1,16 @@ import { - ExecutionPlan, ensureRepo, getAliasedName, getNameAndVersion, loadRepoPkg, } from '@openfn/runtime'; import { install as runtimeInstall } from '@openfn/runtime'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; import { AUTOINSTALL_COMPLETE, AUTOINSTALL_ERROR } from '../events'; import { AutoinstallError } from '../errors'; - -import type { Logger } from '@openfn/logger'; -import type { ExecutionContext } from '../types'; +import ExecutionContext from '../classes/ExecutionContext'; // none of these options should be on the plan actually export type AutoinstallOptions = { @@ -140,6 +139,7 @@ const autoinstall = async (context: ExecutionContext): Promise => { // Write the adaptor version to the context // This is a reasonably accurate, but not totally bulletproof, report + // @ts-ignore context.versions[name] = v; paths[name] = { @@ -206,9 +206,9 @@ const isInstalled = async ( export const identifyAdaptors = (plan: ExecutionPlan): Set => { 
const adaptors = new Set(); - plan.jobs - .filter((job) => job.adaptor) - .forEach((job) => adaptors.add(job.adaptor!)); + plan.workflow.steps + .filter((job) => (job as Job).adaptor) + .forEach((job) => adaptors.add((job as Job).adaptor!)); return adaptors; }; diff --git a/packages/engine-multi/src/api/compile.ts b/packages/engine-multi/src/api/compile.ts index 92830d893..c47660adf 100644 --- a/packages/engine-multi/src/api/compile.ts +++ b/packages/engine-multi/src/api/compile.ts @@ -1,12 +1,10 @@ -// This function will compile a workflow -// Later we'll add an in-memory cache to prevent the same job -// being compiled twice - -import type { Logger } from '@openfn/logger'; import compile, { preloadAdaptorExports, Options } from '@openfn/compiler'; import { getModulePath } from '@openfn/runtime'; -import { ExecutionContext } from '../types'; +import type { Job } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; + import { CompileError } from '../errors'; +import type ExecutionContext from '../classes/ExecutionContext'; // TODO this compiler is going to change anyway to run just in time // the runtime will have an onCompile hook @@ -15,8 +13,9 @@ export default async (context: ExecutionContext) => { const { logger, state, options } = context; const { repoDir, noCompile } = options; - if (!noCompile && state.plan?.jobs?.length) { - for (const job of state.plan.jobs) { + if (!noCompile && state.plan?.workflow.steps?.length) { + for (const step of state.plan.workflow.steps) { + const job = step as Job; if (job.expression) { try { job.expression = await compileJob( diff --git a/packages/engine-multi/src/api/execute.ts b/packages/engine-multi/src/api/execute.ts index c35085581..933090142 100644 --- a/packages/engine-multi/src/api/execute.ts +++ b/packages/engine-multi/src/api/execute.ts @@ -1,7 +1,7 @@ import { timestamp } from '@openfn/logger'; import * as workerEvents from '../worker/events'; -import type { ExecutionContext } from '../types'; 
+import type ExecutionContext from '../classes/ExecutionContext'; import autoinstall from './autoinstall'; import compile from './compile'; import { @@ -115,11 +115,9 @@ const execute = async (context: ExecutionContext) => { error(context, { workflowId: state.plan.id, error: evt.error }); }, }; - - // TODO in the new world order, what sorts of errors are being caught here? return callWorker( 'run', - [state.plan, runOptions], + [state.plan, state.input || {}, runOptions || {}], events, workerOptions ).catch((e: any) => { diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index 68dcae76a..2c8f84d01 100644 --- a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -1,7 +1,7 @@ // here's here things get a bit complex event wise import * as externalEvents from '../events'; import * as internalEvents from '../worker/events'; -import { ExecutionContext } from '../types'; +import type ExecutionContext from '../classes/ExecutionContext'; export const workflowStart = ( context: ExecutionContext, diff --git a/packages/engine-multi/src/api/preload-credentials.ts b/packages/engine-multi/src/api/preload-credentials.ts index fb9545ff7..08726a313 100644 --- a/packages/engine-multi/src/api/preload-credentials.ts +++ b/packages/engine-multi/src/api/preload-credentials.ts @@ -1,12 +1,13 @@ -import { CompiledExecutionPlan } from '@openfn/runtime'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; export default async ( - plan: CompiledExecutionPlan, + plan: ExecutionPlan, loader: (id: string) => Promise ) => { const loaders: Promise[] = []; - Object.values(plan.jobs).forEach((job) => { + Object.values(plan.workflow.steps).forEach((step) => { + const job = step as Job; if (typeof job.configuration === 'string') { loaders.push( new Promise(async (resolve) => { diff --git a/packages/engine-multi/src/classes/ExecutionContext.ts b/packages/engine-multi/src/classes/ExecutionContext.ts index 
cf340407e..0e7c70480 100644 --- a/packages/engine-multi/src/classes/ExecutionContext.ts +++ b/packages/engine-multi/src/classes/ExecutionContext.ts @@ -1,13 +1,15 @@ import { EventEmitter } from 'node:events'; +import type { Logger } from '@openfn/logger'; +import loadVersions from '../util/load-versions'; import type { WorkflowState, CallWorker, ExecutionContextConstructor, ExecutionContextOptions, + Versions, } from '../types'; -import type { Logger } from '@openfn/logger'; -import loadVersions from '../util/load-versions'; +import type { ExternalEvents, EventMap } from '../events'; /** * The ExeuctionContext class wraps an event emitter with some useful context @@ -22,7 +24,7 @@ export default class ExecutionContext extends EventEmitter { logger: Logger; callWorker: CallWorker; options: ExecutionContextOptions; - versions = {}; + versions: Versions; constructor({ state, @@ -40,8 +42,11 @@ export default class ExecutionContext extends EventEmitter { // override emit to add the workflowId to all events // @ts-ignore - emit(event: string, payload: any) { - payload.workflowId = this.state.id; + emit( + event: T, + payload: Omit + ): boolean { + (payload as EventMap[T]).workflowId = this.state.id; return super.emit(event, payload); } } diff --git a/packages/engine-multi/src/engine.ts b/packages/engine-multi/src/engine.ts index d5162cb2f..ad868a40e 100644 --- a/packages/engine-multi/src/engine.ts +++ b/packages/engine-multi/src/engine.ts @@ -1,7 +1,9 @@ import { EventEmitter } from 'node:events'; import path from 'node:path'; import { fileURLToPath } from 'node:url'; -import type { ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; + import { JOB_COMPLETE, JOB_START, @@ -15,10 +17,14 @@ import execute from './api/execute'; import validateWorker from './api/validate-worker'; import ExecutionContext from './classes/ExecutionContext'; -import type { SanitizePolicies } from 
'@openfn/logger'; import type { LazyResolvers } from './api'; -import type { EngineAPI, EventHandler, WorkflowState } from './types'; -import type { Logger } from '@openfn/logger'; +import type { + EngineAPI, + EventHandler, + ExecuteOptions, + RuntimeEngine, + WorkflowState, +} from './types'; import type { AutoinstallOptions } from './api/autoinstall'; const DEFAULT_RUN_TIMEOUT = 1000 * 60 * 10; // ms @@ -70,23 +76,24 @@ export type EngineOptions = { repoDir: string; resolvers?: LazyResolvers; runtimelogger?: Logger; - runTimeoutMs?: number; + runTimeoutMs?: number; // default timeout statePropsToRemove?: string[]; whitelist?: RegExp[]; }; -export type ExecuteOptions = { - memoryLimitMb?: number; - resolvers?: LazyResolvers; - runTimeoutMs?: number; - sanitize?: SanitizePolicies; +export type InternalEngine = RuntimeEngine & { + // TODO Not a very good type definition, but it calms the tests down + [other: string]: any; }; // This creates the internal API // tbh this is actually the engine, right, this is where stuff happens // the api file is more about the public api I think // TOOD options MUST have a logger -const createEngine = async (options: EngineOptions, workerPath?: string) => { +const createEngine = async ( + options: EngineOptions, + workerPath?: string +): Promise => { const states: Record = {}; const contexts: Record = {}; const deferredListeners: Record[]> = {}; @@ -130,9 +137,9 @@ const createEngine = async (options: EngineOptions, workerPath?: string) => { // create, register and return a state object // should it also load the initial data clip? // when does that happen? No, that's inside execute - const registerWorkflow = (plan: ExecutionPlan) => { + const registerWorkflow = (plan: ExecutionPlan, input: State) => { // TODO throw if already registered? 
- const state = createState(plan); + const state = createState(plan, input); states[state.id] = state; return state; }; @@ -144,13 +151,17 @@ const createEngine = async (options: EngineOptions, workerPath?: string) => { // TODO too much logic in this execute function, needs farming out // I don't mind having a wrapper here but it must be super thin // TODO maybe engine options is too broad? - const executeWrapper = (plan: ExecutionPlan, opts: ExecuteOptions = {}) => { + const executeWrapper = ( + plan: ExecutionPlan, + input: State, + opts: ExecuteOptions = {} + ) => { options.logger!.debug('executing plan ', plan?.id ?? ''); const workflowId = plan.id!; // TODO throw if plan is invalid // Wait, don't throw because the server will die // Maybe return null instead - const state = registerWorkflow(plan); + const state = registerWorkflow(plan, input); const context = new ExecutionContext({ state, diff --git a/packages/engine-multi/src/test/util.ts b/packages/engine-multi/src/test/util.ts index 0777af17a..494c24e27 100644 --- a/packages/engine-multi/src/test/util.ts +++ b/packages/engine-multi/src/test/util.ts @@ -1,15 +1,26 @@ -export const createPlan = (job = {}) => ({ - id: 'wf-1', - jobs: [ - { - id: 'j1', - adaptor: 'common', // not used - credential: {}, // not used - data: {}, // Used if no expression - expression: '(s) => ({ data: { answer: s.data?.input || 42 } })', - _delay: 1, // only used in the mock - - ...job, +import { ExecutionPlan } from '@openfn/lexicon'; + +export const createPlan = (job = {}) => + ({ + id: 'wf-1', + workflow: { + steps: [ + { + id: 'j1', + adaptor: 'common', // not used + configuration: {}, // not used + expression: '(s) => ({ data: { answer: s.data?.input || 42 } })', + + // TODO is this actually used? Should I get rid? 
Underscore + // @ts-ignore + data: {}, // Used if no expression + + // @ts-ignore + _delay: 1, // only used in the mock + + ...job, + }, + ], }, - ], -}); + options: {}, + } as ExecutionPlan); diff --git a/packages/engine-multi/src/test/worker-functions.ts b/packages/engine-multi/src/test/worker-functions.ts index 0c516e07e..f562edbcb 100644 --- a/packages/engine-multi/src/test/worker-functions.ts +++ b/packages/engine-multi/src/test/worker-functions.ts @@ -2,6 +2,7 @@ import path from 'node:path'; import { register, publish, threadId } from '../worker/thread/runtime'; import { increment } from './counter.js'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; const tasks = { test: async (result = 42) => { @@ -25,13 +26,13 @@ const tasks = { processId: async () => process.pid, // very very simple intepretation of a run function // Most tests should use the mock-worker instead - run: async (plan: any, _adaptorPaths: any) => { + run: async (plan: ExecutionPlan, _input: any, _adaptorPaths: any) => { const workflowId = plan.id; publish('worker:workflow-start', { workflowId, }); try { - const [job] = plan.jobs; + const [job] = plan.workflow.steps as Job[]; const result = eval(job.expression); publish('worker:workflow-complete', { workflowId, diff --git a/packages/engine-multi/src/types.ts b/packages/engine-multi/src/types.ts index 819b3473f..bc69b7445 100644 --- a/packages/engine-multi/src/types.ts +++ b/packages/engine-multi/src/types.ts @@ -1,10 +1,10 @@ import type { Logger, SanitizePolicies } from '@openfn/logger'; -import type { ExecutionPlan } from '@openfn/runtime'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; import type { EventEmitter } from 'node:events'; -import type { ExternalEvents, EventMap } from './events'; import type { EngineOptions } from './engine'; import type { ExecOpts } from './worker/pool'; +import { LazyResolvers } from './api'; export type Resolver = (id: string) => Promise; @@ -23,9 +23,11 @@ export type WorkflowState = { 
startTime?: number; duration?: number; error?: string; - result?: any; // State + result?: State; + + // Ok this changes quite a bit huh plan: ExecutionPlan; // this doesn't include options - options: any; // TODO this is wf specific options, like logging policy + input: State; }; export type CallWorker = ( @@ -42,23 +44,16 @@ export type ExecutionContextConstructor = { options: ExecutionContextOptions; }; -export type ExecutionContextOptions = EngineOptions & { +export type ExecuteOptions = { + memoryLimitMb?: number; + resolvers?: LazyResolvers; + runTimeoutMs?: number; sanitize?: SanitizePolicies; }; -export interface ExecutionContext extends EventEmitter { - constructor(args: ExecutionContextConstructor): ExecutionContext; - options: EngineOptions; - state: WorkflowState; - logger: Logger; - callWorker: CallWorker; - versions: Versions; - - emit( - event: T, - payload: Omit - ): boolean; -} +export type ExecutionContextOptions = EngineOptions & { + sanitize?: SanitizePolicies; +}; export interface EngineAPI extends EventEmitter { callWorker: CallWorker; @@ -66,7 +61,7 @@ export interface EngineAPI extends EventEmitter { } export interface RuntimeEngine { - version: string; + version?: string; options: EngineOptions; @@ -75,14 +70,13 @@ export interface RuntimeEngine { execute( plan: ExecutionPlan, + input: State, options?: Partial ): Pick; destroy(): void; on: (evt: string, fn: (...args: any[]) => void) => void; - - // TODO my want some maintenance APIs, like getStatus. 
idk } export type Versions = { diff --git a/packages/engine-multi/src/util/create-state.ts b/packages/engine-multi/src/util/create-state.ts index 7e1c538aa..3175c92cb 100644 --- a/packages/engine-multi/src/util/create-state.ts +++ b/packages/engine-multi/src/util/create-state.ts @@ -1,22 +1,15 @@ -import { ExecutionPlan } from '@openfn/runtime'; +import { ExecutionPlan, State } from '@openfn/lexicon'; import { WorkflowState } from '../types'; -export default (plan: ExecutionPlan, options = {}): WorkflowState => ({ +export default (plan: ExecutionPlan, input: State): WorkflowState => ({ id: plan.id!, status: 'pending', plan, + input, threadId: undefined, startTime: undefined, duration: undefined, error: undefined, result: undefined, - - // this is wf-specific options - // but they should be on context, rather than state - options, - // options: { - // ...options, - // repoDir, - // }, }); diff --git a/packages/engine-multi/src/worker/events.ts b/packages/engine-multi/src/worker/events.ts index 698df06eb..eabd8876a 100644 --- a/packages/engine-multi/src/worker/events.ts +++ b/packages/engine-multi/src/worker/events.ts @@ -3,7 +3,6 @@ */ import { JSONLog } from '@openfn/logger'; -import { Versions } from '../types'; // events used by the internal thread runtime @@ -45,7 +44,6 @@ export interface WorkflowCompleteEvent extends InternalEvent { export interface JobStartEvent extends InternalEvent { jobId: string; - versions: Versions; } export interface JobCompleteEvent extends InternalEvent { diff --git a/packages/engine-multi/src/worker/pool.ts b/packages/engine-multi/src/worker/pool.ts index 74b699259..5e94f05b7 100644 --- a/packages/engine-multi/src/worker/pool.ts +++ b/packages/engine-multi/src/worker/pool.ts @@ -120,13 +120,17 @@ function createPool(script: string, options: PoolOptions = {}, logger: Logger) { } }; - const exec = (task: string, args: any[] = [], opts: ExecOpts = {}) => { + const exec = ( + task: string, + args: any[] = [], + opts: ExecOpts = {} + ): 
Promise => { // TODO Throw if destroyed if (destroyed) { throw new Error('Worker destroyed'); } - const promise = new Promise(async (resolve, reject) => { + const promise = new Promise(async (resolve, reject) => { // TODO what should we do if a process in the pool dies, perhaps due to OOM? const onExit = async (code: number) => { if (code !== HANDLED_EXIT_CODE) { @@ -194,7 +198,6 @@ function createPool(script: string, options: PoolOptions = {}, logger: Logger) { } try { - logger.debug(`pool: Running task "${task}" in worker ${worker.pid}`); worker.send({ type: ENGINE_RUN_TASK, task, diff --git a/packages/engine-multi/src/worker/thread/helpers.ts b/packages/engine-multi/src/worker/thread/helpers.ts index cb8a2d417..fb3e4d9ee 100644 --- a/packages/engine-multi/src/worker/thread/helpers.ts +++ b/packages/engine-multi/src/worker/thread/helpers.ts @@ -4,14 +4,13 @@ import process from 'node:process'; import stringify from 'fast-safe-stringify'; import createLogger, { SanitizePolicies } from '@openfn/logger'; +import type { JSONLog } from '@openfn/logger'; import * as workerEvents from '../events'; import { HANDLED_EXIT_CODE } from '../../events'; import { ExecutionError, ExitError } from '../../errors'; - import { publish } from './runtime'; import serializeError from '../../util/serialize-error'; -import { JSONLog } from '@openfn/logger'; export const createLoggers = ( workflowId: string, @@ -67,7 +66,7 @@ export const createLoggers = ( // Execute wrapper function export const execute = async ( workflowId: string, - executeFn: () => Promise + executeFn: () => Promise | undefined ) => { const handleError = (err: any) => { publish(workerEvents.ERROR, { diff --git a/packages/engine-multi/src/worker/thread/mock-run.ts b/packages/engine-multi/src/worker/thread/mock-run.ts index c6b29b0d8..194ee5478 100644 --- a/packages/engine-multi/src/worker/thread/mock-run.ts +++ b/packages/engine-multi/src/worker/thread/mock-run.ts @@ -10,6 +10,7 @@ import { register, publish } from 
'./runtime'; import { execute, createLoggers } from './helpers'; import * as workerEvents from '../events'; +import { State } from '@openfn/lexicon'; type MockJob = { id?: string; @@ -25,13 +26,19 @@ type MockJob = { type MockExecutionPlan = { id: string; - jobs: MockJob[]; + workflow: { + steps: MockJob[]; + }; }; // This is a fake runtime handler which will return a fixed value, throw, and // optionally delay -function mockRun(plan: MockExecutionPlan) { - const [job] = plan.jobs; +function mockRun(plan: MockExecutionPlan, input: State, _options = {}) { + if (!input) { + throw new Error('no input passed to state'); + } + + const [job] = plan.workflow.steps; const { jobLogger } = createLoggers(plan.id!, 'none', publish); const workflowId = plan.id; return new Promise((resolve) => { @@ -79,6 +86,6 @@ function mockRun(plan: MockExecutionPlan) { } register({ - run: async (plan: MockExecutionPlan, _options?: any) => - execute(plan.id, () => mockRun(plan)), + run: async (plan: MockExecutionPlan, input: State, _options?: any) => + execute(plan.id, () => mockRun(plan, input)), }); diff --git a/packages/engine-multi/src/worker/thread/run.ts b/packages/engine-multi/src/worker/thread/run.ts index b6af70c87..9dd3585d4 100644 --- a/packages/engine-multi/src/worker/thread/run.ts +++ b/packages/engine-multi/src/worker/thread/run.ts @@ -1,9 +1,9 @@ // This is the run command that will be executed inside the worker thread // Most of the heavy lifting is actually handled by execute import run from '@openfn/runtime'; -import type { ExecutionPlan } from '@openfn/runtime'; -import type { SanitizePolicies } from '@openfn/logger'; import type { NotifyEvents } from '@openfn/runtime'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; +import type { SanitizePolicies } from '@openfn/logger'; import { register, publish } from './runtime'; import { execute, createLoggers } from './helpers'; @@ -15,7 +15,6 @@ type RunOptions = { whitelist?: RegExp[]; sanitize: SanitizePolicies; 
statePropsToRemove?: string[]; - // TODO timeout }; const eventMap = { @@ -26,7 +25,7 @@ const eventMap = { }; register({ - run: (plan: ExecutionPlan, runOptions: RunOptions) => { + run: (plan: ExecutionPlan, input: State, runOptions: RunOptions) => { const { adaptorPaths, whitelist, sanitize, statePropsToRemove } = runOptions; const { logger, jobLogger, adaptorLogger } = createLoggers( @@ -73,6 +72,6 @@ register({ }, }; - return execute(plan.id!, () => run(plan, undefined, options)); + return execute(plan.id!, () => run(plan, input, options)); }, }); diff --git a/packages/engine-multi/test/api.test.ts b/packages/engine-multi/test/api.test.ts index f6fd843ef..a797e76dc 100644 --- a/packages/engine-multi/test/api.test.ts +++ b/packages/engine-multi/test/api.test.ts @@ -1,9 +1,10 @@ import test from 'ava'; -import createAPI from '../src/api'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; +import createAPI from '../src/api'; import pkg from '../package.json' assert { type: 'json' }; -import { RuntimeEngine } from '../src/types'; +import type { RuntimeEngine } from '../src/types'; // thes are tests on the public api functions generally // so these are very high level tests and don't allow mock workers or anything @@ -97,17 +98,21 @@ test.serial( }, }); - const plan = { + const plan: ExecutionPlan = { id: 'a', - jobs: [ - { - expression: 'export default [s => s]', - // with no adaptor it shouldn't try to autoinstall - }, - ], + workflow: { + steps: [ + { + expression: 'export default [s => s]', + // with no adaptor it shouldn't try to autoinstall + }, + ], + }, + options: {}, }; - const listener = api.execute(plan); + const state = { x: 1 }; + const listener = api.execute(plan, state); listener.on('workflow-complete', () => { t.pass('workflow completed'); done(); @@ -126,18 +131,22 @@ test.serial('should listen to workflow-complete', async (t) => { }, }); - const plan = { + const plan: ExecutionPlan = { id: 
'a', - jobs: [ - { - expression: 'export default [s => s]', - // with no adaptor it shouldn't try to autoinstall - }, - ], + workflow: { + steps: [ + { + expression: 'export default [s => s]', + // with no adaptor it shouldn't try to autoinstall + }, + ], + }, + options: {}, }; + const state = { x: 1 }; + api.execute(plan, state); - api.execute(plan); - api.listen(plan.id, { + api.listen(plan.id!, { 'workflow-complete': () => { t.pass('workflow completed'); done(); diff --git a/packages/engine-multi/test/api/autoinstall.test.ts b/packages/engine-multi/test/api/autoinstall.test.ts index 75e3464d9..defd732bd 100644 --- a/packages/engine-multi/test/api/autoinstall.test.ts +++ b/packages/engine-multi/test/api/autoinstall.test.ts @@ -1,7 +1,11 @@ import test from 'ava'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; -import autoinstall, { identifyAdaptors } from '../../src/api/autoinstall'; +import autoinstall, { + AutoinstallOptions, + identifyAdaptors, +} from '../../src/api/autoinstall'; import { AUTOINSTALL_COMPLETE, AUTOINSTALL_ERROR } from '../../src/events'; import ExecutionContext from '../../src/classes/ExecutionContext'; import whitelist from '../../src/whitelist'; @@ -16,7 +20,7 @@ const mockIsInstalled = (pkg: PackageJson) => async (specifier: string) => { return pkg.dependencies.hasOwnProperty(alias); }; -const mockHandleInstall = async (specifier: string): Promise => +const mockHandleInstall = async (_specifier: string): Promise => new Promise((r) => r()).then(); const logger = createMockLogger(); @@ -27,18 +31,23 @@ const wait = (duration = 10) => }); const createContext = ( - autoinstallOpts?, - jobs?: any[], + autoinstallOpts?: AutoinstallOptions, + jobs?: Partial[], customWhitelist?: RegExp[] ) => new ExecutionContext({ state: { id: 'x', status: 'pending', - options: {}, plan: { - jobs: jobs || [{ adaptor: '@openfn/language-common@1.0.0' }], + workflow: { + steps: jobs || [ + { adaptor: 
'@openfn/language-common@1.0.0', expression: '.' }, + ], + }, + options: {}, }, + input: {}, }, logger, // @ts-ignore @@ -47,6 +56,8 @@ const createContext = ( logger, whitelist: customWhitelist || whitelist, repoDir: 'tmp/repo', + + // @ts-ignore autoinstall: autoinstallOpts || { handleInstall: mockHandleInstall, handleIsInstalled: mockIsInstalled, @@ -104,18 +115,24 @@ test('mock install: should return async', async (t) => { }); test('identifyAdaptors: pick out adaptors and remove duplicates', (t) => { - const plan = { - jobs: [ - { - adaptor: 'common@1.0.0', - }, - { - adaptor: 'common@1.0.0', - }, - { - adaptor: 'common@1.0.1', - }, - ], + const plan: ExecutionPlan = { + workflow: { + steps: [ + { + adaptor: 'common@1.0.0', + expression: '.', + }, + { + adaptor: 'common@1.0.0', + expression: '.', + }, + { + adaptor: 'common@1.0.1', + expression: '.', + }, + ], + }, + options: {}, }; const adaptors = identifyAdaptors(plan); t.true(adaptors.size === 2); @@ -160,9 +177,9 @@ test.serial( async (t) => { let callCount = 0; - const installed = {}; + const installed: Record = {}; - const mockInstall = (name) => + const mockInstall = (name: string) => new Promise((resolve) => { installed[name] = true; callCount++; @@ -172,7 +189,7 @@ test.serial( const options = { skipRepoValidation: true, handleInstall: mockInstall, - handleIsInstalled: async (name) => name in installed, + handleIsInstalled: async (name: string) => name in installed, }; const context = createContext(options); @@ -184,11 +201,11 @@ test.serial( ); test.serial('autoinstall: install in sequence', async (t) => { - const installed = {}; + const installed: Record = {}; - const states = {}; + const states: Record = {}; - const mockInstall = (name) => + const mockInstall = (name: string) => new Promise((resolve) => { // Each time install is called, // record the time the call was made @@ -205,7 +222,7 @@ test.serial('autoinstall: install in sequence', async (t) => { skipRepoValidation: true, handleInstall: 
mockInstall, handleIsInstalled: false, - }; + } as any; const c1 = createContext(options, [{ adaptor: '@openfn/language-common@1' }]); const c2 = createContext(options, [{ adaptor: '@openfn/language-common@2' }]); @@ -354,7 +371,7 @@ test.serial('autoinstall: support custom whitelist', async (t) => { }); test.serial('autoinstall: emit an event on completion', async (t) => { - let event; + let event: any; const jobs = [ { adaptor: '@openfn/language-common@1.0.0', @@ -366,7 +383,7 @@ test.serial('autoinstall: emit an event on completion', async (t) => { skipRepoValidation: true, handleInstall: async () => new Promise((done) => setTimeout(done, 50)), handleIsInstalled: async () => false, - }; + } as any; const context = createContext(autoinstallOpts, jobs); context.on(AUTOINSTALL_COMPLETE, (evt) => { @@ -416,14 +433,14 @@ test.serial('autoinstall: throw on error twice if pending', async (t) => { const autoinstallOpts = { handleInstall: mockInstall, handleIsInstalled: mockIsInstalled, - }; + } as any; const context = createContext(autoinstallOpts); autoinstall(context).catch(assertCatches); autoinstall(context).catch(assertCatches); - function assertCatches(e) { + function assertCatches(e: any) { t.is(e.name, 'AutoinstallError'); errCount += 1; if (errCount === 2) { @@ -436,7 +453,7 @@ test.serial('autoinstall: throw on error twice if pending', async (t) => { }); test.serial('autoinstall: emit on error', async (t) => { - let evt; + let evt: any; const mockIsInstalled = async () => false; const mockInstall = async () => { throw new Error('err'); @@ -478,7 +495,7 @@ test.serial('autoinstall: throw twice in a row', async (t) => { const autoinstallOpts = { handleInstall: mockInstall, handleIsInstalled: mockIsInstalled, - }; + } as any; const context = createContext(autoinstallOpts); await t.throwsAsync(() => autoinstall(context), { @@ -503,6 +520,7 @@ test('write versions to context', async (t) => { await autoinstall(context); + // @ts-ignore 
t.is(context.versions['@openfn/language-common'], '1.0.0'); }); @@ -515,5 +533,6 @@ test("write versions to context even if we don't install", async (t) => { await autoinstall(context); + // @ts-ignore t.is(context.versions['@openfn/language-common'], '1.0.0'); }); diff --git a/packages/engine-multi/test/api/call-worker.test.ts b/packages/engine-multi/test/api/call-worker.test.ts index 314314527..c5608e05e 100644 --- a/packages/engine-multi/test/api/call-worker.test.ts +++ b/packages/engine-multi/test/api/call-worker.test.ts @@ -40,7 +40,7 @@ test.serial('callWorker should return a custom result', async (t) => { }); test.serial('callWorker should trigger an event callback', async (t) => { - const onCallback = ({ result }) => { + const onCallback = ({ result }: any) => { t.is(result, 11); }; @@ -69,7 +69,7 @@ test.serial( } ); - const onCallback = (evt) => { + const onCallback = () => { t.pass('all ok'); }; @@ -81,13 +81,13 @@ test.serial('callWorker should execute in one process', async (t) => { const ids: number[] = []; await engine.callWorker('test', [], { - 'test-message': ({ processId }) => { + 'test-message': ({ processId }: any) => { ids.push(processId); }, }); await engine.callWorker('test', [], { - 'test-message': ({ processId }) => { + 'test-message': ({ processId }: any) => { ids.push(processId); }, }); @@ -100,13 +100,13 @@ test.serial('callWorker should execute in two different threads', async (t) => { const ids: number[] = []; await engine.callWorker('test', [], { - 'test-message': ({ threadId }) => { + 'test-message': ({ threadId }: any) => { ids.push(threadId); }, }); await engine.callWorker('test', [], { - 'test-message': ({ threadId }) => { + 'test-message': ({ threadId }: any) => { ids.push(threadId); }, }); @@ -167,8 +167,6 @@ test.serial( test.serial( 'By default, worker thread cannot access parent env if env not set (with options arg)', async (t) => { - const defaultAPI = {} as EngineAPI; - const { callWorker, closeWorkers } = initWorkers( 
workerPath, { maxWorkers: 1 }, diff --git a/packages/engine-multi/test/api/execute.test.ts b/packages/engine-multi/test/api/execute.test.ts index deda81d22..9b46e2a74 100644 --- a/packages/engine-multi/test/api/execute.test.ts +++ b/packages/engine-multi/test/api/execute.test.ts @@ -1,8 +1,9 @@ import path from 'node:path'; import test from 'ava'; +import { createMockLogger } from '@openfn/logger'; + import initWorkers from '../../src/api/call-worker'; import execute from '../../src/api/execute'; -import { createMockLogger } from '@openfn/logger'; import { JOB_COMPLETE, JOB_START, @@ -13,20 +14,31 @@ import { } from '../../src/events'; import ExecutionContext from '../../src/classes/ExecutionContext'; -import type { RTEOptions } from '../../src/api'; -import type { WorkflowState } from '../../src/types'; -import { ExecuteOptions } from '../../src/engine'; +import type { + ExecuteOptions, + ExecutionContextOptions, + WorkflowState, +} from '../../src/types'; +import type { EngineOptions } from '../../src/engine'; const workerPath = path.resolve('dist/test/mock-run.js'); -const createContext = ({ state, options }) => { +const createContext = ({ + state, + options, +}: { + state: Partial; + options: Partial; +}) => { const logger = createMockLogger(); const { callWorker } = initWorkers(workerPath, {}, logger); const ctx = new ExecutionContext({ + // @ts-ignore state: state || { workflowId: 'x' }, logger, callWorker, + // @ts-ignore options, }); @@ -37,12 +49,15 @@ const createContext = ({ state, options }) => { const plan = { id: 'x', - jobs: [ - { - id: 'j', - expression: '() => 22', - }, - ], + workflow: { + steps: [ + { + id: 'j', + expression: '() => 22', + }, + ], + }, + options: {}, }; const options = { @@ -51,13 +66,13 @@ const options = { handleInstall: async () => {}, handleIsInstalled: async () => false, }, -} as RTEOptions; +} as Partial; test.serial('execute should run a job and return the result', async (t) => { const state = { id: 'x', plan, - } as 
WorkflowState; + } as Partial; const context = createContext({ state, options }); @@ -80,7 +95,7 @@ test.serial('should emit a workflow-start event', async (t) => { await execute(context); // No need to do a deep test of the event payload here - t.is(workflowStart.workflowId, 'x'); + t.is(workflowStart!.workflowId!, 'x'); }); test.serial('should emit a log event with the memory limit', async (t) => { @@ -89,7 +104,7 @@ test.serial('should emit a log event with the memory limit', async (t) => { plan, } as WorkflowState; - const logs = []; + const logs: any[] = []; const context = createContext({ state, @@ -122,8 +137,8 @@ test.serial('should emit a workflow-complete event', async (t) => { await execute(context); - t.is(workflowComplete.workflowId, 'x'); - t.is(workflowComplete.state, 22); + t.is(workflowComplete!.workflowId, 'x'); + t.is(workflowComplete!.state, 22); }); test.serial('should emit a job-start event', async (t) => { @@ -132,7 +147,7 @@ test.serial('should emit a job-start event', async (t) => { plan, } as WorkflowState; - let event; + let event: any; const context = createContext({ state, options }); @@ -152,7 +167,7 @@ test.serial('should emit a job-complete event', async (t) => { plan, } as WorkflowState; - let event; + let event: any; const context = createContext({ state, options }); @@ -166,19 +181,22 @@ test.serial('should emit a job-complete event', async (t) => { }); test.serial('should emit a log event', async (t) => { - let workflowLog; + let workflowLog: any; const plan = { id: 'y', - jobs: [ - { - expression: '() => { console.log("hi"); return 33 }', - }, - ], + workflow: { + steps: [ + { + expression: '() => { console.log("hi"); return 33 }', + }, + ], + }, + options: {}, }; const state = { id: 'y', plan, - } as WorkflowState; + } as Partial; const context = createContext({ state, options }); context.once(WORKFLOW_LOG, (evt) => (workflowLog = evt)); @@ -191,14 +209,16 @@ test.serial('should emit a log event', async (t) => { }); 
test.serial('log events are timestamped in hr time', async (t) => { - let workflowLog; + let workflowLog: any; const plan = { id: 'y', - jobs: [ - { - expression: '() => { console.log("hi"); return 33 }', - }, - ], + workflow: { + steps: [ + { + expression: '() => { console.log("hi"); return 33 }', + }, + ], + }, }; const state = { id: 'y', @@ -220,11 +240,13 @@ test.serial('should emit error on timeout', async (t) => { const state = { id: 'zz', plan: { - jobs: [ - { - expression: '() => { while(true) {} }', - }, - ], + workflow: { + steps: [ + { + expression: '() => { while(true) {} }', + }, + ], + }, }, } as WorkflowState; @@ -233,7 +255,7 @@ test.serial('should emit error on timeout', async (t) => { runTimeoutMs: 10, }; - let event; + let event: any; const context = createContext({ state, options: wfOptions }); @@ -280,7 +302,9 @@ test.serial('should emit CompileError if compilation fails', async (t) => { const state = { id: 'baa', plan: { - jobs: [{ id: 'j', expression: 'la la la' }], + workflow: { + steps: [{ id: 'j', expression: 'la la la' }], + }, }, } as WorkflowState; const context = createContext({ state, options: {} }); @@ -299,7 +323,7 @@ test.serial('should emit CompileError if compilation fails', async (t) => { }); test.serial('should stringify the whitelist array', async (t) => { - let passedOptions; + let passedOptions: any; const state = { id: 'x', @@ -312,8 +336,9 @@ test.serial('should stringify the whitelist array', async (t) => { }; const context = createContext({ state, options: opts }); + // @ts-ignore context.callWorker = (_command, args) => { - passedOptions = args[1]; + passedOptions = args[2]; }; await execute(context); diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index b6c0566a2..c5892d890 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -12,12 +12,15 @@ import { } from '../../src/api/lifecycle'; 
import { WorkflowState } from '../../src/types'; import ExecutionContext from '../../src/classes/ExecutionContext'; +import * as w from '../../src/worker/events'; const createContext = (workflowId: string, state?: any) => new ExecutionContext({ state: state || { id: workflowId }, logger: createMockLogger(), + // @ts-ignore callWorker: () => {}, + // @ts-ignore options: {}, }); @@ -26,10 +29,17 @@ test(`workflowStart: emits ${e.WORKFLOW_START}`, (t) => { const workflowId = 'a'; const context = createContext(workflowId); - const event = { workflowId, threadId: '123' }; + const event: w.WorkflowStartEvent = { + type: w.WORKFLOW_START, + workflowId, + threadId: '123', + }; context.on(e.WORKFLOW_START, (evt) => { - t.deepEqual(evt, event); + t.deepEqual(evt, { + workflowId, + threadId: '123', + }); done(); }); @@ -41,7 +51,11 @@ test('onWorkflowStart: updates state', (t) => { const workflowId = 'a'; const context = createContext(workflowId); - const event = { workflowId, threadId: '123' }; + const event: w.WorkflowStartEvent = { + type: w.WORKFLOW_START, + workflowId, + threadId: '123', + }; workflowStart(context, event); @@ -66,7 +80,12 @@ test(`workflowComplete: emits ${e.WORKFLOW_COMPLETE}`, (t) => { } as WorkflowState; const context = createContext(workflowId, state); - const event = { workflowId, state: result, threadId: '1' }; + const event: w.WorkflowCompleteEvent = { + type: w.WORKFLOW_START, + workflowId, + state: result, + threadId: '1', + }; context.on(e.WORKFLOW_COMPLETE, (evt) => { t.is(evt.workflowId, workflowId); @@ -88,7 +107,12 @@ test('workflowComplete: updates state', (t) => { startTime: Date.now() - 1000, } as WorkflowState; const context = createContext(workflowId, state); - const event = { workflowId, state: result, threadId: '1' }; + const event: w.WorkflowCompleteEvent = { + type: w.WORKFLOW_COMPLETE, + workflowId, + state: result, + threadId: '1', + }; workflowComplete(context, event); @@ -108,7 +132,8 @@ test(`job-start: emits ${e.JOB_START}`, 
(t) => { const context = createContext(workflowId, state); - const event = { + const event: w.JobStartEvent = { + type: w.JOB_START, workflowId, threadId: '1', jobId: 'j', @@ -136,7 +161,8 @@ test(`job-complete: emits ${e.JOB_COMPLETE}`, (t) => { const context = createContext(workflowId, state); - const event = { + const event: w.JobCompleteEvent = { + type: w.JOB_COMPLETE, workflowId, threadId: '1', jobId: 'j', @@ -167,14 +193,15 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { const context = createContext(workflowId); - const event = { + const event: w.LogEvent = { + type: w.LOG, workflowId, threadId: 'a', log: { level: 'info', name: 'job', message: JSON.stringify(['oh hai']), - time: Date.now() - 100, + time: (Date.now() - 100).toString(), }, }; @@ -206,6 +233,7 @@ test(`error: emits ${e.WORKFLOW_ERROR}`, (t) => { const err = new Error('test'); + // @ts-ignore error(context, { error: err }); }); }); diff --git a/packages/engine-multi/test/api/preload-credentials.test.ts b/packages/engine-multi/test/api/preload-credentials.test.ts index e31c04191..1a822fd71 100644 --- a/packages/engine-multi/test/api/preload-credentials.test.ts +++ b/packages/engine-multi/test/api/preload-credentials.test.ts @@ -1,6 +1,7 @@ import test from 'ava'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; + import preloadCredentials from '../../src/api/preload-credentials'; -import { CompiledExecutionPlan } from '@openfn/runtime'; // Not very good test coverage test('handle a plan with no credentials', async (t) => { @@ -13,18 +14,21 @@ test('handle a plan with no credentials', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: '.', - }, - { - expression: '.', - }, - { - expression: '.', - }, - ], - } as unknown as CompiledExecutionPlan; + workflow: { + steps: [ + { + expression: '.', + }, + { + expression: '.', + }, + { + expression: '.', + }, + ], + }, + options: {}, + } as ExecutionPlan; const planCopy = JSON.parse(JSON.stringify(plan)); const result = await 
preloadCredentials(plan, loader); @@ -43,26 +47,29 @@ test('handle a plan with credentials', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: '.', - configuration: 'a', - }, - { - expression: '.', - configuration: 'b', - }, - { - expression: '.', - configuration: 'c', - }, - ], - } as unknown as CompiledExecutionPlan; + workflow: { + steps: [ + { + expression: '.', + configuration: 'a', + }, + { + expression: '.', + configuration: 'b', + }, + { + expression: '.', + configuration: 'c', + }, + ], + }, + options: {}, + } as ExecutionPlan; - const result = await preloadCredentials(plan, loader); + await preloadCredentials(plan, loader); t.is(timesCalled, 3); - t.is(plan.jobs[0].configuration, 'loaded-a'); - t.is(plan.jobs[1].configuration, 'loaded-b'); - t.is(plan.jobs[2].configuration, 'loaded-c'); + t.is((plan.workflow.steps[0] as Job).configuration, 'loaded-a'); + t.is((plan.workflow.steps[1] as Job).configuration, 'loaded-b'); + t.is((plan.workflow.steps[2] as Job).configuration, 'loaded-c'); }); diff --git a/packages/engine-multi/test/engine.test.ts b/packages/engine-multi/test/engine.test.ts index ec8c2c062..d93c85f62 100644 --- a/packages/engine-multi/test/engine.test.ts +++ b/packages/engine-multi/test/engine.test.ts @@ -1,12 +1,11 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; -import createEngine, { ExecuteOptions } from '../src/engine'; +import createEngine, { InternalEngine } from '../src/engine'; import * as e from '../src/events'; -import { ExecutionPlan } from '@openfn/runtime'; - -// TOOD this becomes low level tests on the internal engine api +import type { ExecuteOptions } from '../src/types'; const logger = createMockLogger('', { level: 'debug' }); @@ -23,7 +22,19 @@ const options = { }, }; -let engine; +const createPlan = (expression: string = '.', id = 'a') => ({ + id, + workflow: { + steps: [ + { + expression, + }, 
+ ], + }, + options: {}, +}); + +let engine: InternalEngine; test.afterEach(async () => { logger._reset(); @@ -82,22 +93,17 @@ test.serial( const p = path.resolve('dist/test/worker-functions.js'); engine = await createEngine(options, p); - const plan = { - id: 'a', - jobs: [ - { - expression: '22', - }, - ], - }; + const plan = createPlan('22'); - engine.execute(plan).on(e.WORKFLOW_COMPLETE, ({ state, threadId }) => { - t.is(state, 22); - t.truthy(threadId); // proves (sort of) that this has run in a worker + engine + .execute(plan, {}) + .on(e.WORKFLOW_COMPLETE, ({ state, threadId }) => { + t.is(state, 22); + t.truthy(threadId); // proves (sort of) that this has run in a worker - // Apparently engine.destroy won't resolve if we return immediately - setTimeout(done, 1); - }); + // Apparently engine.destroy won't resolve if we return immediately + setTimeout(done, 1); + }); }); } ); @@ -107,16 +113,9 @@ test.serial('execute does not return internal state stuff', async (t) => { const p = path.resolve('dist/test/worker-functions.js'); engine = await createEngine(options, p); - const plan = { - id: 'a', - jobs: [ - { - expression: '22', - }, - ], - }; + const plan = createPlan(); - const result = engine.execute(plan, {}); + const result: any = engine.execute(plan, {}); // Execute returns an event listener t.truthy(result.on); t.truthy(result.once); @@ -133,7 +132,6 @@ test.serial('execute does not return internal state stuff', async (t) => { t.falsy(result['options']); done(); - // TODO is this still running? Does it matter? 
}); }); @@ -142,17 +140,13 @@ test.serial('listen to workflow-complete', async (t) => { const p = path.resolve('dist/test/worker-functions.js'); engine = await createEngine(options, p); - const plan = { - id: 'a', - jobs: [ - { - expression: '33', - }, - ], - }; + const plan = createPlan('33'); engine.listen(plan.id, { - [e.WORKFLOW_COMPLETE]: ({ state, threadId }) => { + [e.WORKFLOW_COMPLETE]: ({ + state, + threadId, + }: e.WorkflowCompletePayload) => { t.is(state, 33); t.truthy(threadId); // proves (sort of) that this has run in a worker @@ -160,7 +154,7 @@ test.serial('listen to workflow-complete', async (t) => { setTimeout(done, 1); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); }); @@ -171,22 +165,25 @@ test.serial('call listen before execute', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: '34', - }, - ], + workflow: { + steps: [ + { + expression: '34', + }, + ], + }, + options: {}, }; engine.listen(plan.id, { - [e.WORKFLOW_COMPLETE]: ({ state }) => { + [e.WORKFLOW_COMPLETE]: ({ state }: e.WorkflowCompletePayload) => { t.is(state, 34); // Apparently engine.destroy won't resolve if we return immediately setTimeout(done, 1); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); }); @@ -197,21 +194,24 @@ test.serial('catch and emit errors', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: 'throw new Error("test")', - }, - ], + workflow: { + steps: [ + { + expression: 'throw new Error("test")', + }, + ], + }, + options: {}, }; engine.listen(plan.id, { - [e.WORKFLOW_ERROR]: ({ message }) => { + [e.WORKFLOW_ERROR]: ({ message }: e.WorkflowErrorPayload) => { t.is(message, 'test'); done(); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); }); @@ -224,26 +224,31 @@ test.serial( const plan = { id: 'a', - jobs: [ - { - expression: 'while(true) {}', - }, - ], + workflow: { + steps: [ + { + expression: 'while(true) {}', + }, + ], + }, + options: {}, }; + // TODO Now then - this doesn't seem right 
+ // the timeout should be on the xplan const opts: ExecuteOptions = { runTimeoutMs: 10, }; engine.listen(plan.id, { - [e.WORKFLOW_ERROR]: ({ message, type }) => { + [e.WORKFLOW_ERROR]: ({ message, type }: e.WorkflowErrorPayload) => { t.is(type, 'TimeoutError'); t.regex(message, /failed to return within 10ms/); done(); }, }); - engine.execute(plan, opts); + engine.execute(plan, {}, opts); }); } ); @@ -263,22 +268,25 @@ test.serial( const plan = { id: 'a', - jobs: [ - { - expression: 'while(true) {}', - }, - ], + workflow: { + steps: [ + { + expression: 'while(true) {}', + }, + ], + }, + options: {}, }; engine.listen(plan.id, { - [e.WORKFLOW_ERROR]: ({ message, type }) => { + [e.WORKFLOW_ERROR]: ({ message, type }: e.WorkflowErrorPayload) => { t.is(type, 'TimeoutError'); t.regex(message, /failed to return within 22ms/); done(); }, }); - engine.execute(plan); + engine.execute(plan, {}); }); } ); diff --git a/packages/engine-multi/test/errors.test.ts b/packages/engine-multi/test/errors.test.ts index e9202584b..714d64ce6 100644 --- a/packages/engine-multi/test/errors.test.ts +++ b/packages/engine-multi/test/errors.test.ts @@ -1,11 +1,12 @@ import test from 'ava'; import path from 'node:path'; +import { createMockLogger } from '@openfn/logger'; import createEngine, { EngineOptions } from '../src/engine'; -import { createMockLogger } from '@openfn/logger'; import { WORKFLOW_ERROR } from '../src/events'; +import type { RuntimeEngine } from '../src/types'; -let engine; +let engine: RuntimeEngine; test.before(async () => { const logger = createMockLogger('', { level: 'debug' }); @@ -30,16 +31,19 @@ test.serial('syntax error: missing bracket', (t) => { return new Promise((done) => { const plan = { id: 'a', - jobs: [ - { - id: 'x', - // This is subtle syntax error - expression: 'fn((s) => { return s )', - }, - ], + workflow: { + steps: [ + { + id: 'x', + // This is subtle syntax error + expression: 'fn((s) => { return s )', + }, + ], + }, + options: {}, }; - 
engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'CompileError'); // compilation happens in the main thread t.is(evt.threadId, '-'); @@ -53,16 +57,19 @@ test.serial('syntax error: illegal throw', (t) => { return new Promise((done) => { const plan = { id: 'b', - jobs: [ - { - id: 'z', - // This is also subtle syntax error - expression: 'fn(() => throw "e")', - }, - ], + workflow: { + steps: [ + { + id: 'z', + // This is also subtle syntax error + expression: 'fn(() => throw "e")', + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'CompileError'); // compilation happens in the main thread t.is(evt.threadId, '-'); @@ -75,21 +82,24 @@ test.serial('syntax error: illegal throw', (t) => { test.serial('thread oom error', (t) => { return new Promise((done) => { const plan = { - id: 'a', - jobs: [ - { - expression: `export default [(s) => { + id: 'c', + workflow: { + steps: [ + { + expression: `export default [(s) => { s.a = []; while(true) { s.a.push(new Array(1e6).fill("oom")); } return s; }]`, - }, - ], + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'OOMError'); t.is(evt.severity, 'kill'); t.is(evt.message, 'Run exceeded maximum memory usage'); @@ -102,21 +112,24 @@ test.serial('thread oom error', (t) => { test.serial.skip('vm oom error', (t) => { return new Promise((done) => { const plan = { - id: 'b', - jobs: [ - { - expression: `export default [(s) => { + id: 'd', + workflow: { + steps: [ + { + expression: `export default [(s) => { s.a = []; while(true) { s.a.push(new Array(1e8).fill("oom")); } return s; }]`, - }, - ], + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 
'OOMError'); t.is(evt.severity, 'kill'); t.is(evt.message, 'Run exceeded maximum memory usage'); @@ -131,21 +144,24 @@ test.serial.skip('vm oom error', (t) => { test.serial.skip('execution error from async code', (t) => { return new Promise((done) => { const plan = { - id: 'a', - jobs: [ - { - // this error will throw within the promise, and so before the job completes - // But REALLY naughty code could throw after the job has finished - // In which case it'll be ignored - // Also note that the wrapping promise will never resolve - expression: `export default [(s) => new Promise((r) => { + id: 'e', + workflow: { + steps: [ + { + // this error will throw within the promise, and so before the job completes + // But REALLY naughty code could throw after the job has finished + // In which case it'll be ignored + // Also note that the wrapping promise will never resolve + expression: `export default [(s) => new Promise((r) => { setTimeout(() => { throw new Error(\"e1324\"); r() }, 10) })]`, - }, - ], + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'ExecutionError'); t.is(evt.severity, 'crash'); @@ -157,16 +173,19 @@ test.serial.skip('execution error from async code', (t) => { test.serial('emit a crash error on process.exit()', (t) => { return new Promise((done) => { const plan = { - id: 'z', - jobs: [ - { - adaptor: '@openfn/helper@1.0.0', - expression: 'export default [exit()]', - }, - ], + id: 'f', + workflow: { + steps: [ + { + adaptor: '@openfn/helper@1.0.0', + expression: 'export default [exit()]', + }, + ], + }, + options: {}, }; - engine.execute(plan).on(WORKFLOW_ERROR, (evt) => { + engine.execute(plan, {}).on(WORKFLOW_ERROR, (evt) => { t.is(evt.type, 'ExitError'); t.is(evt.severity, 'crash'); t.is(evt.message, 'Process exited with code: 42'); diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts 
index fed31f5b5..378123eaa 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -1,10 +1,15 @@ import test from 'ava'; import path from 'node:path'; -import createAPI from '../src/api'; import { createMockLogger } from '@openfn/logger'; +import createAPI from '../src/api'; +import type { RuntimeEngine } from '../src'; +import { ExecutionPlan } from '@openfn/lexicon'; + const logger = createMockLogger(); -let api; +let api: RuntimeEngine; + +const emptyState = {}; test.afterEach(() => { logger._reset(); @@ -23,15 +28,19 @@ const withFn = `function fn(f) { return (s) => f(s) } let idgen = 0; -const createPlan = (jobs?: any[]) => ({ - id: `${++idgen}`, - jobs: jobs || [ - { - id: 'j1', - expression: 'export default [s => s]', +const createPlan = (jobs?: any[]) => + ({ + id: `${++idgen}`, + workflow: { + steps: jobs || [ + { + id: 'j1', + expression: 'export default [s => s]', + }, + ], }, - ], -}); + options: {}, + } as ExecutionPlan); test.serial('trigger workflow-start', (t) => { return new Promise(async (done) => { @@ -44,7 +53,7 @@ test.serial('trigger workflow-start', (t) => { const plan = createPlan(); - api.execute(plan).on('workflow-start', (evt) => { + api.execute(plan, emptyState).on('workflow-start', (evt) => { t.is(evt.workflowId, plan.id); t.truthy(evt.threadId); t.pass('workflow started'); @@ -64,7 +73,7 @@ test.serial('trigger job-start', (t) => { const plan = createPlan(); - api.execute(plan).on('job-start', (e) => { + api.execute(plan, emptyState).on('job-start', (e) => { t.is(e.workflowId, '2'); t.is(e.jobId, 'j1'); t.truthy(e.threadId); @@ -86,7 +95,7 @@ test.serial('trigger job-complete', (t) => { const plan = createPlan(); - api.execute(plan).on('job-complete', (evt) => { + api.execute(plan, emptyState).on('job-complete', (evt) => { t.deepEqual(evt.next, []); t.log('duration:', evt.duration); // Very lenient duration test - this often comes in around 200ms in CI @@ -115,7 +124,7 @@ 
test.serial('trigger workflow-complete', (t) => { const plan = createPlan(); - api.execute(plan).on('workflow-complete', (evt) => { + api.execute(plan, emptyState).on('workflow-complete', (evt) => { t.falsy(evt.state.errors); t.is(evt.workflowId, plan.id); @@ -142,7 +151,7 @@ test.serial('trigger workflow-log for job logs', (t) => { let didLog = false; - api.execute(plan).on('workflow-log', (evt) => { + api.execute(plan, emptyState).on('workflow-log', (evt) => { if (evt.name === 'JOB') { didLog = true; t.deepEqual(evt.message, JSON.stringify(['hola'])); @@ -150,7 +159,7 @@ test.serial('trigger workflow-log for job logs', (t) => { } }); - api.execute(plan).on('workflow-complete', (evt) => { + api.execute(plan, emptyState).on('workflow-complete', (evt) => { t.true(didLog); t.falsy(evt.state.errors); done(); @@ -170,25 +179,26 @@ test.serial('log errors', (t) => { }, ]); - api.execute(plan).on('workflow-log', (evt) => { - if (evt.name === 'JOB') { - t.log(evt); - t.deepEqual( - evt.message, - JSON.stringify([ - { - name: 'Error', - message: 'hola', - }, - ]) - ); - t.pass('workflow logged'); - } - }); - - api.execute(plan).on('workflow-complete', (evt) => { - done(); - }); + api + .execute(plan, emptyState) + .on('workflow-log', (evt) => { + if (evt.name === 'JOB') { + t.log(evt); + t.deepEqual( + evt.message, + JSON.stringify([ + { + name: 'Error', + message: 'hola', + }, + ]) + ); + t.pass('workflow logged'); + } + }) + .on('workflow-complete', () => { + done(); + }); }); }); @@ -208,7 +218,7 @@ test.serial('trigger workflow-log for adaptor logs', (t) => { }, ]); - api.execute(plan).on('workflow-log', (evt) => { + api.execute(plan, emptyState).on('workflow-log', (evt) => { if (evt.name === 'ADA') { t.deepEqual(evt.message, JSON.stringify(['hola'])); t.pass('workflow logged'); @@ -230,7 +240,7 @@ test.serial('compile and run', (t) => { }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state 
}) => { t.deepEqual(state.data, 42); done(); }); @@ -249,7 +259,7 @@ test.serial('run without error if no state is returned', (t) => { }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.falsy(state); // Ensure there are no error logs @@ -272,7 +282,7 @@ test.serial('errors get nicely serialized', (t) => { }, ]); - api.execute(plan).on('job-error', (evt) => { + api.execute(plan, emptyState).on('job-error', (evt) => { t.is(evt.error.type, 'TypeError'); t.is(evt.error.severity, 'fail'); t.is( @@ -299,7 +309,7 @@ test.serial( }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state, { a: 1 }); done(); }); @@ -321,7 +331,7 @@ test.serial('use custom state-props-to-remove', (t) => { }, ]); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state, { configuration: {}, response: {} }); done(); }); @@ -354,7 +364,7 @@ test.serial('evaluate conditional edges', (t) => { const plan = createPlan(jobs); - api.execute(plan).on('workflow-complete', ({ state }) => { + api.execute(plan, emptyState).on('workflow-complete', ({ state }) => { t.deepEqual(state.data, 'b'); done(); }); @@ -411,17 +421,15 @@ test.serial('accept initial state', (t) => { const plan = createPlan(); - // important! 
The runtime must use both x and y as initial state - // if we run the runtime in strict mode, x will be ignored - plan.initialState = { + const input = { x: 1, data: { y: 1, }, }; - api.execute(plan).on('workflow-complete', ({ state }) => { - t.deepEqual(state, plan.initialState); + api.execute(plan, input).on('workflow-complete', ({ state }) => { + t.deepEqual(state, input); done(); }); }); diff --git a/packages/engine-multi/test/security.test.ts b/packages/engine-multi/test/security.test.ts index 45b42634d..8c760d1f0 100644 --- a/packages/engine-multi/test/security.test.ts +++ b/packages/engine-multi/test/security.test.ts @@ -8,7 +8,7 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; -import createEngine from '../src/engine'; +import createEngine, { InternalEngine } from '../src/engine'; const logger = createMockLogger('', { level: 'debug' }); @@ -20,7 +20,7 @@ const options = { maxWorkers: 1, }; -let engine; +let engine: InternalEngine; test.before(async () => { engine = await createEngine( @@ -43,11 +43,13 @@ test.serial('parent env is hidden from sandbox', async (t) => { }); test.serial('sandbox does not share a global scope', async (t) => { + // @ts-ignore t.is(global.x, undefined); // Set a global inside the first task await engine.callWorker('setGlobalX', [9]); + // @ts-ignore // (this should not affect us outside) t.is(global.x, undefined); diff --git a/packages/engine-multi/test/worker/mock-worker.test.ts b/packages/engine-multi/test/worker/mock-worker.test.ts index 679f663a1..2947ae0c0 100644 --- a/packages/engine-multi/test/worker/mock-worker.test.ts +++ b/packages/engine-multi/test/worker/mock-worker.test.ts @@ -26,7 +26,7 @@ const workers = createPool( test('execute a mock plan inside a worker thread', async (t) => { const plan = createPlan(); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 42 } 
}); }); @@ -35,7 +35,7 @@ test('execute a mock plan with data', async (t) => { id: 'j2', data: { input: 44 }, }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 44 } }); }); @@ -44,7 +44,7 @@ test('execute a mock plan with an expression', async (t) => { id: 'j2', expression: '() => ({ data: { answer: 46 } })', }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 46 } }); }); @@ -54,7 +54,7 @@ test('execute a mock plan with an expression which uses state', async (t) => { data: { input: 2 }, expression: '(s) => ({ data: { answer: s.data.input * 2 } })', }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 4 } }); }); @@ -68,7 +68,7 @@ test('execute a mock plan with a promise expression', async (t) => { }, 1); })`, }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { answer: 46 } }); }); @@ -78,16 +78,16 @@ test('expression state overrides data', async (t) => { data: { answer: 44 }, expression: '() => ({ data: { agent: "007" } })', }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.deepEqual(result, { data: { agent: '007' } }); }); test('write an exception to state', async (t) => { const plan = createPlan({ id: 'j2', - expression: 'ƸӜƷ', // it's a butterfly, obviously (and mmore importantly, invalid JSON) + expression: 'ƸӜƷ', // it's a butterfly, obviously (and more importantly, invalid JSON) }); - const result = await workers.exec('run', [plan]); + const result = await workers.exec('run', [plan, {}]); t.truthy(result.data); t.truthy(result.error); }); @@ -98,7 +98,7 @@ test('execute a mock plan with delay', async (t) => { id: 
'j1', _delay: 50, }); - await workers.exec('run', [plan]); + await workers.exec('run', [plan, {}]); const elapsed = new Date().getTime() - start; t.log(elapsed); t.assert(elapsed > 40); @@ -108,7 +108,7 @@ test('Publish workflow-start event', async (t) => { const plan = createPlan(); plan.id = 'xx'; let didFire = false; - await workers.exec('run', [plan], { + await workers.exec('run', [plan, {}], { on: ({ type }) => { if (type === e.WORKFLOW_START) { didFire = true; @@ -122,7 +122,7 @@ test('Publish workflow-complete event with state', async (t) => { const plan = createPlan(); let didFire = false; let state; - await workers.exec('run', [plan], { + await workers.exec('run', [plan, {}], { on: ({ type, ...args }) => { if (type === e.WORKFLOW_COMPLETE) { didFire = true; @@ -142,9 +142,9 @@ test('Publish a job log event', async (t) => { }`, }); let didFire = false; - let log; + let log: any; let id; - await workers.exec('run', [plan], { + await workers.exec('run', [plan, {}], { on: ({ workflowId, type, log: _log }) => { if (type === e.LOG) { didFire = true; @@ -154,7 +154,7 @@ test('Publish a job log event', async (t) => { }, }); t.true(didFire); - t.is(id, plan.id); + t.is(id, plan.id as any); t.is(log.level, 'info'); t.is(log.name, 'JOB'); diff --git a/packages/engine-multi/test/worker/pool.test.ts b/packages/engine-multi/test/worker/pool.test.ts index ab679efe9..aa74d6a6f 100644 --- a/packages/engine-multi/test/worker/pool.test.ts +++ b/packages/engine-multi/test/worker/pool.test.ts @@ -56,7 +56,7 @@ test.serial( async (t) => { const pool = createPool(workerPath, { maxWorkers: 1 }, logger); - const ids = {}; + const ids: Record = {}; const saveProcessId = (id: string) => { if (!ids[id]) { @@ -98,8 +98,8 @@ test('Remove a worker from the pool and release it when finished', async (t) => return p.then(() => { t.is(pool._pool.length, 5); - // the first thing in the queue should be a worker - t.true(pool[0] !== false); + // the last thing in the queue should be a worker + 
t.true(pool._pool[4] !== false); }); }); @@ -168,7 +168,7 @@ test('throw if the task throws', async (t) => { try { await pool.exec('throw', []); - } catch (e) { + } catch (e: any) { // NB e is not an error isntance t.is(e.message, 'test_error'); } @@ -179,7 +179,7 @@ test('throw if memory limit is exceeded', async (t) => { try { await pool.exec('blowMemory', [], { memoryLimitMb: 100 }); - } catch (e) { + } catch (e: any) { t.is(e.message, 'Run exceeded maximum memory usage'); t.is(e.name, 'OOMError'); } @@ -398,13 +398,13 @@ test('events should disconnect between executions', (t) => { return new Promise(async (done) => { const pool = createPool(workerPath, { capacity: 1 }, logger); - const counts = { + const counts: Record = { a: 0, b: 0, c: 0, }; - const on = (event) => { + const on = (event: { type: string; result: number }) => { if (event.type === 'test-message') { counts[event.result] += 1; } diff --git a/packages/engine-multi/tsconfig.json b/packages/engine-multi/tsconfig.json index b3d766fc1..fda756656 100644 --- a/packages/engine-multi/tsconfig.json +++ b/packages/engine-multi/tsconfig.json @@ -1,4 +1,4 @@ { "extends": "../../tsconfig.common", - "include": ["src/**/*.ts"] + "include": ["src/**/*.ts", "test/**/*.ts"] } diff --git a/packages/lexicon/README.md b/packages/lexicon/README.md new file mode 100644 index 000000000..baa736f5f --- /dev/null +++ b/packages/lexicon/README.md @@ -0,0 +1,42 @@ +The lexicon (aka the OpenFunctionicon) is a central repositoty of key type and word definitions. + +It's a types repo and glossary at the same time. + +## Overview + +The OpenFunction stack is built on the concepts of Workflows, Runs, Jobs and Expressions (and more). Some of these terms can be used interchangable, or used differently in certain contexts. + +Here are the key concepts + +- An **Expression** is a string of Javascript (or Javascript-like code) written to be run in the CLI or Lightning. 
+- A **Job** is an expression plus some metadata required to run it - typically an adaptor and credentials. + The terms Job and Expression are often used interchangeably. +- A **Workflow** is a series of steps to be executed in sequence. Steps are usually Jobs (and so job and step are often used + interchangeably), but can be Triggers. +- An **Execution Plan** is a Workflow plus some options which inform how it should be executed (ie, start node, timeout). + +The term "Execution plan" is mostly used internally and not exposed to users, and is usually interchangeable with Workflow. + +You can find formal type definition of these and more in `src/core.d.ts`. + +Lightning also introduces it's own terminolgy as it is standalone application and has features that the runtime itself does not. + +In Lightning, a Step can be a Job or a Trigger. Jobs are connected by Paths (also known sometimes as Edges), which may be conditional. + +You can find lightning-specific typings in `src/lightning.d.ts` + +## Usage + +This repo only contains type definitions. It is unlikely to be of use outside the repo - although users are free to import and use it. 
+ +To use the core types, simply import what you need: + +``` +import { ExecutionPlan } from '@openfn/lexicon +``` + +To use the lightning types, use `@openfn/lexicon/lightning` + +``` +import { Run } from '@openfn/lexicon/lightning +``` diff --git a/packages/lexicon/core.d.ts b/packages/lexicon/core.d.ts new file mode 100644 index 000000000..ec21ec13a --- /dev/null +++ b/packages/lexicon/core.d.ts @@ -0,0 +1,137 @@ +import { SanitizePolicies } from '@openfn/logger'; + +/** + * An execution plan is a portable definition of a Work Order, + * or, a unit of work to execute + */ +export type ExecutionPlan = { + id?: UUID; // this would be the run (nee attempt) id + workflow: Workflow; + options: WorkflowOptions; +}; + +/** + * A workflow is just a series of steps, executed start to finish + */ +export type Workflow = { + id?: UUID; // unique id used to track this workflow. Could be autogenerated + + // TODO: make required (worker and cli may have to generate a name) + name?: string; + + steps: Array; +}; + +/** + * A type of Step which executes code + * This is some openfn expression plus metadata (adaptor, credentials) + */ +export interface Job extends Step { + adaptor?: string; + expression: Expression; + configuration?: object | string; + state?: Omit | string; +} + +/** + * A raw openfn-js script to be executed by the runtime + * + * Can be compiled as part of a job. + * + * The expression itself has no metadata. 
It likely needs + * an adaptor and input state to run + */ +export type Expression = string; + +/** + * State is an object passed into a workflow and returned from a workflow + */ +export declare interface State { + // Core state props used by the runtime + configuration?: C; + data?: S; + errors?: Record; + + // Props added by common + references?: Array; + + // Props commonly used by other adaptors + index?: number; + response?: any; + query?: any; + + [other: string]: any; +} + +/** + * An operation function that runs in an Expression + */ +export declare interface Operation | State> { + (state: State): T; +} + +/** + * Options which can be set on a workflow as part of an execution plan + */ +export type WorkflowOptions = { + // TODO Both numbers in minutes maybe + timeout?: number; + stepTimeout?: number; + start?: StepId; + + // TODO not supported yet I don't think? + sanitize?: SanitizePolicies; +}; + +export type StepId = string; + +/** + * A thing to be run as part of a workflow + * (usually a job) + */ +export interface Step { + id?: StepId; + name?: string; // user-friendly name used in logging + + next?: string | Record; + previous?: StepId; +} + +/** + * Not actually keen on the node/edge semantics here + * Maybe StepLink? 
+ */ +export type StepEdge = boolean | string | ConditionalStepEdge; + +export type ConditionalStepEdge = { + condition?: string; // Javascript expression (function body, not function) + label?: string; + disabled?: boolean; +}; + +/** + * A no-op type of Step + */ +export interface Trigger extends Step {} + +/** + * An expression which has been compiled, and so includes import and export statements + */ +export type CompiledExpression = Expression; + +export type ErrorReport = { + type: string; // The name/type of error, ie Error, TypeError + message: string; // simple human readable message + stepId: StepId; // ID of the associated job + error: Error; // the original underlying error object + + code?: string; // The error code, if any (found on node errors) + stack?: string; // not sure this is useful? + data?: any; // General store for related error information +}; + +// TODO standard shape of error object in our stack + +type UUID = string; + +export type Lazy = T | string; diff --git a/packages/lexicon/index.d.ts b/packages/lexicon/index.d.ts new file mode 100644 index 000000000..5ee3e64b9 --- /dev/null +++ b/packages/lexicon/index.d.ts @@ -0,0 +1,2 @@ +export * from './core'; +export * as lightning from './lighting'; diff --git a/packages/lexicon/index.js b/packages/lexicon/index.js new file mode 100644 index 000000000..3d2bc3b3d --- /dev/null +++ b/packages/lexicon/index.js @@ -0,0 +1 @@ +export * as lightning from './lightning'; diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts new file mode 100644 index 000000000..dc2b653d0 --- /dev/null +++ b/packages/lexicon/lightning.d.ts @@ -0,0 +1,182 @@ +import type { SanitizePolicies } from '@openfn/logger'; +import { State } from './core'; + +export const API_VERSION: number; + +type StepId = string; + +/** + * Type definitions for Lightning and Worker interfaces + * + * This is the lightning-worker contract + * + * It is helpful to have these in the lexicon to avoid a circular 
dependency between lightning and the worker + * It's also kinda nice that the contract isn't in the worker itself, it's on neutral ground + */ + +/** + * An execution plan representing a Ligyhtning 'Run'. + * This represents the execution of a workflow. + * + * The data stucture that Lightning sends is converted by the Worker into + * a runtime ExecutionPlan (as found in Core) + */ +export type LightningPlan = { + id: string; + name?: string; + dataclip_id: string; + starting_node_id: string; + + triggers: Node[]; + jobs: Node[]; + edges: Edge[]; + + options?: LightningPlanOptions; +}; + +/** + * These are options that can be sent to the worker with an execution plan + * They broadly map to the Workflow Options that are fed straight into the runtime + * and saved to the plan itself + * (although at the time of writing timeout is handled by the worker, not the runtime) + */ +export type LightningPlanOptions = { + runTimeoutMs?: number; + sanitize?: SanitizePolicies; + start?: StepId; +}; + +/** + * This is a Job or Trigger node in a Lightning plan, + * AKA a Step. + * + * Sticking with the Node/Edge semantics to help distinguish the + * Lightning and runtime typings + */ +export type Node = { + id: string; + name?: string; + body?: string; + adaptor?: string; + credential?: any; + credential_id?: string; + type?: 'webhook' | 'cron'; // trigger only + state?: State; +}; + +/** + * This is a Path (or link) between two Jobs in a Plan. 
+ * + * Sticking with the Node/Edge semantics to help distinguish the + * Lightning and runtime typings + */ +export interface Edge { + id: string; + source_job_id?: string; + source_trigger_id?: string; + target_job_id: string; + name?: string; + condition?: string; + error_path?: boolean; + errors?: any; + enabled?: boolean; +} + +export type DataClip = Record; + +export type Credential = Record; + +export type ExitReasonStrings = + | 'success' + | 'fail' + | 'crash' + | 'kill' + | 'cancel' + | 'exception'; + +export type CONNECT = 'socket:connect'; + +// client left or joined a channel +export type CHANNEL_JOIN = 'socket:channel-join'; +export type CHANNEL_LEAVE = 'socket:channel-leave'; + +// Queue Channel + +// This is the event name +export type CLAIM = 'claim'; + +// This is the payload in the message sent to lightning +export type ClaimPayload = { demand?: number }; + +// This is the response from lightning +export type ClaimReply = { runs: Array }; +export type ClaimRun = { id: string; token: string }; + +// Run channel + +export type GET_PLAN = 'fetch:plan'; +export type GET_CREDENTIAL = 'fetch:credential'; +export type GET_DATACLIP = 'fetch:dataclip'; +export type RUN_START = 'run:start'; +export type RUN_COMPLETE = 'run:complete'; +export type RUN_LOG = 'run:log'; +export type STEP_START = 'step:start'; +export type STEP_COMPLETE = 'step:complete'; + +export type ExitReason = { + reason: ExitReasonStrings; + error_message: string | null; + error_type: string | null; +}; + +export type GetPlanPayload = void; // no payload +export type GetPlanReply = LightningPlan; + +export type GetCredentialPayload = { id: string }; +// credential in-line, no wrapper, arbitrary data +export type GetCredentialReply = {}; + +export type GetDataclipPayload = { id: string }; +export type GetDataClipReply = Uint8Array; // represents a json string Run + +export type RunStartPayload = void; // no payload +export type RunStartReply = {}; // no payload + +export type 
RunCompletePayload = ExitReason & { + final_dataclip_id?: string; // TODO this will be removed soon +}; +export type RunCompleteReply = undefined; + +export type RunLogPayload = { + message: Array; + timestamp: string; + run_id: string; + level?: string; + source?: string; // namespace + job_id?: string; + step_id?: string; +}; +export type RunLogReply = void; + +export type StepStartPayload = { + job_id: string; + step_id: string; + run_id?: string; + input_dataclip_id?: string; +}; +export type StepStartReply = void; + +export type StepCompletePayload = ExitReason & { + run_id?: string; + job_id: string; + step_id: string; + output_dataclip?: string; + output_dataclip_id?: string; + thread_id?: string; + mem: { + job: number; + system: number; + }; + duration: number; +}; +export type StepCompleteReply = void; diff --git a/packages/lexicon/lightning.js b/packages/lexicon/lightning.js new file mode 100644 index 000000000..de59e06c6 --- /dev/null +++ b/packages/lexicon/lightning.js @@ -0,0 +1,6 @@ +/* + * The API SPEC version represented in lighting.d.ts + * Note that the major version represents the API spec version, while the minor version + * represents the lexicon implementation of it + */ +export const API_VERSION = 1.1; diff --git a/packages/lexicon/package.json b/packages/lexicon/package.json new file mode 100644 index 000000000..0a19ddd8d --- /dev/null +++ b/packages/lexicon/package.json @@ -0,0 +1,26 @@ +{ + "name": "@openfn/lexicon", + "version": "1.0.0", + "description": "Central repo of names and type definitions", + "author": "Open Function Group ", + "license": "ISC", + "type": "module", + "main": "index.js", + "exports": { + ".": { + "import": { + "default": "./index.js", + "types": "./core.d.ts" + } + }, + "./lightning": { + "import": { + "default": "./lightning.js", + "types": "./lightning.d.ts" + } + } + }, + "devDependencies": { + "@openfn/logger": "workspace:^" + } +} diff --git a/packages/lightning-mock/package.json 
b/packages/lightning-mock/package.json index bae475e51..150e22e5e 100644 --- a/packages/lightning-mock/package.json +++ b/packages/lightning-mock/package.json @@ -18,6 +18,7 @@ "dependencies": { "@koa/router": "^12.0.0", "@openfn/engine-multi": "workspace:*", + "@openfn/lexicon": "workspace:^", "@openfn/logger": "workspace:*", "@openfn/runtime": "workspace:*", "@types/koa-logger": "^3.1.2", diff --git a/packages/lightning-mock/src/api-dev.ts b/packages/lightning-mock/src/api-dev.ts index 2ac6bd23b..c9477781a 100644 --- a/packages/lightning-mock/src/api-dev.ts +++ b/packages/lightning-mock/src/api-dev.ts @@ -2,19 +2,17 @@ * This module sets up a bunch of dev-only APIs * These are not intended to be reflected in Lightning itself */ +import crypto from 'node:crypto'; import Router from '@koa/router'; import { Logger } from '@openfn/logger'; -import crypto from 'node:crypto'; -import { RUN_COMPLETE } from './events'; - -import { ServerState } from './server'; - import type { + LightningPlan, RunCompletePayload, - Run, - DevServer, - LightningEvents, -} from './types'; +} from '@openfn/lexicon/lightning'; + +import { ServerState } from './server'; +import { RUN_COMPLETE } from './events'; +import type { DevServer, LightningEvents } from './types'; type Api = { startRun(runId: string): void; @@ -41,7 +39,7 @@ const setupDevAPI = ( app.getDataclip = (id: string) => state.dataclips[id]; - app.enqueueRun = (run: Run, workerId = 'rte') => { + app.enqueueRun = (run: LightningPlan, workerId = 'rte') => { state.runs[run.id] = run; state.results[run.id] = { workerId, // TODO @@ -140,7 +138,7 @@ const setupRestAPI = (app: DevServer, state: ServerState, logger: Logger) => { const router = new Router(); router.post('/run', (ctx) => { - const run = ctx.request.body as Run; + const run = ctx.request.body as LightningPlan; if (!run) { ctx.response.status = 400; diff --git a/packages/lightning-mock/src/api-sockets.ts b/packages/lightning-mock/src/api-sockets.ts index 
2aa013a55..816a19813 100644 --- a/packages/lightning-mock/src/api-sockets.ts +++ b/packages/lightning-mock/src/api-sockets.ts @@ -1,26 +1,6 @@ import { WebSocketServer } from 'ws'; import createLogger, { LogLevel, Logger } from '@openfn/logger'; import type { Server } from 'http'; - -import createPheonixMockSocketServer, { - DevSocket, - PhoenixEvent, - PhoenixEventStatus, -} from './socket-server'; -import { - RUN_COMPLETE, - RUN_LOG, - RUN_START, - CLAIM, - GET_PLAN, - GET_CREDENTIAL, - GET_DATACLIP, - STEP_COMPLETE, - STEP_START, -} from './events'; -import { extractRunId, stringify } from './util'; - -import type { ServerState } from './server'; import type { RunStartPayload, RunStartReply, @@ -41,7 +21,26 @@ import type { StepCompleteReply, StepStartPayload, StepStartReply, -} from './types'; +} from '@openfn/lexicon/lightning'; + +import createPheonixMockSocketServer, { + DevSocket, + PhoenixEvent, + PhoenixEventStatus, +} from './socket-server'; +import { + RUN_COMPLETE, + RUN_LOG, + RUN_START, + CLAIM, + GET_PLAN, + GET_CREDENTIAL, + GET_DATACLIP, + STEP_COMPLETE, + STEP_START, +} from './events'; +import { extractRunId, stringify } from './util'; +import type { ServerState } from './server'; // dumb cloning id // just an idea for unit tests @@ -232,10 +231,7 @@ const createSocketAPI = ( let payload = { status: 'ok' as PhoenixEventStatus, }; - if ( - !state.pending[runId] || - state.pending[runId].status !== 'started' - ) { + if (!state.pending[runId] || state.pending[runId].status !== 'started') { payload = { status: 'error', }; diff --git a/packages/lightning-mock/src/server.ts b/packages/lightning-mock/src/server.ts index 8191c23f9..c6ceac0db 100644 --- a/packages/lightning-mock/src/server.ts +++ b/packages/lightning-mock/src/server.ts @@ -10,10 +10,10 @@ import createLogger, { import createWebSocketAPI from './api-sockets'; import createDevAPI from './api-dev'; +import type { StepId } from '@openfn/lexicon'; +import type { RunLogPayload, LightningPlan } 
from '@openfn/lexicon/lightning'; +import type { DevServer } from './types'; -import type { RunLogPayload, Run, DevServer } from './types'; - -type StepId = string; type JobId = string; export type RunState = { @@ -29,7 +29,7 @@ export type ServerState = { credentials: Record; // list of runs by id - runs: Record; + runs: Record; // list of dataclips by id dataclips: Record; diff --git a/packages/lightning-mock/src/types.ts b/packages/lightning-mock/src/types.ts index ce9a492b3..b3613f986 100644 --- a/packages/lightning-mock/src/types.ts +++ b/packages/lightning-mock/src/types.ts @@ -1,50 +1,20 @@ import Koa from 'koa'; +import type { + LightningPlan, + DataClip, + Credential, +} from '@openfn/lexicon/lightning'; import type { ServerState } from './server'; -export type Node = { - id: string; - body?: string; - adaptor?: string; - credential?: any; // TODO tighten this up, string or object - type?: 'webhook' | 'cron'; // trigger only - state?: any; // Initial state / defaults -}; - -export interface Edge { - id: string; - source_job_id?: string; - source_trigger_id?: string; - target_job_id: string; - name?: string; - condition?: string; - error_path?: boolean; - errors?: any; -} - -// An run object returned by Lightning -export type Run = { - id: string; - dataclip_id: string; - starting_node_id: string; - - triggers: Node[]; - jobs: Node[]; - edges: Edge[]; - - options?: Record; // TODO type the expected options -}; - export type LightningEvents = 'log' | 'run-complete'; -export type DataClip = any; - export type DevServer = Koa & { state: ServerState; addCredential(id: string, cred: Credential): void; addDataclip(id: string, data: DataClip): void; - enqueueRun(run: Run): void; + enqueueRun(run: LightningPlan): void; destroy: () => void; - getRun(id: string): Run; + getRun(id: string): LightningPlan; getCredential(id: string): Credential; getDataclip(id: string): DataClip; getQueueLength(): number; @@ -57,80 +27,9 @@ export type DevServer = Koa & { runId: string, 
fn: (evt: any) => void ): void; - registerRun(run: Run): void; + registerRun(run: LightningPlan): void; removeAllListeners(): void; reset(): void; startRun(id: string): any; waitForResult(runId: string): Promise; }; - -/** - * These are duplicated from the worker and subject to drift! - * We cannot import them directly because it creates a circular build dependency mock <-> worker - * We cannot declare an internal private types module because the generated dts will try to import from it - * - * The list of types is small enough right now that this is just about manageable - **/ -export type ExitReasonStrings = - | 'success' - | 'fail' - | 'crash' - | 'kill' - | 'cancel' - | 'exception'; - -export type ExitReason = { - reason: ExitReasonStrings; - error_message: string | null; - error_type: string | null; -}; - -export type ClaimPayload = { demand?: number }; -export type ClaimReply = { runs: Array }; -export type ClaimRun = { id: string; token: string }; - -export type GetPlanPayload = void; // no payload -export type GetPlanReply = Run; - -export type GetCredentialPayload = { id: string }; -// credential in-line, no wrapper, arbitrary data -export type GetCredentialReply = {}; - -export type GetDataclipPayload = { id: string }; -export type GetDataClipReply = Uint8Array; // represents a json string Run - -export type RunStartPayload = void; // no payload -export type RunStartReply = {}; // no payload - -export type RunCompletePayload = ExitReason & { - final_dataclip_id?: string; // TODO this will be removed soon -}; -export type RunCompleteReply = undefined; - -export type RunLogPayload = { - message: Array; - timestamp: string; - run_id: string; - level?: string; - source?: string; // namespace - job_id?: string; - step_id?: string; -}; -export type RunLogReply = void; - -export type StepStartPayload = { - job_id: string; - step_id: string; - run_id?: string; - input_dataclip_id?: string; -}; -export type StepStartReply = void; - -export type StepCompletePayload 
= ExitReason & { - run_id?: string; - job_id: string; - step_id: string; - output_dataclip?: string; - output_dataclip_id?: string; -}; -export type StepCompleteReply = void; diff --git a/packages/lightning-mock/test/channels/claim.test.ts b/packages/lightning-mock/test/channels/claim.test.ts index f0c4fd6f8..54befecdb 100644 --- a/packages/lightning-mock/test/channels/claim.test.ts +++ b/packages/lightning-mock/test/channels/claim.test.ts @@ -8,8 +8,8 @@ const port = 4444; type Channel = any; -let server; -let client; +let server: any; +let client: any; test.before(async () => ({ server, client } = await setup(port))); @@ -31,7 +31,7 @@ const join = (channelName: string, params: any = {}): Promise => .receive('ok', () => { done(channel); }) - .receive('error', (err) => { + .receive('error', (err: any) => { // err will be the response message on the payload (ie, invalid_token, invalid_run_id etc) reject(new Error(err)); }); @@ -46,7 +46,7 @@ test.serial( const channel = await join('worker:queue'); // response is an array of run ids - channel.push(CLAIM).receive('ok', (response) => { + channel.push(CLAIM).receive('ok', (response: any) => { const { runs } = response; t.assert(Array.isArray(runs)); t.is(runs.length, 0); @@ -67,7 +67,7 @@ test.serial( const channel = await join('worker:queue'); // response is an array of run ids - channel.push(CLAIM).receive('ok', (response) => { + channel.push(CLAIM).receive('ok', (response: any) => { const { runs } = response; t.truthy(runs); t.is(runs.length, 1); diff --git a/packages/lightning-mock/test/channels/run.test.ts b/packages/lightning-mock/test/channels/run.test.ts index d01889c3d..59b9239f6 100644 --- a/packages/lightning-mock/test/channels/run.test.ts +++ b/packages/lightning-mock/test/channels/run.test.ts @@ -1,4 +1,10 @@ import test from 'ava'; +import type { + LightningPlan, + RunCompletePayload, + Credential, + DataClip, +} from '@openfn/lexicon/lightning'; import { setup } from '../util'; import { runs, 
credentials, dataclips } from '../data'; @@ -9,16 +15,14 @@ import { GET_DATACLIP, } from '../../src/events'; -import { RunCompletePayload } from '@openfn/ws-worker'; - const enc = new TextDecoder('utf-8'); type Channel = any; const port = 7777; -let server; -let client; +let server: any; +let client: any; // Set up a lightning server and a phoenix socket client before each test test.before(async () => ({ server, client } = await setup(port))); @@ -41,7 +45,7 @@ const join = (channelName: string, params: any = {}): Promise => .receive('ok', () => { done(channel); }) - .receive('error', (err) => { + .receive('error', (err: any) => { // err will be the response message on the payload (ie, invalid_token, invalid_run_id etc) reject(new Error(err)); }); @@ -72,7 +76,7 @@ test.serial('get run data through the run channel', async (t) => { server.startRun(run1.id); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_PLAN).receive('ok', (run) => { + channel.push(GET_PLAN).receive('ok', (run: LightningPlan) => { t.deepEqual(run, run1); done(); }); @@ -126,10 +130,12 @@ test.serial('get credential through the run channel', async (t) => { server.addCredential('a', credentials['a']); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_CREDENTIAL, { id: 'a' }).receive('ok', (result) => { - t.deepEqual(result, credentials['a']); - done(); - }); + channel + .push(GET_CREDENTIAL, { id: 'a' }) + .receive('ok', (result: Credential) => { + t.deepEqual(result, credentials['a']); + done(); + }); }); }); @@ -139,7 +145,7 @@ test.serial('get dataclip through the run channel', async (t) => { server.addDataclip('d', dataclips['d']); const channel = await join(`run:${run1.id}`, { token: 'a.b.c' }); - channel.push(GET_DATACLIP, { id: 'd' }).receive('ok', (result) => { + channel.push(GET_DATACLIP, { id: 'd' }).receive('ok', (result: any) => { const str = enc.decode(new Uint8Array(result)); const dataclip = JSON.parse(str); 
t.deepEqual(dataclip, dataclips['d']); @@ -159,7 +165,7 @@ test.serial( server.startRun(run1.id); server.addDataclip('result', result); - server.waitForResult(run1.id).then((dataclip) => { + server.waitForResult(run1.id).then((dataclip: DataClip) => { t.deepEqual(result, dataclip); done(); }); diff --git a/packages/lightning-mock/test/events/log.test.ts b/packages/lightning-mock/test/events/log.test.ts index 99b326011..f57d020b4 100644 --- a/packages/lightning-mock/test/events/log.test.ts +++ b/packages/lightning-mock/test/events/log.test.ts @@ -3,8 +3,8 @@ import { RUN_LOG } from '../../src/events'; import { join, setup, createRun } from '../util'; -let server; -let client; +let server: any; +let client: any; const port = 5501; @@ -26,7 +26,7 @@ test.serial('acknowledge valid message (run log)', async (t) => { const channel = await join(client, run.id); - channel.push(RUN_LOG, event).receive('ok', (evt) => { + channel.push(RUN_LOG, event).receive('ok', () => { t.pass('event acknowledged'); done(); }); @@ -50,7 +50,7 @@ test.serial('acknowledge valid message (job log)', async (t) => { const channel = await join(client, run.id); - channel.push(RUN_LOG, event).receive('ok', (evt) => { + channel.push(RUN_LOG, event).receive('ok', () => { t.pass('event acknowledged'); done(); }); diff --git a/packages/lightning-mock/test/events/run-complete.test.ts b/packages/lightning-mock/test/events/run-complete.test.ts index 42ef2f878..9f00fb575 100644 --- a/packages/lightning-mock/test/events/run-complete.test.ts +++ b/packages/lightning-mock/test/events/run-complete.test.ts @@ -2,8 +2,8 @@ import test from 'ava'; import { join, setup, createRun } from '../util'; import { RUN_COMPLETE } from '../../src/events'; -let server; -let client; +let server: any; +let client: any; const port = 5501; diff --git a/packages/lightning-mock/test/events/run-start.test.ts b/packages/lightning-mock/test/events/run-start.test.ts index 30781d7e4..51419f0ab 100644 --- 
a/packages/lightning-mock/test/events/run-start.test.ts +++ b/packages/lightning-mock/test/events/run-start.test.ts @@ -2,8 +2,8 @@ import test from 'ava'; import { join, setup, createRun } from '../util'; import { RUN_START } from '../../src/events'; -let server; -let client; +let server: any; +let client: any; const port = 5500; diff --git a/packages/lightning-mock/test/events/step-complete.test.ts b/packages/lightning-mock/test/events/step-complete.test.ts index 5422b0671..a23d48d62 100644 --- a/packages/lightning-mock/test/events/step-complete.test.ts +++ b/packages/lightning-mock/test/events/step-complete.test.ts @@ -3,8 +3,8 @@ import { STEP_COMPLETE } from '../../src/events'; import { join, setup, createRun } from '../util'; -let server; -let client; +let server: any; +let client: any; const port = 5501; @@ -24,7 +24,7 @@ test.serial('acknowledge valid message', async (t) => { const channel = await join(client, run.id); - channel.push(STEP_COMPLETE, event).receive('ok', (evt) => { + channel.push(STEP_COMPLETE, event).receive('ok', () => { t.pass('event acknowledged'); done(); }); @@ -88,7 +88,7 @@ test.serial('error if no output dataclip', async (t) => { }; const channel = await join(client, run.id); - channel.push(STEP_COMPLETE, event).receive('error', (e) => { + channel.push(STEP_COMPLETE, event).receive('error', (e: any) => { t.is(e.toString(), 'no output_dataclip'); done(); }); @@ -108,7 +108,7 @@ test.serial('error if no output dataclip_id', async (t) => { }; const channel = await join(client, run.id); - channel.push(STEP_COMPLETE, event).receive('error', (e) => { + channel.push(STEP_COMPLETE, event).receive('error', (e: any) => { t.is(e.toString(), 'no output_dataclip_id'); done(); }); diff --git a/packages/lightning-mock/test/events/step-start.test.ts b/packages/lightning-mock/test/events/step-start.test.ts index f870ba9b7..3f1924905 100644 --- a/packages/lightning-mock/test/events/step-start.test.ts +++ 
b/packages/lightning-mock/test/events/step-start.test.ts @@ -2,8 +2,8 @@ import test from 'ava'; import { STEP_START } from '../../src/events'; import { join, setup, createRun } from '../util'; -let server; -let client; +let server: any; +let client: any; const port = 5501; diff --git a/packages/lightning-mock/test/server.test.ts b/packages/lightning-mock/test/server.test.ts index ee73bd3b5..5b0e9a3c1 100644 --- a/packages/lightning-mock/test/server.test.ts +++ b/packages/lightning-mock/test/server.test.ts @@ -2,12 +2,12 @@ import test from 'ava'; import { Socket } from 'phoenix'; import { WebSocket } from 'ws'; +import type { LightningPlan } from '@openfn/lexicon/lightning'; -import { createRun, setup } from './util'; -import type { Run } from '../src/types'; +import { setup } from './util'; -let server; -let client; +let server: any; +let client: any; const port = 3333; @@ -22,7 +22,7 @@ test.serial('should setup an run at /POST /run', async (t) => { t.is(Object.keys(state.runs).length, 0); t.is(Object.keys(state.runs).length, 0); - const run: Run = { + const run: LightningPlan = { id: 'a', dataclip_id: 'a', starting_node_id: 'j', @@ -82,10 +82,10 @@ test.serial('reject ws connections without a token', (t) => { }); test.serial('respond to channel join requests', (t) => { - return new Promise(async (done, reject) => { + return new Promise(async (done) => { const channel = client.channel('x', {}); - channel.join().receive('ok', (res) => { + channel.join().receive('ok', (res: any) => { t.is(res, 'ok'); done(); }); diff --git a/packages/lightning-mock/test/socket-server.test.ts b/packages/lightning-mock/test/socket-server.test.ts index d0fc34e0c..c21dd6a9f 100644 --- a/packages/lightning-mock/test/socket-server.test.ts +++ b/packages/lightning-mock/test/socket-server.test.ts @@ -4,9 +4,9 @@ import { Socket } from 'phoenix'; import { WebSocket } from 'ws'; import createSocketServer from '../src/socket-server'; -let socket; -let server; -let messages; +let socket: any; 
+let server: any; +let messages: any; const wait = (duration = 10) => new Promise((resolve) => { @@ -19,6 +19,7 @@ test.beforeEach( messages = []; // @ts-ignore I don't care about missing server options here server = createSocketServer({ + // @ts-ignore state: { events: new EventEmitter(), }, @@ -48,13 +49,13 @@ test.serial('respond to connection join requests', async (t) => { channel .join() - .receive('ok', (resp) => { + .receive('ok', (resp: any) => { t.is(resp, 'ok'); channel.push('hello'); resolve(); }) - .receive('error', (e) => { + .receive('error', (e: any) => { console.log(e); }); }); @@ -64,7 +65,7 @@ test.serial('send a message', async (t) => { return new Promise((resolve) => { const channel = socket.channel('x', {}); - server.listenToChannel('x', (_ws, { payload, event }) => { + server.listenToChannel('x', (_ws: any, { payload, event }: any) => { t.is(event, 'hello'); t.deepEqual(payload, { x: 1 }); diff --git a/packages/lightning-mock/test/util.ts b/packages/lightning-mock/test/util.ts index 937ebf369..cabe11b1f 100644 --- a/packages/lightning-mock/test/util.ts +++ b/packages/lightning-mock/test/util.ts @@ -33,7 +33,7 @@ export const join = (client: any, runId: string): Promise => .receive('ok', () => { done(channel); }) - .receive('error', (err) => { + .receive('error', (err: any) => { reject(new Error(err)); }); }); diff --git a/packages/lightning-mock/tsconfig.json b/packages/lightning-mock/tsconfig.json index ba1452256..8906c56a5 100644 --- a/packages/lightning-mock/tsconfig.json +++ b/packages/lightning-mock/tsconfig.json @@ -1,6 +1,6 @@ { "extends": "../../tsconfig.common", - "include": ["src/**/*.ts"], + "include": ["src/**/*.ts", "test/**/*.ts"], "compilerOptions": { "module": "ESNext" } diff --git a/packages/runtime/README.md b/packages/runtime/README.md index 52723b6cd..7ae05271b 100644 --- a/packages/runtime/README.md +++ b/packages/runtime/README.md @@ -54,29 +54,35 @@ It is expected that that long-running runtimes will have some kind of 
purge func ## Execution Plans -The runtime can accept an Execution Plan (or workflow) as an input. +The runtime can accept an Execution Plan (or workflow) as an input. This defines a graph of of jobs (expressions) to run in sequence. Each node in the graph is a job, and contains a set of edges which tell the runtime what to execute next. The runtime will return the final state when there is nothing left to execute. -A workflow looks like this: -``` +An execution plan looks like this: + +```js { - start: 'a', - jobs: [{ - id: 'a', - expression: "source or path", - state: { /* default state */ }, - configuration: { /* credentials */ }, - next: { - 'b': true, // edge to another job - 'c': { condition: "state.data.age > 18", // conditional edge to another job - } - adaptor: "common", // it's complicated - }] + workflow: { + jobs: [{ + id: 'a', + expression: "source or path", + state: { /* default state */ }, + configuration: { /* credentials */ }, + next: { + 'b': true, // edge to another job + 'c': { condition: "state.data.age > 18", // conditional edge to another job + } + adaptor: "common", // it's complicated + }] + }, + options: { + start: 'a', + } } ``` + State and start node can be passed to the runtime as inputs. If no start node is provided, the first job in the jobs array will run first. @@ -88,9 +94,10 @@ The runtime itself does not use the `adaptor` key, as it expects jobs to be comp See src/types.ts for a full definition of an execution plan, and `test/runtime.test.ts` for examples. 
 At the time of writing, exectuion plans have some restrictions:
-* Jobs execute in series (but parallisation can be simulated)
-* A job can only have one input node (`a -> z <- b` is not allowed)
-* Jobs cannot have circular references (`a -> b -> a` is not allowed)
+
+- Jobs execute in series (but parallelisation can be simulated)
+- A job can only have one input node (`a -> z <- b` is not allowed)
+- Jobs cannot have circular references (`a -> b -> a` is not allowed)
 
 Support for more complex plans will be introduced later.
 
@@ -149,6 +156,7 @@ When a job calls `import` to import a dependent module, the runtime must resolve
 It does this through a `linker` function, which takes as arguments a package specifier and `vm` context, and an options object. It will load the module using a dynamic `import` and proxy the interface through a `vm.SyntheticModules`, usng the experimental `vm.SourceTextModule` API.
 
 Modules can be loaded from:
+
 - An explicit path (pass as a dictionary of name: path strings into the options)
 - The current working repo (see below)
 - The current working node_modules (should we somehow disallow this?)
diff --git a/packages/runtime/package.json b/packages/runtime/package.json index f4c1be126..d39198a72 100644 --- a/packages/runtime/package.json +++ b/packages/runtime/package.json @@ -27,6 +27,7 @@ "license": "ISC", "devDependencies": { "@openfn/language-common": "2.0.0-rc3", + "@openfn/lexicon": "workspace:^", "@types/mock-fs": "^4.13.1", "@types/node": "^18.15.13", "@types/semver": "^7.5.0", diff --git a/packages/runtime/src/execute/compile-plan.ts b/packages/runtime/src/execute/compile-plan.ts index f5c7291c0..ccb692f0c 100644 --- a/packages/runtime/src/execute/compile-plan.ts +++ b/packages/runtime/src/execute/compile-plan.ts @@ -1,17 +1,16 @@ import type { + CompiledEdge, CompiledExecutionPlan, - CompiledJobEdge, - CompiledJobNode, - ExecutionPlan, - JobEdge, + CompiledStep, } from '../types'; import compileFunction from '../modules/compile-function'; import { conditionContext, Context } from './context'; +import { ExecutionPlan, Job, StepEdge, Workflow } from '@openfn/lexicon'; const compileEdges = ( from: string, - edges: string | Record, + edges: string | Record, context: Context ) => { if (typeof edges === 'string') { @@ -19,7 +18,7 @@ const compileEdges = ( } const errs = []; - const result = {} as Record; + const result = {} as Record; for (const edgeId in edges) { try { const edge = edges[edgeId]; @@ -34,7 +33,7 @@ const compileEdges = ( if (typeof edge.condition === 'string') { (newEdge as any).condition = compileFunction(edge.condition, context); } - result[edgeId] = newEdge as CompiledJobEdge; + result[edgeId] = newEdge as CompiledEdge; } } catch (e: any) { errs.push( @@ -55,8 +54,8 @@ const compileEdges = ( // find the upstream job for a given job // Inefficient but fine for now (note that validation does something similar) // Note that right now we only support one upstream job -const findUpstream = (plan: ExecutionPlan, id: string) => { - for (const job of plan.jobs) { +const findUpstream = (workflow: Workflow, id: string) => { + for (const job 
of workflow.steps) { if (job.next) if (typeof job.next === 'string') { if (job.next === id) { @@ -69,7 +68,9 @@ const findUpstream = (plan: ExecutionPlan, id: string) => { }; export default (plan: ExecutionPlan) => { + const { workflow, options = {} } = plan; let autoJobId = 0; + const generateJobId = () => `job-${++autoJobId}`; const context = conditionContext(); @@ -89,42 +90,50 @@ export default (plan: ExecutionPlan) => { } }; - // ensure ids before we start - for (const job of plan.jobs) { + for (const job of workflow.steps) { if (!job.id) { job.id = generateJobId(); } } - const newPlan = { - jobs: {}, - start: plan.start, - initialState: plan.initialState, - } as Pick; - - for (const job of plan.jobs) { - const jobId = job.id!; - if (!newPlan.start) { - // Default the start job to the first - newPlan.start = jobId; - } - const newJob: CompiledJobNode = { - id: jobId, - expression: job.expression, // TODO we should compile this here + const newPlan: CompiledExecutionPlan = { + workflow: { + steps: {}, + }, + options: { + ...options, + start: options.start ?? 
workflow.steps[0]?.id!, + }, + }; + + const maybeAssign = (a: any, b: any, keys: Array) => { + keys.forEach((key) => { + if (a.hasOwnProperty(key)) { + b[key] = a[key]; + } + }); + }; + + for (const step of workflow.steps) { + const stepId = step.id!; + const newStep: CompiledStep = { + id: stepId, }; - if (job.state) { - newJob.state = job.state; - } - if (job.configuration) { - newJob.configuration = job.configuration; - } - if (job.next) { + + maybeAssign(step, newStep, [ + 'expression', + 'state', + 'configuration', + 'name', + ]); + + if (step.next) { trapErrors(() => { - newJob.next = compileEdges(jobId, job.next!, context); + newStep.next = compileEdges(stepId, step.next!, context); }); } - newJob.previous = findUpstream(plan, jobId); - newPlan.jobs[jobId] = newJob; + newStep.previous = findUpstream(workflow, stepId); + newPlan.workflow.steps[stepId] = newStep; } if (errs.length) { diff --git a/packages/runtime/src/execute/context.ts b/packages/runtime/src/execute/context.ts index 585567199..afe45cc52 100644 --- a/packages/runtime/src/execute/context.ts +++ b/packages/runtime/src/execute/context.ts @@ -1,5 +1,5 @@ import vm from 'node:vm'; -import type { State } from '../types'; +import type { State } from '@openfn/lexicon'; import type { Options } from '../runtime'; const freezeAll = ( @@ -15,7 +15,10 @@ const freezeAll = ( // Build a safe and helpful execution context // This will be shared by all jobs -export default (state: State, options: Pick) => { +export default ( + state: State, + options: Pick +) => { const logger = options.jobLogger ?? 
console; const globals = options.globals || {}; const context = vm.createContext( diff --git a/packages/runtime/src/execute/expression.ts b/packages/runtime/src/execute/expression.ts index 324f611bb..f2f4bc20a 100644 --- a/packages/runtime/src/execute/expression.ts +++ b/packages/runtime/src/execute/expression.ts @@ -1,8 +1,9 @@ import { printDuration, Logger } from '@openfn/logger'; import stringify from 'fast-safe-stringify'; +import type { Operation, State } from '@openfn/lexicon'; + import loadModule from '../modules/module-loader'; -import { Operation, JobModule, State, ExecutionContext } from '../types'; -import { Options, TIMEOUT } from '../runtime'; +import { Options, DEFAULT_TIMEOUT_MS } from '../runtime'; import buildContext, { Context } from './context'; import defaultExecute from '../util/execute'; import clone from '../util/clone'; @@ -16,25 +17,27 @@ import { assertRuntimeError, assertSecurityKill, } from '../errors'; +import type { JobModule, ExecutionContext } from '../types'; export type ExecutionErrorWrapper = { state: any; error: any; }; +// TODO don't send the whole context because it's a bit confusing - just the options maybe? export default ( ctx: ExecutionContext, expression: string | Operation[], - initialState: State + input: State ) => new Promise(async (resolve, reject) => { let duration = Date.now(); - const { logger, opts = {} } = ctx; + const { logger, plan, opts = {} } = ctx; try { - const timeout = opts.timeout ?? TIMEOUT; + const timeout = plan.options?.timeout ?? 
DEFAULT_TIMEOUT_MS; // Setup an execution context - const context = buildContext(initialState, opts); + const context = buildContext(input, opts); const { operations, execute } = await prepareJob( expression, @@ -61,19 +64,27 @@ export default ( } // Note that any errors will be trapped by the containing Job - const result = await reducer(initialState); + const result = await reducer(input); clearTimeout(tid); logger.debug('Expression complete!'); duration = Date.now() - duration; - const finalState = prepareFinalState(opts, result, logger); + const finalState = prepareFinalState( + result, + logger, + opts.statePropsToRemove + ); // return the final state resolve(finalState); } catch (e: any) { // whatever initial state looks like now, clean it and report it back - const finalState = prepareFinalState(opts, initialState, logger); + const finalState = prepareFinalState( + input, + logger, + opts.statePropsToRemove + ); duration = Date.now() - duration; let finalError; try { @@ -106,7 +117,7 @@ export const wrapOperation = ( // TODO should we warn if an operation does not return state? 
// the trick is saying WHICH operation without source mapping const duration = printDuration(new Date().getTime() - start); - logger.info(`Operation ${name} complete in ${duration}`); + logger.debug(`Operation ${name} complete in ${duration}`); return result; }; }; @@ -135,43 +146,27 @@ const prepareJob = async ( } }; -const assignKeys = ( - source: Record, - target: Record, - keys: string[] -) => { - keys.forEach((k) => { - if (source.hasOwnProperty(k)) { - target[k] = source[k]; - } - }); - return target; -}; - // TODO this is suboptimal and may be slow on large objects // (especially as the result get stringified again downstream) -const prepareFinalState = (opts: Options, state: any, logger: Logger) => { +const prepareFinalState = ( + state: any, + logger: Logger, + statePropsToRemove?: string[] +) => { if (state) { - let statePropsToRemove; - if (opts.hasOwnProperty('statePropsToRemove')) { - ({ statePropsToRemove } = opts); - } else { + if (!statePropsToRemove) { // As a strict default, remove the configuration key // tbh this should happen higher up in the stack but it causes havoc in unit testing statePropsToRemove = ['configuration']; } - if (statePropsToRemove && statePropsToRemove.forEach) { - statePropsToRemove.forEach((prop) => { - if (state.hasOwnProperty(prop)) { - delete state[prop]; - logger.debug(`Removed ${prop} from final state`); - } - }); - } - if (opts.strict) { - state = assignKeys(state, {}, ['data', 'error', 'references']); - } + statePropsToRemove.forEach((prop) => { + if (state.hasOwnProperty(prop)) { + delete state[prop]; + logger.debug(`Removed ${prop} from final state`); + } + }); + const cleanState = stringify(state); return JSON.parse(cleanState); } diff --git a/packages/runtime/src/execute/plan.ts b/packages/runtime/src/execute/plan.ts index b4085d2e3..ee32431fa 100644 --- a/packages/runtime/src/execute/plan.ts +++ b/packages/runtime/src/execute/plan.ts @@ -1,19 +1,22 @@ import type { Logger } from '@openfn/logger'; -import 
executeJob from './job'; +import type { ExecutionPlan, State, Lazy } from '@openfn/lexicon'; + +import executeStep from './step'; import compilePlan from './compile-plan'; -import type { ExecutionPlan } from '../types'; import type { Options } from '../runtime'; import validatePlan from '../util/validate-plan'; import createErrorReporter from '../util/log-error'; import { NOTIFY_STATE_LOAD } from '../events'; +import { CompiledExecutionPlan } from '../types'; const executePlan = async ( plan: ExecutionPlan, + input: Lazy | undefined, opts: Options, logger: Logger ) => { - let compiledPlan; + let compiledPlan: CompiledExecutionPlan; try { validatePlan(plan); compiledPlan = compilePlan(plan); @@ -23,8 +26,11 @@ const executePlan = async ( logger.error('Aborting'); throw e; } + logger.info(`Executing ${plan.workflow.name || plan.id}`); + + const { workflow, options } = compiledPlan; - let queue: string[] = [opts.start || compiledPlan.start]; + let queue: string[] = [options.start]; const ctx = { plan: compiledPlan, @@ -34,35 +40,31 @@ const executePlan = async ( notify: opts.callbacks?.notify ?? 
(() => {}), }; - type State = any; // record of state returned by every job const stateHistory: Record = {}; + // Record of state on lead nodes (nodes with no next) const leaves: Record = {}; - let { initialState } = compiledPlan; - if (typeof initialState === 'string') { - const id = initialState; + if (typeof input === 'string') { + const id = input; const startTime = Date.now(); logger.debug(`fetching intial state ${id}`); - initialState = await opts.callbacks?.resolveState?.(id); - + input = await opts.callbacks?.resolveState?.(id); const duration = Date.now() - startTime; opts.callbacks?.notify?.(NOTIFY_STATE_LOAD, { duration, jobId: id }); logger.success(`loaded state for ${id} in ${duration}ms`); - - // TODO catch and re-throw } // Right now this executes in series, even if jobs are parallelised while (queue.length) { const next = queue.shift()!; - const job = compiledPlan.jobs[next]; + const job = workflow.steps[next]; - const prevState = stateHistory[job.previous || ''] ?? initialState; + const prevState = stateHistory[job.previous || ''] ?? input; - const result = await executeJob(ctx, job, prevState); + const result = await executeStep(ctx, job, prevState); stateHistory[next] = result.state; if (!result.next.length) { @@ -78,7 +80,8 @@ const executePlan = async ( if (Object.keys(leaves).length > 1) { return leaves; } - // Return a single value + + // Otherwise return a single value return Object.values(leaves)[0]; }; diff --git a/packages/runtime/src/execute/job.ts b/packages/runtime/src/execute/step.ts similarity index 69% rename from packages/runtime/src/execute/job.ts rename to packages/runtime/src/execute/step.ts index b5880a59d..47ee18168 100644 --- a/packages/runtime/src/execute/job.ts +++ b/packages/runtime/src/execute/step.ts @@ -1,16 +1,12 @@ // TODO hmm. 
I have a horrible feeling that the callbacks should go here // at least the resolvesrs -import executeExpression, { ExecutionErrorWrapper } from './expression'; +import type { Job, State, StepId } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; +import executeExpression, { ExecutionErrorWrapper } from './expression'; import clone from '../util/clone'; import assembleState from '../util/assemble-state'; -import type { - CompiledJobNode, - ExecutionContext, - JobNodeID, - State, -} from '../types'; -import { Logger } from '@openfn/logger'; +import type { CompiledStep, ExecutionContext } from '../types'; import { EdgeConditionError } from '../errors'; import { NOTIFY_INIT_COMPLETE, @@ -21,7 +17,7 @@ import { } from '../events'; const loadCredentials = async ( - job: CompiledJobNode, + job: Job, resolver: (id: string) => Promise ) => { if (typeof job.configuration === 'string') { @@ -32,10 +28,7 @@ const loadCredentials = async ( return job.configuration; }; -const loadState = async ( - job: CompiledJobNode, - resolver: (id: string) => Promise -) => { +const loadState = async (job: Job, resolver: (id: string) => Promise) => { if (typeof job.state === 'string') { // TODO let's log and notify something useful if we're lazy loading // TODO throw a controlled error if there's no resolver @@ -44,7 +37,7 @@ const loadState = async ( return job.state; }; -const calculateNext = (job: CompiledJobNode, result: any, logger: Logger) => { +const calculateNext = (job: CompiledStep, result: any, logger: Logger) => { const next: string[] = []; if (job.next) { for (const nextJobId in job.next) { @@ -82,50 +75,54 @@ const calculateNext = (job: CompiledJobNode, result: any, logger: Logger) => { // The job handler is responsible for preparing the job // and working out where to go next // it'll resolve credentials and state and notify how long init took -const executeJob = async ( +const executeStep = async ( ctx: ExecutionContext, - job: CompiledJobNode, - 
initialState: State = {} -): Promise<{ next: JobNodeID[]; state: any }> => { + step: CompiledStep, + input: State = {} +): Promise<{ next: StepId[]; state: any }> => { const { opts, notify, logger, report } = ctx; const duration = Date.now(); - const jobId = job.id; + const stepId = step.id; + + // The expression SHOULD return state, but COULD return anything + let result: any = input; + let next: string[] = []; + let didError = false; + + if (step.expression) { + const job = step as Job; + const jobId = job.id!; + const jobName = job.name || job.id; - notify(NOTIFY_INIT_START, { jobId }); + // The notify events only apply to jobs - not steps - so names don't need to be changed here + notify(NOTIFY_INIT_START, { jobId }); - // lazy load config and state - const configuration = await loadCredentials( - job, - opts.callbacks?.resolveCredential! // cheat - we need to handle the error case here - ); + // lazy load config and state + const configuration = await loadCredentials( + job, + opts.callbacks?.resolveCredential! // cheat - we need to handle the error case here + ); - const globals = await loadState( - job, - opts.callbacks?.resolveState! // and here - ); + const globals = await loadState( + job, + opts.callbacks?.resolveState! 
// and here + ); - const state = assembleState( - clone(initialState), - configuration, - globals, - opts.strict - ); + const state = assembleState(clone(input), configuration, globals); - notify(NOTIFY_INIT_COMPLETE, { jobId, duration: Date.now() - duration }); + notify(NOTIFY_INIT_COMPLETE, { + jobId, + duration: Date.now() - duration, + }); - // We should by this point have validated the plan, so the job MUST exist + // We should by this point have validated the plan, so the step MUST exist - const timerId = `job-${jobId}`; - logger.timer(timerId); - logger.always('Starting job', jobId); + const timerId = `step-${jobId}`; + logger.timer(timerId); + logger.info(`Starting step ${jobName}`); - // The expression SHOULD return state, but COULD return anything - let result: any = state; - let next: string[] = []; - let didError = false; - if (job.expression) { const startTime = Date.now(); try { // TODO include the upstream job? @@ -140,10 +137,10 @@ const executeJob = async ( result = state; const duration = logger.timer(timerId); - logger.error(`Failed job ${jobId} after ${duration}`); + logger.error(`Failed step ${jobName} after ${duration}`); report(state, jobId, error); - next = calculateNext(job, result, logger); + next = calculateNext(step, result, logger); notify(NOTIFY_JOB_ERROR, { duration: Date.now() - startTime, @@ -165,7 +162,7 @@ const executeJob = async ( if (!didError) { const humanDuration = logger.timer(timerId); - logger.success(`Completed job ${jobId} in ${humanDuration}`); + logger.success(`Completed step ${jobName} in ${humanDuration}`); // Take a memory snapshot // IMPORTANT: this runs _after_ the state object has been serialized @@ -179,10 +176,10 @@ const executeJob = async ( const humanJobMemory = Math.round(jobMemory / 1024 / 1024); const humanSystemMemory = Math.round(systemMemory / 1024 / 1024); logger.debug( - `Final memory usage: [job ${humanJobMemory}mb] [system ${humanSystemMemory}mb]` + `Final memory usage: [step ${humanJobMemory}mb] 
[system ${humanSystemMemory}mb]` ); - next = calculateNext(job, result, logger); + next = calculateNext(step, result, logger); notify(NOTIFY_JOB_COMPLETE, { duration: Date.now() - duration, state: result, @@ -196,16 +193,16 @@ const executeJob = async ( } } else { // calculate next for trigger nodes - next = calculateNext(job, result, logger); + next = calculateNext(step, result, logger); } if (next.length && !didError && !result) { logger.warn( - `WARNING: job ${jobId} did not return a state object. This may cause downstream jobs to fail.` + `WARNING: step ${stepId} did not return a state object. This may cause downstream jobs to fail.` ); } return { next, state: result }; }; -export default executeJob; +export default executeStep; diff --git a/packages/runtime/src/modules/module-loader.ts b/packages/runtime/src/modules/module-loader.ts index 5e87653bb..fa239b319 100644 --- a/packages/runtime/src/modules/module-loader.ts +++ b/packages/runtime/src/modules/module-loader.ts @@ -4,7 +4,7 @@ import vm, { Context } from './experimental-vm'; import mainLinker, { Linker, LinkerOptions } from './linker'; -import type { Operation } from '../types'; +import type { Operation } from '@openfn/lexicon'; import type { Logger } from '@openfn/logger'; type Options = LinkerOptions & { diff --git a/packages/runtime/src/runtime.ts b/packages/runtime/src/runtime.ts index 6d91ea408..4c8e3a892 100644 --- a/packages/runtime/src/runtime.ts +++ b/packages/runtime/src/runtime.ts @@ -1,27 +1,16 @@ import { createMockLogger, Logger } from '@openfn/logger'; - -import type { - Operation, - ExecutionPlan, - State, - JobNodeID, - ExecutionCallbacks, -} from './types'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; +import type { ExecutionCallbacks } from './types'; import type { LinkerOptions } from './modules/linker'; import executePlan from './execute/plan'; -import clone from './util/clone'; -import parseRegex from './util/regex'; +import { defaultState, parseRegex, clone } from 
'./util/index'; -export const TIMEOUT = 5 * 60 * 1000; // 5 minutes +export const DEFAULT_TIMEOUT_MS = 5 * 60 * 1000; // 5 minutes export type Options = { - start?: JobNodeID; logger?: Logger; jobLogger?: Logger; - timeout?: number; // this is timeout used per job, not per workflow - strict?: boolean; // Be strict about handling of state returned from jobs - // Treat state as immutable (likely to break in legacy jobs) immutableState?: boolean; @@ -35,9 +24,9 @@ export type Options = { callbacks?: ExecutionCallbacks; // inject globals into the environment + // TODO leaving this here for now, but maybe its actually on the xplan? globals?: any; - // all listed props will be removed from the state object at the end of a job statePropsToRemove?: string[]; }; @@ -47,27 +36,46 @@ type RawOptions = Omit & { }; }; -const defaultState = { data: {}, configuration: {} }; - // Log nothing by default const defaultLogger = createMockLogger(); -// TODO doesn't really make sense to pass in a state object to an xplan, -// so maybe state becomes an option in the opts object +const loadPlanFromString = (expression: string, logger: Logger) => { + const plan: ExecutionPlan = { + workflow: { + steps: [ + { + expression, + }, + ], + }, + options: {}, + }; + + logger.debug('Generated execution plan for incoming expression'); + logger.debug(plan); + + return plan; +}; + const run = ( - expressionOrXPlan: string | Operation[] | ExecutionPlan, - state?: State, + xplan: Partial | string, + input?: State, opts: RawOptions = {} ) => { const logger = opts.logger || defaultLogger; - // Strict state handling by default - if (!opts.hasOwnProperty('strict')) { - opts.strict = true; + if (typeof xplan === 'string') { + xplan = loadPlanFromString(xplan, logger); } - if (!opts.hasOwnProperty('statePropsToRemove')) { - opts.statePropsToRemove = ['configuration']; + + if (!xplan.options) { + xplan.options = {}; } + + if (!input) { + input = clone(defaultState); + } + if (opts.linker?.whitelist) { 
opts.linker.whitelist = opts.linker.whitelist.map((w) => { if (typeof w === 'string') { @@ -76,36 +84,7 @@ const run = ( return w; }); } - - // TODO the plan doesn't have an id, should it be given one? - // Ditto the jobs? - let plan: ExecutionPlan; - if ( - typeof expressionOrXPlan == 'string' || - !expressionOrXPlan.hasOwnProperty('jobs') - ) { - // Build an execution plan for an incoming expression - plan = { - jobs: [ - { - expression: expressionOrXPlan, - }, - ], - } as ExecutionPlan; - logger.debug('Generated execution plan for incoming expression'); - // TODO how do we sanitise state.config? - logger.debug(plan); - } else { - plan = expressionOrXPlan as ExecutionPlan; - } - - if (state) { - plan.initialState = clone(state); - } else if (!plan.initialState) { - plan.initialState = defaultState; - } - - return executePlan(plan, opts as Options, logger); + return executePlan(xplan as ExecutionPlan, input, opts as Options, logger); }; export default run; diff --git a/packages/runtime/src/types.ts b/packages/runtime/src/types.ts index 296907941..a869cc73a 100644 --- a/packages/runtime/src/types.ts +++ b/packages/runtime/src/types.ts @@ -1,4 +1,4 @@ -// TMP just thinking through things +import { Operation, StepId, WorkflowOptions, Step } from '@openfn/lexicon'; import { Logger } from '@openfn/logger'; import { Options } from './runtime'; @@ -12,100 +12,29 @@ import { NOTIFY_STATE_LOAD, } from './events'; -// I dont think this is useufl? We can just use error.name of the error object -export type ErrorTypes = - | 'AdaptorNotFound' // probably a CLI validation thing - | 'PackageNotFound' // Linker failed to load a dependency - | 'ExpressionTimeout' // An expression (job) failed to return before the timeout - | 'AdaptorException' // Bubbled out of adaptor code - | 'RuntimeException'; // Caused by an exception in a job. JobException? What about "expected" errors from adaptors? 
- -export type ErrorReport = { - type: string; // The name/type of error, ie Error, TypeError - message: string; // simple human readable message - jobId: JobNodeID; // ID of the associated job - error: Error; // the original underlying error object - - code?: string; // The error code, if any (found on node errors) - stack?: string; // not sure this is useful? - data?: any; // General store for related error information -}; - -export declare interface State { - configuration?: C; - state?: S; - references?: Array; - index?: number; - - // New error capture object - // Synonyms: exceptions, problems, issues, err, failures - errors?: Record; - - // Legacy error property from old platform - // Adaptors may use this? - error?: any[]; - - // Note that other properties written to state may be lost between jobs - [other: string]: any; -} - -export declare interface Operation | State> { - (state: State): T; -} - -export type ExecutionPlan = { - id?: string; // UUID for this plan - jobs: JobNode[]; - start?: JobNodeID; - initialState?: State | string; -}; - -export type JobNode = { - id?: JobNodeID; - - // The runtime itself will ignore the adaptor flag - // The adaptor import should be compiled in by the compiler, and dependency managed by the runtime manager - adaptor?: string; - - expression?: string | Operation[]; // the code we actually want to execute. Can be a path. 
- - configuration?: object | string; // credential object - - // TODO strings aren't actually suppored here yet - state?: Omit | string; // default state (globals) - - next?: string | Record; - previous?: JobNodeID; -}; - -export type JobEdge = - | boolean - | string - | { - condition?: string; // Javascript expression (function body, not function) - label?: string; - disabled?: boolean; - }; - -export type JobNodeID = string; - -export type CompiledJobEdge = +export type CompiledEdge = | boolean | { condition?: Function; disabled?: boolean; }; -export type CompiledJobNode = Omit & { - id: JobNodeID; - next?: Record; +export type CompiledStep = Omit & { + id: StepId; + next?: Record; + + [other: string]: any; }; +export type Lazy = string | T; + export type CompiledExecutionPlan = { - id?: string; - start: JobNodeID; - jobs: Record; - initialState?: State | string; + workflow: { + steps: Record; + }; + options: WorkflowOptions & { + start: StepId; + }; }; export type JobModule = { @@ -119,7 +48,6 @@ type NotifyHandler = ( payload: NotifyEventsLookup[typeof event] ) => void; -// TODO difficulty: this is not the same as a vm execution context export type ExecutionContext = { plan: CompiledExecutionPlan; logger: Logger; @@ -183,7 +111,7 @@ export type NotifyEventsLookup = { }; export type ExecutionCallbacks = { - notify: NotifyHandler; + notify?: NotifyHandler; resolveState?: (stateId: string) => Promise; resolveCredential?: (credentialId: string) => Promise; }; diff --git a/packages/runtime/src/util/assemble-state.ts b/packages/runtime/src/util/assemble-state.ts index 2f1f69204..84f5fc12e 100644 --- a/packages/runtime/src/util/assemble-state.ts +++ b/packages/runtime/src/util/assemble-state.ts @@ -13,15 +13,12 @@ const assembleData = (initialData: any, defaultData = {}) => { const assembleState = ( initialState: any = {}, // previous or initial state configuration = {}, - defaultState: any = {}, // This is default state provided by the job - strictState: boolean = 
true + defaultState: any = {} // This is default state provided by the job ) => { - const obj = strictState - ? {} - : { - ...defaultState, - ...initialState, - }; + const obj = { + ...defaultState, + ...initialState, + }; if (initialState.references) { obj.references = initialState.references; diff --git a/packages/runtime/src/util/clone.ts b/packages/runtime/src/util/clone.ts index d81320f4a..408f108a6 100644 --- a/packages/runtime/src/util/clone.ts +++ b/packages/runtime/src/util/clone.ts @@ -1,4 +1,4 @@ -import type { State } from '../types'; +import type { State } from '@openfn/lexicon'; // TODO I'm in the market for the best solution here - immer? deep-clone? // What should we do if functions are in the state? diff --git a/packages/runtime/src/util/default-state.ts b/packages/runtime/src/util/default-state.ts new file mode 100644 index 000000000..4d4dc5450 --- /dev/null +++ b/packages/runtime/src/util/default-state.ts @@ -0,0 +1 @@ +export default { data: {}, configuration: {} }; diff --git a/packages/runtime/src/util/execute.ts b/packages/runtime/src/util/execute.ts index bd2d6aaa5..7c5f03439 100644 --- a/packages/runtime/src/util/execute.ts +++ b/packages/runtime/src/util/execute.ts @@ -1,4 +1,4 @@ -import type { Operation, State } from '../types'; +import type { Operation, State } from '@openfn/lexicon'; // Standard execute factory export default (...operations: Operation[]): Operation => { diff --git a/packages/runtime/src/util/index.ts b/packages/runtime/src/util/index.ts new file mode 100644 index 000000000..1ad364095 --- /dev/null +++ b/packages/runtime/src/util/index.ts @@ -0,0 +1,19 @@ +import assembleState from './assemble-state'; +import clone from './clone'; +import defaultState from './default-state'; +import exec from './exec'; +import execute from './execute'; +import logError from './log-error'; +import parseRegex from './regex'; +import validatePlan from './validate-plan'; + +export { + assembleState, + clone, + defaultState, + exec, + 
execute, + logError, + parseRegex, + validatePlan, +}; diff --git a/packages/runtime/src/util/log-error.ts b/packages/runtime/src/util/log-error.ts index af13aec87..7c23e5021 100644 --- a/packages/runtime/src/util/log-error.ts +++ b/packages/runtime/src/util/log-error.ts @@ -1,9 +1,9 @@ import { Logger } from '@openfn/logger'; -import { ErrorReport, JobNodeID, State } from '../types'; +import type { State, ErrorReport, StepId } from '@openfn/lexicon'; export type ErrorReporter = ( state: State, - jobId: JobNodeID, + stepId: StepId, error: NodeJS.ErrnoException & { severity?: string; handled?: boolean; @@ -16,10 +16,10 @@ export type ErrorReporter = ( // Because we're taking closer control of errors // we should be able to report more simply const createErrorReporter = (logger: Logger): ErrorReporter => { - return (state, jobId, error) => { + return (state, stepId, error) => { const report: ErrorReport = { type: error.subtype || error.type || error.name, - jobId, + stepId, message: error.message, error: error, }; @@ -45,13 +45,13 @@ const createErrorReporter = (logger: Logger): ErrorReporter => { } if (error.severity === 'fail') { - logger.error(`Check state.errors.${jobId} for details.`); + logger.error(`Check state.errors.${stepId} for details.`); if (!state.errors) { state.errors = {}; } - state.errors[jobId] = report; + state.errors[stepId] = report; } return report as ErrorReport; diff --git a/packages/runtime/src/util/validate-plan.ts b/packages/runtime/src/util/validate-plan.ts index b1b058105..2dd86628d 100644 --- a/packages/runtime/src/util/validate-plan.ts +++ b/packages/runtime/src/util/validate-plan.ts @@ -1,5 +1,5 @@ +import { ExecutionPlan, Step } from '@openfn/lexicon'; import { ValidationError } from '../errors'; -import { ExecutionPlan, JobNode } from '../types'; type ModelNode = { up: Record; @@ -20,16 +20,16 @@ export default (plan: ExecutionPlan) => { return true; }; -export const buildModel = (plan: ExecutionPlan) => { +export const buildModel = 
({ workflow }: ExecutionPlan) => { const model: Model = {}; - const jobIdx = plan.jobs.reduce((obj, item) => { + const jobIdx = workflow.steps.reduce((obj, item) => { if (item.id) { obj[item.id] = item; } // TODO warn if there's no id? It's usually fine (until it isn't) return obj; - }, {} as Record); + }, {} as Record); const ensureModel = (jobId: string) => { if (!model[jobId]) { @@ -48,7 +48,7 @@ export const buildModel = (plan: ExecutionPlan) => { } }; - for (const job of plan.jobs) { + for (const job of workflow.steps) { let node = job.id ? ensureModel(job.id) : { up: {}, down: {} }; if (typeof job.next === 'string') { validateJob(job.next); @@ -71,9 +71,10 @@ export const buildModel = (plan: ExecutionPlan) => { }; const assertStart = (plan: ExecutionPlan) => { - if (typeof plan.start === 'string') { - if (!plan.jobs.find(({ id }) => id == plan.start)) { - throw new ValidationError(`Could not find start job: ${plan.start}`); + const { start } = plan.options; + if (typeof start === 'string') { + if (!plan.workflow.steps.find(({ id }) => id == start)) { + throw new ValidationError(`Could not find start job: ${start}`); } } }; diff --git a/packages/runtime/test/context.test.ts b/packages/runtime/test/context.test.ts index 11909a604..4583837cb 100644 --- a/packages/runtime/test/context.test.ts +++ b/packages/runtime/test/context.test.ts @@ -2,21 +2,23 @@ import test from 'ava'; import run from '../src/runtime'; import { createMockLogger } from '@openfn/logger'; +import { State } from '@openfn/lexicon'; const createState = (data = {}) => ({ data, configuration: {} }); test('makes parseInt available inside the job', async (t) => { - const job = ` + const expression = ` export default [ (s) => { s.data.count = parseInt(s.data.count); return s; } ];`; + const input = createState({ count: '22' }); - const result = await run(job, createState({ count: '22' })); + const result = await run(expression, input); t.deepEqual(result.data, { count: 22 }); }); test('makes Set 
available inside the job', async (t) => { - const job = ` + const expression = ` export default [ (s) => { new Set(); // should not throw @@ -24,13 +26,15 @@ test('makes Set available inside the job', async (t) => { } ];`; - const result = await run(job, createState({ count: '33' })); + const state = createState({ count: '33' }); + + const result = await run(expression, state); t.deepEqual(result.data, { count: '33' }); }); test("doesn't allow process inside the job", async (t) => { const logger = createMockLogger(undefined, { level: 'default' }); - const job = ` + const expression = ` export default [ (s) => { process.exit() @@ -38,9 +42,7 @@ test("doesn't allow process inside the job", async (t) => { } ];`; - const state = createState(); - - await t.throwsAsync(() => run(job, state, { logger }), { + await t.throwsAsync(() => run(expression, {}, { logger }), { name: 'RuntimeCrash', message: 'ReferenceError: process is not defined', }); @@ -48,17 +50,13 @@ test("doesn't allow process inside the job", async (t) => { test("doesn't allow eval inside a job", async (t) => { const logger = createMockLogger(undefined, { level: 'default' }); - const job = ` + const expression = ` export default [ (state) => eval('ok') // should throw ];`; - const state = createState(); - await t.throwsAsync(() => run(job, state, { logger }), { + await t.throwsAsync(() => run(expression, {}, { logger }), { name: 'SecurityError', message: /Illegal eval statement detected/, }); }); - -// TODO exhaustive test of globals? 
-// TODO ensure an imported module can't access eval/process diff --git a/packages/runtime/test/errors.test.ts b/packages/runtime/test/errors.test.ts index 90a9d9d16..a18f3ba5a 100644 --- a/packages/runtime/test/errors.test.ts +++ b/packages/runtime/test/errors.test.ts @@ -1,16 +1,27 @@ import test from 'ava'; import path from 'node:path'; +import type { WorkflowOptions } from '@openfn/lexicon'; + import run from '../src/runtime'; -// This is irrelevant now as state and credentials are preloaded -test.todo('lazy state & credential loading'); +const createPlan = (expression: string, options: WorkflowOptions = {}) => ({ + workflow: { + steps: [ + { + expression, + }, + ], + }, + options, +}); test('crash on timeout', async (t) => { const expression = 'export default [(s) => new Promise((resolve) => {})]'; + const plan = createPlan(expression, { timeout: 1 }); let error; try { - await run(expression, {}, { timeout: 1 }); + await run(plan); } catch (e) { error = e; } @@ -72,24 +83,27 @@ test('crash on eval with SecurityError', async (t) => { }); test('crash on edge condition error with EdgeConditionError', async (t) => { - const workflow = { - jobs: [ - { - id: 'a', - next: { - b: { - // Will throw a reference error - condition: 'wibble', + const plan = { + workflow: { + steps: [ + { + id: 'a', + expression: '.', + next: { + b: { + // Will throw a reference error + condition: 'wibble', + }, }, }, - }, - { id: 'b' }, - ], + { id: 'b', expression: '.' 
}, + ], + }, }; let error; try { - await run(workflow); + await run(plan); } catch (e) { error = e; } diff --git a/packages/runtime/test/execute/compile-plan.test.ts b/packages/runtime/test/execute/compile-plan.test.ts index 23ef3518e..ec99bd574 100644 --- a/packages/runtime/test/execute/compile-plan.test.ts +++ b/packages/runtime/test/execute/compile-plan.test.ts @@ -1,193 +1,255 @@ import test from 'ava'; -import { ExecutionPlan, JobEdge } from '../../src'; +import { ExecutionPlan, StepEdge } from '@openfn/lexicon'; import compilePlan from '../../src/execute/compile-plan'; const testPlan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y' }, - ], + workflow: { + steps: [ + { id: 'a', expression: 'x', name: 'a', next: { b: true } }, + { id: 'b', expression: 'y' }, + ], + }, + options: { + start: 'a', + }, }; -const planWithEdge = (edge: JobEdge) => - ({ - ...testPlan, - jobs: [{ id: 'a', next: { b: edge } }], - } as ExecutionPlan); +const planWithEdge = (edge: Partial) => ({ + workflow: { + steps: [ + { + id: 'a', + expression: 'x', + next: { + b: edge, + }, + }, + { id: 'b', expression: 'y' }, + ], + }, + options: { + start: 'a', + }, +}); -test('should preserve initial state as an object', (t) => { - const state = { x: 123 }; +test('should preserve the start option', (t) => { const compiledPlan = compilePlan({ id: 'a', - initialState: state, - jobs: [], + workflow: { + steps: [{ id: 'a', expression: 'a' }], + }, + options: { + start: 'a', + }, }); - t.deepEqual(state, compiledPlan.initialState); + + t.is(compiledPlan.options.start, 'a'); }); -test('should preserve initial state a string', (t) => { +test('should preserve arbitrary options', (t) => { const compiledPlan = compilePlan({ id: 'a', - initialState: 'abc', - jobs: [], + workflow: { + steps: [{ id: 'a', expression: 'a' }], + }, + options: { + // @ts-ignore + a: 1, + z: 2, + '-': 3, + }, + }); + + t.deepEqual(compiledPlan.options, { + 
a: 1, + z: 2, + '-': 3, + start: 'a', }); - t.is(compiledPlan.initialState, 'abc'); }); -test('should convert jobs to an object', (t) => { - const compiledPlan = compilePlan(testPlan); - t.truthy(compiledPlan.jobs.a); - t.is(compiledPlan.jobs.a.expression, 'x'); +test('should convert steps to an object', (t) => { + const { workflow } = compilePlan(testPlan); + t.deepEqual(workflow.steps.a, { + id: 'a', + name: 'a', + expression: 'x', + next: { b: true }, + previous: undefined, + }); - t.truthy(compiledPlan.jobs.b); - t.is(compiledPlan.jobs.b.expression, 'y'); + t.truthy(workflow.steps.b); + t.is(workflow.steps.b.expression, 'y'); }); -test('should set previous job with 2 jobs', (t) => { +test('should set previous job with 2 steps', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y' }, - ], + workflow: { + steps: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); + const { workflow } = compilePlan(plan); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); }); -test('should set previous job with 2 jobs and shorthand syntax', (t) => { +test('should set previous job with 2 steps and shorthand syntax', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: 'b' }, - { id: 'b', expression: 'y' }, - ], + workflow: { + steps: [ + { id: 'a', expression: 'x', next: 'b' }, + { id: 'b', expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); + const { workflow } = compilePlan(plan); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); }); -test('should set 
previous job with 2 jobs and no start', (t) => { +test('should set previous job with 2 steps and no start', (t) => { const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y' }, - ], + workflow: { + steps: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); + const { workflow } = compilePlan(plan); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); }); -test('should set previous job with 3 jobs', (t) => { +test('should set previous job with 3 steps', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'a', expression: 'x', next: { b: true } }, - { id: 'b', expression: 'y', next: { c: true } }, - { id: 'c', expression: 'z' }, - ], + workflow: { + steps: [ + { id: 'a', expression: 'x', next: { b: true } }, + { id: 'b', expression: 'y', next: { c: true } }, + { id: 'c', expression: 'z' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); - t.is(compiledPlan.jobs.c.previous, 'b'); + const { workflow } = compilePlan(plan); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); + t.is(workflow.steps.c.previous, 'b'); }); -test('should set previous job with 3 jobs and shorthand syntax', (t) => { +test('should set previous job with 3 steps and shorthand syntax', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { id: 'c', expression: 'z' }, - { id: 'a', expression: 'x', next: 'b' }, - { id: 'b', expression: 'y', next: 'c' }, - ], + workflow: { + steps: [ + { id: 'c', expression: 'z' }, + { id: 'a', expression: 'x', next: 'b' }, + { id: 'b', expression: 'y', next: 'c' }, + ], + }, + options: {}, }; - const 
compiledPlan = compilePlan(plan); - t.is(compiledPlan.jobs.a.previous, undefined); - t.is(compiledPlan.jobs.b.previous, 'a'); - t.is(compiledPlan.jobs.c.previous, 'b'); + const { workflow } = compilePlan(plan); + t.is(workflow.steps.a.previous, undefined); + t.is(workflow.steps.b.previous, 'a'); + t.is(workflow.steps.c.previous, 'b'); }); -test('should auto generate ids for jobs', (t) => { +test('should auto generate ids for steps', (t) => { const plan = { - start: 'a', - jobs: [{ expression: 'x' }, { expression: 'y' }], + workflow: { + steps: [{ expression: 'x' }, { expression: 'y' }], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - const ids = Object.keys(compiledPlan.jobs); + const { workflow } = compilePlan(plan); + const ids = Object.keys(workflow.steps); t.truthy(ids[0]); t.truthy(ids[1]); t.assert(ids[0] !== ids[1]); }); -test('should convert jobs to an object with auto ids', (t) => { +test('should convert steps to an object with auto ids', (t) => { const plan: ExecutionPlan = { - jobs: [ - // silly use case but it doens't matter - { expression: 'x' }, - { expression: 'y' }, - ], + workflow: { + steps: [{ expression: 'x' }, { expression: 'y' }], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.deepEqual(Object.keys(compiledPlan.jobs), ['job-1', 'job-2']); + const { workflow } = compilePlan(plan); + t.deepEqual(Object.keys(workflow.steps), ['job-1', 'job-2']); }); test('should reset job ids for each call', (t) => { const plan: ExecutionPlan = { - jobs: [{ expression: 'x' }], + workflow: { + steps: [{ expression: 'x' }], + }, + options: {}, }; const first = compilePlan(plan); - t.is(first.jobs['job-1'].expression, 'x'); + t.is(first.workflow.steps['job-1'].expression, 'x'); const second = compilePlan(plan); - t.is(second.jobs['job-1'].expression, 'x'); + t.is(second.workflow.steps['job-1'].expression, 'x'); }); -test('should set the start to jobs[0]', (t) => { +test('should set the start to steps[0]', (t) => { const plan: 
ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'x' }, - { id: 'b', expression: 'y' }, - { id: 'c', expression: 'z' }, - ], + workflow: { + steps: [ + { id: 'a', expression: 'x' }, + { id: 'b', expression: 'y' }, + { id: 'c', expression: 'z' }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.start, 'a'); + const { options } = compilePlan(plan); + t.is(options.start, 'a'); }); test('should not override the start', (t) => { const plan: ExecutionPlan = { - start: 'c', - jobs: [ - { id: 'a', expression: 'x' }, - { id: 'b', expression: 'y' }, - { id: 'c', expression: 'z' }, - ], + options: { + start: 'c', + }, + workflow: { + steps: [ + { id: 'a', expression: 'x' }, + { id: 'b', expression: 'y' }, + { id: 'c', expression: 'z' }, + ], + }, }; - const compiledPlan = compilePlan(plan); - t.is(compiledPlan.start, 'c'); + const { options } = compilePlan(plan); + t.is(options.start, 'c'); }); test('should compile a shorthand edge', (t) => { const plan: ExecutionPlan = { - start: 'a', - jobs: [ - { - id: 'a', - expression: 'x', - next: 'y', - }, - ], + workflow: { + steps: [ + { + id: 'a', + expression: 'x', + next: 'y', + }, + ], + }, + options: {}, }; - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); - t.deepEqual(compiledPlan.jobs.a.next!, { + t.deepEqual(workflow.steps.a.next!, { y: true, }); }); @@ -198,69 +260,69 @@ test('should not recompile a functional edge', (t) => { condition: () => true, }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.true(result); }); test('should compile a truthy edge', (t) => { const plan = planWithEdge({ condition: 'true' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result 
= workflow.steps.a.next!.b.condition({}); t.true(result); }); test('should compile a string edge', (t) => { const plan = planWithEdge('true'); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition(); + const result = workflow.steps.a.next!.b.condition(); t.true(result); }); test('should compile a falsy edge', (t) => { const plan = planWithEdge({ condition: 'false' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.false(result); }); test('should compile an edge with arithmetic', (t) => { const plan = planWithEdge({ condition: '1 + 1' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.is(result, 2); }); test('should compile an edge which uses state', (t) => { const plan = planWithEdge({ condition: '!state.hasOwnProperty("error")' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - const result = compiledPlan.jobs.a.next!.b.condition({}); + const result = workflow.steps.a.next!.b.condition({}); t.true(result); }); test('condition cannot require', (t) => { const plan = planWithEdge({ condition: 'require("axios")' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'require is not defined', }); }); @@ -268,10 +330,10 @@ test('condition cannot require', (t) => { test('condition cannot access process', (t) => { const plan = planWithEdge({ condition: 'process.exit()' }); - const compiledPlan = 
compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'process is not defined', }); }); @@ -279,10 +341,10 @@ test('condition cannot access process', (t) => { test('condition cannot access process #2', (t) => { const plan = planWithEdge({ condition: '(() => process.exit())()' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'process is not defined', }); }); @@ -290,10 +352,10 @@ test('condition cannot access process #2', (t) => { test('condition cannot eval', (t) => { const plan = planWithEdge({ condition: 'eval("process.exit()")' }); - const compiledPlan = compilePlan(plan); + const { workflow } = compilePlan(plan); // @ts-ignore - t.throws(() => compiledPlan.jobs.a.next!.b.condition({ data: {} }), { + t.throws(() => workflow.steps.a.next!.b.condition({ data: {} }), { message: 'Code generation from strings disallowed for this context', }); }); @@ -310,25 +372,28 @@ test('throw for a syntax error on a job edge', (t) => { test('throw for multiple errors', (t) => { const plan = { - jobs: [ - { - id: 'a', - expression: 'x', - next: { - b: { - condition: '@£^!!', - }, - c: { - condition: '@£^!!', + workflow: { + steps: [ + { + id: 'a', + expression: 'x', + next: { + b: { + condition: '@£^!!', + }, + c: { + condition: '@£^!!', + }, }, }, - }, - ], + ], + }, + options: {}, }; try { compilePlan(plan); - } catch (e) { + } catch (e: any) { // the message will have have one error per line const { message } = e; const lines = message.split('\n\n'); diff --git a/packages/runtime/test/execute/expression.test.ts b/packages/runtime/test/execute/expression.test.ts index 2258e43c2..5b14567e4 100644 --- 
a/packages/runtime/test/execute/expression.test.ts +++ b/packages/runtime/test/execute/expression.test.ts @@ -1,8 +1,10 @@ import test from 'ava'; import { fn } from '@openfn/language-common'; import { createMockLogger } from '@openfn/logger'; +import type { Operation, State } from '@openfn/lexicon'; + import execute from '../../src/execute/expression'; -import type { State, Operation, ExecutionContext } from '../../src/types'; +import type { ExecutionContext } from '../../src/types'; type TestState = State & { data: { @@ -17,15 +19,18 @@ const createState = (data = {}) => ({ const logger = createMockLogger(undefined, { level: 'debug' }); -const createContext = (args = {}) => +const createContext = (args = {}, options = {}) => + // @ts-ignore ({ logger, plan: {}, - opts: {}, + opts: { + ...options, + }, notify: () => {}, report: () => {}, ...args, - } as unknown as ExecutionContext); + } as ExecutionContext); test.afterEach(() => { logger._reset(); @@ -38,7 +43,6 @@ test.afterEach(() => { test('run a live no-op job with one operation', async (t) => { const job = [(s: State) => s]; const state = createState(); - const context = createContext(); const result = await execute(context, job, state); @@ -108,7 +112,7 @@ test('configuration is removed from the result by default', async (t) => { test('statePropsToRemove removes multiple props from state', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = ['x', 'y']; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const result = await execute(context, job, { x: 1, y: 1, z: 1 }); t.deepEqual(result, { z: 1 }); @@ -118,7 +122,7 @@ test('statePropsToRemove logs to debug when a prop is removed', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = ['x']; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const result = 
await execute(context, job, { x: 1, y: 1, z: 1 }); t.deepEqual(result, { y: 1, z: 1 }); @@ -130,7 +134,7 @@ test('statePropsToRemove logs to debug when a prop is removed', async (t) => { test('no props are removed from state if an empty array is passed to statePropsToRemove', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = ['x', 'y']; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const state = { x: 1, configuration: 1 }; const result = await execute(context, job, state as any); @@ -140,48 +144,22 @@ test('no props are removed from state if an empty array is passed to statePropsT test('no props are removed from state if a falsy value is passed to statePropsToRemove', async (t) => { const job = [async (s: State) => s]; const statePropsToRemove = undefined; - const context = createContext({ opts: { statePropsToRemove } }); + const context = createContext({}, { statePropsToRemove }); const state = { x: 1, configuration: 1 }; const result = await execute(context, job, state as any); t.deepEqual(result, state); }); -test('config is removed from the result (strict)', async (t) => { +test('config is removed from the result', async (t) => { const job = [async (s: State) => s]; - const context = createContext({ opts: { strict: true } }); - - const result = await execute(context, job, { configuration: {} }); - t.deepEqual(result, {}); -}); + const context = createContext({ opts: {} }); -test('config is removed from the result (non-strict)', async (t) => { - const job = [async (s: State) => s]; - const context = createContext({ opts: { strict: false } }); const result = await execute(context, job, { configuration: {} }); t.deepEqual(result, {}); }); -test('output state is cleaned in strict mode', async (t) => { - const job = [ - async () => ({ - data: {}, - references: [], - configuration: {}, - x: true, - }), - ]; - - const context = createContext({ opts: { strict: 
true } }); - - const result = await execute(context, job, {}); - t.deepEqual(result, { - data: {}, - references: [], - }); -}); - -test('output state is left alone in non-strict mode', async (t) => { +test('output state is returned verbatim, apart from config', async (t) => { const state = { data: {}, references: [], @@ -190,7 +168,7 @@ test('output state is left alone in non-strict mode', async (t) => { }; const job = [async () => ({ ...state })]; - const context = createContext({ opts: { strict: false } }); + const context = createContext(); const result = await execute(context, job, {}); t.deepEqual(result, { @@ -352,7 +330,8 @@ test('Throws after custom timeout', async (t) => { const job = `export default [() => new Promise((resolve) => setTimeout(resolve, 100))];`; const context = createContext({ - opts: { jobLogger: logger, timeout: 10 }, + plan: { options: { timeout: 10 } }, + opts: { jobLogger: logger }, }); const state = createState(); await t.throwsAsync(async () => execute(context, job, state), { @@ -370,6 +349,6 @@ test('Operations log on start and end', async (t) => { const start = logger._find('debug', /starting operation /i); t.truthy(start); - const end = logger._find('info', /operation 1 complete in \dms/i); + const end = logger._find('debug', /operation 1 complete in \dms/i); t.truthy(end); }); diff --git a/packages/runtime/test/execute/plan.test.ts b/packages/runtime/test/execute/plan.test.ts index 1cdd96682..c7657163f 100644 --- a/packages/runtime/test/execute/plan.test.ts +++ b/packages/runtime/test/execute/plan.test.ts @@ -1,142 +1,112 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; -import { ExecutionPlan, JobNode } from '../../src/types'; -import execute from './../../src/execute/plan'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; + +import executePlan from './../../src/execute/plan'; +import { CompiledExecutionPlan } from '../../src'; let mockLogger = 
createMockLogger(undefined, { level: 'debug' }); +const createPlan = ( + steps: Job[], + options: Partial = {} +): ExecutionPlan => ({ + workflow: { + steps, + }, + options, +}); + +const createJob = ({ id, expression, next, state }: any): Job => ({ + id: id ?? 'job1', + expression: expression ?? 'export default [s => s]', + state, + next, +}); + test('throw for a circular job', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - jobs: [ - { - id: 'job1', - expression: 'export default [s => s]', - next: { job2: true }, - }, - { - id: 'job2', - expression: 'export default [s => s]', - next: { job1: true }, - }, - ], - }; - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const plan = createPlan([ + createJob({ next: { job2: true } }), + createJob({ id: 'job2', next: { job1: true } }), + ]); + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /circular dependency/i); }); test('throw for a job with multiple inputs', async (t) => { - // TODO maybe this isn't a good test - job1 and job2 both input to job3, but job2 never gets called - const plan: ExecutionPlan = { - start: 'job1', - jobs: [ - { - id: 'job1', - expression: 'export default [s => s]', - next: { job3: true }, - }, - { - id: 'job2', - expression: 'export default [s => s]', - next: { job3: true }, - }, - { - id: 'job3', - expression: 'export default [s => s]', - next: {}, - }, - ], - }; - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const plan = createPlan([ + createJob({ next: { job3: true } }), + createJob({ id: 'job2', next: { job3: true } }), + createJob({ id: 'job3' }), + ]); + + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /multiple dependencies/i); }); test('throw for a plan which references an undefined job', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - jobs: [ - { - id: 'job1', - expression: 'export default [s => s]', - next: 
{ job3: true }, - }, - ], - }; - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); + const plan = createPlan([createJob({ next: { job3: true } })]); + + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); t.regex(e!.message, /cannot find job/i); }); test('throw for an illegal edge condition', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: '.', - next: { - b: { - condition: '!!!', - }, + const plan = createPlan([ + createJob({ + next: { + job2: { + condition: '!!!', }, }, - { id: 'b' }, - ], - }; - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); - t.regex(e!.message, /failed to compile edge condition a->b/i); -}); - -test('throw for an edge condition', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'x', - next: { - b: { - condition: '!!!!', - }, - }, - }, - { id: 'b' }, - ], - }; - const e = await t.throwsAsync(() => execute(plan, {}, mockLogger)); - t.regex(e!.message, /failed to compile edge condition/i); + }), + createJob({ id: 'job2' }), + ]); + const e = await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger)); + t.regex(e!.message, /failed to compile edge condition job1->job2/i); }); test('execute a one-job execution plan with inline state', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [s => s.data.x]', - state: { data: { x: 22 } }, - }, - ], - }; - const result = (await execute(plan, {}, mockLogger)) as unknown as number; + const plan = createPlan([ + createJob({ + expression: 'export default [s => s.data.x]', + state: { data: { x: 22 } }, + }), + ]); + + const result: any = (await executePlan( + plan, + {}, + {}, + mockLogger + )) as unknown as number; t.is(result, 22); }); test('execute a one-job execution plan with initial state', async (t) => { - const plan: ExecutionPlan = { - initialState: { - data: { x: 33 }, - }, - jobs: [ - { - expression: 'export default [s => 
s.data.x]', - }, - ], + const plan = createPlan([ + createJob({ + expression: 'export default [s => s.data.x]', + }), + ]); + const input = { + data: { x: 33 }, }; - const result = (await execute(plan, {}, mockLogger)) as unknown as number; + + const result: any = await executePlan(plan, input, {}, mockLogger); + t.is(result, 33); }); test('lazy load initial state', async (t) => { - const plan: ExecutionPlan = { - initialState: 's1', - jobs: [{ id: 'a', expression: 'export default [s => s]' }], - }; + const plan = createPlan([ + createJob({ + expression: 'export default [s => s]', + }), + ]); + const state = 's1'; + const states = { s1: { data: { result: 42 } } }; const options = { callbacks: { @@ -144,13 +114,10 @@ test('lazy load initial state', async (t) => { }, }; - const result = await execute(plan, options, mockLogger); + const result: any = await executePlan(plan, state, options, mockLogger); t.deepEqual(result, states.s1); }); -test.todo('lazy load initial state with log'); -test.todo('lazy load initial state with notify'); - test('execute a one-job execution plan and notify init-start and init-complete', async (t) => { let notifications: Record = {}; @@ -158,14 +125,11 @@ test('execute a one-job execution plan and notify init-start and init-complete', data: { x: 33 }, }; - const plan: ExecutionPlan = { - initialState: state, - jobs: [ - { - expression: 'export default [s => s.data.x]', - }, - ], - }; + const plan = createPlan([ + createJob({ + expression: 'export default [s => s.data.x]', + }), + ]); const notify = (event: string, payload: any) => { if (notifications[event]) { @@ -176,7 +140,7 @@ test('execute a one-job execution plan and notify init-start and init-complete', const options = { callbacks: { notify } }; - await execute(plan, options, mockLogger); + await executePlan(plan, state, options, mockLogger); t.truthy(notifications['init-start']); t.truthy(notifications['init-complete']); @@ -184,203 +148,172 @@ test('execute a one-job execution plan 
and notify init-start and init-complete', }); test('execute a job with a simple truthy "precondition" or "trigger node"', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - next: { - job: { - condition: 'true', - }, + const plan = createPlan([ + createJob({ + next: { + job: { + condition: 'true', }, }, - { - id: 'job', - expression: 'export default [() => ({ data: { done: true } })]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }), + createJob({ + id: 'job', + expression: 'export default [() => ({ data: { done: true } })]', + }), + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.true(result.data.done); }); test('do not execute a job with a simple falsy "precondition" or "trigger node"', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - next: { - job: { - condition: 'false', - }, + const plan = createPlan([ + createJob({ + next: { + job: { + condition: 'false', }, }, - { - id: 'job', - expression: 'export default [() => ({ data: { done: true } })]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }), + createJob({ + id: 'job', + expression: 'export default [() => ({ data: { done: true } })]', + }), + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.falsy(result.data.done); }); test('execute a job with a valid "precondition" or "trigger node"', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 10 } }, - jobs: [ + const plan = createPlan( + [ + // @ts-ignore TODO make this a trigger node when we have the types { + id: 'a', next: { job: { - condition: 'state.data.x === 10', + condition: 'true', }, }, }, - { + createJob({ id: 'job', expression: 'export default [() => ({ data: { done: true } })]', - }, + }), ], - }; - const result = await execute(plan, {}, mockLogger); + { + initialState: { data: { x: 10 } }, + } + ); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.true(result.data.done); }); 
test('merge initial and inline state', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 33 } }, - jobs: [ - { - expression: 'export default [s => s]', - state: { data: { y: 11 } }, - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + createJob({ + expression: 'export default [s => s]', + state: { data: { y: 11 } }, + }), + ]); + const state = { data: { x: 33 } }; + + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 33); t.is(result.data.y, 11); }); test('Initial state overrides inline data', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 34 } }, - jobs: [ - { - expression: 'export default [s => s]', - state: { data: { x: 11 } }, - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + createJob({ + expression: 'export default [s => s]', + state: { data: { y: 11 } }, + }), + ]); + const state = { data: { x: 34 } }; + + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 34); }); test('Previous state overrides inline data', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - // This will return x as 5 - { - id: 'job1', - expression: 'export default [s => s]', - state: { data: { x: 5 } }, - next: { - job2: true, - }, - }, - - // This will receive x as 5, prefer it to the default x as 88, and return it plus 1 - { - id: 'job2', - expression: 'export default [s => { s.data.x +=1 ; return s; }]', - state: { data: { x: 88 } }, - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + // This will return x as 5 + createJob({ + state: { data: { x: 5 } }, + next: { + job2: true, + }, + }), + // This will receive x as 5, prefer it to the default x as 88, and return it plus 1 + createJob({ + id: 'job2', + expression: 'export default [s => { s.data.x +=1 ; return s; }]', + state: { data: { x: 88 } }, + }), + ]); + + 
const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data.x, 6); }); -test('only allowed state is passed through in strict mode', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: - 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', - next: { - job2: true, - }, - }, - - { - id: 'job2', - // Throw if we receive unexpected stuff in state - expression: - 'export default [s => { if (s.x || s.y) { throw new Error() }; return s;}]', - }, - ], - }; - const result = await execute(plan, { strict: true }, mockLogger); - t.deepEqual(result, { - data: {}, - references: [], - }); -}); - -test('Jobs only receive state from upstream jobs', async (t) => { +test('steps only receive state from upstream steps', async (t) => { const assert = (expr: string) => `if (!(${expr})) throw new Error('ASSERT FAIL')`; - const plan: ExecutionPlan = { - jobs: [ - { - id: 'start', - expression: 'export default [s => s]', - state: { data: { x: 1, y: 1 } }, - next: { - 'x-a': true, - 'y-a': true, - }, + const plan = createPlan([ + { + id: 'start', + expression: 'export default [s => s]', + state: { data: { x: 1, y: 1 } }, + next: { + 'x-a': true, + 'y-a': true, }, + }, - { - id: 'x-a', - expression: `export default [s => { - ${assert('s.data.x === 1')}; - ${assert('s.data.y === 1')}; - s.data.x += 1; - return s; - }]`, - next: { 'x-b': true }, - }, - { - id: 'x-b', - expression: `export default [s => { - ${assert('s.data.x === 2')}; - ${assert('s.data.y === 1')}; - return s; - }]`, - }, + { + id: 'x-a', + expression: `export default [s => { + ${assert('s.data.x === 1')}; + ${assert('s.data.y === 1')}; + s.data.x += 1; + return s; + }]`, + next: { 'x-b': true }, + }, + { + id: 'x-b', + expression: `export default [s => { + ${assert('s.data.x === 2')}; + ${assert('s.data.y === 1')}; + return s; + }]`, + }, - { - id: 'y-a', - expression: `export default [s => { - ${assert('s.data.x === 1')}; - ${assert('s.data.y === 1')}; - s.data.y += 
1; - return s; - }]`, - next: { 'y-b': true }, - }, - { - id: 'y-b', - expression: `export default [s => { - ${assert('s.data.x === 1')}; - ${assert('s.data.y === 2')}; - return s; - }]`, - }, - ], - }; + { + id: 'y-a', + expression: `export default [s => { + ${assert('s.data.x === 1')}; + ${assert('s.data.y === 1')}; + s.data.y += 1; + return s; + }]`, + next: { 'y-b': true }, + }, + { + id: 'y-b', + expression: `export default [s => { + ${assert('s.data.x === 1')}; + ${assert('s.data.y === 2')}; + return s; + }]`, + }, + ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); // explicit check that no assertion failed and wrote an error to state t.falsy(result.error); @@ -392,26 +325,24 @@ test('Jobs only receive state from upstream jobs', async (t) => { }); }); -test('all state is passed through in non-strict mode', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: - 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', - next: { - job2: true, - }, - }, - - { - id: 'job2', - // Throw if we receive unexpected stuff in state - expression: - 'export default [s => { if (!s.x || !s.y || !s.references) { throw new Error() }; return s;}]', - }, - ], - }; - const result = await execute(plan, { strict: false }, mockLogger); +test('all state is passed through successive jobs', async (t) => { + const plan = createPlan([ + createJob({ + expression: + 'export default [s => ({ data: {}, references: [], x: 22, y: 33 })]', + next: { + job2: true, + }, + }), + createJob({ + id: 'job2', + // Throw if we receive unexpected stuff in state + expression: + 'export default [s => { if (!s.x || !s.y || !s.references) { throw new Error() }; return s;}]', + }), + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.deepEqual(result, { data: {}, references: [], @@ -421,112 +352,102 @@ test('all state is passed through in non-strict mode', async (t) => { }); 
test('execute edge based on state in the condition', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [(s) => { s.data.x = 10; return s;}]', - next: { - job2: { condition: 'state.data.x === 10' }, - }, + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [(s) => { s.data.x = 10; return s;}]', + next: { + job2: { condition: 'state.data.x === 10' }, }, - { - id: 'job2', - expression: 'export default [() => ({ data: { y: 20 } })]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'job2', + expression: 'export default [() => ({ data: { y: 20 } })]', + }, + ]); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data?.y, 20); }); test('skip edge based on state in the condition ', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [s => { s.data.x = 10; return s;}]', - next: { - job2: { condition: 'false' }, - }, - }, - { - id: 'job2', - expression: 'export default [() => ({ y: 20 })]', + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [s => { s.data.x = 10; return s;}]', + next: { + job2: { condition: 'false' }, }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'job2', + expression: 'export default [() => ({ y: 20 })]', + }, + ]); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data?.x, 10); }); test('do not traverse a disabled edge', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - expression: 'export default [(s) => { s.data.x = 10; return s;}]', - next: { - job2: { - disabled: true, - condition: 'true', - }, + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [(s) => { s.data.x = 10; return s;}]', + next: { + job2: { + disabled: true, + condition: 'true', }, }, - { - id: 'job2', - 
expression: 'export default [() => ({ data: { x: 20 } })]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'job2', + expression: 'export default [() => ({ data: { x: 20 } })]', + }, + ]); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data?.x, 10); }); test('execute a two-job execution plan', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - jobs: [ - { - id: 'job1', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - next: { job2: true }, - }, - { - id: 'job2', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { job2: true }, + }, + { + id: 'job2', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ]); + const state = { data: { x: 0 } }; + + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 2); }); test('only execute one job in a two-job execution plan', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - jobs: [ - { - id: 'job1', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - next: { job2: false }, - }, - { - id: 'job2', - expression: 'export default [s => { s.data.x += 1; return s; } ]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + next: { job2: false }, + }, + { + id: 'job2', + expression: 'export default [s => { s.data.x += 1; return s; } ]', + }, + ]); + const state = { data: { x: 0 } }; + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 1); }); -test('execute a two-job execution plan with custom start in state', async (t) => { - const 
plan: ExecutionPlan = { - start: 'job2', - jobs: [ +test('execute a two-job execution plan with custom start', async (t) => { + const plan = createPlan( + [ { id: 'job1', expression: 'export default [() => ({ data: { result: 11 } }) ]', @@ -537,36 +458,16 @@ test('execute a two-job execution plan with custom start in state', async (t) => next: { job1: true }, }, ], - }; - const result = await execute(plan, {}, mockLogger); - t.is(result.data.result, 11); -}); + { start: 'job2' } + ); -test('execute a two-job execution plan with custom start in options', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - initialState: { start: 'job2' }, - jobs: [ - { - id: 'job1', - expression: 'export default [() => ({ data: { result: 11 } }) ]', - }, - { - id: 'job2', - expression: 'export default [() => ({ data: { result: 1 } }) ]', - next: { job1: true }, - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.data.result, 11); }); test('Return when there are no more edges', async (t) => { - const plan: ExecutionPlan = { - start: 'job1', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'job1', expression: 'export default [s => { s.data.x += 1; return s; } ]', @@ -576,33 +477,36 @@ test('Return when there are no more edges', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - }; - const result = await execute(plan, {}, mockLogger); + { start: 'job1' } + ); + const state = { data: { x: 0 } }; + + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data?.x, 1); }); test('execute a 5 job execution plan', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - start: '1', - jobs: [], - } as ExecutionPlan; + const steps = []; for (let i = 1; i < 6; i++) { - plan.jobs.push({ + steps.push({ id: `${i}`, expression: 'export default [s => { s.data.x += 1; 
return s; } ]', next: i === 5 ? null : { [`${i + 1}`]: true }, - } as JobNode); + } as Job); } - const result = await execute(plan, {}, mockLogger); + + const plan = createPlan(steps, { + start: '1', + }); + const state = { data: { x: 0 } }; + + const result: any = await executePlan(plan, state, {}, mockLogger); t.is(result.data.x, 5); }); test('execute multiple steps in "parallel"', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'start', expression: 'export default [s => s]', @@ -625,8 +529,11 @@ test('execute multiple steps in "parallel"', async (t) => { expression: 'export default [s => { s.data.x += 1; return s; } ]', }, ], - }; - const result = await execute(plan, {}, mockLogger); + { start: 'start' } + ); + const state = { data: { x: 0 } }; + + const result: any = await executePlan(plan, state, {}, mockLogger); t.deepEqual(result, { a: { data: { x: 1 } }, b: { data: { x: 1 } }, @@ -635,10 +542,8 @@ test('execute multiple steps in "parallel"', async (t) => { }); test('isolate state in "parallel" execution', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'start', expression: 'export default [s => s]', @@ -658,17 +563,17 @@ test('isolate state in "parallel" execution', async (t) => { 'export default [s => { if (s.data.b) { throw "e" }; s.data.c = true; return s }]', }, ], - }; + { start: 'start' } + ); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.falsy(result.errors); }); test('isolate state in "parallel" execution with deeper trees', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - initialState: { data: { x: 0 } }, - jobs: [ + const plan = createPlan( + [ { id: 'start', expression: 'export default [s => s]', @@ -701,36 +606,35 @@ 
test('isolate state in "parallel" execution with deeper trees', async (t) => { 'export default [s => { if (s.data.c) { throw "e" }; s.data.b = true; return s }]', }, ], - }; + { start: 'start' } + ); + const state = { data: { x: 0 } }; - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, state, {}, mockLogger); t.falsy(result.errors); }); test('"parallel" execution with multiple leaves should write multiple results to state', async (t) => { - const plan: ExecutionPlan = { - start: 'start', - jobs: [ - { - id: 'start', - expression: 'export default [s => s]', - next: { - 'job-b': true, - 'job-c': true, - }, - }, - { - id: 'job-b', - expression: 'export default [s => { s.data.b = true; return s }]', + const plan = createPlan([ + { + id: 'start', + expression: 'export default [s => s]', + next: { + 'job-b': true, + 'job-c': true, }, - { - id: 'job-c', - expression: 'export default [s => { s.data.c = true; return s }]', - }, - ], - }; + }, + { + id: 'job-b', + expression: 'export default [s => { s.data.b = true; return s }]', + }, + { + id: 'job-c', + expression: 'export default [s => { s.data.c = true; return s }]', + }, + ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); // Each leaf should write to its own place on state t.deepEqual(result, { 'job-b': { @@ -747,32 +651,30 @@ test('"parallel" execution with multiple leaves should write multiple results to }); test('return an error in state', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - state: {}, - expression: 'export default [s => { throw Error("e")}]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + { + id: 'a', + state: {}, + expression: 'export default [s => { throw Error("e")}]', + }, + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.truthy(result.errors); t.is(result.errors.a.message, 
'e'); }); // Fix for https://github.com/OpenFn/kit/issues/317 test('handle non-standard error objects', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - state: {}, - expression: 'export default [s => { throw "wibble" }]', - }, - ], - }; - const result = await execute(plan, {}, mockLogger); + const plan = createPlan([ + { + id: 'a', + state: {}, + expression: 'export default [s => { throw "wibble" }]', + }, + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.truthy(result.errors); const err = result.errors.a; t.is(err.type, 'JobError'); @@ -780,186 +682,174 @@ test('handle non-standard error objects', async (t) => { }); test('keep executing after an error', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - state: {}, - expression: 'export default [s => { throw Error("e"); state.x = 20 }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: 'export default [() => ({ y: 20 })]', + const plan = createPlan([ + { + id: 'a', + state: {}, + expression: 'export default [s => { throw Error("e"); state.x = 20 }]', + next: { + b: true, }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'b', + expression: 'export default [() => ({ y: 20 })]', + }, + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.y, 20); t.falsy(result.x); }); test('simple on-error handler', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [s => { throw Error("e")}]', - next: { - job2: { condition: 'state.errors' }, - job3: { condition: '!state.errors' }, - }, - }, - { - id: 'job2', - expression: 'export default [() => ({ y: 20 })]', - }, - { - id: 'job3', - expression: 'export default [() => ({ x: 20 })]', + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [s => { throw Error("e")}]', + next: { + job2: { condition: 'state.errors' }, + job3: { condition: 
'!state.errors' }, }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'job2', + expression: 'export default [() => ({ y: 20 })]', + }, + { + id: 'job3', + expression: 'export default [() => ({ x: 20 })]', + }, + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.is(result.y, 20); t.falsy(result.x); }); test('log appopriately on error', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'job1', - state: {}, - expression: 'export default [s => { throw Error("e")}]', - }, - ], - }; + const plan = createPlan([ + { + id: 'job1', + state: {}, + expression: 'export default [s => { throw Error("e")}]', + }, + ]); const logger = createMockLogger(undefined, { level: 'debug' }); - await execute(plan, {}, logger); - const err = logger._find('error', /failed job/i); + await executePlan(plan, {}, {}, logger); + const err = logger._find('error', /failed step/i); t.truthy(err); - t.regex(err!.message as string, /Failed job job1 after \d+ms/i); + t.regex(err!.message as string, /Failed step job1 after \d+ms/i); t.truthy(logger._find('error', /JobError: e/)); t.truthy(logger._find('error', /Check state.errors.job1 for details/i)); }); -test('jobs do not share a local scope', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - // declare x in this expression's scope - expression: 'const x = 10; export default [s => s];', - next: { - b: true, - }, - }, - { - id: 'b', - // x should not defined here and this will throw - expression: 'export default [s => { s.data.x = x; return s; }]', +test('steps do not share a local scope', async (t) => { + const plan = createPlan([ + { + id: 'job1', + // declare x in this expression's scope + expression: 'const x = 10; export default [s => s];', + next: { + b: true, }, - ], - }; - await t.throwsAsync(() => execute(plan, {}, mockLogger), { + }, + { + id: 'b', + // x should not defined here and this will throw + expression: 'export default [s 
=> { s.data.x = x; return s; }]', + }, + ]); + await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger), { message: 'ReferenceError: x is not defined', name: 'RuntimeCrash', }); }); -test('jobs do not share a global scope', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression: 'export default [s => { x = 10; return s; }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: 'export default [s => { s.data.x = x; return s; }]', +test('steps do not share a global scope', async (t) => { + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { x = 10; return s; }]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: 'export default [s => { s.data.x = x; return s; }]', + }, + ]); - await t.throwsAsync(() => execute(plan, {}, mockLogger), { + await t.throwsAsync(() => executePlan(plan, {}, {}, mockLogger), { message: 'ReferenceError: x is not defined', name: 'RuntimeCrash', }); }); -test('jobs do not share a globalThis object', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression: 'export default [(s) => { globalThis.x = 10; return s; }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: - 'export default [(s) => { s.data.x = globalThis.x; return s; }]', +test('steps do not share a globalThis object', async (t) => { + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [(s) => { globalThis.x = 10; return s; }]', + next: { + b: true, }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'b', + expression: + 'export default [(s) => { s.data.x = globalThis.x; return s; }]', + }, + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.deepEqual(result, { data: {} }); }); // TODO this fails right now // https://github.com/OpenFn/kit/issues/213 -test.skip('jobs cannot scribble on globals', async (t) => { - const plan: ExecutionPlan = { - 
initialState: { data: {} }, - jobs: [ - { - expression: 'export default [s => { console.x = 10; return s; }]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: 'export default [s => { s.data.x = console.x; return s; }]', +test.skip('steps cannot scribble on globals', async (t) => { + const plan = createPlan([ + { + id: 'job1', + expression: 'export default [s => { console.x = 10; return s; }]', + next: { + b: true, }, - ], - }; - const result = await execute(plan, {}, mockLogger); + }, + { + id: 'b', + expression: 'export default [s => { s.data.x = console.x; return s; }]', + }, + ]); + + const result: any = await executePlan(plan, {}, {}, mockLogger); t.falsy(result.data.x); }); // TODO this fails right now // https://github.com/OpenFn/kit/issues/213 -test.skip('jobs cannot scribble on adaptor functions', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: { x: 0 } }, - jobs: [ - { - expression: - 'import { fn } from "@openfn/language-common"; fn.x = 10; export default [s => s]', - next: { - b: true, - }, - }, - { - id: 'b', - expression: - 'import { fn } from "@openfn/language-common"; export default [s => { s.data.x = fn.x; return s; }]', +test.skip('steps cannot scribble on adaptor functions', async (t) => { + const plan = createPlan([ + { + id: 'job1', + expression: + 'import { fn } from "@openfn/language-common"; fn.x = 10; export default [s => s]', + next: { + b: true, }, - ], - }; + }, + { + id: 'b', + expression: + 'import { fn } from "@openfn/language-common"; export default [s => { s.data.x = fn.x; return s; }]', + }, + ]); const options = { linker: { modules: { @@ -970,11 +860,11 @@ test.skip('jobs cannot scribble on adaptor functions', async (t) => { }, }; - const result = await execute(plan, options, mockLogger); + const result: any = await executePlan(plan, {}, options, mockLogger); t.falsy(result.data.x); }); -test('jobs can write circular references to state without blowing up downstream', async (t) => { +test('steps 
can write circular references to state without blowing up downstream', async (t) => { const expression = `export default [(s) => { const a = {}; const b = { a }; @@ -984,21 +874,19 @@ test('jobs can write circular references to state without blowing up downstream' return s; }] `; - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression, - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s => s)]', - }, - ], - }; + const plan = createPlan([ + { + id: 'job1', + expression, + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s => s)]', + }, + ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.notThrows(() => JSON.stringify(result)); t.deepEqual(result, { @@ -1010,7 +898,7 @@ test('jobs can write circular references to state without blowing up downstream' }); }); -test('jobs cannot pass circular references to each other', async (t) => { +test('steps cannot pass circular references to each other', async (t) => { const expression = `export default [(s) => { const a = {}; const b = { a }; @@ -1020,101 +908,107 @@ test('jobs cannot pass circular references to each other', async (t) => { return s; }] `; - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - expression, - next: { b: true }, - }, - { - id: 'b', - expression: `export default [(s => { + const plan = createPlan([ + { + expression, + next: { b: true }, + }, + { + id: 'b', + expression: `export default [(s => { s.data.answer = s.data.ref.b.a; return s })]`, - }, - ], - }; + }, + ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.notThrows(() => JSON.stringify(result)); t.is(result.data.answer, '[Circular]'); }); -test('jobs can write functions to state without blowing up downstream', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ 
- { - next: { b: true }, - expression: `export default [(s) => { +test('steps can write functions to state without blowing up downstream', async (t) => { + const plan = createPlan([ + { + next: { b: true }, + expression: `export default [(s) => { s.data = { x: () => 22 } return s; }]`, - }, - { - id: 'b', - expression: 'export default [(s) => s]', - }, - ], - }; + }, + { + id: 'b', + expression: 'export default [(s) => s]', + }, + ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); t.notThrows(() => JSON.stringify(result)); t.deepEqual(result, { data: {} }); }); -test('jobs cannot pass functions to each other', async (t) => { - const plan: ExecutionPlan = { - initialState: { data: {} }, - jobs: [ - { - next: { b: true }, - expression: `export default [(s) => { +test('steps cannot pass functions to each other', async (t) => { + const plan = createPlan([ + { + next: { b: true }, + expression: `export default [(s) => { s.data = { x: () => 22 } return s; }]`, - }, - { - id: 'b', - expression: `export default [ + }, + { + id: 'b', + expression: `export default [ (s) => { s.data.x(); return s; } ]`, - }, - ], - }; + }, + ]); - const result = await execute(plan, {}, mockLogger); + const result: any = await executePlan(plan, {}, {}, mockLogger); const error = result.errors.b; t.is(error.type, 'TypeError'); t.is(error.message, 'TypeError: s.data.x is not a function'); }); -test('Plans log for each job start and end', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [s => s]', - }, - ], - }; +test('Plans log step ids for each job start and end', async (t) => { + const plan = createPlan([ + { + id: 'a', + expression: 'export default [s => s]', + }, + ]); + const logger = createMockLogger(undefined, { level: 'debug' }); + await executePlan(plan, {}, {}, logger); + const start = logger._find('info', /starting step a/i); + t.is(start!.message, 'Starting 
step a'); + + const end = logger._find('success', /completed step a/i); + t.regex(end!.message as string, /Completed step a in \d+ms/); +}); + +test('Plans log step names for each job start and end', async (t) => { + const plan = createPlan([ + { + id: 'a', + name: 'do-the-thing', + expression: 'export default [s => s]', + }, + ]); const logger = createMockLogger(undefined, { level: 'debug' }); - await execute(plan, {}, logger); + await executePlan(plan, {}, {}, logger); - const start = logger._find('always', /starting job/i); - t.is(start!.message, 'Starting job a'); + const start = logger._find('info', /starting step do-the-thing/i); + t.is(start!.message, 'Starting step do-the-thing'); - const end = logger._find('success', /completed job/i); - t.regex(end!.message as string, /Completed job a in \d+ms/); + const end = logger._find('success', /completed step do-the-thing/i); + t.regex(end!.message as string, /Completed step do-the-thing in \d+ms/); }); diff --git a/packages/runtime/test/execute/job.test.ts b/packages/runtime/test/execute/step.test.ts similarity index 73% rename from packages/runtime/test/execute/job.test.ts rename to packages/runtime/test/execute/step.test.ts index d1a36cc68..2fbd9205f 100644 --- a/packages/runtime/test/execute/job.test.ts +++ b/packages/runtime/test/execute/step.test.ts @@ -6,9 +6,10 @@ import { NOTIFY_JOB_ERROR, NOTIFY_JOB_START, } from '../../src'; -import execute from '../../src/execute/job'; +import execute from '../../src/execute/step'; -import type { ExecutionContext, State } from '../../src/types'; +import type { ExecutionContext } from '../../src/types'; +import { State } from '@openfn/lexicon'; const createState = (data = {}) => ({ data: data, @@ -20,7 +21,9 @@ const logger = createMockLogger(undefined, { level: 'debug' }); const createContext = (args = {}) => ({ logger, - plan: {}, + plan: { + options: {}, + }, opts: {}, notify: () => {}, report: () => {}, @@ -31,35 +34,35 @@ test.afterEach(() => { logger._reset(); }); 
-test.serial('resolve and return next for a simple job', async (t) => { - const job = { +test.serial('resolve and return next for a simple step', async (t) => { + const step = { id: 'j', expression: [(s: State) => s], next: { k: true, a: false }, }; const initialState = createState(); const context = createContext(); - const { next, state } = await execute(context, job, initialState); + const { next, state } = await execute(context, step, initialState); t.deepEqual(state, { data: {} }); t.deepEqual(next, ['k']); }); -test.serial('resolve and return next for a trigger-style job', async (t) => { - const job = { +test.serial('resolve and return next for a trigger-style step', async (t) => { + const step = { id: 'j', next: { k: true, a: false }, }; const initialState = createState(); const context = createContext(); - const { next, state } = await execute(context, job, initialState); + const { next, state } = await execute(context, step, initialState); t.deepEqual(state, initialState); t.deepEqual(next, ['k']); }); -test.serial('resolve and return next for a failed job', async (t) => { - const job = { +test.serial('resolve and return next for a failed step', async (t) => { + const step = { id: 'j', expression: [ () => { @@ -70,7 +73,7 @@ test.serial('resolve and return next for a failed job', async (t) => { }; const initialState = createState(); const context = createContext(); - const { next, state } = await execute(context, job, initialState); + const { next, state } = await execute(context, step, initialState); // Config should still be scrubbed from data t.deepEqual(state, { data: {} }); @@ -78,7 +81,7 @@ test.serial('resolve and return next for a failed job', async (t) => { }); test.serial(`notify ${NOTIFY_JOB_START}`, async (t) => { - const job = { + const step = { id: 'j', expression: [(s: State) => s], }; @@ -92,32 +95,32 @@ test.serial(`notify ${NOTIFY_JOB_START}`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); 
+ await execute(context, step, state); }); test.serial( - `don't notify ${NOTIFY_JOB_START} for trigger-style jobs`, + `don't notify ${NOTIFY_JOB_START} for trigger-style steps`, async (t) => { - const job = { + const step = { id: 'j', }; const state = createState(); const notify = (event: string, payload?: any) => { if (event === NOTIFY_JOB_START) { - t.fail('should not notify job-start for trigger nodes'); + t.fail('should not notify step-start for trigger nodes'); } }; const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); t.pass('all ok'); } ); test.serial(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { - const job = { + const step = { id: 'j', expression: [(s: State) => s], }; @@ -139,11 +142,11 @@ test.serial(`notify ${NOTIFY_JOB_COMPLETE} with no next`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { - const job = { + const step = { id: 'j', expression: [(s: State) => s], next: { b: true, c: true }, @@ -165,26 +168,26 @@ test.serial(`notify ${NOTIFY_JOB_COMPLETE} with two nexts`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial( - `don't notify ${NOTIFY_JOB_COMPLETE} for trigger-style jobs`, + `don't notify ${NOTIFY_JOB_COMPLETE} for trigger-style steps`, async (t) => { - const job = { + const step = { id: 'j', }; const state = createState(); const notify = (event: string) => { if (event === NOTIFY_JOB_COMPLETE) { - t.fail('should not notify job-start for trigger nodes'); + t.fail('should not notify step-start for trigger nodes'); } }; const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); t.pass('all ok'); } ); @@ -195,7 +198,7 @@ test.serial( // 
Promises will trigger an exception if you try to serialize them // If we don't return finalState in execute/expression, this test will fail const resultState = { x: new Promise((r) => r), y: 22 }; - const job = { + const step = { id: 'j', expression: [() => resultState], }; @@ -212,12 +215,12 @@ test.serial( const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); } ); test.serial(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { - const job = { + const step = { id: 'j', expression: [ () => { @@ -247,33 +250,33 @@ test.serial(`notify ${NOTIFY_JOB_ERROR} for a fail`, async (t) => { const context = createContext({ notify }); - await execute(context, job, state); + await execute(context, step, state); }); test.serial('log duration of execution', async (t) => { - const job = { + const step = { id: 'y', expression: [(s: State) => s], }; const initialState = createState(); const context = createContext(); - await execute(context, job, initialState); + await execute(context, step, initialState); - const duration = logger._find('success', /completed job /i); + const duration = logger._find('success', /completed step /i); - t.regex(duration?.message, /completed job y in \d\d?ms/i); + t.regex(duration?.message, /completed step y in \d\d?ms/i); }); test.serial('log memory usage', async (t) => { - const job = { + const step = { id: 'z', expression: [(s: State) => s], }; const initialState = createState(); const context = createContext(); - await execute(context, job, initialState); + await execute(context, step, initialState); const memory = logger._find('debug', /final memory usage/i); @@ -282,8 +285,8 @@ test.serial('log memory usage', async (t) => { t.regex(memory?.message, /\d+mb(.+)\d+mb/i); }); -test.serial('warn if a non-leaf job does not return state', async (t) => { - const job = { +test.serial('warn if a non-leaf step does not return state', async (t) => { + const step = { id: 'k', expression: [(s: 
State) => {}], next: { l: true }, @@ -292,14 +295,14 @@ test.serial('warn if a non-leaf job does not return state', async (t) => { const context = createContext(); const state = createState(); - // @ts-ignore ts complains that the job does not return state - const result = await execute(context, job, state); + // @ts-ignore ts complains that the step does not return state + const result = await execute(context, step, state); const warn = logger._find('warn', /did not return a state object/); t.truthy(warn); }); -test.serial('do not warn if a leaf job does not return state', async (t) => { - const job = { +test.serial('do not warn if a leaf step does not return state', async (t) => { + const step = { id: 'k', expression: [(s: State) => {}], }; @@ -307,17 +310,17 @@ test.serial('do not warn if a leaf job does not return state', async (t) => { const context = createContext(); const state = createState(); - // @ts-ignore ts complains that the job does not return state - const result = await execute(context, job, state); + // @ts-ignore ts complains that the step does not return state + const result = await execute(context, step, state); const warn = logger._find('warn', /did not return a state object/); t.falsy(warn); }); test.serial( - 'do not warn a non-leaf job does not return state and there was an error', + 'do not warn a non-leaf step does not return state and there was an error', async (t) => { - const job = { + const step = { id: 'k', expression: [ (s: State) => { @@ -330,8 +333,8 @@ test.serial( const context = createContext(); const state = createState(); - // @ts-ignore ts complains that the job does not return state - const result = await execute(context, job, state); + // @ts-ignore ts complains that the step does not return state + const result = await execute(context, step, state); const warn = logger._find('warn', /did not return a state object/); t.falsy(warn); diff --git a/packages/runtime/test/memory.test.ts b/packages/runtime/test/memory.test.ts 
index 972482dbd..9dd3b83f7 100644 --- a/packages/runtime/test/memory.test.ts +++ b/packages/runtime/test/memory.test.ts @@ -4,12 +4,9 @@ * */ import test from 'ava'; +import type { ExecutionPlan } from '@openfn/lexicon'; -import { - ExecutionPlan, - NOTIFY_JOB_COMPLETE, - NotifyJobCompletePayload, -} from '../src'; +import { NOTIFY_JOB_COMPLETE, NotifyJobCompletePayload } from '../src'; import callRuntime from '../src/runtime'; /** @@ -52,19 +49,14 @@ const run = async (t, workflow: ExecutionPlan) => { } }; - const state = await callRuntime( - workflow, - {}, - { - strict: false, - callbacks: { notify }, - globals: { - process: { - memoryUsage: () => process.memoryUsage(), - }, + const state = await callRuntime(workflow, { + callbacks: { notify }, + globals: { + process: { + memoryUsage: () => process.memoryUsage(), }, - } - ); + }, + }); return { state, mem }; }; diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index d4bb1888d..e7f8af39e 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -1,8 +1,9 @@ import test from 'ava'; import path from 'node:path'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; + import { - ExecutionPlan, NOTIFY_INIT_COMPLETE, NOTIFY_JOB_COMPLETE, NOTIFY_JOB_ERROR, @@ -11,6 +12,8 @@ import { } from '../src'; import run from '../src/runtime'; +type ExecutionPlanNoOptions = Omit; + test('run simple expression', async (t) => { const expression = 'export default [(s) => {s.data.done = true; return s}]'; @@ -19,10 +22,12 @@ test('run simple expression', async (t) => { }); test('run a simple workflow', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { expression: 'export default [(s) => ({ data: { done: true } })]' }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { expression: 'export default [(s) => ({ data: { done: true } })]' }, + ], + }, }; const result: any = 
await run(plan); @@ -42,8 +47,10 @@ test('run a workflow and notify major events', async (t) => { notify, }; - const plan: ExecutionPlan = { - jobs: [{ expression: 'export default [(s) => s]' }], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [{ expression: 'export default [(s) => s]' }], + }, }; await run(plan, {}, { callbacks }); @@ -69,10 +76,12 @@ test('notify job error even after fail', async (t) => { notify, }; - const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'export default [(s) => s.data.x = s.err.z ]' }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { id: 'a', expression: 'export default [(s) => s.data.x = s.err.z ]' }, + ], + }, }; await run(plan, {}, { callbacks }); @@ -93,8 +102,8 @@ test('notify job error even after crash', async (t) => { notify, }; - const plan: ExecutionPlan = { - jobs: [{ id: 'a', expression: 'export default [() => s]' }], + const plan: ExecutionPlanNoOptions = { + workflow: { steps: [{ id: 'a', expression: 'export default [() => s]' }] }, }; try { @@ -106,17 +115,18 @@ test('notify job error even after crash', async (t) => { }); test('resolve a credential', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [(s) => s]', - configuration: 'ccc', - }, - ], + const plan: Partial = { + workflow: { + steps: [ + { + expression: 'export default [(s) => s]', + configuration: 'ccc', + }, + ], + }, }; const options = { - strict: false, statePropsToRemove: [], callbacks: { resolveCredential: async () => ({ password: 'password1' }), @@ -129,13 +139,15 @@ test('resolve a credential', async (t) => { }); test('resolve initial state', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [(s) => s]', - state: 'abc', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + expression: 'export default [(s) => s]', + state: 'abc', + }, + ], + }, }; const options = { @@ -162,11 +174,13 @@ 
test('run a workflow with two jobs and call callbacks', async (t) => { notify, }; - const plan: ExecutionPlan = { - jobs: [ - { id: 'a', expression: 'export default [(s) => s]', next: { b: true } }, - { id: 'b', expression: 'export default [(s) => s]' }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { id: 'a', expression: 'export default [(s) => s]', next: { b: true } }, + { id: 'b', expression: 'export default [(s) => s]' }, + ], + }, }; await run(plan, {}, { callbacks }); @@ -178,30 +192,34 @@ test('run a workflow with two jobs and call callbacks', async (t) => { }); test('run a workflow with state and parallel branching', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: - 'export default [(s) => { s.data.count += 1; s.data.a = true; return s}]', - next: { - b: true as const, - c: true as const, + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + expression: + 'export default [(s) => { s.data.count += 1; s.data.a = true; return s}]', + next: { + b: true as const, + c: true as const, + }, }, - }, - { - id: 'b', - expression: - 'export default [(s) => { s.data.count += 1; s.data.b = true; return s}]', - }, - { - id: 'c', - expression: - 'export default [(s) => { s.data.count += 1; s.data.c = true; return s}]', - }, - ], + { + id: 'b', + expression: + 'export default [(s) => { s.data.count += 1; s.data.b = true; return s}]', + }, + { + id: 'c', + expression: + 'export default [(s) => { s.data.count += 1; s.data.c = true; return s}]', + }, + ], + }, }; - const result: any = await run(plan, { data: { count: 0 } }); + const state = { data: { count: 0 } }; + + const result: any = await run(plan, state); t.deepEqual(result, { b: { data: { @@ -220,29 +238,33 @@ test('run a workflow with state and parallel branching', async (t) => { }); }); +// TODO this test sort of shows why input state on the plan object is a bit funky +// running the same plan with two inputs is pretty clunky test('run a workflow 
with state and conditional branching', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [(s) => { s.data.a = true; return s}]', - next: { - b: { - condition: 'state.data.count > 0', - }, - c: { - condition: 'state.data.count == 0', + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + expression: 'export default [(s) => { s.data.a = true; return s}]', + next: { + b: { + condition: 'state.data.count > 0', + }, + c: { + condition: 'state.data.count == 0', + }, }, }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.data.b = true; return s}]', - }, - { - id: 'c', - expression: 'export default [(s) => { s.data.c = true; return s}]', - }, - ], + { + id: 'b', + expression: 'export default [(s) => { s.data.b = true; return s}]', + }, + { + id: 'c', + expression: 'export default [(s) => { s.data.c = true; return s}]', + }, + ], + }, }; const result1: any = await run(plan, { data: { count: 10 } }); @@ -260,40 +282,48 @@ test('run a workflow with state and conditional branching', async (t) => { test('run a workflow with initial state (data key) and optional start', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - // won't run - id: 'a', - expression: 'export default [(s) => { s.data.count +=1 ; return s}]', - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.data.count +=1 ; return s}]', - next: { c: true }, - }, - { - id: 'c', - expression: 'export default [(s) => { s.data.count +=1 ; return s}]', - }, - ], + workflow: { + steps: [ + { + // won't run + id: 'a', + expression: 'export default [(s) => { s.data.count +=1 ; return s}]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s) => { s.data.count +=1 ; return s}]', + next: { c: true }, + }, + { + id: 'c', + expression: 'export default [(s) => { s.data.count +=1 ; return s}]', + }, + ], + }, + options: { + start: 'b', + }, }; - const result: any = await run(plan, { data: { count: 10 } 
}, { start: 'b' }); + const result: any = await run(plan, { data: { count: 10 } }); t.is(result.data.count, 12); }); test('run a workflow with a trigger node', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - next: { b: { condition: 'state.data.age > 18 ' } }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.data.done = true ; return s}]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + next: { b: { condition: 'state.data.age > 18 ' } }, + }, + { + id: 'b', + expression: + 'export default [(s) => { s.data.done = true ; return s}]', + }, + ], + }, }; const result: any = await run(plan, { data: { age: 28 } }); @@ -301,18 +331,20 @@ test('run a workflow with a trigger node', async (t) => { }); test('prefer initial state to inline state', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - state: { - data: { - x: 20, // this will be overriden by the incoming state - y: 20, // This will be untouched + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + state: { + data: { + x: 20, // this will be overriden by the incoming state + y: 20, // This will be untouched + }, }, + expression: 'export default [(s) => s]', }, - expression: 'export default [(s) => s]', - }, - ], + ], + }, }; const result: any = await run(plan, { data: { x: 40 } }); @@ -320,40 +352,11 @@ test('prefer initial state to inline state', async (t) => { t.is(result.data.y, 20); }); -test('do not pass extraneous state in strict mode', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [() => ({ x: 1, data: {}} )]', - }, - ], - }; - - const result: any = await run(plan, {}, { strict: true }); - t.deepEqual(result, { - data: {}, - }); -}); - -test('do pass extraneous state in non-strict mode', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - expression: 'export default [() => ({ x: 1, data: {}} )]', - }, - ], - }; - - const result: any = await run(plan, {}, { strict: 
false }); - t.deepEqual(result, { - x: 1, - data: {}, - }); -}); - test('Allow a job to return undefined', async (t) => { - const plan: ExecutionPlan = { - jobs: [{ expression: 'export default [() => {}]' }], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [{ expression: 'export default [() => {}]' }], + }, }; const result: any = await run(plan); @@ -361,39 +364,43 @@ test('Allow a job to return undefined', async (t) => { }); test('log errors, write to state, and continue', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [() => { throw new Error("test") }]', - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s) => { s.x = 1; return s; }]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [() => { throw new Error("test") }]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s) => { s.x = 1; return s; }]', + }, + ], + }, }; const logger = createMockLogger(); - const result: any = await run(plan, {}, { strict: false, logger }); + const result: any = await run(plan, {}, { logger }); t.is(result.x, 1); t.truthy(result.errors); t.is(result.errors.a.message, 'test'); t.is(result.errors.a.type, 'JobError'); - t.truthy(logger._find('error', /failed job a/i)); + t.truthy(logger._find('error', /failed step a/i)); }); test('log job code to the job logger', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [(s) => { console.log("hi"); return s;}]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [(s) => { console.log("hi"); return s;}]', + }, + ], + }, }; const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); @@ -407,14 +414,16 @@ test('log job code to the job logger', async (t) => { }); test('log and serialize an error to the job logger', async (t) 
=> { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: - 'export default [(s) => { console.log(new Error("hi")); return s;}]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + id: 'a', + expression: + 'export default [(s) => { console.log(new Error("hi")); return s;}]', + }, + ], + }, }; const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); @@ -432,41 +441,45 @@ test('log and serialize an error to the job logger', async (t) => { }); test('error reports can be overwritten', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [() => { throw new Error("test") }]', - next: { b: true }, - }, - { - id: 'b', - expression: 'export default [(s) => ({ errors: 22 })]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [() => { throw new Error("test") }]', + next: { b: true }, + }, + { + id: 'b', + expression: 'export default [(s) => ({ errors: 22 })]', + }, + ], + }, }; const logger = createMockLogger(); - const result: any = await run(plan, {}, { strict: false, logger }); + const result: any = await run(plan, {}, { logger }); t.is(result.errors, 22); }); // This tracks current behaviour but I don't know if it's right test('stuff written to state before an error is preserved', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - data: { x: 0 }, - expression: - 'export default [(s) => { s.x = 1; throw new Error("test") }]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + id: 'a', + data: { x: 0 }, + expression: + 'export default [(s) => { s.x = 1; throw new Error("test") }]', + }, + ], + }, }; const logger = createMockLogger(); - const result: any = await run(plan, {}, { strict: false, logger }); + const result: any = await run(plan, {}, { logger }); t.is(result.x, 1); }); @@ -474,26 +487,28 @@ test('stuff written to state before 
an error is preserved', async (t) => { test('data can be an array (expression)', async (t) => { const expression = 'export default [() => ({ data: [1,2,3] })]'; - const result: any = await run(expression, {}, { strict: false }); + const result: any = await run(expression, {}, {}); t.deepEqual(result.data, [1, 2, 3]); }); test('data can be an array (workflow)', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [() => ({ data: [1,2,3] })]', - next: 'b', - }, - { - id: 'b', - expression: 'export default [(s) => s]', - }, - ], + const plan: ExecutionPlanNoOptions = { + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [() => ({ data: [1,2,3] })]', + next: 'b', + }, + { + id: 'b', + expression: 'export default [(s) => s]', + }, + ], + }, }; - const result: any = await run(plan, {}, { strict: false }); + const result: any = await run(plan, {}, {}); t.deepEqual(result.data, [1, 2, 3]); }); diff --git a/packages/runtime/test/security.test.ts b/packages/runtime/test/security.test.ts index caa8f1dd0..57b9b6836 100644 --- a/packages/runtime/test/security.test.ts +++ b/packages/runtime/test/security.test.ts @@ -1,13 +1,9 @@ // a suite of tests with various security concerns in mind import test from 'ava'; -import doRun from '../src/runtime'; - import { createMockLogger } from '@openfn/logger'; -import { ExecutionPlan } from '../src/types'; +import type { ExecutionPlan, State } from '@openfn/lexicon'; -// Disable strict mode for all these tests -const run = (job: any, state?: any, options: any = {}) => - doRun(job, state, { ...options, strict: false }); +import run from '../src/runtime'; const logger = createMockLogger(undefined, { level: 'default' }); @@ -21,50 +17,32 @@ test.serial( const src = 'export default [(s) => s]'; const state = { - data: true, + data: {}, configuration: { password: 'secret', }, }; + const result: any = await run(src, state); - t.is(result.data, true); + t.deepEqual(result.data, 
{}); t.is(result.configuration, undefined); } ); -test.serial( - 'config should be scrubbed from the result state in strict mode', - async (t) => { - const src = 'export default [(s) => s]'; +test.serial('config should be scrubbed from the result state', async (t) => { + const src = 'export default [(s) => s]'; - const state = { - data: true, - configuration: { - password: 'secret', - }, - }; - const result: any = await run(src, state, { strict: true }); - t.is(result.data, true); - t.is(result.configuration, undefined); - } -); + const state = { + data: {}, + configuration: { + password: 'secret', + }, + }; -test.serial( - 'config should be scrubbed from the result state in non-strict mode', - async (t) => { - const src = 'export default [(s) => s]'; - - const state = { - data: true, - configuration: { - password: 'secret', - }, - }; - const result: any = await run(src, state, { strict: false }); - t.is(result.data, true); - t.is(result.configuration, undefined); - } -); + const result: any = await run(src, state, {}); + t.deepEqual(result.data, {}); + t.is(result.configuration, undefined); +}); test.serial( 'config should be scrubbed from the result state after error', @@ -72,14 +50,15 @@ test.serial( const src = 'export default [(s) => { throw "err" }]'; const state = { - data: true, + data: {}, configuration: { password: 'secret', }, }; - const result: any = await run(src, state, { strict: false }); + + const result: any = await run(src, state, {}); t.truthy(result.errors); - t.is(result.data, true); + t.deepEqual(result.data, {}); t.is(result.configuration, undefined); } ); @@ -99,14 +78,14 @@ test.serial('jobs should not have access to global scope', async (t) => { test.serial('jobs should be able to read global state', async (t) => { const src = 'export default [() => state.data.x]'; - const result: any = await run(src, { data: { x: 42 } }); // typings are a bit tricky + const result: any = await run(src, { data: { x: 42 } }); t.is(result, 42); }); 
test.serial('jobs should be able to mutate global state', async (t) => { const src = 'export default [() => { state.x = 22; return state.x; }]'; - const result: any = await run(src, { data: { x: 42 } }); // typings are a bit tricky + const result: any = await run(src, { data: { x: 42 } }); t.is(result, 22); }); @@ -198,20 +177,22 @@ test.serial( 'jobs in workflow cannot share data through globals (issue #213)', async (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [s => { console.x = 10; return s; }]', - next: { - b: true, + workflow: { + steps: [ + { + id: 'a', + expression: 'export default [s => { console.x = 10; return s; }]', + next: { + b: true, + }, + }, + { + id: 'b', + expression: + 'export default [s => { s.data.x = console.x; return s; }]', }, - }, - { - id: 'b', - expression: - 'export default [s => { s.data.x = console.x; return s; }]', - }, - ], + ], + }, }; const result = await run(plan); diff --git a/packages/runtime/test/util/assemble-state.test.ts b/packages/runtime/test/util/assemble-state.test.ts index 8cb87fd8d..eac478b93 100644 --- a/packages/runtime/test/util/assemble-state.test.ts +++ b/packages/runtime/test/util/assemble-state.test.ts @@ -1,13 +1,11 @@ import test from 'ava'; import assembleState from '../../src/util/assemble-state'; -// TODO: what if iniitial state or data is not an object? -// Is this an error? Maybe just in strict mode? 
- test('with no arguments, returns a basic state object', (t) => { const initial = undefined; const defaultState = undefined; const config = undefined; + const result = assembleState(initial, config, defaultState); t.deepEqual(result, { configuration: {}, @@ -15,34 +13,12 @@ test('with no arguments, returns a basic state object', (t) => { }); }); -test('strict: ignores initial state', (t) => { +test('includes initial state', (t) => { const initial = { x: 22 }; const defaultState = undefined; const config = undefined; - const result = assembleState(initial, config, defaultState, true); - t.deepEqual(result, { - configuration: {}, - data: {}, - }); -}); -test('strict: ignores initial state except references', (t) => { - const initial = { references: [] }; - const defaultState = undefined; - const config = undefined; - const result = assembleState(initial, config, defaultState, true); - t.deepEqual(result, { - references: [], - configuration: {}, - data: {}, - }); -}); - -test('non-strict: includes initial state', (t) => { - const initial = { x: 22 }; - const defaultState = undefined; - const config = undefined; - const result = assembleState(initial, config, defaultState, false); + const result = assembleState(initial, config, defaultState); t.deepEqual(result, { x: 22, configuration: {}, @@ -55,18 +31,14 @@ test('merges default and initial data objects', (t) => { const defaultState = { data: { y: 1 } }; const config = undefined; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: {}, data: { x: 1, y: 1, }, }); - - // Ensure the same behaviour in non-strict mode - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('Initial data is prioritised over default data', (t) => { @@ -74,16 +46,13 @@ test('Initial data is prioritised over default data', (t) => { const 
defaultState = { data: { x: 2 } }; const config = undefined; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: {}, data: { x: 1, }, }); - - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('Initial data does not have to be an object', (t) => { @@ -91,16 +60,11 @@ test('Initial data does not have to be an object', (t) => { const defaultState = { data: {} }; const config = undefined; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: {}, data: [1], }); - - // At this point I don't want any special handling for strict mode, - // see https://github.com/OpenFn/kit/issues/233 - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('merges default and initial config objects', (t) => { @@ -108,18 +72,14 @@ test('merges default and initial config objects', (t) => { const defaultState = undefined; const config = { y: 1 }; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + t.deepEqual(result, { configuration: { x: 1, y: 1, }, data: {}, }); - - // Ensure the same behaviour in non-strict mode - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); test('configuration overrides initialState.configuration', (t) => { @@ -127,15 +87,11 @@ test('configuration overrides initialState.configuration', (t) => { const defaultState = undefined; const config = { x: 2 }; - const strict = assembleState(initial, config, defaultState, true); - t.deepEqual(strict, { + const result = assembleState(initial, config, defaultState); + 
t.deepEqual(result, { configuration: { x: 2, }, data: {}, }); - - // Ensure the same behaviour in non-strict mode - const nonStrict = assembleState(initial, config, defaultState, false); - t.deepEqual(strict, nonStrict); }); diff --git a/packages/runtime/test/util/regex.ts b/packages/runtime/test/util/regex.test.ts similarity index 100% rename from packages/runtime/test/util/regex.ts rename to packages/runtime/test/util/regex.test.ts diff --git a/packages/runtime/test/util/validate-plan.test.ts b/packages/runtime/test/util/validate-plan.test.ts index 451940703..1f0858d06 100644 --- a/packages/runtime/test/util/validate-plan.test.ts +++ b/packages/runtime/test/util/validate-plan.test.ts @@ -1,19 +1,21 @@ import test from 'ava'; -import { ExecutionPlan } from '../../src'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; import validate, { buildModel } from '../../src/util/validate-plan'; +const job = (id: string, next?: Record) => + ({ + id, + next, + expression: '.', + } as Job); + test('builds a simple model', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - }, - ], + options: {}, + workflow: { + steps: [job('a', { b: true }), job('b')], + }, }; const model = buildModel(plan); @@ -31,17 +33,10 @@ test('builds a simple model', (t) => { test('builds a more complex model', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - next: { c: true, a: true }, - }, - { id: 'c' }, - ], + options: {}, + workflow: { + steps: [job('a', { b: true }), job('b', { c: true, a: true }), job('c')], + }, }; const model = buildModel(plan); @@ -63,16 +58,10 @@ test('builds a more complex model', (t) => { test('throws for a circular dependency', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - next: { a: true }, - }, - ], + options: {}, + workflow: { + steps: [job('a', { b: true }), job('b', { a: true })], + }, 
}; t.throws(() => validate(plan), { @@ -82,20 +71,14 @@ test('throws for a circular dependency', (t) => { test('throws for an indirect circular dependency', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true }, - }, - { - id: 'b', - next: { c: true }, - }, - { - id: 'c', - next: { a: true }, - }, - ], + options: {}, + workflow: { + steps: [ + job('a', { b: true }), + job('b', { c: true }), + job('c', { a: true }), + ], + }, }; t.throws(() => validate(plan), { @@ -105,22 +88,17 @@ test('throws for an indirect circular dependency', (t) => { test('throws for a multiple inputs', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - next: { b: true, c: true }, - }, - { - id: 'b', - next: { z: true }, - }, - { - id: 'c', - next: { z: true }, - }, - { id: 'z' }, - ], + options: {}, + workflow: { + steps: [ + job('a', { b: true, c: true }), + job('b', { z: true }), + job('c', { z: true }), + job('z'), + ], + }, }; + t.throws(() => validate(plan), { message: 'Multiple dependencies detected for: z', }); @@ -128,12 +106,12 @@ test('throws for a multiple inputs', (t) => { test('throws for a an unknown job', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - next: { z: true }, - }, - ], + options: {}, + workflow: { + steps: [job('next', { z: true })], + }, }; + t.throws(() => validate(plan), { message: 'Cannot find job: z', }); @@ -141,11 +119,15 @@ test('throws for a an unknown job', (t) => { test('throws for a an unknown job with shorthand syntax', (t) => { const plan: ExecutionPlan = { - jobs: [ - { - next: 'z', - }, - ], + options: {}, + workflow: { + steps: [ + { + next: 'z', + expression: '.', + }, + ], + }, }; t.throws(() => validate(plan), { message: 'Cannot find job: z', @@ -154,9 +136,14 @@ test('throws for a an unknown job with shorthand syntax', (t) => { test('throws for invalid string start', (t) => { const plan: ExecutionPlan = { - start: 'z', - jobs: [{ id: 'a' }], + options: { + start: 'z', + }, + workflow: { + 
steps: [job('a')], + }, }; + t.throws(() => validate(plan), { message: 'Could not find start job: z', }); diff --git a/packages/ws-worker/package.json b/packages/ws-worker/package.json index 670a14606..b70a1fc73 100644 --- a/packages/ws-worker/package.json +++ b/packages/ws-worker/package.json @@ -22,6 +22,7 @@ "dependencies": { "@koa/router": "^12.0.0", "@openfn/engine-multi": "workspace:*", + "@openfn/lexicon": "workspace:^", "@openfn/logger": "workspace:*", "@openfn/runtime": "workspace:*", "@types/koa-logger": "^3.1.2", diff --git a/packages/ws-worker/src/api/claim.ts b/packages/ws-worker/src/api/claim.ts index 18d2e68d0..a54d7ebda 100644 --- a/packages/ws-worker/src/api/claim.ts +++ b/packages/ws-worker/src/api/claim.ts @@ -1,5 +1,6 @@ import { Logger, createMockLogger } from '@openfn/logger'; -import { CLAIM, ClaimPayload, ClaimReply } from '../events'; +import { ClaimPayload, ClaimReply } from '@openfn/lexicon/lightning'; +import { CLAIM } from '../events'; import type { ServerApp } from '../server'; diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 9ce817bf4..14a9640e6 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -1,32 +1,36 @@ +import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; +import type { + RunLogPayload, + RunStartPayload, + LightningPlanOptions, +} from '@openfn/lexicon/lightning'; +import type { Logger } from '@openfn/logger'; +import type { + RuntimeEngine, + Resolvers, + WorkflowStartPayload, +} from '@openfn/engine-multi'; + +import { + getWithReply, + createRunState, + throttle as createThrottle, +} from '../util'; import { RUN_COMPLETE, RUN_LOG, - RunLogPayload, RUN_START, - RunStartPayload, - GET_CREDENTIAL, GET_DATACLIP, STEP_COMPLETE, STEP_START, + GET_CREDENTIAL, } from '../events'; -import { - getWithReply, - createRunState, - throttle as createThrottle, -} from '../util'; import handleStepComplete from 
'../events/step-complete'; import handleStepStart from '../events/step-start'; import handleRunComplete from '../events/run-complete'; import handleRunError from '../events/run-error'; -import type { RunOptions, Channel, RunState, JSONLog } from '../types'; -import type { Logger } from '@openfn/logger'; -import type { - RuntimeEngine, - Resolvers, - WorkflowStartPayload, -} from '@openfn/engine-multi'; -import type { ExecutionPlan } from '@openfn/runtime'; +import type { Channel, RunState, JSONLog } from '../types'; const enc = new TextDecoder('utf-8'); @@ -58,12 +62,13 @@ export function execute( engine: RuntimeEngine, logger: Logger, plan: ExecutionPlan, - options: RunOptions = {}, + input: Lazy, + options: LightningPlanOptions = {}, onFinish = (_result: any) => {} ) { logger.info('executing ', plan.id); - const state = createRunState(plan, options); + const state = createRunState(plan, input); const context: Context = { channel, state, logger, engine, onFinish }; @@ -130,18 +135,19 @@ export function execute( .then(async () => { // TODO we need to remove this from here and let the runtime take care of it through // the resolver. See https://github.com/OpenFn/kit/issues/403 - if (typeof plan.initialState === 'string') { - logger.debug('loading dataclip', plan.initialState); - plan.initialState = await loadDataclip(channel, plan.initialState); + // TODO come back and work out how initial state will work + if (typeof input === 'string') { + logger.debug('loading dataclip', input); + const loadedInput = await loadDataclip(channel, input); logger.success('dataclip loaded'); - logger.debug(plan.initialState); + return loadedInput; } - return plan; + return input; }) // Execute (which we have to wrap in a promise chain to handle initial state) - .then(() => { + .then((input: State) => { try { - engine.execute(plan, { resolvers, ...options }); + engine.execute(plan, input, { resolvers, ...options }); } catch (e: any) { // TODO what if there's an error? 
handleRunError(context, { diff --git a/packages/ws-worker/src/api/reasons.ts b/packages/ws-worker/src/api/reasons.ts index 73fbd0661..37411a020 100644 --- a/packages/ws-worker/src/api/reasons.ts +++ b/packages/ws-worker/src/api/reasons.ts @@ -1,11 +1,6 @@ -import type { - ExitReason, - ExitReasonStrings, - State, - RunState, -} from '../types'; - -import type { JobNode } from '@openfn/runtime'; +import { State, Step } from '@openfn/lexicon'; +import { ExitReason, ExitReasonStrings } from '@openfn/lexicon/lightning'; +import type { RunState } from '../types'; // This takes the result state and error from the job const calculateJobExitReason = ( @@ -30,7 +25,7 @@ const calculateJobExitReason = ( }; // It has next jobs, but they weren't executed -const isLeafNode = (state: RunState, job: JobNode) => { +const isLeafNode = (state: RunState, job: Step) => { // A node is a leaf if: // It has no `next` jobs at all if (!job.next || Object.keys(job.next).length == 0) { @@ -47,11 +42,11 @@ const calculateRunExitReason = (state: RunState): ExitReason => { // basically becomes the exit reason // So If we get here, we basically just need to look to see if there's a fail on a leaf node // (we ignore fails on non-leaf nodes) - const leafJobReasons: ExitReason[] = state.plan.jobs - .filter((job: JobNode) => isLeafNode(state, job)) + const leafJobReasons: ExitReason[] = state.plan.workflow.steps + .filter((job) => isLeafNode(state, job)) // TODO what if somehow there is no exit reason for a job? // This implies some kind of exception error, no? 
- .map(({ id }: JobNode) => state.reasons[id!]); + .map(({ id }) => state.reasons[id!]); const fail = leafJobReasons.find((r) => r && r.reason === 'fail'); if (fail) { diff --git a/packages/ws-worker/src/channels/run.ts b/packages/ws-worker/src/channels/run.ts index f7a1dffdd..104fba82d 100644 --- a/packages/ws-worker/src/channels/run.ts +++ b/packages/ws-worker/src/channels/run.ts @@ -1,11 +1,16 @@ -import convertRun from '../util/convert-run'; -import { getWithReply } from '../util'; -import { Run, RunOptions, Channel, Socket } from '../types'; -import { ExecutionPlan } from '@openfn/runtime'; -import { GET_PLAN, GetPlanReply } from '../events'; - +import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; +import type { + GetPlanReply, + LightningPlan, + LightningPlanOptions, +} from '@openfn/lexicon/lightning'; import type { Logger } from '@openfn/logger'; +import { getWithReply } from '../util'; +import convertRun from '../util/convert-lightning-plan'; +import { GET_PLAN } from '../events'; +import type { Channel, Socket } from '../types'; + // TODO what happens if this channel join fails? 
// Lightning could vanish, channel could error on its side, or auth could be wrong // We don't have a good feedback mechanism yet - worker:queue is the only channel @@ -20,7 +25,8 @@ const joinRunChannel = ( return new Promise<{ channel: Channel; plan: ExecutionPlan; - options: RunOptions; + options: LightningPlanOptions; + input: Lazy; }>((resolve, reject) => { // TMP - lightning seems to be sending two responses to me // just for now, I'm gonna gate the handling here @@ -36,9 +42,9 @@ const joinRunChannel = ( if (!didReceiveOk) { didReceiveOk = true; logger.success(`connected to ${channelName}`, e); - const { plan, options } = await loadRun(channel); + const { plan, options, input } = await loadRun(channel); logger.debug('converted run as execution plan:', plan); - resolve({ channel, plan, options }); + resolve({ channel, plan, options, input }); } }) .receive('error', (err: any) => { @@ -54,5 +60,5 @@ export async function loadRun(channel: Channel) { // first we get the run body through the socket const runBody = await getWithReply(channel, GET_PLAN); // then we generate the execution plan - return convertRun(runBody as Run); + return convertRun(runBody as LightningPlan); } diff --git a/packages/ws-worker/src/channels/worker-queue.ts b/packages/ws-worker/src/channels/worker-queue.ts index c961c0906..9ec76e659 100644 --- a/packages/ws-worker/src/channels/worker-queue.ts +++ b/packages/ws-worker/src/channels/worker-queue.ts @@ -1,7 +1,7 @@ import EventEmitter from 'node:events'; import { Socket as PhxSocket } from 'phoenix'; import { WebSocket } from 'ws'; - +import { API_VERSION } from '@openfn/lexicon/lightning'; import generateWorkerToken from '../util/worker-token'; import type { Logger } from '@openfn/logger'; @@ -16,10 +16,20 @@ const connectToWorkerQueue = ( ) => { const events = new EventEmitter(); - generateWorkerToken(secret, serverId, logger).then((token) => { + generateWorkerToken(secret, serverId, logger).then(async (token) => { + const pkg = await 
import('../../package.json', { + assert: { type: 'json' }, + }); + + const params = { + token, + api_version: API_VERSION, + worker_version: pkg.default.version, + }; + // @ts-ignore ts doesn't like the constructor here at all const socket = new SocketConstructor(endpoint, { - params: { token }, + params, transport: WebSocket, }); diff --git a/packages/ws-worker/src/events.ts b/packages/ws-worker/src/events.ts index 89cdeffca..fc157d5d8 100644 --- a/packages/ws-worker/src/events.ts +++ b/packages/ws-worker/src/events.ts @@ -1,69 +1,43 @@ -import { Run, ExitReason } from './types'; +import * as l from '@openfn/lexicon/lightning'; // These are worker-lightning events, used in the websocket - export const CLAIM = 'claim'; - -export type ClaimPayload = { demand?: number }; -export type ClaimReply = { runs: Array }; -export type ClaimRun = { id: string; token: string }; - export const GET_PLAN = 'fetch:plan'; -export type GetPlanPayload = void; // no payload -export type GetPlanReply = Run; - -export const GET_CREDENTIAL = 'fetch:credential'; -export type GetCredentialPayload = { id: string }; -// credential in-line, no wrapper, arbitrary data -export type GetCredentialReply = {}; - export const GET_DATACLIP = 'fetch:dataclip'; -export type GetDataclipPayload = { id: string }; -export type GetDataClipReply = Uint8Array; // represents a json string Run - -export const RUN_START = 'run:start'; // runId, timestamp -export type RunStartPayload = void; // no payload -export type RunStartReply = {}; // no payload +export const GET_CREDENTIAL = 'fetch:credential'; +export const RUN_START = 'run:start'; +export const RUN_COMPLETE = 'run:complete'; +export const RUN_LOG = 'run:log'; +export const STEP_START = 'step:start'; +export const STEP_COMPLETE = 'step:complete'; +export const INTERNAL_RUN_COMPLETE = 'server:run-complete'; -export const RUN_COMPLETE = 'run:complete'; // runId, timestamp, result, stats -export type RunCompletePayload = ExitReason & { - final_dataclip_id?: 
string; // TODO this will be removed soon +export type QueueEvents = { + [CLAIM]: l.ClaimPayload; }; -export type RunCompleteReply = undefined; -export const RUN_LOG = 'run:log'; // level, namespace (job,runtime,adaptor), message, time -export type RunLogPayload = { - message: Array; - timestamp: string; - run_id: string; - level?: string; - source?: string; // namespace - job_id?: string; - step_id?: string; +export type QueueEventReplies = { + [CLAIM]: l.ClaimReply; }; -export type RunLogReply = void; -export const STEP_START = 'step:start'; -export type StepStartPayload = { - job_id: string; - step_id: string; - run_id?: string; - input_dataclip_id?: string; - versions: Record; +export type RunEvents = { + [GET_PLAN]: l.GetPlanPayload; + [GET_CREDENTIAL]: l.GetCredentialPayload; + [GET_DATACLIP]: l.GetDataclipPayload; + [RUN_START]: l.RunStartPayload; + [RUN_COMPLETE]: l.RunCompletePayload; + [RUN_LOG]: l.RunLogPayload; + [STEP_START]: l.StepStartPayload; + [STEP_COMPLETE]: l.StepCompletePayload; }; -export type StepStartReply = void; -export const STEP_COMPLETE = 'step:complete'; -export type StepCompletePayload = ExitReason & { - run_id?: string; - job_id: string; - step_id: string; - output_dataclip?: string; - output_dataclip_id?: string; +export type RunReplies = { + [GET_PLAN]: l.GetPlanReply; + [GET_CREDENTIAL]: l.GetCredentialReply; + [GET_DATACLIP]: l.GetDataClipReply; + [RUN_START]: l.RunStartReply; + [RUN_COMPLETE]: l.RunCompleteReply; + [RUN_LOG]: l.RunLogReply; + [STEP_START]: l.StepStartReply; + [STEP_COMPLETE]: l.StepCompleteReply; }; -export type StepCompleteReply = void; - -// These are internal server events -// Explicitly (and awkwardly) namespaced to avoid confusion - -export const INTERNAL_RUN_COMPLETE = 'server:run-complete'; diff --git a/packages/ws-worker/src/events/run-complete.ts b/packages/ws-worker/src/events/run-complete.ts index 1554fb569..75c52f351 100644 --- a/packages/ws-worker/src/events/run-complete.ts +++ 
b/packages/ws-worker/src/events/run-complete.ts @@ -1,6 +1,7 @@ import type { WorkflowCompletePayload } from '@openfn/engine-multi'; +import type { RunCompletePayload } from '@openfn/lexicon/lightning'; -import { RUN_COMPLETE, RunCompletePayload } from '../events'; +import { RUN_COMPLETE } from '../events'; import { calculateRunExitReason } from '../api/reasons'; import { sendEvent, Context } from '../api/execute'; import logFinalReason from '../util/log-final-reason'; diff --git a/packages/ws-worker/src/events/run-error.ts b/packages/ws-worker/src/events/run-error.ts index 7f8375c64..d37feb5ca 100644 --- a/packages/ws-worker/src/events/run-error.ts +++ b/packages/ws-worker/src/events/run-error.ts @@ -1,8 +1,8 @@ -import { calculateJobExitReason } from '../api/reasons'; - +import type { RunCompletePayload } from '@openfn/lexicon/lightning'; import type { WorkflowErrorPayload } from '@openfn/engine-multi'; -import { RUN_COMPLETE, RunCompletePayload } from '../events'; +import { calculateJobExitReason } from '../api/reasons'; +import { RUN_COMPLETE } from '../events'; import { sendEvent, Context, onJobError } from '../api/execute'; import logFinalReason from '../util/log-final-reason'; diff --git a/packages/ws-worker/src/events/step-complete.ts b/packages/ws-worker/src/events/step-complete.ts index 5400dc897..51c5bfe8d 100644 --- a/packages/ws-worker/src/events/step-complete.ts +++ b/packages/ws-worker/src/events/step-complete.ts @@ -1,12 +1,12 @@ import crypto from 'node:crypto'; +import type { StepCompletePayload } from '@openfn/lexicon/lightning'; +import type { JobCompletePayload } from '@openfn/engine-multi'; -import { STEP_COMPLETE, StepCompletePayload } from '../events'; +import { STEP_COMPLETE } from '../events'; import { stringify } from '../util'; import { calculateJobExitReason } from '../api/reasons'; import { sendEvent, Context } from '../api/execute'; -import type { JobCompletePayload } from '@openfn/engine-multi'; - export default function 
onStepComplete( { channel, state }: Context, event: JobCompletePayload, diff --git a/packages/ws-worker/src/events/step-start.ts b/packages/ws-worker/src/events/step-start.ts index 9703fb0e5..561652431 100644 --- a/packages/ws-worker/src/events/step-start.ts +++ b/packages/ws-worker/src/events/step-start.ts @@ -1,9 +1,11 @@ import crypto from 'node:crypto'; -import { JobStartPayload } from '@openfn/engine-multi'; import { timestamp } from '@openfn/logger'; +import { JobStartPayload } from '@openfn/engine-multi'; +import type { Job } from '@openfn/lexicon'; +import type { StepStartPayload } from '@openfn/lexicon/lightning'; import pkg from '../../package.json' assert { type: 'json' }; -import { STEP_START, StepStartPayload } from '../events'; +import { STEP_START } from '../events'; import { sendEvent, Context, onJobLog } from '../api/execute'; import calculateVersionString from '../util/versions'; @@ -20,7 +22,9 @@ export default async function onStepStart( state.activeStep = crypto.randomUUID(); state.activeJob = event.jobId; - const job = state.plan.jobs.find(({ id }) => id === event.jobId); + const job = state.plan.workflow.steps.find( + ({ id }) => id === event.jobId + ) as Job; const input_dataclip_id = state.inputDataclips[event.jobId]; diff --git a/packages/ws-worker/src/mock/resolvers.ts b/packages/ws-worker/src/mock/resolvers.ts index 489107e95..25ad81559 100644 --- a/packages/ws-worker/src/mock/resolvers.ts +++ b/packages/ws-worker/src/mock/resolvers.ts @@ -1,4 +1,5 @@ -import type { State, Credential } from '../types'; +import type { State } from '@openfn/lexicon'; +import type { Credential } from '@openfn/lexicon/lightning'; import { Resolvers } from '@openfn/engine-multi'; const mockResolveCredential = (_credId: string) => diff --git a/packages/ws-worker/src/mock/runtime-engine.ts b/packages/ws-worker/src/mock/runtime-engine.ts index b8f2741e5..f96541056 100644 --- a/packages/ws-worker/src/mock/runtime-engine.ts +++ 
b/packages/ws-worker/src/mock/runtime-engine.ts @@ -1,9 +1,11 @@ import { EventEmitter } from 'node:events'; import crypto from 'node:crypto'; -import run, { ExecutionPlan } from '@openfn/runtime'; +import run from '@openfn/runtime'; import * as engine from '@openfn/engine-multi'; +import type { ExecutionPlan, Job, State } from '@openfn/lexicon'; import mockResolvers from './resolvers'; +import { RuntimeEngine } from '@openfn/engine-multi'; export type EngineEvent = | typeof engine.JOB_COMPLETE @@ -13,23 +15,6 @@ export type EngineEvent = | typeof engine.WORKFLOW_LOG | typeof engine.WORKFLOW_START; -export type WorkflowStartEvent = { - workflowId: string; - threadId: string; -}; - -export type WorkflowCompleteEvent = { - workflowId: string; - error?: any; // hmm maybe not - threadId: string; -}; - -export type WorkflowErrorEvent = { - workflowId: string; - threadId: string; - message: string; -}; - // this is basically a fake adaptor // these functions will be injected into scope const helpers = { @@ -75,16 +60,19 @@ async function createMock() { const execute = async ( xplan: ExecutionPlan, + input: State, options: { resolvers?: engine.Resolvers; throw?: boolean } = { resolvers: mockResolvers, } ) => { - const { id, jobs } = xplan; + const { id } = xplan; + const { steps } = xplan.workflow; activeWorkflows[id!] 
= true; const threadId = crypto.randomUUID(); - for (const job of jobs) { + for (const step of steps) { + const job = step as Job; if (typeof job.configuration === 'string') { // Call the crendtial callback, but don't do anything with it job.configuration = await options.resolvers?.credential?.( @@ -134,7 +122,7 @@ async function createMock() { dispatch('workflow-start', { workflowId: id, threadId: threadId }); try { - await run(xplan, undefined, opts as any); + await run(xplan, input, opts as any); dispatch('workflow-complete', { workflowId: id, threadId: threadId }); } catch (e: any) { dispatch('workflow-error', { @@ -168,7 +156,7 @@ async function createMock() { getStatus, listen, destroy, - }; + } as unknown as RuntimeEngine; } export default createMock; diff --git a/packages/ws-worker/src/mock/sockets.ts b/packages/ws-worker/src/mock/sockets.ts index 942e7c7f7..172d0b6a5 100644 --- a/packages/ws-worker/src/mock/sockets.ts +++ b/packages/ws-worker/src/mock/sockets.ts @@ -1,7 +1,9 @@ type EventHandler = (evt?: any) => void; // Mock websocket implementations -export const mockChannel = (callbacks: Record = {}) => { +export const mockChannel = ( + callbacks: Record = {} +): any => { const c = { on: (event: string, fn: EventHandler) => { // TODO support multiple callbacks @@ -61,6 +63,7 @@ export const mockChannel = (callbacks: Record = {}) => { }; return receive; }, + leave: () => {}, }; return c; }; diff --git a/packages/ws-worker/src/server.ts b/packages/ws-worker/src/server.ts index 6616dd4ef..6bd6fb217 100644 --- a/packages/ws-worker/src/server.ts +++ b/packages/ws-worker/src/server.ts @@ -5,8 +5,8 @@ import koaLogger from 'koa-logger'; import Router from '@koa/router'; import { humanId } from 'human-id'; import { createMockLogger, Logger } from '@openfn/logger'; - -import { INTERNAL_RUN_COMPLETE, ClaimRun } from './events'; +import { ClaimRun } from '@openfn/lexicon/lightning'; +import { INTERNAL_RUN_COMPLETE } from './events'; import destroy from 
'./api/destroy'; import startWorkloop from './api/workloop'; import claim from './api/claim'; @@ -162,6 +162,7 @@ function createServer(engine: RuntimeEngine, options: ServerOptions = {}) { channel: runChannel, plan, options, + input, } = await joinRunChannel(app.socket, token, id, logger); // Callback to be triggered when the work is done (including errors) @@ -176,6 +177,7 @@ function createServer(engine: RuntimeEngine, options: ServerOptions = {}) { engine, logger, plan, + input, options, onFinish ); diff --git a/packages/ws-worker/src/types.d.ts b/packages/ws-worker/src/types.d.ts index 8cc0709dd..a9d09fbeb 100644 --- a/packages/ws-worker/src/types.d.ts +++ b/packages/ws-worker/src/types.d.ts @@ -1,90 +1,15 @@ import { SanitizePolicies } from '@openfn/logger'; +import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; import type { Channel as PhxChannel } from 'phoenix'; -import type { ExecutionPlan } from '@openfn/runtime'; export { Socket }; -export type Credential = Record; - -export type State = { - data: { - [key: string]: any; - }; - configuration?: { - [key: string]: any; - }; - errors?: { - [jobId: string]: { - type: string; - message: string; - }; - }; - - // technically there should be nothing here - [key: string]: any; -}; - -export type ExitReasonStrings = - | 'success' - | 'fail' - | 'crash' - | 'kill' - | 'cancel' - | 'exception'; - -export type ExitReason = { - reason: ExitReasonStrings; - error_message: string | null; - error_type: string | null; -}; - -export type Node = { - id: string; - body?: string; - adaptor?: string; - credential?: object; - credential_id?: string; - type?: 'webhook' | 'cron'; // trigger only - state?: any; // Initial state / defaults -}; - -export interface Edge { - id: string; - source_job_id?: string; - source_trigger_id?: string; - target_job_id: string; - name?: string; - condition?: string; - error_path?: boolean; - errors?: any; - enabled?: boolean; -} - -// An run object returned by Lightning -export type 
Run = { - id: string; - dataclip_id: string; - starting_node_id: string; - - triggers: Node[]; - jobs: Node[]; - edges: Edge[]; - - options?: RunOptions; -}; - -export type RunOptions = { - runTimeoutMs?: number; - - sanitize?: SanitizePolicies; -}; - // Internal server state for each run export type RunState = { activeStep?: string; activeJob?: string; plan: ExecutionPlan; - options: RunOptions; + input: Lazy; dataclips: Record; // For each run, map the input ids // TODO better name maybe? diff --git a/packages/ws-worker/src/util/convert-run.ts b/packages/ws-worker/src/util/convert-lightning-plan.ts similarity index 52% rename from packages/ws-worker/src/util/convert-run.ts rename to packages/ws-worker/src/util/convert-lightning-plan.ts index 35b200de2..df2427a3d 100644 --- a/packages/ws-worker/src/util/convert-run.ts +++ b/packages/ws-worker/src/util/convert-lightning-plan.ts @@ -1,11 +1,17 @@ import crypto from 'node:crypto'; import type { - JobNode, - JobNodeID, - JobEdge, + Step, + StepId, ExecutionPlan, -} from '@openfn/runtime'; -import { Run, RunOptions, Edge } from '../types'; + State, + Job, + Trigger, + StepEdge, + WorkflowOptions, + Lazy, +} from '@openfn/lexicon'; +import { LightningPlan, Edge } from '@openfn/lexicon/lightning'; +import { ExecuteOptions } from '@openfn/engine-multi'; export const conditions: Record string | null> = { @@ -33,36 +39,44 @@ const mapTriggerEdgeCondition = (edge: Edge) => { return condition; }; -const mapOptions = (options: RunOptions): RunOptions => { - return options; -}; - export default ( - run: Run -): { plan: ExecutionPlan; options: RunOptions } => { - const options = run.options || {}; + run: LightningPlan +): { plan: ExecutionPlan; options: ExecuteOptions; input: Lazy } => { + // Some options get mapped straight through to the runtime's workflow options + // TODO or maybe not? Maybe they're all sent to the engine instead? 
+ const runtimeOpts: Omit = {}; + + // But some need to get passed down into the engine's options + const engineOpts: ExecuteOptions = {}; + + if (run.options?.runTimeoutMs) { + engineOpts.runTimeoutMs = run.options.runTimeoutMs; + } + if (run.options?.sanitize) { + engineOpts.sanitize = run.options.sanitize; + } + const plan: Partial = { id: run.id, + options: runtimeOpts, }; + let initialState; if (run.dataclip_id) { - // This is tricky - we're assining a string to the XPlan - // which is fine becuase it'll be handled later - // I guess we need a new type for now? Like a lazy XPlan - // @ts-ignore - plan.initialState = run.dataclip_id; + initialState = run.dataclip_id; } + if (run.starting_node_id) { - plan.start = run.starting_node_id; + runtimeOpts.start = run.starting_node_id; } - const nodes: Record = {}; + const nodes: Record = {}; - const edges = run.edges ?? []; + const edges: Edge[] = run.edges ?? []; // We don't really care about triggers, it's mostly just a empty node if (run.triggers?.length) { - run.triggers.forEach((trigger) => { + run.triggers.forEach((trigger: Trigger) => { const id = trigger.id || 'trigger'; nodes[id] = { @@ -72,13 +86,16 @@ export default ( // TODO do we need to support multiple edges here? Likely const connectedEdges = edges.filter((e) => e.source_trigger_id === id); if (connectedEdges.length) { - nodes[id].next = connectedEdges.reduce((obj, edge) => { - if (edge.enabled !== false) { - // @ts-ignore - obj[edge.target_job_id] = mapTriggerEdgeCondition(edge); - } - return obj; - }, {}); + nodes[id].next = connectedEdges.reduce( + (obj: Partial, edge) => { + if (edge.enabled !== false) { + // @ts-ignore + obj[edge.target_job_id] = mapTriggerEdgeCondition(edge); + } + return obj; + }, + {} + ); } else { // TODO what if the edge isn't found? 
} @@ -86,25 +103,27 @@ export default ( } if (run.jobs?.length) { - run.jobs.forEach((job) => { - const id = job.id || crypto.randomUUID(); - - nodes[id] = { + run.jobs.forEach((step) => { + const id = step.id || crypto.randomUUID(); + const job: Job = { id, - configuration: job.credential || job.credential_id, - expression: job.body, - adaptor: job.adaptor, + configuration: step.credential || step.credential_id, + expression: step.body!, + adaptor: step.adaptor, }; - if (job.state) { - // TODO this is likely to change - nodes[id].state = job.state; + if (step.name) { + job.name = step.name; + } + + if (step.state) { + job.state = step.state; } const next = edges .filter((e) => e.source_job_id === id) .reduce((obj, edge) => { - const newEdge: JobEdge = {}; + const newEdge: StepEdge = {}; const condition = mapEdgeCondition(edge); if (condition) { @@ -117,18 +136,27 @@ export default ( ? newEdge : true; return obj; - }, {} as Record); + }, {} as Record); if (Object.keys(next).length) { - nodes[id].next = next; + job.next = next; } + + nodes[id] = job; }); } - plan.jobs = Object.values(nodes); + plan.workflow = { + steps: Object.values(nodes), + }; + + if (run.name) { + plan.workflow.name = run.name; + } return { plan: plan as ExecutionPlan, - options: mapOptions(options), + options: engineOpts, + input: initialState || {}, }; }; diff --git a/packages/ws-worker/src/util/create-run-state.ts b/packages/ws-worker/src/util/create-run-state.ts index b9134e73a..7227da833 100644 --- a/packages/ws-worker/src/util/create-run-state.ts +++ b/packages/ws-worker/src/util/create-run-state.ts @@ -1,20 +1,17 @@ -import type { ExecutionPlan } from '@openfn/runtime'; -import type { RunOptions, RunState } from '../types'; +import type { ExecutionPlan, Job, Lazy, State } from '@openfn/lexicon'; +import type { RunState } from '../types'; -export default ( - plan: ExecutionPlan, - options: RunOptions = {} -): RunState => { +export default (plan: ExecutionPlan, input?: Lazy): RunState => { 
const state = { - plan, lastDataclipId: '', dataclips: {}, inputDataclips: {}, reasons: {}, - options, + plan, + input, } as RunState; - if (typeof plan.initialState === 'string') { + if (typeof input === 'string') { // We need to initialise inputDataclips so that the first run // has its inputDataclip set properly // Difficulty: the starting node is a trigger and NOT a run @@ -22,9 +19,10 @@ export default ( // and set the input state on THAT // find the first job - let startNode = plan.jobs[0]; - if (plan.start) { - startNode = plan.jobs.find(({ id }) => id === plan.start)!; + const jobs = plan.workflow.steps as Job[]; + let startNode = jobs[0]; + if (plan.options.start) { + startNode = jobs.find(({ id }) => id === plan.options.start)!; } // TODO throw with validation error of some kind if this node could not be found @@ -40,7 +38,7 @@ export default ( // For any runs downstream of the initial state, // Set up the input dataclip initialRuns.forEach((id) => { - state.inputDataclips[id] = plan.initialState as string; + state.inputDataclips[id] = input; }); } else { // what if initial state is an object? 
diff --git a/packages/ws-worker/src/util/index.ts b/packages/ws-worker/src/util/index.ts index 6c9b2b0e3..776d274e5 100644 --- a/packages/ws-worker/src/util/index.ts +++ b/packages/ws-worker/src/util/index.ts @@ -1,4 +1,4 @@ -import convertRun from './convert-run'; +import convertRun from './convert-lightning-plan'; import tryWithBackoff from './try-with-backoff'; import getWithReply from './get-with-reply'; import stringify from './stringify'; diff --git a/packages/ws-worker/src/util/log-final-reason.ts b/packages/ws-worker/src/util/log-final-reason.ts index aaa37c492..4a2f83981 100644 --- a/packages/ws-worker/src/util/log-final-reason.ts +++ b/packages/ws-worker/src/util/log-final-reason.ts @@ -1,6 +1,6 @@ import { timestamp } from '@openfn/logger'; +import { ExitReason } from '@openfn/lexicon/lightning'; import { Context, onJobLog } from '../api/execute'; -import { ExitReason } from '../types'; export default async (context: Context, reason: ExitReason) => { const time = (timestamp() - BigInt(10e6)).toString(); diff --git a/packages/ws-worker/test/api/destroy.test.ts b/packages/ws-worker/test/api/destroy.test.ts index 8fa2c26ea..2b47105c1 100644 --- a/packages/ws-worker/test/api/destroy.test.ts +++ b/packages/ws-worker/test/api/destroy.test.ts @@ -1,23 +1,23 @@ import test from 'ava'; import crypto from 'node:crypto'; - import createLightningServer from '@openfn/lightning-mock'; +import { createMockLogger } from '@openfn/logger'; +import { LightningPlan } from '@openfn/lexicon/lightning'; + import createWorker from '../../src/server'; import createMockRTE from '../../src/mock/runtime-engine'; - import destroy from '../../src/api/destroy'; -import { createMockLogger } from '@openfn/logger'; -import { Run } from '../../src/types'; const workerPort = 9876; const lightningPort = workerPort + 1; const logger = createMockLogger(); const lightning = createLightningServer({ port: lightningPort }); -let worker; + +let worker: any; test.beforeEach(async () => { - const 
engine = await createMockRTE(); + const engine: any = await createMockRTE(); worker = createWorker(engine, { logger, @@ -40,7 +40,7 @@ const createRun = () => body: `wait(${500 + Math.random() * 1000})`, }, ], - } as Run); + } as LightningPlan); const waitForClaim = (timeout: number = 1000) => new Promise((resolve) => { @@ -120,46 +120,43 @@ test.serial('destroy a worker while one run is active', async (t) => { }); }); -test.serial( - 'destroy a worker while multiple runs are active', - async (t) => { - return new Promise((done) => { - let completeCount = 0; - let startCount = 0; - - const doDestroy = async () => { - await destroy(worker, logger); +test.serial('destroy a worker while multiple runs are active', async (t) => { + return new Promise((done) => { + let completeCount = 0; + let startCount = 0; - // Ensure all three runs completed - t.is(completeCount, 3); + const doDestroy = async () => { + await destroy(worker, logger); - // should not respond to get - t.false(await ping()); - // should not be claiming - t.false(await waitForClaim()); + // Ensure all three runs completed + t.is(completeCount, 3); - done(); - }; + // should not respond to get + t.false(await ping()); + // should not be claiming + t.false(await waitForClaim()); - lightning.on('run:start', () => { - startCount++; + done(); + }; - // Let all three workflows start before we kill the server - if (startCount === 3) { - doDestroy(); - } - }); + lightning.on('run:start', () => { + startCount++; - lightning.on('run:complete', () => { - completeCount++; - }); + // Let all three workflows start before we kill the server + if (startCount === 3) { + doDestroy(); + } + }); - lightning.enqueueRun(createRun()); - lightning.enqueueRun(createRun()); - lightning.enqueueRun(createRun()); + lightning.on('run:complete', () => { + completeCount++; }); - } -); + + lightning.enqueueRun(createRun()); + lightning.enqueueRun(createRun()); + lightning.enqueueRun(createRun()); + }); +}); test("don't claim after 
destroy", (t) => { return new Promise((done) => { diff --git a/packages/ws-worker/test/api/execute.test.ts b/packages/ws-worker/test/api/execute.test.ts index 3d23375ba..b1148424b 100644 --- a/packages/ws-worker/test/api/execute.test.ts +++ b/packages/ws-worker/test/api/execute.test.ts @@ -1,5 +1,6 @@ import test from 'ava'; import { createMockLogger } from '@openfn/logger'; +import type { ExecutionPlan } from '@openfn/lexicon'; import { STEP_START, @@ -23,8 +24,7 @@ import createMockRTE from '../../src/mock/runtime-engine'; import { mockChannel } from '../../src/mock/sockets'; import { stringify, createRunState } from '../../src/util'; -import type { ExecutionPlan } from '@openfn/runtime'; -import type { Run, RunState, JSONLog } from '../../src/types'; +import type { RunState, JSONLog } from '../../src/types'; const enc = new TextEncoder(); @@ -54,7 +54,7 @@ test('send event should resolve when the event is acknowledged', async (t) => { test('send event should throw if an event errors', async (t) => { const channel = mockChannel({ - throw: (x) => { + throw: () => { throw new Error('err'); }, }); @@ -98,7 +98,7 @@ test('jobLog should should send a log event outside a run', async (t) => { }, }); - await onJobLog({ channel, state }, log); + await onJobLog({ channel, state } as any, log); }); test('jobLog should should send a log event inside a run', async (t) => { @@ -131,13 +131,13 @@ test('jobLog should should send a log event inside a run', async (t) => { }, }); - await onJobLog({ channel, state }, log); + await onJobLog({ channel, state } as any, log); }); test('jobError should trigger step:complete with a reason', async (t) => { - let stepCompleteEvent; + let stepCompleteEvent: any; - const state = createRunState({ id: 'run-23' } as Run); + const state = createRunState({ id: 'run-23' } as ExecutionPlan); state.activeJob = 'job-1'; state.activeStep = 'b'; @@ -153,7 +153,7 @@ test('jobError should trigger step:complete with a reason', async (t) => { error: { 
message: 'nope', severity: 'kill', type: 'TEST' }, state: exitState, }; - await onJobError({ channel, state }, event); + await onJobError({ channel, state } as any, event); t.is(stepCompleteEvent.reason, 'kill'); t.is(stepCompleteEvent.error_message, 'nope'); @@ -162,9 +162,9 @@ test('jobError should trigger step:complete with a reason', async (t) => { }); test('jobError should trigger step:complete with a reason and default state', async (t) => { - let stepCompleteEvent; + let stepCompleteEvent: any; - const state = createRunState({ id: 'run-23' } as Run); + const state = createRunState({ id: 'run-23' } as ExecutionPlan); const channel = mockChannel({ [STEP_COMPLETE]: (evt) => { @@ -176,7 +176,7 @@ test('jobError should trigger step:complete with a reason and default state', as const event = { error: { message: 'nope', severity: 'kill', type: 'TEST' }, }; - await onJobError({ channel, state }, event); + await onJobError({ channel, state } as any, event); t.deepEqual(stepCompleteEvent.output_dataclip, '{}'); }); @@ -188,6 +188,7 @@ test('workflowStart should send an empty run:start event', async (t) => { }, }); + // @ts-ignore await onWorkflowStart({ channel }); }); @@ -275,17 +276,20 @@ test('execute should pass the final result to onFinish', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: 'fn(() => ({ done: true }))', - }, - ], - }; + workflow: { + steps: [ + { + expression: 'fn(() => ({ done: true }))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { + execute(channel, engine, logger, plan, input, options, (result) => { t.deepEqual(result.state, { done: true }); done(); }); @@ -299,14 +303,17 @@ test('execute should return a context object', async (t) => { const plan = { id: 'a', - jobs: [ - { - expression: 'fn(() => ({ done: true }))', - }, - ], - }; + workflow: { + steps: [ + { + expression: 'fn(() => ({ done: true 
}))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = {}; return new Promise((done) => { const context = execute( @@ -314,13 +321,13 @@ test('execute should return a context object', async (t) => { engine, logger, plan, + input, options, - (result) => { + () => { done(); } ); t.truthy(context.state); - t.deepEqual(context.state.options, options); t.deepEqual(context.channel, channel); t.deepEqual(context.logger, logger); }); @@ -343,18 +350,21 @@ test('execute should lazy-load a credential', async (t) => { const plan = { id: 'a', - jobs: [ - { - configuration: 'abc', - expression: 'fn(() => ({ done: true }))', - }, - ], - }; + workflow: { + steps: [ + { + configuration: 'abc', + expression: 'fn(() => ({ done: true }))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { + execute(channel, engine, logger, plan, input, options, () => { t.true(didCallCredentials); done(); }); @@ -363,34 +373,36 @@ test('execute should lazy-load a credential', async (t) => { test('execute should lazy-load initial state', async (t) => { const logger = createMockLogger(); - let didCallState = false; + let didLoadState = false; const channel = mockChannel({ ...mockEventHandlers, [GET_DATACLIP]: (id) => { t.truthy(id); - didCallState = true; + didLoadState = true; return toArrayBuffer({}); }, }); const engine = await createMockRTE(); - const plan: Partial = { + const plan = { id: 'a', - // @ts-ignore - initialState: 'abc', - jobs: [ - { - expression: 'fn(() => ({ done: true }))', - }, - ], - }; + workflow: { + steps: [ + { + expression: 'fn(() => ({ done: true }))', + }, + ], + }, + options: {}, + } as ExecutionPlan; const options = {}; + const input = 'abc'; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { - t.true(didCallState); + execute(channel, engine, logger, plan, input, options, () => { + 
t.true(didLoadState); done(); }); }); @@ -400,10 +412,10 @@ test('execute should call all events on the socket', async (t) => { const logger = createMockLogger(); const engine = await createMockRTE(); - const events = {}; + const events: Record = {}; - const toEventMap = (obj, evt: string) => { - obj[evt] = (e) => { + const toEventMap = (obj: any, evt: string) => { + obj[evt] = (e: any) => { events[evt] = e || true; }; return obj; @@ -424,20 +436,23 @@ test('execute should call all events on the socket', async (t) => { const plan = { id: 'run-1', - jobs: [ - { - id: 'trigger', - configuration: 'a', - adaptor: '@openfn/language-common@1.0.0', - expression: 'fn(() => console.log("x"))', - }, - ], - }; + workflow: { + steps: [ + { + id: 'trigger', + configuration: 'a', + adaptor: '@openfn/language-common@1.0.0', + expression: 'fn(() => console.log("x"))', + }, + ], + }, + } as ExecutionPlan; const options = {}; + const input = {}; return new Promise((done) => { - execute(channel, engine, logger, plan, options, (result) => { + execute(channel, engine, logger, plan, input, options, () => { // Check that events were passed to the socket // This is deliberately crude t.assert(allEvents.every((e) => events[e])); diff --git a/packages/ws-worker/test/api/reasons.test.ts b/packages/ws-worker/test/api/reasons.test.ts index d2d81ef2c..1e753c88c 100644 --- a/packages/ws-worker/test/api/reasons.test.ts +++ b/packages/ws-worker/test/api/reasons.test.ts @@ -4,7 +4,7 @@ import { calculateJobExitReason } from '../../src/api/reasons'; test('success', (t) => { const jobId = 'a'; - const state = {}; + const state: any = {}; const error = undefined; const r = calculateJobExitReason(jobId, state, error); @@ -15,7 +15,7 @@ test('success', (t) => { test('still success if a prior job has errors', (t) => { const jobId = 'a'; - const state = { + const state: any = { errors: { b: { type: 'RuntimeError', @@ -34,7 +34,7 @@ test('still success if a prior job has errors', (t) => { test('fail', (t) 
=> { const jobId = 'a'; - const state = { + const state: any = { errors: { a: { type: 'RuntimeError', @@ -52,7 +52,7 @@ test('fail', (t) => { test('crash', (t) => { const jobId = 'a'; - const state = {}; + const state: any = {}; const error = new RuntimeCrash(new ReferenceError('x is not defined')); const r = calculateJobExitReason(jobId, state, error); @@ -63,7 +63,7 @@ test('crash', (t) => { test('crash has priority over fail', (t) => { const jobId = 'a'; - const state = { + const state: any = { errors: { b: { type: 'RuntimeError', @@ -83,7 +83,7 @@ test('crash has priority over fail', (t) => { // But it should not stop us calculating a reason test('success if no state is passed', (t) => { const jobId = 'a'; - const state = undefined; + const state: any = undefined; const error = undefined; const r = calculateJobExitReason(jobId, state, error); @@ -94,7 +94,7 @@ test('success if no state is passed', (t) => { test('success if boolean state is passed', (t) => { const jobId = 'a'; - const state = true; + const state: any = true; const error = undefined; const r = calculateJobExitReason(jobId, state, error); diff --git a/packages/ws-worker/test/api/workloop.test.ts b/packages/ws-worker/test/api/workloop.test.ts index a1772ee01..a1ce6df1c 100644 --- a/packages/ws-worker/test/api/workloop.test.ts +++ b/packages/ws-worker/test/api/workloop.test.ts @@ -1,13 +1,12 @@ import test from 'ava'; +import { createMockLogger } from '@openfn/logger'; import { sleep } from '../util'; - import { mockChannel } from '../../src/mock/sockets'; import startWorkloop from '../../src/api/workloop'; import { CLAIM } from '../../src/events'; -import { createMockLogger } from '@openfn/logger'; -let cancel; +let cancel: any; const logger = createMockLogger(); @@ -17,7 +16,6 @@ test.afterEach(() => { test('workloop can be cancelled', async (t) => { let count = 0; - let cancel; const app = { queueChannel: mockChannel({ [CLAIM]: () => { @@ -29,7 +27,7 @@ test('workloop can be cancelled', async (t) 
=> { execute: () => {}, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); await sleep(100); // A quirk of how cancel works is that the loop will be called a few times @@ -38,8 +36,6 @@ test('workloop can be cancelled', async (t) => { test('workloop sends the runs:claim event', (t) => { return new Promise((done) => { - let cancel; - const app = { workflows: {}, queueChannel: mockChannel({ @@ -51,13 +47,12 @@ test('workloop sends the runs:claim event', (t) => { }), execute: () => {}, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); }); }); test('workloop sends the runs:claim event several times ', (t) => { return new Promise((done) => { - let cancel; let count = 0; const app = { workflows: {}, @@ -73,14 +68,12 @@ test('workloop sends the runs:claim event several times ', (t) => { }), execute: () => {}, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); }); }); test('workloop calls execute if runs:claim returns runs', (t) => { return new Promise((done) => { - let cancel; - const app = { workflows: {}, queueChannel: mockChannel({ @@ -88,13 +81,13 @@ test('workloop calls execute if runs:claim returns runs', (t) => { runs: [{ id: 'a', token: 'x.y.z' }], }), }), - execute: (run) => { + execute: (run: any) => { t.deepEqual(run, { id: 'a', token: 'x.y.z' }); t.pass(); done(); }, }; - cancel = startWorkloop(app, logger, 1, 1); + cancel = startWorkloop(app as any, logger, 1, 1); }); }); diff --git a/packages/ws-worker/test/channels/run.test.ts b/packages/ws-worker/test/channels/run.test.ts index e5e580bb7..f66dc7a3f 100644 --- a/packages/ws-worker/test/channels/run.test.ts +++ b/packages/ws-worker/test/channels/run.test.ts @@ -36,14 +36,16 @@ test('loadRun should return an execution plan and options', async (t) => { const { plan, options } = await loadRun(channel); t.like(plan, { id: 'run-1', - jobs: [ - { - id: 'job-1', - 
configuration: 'a', - expression: 'fn(a => a)', - adaptor: '@openfn/language-common@1.0.0', - }, - ], + workflow: { + steps: [ + { + id: 'job-1', + configuration: 'a', + expression: 'fn(a => a)', + adaptor: '@openfn/language-common@1.0.0', + }, + ], + }, }); t.is(options.sanitize, 'obfuscate'); t.is(options.runTimeoutMs, 10); @@ -70,7 +72,7 @@ test('should join an run channel with a token', async (t) => { ); t.truthy(channel); - t.deepEqual(plan, { id: 'a', jobs: [] }); + t.deepEqual(plan, { id: 'a', workflow: { steps: [] }, options: {} }); t.deepEqual(options, { runTimeoutMs: 10 }); }); diff --git a/packages/ws-worker/test/channels/worker-queue.test.ts b/packages/ws-worker/test/channels/worker-queue.test.ts index 53997e5d5..eef728cba 100644 --- a/packages/ws-worker/test/channels/worker-queue.test.ts +++ b/packages/ws-worker/test/channels/worker-queue.test.ts @@ -1,14 +1,17 @@ import test from 'ava'; import * as jose from 'jose'; +import { createMockLogger } from '@openfn/logger'; +import { API_VERSION } from '@openfn/lexicon/lightning'; +import pkg from '../../package.json' assert { type: 'json' }; + import connectToWorkerQueue from '../../src/channels/worker-queue'; import { mockSocket } from '../../src/mock/sockets'; -import { createMockLogger } from '@openfn/logger'; const logger = createMockLogger(); test('should connect', async (t) => { return new Promise((done) => { - connectToWorkerQueue('www', 'a', 'secret', logger, mockSocket).on( + connectToWorkerQueue('www', 'a', 'secret', logger, mockSocket as any).on( 'connect', ({ socket, channel }) => { t.truthy(socket); @@ -28,7 +31,7 @@ test('should connect with an auth token', async (t) => { const secret = 'xyz'; const encodedSecret = new TextEncoder().encode(secret); - function createSocket(endpoint, options) { + function createSocket(endpoint: string, options: any) { const socket = mockSocket(endpoint, {}, async () => { const { token } = options.params; @@ -38,16 +41,42 @@ test('should connect with an auth 
token', async (t) => { return socket; } - connectToWorkerQueue('www', workerId, secret, logger, createSocket).on( + connectToWorkerQueue( + 'www', + workerId, + secret, + logger, + createSocket as any + ).on('connect', ({ socket, channel }) => { + t.truthy(socket); + t.truthy(socket.connect); + t.truthy(channel); + t.truthy(channel.join); + t.pass('connected'); + done(); + }); + }); +}); + +test('should connect with api and worker versions', async (t) => { + return new Promise((done) => { + function createSocket(endpoint: string, options: any) { + const socket = mockSocket(endpoint, {}, async () => { + const { worker_version, api_version } = options.params; + + t.is(worker_version, pkg.version); + t.truthy(worker_version); + + t.is(api_version, API_VERSION); + t.truthy(api_version); + }); + + return socket; + } + + connectToWorkerQueue('www', 'a', 'secret', logger, createSocket as any).on( 'connect', - ({ socket, channel }) => { - t.truthy(socket); - t.truthy(socket.connect); - t.truthy(channel); - t.truthy(channel.join); - t.pass('connected'); - done(); - } + done ); }); }); @@ -58,7 +87,7 @@ test('should fail to connect with an invalid auth token', async (t) => { const secret = 'xyz'; const encodedSecret = new TextEncoder().encode(secret); - function createSocket(endpoint, options) { + function createSocket(endpoint: string, options: any) { const socket = mockSocket(endpoint, {}, async () => { const { token } = options.params; @@ -77,7 +106,7 @@ test('should fail to connect with an invalid auth token', async (t) => { workerId, 'wrong-secret!', logger, - createSocket + createSocket as any ).on('error', (e) => { t.is(e, 'auth_fail'); t.pass('error thrown'); diff --git a/packages/ws-worker/test/events/run-complete.test.ts b/packages/ws-worker/test/events/run-complete.test.ts index 8ca730082..9220ee28a 100644 --- a/packages/ws-worker/test/events/run-complete.test.ts +++ b/packages/ws-worker/test/events/run-complete.test.ts @@ -4,10 +4,11 @@ import handleRunComplete 
from '../../src/events/run-complete'; import { mockChannel } from '../../src/mock/sockets'; import { RUN_COMPLETE, RUN_LOG } from '../../src/events'; import { createRunState } from '../../src/util'; +import { createPlan } from '../util'; test('should send an run:complete event', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [] }; + const plan = createPlan(); const state = createRunState(plan); state.dataclips = { @@ -22,15 +23,15 @@ test('should send an run:complete event', async (t) => { }, }); - const event = {}; + const event: any = {}; - const context = { channel, state, onFinish: () => {} }; + const context: any = { channel, state, onFinish: () => {} }; await handleRunComplete(context, event); }); test('should call onFinish with final dataclip', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [] }; + const plan = createPlan(); const state = createRunState(plan); state.dataclips = { @@ -43,22 +44,22 @@ test('should call onFinish with final dataclip', async (t) => { [RUN_COMPLETE]: () => true, }); - const context = { + const context: any = { channel, state, - onFinish: ({ state: finalState }) => { + onFinish: ({ state: finalState }: any) => { t.deepEqual(result, finalState); }, }; - const event = { state: result }; + const event: any = { state: result }; await handleRunComplete(context, event); }); test('should send a reason log and return reason for success', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [] }; + const plan = createPlan(); const state = createRunState(plan); state.dataclips = { @@ -66,8 +67,8 @@ test('should send a reason log and return reason for success', async (t) => { }; state.lastDataclipId = 'x'; - let logEvent; - let completeEvent; + let logEvent: any; + let completeEvent: any; const channel = mockChannel({ [RUN_LOG]: (e) => { @@ -78,15 +79,15 @@ test('should send a reason log and return reason for success', async (t) => { }, }); - 
const context = { + const context: any = { channel, state, - onFinish: ({ state: finalState }) => { + onFinish: ({ state: finalState }: any) => { t.deepEqual(result, finalState); }, }; - const event = { state: result }; + const event: any = { state: result }; await handleRunComplete(context, event); @@ -98,7 +99,7 @@ test('should send a reason log and return reason for success', async (t) => { test('should send a reason log and return reason for fail', async (t) => { const result = { answer: 42 }; - const plan = { id: 'run-1', jobs: [{ id: 'x' }] }; + const plan = createPlan({ id: 'x', expression: '.' }); const state = createRunState(plan); state.dataclips = { @@ -113,8 +114,8 @@ test('should send a reason log and return reason for fail', async (t) => { }, }; - let logEvent; - let completeEvent; + let logEvent: any; + let completeEvent: any; const channel = mockChannel({ [RUN_LOG]: (e) => { @@ -125,15 +126,15 @@ test('should send a reason log and return reason for fail', async (t) => { }, }); - const context = { + const context: any = { channel, state, - onFinish: ({ state: finalState }) => { + onFinish: ({ state: finalState }: any) => { t.deepEqual(result, finalState); }, }; - const event = { state: result }; + const event: any = { state: result }; await handleRunComplete(context, event); diff --git a/packages/ws-worker/test/events/run-error.test.ts b/packages/ws-worker/test/events/run-error.test.ts index 2583d2257..34404327c 100644 --- a/packages/ws-worker/test/events/run-error.test.ts +++ b/packages/ws-worker/test/events/run-error.test.ts @@ -5,7 +5,7 @@ import { mockChannel } from '../../src/mock/sockets'; import { RUN_COMPLETE, RUN_LOG, STEP_COMPLETE } from '../../src/events'; import { createRunState } from '../../src/util'; -const plan = { id: 'run-1', jobs: [] }; +const plan = { id: 'run-1', workflow: { steps: [] }, options: {} }; test('runError should trigger runComplete with a reason', async (t) => { const jobId = 'job-1'; @@ -25,7 +25,7 @@ test('runError 
should trigger runComplete with a reason', async (t) => { [RUN_COMPLETE]: () => true, }); - const event = { + const event: any = { severity: 'crash', type: 'Err', message: 'it crashed', @@ -33,7 +33,7 @@ test('runError should trigger runComplete with a reason', async (t) => { const context = { channel, state, onFinish: () => {} }; - await onRunError(context, event); + await onRunError(context as any, event); }); test('workflow error should send reason to onFinish', async (t) => { @@ -46,11 +46,11 @@ test('workflow error should send reason to onFinish', async (t) => { const channel = mockChannel({ [RUN_LOG]: () => true, - [STEP_COMPLETE]: (evt) => true, + [STEP_COMPLETE]: () => true, [RUN_COMPLETE]: () => true, }); - const event = { + const event: any = { error: { severity: 'crash', type: 'Err', @@ -62,12 +62,12 @@ test('workflow error should send reason to onFinish', async (t) => { const context = { channel, state, - onFinish: (evt) => { + onFinish: (evt: any) => { t.is(evt.reason.reason, 'crash'); }, }; - await onRunError(context, event); + await onRunError(context as any, event); }); test('runError should not call job complete if the job is not active', async (t) => { @@ -76,14 +76,14 @@ test('runError should not call job complete if the job is not active', async (t) const channel = mockChannel({ [RUN_LOG]: () => true, - [STEP_COMPLETE]: (evt) => { + [STEP_COMPLETE]: () => { t.fail('should not call!'); return true; }, [RUN_COMPLETE]: () => true, }); - const event = { + const event: any = { error: { severity: 'crash', type: 'Err', @@ -100,7 +100,7 @@ test('runError should not call job complete if the job is not active', async (t) }, }; - await onRunError(context, event); + await onRunError(context as any, event); }); test('runError should log the reason', async (t) => { @@ -108,31 +108,34 @@ test('runError should log the reason', async (t) => { const state = createRunState({ id: 'run-1', - jobs: [{ id: 'job-1' }], + workflow: { + steps: [{ id: 'job-1' }], + }, + 
options: {}, }); state.lastDataclipId = 'x'; state.activeStep = 'b'; state.activeJob = jobId; - const event = { + const event: any = { severity: 'crash', type: 'Err', message: 'it crashed', }; state.reasons['x'] = event; - let logEvent; + let logEvent: any; const channel = mockChannel({ [RUN_LOG]: (e) => { logEvent = e; }, - [STEP_COMPLETE]: (evt) => true, + [STEP_COMPLETE]: () => true, [RUN_COMPLETE]: () => true, }); const context = { channel, state, onFinish: () => {} }; - await onRunError(context, event); + await onRunError(context as any, event); t.is(logEvent.message[0], 'Run complete with status: crash\nErr: it crashed'); }); diff --git a/packages/ws-worker/test/events/step-complete.test.ts b/packages/ws-worker/test/events/step-complete.test.ts index f12d1a47a..8ba8a8e3d 100644 --- a/packages/ws-worker/test/events/step-complete.test.ts +++ b/packages/ws-worker/test/events/step-complete.test.ts @@ -1,14 +1,15 @@ import test from 'ava'; -import handleStepStart from '../../src/events/step-complete'; +import type { StepCompletePayload } from '@openfn/lexicon/lightning'; +import handleStepComplete from '../../src/events/step-complete'; import { mockChannel } from '../../src/mock/sockets'; import { createRunState } from '../../src/util'; import { STEP_COMPLETE } from '../../src/events'; - -import type { ExecutionPlan } from '@openfn/runtime'; +import { createPlan } from '../util'; +import { JobCompletePayload } from '@openfn/engine-multi'; test('clear the step id and active job on state', async (t) => { - const plan = { id: 'run-1' }; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -19,16 +20,16 @@ test('clear the step id and active job on state', async (t) => { [STEP_COMPLETE]: () => true, }); - const event = { state: { x: 10 } }; - await handleStepStart({ channel, state }, event); + const event = { state: { x: 10 } } as any; + await handleStepComplete({ channel, state } as any, event); t.falsy(state.activeJob); 
t.falsy(state.activeStep); }); test('setup input mappings on on state', async (t) => { - let lightningEvent; - const plan = { id: 'run-1' }; + let lightningEvent: any; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -41,8 +42,8 @@ test('setup input mappings on on state', async (t) => { }, }); - const engineEvent = { state: { x: 10 }, next: ['job-2'] }; - await handleStepStart({ channel, state }, engineEvent); + const engineEvent = { state: { x: 10 }, next: ['job-2'] } as any; + await handleStepComplete({ channel, state } as any, engineEvent); t.deepEqual(state.inputDataclips, { ['job-2']: lightningEvent.output_dataclip_id, @@ -50,7 +51,7 @@ test('setup input mappings on on state', async (t) => { }); test('save the dataclip to state', async (t) => { - const plan = { id: 'run-1' } as ExecutionPlan; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -61,8 +62,8 @@ test('save the dataclip to state', async (t) => { [STEP_COMPLETE]: () => true, }); - const event = { state: { x: 10 } }; - await handleStepStart({ channel, state }, event); + const event = { state: { x: 10 } } as any; + await handleStepComplete({ channel, state } as any, event); t.is(Object.keys(state.dataclips).length, 1); const [dataclip] = Object.values(state.dataclips); @@ -70,7 +71,7 @@ test('save the dataclip to state', async (t) => { }); test('write a reason to state', async (t) => { - const plan = { id: 'run-1' } as ExecutionPlan; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); @@ -83,8 +84,8 @@ test('write a reason to state', async (t) => { [STEP_COMPLETE]: () => true, }); - const event = { state: { x: 10 } }; - await handleStepStart({ channel, state }, event); + const event = { state: { x: 10 } } as any; + await handleStepComplete({ channel, state } as any, event); t.is(Object.keys(state.reasons).length, 1); t.deepEqual(state.reasons[jobId], { @@ -95,14 +96,14 @@ test('write a 
reason to state', async (t) => { }); test('generate an exit reason: success', async (t) => { - const plan = { id: 'run-1' } as ExecutionPlan; + const plan = createPlan(); const jobId = 'job-1'; const state = createRunState(plan); state.activeJob = jobId; state.activeStep = 'b'; - let event; + let event: any; const channel = mockChannel({ [STEP_COMPLETE]: (e) => { @@ -110,7 +111,10 @@ test('generate an exit reason: success', async (t) => { }, }); - await handleStepStart({ channel, state }, { state: { x: 10 } }); + await handleStepComplete( + { channel, state } as any, + { state: { x: 10 } } as any + ); t.truthy(event); t.is(event.reason, 'success'); @@ -119,7 +123,7 @@ test('generate an exit reason: success', async (t) => { }); test('send a step:complete event', async (t) => { - const plan = { id: 'run-1' }; + const plan = createPlan(); const jobId = 'job-1'; const result = { x: 10 }; @@ -128,7 +132,7 @@ test('send a step:complete event', async (t) => { state.activeStep = 'b'; const channel = mockChannel({ - [STEP_COMPLETE]: (evt) => { + [STEP_COMPLETE]: (evt: StepCompletePayload) => { t.is(evt.job_id, jobId); t.truthy(evt.step_id); t.truthy(evt.output_dataclip_id); @@ -140,11 +144,13 @@ test('send a step:complete event', async (t) => { }); const event = { + jobId, + workflowId: plan.id, state: result, next: ['a'], mem: { job: 1, system: 10 }, duration: 61, - threadId: 'abc', - }; - await handleStepStart({ channel, state }, event); + thread_id: 'abc', + } as JobCompletePayload; + await handleStepComplete({ channel, state } as any, event); }); diff --git a/packages/ws-worker/test/events/step-start.test.ts b/packages/ws-worker/test/events/step-start.test.ts index 9cebbdc94..e97b69a61 100644 --- a/packages/ws-worker/test/events/step-start.test.ts +++ b/packages/ws-worker/test/events/step-start.test.ts @@ -10,7 +10,11 @@ import { RUN_LOG, STEP_START } from '../../src/events'; import pkg from '../../package.json' assert { type: 'json' }; test('set a step id and active 
job on state', async (t) => { - const plan = { id: 'run-1', jobs: [{ id: 'job-1' }] }; + const plan = { + id: 'run-1', + workflow: { steps: [{ id: 'job-1' }] }, + options: {}, + }; const jobId = 'job-1'; const state = createRunState(plan); @@ -20,7 +24,7 @@ test('set a step id and active job on state', async (t) => { [RUN_LOG]: (x) => x, }); - await handleStepStart({ channel, state }, { jobId }); + await handleStepStart({ channel, state } as any, { jobId } as any); t.is(state.activeJob, jobId); t.truthy(state.activeStep); @@ -29,37 +33,43 @@ test('set a step id and active job on state', async (t) => { test('send a step:start event', async (t) => { const plan = { id: 'run-1', - initialState: 'abc', - jobs: [ - { id: 'job-1', expression: '.' }, - { id: 'job-2', expression: '.' }, - ], + workflow: { + steps: [ + { id: 'job-1', expression: '.' }, + { id: 'job-2', expression: '.' }, + ], + }, + options: {}, }; + const input = 'abc'; const jobId = 'job-1'; - const state = createRunState(plan); + const state = createRunState(plan, input); state.activeJob = jobId; state.activeStep = 'b'; const channel = mockChannel({ [STEP_START]: (evt) => { t.is(evt.job_id, jobId); - t.is(evt.input_dataclip_id, plan.initialState); + t.is(evt.input_dataclip_id, input); t.truthy(evt.step_id); return true; }, [RUN_LOG]: () => true, }); - await handleStepStart({ channel, state }, { jobId }); + await handleStepStart({ channel, state } as any, { jobId } as any); }); test('step:start event should include versions', async (t) => { const plan = { id: 'run-1', - initialState: 'abc', - jobs: [{ id: 'job-1', expression: '.' }], + workflow: { + steps: [{ id: 'job-1', expression: '.' 
}], + }, + options: {}, }; + const input = 'abc'; const jobId = 'job-1'; const versions = { @@ -76,7 +86,7 @@ test('step:start event should include versions', async (t) => { versions, }; - const state = createRunState(plan); + const state = createRunState(plan, input); state.activeJob = jobId; state.activeStep = 'b'; @@ -91,16 +101,19 @@ test('step:start event should include versions', async (t) => { [RUN_LOG]: () => true, }); - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); }); test('also logs the version number', async (t) => { - let logEvent; + let logEvent: any; const plan = { id: 'run-1', - initialState: 'abc', - jobs: [{ id: 'job-1', expression: '.' }], + workflow: { + steps: [{ id: 'job-1', expression: '.' }], + }, + options: {}, }; + const input = 'abc'; const jobId = 'job-1'; const versions = { @@ -117,12 +130,12 @@ test('also logs the version number', async (t) => { versions, }; - const state = createRunState(plan); + const state = createRunState(plan, input); state.activeJob = jobId; state.activeStep = 'b'; const channel = mockChannel({ - [STEP_START]: (evt) => true, + [STEP_START]: () => true, [RUN_LOG]: (evt) => { if (evt.source === 'VER') { logEvent = evt; @@ -131,7 +144,7 @@ test('also logs the version number', async (t) => { }, }); - await handleStepStart({ channel, state }, event); + await handleStepStart({ channel, state } as any, event); t.truthy(logEvent); t.is(logEvent.level, 'info'); diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 337e1847a..3a36a98e1 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -11,8 +11,8 @@ import createWorkerServer from '../src/server'; import createMockRTE from '../src/mock/runtime-engine'; import * as e from '../src/events'; -let lng; -let worker; +let lng: any; +let worker: any; const urls = { worker: 'http://localhost:4567', @@ -51,7 +51,7 @@ 
const getRun = (ext = {}, jobs?: any) => ({ test.serial(`events: lightning should respond to a ${e.CLAIM} event`, (t) => { return new Promise((done) => { - lng.on(e.CLAIM, (evt) => { + lng.on(e.CLAIM, (evt: any) => { const response = evt.payload; t.deepEqual(response, []); done(); @@ -64,9 +64,9 @@ test.serial( (t) => { return new Promise((done) => { const run = getRun(); - let response; + let response: any; - lng.on(e.CLAIM, ({ payload }) => { + lng.on(e.CLAIM, ({ payload }: any) => { if (payload.length) { response = payload[0]; } @@ -88,7 +88,7 @@ test.serial( ); test.serial( - 'should run an run which returns an expression as JSON', + 'should run a run which returns an expression as JSON', async (t) => { return new Promise((done) => { const run = { @@ -100,7 +100,7 @@ test.serial( ], }; - lng.waitForResult(run.id).then((result) => { + lng.waitForResult(run.id).then((result: any) => { t.deepEqual(result, { count: 122 }); done(); }); @@ -110,7 +110,7 @@ test.serial( } ); -test.serial('should run an run which returns intial state', async (t) => { +test.serial('should run a run which returns initial state', async (t) => { return new Promise((done) => { lng.addDataclip('x', { data: 66, @@ -126,7 +126,7 @@ test.serial('should run an run which returns intial state', async (t) => { ], }; - lng.waitForResult(run.id).then((result) => { + lng.waitForResult(run.id).then((result: any) => { t.deepEqual(result, { data: 66 }); done(); }); @@ -142,7 +142,7 @@ test.serial( (t) => { return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { const { final_dataclip_id } = evt.payload; t.assert(typeof final_dataclip_id === 'string'); t.pass('run complete event received'); @@ -160,29 +160,26 @@ test.todo(`events: lightning should receive a ${e.RUN_START} event`); // for each event we can see a copy of the server state // (if that helps anything?) 
-test.serial( - `events: lightning should receive a ${e.GET_PLAN} event`, - (t) => { - return new Promise((done) => { - const run = getRun(); - - let didCallEvent = false; - lng.onSocketEvent(e.GET_PLAN, run.id, ({ payload }) => { - // This doesn't test that the correct run gets sent back - // We'd have to add an event to the engine for that - // (not a bad idea) - didCallEvent = true; - }); +test.serial(`events: lightning should receive a ${e.GET_PLAN} event`, (t) => { + return new Promise((done) => { + const run = getRun(); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { - t.true(didCallEvent); - done(); - }); + let didCallEvent = false; + lng.onSocketEvent(e.GET_PLAN, run.id, () => { + // This doesn't test that the correct run gets sent back + // We'd have to add an event to the engine for that + // (not a bad idea) + didCallEvent = true; + }); - lng.enqueueRun(run); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { + t.true(didCallEvent); + done(); }); - } -); + + lng.enqueueRun(run); + }); +}); test.serial( `events: lightning should receive a ${e.GET_CREDENTIAL} event`, @@ -224,7 +221,7 @@ test.serial( }); let didCallEvent = false; - lng.onSocketEvent(e.GET_DATACLIP, run.id, ({ payload }) => { + lng.onSocketEvent(e.GET_DATACLIP, run.id, ({ payload }: any) => { // payload is the incoming/request payload - this tells us which dataclip // the worker is asking for // Note that it doesn't tell us much about what is returned @@ -247,13 +244,13 @@ test.serial(`events: lightning should receive a ${e.STEP_START} event`, (t) => { return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.STEP_START, run.id, ({ payload }) => { + lng.onSocketEvent(e.STEP_START, run.id, ({ payload }: any) => { t.is(payload.job_id, 'j'); t.truthy(payload.step_id); t.pass('called run start'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -267,7 +264,7 @@ test.serial( return new 
Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }) => { + lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }: any) => { t.is(payload.job_id, 'j'); t.truthy(payload.step_id); t.truthy(payload.output_dataclip); @@ -278,7 +275,7 @@ test.serial( t.pass('called run complete'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -299,12 +296,12 @@ test.serial( }, ]); - lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }) => { + lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }: any) => { t.is(payload.reason, 'fail'); t.pass('called step complete'); }); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { done(); }); @@ -313,37 +310,34 @@ test.serial( } ); -test.serial( - `events: lightning should receive a ${e.RUN_LOG} event`, - (t) => { - return new Promise((done) => { - const run = { - id: 'run-1', - jobs: [ - { - body: 'fn((s) => { console.log("x"); return s })', - }, - ], - }; - - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { - const log = payload; +test.serial(`events: lightning should receive a ${e.RUN_LOG} event`, (t) => { + return new Promise((done) => { + const run = { + id: 'run-1', + jobs: [ + { + body: 'fn((s) => { console.log("x"); return s })', + }, + ], + }; - t.is(log.level, 'info'); - t.truthy(log.run_id); - t.truthy(log.step_id); - t.truthy(log.message); - t.deepEqual(log.message, ['x']); - }); + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }: any) => { + const log = payload; - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { - done(); - }); + t.is(log.level, 'info'); + t.truthy(log.run_id); + t.truthy(log.step_id); + t.truthy(log.message); + t.deepEqual(log.message, ['x']); + }); - lng.enqueueRun(run); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { + done(); }); - } -); + + lng.enqueueRun(run); + }); +}); // Skipping because this is 
flaky at microsecond resolution // See branch hrtime-send-nanoseconds-to-lightning where this should be more robust @@ -366,13 +360,13 @@ test.serial.skip(`events: logs should have increasing timestamps`, (t) => { lng.onSocketEvent( e.RUN_LOG, run.id, - ({ payload }) => { + ({ payload }: any) => { history.push(BigInt(payload.timestamp)); }, false ); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { t.log(history); let last = BigInt(0); @@ -407,7 +401,7 @@ test.serial( return new Promise((done) => { const run = getRun(); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { t.pass('called run:complete'); done(); }); @@ -417,37 +411,34 @@ test.serial( } ); -test.serial( - 'should register and de-register runs to the server', - async (t) => { - return new Promise((done) => { - const run = { - id: 'run-1', - jobs: [ - { - body: 'fn(() => ({ count: 122 }))', - }, - ], - }; - - worker.on(e.RUN_START, () => { - t.truthy(worker.workflows[run.id]); - }); +test.serial('should register and de-register runs to the server', async (t) => { + return new Promise((done) => { + const run = { + id: 'run-1', + jobs: [ + { + body: 'fn(() => ({ count: 122 }))', + }, + ], + }; - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { - t.truthy(worker.workflows[run.id]); - // Tidyup is done AFTER lightning receives the event - // This timeout is crude but should work - setTimeout(() => { - t.falsy(worker.workflows[run.id]); - done(); - }, 10); - }); + worker.on(e.RUN_START, () => { + t.truthy(worker.workflows[run.id]); + }); - lng.enqueueRun(run); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { + t.truthy(worker.workflows[run.id]); + // Tidyup is done AFTER lightning receives the event + // This timeout is crude but should work + setTimeout(() => { + t.falsy(worker.workflows[run.id]); + done(); + }, 10); }); - } -); + + lng.enqueueRun(run); + }); +}); // TODO this is a server 
test // What I am testing here is that the first job completes @@ -469,10 +460,10 @@ test.skip('should not claim while at capacity', async (t) => { id: 'run-2', }; - let run1Start; + let run1Start: any; // When the first run starts, we should only have run 1 in progress - lng.onSocketEvent(e.RUN_START, run1.id, (evt) => { + lng.onSocketEvent(e.RUN_START, run1.id, () => { run1Start = Date.now(); t.truthy(worker.workflows[run1.id]); @@ -480,7 +471,7 @@ test.skip('should not claim while at capacity', async (t) => { }); // When the second run starts, we should only have run 2 in progress - lng.onSocketEvent(e.RUN_START, run2.id, (evt) => { + lng.onSocketEvent(e.RUN_START, run2.id, () => { const duration = Date.now() - run1Start; t.true(duration > 490); @@ -490,7 +481,7 @@ test.skip('should not claim while at capacity', async (t) => { // also, the now date should be around 500 ms after the first start }); - lng.onSocketEvent(e.RUN_COMPLETE, run2.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run2.id, () => { done(); }); @@ -506,9 +497,9 @@ test.serial('should pass the right dataclip when running in parallel', (t) => { body: `fn((s) => { s.data.${id} = true; return s; })`, }); - const outputDataclipIds = {}; - const inputDataclipIds = {}; - const outputs = {}; + const outputDataclipIds: any = {}; + const inputDataclipIds: any = {}; + const outputs: any = {}; const a = { id: 'a', body: 'fn(() => ({ data: { a: true } }))', @@ -535,7 +526,7 @@ test.serial('should pass the right dataclip when running in parallel', (t) => { const unsub2 = lng.onSocketEvent( e.STEP_START, run.id, - ({ payload }) => { + ({ payload }: any) => { inputDataclipIds[payload.job_id] = payload.input_dataclip_id; }, false @@ -545,14 +536,14 @@ test.serial('should pass the right dataclip when running in parallel', (t) => { const unsub1 = lng.onSocketEvent( e.STEP_COMPLETE, run.id, - ({ payload }) => { + ({ payload }: any) => { outputDataclipIds[payload.job_id] = payload.output_dataclip_id; 
outputs[payload.job_id] = JSON.parse(payload.output_dataclip); }, false ); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { unsub1(); unsub2(); @@ -595,7 +586,7 @@ test.serial( const bc = createEdge('b', 'c'); bc.condition = 'on_job_success'; - const run = createRun([a, b, c], [ab, bc]); + const run = createRun([a, b, c] as any, [ab, bc] as any); const results: Record = {}; @@ -603,13 +594,13 @@ test.serial( const unsub = lng.onSocketEvent( e.STEP_COMPLETE, run.id, - (evt) => { + (evt: any) => { results[evt.payload.job_id] = JSON.parse(evt.payload.output_dataclip); }, false ); - lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt) => { + lng.onSocketEvent(e.RUN_COMPLETE, run.id, (evt: any) => { t.is(evt.payload.reason, 'success'); // What we REALLY care about is that the b-c edge condition @@ -629,7 +620,7 @@ test.serial( test.serial(`worker should send a success reason in the logs`, (t) => { return new Promise((done) => { - let log; + let log: any; const run = { id: 'run-1', @@ -640,7 +631,7 @@ test.serial(`worker should send a success reason in the logs`, (t) => { ], }; - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }: any) => { if (payload.message[0].match(/Run complete with status: success/)) { log = payload.message[0]; } @@ -657,7 +648,7 @@ test.serial(`worker should send a success reason in the logs`, (t) => { test.serial(`worker should send a fail reason in the logs`, (t) => { return new Promise((done) => { - let log; + let log: any; const run = { id: 'run-1', @@ -668,7 +659,7 @@ test.serial(`worker should send a fail reason in the logs`, (t) => { ], }; - lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }) => { + lng.onSocketEvent(e.RUN_LOG, run.id, ({ payload }: any) => { if (payload.message[0].match(/Run complete with status: fail/)) { log = payload.message[0]; } diff --git a/packages/ws-worker/test/mock/runtime-engine.test.ts 
b/packages/ws-worker/test/mock/runtime-engine.test.ts index bfb9eba63..d60c5b2ab 100644 --- a/packages/ws-worker/test/mock/runtime-engine.test.ts +++ b/packages/ws-worker/test/mock/runtime-engine.test.ts @@ -1,44 +1,52 @@ import test from 'ava'; -import create, { - JobCompleteEvent, - JobStartEvent, - WorkflowCompleteEvent, - WorkflowStartEvent, -} from '../../src/mock/runtime-engine'; -import type { ExecutionPlan } from '@openfn/runtime'; -import { waitForEvent, clone } from '../util'; +import type { ExecutionPlan } from '@openfn/lexicon'; + +import type { + JobCompletePayload, + JobStartPayload, + WorkflowCompletePayload, + WorkflowStartPayload, +} from '@openfn/engine-multi'; +import create from '../../src/mock/runtime-engine'; +import { waitForEvent, clone, createPlan } from '../util'; +import { WorkflowErrorPayload } from '@openfn/engine-multi'; const sampleWorkflow = { id: 'w1', - jobs: [ - { - id: 'j1', - adaptor: 'common@1.0.0', - expression: 'fn(() => ({ data: { x: 10 } }))', - }, - ], + workflow: { + steps: [ + { + id: 'j1', + adaptor: 'common@1.0.0', + expression: 'fn(() => ({ data: { x: 10 } }))', + }, + ], + }, } as ExecutionPlan; -let engine; +let engine: any; test.before(async () => { engine = await create(); }); -test('getStatus() should should have no active workflows', async (t) => { +test.serial('getStatus() should should have no active workflows', async (t) => { const { active } = engine.getStatus(); t.is(active, 0); }); -test('Dispatch start events for a new workflow', async (t) => { +test.serial('Dispatch start events for a new workflow', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'workflow-start'); + const evt = await waitForEvent( + engine, + 'workflow-start' + ); t.truthy(evt); t.is(evt.workflowId, 'w1'); }); -test('getStatus should report one active workflow', async (t) => { +test.serial('getStatus should report one active workflow', async (t) => { engine.execute(sampleWorkflow); const { active 
} = engine.getStatus(); @@ -46,9 +54,9 @@ test('getStatus should report one active workflow', async (t) => { t.is(active, 1); }); -test('Dispatch complete events when a workflow completes', async (t) => { +test.serial('Dispatch complete events when a workflow completes', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent( + const evt = await waitForEvent( engine, 'workflow-complete' ); @@ -57,80 +65,77 @@ test('Dispatch complete events when a workflow completes', async (t) => { t.truthy(evt.threadId); }); -test('Dispatch start events for a job', async (t) => { +test.serial('Dispatch start events for a job', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'job-start'); + const evt = await waitForEvent(engine, 'job-start'); t.truthy(evt); t.is(evt.workflowId, 'w1'); t.is(evt.jobId, 'j1'); }); -test('Dispatch complete events for a job', async (t) => { +test.serial('Dispatch complete events for a job', async (t) => { engine.execute(sampleWorkflow); - const evt = await waitForEvent(engine, 'job-complete'); + const evt = await waitForEvent(engine, 'job-complete'); t.truthy(evt); t.is(evt.workflowId, 'w1'); t.is(evt.jobId, 'j1'); t.deepEqual(evt.state, { data: { x: 10 } }); }); -test('Dispatch error event for a crash', async (t) => { - const wf = { - id: 'xyz', - jobs: [ - { - id: 'j1', - adaptor: 'common@1.0.0', - expression: 'fn(() => ( @~!"@£!4 )', - }, - ], - } as ExecutionPlan; +test.serial('Dispatch error event for a crash', async (t) => { + const wf = createPlan({ + id: 'j1', + adaptor: 'common@1.0.0', + expression: 'fn(() => ( @~!"@£!4 )', + }); engine.execute(wf); - const evt = await waitForEvent(engine, 'workflow-error'); + const evt = await waitForEvent( + engine, + 'workflow-error' + ); - t.is(evt.workflowId, 'xyz'); + t.is(evt.workflowId, wf.id!); t.is(evt.type, 'RuntimeCrash'); t.regex(evt.message, /invalid or unexpected token/i); }); -test('wait function', async (t) => { - const wf = { - 
id: 'w1', - jobs: [ - { - id: 'j1', - expression: 'wait(100)', - }, - ], - } as ExecutionPlan; +test.serial('wait function', async (t) => { + const wf = createPlan({ + id: 'j1', + expression: 'wait(100)', + }); engine.execute(wf); const start = Date.now(); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); const end = Date.now() - start; t.true(end > 90); }); -test('resolve credential before job-start if credential is a string', async (t) => { - const wf = clone(sampleWorkflow); - wf.jobs[0].configuration = 'x'; +test.serial( + 'resolve credential before job-start if credential is a string', + async (t) => { + const wf = clone(sampleWorkflow); + wf.id = t.title; + wf.workflow.steps[0].configuration = 'x'; - let didCallCredentials; - const credential = async (_id) => { - didCallCredentials = true; - return {}; - }; + let didCallCredentials; + const credential = async () => { + didCallCredentials = true; + return {}; + }; - // @ts-ignore - engine.execute(wf, { resolvers: { credential } }); + // @ts-ignore + engine.execute(wf, {}, { resolvers: { credential } }); - await waitForEvent(engine, 'job-start'); - t.true(didCallCredentials); -}); + await waitForEvent(engine, 'job-start'); + t.true(didCallCredentials); + } +); -test('listen to events', async (t) => { +test.serial('listen to events', async (t) => { const called = { 'job-start': false, 'job-complete': false, @@ -139,68 +144,64 @@ test('listen to events', async (t) => { 'workflow-complete': false, }; - const wf = { - id: 'wibble', - jobs: [ - { - id: 'j1', - adaptor: 'common@1.0.0', - expression: 'export default [() => { console.log("x"); }]', - }, - ], - } as ExecutionPlan; + const wf = createPlan({ + id: 'j1', + adaptor: 'common@1.0.0', + expression: 'export default [() => { console.log("x"); }]', + }); engine.listen(wf.id, { - 'job-start': ({ workflowId, jobId }) => { + 'job-start': ({ workflowId, jobId }: any) => { called['job-start'] = true; t.is(workflowId, 
wf.id); - t.is(jobId, wf.jobs[0].id); + t.is(jobId, wf.workflow.steps[0].id); }, - 'job-complete': ({ workflowId, jobId }) => { + 'job-complete': ({ workflowId, jobId }: any) => { called['job-complete'] = true; t.is(workflowId, wf.id); - t.is(jobId, wf.jobs[0].id); + t.is(jobId, wf.workflow.steps[0].id); // TODO includes state? }, - 'workflow-log': ({ workflowId, message }) => { + 'workflow-log': ({ workflowId, message }: any) => { called['workflow-log'] = true; t.is(workflowId, wf.id); t.truthy(message); }, - 'workflow-start': ({ workflowId }) => { + 'workflow-start': ({ workflowId }: any) => { called['workflow-start'] = true; t.is(workflowId, wf.id); }, - 'workflow-complete': ({ workflowId }) => { + 'workflow-complete': ({ workflowId }: any) => { called['workflow-complete'] = true; t.is(workflowId, wf.id); }, }); engine.execute(wf); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); t.assert(Object.values(called).every((v) => v === true)); }); -test('only listen to events for the correct workflow', async (t) => { +test.serial('only listen to events for the correct workflow', async (t) => { engine.listen('bobby mcgee', { - 'workflow-start': ({ workflowId }) => { + 'workflow-start': () => { throw new Error('should not have called this!!'); }, }); engine.execute(sampleWorkflow); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); t.pass(); }); -test('log events should stringify a string message', async (t) => { +test.serial('log events should stringify a string message', async (t) => { const wf = clone(sampleWorkflow); - wf.jobs[0].expression = + wf.id = t.title; + wf.workflow.steps[0].expression = 'fn((s) => {console.log("haul away joe"); return s; })'; engine.listen(wf.id, { - 'workflow-log': ({ message }) => { + 'workflow-log': ({ message }: any) => { t.is(typeof message, 'string'); const result = JSON.parse(message); t.deepEqual(result, ['haul away joe']); @@ 
-208,15 +209,17 @@ test('log events should stringify a string message', async (t) => { }); engine.execute(wf); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); }); -test('log events should stringify an object message', async (t) => { +test.serial('log events should stringify an object message', async (t) => { const wf = clone(sampleWorkflow); - wf.jobs[0].expression = 'fn((s) => {console.log({ x: 22 }); return s; })'; + wf.id = t.title; + wf.workflow.steps[0].expression = + 'fn((s) => {console.log({ x: 22 }); return s; })'; engine.listen(wf.id, { - 'workflow-log': ({ message }) => { + 'workflow-log': ({ message }: any) => { t.is(typeof message, 'string'); const result = JSON.parse(message); t.deepEqual(result, [{ x: 22 }]); @@ -224,55 +227,53 @@ test('log events should stringify an object message', async (t) => { }); engine.execute(wf); - await waitForEvent(engine, 'workflow-complete'); + await waitForEvent(engine, 'workflow-complete'); }); -test('do nothing for a job if no expression and adaptor (trigger node)', async (t) => { - const workflow = { - id: 'w1', - jobs: [ - { - id: 'j1', - adaptor: '@openfn/language-common@1.0.0', - }, - ], - } as ExecutionPlan; +test.serial( + 'do nothing for a job if no expression and adaptor (trigger node)', + async (t) => { + // @ts-ignore + const workflow = createPlan({ + id: 'j1', + adaptor: '@openfn/language-common@1.0.0', + }); - let didCallEvent = false; + let didCallEvent = false; - engine.listen(workflow.id, { - 'job-start': () => { - didCallEvent = true; - }, - 'job-complete': () => { - didCallEvent = true; - }, - 'workflow-log': () => { - // this can be called - }, - 'workflow-start': () => { - // ditto - }, - 'workflow-complete': () => { - // ditto - }, - }); + engine.listen(workflow.id, { + 'job-start': () => { + didCallEvent = true; + }, + 'job-complete': () => { + didCallEvent = true; + }, + 'workflow-log': () => { + // this can be called + }, + 'workflow-start': 
() => { + // ditto + }, + 'workflow-complete': () => { + // ditto + }, + }); - engine.execute(workflow); - await waitForEvent(engine, 'workflow-complete'); + engine.execute(workflow); + await waitForEvent(engine, 'workflow-complete'); - t.false(didCallEvent); -}); + t.false(didCallEvent); + } +); -test('timeout', async (t) => { +test.skip('timeout', async (t) => { const wf = clone(sampleWorkflow); - wf.jobs[0].expression = 'wait(1000)'; - // wf.options = { timeout: 10 }; + wf.workflow.steps[0].expression = 'wait(1000)'; // @ts-ignore - engine.execute(wf, { timeout: 10 }); + engine.execute(wf, {}, { timeout: 10 }); - const evt = await waitForEvent( + const evt = await waitForEvent( engine, 'workflow-error' ); diff --git a/packages/ws-worker/test/mock/sockets.test.ts b/packages/ws-worker/test/mock/sockets.test.ts index 46ad6aa4c..2312d9582 100644 --- a/packages/ws-worker/test/mock/sockets.test.ts +++ b/packages/ws-worker/test/mock/sockets.test.ts @@ -68,7 +68,7 @@ test('mock channel: invoke the ok handler with the callback result', (t) => { }, }); - channel.push('ping', 'abc').receive('ok', (evt) => { + channel.push('ping', 'abc').receive('ok', (evt: any) => { t.is(evt, 'pong!'); t.pass(); done(); diff --git a/packages/ws-worker/test/reasons.test.ts b/packages/ws-worker/test/reasons.test.ts index 3ceecfb1a..c93d4da8f 100644 --- a/packages/ws-worker/test/reasons.test.ts +++ b/packages/ws-worker/test/reasons.test.ts @@ -1,11 +1,11 @@ import test from 'ava'; import createRTE from '@openfn/engine-multi'; import { createMockLogger } from '@openfn/logger'; +import type { ExitReason } from '@openfn/lexicon/lightning'; import { createPlan } from './util'; import { execute as doExecute } from '../src/api/execute'; import { mockChannel } from '../src/mock/sockets'; - import { STEP_START, STEP_COMPLETE, @@ -13,10 +13,10 @@ import { RUN_START, RUN_COMPLETE, } from '../src/events'; -import { ExitReason } from '../src/types'; +import { ExecutionPlan } from '@openfn/lexicon'; -let 
engine; -let logger; +let engine: any; +let logger: any; test.before(async () => { logger = createMockLogger(); @@ -39,7 +39,7 @@ test.before(async () => { test.after(async () => engine.destroy()); // Wrap up an execute call, capture the on complete state -const execute = async (plan, options = {}) => +const execute = async (plan: ExecutionPlan, input = {}, options = {}) => new Promise<{ reason: ExitReason; state: any }>((done) => { // Ignore all channel events // In these test we assume that the correct messages are sent to the channel @@ -51,12 +51,11 @@ const execute = async (plan, options = {}) => [RUN_COMPLETE]: async () => true, }); - const onFinish = (result) => { + const onFinish = (result: any) => { done(result); }; - // @ts-ignore - doExecute(channel, engine, logger, plan, options, onFinish); + doExecute(channel, engine, logger, plan, input, options, onFinish); }); test('success', async (t) => { @@ -65,9 +64,9 @@ test('success', async (t) => { expression: '(s) => s', }); - plan.initialState = { data: { result: 42 } }; + const input = { data: { result: 42 } }; - const { reason } = await execute(plan); + const { reason } = await execute(plan, input); t.is(reason.reason, 'success'); }); @@ -165,10 +164,11 @@ test('fail: error in the first job, with downstream job that is not run', async { id: 'a', expression: 'export default [(s) => {throw "abort!"}]', - next: { b: true }, + next: { b: '!state.errors' }, }, { id: 'b', + expression: 'export default [(s) => s]', } ); @@ -231,7 +231,7 @@ test('kill: timeout', async (t) => { runTimeoutMs: 100, }; - const { reason } = await execute(plan, options); + const { reason } = await execute(plan, {}, options); t.is(reason.reason, 'kill'); t.is(reason.error_type, 'TimeoutError'); t.is(reason.error_message, 'Workflow failed to return within 100ms'); diff --git a/packages/ws-worker/test/server.test.ts b/packages/ws-worker/test/server.test.ts index c6eb919b1..1ac45a6cc 100644 --- a/packages/ws-worker/test/server.test.ts +++ 
b/packages/ws-worker/test/server.test.ts @@ -4,7 +4,7 @@ import createWorkerServer from '../src/server'; test.before(async () => { const engine = await createMockRTE(); - createWorkerServer(engine, { + createWorkerServer(engine as any, { port: 2323, secret: 'abc', maxWorkflows: 1, diff --git a/packages/ws-worker/test/util.ts b/packages/ws-worker/test/util.ts index fe663d009..df70a3c99 100644 --- a/packages/ws-worker/test/util.ts +++ b/packages/ws-worker/test/util.ts @@ -1,7 +1,8 @@ -import { ExecutionPlan } from '@openfn/runtime'; +import { ExecutionPlan, Job } from '@openfn/lexicon'; +import { Edge, Node } from '@openfn/lexicon/lightning'; import crypto from 'node:crypto'; -export const wait = (fn, maxRuns = 100) => +export const wait = (fn: () => any, maxRuns = 100) => new Promise((resolve) => { let count = 0; let ival = setInterval(() => { @@ -19,11 +20,11 @@ export const wait = (fn, maxRuns = 100) => }, 100); }); -export const clone = (obj) => JSON.parse(JSON.stringify(obj)); +export const clone = (obj: any) => JSON.parse(JSON.stringify(obj)); -export const waitForEvent = (engine, eventName) => +export const waitForEvent = (engine: any, eventName: string) => new Promise((resolve) => { - engine.once(eventName, (e) => { + engine.once(eventName, (e: any) => { resolve(e); }); }); @@ -33,22 +34,27 @@ export const sleep = (delay = 100) => setTimeout(resolve, delay); }); -export const createPlan = (...jobs) => +export const createPlan = (...steps: Job[]) => ({ id: crypto.randomUUID(), - jobs: [...jobs], + workflow: { + steps, + }, + options: {}, } as ExecutionPlan); -export const createEdge = (from: string, to: string) => ({ - id: `${from}-${to}`, - source_job_id: from, - target_job_id: to, -}); +export const createEdge = (from: string, to: string) => + ({ + id: `${from}-${to}`, + source_job_id: from, + target_job_id: to, + } as Edge); -export const createJob = (body?: string, id?: string) => ({ - id: id || crypto.randomUUID(), - body: body || `fn((s) => s)`, -}); 
+export const createJob = (body?: string, id?: string) => + ({ + id: id || crypto.randomUUID(), + body: body || `fn((s) => s)`, + } as Node); export const createRun = (jobs = [], edges = [], triggers = []) => ({ id: crypto.randomUUID(), diff --git a/packages/ws-worker/test/util/convert-run.test.ts b/packages/ws-worker/test/util/convert-lightning-plan.test.ts similarity index 56% rename from packages/ws-worker/test/util/convert-run.test.ts rename to packages/ws-worker/test/util/convert-lightning-plan.test.ts index 3cfd58ec3..dbed7a503 100644 --- a/packages/ws-worker/test/util/convert-run.test.ts +++ b/packages/ws-worker/test/util/convert-lightning-plan.test.ts @@ -1,6 +1,7 @@ import test from 'ava'; -import convertRun, { conditions } from '../../src/util/convert-run'; -import { Run, Node } from '../../src/types'; +import type { LightningPlan, Node } from '@openfn/lexicon/lightning'; +import convertPlan, { conditions } from '../../src/util/convert-lightning-plan'; +import { ConditionalStepEdge, Job } from '@openfn/lexicon'; // Creates a lightning node (job or trigger) const createNode = (props = {}) => @@ -12,7 +13,7 @@ const createNode = (props = {}) => ...props, } as Node); -const createEdge = (from, to, props = {}) => ({ +const createEdge = (from: string, to: string, props = {}) => ({ id: `${from}-${to}`, source_job_id: from, target_job_id: to, @@ -36,28 +37,51 @@ const createJob = (props = {}) => ({ ...props, }); -const testEdgeCondition = (expr, state) => { +const testEdgeCondition = (expr: string, state: any) => { const fn = new Function('state', 'return ' + expr); return fn(state); }; test('convert a single job', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode()], triggers: [], edges: [], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [createJob()], + options: {}, + workflow: { + steps: [createJob()], + }, + }); +}); + +test('convert 
a single job with names', (t) => { + const run: Partial = { + id: 'w', + name: 'my-workflow', + jobs: [createNode({ name: 'my-job' })], + triggers: [], + edges: [], + }; + const { plan } = convertPlan(run as LightningPlan); + + t.deepEqual(plan, { + id: 'w', + options: {}, + workflow: { + name: 'my-workflow', + steps: [createJob({ name: 'my-job' })], + }, }); }); test('convert a single job with options', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode()], triggers: [], @@ -67,111 +91,123 @@ test('convert a single job with options', (t) => { runTimeoutMs: 10, }, }; - const { plan, options } = convertRun(run as Run); + const { plan, options } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [createJob()], + options: {}, + workflow: { + steps: [createJob()], + }, + }); + t.deepEqual(options, { + runTimeoutMs: 10, + sanitize: 'obfuscate', }); - t.deepEqual(options, run.options); }); // Note idk how lightningg will handle state/defaults on a job // but this is what we'll do right now test('convert a single job with data', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ state: { data: { x: 22 } } })], triggers: [], edges: [], }; - const { plan, options } = convertRun(run as Run); + const { plan, options } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [createJob({ state: { data: { x: 22 } } })], + options: {}, + workflow: { + steps: [createJob({ state: { data: { x: 22 } } })], + }, }); t.deepEqual(options, {}); }); test('Accept a partial run object', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', }; - const { plan, options } = convertRun(run as Run); + const { plan, options } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [], + options: {}, + workflow: { + steps: [], + }, }); t.deepEqual(options, {}); }); -test('handle dataclip_id', (t) => { - const run: Partial = { +test('handle dataclip_id as 
input', (t) => { + const run: Partial = { id: 'w', dataclip_id: 'xyz', }; - const { plan } = convertRun(run as Run); + const { input } = convertPlan(run as LightningPlan); - t.deepEqual(plan, { - id: 'w', - initialState: 'xyz', - jobs: [], - }); + t.deepEqual(input, 'xyz'); }); -test('handle starting_node_id', (t) => { - const run: Partial = { +test('handle starting_node_id as options', (t) => { + const run: Partial = { id: 'w', starting_node_id: 'j1', }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); - t.deepEqual(plan, { - id: 'w', + t.deepEqual(plan.options, { start: 'j1', - jobs: [], }); }); test('convert a single trigger', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [], edges: [], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - }, - ], + options: {}, + workflow: { + steps: [ + { + id: 't', + }, + ], + }, }); }); // This exhibits current behaviour. 
This should never happen though test('ignore a single edge', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [], triggers: [], edges: [createEdge('a', 'b')], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [], + options: {}, + workflow: { + steps: [], + }, }); }); test('convert a single trigger with an edge', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [createNode()], @@ -183,24 +219,27 @@ test('convert a single trigger with an edge', (t) => { }, ], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - next: { - a: true, + options: {}, + workflow: { + steps: [ + { + id: 't', + next: { + a: true, + }, }, - }, - createJob(), - ], + createJob(), + ], + }, }); }); test('convert a single trigger with two edges', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], @@ -217,26 +256,29 @@ test('convert a single trigger with two edges', (t) => { }, ], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - next: { - a: true, - b: true, + options: {}, + workflow: { + steps: [ + { + id: 't', + next: { + a: true, + b: true, + }, }, - }, - createJob({ id: 'a' }), - createJob({ id: 'b' }), - ], + createJob({ id: 'a' }), + createJob({ id: 'b' }), + ], + }, }); }); test('convert a disabled trigger', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', triggers: [createTrigger()], jobs: [createNode({ id: 'a' })], @@ -249,38 +291,47 @@ test('convert a disabled trigger', (t) => { }, ], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); 
t.deepEqual(plan, { id: 'w', - jobs: [ - { - id: 't', - next: {}, - }, - createJob({ id: 'a' }), - ], + options: {}, + workflow: { + steps: [ + { + id: 't', + next: {}, + }, + createJob({ id: 'a' }), + ], + }, }); }); test('convert two linked jobs', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b')], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [createJob({ id: 'a', next: { b: true } }), createJob({ id: 'b' })], + options: {}, + workflow: { + steps: [ + createJob({ id: 'a', next: { b: true } }), + createJob({ id: 'b' }), + ], + }, }); }); // This isn't supported by the runtime, but it'll survive the conversion test('convert a job with two upstream jobs', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [ createNode({ id: 'a' }), @@ -290,52 +341,61 @@ test('convert a job with two upstream jobs', (t) => { triggers: [], edges: [createEdge('a', 'x'), createEdge('b', 'x')], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [ - createJob({ id: 'a', next: { x: true } }), - createJob({ id: 'b', next: { x: true } }), - createJob({ id: 'x' }), - ], + options: {}, + workflow: { + steps: [ + createJob({ id: 'a', next: { x: true } }), + createJob({ id: 'b', next: { x: true } }), + createJob({ id: 'x' }), + ], + }, }); }); test('convert two linked jobs with an edge condition', (t) => { const condition = 'state.age > 10'; - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [ - createJob({ id: 'a', next: { 
b: { condition } } }), - createJob({ id: 'b' }), - ], + options: {}, + workflow: { + steps: [ + createJob({ id: 'a', next: { b: { condition } } }), + createJob({ id: 'b' }), + ], + }, }); }); test('convert two linked jobs with a disabled edge', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { enabled: false })], }; - const { plan } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); t.deepEqual(plan, { id: 'w', - jobs: [ - createJob({ id: 'a', next: { b: { disabled: true } } }), - createJob({ id: 'b' }), - ], + options: {}, + workflow: { + steps: [ + createJob({ id: 'a', next: { b: { disabled: true } } }), + createJob({ id: 'b' }), + ], + }, }); }); @@ -343,7 +403,7 @@ test('on_job_success condition: return true if no errors', (t) => { const condition = conditions.on_job_success('a'); const state = {}; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -354,7 +414,7 @@ test('on_job_success condition: return true if state is undefined', (t) => { const condition = conditions.on_job_success('a'); const state = undefined; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -369,7 +429,7 @@ test('on_job_success condition: return true if unconnected upstream errors', (t) }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -384,7 +444,7 @@ test('on_job_success condition: return false if the upstream job errored', (t) = }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -399,7 +459,7 @@ test('on_job_failure condition: return true if error immediately upstream', (t) }, }, }; - 
const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, true); }); @@ -414,7 +474,7 @@ test('on_job_failure condition: return false if unrelated error upstream', (t) = }, }, }; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -423,7 +483,7 @@ test('on_job_failure condition: return false if no errors', (t) => { const condition = conditions.on_job_failure('a'); const state = {}; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); @@ -432,45 +492,45 @@ test('on_job_failure condition: return false if state is undefined', (t) => { const condition = conditions.on_job_failure('a'); const state = undefined; - const result = testEdgeCondition(condition, state); + const result = testEdgeCondition(condition!, state); t.is(result, false); }); test('convert edge condition on_job_success', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition: 'on_job_success' })], }; - const { plan } = convertRun(run as Run); - - const [job] = plan.jobs; + const { plan } = convertPlan(run as LightningPlan); - t.truthy(job.next?.b); - t.is(job.next.b.condition, conditions.on_job_success('a')); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; - t.true(testEdgeCondition(job.next.b.condition, {})); + t.truthy(edge.b); + t.is(edge.b.condition!, conditions.on_job_success('a')!); + t.true(testEdgeCondition(edge.b.condition!, {})); }); test('convert edge condition on_job_failure', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition: 'on_job_failure' })], }; - const { plan } = convertRun(run 
as Run); + const { plan } = convertPlan(run as LightningPlan); - const [job] = plan.jobs; - - t.truthy(job.next?.b); - t.is(job.next.b.condition, conditions.on_job_failure('a')); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; + t.truthy(edge.b); + t.is(edge.b.condition!, conditions.on_job_failure('a')!); // Check that this is valid js t.true( - testEdgeCondition(job.next.b.condition, { + testEdgeCondition(edge.b.condition!, { errors: { a: {} }, }) ); @@ -478,46 +538,32 @@ test('convert edge condition on_job_failure', (t) => { test('convert edge condition on_job_success with a funky id', (t) => { const id_a = 'a-b-c@ # {} !£'; - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: id_a }), createNode({ id: 'b' })], triggers: [], edges: [createEdge(id_a, 'b', { condition: 'on_job_success' })], }; - const { plan } = convertRun(run as Run); - const [job] = plan.jobs; - - t.truthy(job.next?.b); - t.is(job.next.b.condition, conditions.on_job_success(id_a)); + const { plan } = convertPlan(run as LightningPlan); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; + t.truthy(edge.b); + t.is(edge.b.condition!, conditions.on_job_success(id_a)!); // Check that this is valid js - t.true(testEdgeCondition(job.next.b.condition, {})); + t.true(testEdgeCondition(edge.b.condition!, {})); }); test('convert edge condition always', (t) => { - const run: Partial = { + const run: Partial = { id: 'w', jobs: [createNode({ id: 'a' }), createNode({ id: 'b' })], triggers: [], edges: [createEdge('a', 'b', { condition: 'always' })], }; - const { plan } = convertRun(run as Run); - - const [job] = plan.jobs; - - t.false(job.next.b.hasOwnProperty('condition')); -}); - -test('convert random options', (t) => { - const run: Partial = { - id: 'w', - options: { - a: 1, - b: 2, - c: 3, - }, - }; - const { options } = convertRun(run as Run); + const { plan } = convertPlan(run as LightningPlan); - 
t.deepEqual(options, { a: 1, b: 2, c: 3 }); + const [job] = plan.workflow.steps as Job[]; + const edge = job.next as Record; + t.false(edge.b.hasOwnProperty('condition')); }); diff --git a/packages/ws-worker/test/util/create-run-state.test.ts b/packages/ws-worker/test/util/create-run-state.test.ts index 9bf15e960..7424ee957 100644 --- a/packages/ws-worker/test/util/create-run-state.test.ts +++ b/packages/ws-worker/test/util/create-run-state.test.ts @@ -1,101 +1,100 @@ import test from 'ava'; +import type { ExecutionPlan, Job } from '@openfn/lexicon'; import { createRunState } from '../../src/util'; +const createPlan = (jobs: Partial[]) => + ({ + workflow: { + steps: jobs.map((j) => ({ expression: '.', ...j })), + }, + options: {}, + } as ExecutionPlan); + test('create run', (t) => { - const options = { timeout: 666 }; - const plan = { jobs: [{ id: 'a' }] }; - const run = createRunState(plan, options); + const plan = createPlan([{ id: 'a' }]); + const input = undefined; + + const run = createRunState(plan, input); t.deepEqual(run.plan, plan); t.deepEqual(run.lastDataclipId, ''); t.deepEqual(run.dataclips, {}); t.deepEqual(run.inputDataclips, {}); t.deepEqual(run.reasons, {}); - t.deepEqual(run.options, options); }); test('Set initial input dataclip if no explicit start and first job is a step', (t) => { - const plan = { initialState: 'x', jobs: [{ id: 'a', expression: '.' }] }; - const run = createRunState(plan); + const plan = createPlan([{ id: 'a' }]); + const input = 'x'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 'x' }); }); test('Set initial input dataclip if the explicit start is a step', (t) => { - const plan = { - initialState: 'x', - start: 'a', - jobs: [ - { id: 'b', expression: '.' }, - { id: 'a', expression: '.' 
}, - ], - }; - const run = createRunState(plan); + const plan = createPlan([{ id: 'a' }, { id: 'b' }]); + plan.options.start = 'a'; + const input = 'x'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 'x' }); }); test('Set initial input dataclip if the start is a trigger (simple)', (t) => { - const plan = { - initialState: 's', - start: 't', - jobs: [ - { id: 't', next: { a: true } }, - { id: 'a', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([{ id: 't', next: { a: true } }, { id: 'a' }]); + plan.options.start = 'a'; + const input = 's'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 's' }); }); test('Set initial input dataclip if the start is a trigger (complex)', (t) => { - const plan = { - initialState: 's', - start: 't', - jobs: [ - { id: 'a', expression: '.' }, - { id: 'b', expression: '.' }, - { id: 'c', expression: '.' }, - { id: 'd', expression: '.' }, - { id: 't', next: { c: true } }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([ + { id: 'a' }, + { id: 'b' }, + { id: 'c' }, + { id: 'd' }, + { id: 't', next: { c: true }, expression: undefined }, + ]); + plan.options.start = 't'; + const input = 's'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { c: 's' }); }); test('Set initial input dataclip with a trigger as implicit start', (t) => { - const plan = { - initialState: 's', - jobs: [ - { id: 't', next: { c: true } }, - { id: 'a', expression: '.' }, - { id: 'b', expression: '.' }, - { id: 'c', expression: '.' }, - { id: 'd', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([ + { id: 't', next: { c: true }, expression: undefined }, + { id: 'a', expression: '.' }, + { id: 'b', expression: '.' }, + { id: 'c', expression: '.' }, + { id: 'd', expression: '.' 
}, + ]); + const input = 's'; + + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { c: 's' }); }); test('Set initial input dataclip with a trigger with multiple downstream jobs', (t) => { - const plan = { - initialState: 's', - start: 't', - jobs: [ - { id: 'a', expression: '.' }, - { id: 'b', expression: '.' }, - { id: 't', next: { a: true, b: true, c: true } }, - { id: 'c', expression: '.' }, - { id: 'd', expression: '.' }, - ], - }; - const run = createRunState(plan); + const plan = createPlan([ + { id: 'a' }, + { id: 'b' }, + { id: 't', next: { a: true, b: true, c: true }, expression: undefined }, + { id: 'c' }, + { id: 'd' }, + ]); + plan.options.start = 't'; + const input = 's'; + const run = createRunState(plan, input); t.deepEqual(run.inputDataclips, { a: 's', b: 's', c: 's' }); }); diff --git a/packages/ws-worker/test/util/throttle.test.ts b/packages/ws-worker/test/util/throttle.test.ts index f865f2aca..10260abe8 100644 --- a/packages/ws-worker/test/util/throttle.test.ts +++ b/packages/ws-worker/test/util/throttle.test.ts @@ -88,7 +88,7 @@ test('return in order', async (t) => { const results: string[] = []; - const fn = (name: string, delay: number) => + const fn = (name: string) => new Promise((resolve) => { setTimeout(() => { results.push(name); diff --git a/packages/ws-worker/tsconfig.json b/packages/ws-worker/tsconfig.json index 3be5c53e0..834d5af09 100644 --- a/packages/ws-worker/tsconfig.json +++ b/packages/ws-worker/tsconfig.json @@ -1,6 +1,6 @@ { "extends": "../../tsconfig.common", - "include": ["src/**/*.ts", "test/mock/data.ts", "src/channels/runs"], + "include": ["src/**/*.ts", "test/**/*.ts", "src/channels/runs"], "compilerOptions": { "module": "ESNext" } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 17e5bf7f3..179639c57 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -104,6 +104,12 @@ importers: specifier: ^3.0.2 version: 3.0.2 + integration-tests/cli/repo: + dependencies: + '@openfn/language-common_1.12.0': + 
specifier: npm:@openfn/language-common@^1.12.0 + version: /@openfn/language-common@1.12.0 + integration-tests/worker: dependencies: '@openfn/engine-multi': @@ -185,6 +191,9 @@ importers: '@openfn/language-common': specifier: 2.0.0-rc3 version: 2.0.0-rc3 + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@types/mock-fs': specifier: ^4.13.1 version: 4.13.1 @@ -377,6 +386,9 @@ importers: '@openfn/language-common': specifier: 2.0.0-rc3 version: 2.0.0-rc3 + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@openfn/logger': specifier: workspace:* version: link:../logger @@ -413,6 +425,12 @@ importers: packages/engine-multi/tmp/repo: {} + packages/lexicon: + devDependencies: + '@openfn/logger': + specifier: workspace:^ + version: link:../logger + packages/lightning-mock: dependencies: '@koa/router': @@ -421,6 +439,9 @@ importers: '@openfn/engine-multi': specifier: workspace:* version: link:../engine-multi + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@openfn/logger': specifier: workspace:* version: link:../logger @@ -559,6 +580,9 @@ importers: '@openfn/language-common': specifier: 2.0.0-rc3 version: 2.0.0-rc3 + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@types/mock-fs': specifier: ^4.13.1 version: 4.13.1 @@ -595,6 +619,9 @@ importers: '@openfn/engine-multi': specifier: workspace:* version: link:../engine-multi + '@openfn/lexicon': + specifier: workspace:^ + version: link:../lexicon '@openfn/logger': specifier: workspace:* version: link:../logger @@ -1329,6 +1356,11 @@ packages: heap: 0.2.7 dev: false + /@fastify/busboy@2.1.0: + resolution: {integrity: sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==} + engines: {node: '>=14'} + dev: false + /@inquirer/checkbox@1.3.5: resolution: {integrity: sha512-ZznkPU+8XgNICKkqaoYENa0vTw9jeToEHYyG5gUKpGmY+4PqPTsvLpSisOt9sukLkYzPRkpSCHREgJLqbCG3Fw==} engines: {node: '>=14.18.0'} @@ -1585,6 
+1617,22 @@ packages: semver: 7.5.4 dev: true + /@openfn/language-common@1.12.0: + resolution: {integrity: sha512-JQjJpRNdwG5LMmAIO7P7HLgtHYS0UssoibAhMJOpoHk5/kFLDpH3tywpp40Pai33NMzgofxb5gb0MZTgoEk3fw==} + dependencies: + ajv: 8.12.0 + axios: 1.1.3 + csv-parse: 5.5.3 + csvtojson: 2.0.10 + date-fns: 2.30.0 + http-status-codes: 2.3.0 + jsonpath-plus: 4.0.0 + lodash: 4.17.21 + undici: 5.28.3 + transitivePeerDependencies: + - debug + dev: false + /@openfn/language-common@1.7.5: resolution: {integrity: sha512-QivV3v5Oq5fb4QMopzyqUUh+UGHaFXBdsGr6RCmu6bFnGXdJdcQ7GpGpW5hKNq29CkmE23L/qAna1OLr4rP/0w==} dependencies: @@ -2034,6 +2082,15 @@ packages: clean-stack: 4.2.0 indent-string: 5.0.0 + /ajv@8.12.0: + resolution: {integrity: sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA==} + dependencies: + fast-deep-equal: 3.1.3 + json-schema-traverse: 1.0.0 + require-from-string: 2.0.2 + uri-js: 4.4.1 + dev: false + /ansi-colors@4.1.3: resolution: {integrity: sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw==} engines: {node: '>=6'} @@ -2194,7 +2251,6 @@ packages: /asynckit@0.4.0: resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} - dev: true /atob@2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} @@ -2342,7 +2398,6 @@ packages: proxy-from-env: 1.1.0 transitivePeerDependencies: - debug - dev: true /b4a@1.6.1: resolution: {integrity: sha512-AsKjNhz72yxteo/0EtQEiwkMUgk/tGmycXlbG4g3Ard2/ULtNLUykGOkeK0egmN27h0xMAhb76jYccW+XTBExA==} @@ -2423,6 +2478,10 @@ packages: readable-stream: 4.2.0 dev: true + /bluebird@3.7.2: + resolution: {integrity: sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==} + dev: false + /blueimp-md5@2.19.0: resolution: {integrity: 
sha512-DRQrD6gJyy8FbiE4s+bDoXS9hiW3Vbx5uCdwvcCf3zLHL+Iv7LtGHLpr+GZV8rHG8tK766FGYBwRbu8pELTt+w==} @@ -2815,7 +2874,6 @@ packages: engines: {node: '>= 0.8'} dependencies: delayed-stream: 1.0.0 - dev: true /commander@4.1.1: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} @@ -2958,6 +3016,10 @@ packages: resolution: {integrity: sha512-cO1I/zmz4w2dcKHVvpCr7JVRu8/FymG5OEpmvsZYlccYolPBLoVGKUHgNoc4ZGkFeFlWGEDmMyBM+TTqRdW/wg==} dev: true + /csv-parse@5.5.3: + resolution: {integrity: sha512-v0KW6C0qlZzoGjk6u5tLmVfyZxNgPGXZsWTXshpAgKVGmGXzaVWGdlCFxNx5iuzcXT/oJN1HHM9DZKwtAtYa+A==} + dev: false + /csv-stringify@5.6.5: resolution: {integrity: sha512-PjiQ659aQ+fUTQqSrd1XEDnOr52jh30RBurfzkscaE2tPaFsDH5wOAHJiw8XAHphRknCwMUE9KRayc4K/NbO8A==} dev: true @@ -2972,6 +3034,16 @@ packages: stream-transform: 2.1.3 dev: true + /csvtojson@2.0.10: + resolution: {integrity: sha512-lUWFxGKyhraKCW8Qghz6Z0f2l/PqB1W3AO0HKJzGIQ5JRSlR651ekJDiGJbBT4sRNNv5ddnSGVEnsxP9XRCVpQ==} + engines: {node: '>=4.0.0'} + hasBin: true + dependencies: + bluebird: 3.7.2 + lodash: 4.17.21 + strip-bom: 2.0.0 + dev: false + /currently-unhandled@0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -3112,7 +3184,6 @@ packages: /delayed-stream@1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} - dev: true /delegates@1.0.0: resolution: {integrity: sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==} @@ -3938,6 +4009,10 @@ packages: - supports-color dev: true + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: false + /fast-diff@1.3.0: resolution: {integrity: 
sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==} @@ -4084,7 +4159,6 @@ packages: peerDependenciesMeta: debug: optional: true - dev: true /for-in@1.0.2: resolution: {integrity: sha512-7EwmXrOjyL+ChxMhmG5lnW9MPt1aIeZEwKhQzoBUdTV0N3zuwWDZYVJatDvZ2OyzPUvdIAZDsCetk3coyMfcnQ==} @@ -4115,7 +4189,6 @@ packages: asynckit: 0.4.0 combined-stream: 1.0.8 mime-types: 2.1.35 - dev: true /fragment-cache@0.2.1: resolution: {integrity: sha512-GMBAbW9antB8iZRHLoGw0b3HANt57diZYFO/HL1JGIC1MjKrdmhxvrJbupnVvpys0zsz7yBApXdQyfepKly2kA==} @@ -4491,6 +4564,10 @@ packages: - supports-color dev: true + /http-status-codes@2.3.0: + resolution: {integrity: sha512-RJ8XvFvpPM/Dmc5SV+dC4y5PCeOhT3x1Hq0NU3rjGeg5a/CqlhZ7uudknPwZFz4aeAXDcbAyaeP7GAo9lvngtA==} + dev: false + /https-proxy-agent@5.0.1: resolution: {integrity: sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==} engines: {node: '>= 6'} @@ -4891,6 +4968,10 @@ packages: resolution: {integrity: sha512-43r2mRvz+8JRIKnWJ+3j8JtjRKZ6GmjzfaE/qiBJnikNnYv/6bagRJ1kUhNk8R5EX/GkobD+r+sfxCPJsiKBLQ==} engines: {node: '>=12'} + /is-utf8@0.2.1: + resolution: {integrity: sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==} + dev: false + /is-weakref@1.0.2: resolution: {integrity: sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==} dependencies: @@ -4984,6 +5065,10 @@ packages: resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} dev: true + /json-schema-traverse@1.0.0: + resolution: {integrity: sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==} + dev: false + /jsonfile@4.0.0: resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} optionalDependencies: @@ -4998,7 +5083,6 @@ packages: /jsonpath-plus@4.0.0: 
resolution: {integrity: sha512-e0Jtg4KAzDJKKwzbLaUtinCn0RZseWBVRTRGihSpvFlM3wTR7ExSp+PTdeTsDrLNJUe7L7JYJe8mblHX5SCT6A==} engines: {node: '>=10.0'} - dev: true /jsonpath@1.1.1: resolution: {integrity: sha512-l6Cg7jRpixfbgoWgkrl77dgEj8RPvND0wMH6TwQmi9Qs4TFfS9u5cUFnbeKTwj5ga5Y3BTGGNI28k117LJ009w==} @@ -6305,7 +6389,6 @@ packages: /proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} - dev: true /proxy-middleware@0.15.0: resolution: {integrity: sha512-EGCG8SeoIRVMhsqHQUdDigB2i7qU7fCsWASwn54+nPutYO8n4q6EiwMzyfWlC+dzRFExP+kvcnDFdBDHoZBU7Q==} @@ -6338,7 +6421,6 @@ packages: /punycode@2.3.0: resolution: {integrity: sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==} engines: {node: '>=6'} - dev: true /qs@6.11.2: resolution: {integrity: sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==} @@ -6544,6 +6626,11 @@ packages: resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} engines: {node: '>=0.10.0'} + /require-from-string@2.0.2: + resolution: {integrity: sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + dev: false + /require-main-filename@2.0.0: resolution: {integrity: sha512-NKN5kMDylKuldxYLSUfrbo5Tuzh4hd+2E8NPPX02mZtn1VuREQToYe/ZdlJy+J3uCpfaiGF05e7B8W0iXbQHmg==} dev: true @@ -7067,6 +7154,13 @@ packages: dependencies: ansi-regex: 6.0.1 + /strip-bom@2.0.0: + resolution: {integrity: sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==} + engines: {node: '>=0.10.0'} + dependencies: + is-utf8: 0.2.1 + dev: false + /strip-bom@3.0.0: resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==} engines: {node: '>=4'} @@ -7620,6 +7714,13 @@ packages: 
resolution: {integrity: sha512-hEQt0+ZLDVUMhebKxL4x1BTtDY7bavVofhZ9KZ4aI26X9SRaE+Y3m83XUL1UP2jn8ynjndwCCpEHdUG+9pP1Tw==} dev: true + /undici@5.28.3: + resolution: {integrity: sha512-3ItfzbrhDlINjaP0duwnNsKpDQk3acHI3gVJ1z4fmwMK31k5G9OVIAMLSIaP6w4FaGkaAkN6zaQO9LUvZ1t7VA==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.1.0 + dev: false + /union-value@1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -7670,6 +7771,12 @@ packages: engines: {node: '>=4'} dev: true + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.0 + dev: false + /urix@0.1.0: resolution: {integrity: sha512-Am1ousAhSLBeB9cG/7k7r2R0zj50uDRlZHPGbazid5s9rlF1F/QKYObEKSIunSjIOkJZqwRRLpvewjEkM7pSqg==} deprecated: Please see https://github.com/lydell/urix#deprecated From 7a1049de806c57a6a736e442e4b25365ebd52ac2 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 14:52:32 +0000 Subject: [PATCH 092/128] cli: dont print compiler,runtime versions, also show monorepo for adaptor --- packages/cli/src/util/print-versions.ts | 22 +++---- packages/cli/test/util/print-versions.test.ts | 58 ++++++++++++++----- 2 files changed, 51 insertions(+), 29 deletions(-) diff --git a/packages/cli/src/util/print-versions.ts b/packages/cli/src/util/print-versions.ts index 38e035ba1..a5b14cfc1 100644 --- a/packages/cli/src/util/print-versions.ts +++ b/packages/cli/src/util/print-versions.ts @@ -8,8 +8,6 @@ import { Opts } from '../options'; const NODE = 'node.js'; const CLI = 'cli'; -const RUNTIME = 'runtime'; -const COMPILER = 'compiler'; const { triangleRightSmall: t } = mainSymbols; @@ -26,7 +24,7 @@ const loadVersionFromPath = (adaptorPath: string) => { const printVersions = async ( logger: Logger, - options: Partial> = {} + options: Partial> = {} ) => { const { adaptors, 
logJson } = options; let adaptor = ''; @@ -41,6 +39,9 @@ const printVersions = async ( const [namePart, pathPart] = adaptor.split('='); adaptorVersion = loadVersionFromPath(pathPart); adaptorName = getNameAndVersion(namePart).name; + } else if (options.monorepoPath) { + adaptorName = getNameAndVersion(adaptor).name; + adaptorVersion = 'monorepo'; } else { const { name, version } = getNameAndVersion(adaptor); adaptorName = name; @@ -49,9 +50,7 @@ const printVersions = async ( } // Work out the longest label - const longest = Math.max( - ...[NODE, CLI, RUNTIME, COMPILER, adaptorName].map((s) => s.length) - ); + const longest = Math.max(...[NODE, CLI, adaptorName].map((s) => s.length)); // Prefix and pad version numbers const prefix = (str: string) => @@ -62,10 +61,7 @@ const printVersions = async ( // built into process/runner.js const pkg = JSON.parse(readFileSync(`${dirname}/../../package.json`, 'utf8')); - const { version, dependencies } = pkg; - - const compilerVersion = dependencies['@openfn/compiler']; - const runtimeVersion = dependencies['@openfn/runtime']; + const { version } = pkg; let output: any; if (logJson) { @@ -73,8 +69,6 @@ const printVersions = async ( versions: { 'node.js': process.version.substring(1), cli: version, - runtime: runtimeVersion, - compiler: compilerVersion, }, }; if (adaptorName) { @@ -87,9 +81,7 @@ const printVersions = async ( output = `Versions: ${prefix(NODE)}${process.version.substring(1)} -${prefix(CLI)}${version} -${prefix(RUNTIME)}${runtimeVersion} -${prefix(COMPILER)}${compilerVersion}${adaptorVersionString}`; +${prefix(CLI)}${version}${adaptorVersionString}`; } logger.always(output); }; diff --git a/packages/cli/test/util/print-versions.test.ts b/packages/cli/test/util/print-versions.test.ts index 9594eb317..c1408baab 100644 --- a/packages/cli/test/util/print-versions.test.ts +++ b/packages/cli/test/util/print-versions.test.ts @@ -6,7 +6,7 @@ import printVersions from '../../src/util/print-versions'; const root = 
path.resolve('package.json'); -test('print versions for node, cli, runtime and compiler', async (t) => { +test('print versions for node and cli', async (t) => { const logger = createMockLogger('', { level: 'info' }); await printVersions(logger); @@ -17,12 +17,11 @@ test('print versions for node, cli, runtime and compiler', async (t) => { // very crude testing but it's ok to test the intent here t.regex(message, /Versions:/); t.regex(message, /cli/); - t.regex(message, /runtime/); - t.regex(message, /compiler/); + t.regex(message, /node/); t.notRegex(message, /adaptor/); }); -test('print versions for node, cli, runtime, compiler and adaptor', async (t) => { +test('print versions for node, cli and adaptor', async (t) => { const logger = createMockLogger('', { level: 'info' }); await printVersions(logger, { adaptors: ['http'] }); @@ -31,27 +30,24 @@ test('print versions for node, cli, runtime, compiler and adaptor', async (t) => t.regex(message, /Versions:/); t.regex(message, /cli/); - t.regex(message, /runtime/); - t.regex(message, /compiler/); + t.regex(message, /node/); t.regex(message, /http .+ latest/); }); -test('print versions for node, cli, runtime, compiler and adaptor with version', async (t) => { +test('print versions for node, cli and adaptor with version', async (t) => { const logger = createMockLogger('', { level: 'info' }); await printVersions(logger, { adaptors: ['http@1234'] }); const last = logger._parse(logger._last); const message = last.message as string; - // very crude testing but it's ok to test the intent here t.regex(message, /Versions:/); t.regex(message, /cli/); - t.regex(message, /runtime/); - t.regex(message, /compiler/); + t.regex(message, /node/); t.regex(message, /http .+ 1234/); }); -test('print versions for node, cli, runtime, compiler and long-form adaptor', async (t) => { +test('print versions for node, cli and long-form adaptor', async (t) => { const logger = createMockLogger('', { level: 'info' }); await printVersions(logger, { 
adaptors: ['@openfn/language-http'] }); @@ -61,7 +57,7 @@ test('print versions for node, cli, runtime, compiler and long-form adaptor', as t.regex(message, /@openfn\/language-http .+ latest/); }); -test('print versions for node, cli, runtime, compiler and long-form adaptor with version', async (t) => { +test('print versions for node, cli and long-form adaptor with version', async (t) => { const logger = createMockLogger('', { level: 'info' }); await printVersions(logger, { adaptors: ['@openfn/language-http@1234'] }); @@ -71,6 +67,24 @@ test('print versions for node, cli, runtime, compiler and long-form adaptor with t.regex(message, /@openfn\/language-http .+ 1234/); }); +test('print version of adaptor with monorepo', async (t) => { + mock({ + '/repo/http/package.json': '{ "version": "1.0.0" }', + [root]: mock.load(root, {}), + }); + + const logger = createMockLogger('', { level: 'info' }); + await printVersions(logger, { + adaptors: ['@openfn/language-http@1.0.0'], + monorepoPath: '.', + }); + + const last = logger._parse(logger._last); + const message = last.message as string; + + t.regex(message, /@openfn\/language-http(.+)monorepo/); +}); + test('print version of adaptor with path', async (t) => { mock({ '/repo/http/package.json': '{ "version": "1.0.0" }', @@ -88,6 +102,24 @@ test('print version of adaptor with path', async (t) => { t.regex(message, /@openfn\/language-http(.+)1\.0\.0/); }); +test('print version of adaptor with path even if monorepo is set', async (t) => { + mock({ + '/repo/http/package.json': '{ "version": "1.0.0" }', + [root]: mock.load(root, {}), + }); + + const logger = createMockLogger('', { level: 'info' }); + await printVersions(logger, { + adaptors: ['@openfn/language-http=/repo/http'], + monorepoPath: '.', + }); + + const last = logger._parse(logger._last); + const message = last.message as string; + + t.regex(message, /@openfn\/language-http(.+)1\.0\.0/); +}); + test('print version of adaptor with path and @', async (t) => { mock({ 
'/repo/node_modules/@openfn/http/package.json': '{ "version": "1.0.0" }', @@ -115,7 +147,5 @@ test('json output', async (t) => { const [{ versions }] = last.message; t.truthy(versions['node.js']); t.truthy(versions['cli']); - t.truthy(versions['runtime']); - t.truthy(versions['compiler']); t.truthy(versions['http']); }); From 5143d933fa6f3ba9175a64ebf1f5a1725ad07e90 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 15:01:11 +0000 Subject: [PATCH 093/128] cli tweak output to optionally show components --- packages/cli/src/commands.ts | 3 +- packages/cli/src/util/print-versions.ts | 33 ++++++++++++++----- packages/cli/test/util/print-versions.test.ts | 17 ++++++++++ 3 files changed, 44 insertions(+), 9 deletions(-) diff --git a/packages/cli/src/commands.ts b/packages/cli/src/commands.ts index 441213656..ecb7ed6f6 100644 --- a/packages/cli/src/commands.ts +++ b/packages/cli/src/commands.ts @@ -43,7 +43,8 @@ const handlers = { ['repo-install']: install, ['repo-pwd']: pwd, ['repo-list']: list, - version: async (opts: Opts, logger: Logger) => printVersions(logger, opts), + version: async (opts: Opts, logger: Logger) => + printVersions(logger, opts, true), }; // Top level command parser diff --git a/packages/cli/src/util/print-versions.ts b/packages/cli/src/util/print-versions.ts index a5b14cfc1..7532a7ff8 100644 --- a/packages/cli/src/util/print-versions.ts +++ b/packages/cli/src/util/print-versions.ts @@ -8,6 +8,8 @@ import { Opts } from '../options'; const NODE = 'node.js'; const CLI = 'cli'; +const RUNTIME = 'runtime'; +const COMPILER = 'compiler'; const { triangleRightSmall: t } = mainSymbols; @@ -24,7 +26,8 @@ const loadVersionFromPath = (adaptorPath: string) => { const printVersions = async ( logger: Logger, - options: Partial> = {} + options: Partial> = {}, + includeComponents = false ) => { const { adaptors, logJson } = options; let adaptor = ''; @@ -50,7 +53,9 @@ const printVersions = async ( } // Work out the longest label - const longest = 
Math.max(...[NODE, CLI, adaptorName].map((s) => s.length)); + const longest = Math.max( + ...[NODE, CLI, RUNTIME, COMPILER, adaptorName].map((s) => s.length) + ); // Prefix and pad version numbers const prefix = (str: string) => @@ -61,7 +66,10 @@ const printVersions = async ( // built into process/runner.js const pkg = JSON.parse(readFileSync(`${dirname}/../../package.json`, 'utf8')); - const { version } = pkg; + const { version, dependencies } = pkg; + + const compilerVersion = dependencies['@openfn/compiler']; + const runtimeVersion = dependencies['@openfn/runtime']; let output: any; if (logJson) { @@ -71,17 +79,26 @@ const printVersions = async ( cli: version, }, }; + if (includeComponents) { + output.versions.runtime = runtimeVersion; + output.versions.compiler = compilerVersion; + } if (adaptorName) { output.versions[adaptorName] = adaptorVersion; } } else { - const adaptorVersionString = adaptorName - ? `\n${prefix(adaptorName)}${adaptorVersion}` - : ''; - output = `Versions: ${prefix(NODE)}${process.version.substring(1)} -${prefix(CLI)}${version}${adaptorVersionString}`; +${prefix(CLI)}${version}`; + + if (includeComponents) { + output += `\n${prefix(RUNTIME)}${runtimeVersion} +${prefix(COMPILER)}${compilerVersion}`; + } + + if (adaptorName) { + output += `\n${prefix(adaptorName)}${adaptorVersion}`; + } } logger.always(output); }; diff --git a/packages/cli/test/util/print-versions.test.ts b/packages/cli/test/util/print-versions.test.ts index c1408baab..05c387653 100644 --- a/packages/cli/test/util/print-versions.test.ts +++ b/packages/cli/test/util/print-versions.test.ts @@ -19,6 +19,8 @@ test('print versions for node and cli', async (t) => { t.regex(message, /cli/); t.regex(message, /node/); t.notRegex(message, /adaptor/); + t.notRegex(message, /compiler/); + t.notRegex(message, /runtime/); }); test('print versions for node, cli and adaptor', async (t) => { @@ -34,6 +36,21 @@ test('print versions for node, cli and adaptor', async (t) => { t.regex(message, 
/http .+ latest/); }); +test('print versions for node, cli, components and adaptor', async (t) => { + const logger = createMockLogger('', { level: 'info' }); + await printVersions(logger, { adaptors: ['http'] }, true); + + const last = logger._parse(logger._last); + const message = last.message as string; + + t.regex(message, /Versions:/); + t.regex(message, /cli/); + t.regex(message, /node/); + t.regex(message, /runtime/); + t.regex(message, /node/); + t.regex(message, /http .+ latest/); +}); + test('print versions for node, cli and adaptor with version', async (t) => { const logger = createMockLogger('', { level: 'info' }); await printVersions(logger, { adaptors: ['http@1234'] }); From 78cfc5d6249833628e1b53aad1f799d1e40ad1d3 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 15:09:06 +0000 Subject: [PATCH 094/128] worker: simplify version output --- packages/ws-worker/src/util/versions.ts | 12 +++--------- packages/ws-worker/test/util/versions.test.ts | 19 ++++++------------- 2 files changed, 9 insertions(+), 22 deletions(-) diff --git a/packages/ws-worker/src/util/versions.ts b/packages/ws-worker/src/util/versions.ts index 35e7d1dbf..8c3ba4383 100644 --- a/packages/ws-worker/src/util/versions.ts +++ b/packages/ws-worker/src/util/versions.ts @@ -5,29 +5,23 @@ const { triangleRightSmall: t } = mainSymbols; export type Versions = { node: string; worker: string; - engine: string; [adaptor: string]: string; }; export default (stepId: string, versions: Versions, adaptor?: string) => { - let longest = 'compiler'.length; // Bit wierdly defensive but ensure padding is reasonable even if version has no props + let longest = 'worker'.length; // Bit wierdly defensive but ensure padding is reasonable even if version has no props for (const v in versions) { longest = Math.max(v.length, longest); } - const { node, compiler, engine, worker, runtime, ...adaptors } = versions; + const { node, worker, ...adaptors } = versions; // Prefix and pad version numbers const 
prefix = (str: string) => ` ${t} ${str.padEnd(longest + 4, ' ')}`; let str = `Versions for step ${stepId}: ${prefix('node.js')}${versions.node || 'unknown'} -${prefix('worker')}${versions.worker || 'unknown'} -${prefix('engine')}${versions.engine || 'unknown'}`; - - // Unfortunately the runtime and compiler versions get reported as workspace:* in prod right now - // ${prefix('runtime')}${versions.runtime || 'unknown'} - // ${prefix('compiler')}${versions.compiler || 'unknown'}`; +${prefix('worker')}${versions.worker || 'unknown'}`; if (Object.keys(adaptors).length) { let allAdaptors = Object.keys(adaptors); diff --git a/packages/ws-worker/test/util/versions.test.ts b/packages/ws-worker/test/util/versions.test.ts index fe4c41de6..6aadf00a3 100644 --- a/packages/ws-worker/test/util/versions.test.ts +++ b/packages/ws-worker/test/util/versions.test.ts @@ -5,10 +5,7 @@ import calculateVersionString from '../../src/util/versions'; // keys in this obejct are scrambled on purpose const versions = { worker: '2', - // compiler: '5', node: '1', - engine: '3', - // runtime: '4', }; // Util function to parse a version string into something easier to test @@ -33,9 +30,8 @@ test('calculate version string', (t) => { t.is( str, `Versions for step step-1: - ▸ node.js 1 - ▸ worker 2 - ▸ engine 3` + ▸ node.js 1 + ▸ worker 2` ); }); @@ -46,7 +42,6 @@ test('helper should parse a version string and return the correct order', (t) => t.deepEqual(parsed, [ ['node.js', '1'], ['worker', '2'], - ['engine', '3'], ]); }); @@ -58,7 +53,6 @@ test("show unknown if a version isn't passed", (t) => { t.deepEqual(parsed, [ ['node.js', 'unknown'], ['worker', 'unknown'], - ['engine', 'unknown'], ]); }); @@ -67,9 +61,8 @@ test('show adaptors last', (t) => { '@openfn/language-common': '1.0.0', ...versions, }); - const parsed = parse(str); - const common = parsed[3]; + const common = parsed[2]; t.deepEqual(common, ['@openfn/language-common', '1.0.0']); }); @@ -83,9 +76,9 @@ test('sort and list multiple 
adaptors', (t) => { const parsed = parse(str); - const a = parsed[3]; - const j = parsed[4]; - const z = parsed[5]; + const a = parsed[2]; + const j = parsed[3]; + const z = parsed[4]; t.deepEqual(a, ['a', '1']); t.deepEqual(j, ['j', '2']); From a0823f2af8f03bc0fd8758567a145f46d312554a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Fri, 9 Feb 2024 15:16:49 +0000 Subject: [PATCH 095/128] mock: resolve conflict --- packages/lexicon/lightning.d.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index 05f1b94f1..e54902d95 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -86,11 +86,8 @@ export type DataClip = Record; export type Credential = Record; -<<<<<<< HEAD // TODO export reason strings from this repo // and explain what each reason means -======= ->>>>>>> release/1-0 export type ExitReasonStrings = | 'success' | 'fail' From 101f5a1d476b95cd0ccc1d2843a8602e12160355 Mon Sep 17 00:00:00 2001 From: josephjclark Date: Fri, 9 Feb 2024 15:37:41 +0000 Subject: [PATCH 096/128] Autoinstall by default (#594) * lexicon: start building a central lexicon of definitions * runtime: huge refactor of runtime core API * runtime: more refactoring * runtime: take initial state out of the execution plan * fix tests * runtime: changeset * runtime: extra type tweakings * runtime: readme * runtime: jobs -> steps (mostly) there are cases where job is more accurate and useful * cli: start refactoring towrads new runtime API Done a big chunk of execute but still a way to go * cli: basically get the CLI working again * cli: types * cli: fix a bunch of tests, update workflow parsing * cli: fix execute and compile tests * cli: more test fixes * fix more cli tests * cli: fix integration tests * cli: tidy * runtime: remove strict mode * remove strict mode * cli: default workflow name to the file name * runtime: tweak log output * cli: remove log * cli: types * docs * deploy: adjust logging 
* engine: update types * engine: update names and types This is 90% of the basic rename done. Tests may even pass * runtime: male statePropsToRemove a system options, rather than workflow specific If a workflow wants to remove props, it'll add an fn bock * engine: restore statePropsToRemove tests * mock: update to lexicon * worker: start mapping to lexicon. Handled run-> plan conversion * worker: typings * worker: fix all tests * engine: types * worker: fix cheeky test somehow missed it last time * tests: fix cli tests * worker: update test * package lock * tests: update test * changesets and housekeeping * more housekeeping * engine: tweak test * runtime: tweak error messages * worker: stricter type checkign on tests * fix test * typing in worker tests * worker: update channel mock * lexicon: docs * Run -> LightningPlan * version bumps for logger and mock * cli: autoinstall by default * cli: docs * changeset * cli: fix tests Need to disable autoinstall now or some tests will blow up! --- .changeset/sixty-snails-lie.md | 5 +++++ packages/cli/README.md | 4 ++-- packages/cli/src/options.ts | 4 ++-- packages/cli/test/commands.test.ts | 12 ++++++++---- packages/cli/test/execute/options.test.ts | 2 +- 5 files changed, 18 insertions(+), 9 deletions(-) create mode 100644 .changeset/sixty-snails-lie.md diff --git a/.changeset/sixty-snails-lie.md b/.changeset/sixty-snails-lie.md new file mode 100644 index 000000000..59a13a900 --- /dev/null +++ b/.changeset/sixty-snails-lie.md @@ -0,0 +1,5 @@ +--- +'@openfn/cli': major +--- + +Autoinstall adaptors by default (pass `--no-autoinstall` to disable) diff --git a/packages/cli/README.md b/packages/cli/README.md index 542860ae9..cd4435058 100644 --- a/packages/cli/README.md +++ b/packages/cli/README.md @@ -94,12 +94,12 @@ You're probably here to run Workflows (or individual jobs), which the CLI makes ``` openfn path/to/workflow.json -openfn path/to/job.js -ia adaptor-name +openfn path/to/job.js -a adaptor-name ``` If running a 
single job, you MUST specify which adaptor to use. -Pass the `-i` flag to auto-install any required adaptors (it's safe to do this redundantly, although the run will be a little slower). +If the requested adaptor (or a matching version) is not already installed, it will be installed automatically. To disable this behaviour, pass the `--no-autoinstall` flag. When finished, the CLI will write the resulting state to disk. By default the CLI will create an `output.json` next to the job file. You can pass a path to output by passing `-o path/to/output.json` and state by adding `-s path/to/state.json`. You can use `-S` and `-O` to pass state through stdin and return the output through stdout. diff --git a/packages/cli/src/options.ts b/packages/cli/src/options.ts index 152aecb20..95b42da93 100644 --- a/packages/cli/src/options.ts +++ b/packages/cli/src/options.ts @@ -114,8 +114,8 @@ export const autoinstall: CLIOption = { yargs: { alias: ['i'], boolean: true, - description: 'Auto-install the language adaptor', - default: false, + description: 'Auto-install the language adaptor(s)', + default: true, }, }; diff --git a/packages/cli/test/commands.test.ts b/packages/cli/test/commands.test.ts index 3d33427b4..a0d339945 100644 --- a/packages/cli/test/commands.test.ts +++ b/packages/cli/test/commands.test.ts @@ -445,7 +445,7 @@ test.serial( const state = JSON.stringify({ data: { count: 11 } }); const job = 'export default [byTwo]'; const result = await run( - `openfn --no-expand-adaptors -S ${state} -a times-two`, + `openfn --no-expand-adaptors -S ${state} -a times-two --no-autoinstall`, job, { repoDir: '/repo', @@ -512,9 +512,13 @@ test.serial( async (t) => { const job = 'fn((state) => { /* function isn\t actually called by the mock adaptor */ throw new Error("fake adaptor") });'; - const result = await run('openfn -a @openfn/language-postgres', job, { - repoDir: '/repo', - }); + const result = await run( + 'openfn -a @openfn/language-postgres --no-autoinstall', + job, + { + 
repoDir: '/repo', + } + ); t.assert(result === 'execute called!'); } ); diff --git a/packages/cli/test/execute/options.test.ts b/packages/cli/test/execute/options.test.ts index 750303854..4b9d0e7cd 100644 --- a/packages/cli/test/execute/options.test.ts +++ b/packages/cli/test/execute/options.test.ts @@ -11,7 +11,7 @@ test('correct default options', (t) => { const options = parse('execute job.js'); t.deepEqual(options.adaptors, []); - t.is(options.autoinstall, false); + t.is(options.autoinstall, true); t.is(options.command, 'execute'); t.is(options.compile, true); t.is(options.expandAdaptors, true); From 858d07e019bf48890923ff246ad4938e9e2203e8 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 13 Feb 2024 09:31:36 +0000 Subject: [PATCH 097/128] openfnx: update console output --- build/install-global.js | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/build/install-global.js b/build/install-global.js index f1d47a448..19c00c702 100644 --- a/build/install-global.js +++ b/build/install-global.js @@ -14,6 +14,8 @@ const outputPath = process.argv[2] || './dist'; // Package everything up like a local build exec('git branch --show-current', {}, async (err, branchName) => { + console.log('Installing openfnx for branch:', branchName); + console.log(); const files = await findPackages(); const pkgs = mapPackages(files); await ensureOutputPath(outputPath); @@ -37,9 +39,13 @@ exec('git branch --show-current', {}, async (err, branchName) => { ).then(async () => { const cliPath = getLocalTarballName(pkgs['@openfn/cli']); const command = `npm install -g ${path.resolve(outputPath, cliPath)}`; - console.log(command); + //console.log(command); await exec(command); // install the local CLI globally + + console.log(); + console.log('openfnx installed successfully! 
To test:'); + console.log(' openfnx --version'); }); }); From c7f2b3e708a86bd36bcbab01af2b0509feeaae7f Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 13 Feb 2024 18:33:41 +0000 Subject: [PATCH 098/128] runtime: fix tests --- packages/runtime/test/runtime.test.ts | 45 --------------------------- 1 file changed, 45 deletions(-) diff --git a/packages/runtime/test/runtime.test.ts b/packages/runtime/test/runtime.test.ts index 544cb54b7..e7f8af39e 100644 --- a/packages/runtime/test/runtime.test.ts +++ b/packages/runtime/test/runtime.test.ts @@ -440,51 +440,6 @@ test('log and serialize an error to the job logger', async (t) => { t.falsy(out.message[0].stack); }); -test('log job code to the job logger', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: 'export default [(s) => { console.log("hi"); return s;}]', - }, - ], - }; - - const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); - await run(plan, {}, { jobLogger }); - - t.is(jobLogger._history.length, 1); - const [out] = jobLogger._history; - - t.is(out.level, 'info'); - t.is(out.message[0], 'hi'); -}); - -test('log and serialize an error to the job logger', async (t) => { - const plan: ExecutionPlan = { - jobs: [ - { - id: 'a', - expression: - 'export default [(s) => { console.log(new Error("hi")); return s;}]', - }, - ], - }; - - const jobLogger = createMockLogger('JOB', { level: 'debug', json: true }); - await run(plan, {}, { jobLogger }); - - t.is(jobLogger._history.length, 1); - const [out] = jobLogger._history; - t.log(out); - - t.is(out.level, 'info'); - t.is(out.message[0].name, 'Error'); - t.is(out.message[0].message, 'hi'); - // should not be an error instance - t.falsy(out.message[0].stack); -}); - test('error reports can be overwritten', async (t) => { const plan: ExecutionPlanNoOptions = { workflow: { From cc8b9db3a7383950bfcf55e18ffcab08525cfa0e Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 13 Feb 2024 16:00:52 +0000 Subject: [PATCH 
099/128] worker: support output_dataclips on run options --- packages/lexicon/lightning.d.ts | 1 + packages/ws-worker/src/api/execute.ts | 13 ++++++-- packages/ws-worker/src/channels/run.ts | 10 ++---- .../ws-worker/src/events/step-complete.ts | 10 ++++-- .../src/util/convert-lightning-plan.ts | 28 ++++++++++------ .../test/events/step-complete.test.ts | 32 +++++++++++++++++++ .../test/util/convert-lightning-plan.test.ts | 13 ++++++++ 7 files changed, 86 insertions(+), 21 deletions(-) diff --git a/packages/lexicon/lightning.d.ts b/packages/lexicon/lightning.d.ts index e54902d95..c7e232366 100644 --- a/packages/lexicon/lightning.d.ts +++ b/packages/lexicon/lightning.d.ts @@ -44,6 +44,7 @@ export type LightningPlanOptions = { runTimeoutMs?: number; sanitize?: SanitizePolicies; start?: StepId; + output_dataclips?: boolean; }; /** diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index 48b4acca6..a0ebf9c8f 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -31,6 +31,7 @@ import handleRunComplete from '../events/run-complete'; import handleRunError from '../events/run-error'; import type { Channel, RunState, JSONLog } from '../types'; +import { WorkerRunOptions } from '../util/convert-lightning-plan'; const enc = new TextDecoder('utf-8'); @@ -41,6 +42,7 @@ export type Context = { state: RunState; logger: Logger; engine: RuntimeEngine; + options: WorkerRunOptions; onFinish: (result: any) => void; // maybe its better for version numbers to be scribbled here as we go? 
@@ -63,14 +65,21 @@ export function execute( logger: Logger, plan: ExecutionPlan, input: Lazy, - options: LightningPlanOptions = {}, + options: WorkerRunOptions = {}, onFinish = (_result: any) => {} ) { logger.info('executing ', plan.id); const state = createRunState(plan, input); - const context: Context = { channel, state, logger, engine, onFinish }; + const context: Context = { + channel, + state, + logger, + engine, + options, + onFinish, + }; const throttle = createThrottle(); diff --git a/packages/ws-worker/src/channels/run.ts b/packages/ws-worker/src/channels/run.ts index 104fba82d..8a505cf8a 100644 --- a/packages/ws-worker/src/channels/run.ts +++ b/packages/ws-worker/src/channels/run.ts @@ -1,13 +1,9 @@ import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; -import type { - GetPlanReply, - LightningPlan, - LightningPlanOptions, -} from '@openfn/lexicon/lightning'; +import type { GetPlanReply, LightningPlan } from '@openfn/lexicon/lightning'; import type { Logger } from '@openfn/logger'; import { getWithReply } from '../util'; -import convertRun from '../util/convert-lightning-plan'; +import convertRun, { WorkerRunOptions } from '../util/convert-lightning-plan'; import { GET_PLAN } from '../events'; import type { Channel, Socket } from '../types'; @@ -25,7 +21,7 @@ const joinRunChannel = ( return new Promise<{ channel: Channel; plan: ExecutionPlan; - options: LightningPlanOptions; + options: WorkerRunOptions; input: Lazy; }>((resolve, reject) => { // TMP - lightning seems to be sending two responses to me diff --git a/packages/ws-worker/src/events/step-complete.ts b/packages/ws-worker/src/events/step-complete.ts index 51c5bfe8d..a542a5944 100644 --- a/packages/ws-worker/src/events/step-complete.ts +++ b/packages/ws-worker/src/events/step-complete.ts @@ -8,7 +8,7 @@ import { calculateJobExitReason } from '../api/reasons'; import { sendEvent, Context } from '../api/execute'; export default function onStepComplete( - { channel, state }: Context, + { 
channel, state, options }: Context, event: JobCompletePayload, // TODO this isn't terribly graceful, but accept an error for crashes error?: any @@ -52,7 +52,6 @@ export default function onStepComplete( step_id, job_id, output_dataclip_id: dataclipId, - output_dataclip: stringify(outputState), reason, error_message, @@ -61,6 +60,11 @@ export default function onStepComplete( mem: event.mem, duration: event.duration, thread_id: event.threadId, - }; + } as StepCompletePayload; + + if (!options || options.outputDataclips !== false) { + evt.output_dataclip = stringify(outputState); + } + return sendEvent(channel, STEP_COMPLETE, evt); } diff --git a/packages/ws-worker/src/util/convert-lightning-plan.ts b/packages/ws-worker/src/util/convert-lightning-plan.ts index df2427a3d..d6892a3ea 100644 --- a/packages/ws-worker/src/util/convert-lightning-plan.ts +++ b/packages/ws-worker/src/util/convert-lightning-plan.ts @@ -39,21 +39,31 @@ const mapTriggerEdgeCondition = (edge: Edge) => { return condition; }; +// Options which relate to this execution but are not part of the plan +export type WorkerRunOptions = ExecuteOptions & { + // Defaults to true - must be explicity false to stop dataclips being sent + outputDataclips?: boolean; +}; + export default ( run: LightningPlan -): { plan: ExecutionPlan; options: ExecuteOptions; input: Lazy } => { +): { plan: ExecutionPlan; options: WorkerRunOptions; input: Lazy } => { // Some options get mapped straight through to the runtime's workflow options - // TODO or maybe not? Maybe they're all sent to the engine instead? 
const runtimeOpts: Omit = {}; // But some need to get passed down into the engine's options - const engineOpts: ExecuteOptions = {}; - - if (run.options?.runTimeoutMs) { - engineOpts.runTimeoutMs = run.options.runTimeoutMs; - } - if (run.options?.sanitize) { - engineOpts.sanitize = run.options.sanitize; + const engineOpts: WorkerRunOptions = {}; + + if (run.options) { + if (run.options.runTimeoutMs) { + engineOpts.runTimeoutMs = run.options.runTimeoutMs; + } + if (run.options.sanitize) { + engineOpts.sanitize = run.options.sanitize; + } + if (run.options.hasOwnProperty('output_dataclips')) { + engineOpts.outputDataclips = run.options.output_dataclips; + } } const plan: Partial = { diff --git a/packages/ws-worker/test/events/step-complete.test.ts b/packages/ws-worker/test/events/step-complete.test.ts index 8ba8a8e3d..4972eb0f7 100644 --- a/packages/ws-worker/test/events/step-complete.test.ts +++ b/packages/ws-worker/test/events/step-complete.test.ts @@ -154,3 +154,35 @@ test('send a step:complete event', async (t) => { } as JobCompletePayload; await handleStepComplete({ channel, state } as any, event); }); + +test('do not include dataclips in step:complete if output_dataclip is false', async (t) => { + const plan = createPlan(); + const jobId = 'job-1'; + const result = { x: 10 }; + + const state = createRunState(plan); + state.activeJob = jobId; + state.activeStep = 'b'; + + const options = { + outputDataclips: false, + }; + + const channel = mockChannel({ + [STEP_COMPLETE]: (evt: StepCompletePayload) => { + t.truthy(evt.output_dataclip_id); + t.falsy(evt.output_dataclip); + }, + }); + + const event = { + jobId, + workflowId: plan.id, + state: result, + next: ['a'], + mem: { job: 1, system: 10 }, + duration: 61, + thread_id: 'abc', + } as JobCompletePayload; + await handleStepComplete({ channel, state, options } as any, event); +}); diff --git a/packages/ws-worker/test/util/convert-lightning-plan.test.ts b/packages/ws-worker/test/util/convert-lightning-plan.test.ts 
index dbed7a503..c0ffa73a4 100644 --- a/packages/ws-worker/test/util/convert-lightning-plan.test.ts +++ b/packages/ws-worker/test/util/convert-lightning-plan.test.ts @@ -165,6 +165,19 @@ test('handle starting_node_id as options', (t) => { }); }); +test('handle output_dataclip as options', (t) => { + const run: Partial = { + id: 'w', + options: { + output_dataclips: false, + }, + }; + const { options } = convertPlan(run as LightningPlan); + t.deepEqual(options, { + outputDataclips: false, + }); +}); + test('convert a single trigger', (t) => { const run: Partial = { id: 'w', From 97b381606e231aabbb964f382ff500ec7ba3e80a Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 13 Feb 2024 18:00:05 +0000 Subject: [PATCH 100/128] worker: additioonal test of output_dataclips --- packages/ws-worker/test/lightning.test.ts | 54 +++++++++++++++++------ 1 file changed, 40 insertions(+), 14 deletions(-) diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 15edea4e1..3ba4df866 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -4,13 +4,16 @@ import test from 'ava'; import createLightningServer from '@openfn/lightning-mock'; +import type { + LightningPlan, + Node, + RunCompletePayload, +} from '@openfn/lexicon/lightning'; import { createRun, createEdge, createJob } from './util'; - import createWorkerServer from '../src/server'; -import createMockRTE from '../src/mock/runtime-engine'; import * as e from '../src/events'; -import { RunCompletePayload } from '@openfn/lexicon/lightning'; +import createMockRTE from '../src/mock/runtime-engine'; let lng: any; let worker: any; @@ -38,17 +41,18 @@ test.afterEach(() => { let rollingRunId = 0; -const getRun = (ext = {}, jobs?: any) => ({ - id: `a${++rollingRunId}`, - jobs: jobs || [ - { - id: 'j', - adaptor: '@openfn/language-common@1.0.0', - body: 'fn(() => ({ answer: 42 }))', - }, - ], - ...ext, -}); +const getRun = (ext = {}, jobs?: 
any[]): LightningPlan => + ({ + id: `a${++rollingRunId}`, + jobs: jobs || [ + { + id: 'j', + adaptor: '@openfn/language-common@1.0.0', + body: 'fn(() => ({ answer: 42 }))', + }, + ], + ...ext, + } as LightningPlan); test.serial(`events: lightning should respond to a ${e.CLAIM} event`, (t) => { return new Promise((done) => { @@ -342,6 +346,28 @@ test.serial( } ); +test.serial(`events: ${e.STEP_COMPLETE} should not return dataclips`, (t) => { + return new Promise((done) => { + const run = getRun(); + run.options = { + output_dataclips: false, + }; + + lng.onSocketEvent(e.STEP_COMPLETE, run.id, ({ payload }: any) => { + t.is(payload.job_id, 'j'); + t.falsy(payload.output_dataclip); + t.truthy(payload.output_dataclip_id); + t.pass(); + }); + + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { + done(); + }); + + lng.enqueueRun(run); + }); +}); + test.serial(`events: lightning should receive a ${e.RUN_LOG} event`, (t) => { return new Promise((done) => { const run = { From 31c97233700006ca1228c8bc5db040d566d9c6de Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Tue, 13 Feb 2024 18:03:37 +0000 Subject: [PATCH 101/128] types --- packages/ws-worker/src/api/execute.ts | 6 +----- packages/ws-worker/test/lightning.test.ts | 1 - 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/packages/ws-worker/src/api/execute.ts b/packages/ws-worker/src/api/execute.ts index a0ebf9c8f..c35ea2ca9 100644 --- a/packages/ws-worker/src/api/execute.ts +++ b/packages/ws-worker/src/api/execute.ts @@ -1,9 +1,5 @@ import type { ExecutionPlan, Lazy, State } from '@openfn/lexicon'; -import type { - RunLogPayload, - RunStartPayload, - LightningPlanOptions, -} from '@openfn/lexicon/lightning'; +import type { RunLogPayload, RunStartPayload } from '@openfn/lexicon/lightning'; import type { Logger } from '@openfn/logger'; import type { RuntimeEngine, diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 3ba4df866..4466bec36 100644 --- 
a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -6,7 +6,6 @@ import test from 'ava'; import createLightningServer from '@openfn/lightning-mock'; import type { LightningPlan, - Node, RunCompletePayload, } from '@openfn/lexicon/lightning'; From 45ddbe0a8703c7de45e9bd8580f59f77395d4dd9 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 14 Feb 2024 11:33:05 +0000 Subject: [PATCH 102/128] mock: error if a credential does not exist --- packages/lightning-mock/src/api-sockets.ts | 23 ++++++++++++++----- .../lightning-mock/test/channels/run.test.ts | 19 ++++++++++++++- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/packages/lightning-mock/src/api-sockets.ts b/packages/lightning-mock/src/api-sockets.ts index 10ab917e8..979b56c28 100644 --- a/packages/lightning-mock/src/api-sockets.ts +++ b/packages/lightning-mock/src/api-sockets.ts @@ -250,16 +250,27 @@ const createSocketAPI = ( evt: PhoenixEvent ) { const { ref, join_ref, topic, payload } = evt; - const response = state.credentials[payload.id]; - // console.log(topic, event, response); + const cred = state.credentials[payload.id]; + + let response; + if (cred) { + response = { + status: 'ok', + response: cred, + }; + } else { + response = { + status: 'error', + response: 'not_found', + }; + } + ws.reply({ ref, join_ref, topic, - payload: { - status: 'ok', - response, - }, + // @ts-ignore + payload: response, }); } diff --git a/packages/lightning-mock/test/channels/run.test.ts b/packages/lightning-mock/test/channels/run.test.ts index 8fcffc244..5517e9b04 100644 --- a/packages/lightning-mock/test/channels/run.test.ts +++ b/packages/lightning-mock/test/channels/run.test.ts @@ -139,6 +139,23 @@ test.serial('get credential through the run channel', async (t) => { }); }); +test.serial( + 'get credential should error if the credential does not exist', + async (t) => { + return new Promise(async (done) => { + server.startRun(run1.id); + + const channel = await 
join(`run:${run1.id}`, { token: 'a.b.c' }); + channel + .push(GET_CREDENTIAL, { id: 'unknown' }) + .receive('error', (result: any) => { + t.is(result, 'not_found'); + done(); + }); + }); + } +); + test.serial('get dataclip through the run channel', async (t) => { return new Promise(async (done) => { server.startRun(run1.id); @@ -155,7 +172,7 @@ test.serial('get dataclip through the run channel', async (t) => { }); test.serial( - 'get dataclip should throw if the dataclip does not exist', + 'get dataclip should error if the dataclip does not exist', async (t) => { return new Promise(async (done) => { server.startRun(run1.id); From 4a2eee192dc761d5ac61c48b2472db2321abe1a5 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 14 Feb 2024 14:22:42 +0000 Subject: [PATCH 103/128] engine: throw nice exception if credentials fail to load --- packages/engine-multi/ava | 0 packages/engine-multi/src/api/execute.ts | 3 +- .../src/api/preload-credentials.ts | 24 ++++++- packages/engine-multi/src/errors.ts | 18 ++++- .../test/api/preload-credentials.test.ts | 67 ++++++++++++++++++- .../engine-multi/test/integration.test.ts | 46 ++++++++++++- 6 files changed, 150 insertions(+), 8 deletions(-) create mode 100644 packages/engine-multi/ava diff --git a/packages/engine-multi/ava b/packages/engine-multi/ava new file mode 100644 index 000000000..e69de29bb diff --git a/packages/engine-multi/src/api/execute.ts b/packages/engine-multi/src/api/execute.ts index 933090142..eb52f7dea 100644 --- a/packages/engine-multi/src/api/execute.ts +++ b/packages/engine-multi/src/api/execute.ts @@ -40,7 +40,8 @@ const execute = async (context: ExecutionContext) => { // TODO catch and "throw" nice clean credentials issues await preloadCredentials( state.plan as any, - options.resolvers?.credential + options.resolvers?.credential, + logger ); } diff --git a/packages/engine-multi/src/api/preload-credentials.ts b/packages/engine-multi/src/api/preload-credentials.ts index 08726a313..d74a6cfdb 100644 --- 
a/packages/engine-multi/src/api/preload-credentials.ts +++ b/packages/engine-multi/src/api/preload-credentials.ts @@ -1,17 +1,34 @@ import { ExecutionPlan, Job } from '@openfn/lexicon'; +import type { Logger } from '@openfn/logger'; +import { CredentialErrorObj, CredentialLoadError } from '../errors'; export default async ( plan: ExecutionPlan, - loader: (id: string) => Promise + loader: (id: string) => Promise, + logger?: Logger ) => { const loaders: Promise[] = []; + const errors: CredentialErrorObj[] = []; + Object.values(plan.workflow.steps).forEach((step) => { const job = step as Job; if (typeof job.configuration === 'string') { + const config = job.configuration as string; loaders.push( new Promise(async (resolve) => { - job.configuration = await loader(job.configuration as string); + logger?.debug(`Loading credential ${config} for step ${job.id}`); + try { + job.configuration = await loader(config as string); + logger?.debug(`Credential ${config} loaded OK (${config})`); + } catch (e: any) { + logger?.debug(`Error loading credential ${config}`); + errors.push({ + id: config, + step: step.id!, + error: e?.message || e?.toString() || e, + }); + } resolve(); }) ); @@ -19,5 +36,8 @@ export default async ( }); await Promise.all(loaders); + if (errors.length) { + throw new CredentialLoadError(errors); + } return plan; }; diff --git a/packages/engine-multi/src/errors.ts b/packages/engine-multi/src/errors.ts index c6020e84f..5468aa390 100644 --- a/packages/engine-multi/src/errors.ts +++ b/packages/engine-multi/src/errors.ts @@ -100,4 +100,20 @@ export class ExitError extends EngineError { } } -// CredentialsError (exception) +export type CredentialErrorObj = { id: string; step: string; error: string }; + +// Error lazy-loading a credenial +export class CredentialLoadError extends EngineError { + severity = 'exception'; + type = 'CredentialLoadError'; + name = 'CredentialLoadError'; + message; + + original: any; // this is the original error + constructor(errors: 
CredentialErrorObj[]) { + super(); + this.message = errors + .map((e) => `Failed to load credential ${e.id} for step ${e.step}`) + .join('\n'); + } +} diff --git a/packages/engine-multi/test/api/preload-credentials.test.ts b/packages/engine-multi/test/api/preload-credentials.test.ts index 1a822fd71..92a240f2c 100644 --- a/packages/engine-multi/test/api/preload-credentials.test.ts +++ b/packages/engine-multi/test/api/preload-credentials.test.ts @@ -13,7 +13,7 @@ test('handle a plan with no credentials', async (t) => { }; const plan = { - id: 'a', + id: t.title, workflow: { steps: [ { @@ -46,7 +46,7 @@ test('handle a plan with credentials', async (t) => { }; const plan = { - id: 'a', + id: t.title, workflow: { steps: [ { @@ -73,3 +73,66 @@ test('handle a plan with credentials', async (t) => { t.is((plan.workflow.steps[1] as Job).configuration, 'loaded-b'); t.is((plan.workflow.steps[2] as Job).configuration, 'loaded-c'); }); + +test('throw if one credential fails to load', async (t) => { + const loader = async () => { + throw new Error('err'); + }; + + const plan = { + id: t.title, + workflow: { + steps: [ + { + id: 'z', + expression: '.', + configuration: 'a', + }, + ], + }, + options: {}, + } as ExecutionPlan; + + try { + await preloadCredentials(plan, loader); + } catch (e: any) { + t.is(e.name, 'CredentialLoadError'); + t.is(e.message, `Failed to load credential a for step z`); + } +}); + +test('throw if several credentials fail to load', async (t) => { + const loader = async () => { + throw new Error('err'); + }; + + const plan = { + id: t.title, + workflow: { + steps: [ + { + id: 'j', + expression: '.', + configuration: 'a', + }, + { + id: 'k', + expression: '.', + configuration: 'a', + }, + ], + }, + options: {}, + } as ExecutionPlan; + + try { + await preloadCredentials(plan, loader); + } catch (e: any) { + t.is(e.name, 'CredentialLoadError'); + t.is( + e.message, + `Failed to load credential a for step j +Failed to load credential a for step k` + ); + } +}); 
diff --git a/packages/engine-multi/test/integration.test.ts b/packages/engine-multi/test/integration.test.ts index 504fccffd..21c34e2cd 100644 --- a/packages/engine-multi/test/integration.test.ts +++ b/packages/engine-multi/test/integration.test.ts @@ -373,7 +373,7 @@ test.serial('evaluate conditional edges', (t) => { test.serial('preload credentials', (t) => { return new Promise(async (done) => { - let didCallLoader = true; + let didCallLoader = false; const loader = (id: string) => new Promise((resolve) => { @@ -403,13 +403,55 @@ test.serial('preload credentials', (t) => { const plan = createPlan(jobs); - api.execute(plan, options).on('workflow-complete', () => { + api.execute(plan, {}, options).on('workflow-complete', () => { t.true(didCallLoader); done(); }); }); }); +test.serial('send a workflow error if credentials fail to load', (t) => { + return new Promise(async (done) => { + let didCallLoader = false; + + const loader = () => + new Promise((_resolve, reject) => { + setTimeout(() => { + didCallLoader = true; + reject(); + }, 1); + }); + + api = await createAPI({ + logger, + }); + + const options = { + resolvers: { + credential: loader, + }, + }; + + const jobs = [ + { + id: 'a', + configuration: 'secret', + }, + ]; + + const plan = createPlan(jobs); + + api.execute(plan, {}, options).on('workflow-error', (e) => { + t.true(didCallLoader); + + t.is(e.type, 'CredentialLoadError'); + t.is(e.severity, 'exception'); + t.is(e.message, 'Failed to load credential secret for step a'); + done(); + }); + }); +}); + test.serial('accept initial state', (t) => { return new Promise(async (done) => { api = await createAPI({ From 59c1d7dcf90ec3595ed5184738ee17b4910b4d74 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 14 Feb 2024 14:37:54 +0000 Subject: [PATCH 104/128] tests: add tset for bad credential --- .../worker/test/integration.test.ts | 29 +++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/integration-tests/worker/test/integration.test.ts 
b/integration-tests/worker/test/integration.test.ts index ae83b4331..7dfacc552 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -304,6 +304,35 @@ test.skip('run a job with credentials', (t) => { }); }); +test('run a job with bad credentials', (t) => { + return new Promise(async (done) => { + const attempt = { + id: crypto.randomUUID(), + dataclip_id: 's1', + jobs: [ + { + adaptor: '@openfn/language-common@latest', + body: 'fn((s) => s)', + credential: 'zzz', + }, + ], + }; + + const initialState = { name: 'Professor X' }; + + lightning.addDataclip('s1', initialState); + + lightning.once('run:complete', ({ payload }) => { + t.is(payload.reason, 'exception'); + t.is(payload.error_type, 'CredentialLoadError'); + t.regex(payload.error_message, /Failed to load credential zzz for step/); + done(); + }); + + lightning.enqueueRun(attempt); + }); +}); + test('blacklist a non-openfn adaptor', (t) => { return new Promise(async (done) => { const attempt = { From 71677cc266bf602b9f61959b364fd8d6ceab9782 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 14 Feb 2024 14:38:58 +0000 Subject: [PATCH 105/128] worker: bad credential test --- packages/ws-worker/test/lightning.test.ts | 33 +++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 4466bec36..dc88c37b1 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -189,6 +189,8 @@ test.serial( `events: lightning should receive a ${e.GET_CREDENTIAL} event`, (t) => { return new Promise((done) => { + lng.addCredential('a', {}); + const run = getRun({}, [ { id: 'some-job', @@ -204,7 +206,38 @@ test.serial( didCallEvent = true; }); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, ({ payload }: any) => { + t.is(payload.reason, 'success'); + t.true(didCallEvent); + done(); + }); + + lng.enqueueRun(run); + }); + } 
+); + +test.serial( + `events: lightning should receive an exception reason if ${e.GET_CREDENTIAL} event fails`, + (t) => { + return new Promise((done) => { + const run = getRun({}, [ + { + id: 'some-job', + credential_id: 'zzz', + adaptor: '@openfn/language-common@1.0.0', + body: 'fn(() => ({ answer: 42 }))', + }, + ]); + + let didCallEvent = false; + lng.onSocketEvent(e.GET_CREDENTIAL, run.id, () => { + // again there's no way to check the right credential was returned + didCallEvent = true; + }); + lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { + console.log('>> RUN COMPLETE'); t.true(didCallEvent); done(); }); From cf8b2429fda120e1e1c542f533ad04b777340642 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 14 Feb 2024 14:41:51 +0000 Subject: [PATCH 106/128] mock: update dev endpoint to allow invalid credentials --- packages/lightning-mock/src/api-dev.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/lightning-mock/src/api-dev.ts b/packages/lightning-mock/src/api-dev.ts index c9477781a..2ab1a5fbd 100644 --- a/packages/lightning-mock/src/api-dev.ts +++ b/packages/lightning-mock/src/api-dev.ts @@ -155,7 +155,7 @@ const setupRestAPI = (app: DevServer, state: ServerState, logger: Logger) => { // convert credentials and dataclips run.jobs.forEach((job) => { - if (job.credential) { + if (job.credential && typeof job.credential !== 'string') { const cid = crypto.randomUUID(); state.credentials[cid] = job.credential; job.credential = cid; From ea6fc05fb3c83506e6c9be624ba1f3e36d7806e9 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Wed, 14 Feb 2024 14:43:06 +0000 Subject: [PATCH 107/128] changeset --- .changeset/twelve-ravens-listen.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/twelve-ravens-listen.md diff --git a/.changeset/twelve-ravens-listen.md b/.changeset/twelve-ravens-listen.md new file mode 100644 index 000000000..553e323a7 --- /dev/null +++ b/.changeset/twelve-ravens-listen.md @@ -0,0 +1,5 @@ +--- 
+'@openfn/engine-multi': patch +--- + +Add a CredentialLoadError From 699a8e240d14800fb57c9800853a601a7a0302c1 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 10:05:01 +0000 Subject: [PATCH 108/128] worker: move tesdt into reasons --- packages/ws-worker/test/lightning.test.ts | 30 ----------------------- packages/ws-worker/test/reasons.test.ts | 18 ++++++++++++++ 2 files changed, 18 insertions(+), 30 deletions(-) diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index dc88c37b1..6ba40dd66 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -217,36 +217,6 @@ test.serial( } ); -test.serial( - `events: lightning should receive an exception reason if ${e.GET_CREDENTIAL} event fails`, - (t) => { - return new Promise((done) => { - const run = getRun({}, [ - { - id: 'some-job', - credential_id: 'zzz', - adaptor: '@openfn/language-common@1.0.0', - body: 'fn(() => ({ answer: 42 }))', - }, - ]); - - let didCallEvent = false; - lng.onSocketEvent(e.GET_CREDENTIAL, run.id, () => { - // again there's no way to check the right credential was returned - didCallEvent = true; - }); - - lng.onSocketEvent(e.RUN_COMPLETE, run.id, () => { - console.log('>> RUN COMPLETE'); - t.true(didCallEvent); - done(); - }); - - lng.enqueueRun(run); - }); - } -); - test.serial( `events: lightning should receive a ${e.GET_DATACLIP} event`, (t) => { diff --git a/packages/ws-worker/test/reasons.test.ts b/packages/ws-worker/test/reasons.test.ts index c93d4da8f..514dbd6b8 100644 --- a/packages/ws-worker/test/reasons.test.ts +++ b/packages/ws-worker/test/reasons.test.ts @@ -12,6 +12,7 @@ import { RUN_LOG, RUN_START, RUN_COMPLETE, + GET_CREDENTIAL, } from '../src/events'; import { ExecutionPlan } from '@openfn/lexicon'; @@ -49,6 +50,9 @@ const execute = async (plan: ExecutionPlan, input = {}, options = {}) => [RUN_LOG]: async () => true, [STEP_COMPLETE]: async () => true, [RUN_COMPLETE]: 
async () => true, + [GET_CREDENTIAL]: async () => { + throw new Error('err'); + }, }); const onFinish = (result: any) => { @@ -221,6 +225,20 @@ test('exception: autoinstall error', async (t) => { ); }); +test('exception: failed to load credential', async (t) => { + const plan = createPlan({ + id: 'aa', + expression: 'export default [() => s]', + configuration: 'zzz', + }); + + const { reason } = await execute(plan); + + t.is(reason.reason, 'exception'); + t.is(reason.error_type, 'CredentialLoadError'); + t.is(reason.error_message, 'Failed to load credential zzz for step aa'); +}); + test('kill: timeout', async (t) => { const plan = createPlan({ id: 'x', From 37493c1403b9dd2617613f0b911792f56406b9e9 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 10:35:31 +0000 Subject: [PATCH 109/128] worker: tweak logs --- packages/ws-worker/src/events/run-error.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ws-worker/src/events/run-error.ts b/packages/ws-worker/src/events/run-error.ts index d37feb5ca..aba0ae5ab 100644 --- a/packages/ws-worker/src/events/run-error.ts +++ b/packages/ws-worker/src/events/run-error.ts @@ -29,7 +29,7 @@ export default async function onRunError( onFinish({ reason }); } catch (e: any) { - logger.error('ERROR in workflow-error handler:', e.message); + logger.error('ERROR in run-error handler:', e.message); logger.error(e); onFinish({}); From 29bff418bf32bb6bcf6853da5aff16c9daa3db56 Mon Sep 17 00:00:00 2001 From: josephjclark Date: Thu, 15 Feb 2024 12:08:58 +0000 Subject: [PATCH 110/128] Verify run token (#598) * worker: start trying to verify the attempt token * worker: roughly verify the run token * mock: generate a real jwt for runs * mock: tweak key handling * worker: verify the run token * changesets * todo * worker: support public key from env * worker: better cli handling * error handling * worker: destroy server if run token is invalid * test: add integration test for errors * tests: add keys to more 
tests * test: fix privateKey * tidyups * more tidyups --- .changeset/grumpy-candles-applaud.md | 5 ++ .changeset/yellow-peaches-melt.md | 5 ++ integration-tests/worker/src/init.ts | 11 ++- .../worker/test/autoinstall.test.ts | 21 +++--- .../worker/test/integration.test.ts | 71 +++++++++++++++++-- integration-tests/worker/test/runs.test.ts | 16 +++-- packages/lightning-mock/src/api-sockets.ts | 16 +++-- packages/lightning-mock/src/index.ts | 2 + packages/lightning-mock/src/server.ts | 20 +++++- packages/lightning-mock/src/tokens.ts | 29 ++++++++ packages/lightning-mock/src/util.ts | 19 ++++- packages/lightning-mock/test/tokens.test.ts | 56 +++++++++++++++ packages/ws-worker/src/api/claim.ts | 45 +++++++++++- packages/ws-worker/src/api/workloop.ts | 14 ++-- packages/ws-worker/src/server.ts | 11 ++- packages/ws-worker/src/start.ts | 29 +++++++- packages/ws-worker/src/util/worker-token.ts | 3 +- packages/ws-worker/test/lightning.test.ts | 27 ++++++- 18 files changed, 351 insertions(+), 49 deletions(-) create mode 100644 .changeset/grumpy-candles-applaud.md create mode 100644 .changeset/yellow-peaches-melt.md create mode 100644 packages/lightning-mock/src/tokens.ts create mode 100644 packages/lightning-mock/test/tokens.test.ts diff --git a/.changeset/grumpy-candles-applaud.md b/.changeset/grumpy-candles-applaud.md new file mode 100644 index 000000000..c711f8b42 --- /dev/null +++ b/.changeset/grumpy-candles-applaud.md @@ -0,0 +1,5 @@ +--- +'@openfn/lightning-mock': minor +--- + +Optionally mock the run token diff --git a/.changeset/yellow-peaches-melt.md b/.changeset/yellow-peaches-melt.md new file mode 100644 index 000000000..66f8ce48c --- /dev/null +++ b/.changeset/yellow-peaches-melt.md @@ -0,0 +1,5 @@ +--- +'@openfn/ws-worker': minor +--- + +Validate the run token diff --git a/integration-tests/worker/src/init.ts b/integration-tests/worker/src/init.ts index 01926a22b..fba523600 100644 --- a/integration-tests/worker/src/init.ts +++ 
b/integration-tests/worker/src/init.ts @@ -1,17 +1,22 @@ import path from 'node:path'; import crypto from 'node:crypto'; -import createLightningServer from '@openfn/lightning-mock'; +import createLightningServer, { toBase64 } from '@openfn/lightning-mock'; import createEngine from '@openfn/engine-multi'; import createWorkerServer from '@openfn/ws-worker'; import createLogger, { createMockLogger } from '@openfn/logger'; export const randomPort = () => Math.round(2000 + Math.random() * 1000); -export const initLightning = (port = 4000) => { +export const initLightning = (port = 4000, privateKey?: string) => { // TODO the lightning mock right now doesn't use the secret // but we may want to add tests against this - return createLightningServer({ port }); + const opts = { port }; + if (privateKey) { + // @ts-ignore + opts.runPrivateKey = toBase64(privateKey); + } + return createLightningServer(opts); }; export const initWorker = async ( diff --git a/integration-tests/worker/test/autoinstall.test.ts b/integration-tests/worker/test/autoinstall.test.ts index c060a2175..b2856fa24 100644 --- a/integration-tests/worker/test/autoinstall.test.ts +++ b/integration-tests/worker/test/autoinstall.test.ts @@ -1,9 +1,6 @@ -// stress test for autoinstall -// this could evolve into stress testing, benchmarking or artillery generally? 
-// Also I may skip this in CI after the issue is fixed - import test from 'ava'; import path from 'node:path'; +import { generateKeys } from '@openfn/lightning-mock'; import { initLightning, initWorker } from '../src/init'; import { createRun, createJob } from '../src/factories'; @@ -23,6 +20,7 @@ let worker; const run = async (attempt) => { return new Promise(async (done, reject) => { lightning.on('run:complete', (evt) => { + console.log('>', evt); if (attempt.id === evt.runId) { done(lightning.getResult(attempt.id)); } @@ -33,13 +31,20 @@ const run = async (attempt) => { }; test.before(async () => { + const keys = await generateKeys(); const lightningPort = 4321; - lightning = initLightning(lightningPort); + lightning = initLightning(lightningPort, keys.private); - ({ worker } = await initWorker(lightningPort, { - repoDir: path.resolve('tmp/repo/autoinstall'), - })); + ({ worker } = await initWorker( + lightningPort, + { + repoDir: path.resolve('tmp/repo/autoinstall'), + }, + { + runPublicKey: keys.public, + } + )); }); test.after(async () => { diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 7dfacc552..bbeef4757 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -2,6 +2,7 @@ import test from 'ava'; import path from 'node:path'; import crypto from 'node:crypto'; import Koa from 'koa'; +import { generateKeys } from '@openfn/lightning-mock'; import { initLightning, initWorker, randomPort } from '../src/init'; @@ -12,13 +13,21 @@ let engineLogger; let lightningPort; test.before(async () => { + const keys = await generateKeys(); lightningPort = randomPort(); - lightning = initLightning(lightningPort); - ({ worker, engine, engineLogger } = await initWorker(lightningPort, { + lightning = initLightning(lightningPort, keys.private); + + const engineArgs = { maxWorkers: 1, - purge: false, repoDir: 
path.resolve('tmp/repo/integration'), - })); + }; + const workerArgs = { runPublicKey: keys.public }; + + ({ worker, engine, engineLogger } = await initWorker( + lightningPort, + engineArgs, + workerArgs + )); }); test.afterEach(() => { @@ -501,7 +510,6 @@ test('stateful adaptor should create a new client for each attempt', (t) => { const engineArgs = { repoDir: path.resolve('./dummy-repo'), maxWorkers: 1, - purge: false, }; await initWorker(lightningPort, engineArgs); @@ -510,5 +518,58 @@ test('stateful adaptor should create a new client for each attempt', (t) => { }); }); +test('worker should exit if it has an invalid key', (t) => { + return new Promise(async (done) => { + if (!worker.destroyed) { + await worker.destroy(); + } + + // generate a new, invalid, public key + const keys = await generateKeys(); + + ({ worker } = await initWorker( + lightningPort, + { + maxWorkers: 1, + repoDir: path.resolve('tmp/repo/integration'), + }, + { + runPublicKey: keys.public, + } + )); + + const run = { + id: crypto.randomUUID(), + jobs: [ + { + adaptor: '@openfn/language-http@latest', + body: `fn((s) => s`, + }, + ], + }; + + // This should NOT run because the worker should + // not verify the token and destroy itself + lightning.once('run:start', () => { + t.fail('invalid run was start'); + done(); + }); + lightning.once('run:complete', () => { + t.fail('invalid run was completed'); + done(); + }); + + // TODO this run will, at the moment, be LOST to Lightning + lightning.enqueueRun(run); + + t.false(worker.destroyed); + setTimeout(() => { + // Ensure that the worker is destroyed after a brief interval + t.true(worker.destroyed); + done(); + }, 500); + }); +}); + // REMEMBER the default worker was destroyed at this point! 
// If you want to use a worker, you'll have to create your own diff --git a/integration-tests/worker/test/runs.test.ts b/integration-tests/worker/test/runs.test.ts index 4a05ef5fc..6e9d75333 100644 --- a/integration-tests/worker/test/runs.test.ts +++ b/integration-tests/worker/test/runs.test.ts @@ -1,5 +1,6 @@ import test from 'ava'; import path from 'node:path'; +import { generateKeys } from '@openfn/lightning-mock'; import { createRun, @@ -13,13 +14,20 @@ let lightning; let worker; test.before(async () => { + const keys = await generateKeys(); const lightningPort = 4321; - lightning = initLightning(lightningPort); + lightning = initLightning(lightningPort, keys.private); - ({ worker } = await initWorker(lightningPort, { - repoDir: path.resolve('tmp/repo/attempts'), - })); + ({ worker } = await initWorker( + lightningPort, + { + repoDir: path.resolve('tmp/repo/attempts'), + }, + { + runPublicKey: keys.public, + } + )); }); test.afterEach(async () => { diff --git a/packages/lightning-mock/src/api-sockets.ts b/packages/lightning-mock/src/api-sockets.ts index 979b56c28..f56d5c51d 100644 --- a/packages/lightning-mock/src/api-sockets.ts +++ b/packages/lightning-mock/src/api-sockets.ts @@ -39,6 +39,7 @@ import { STEP_COMPLETE, STEP_START, } from './events'; +import { generateRunToken } from './tokens'; import { extractRunId, stringify } from './util'; import type { ServerState } from './server'; @@ -101,8 +102,8 @@ const createSocketAPI = ( }); wss.registerEvents('worker:queue', { - [CLAIM]: (ws, event: PhoenixEvent) => { - const { runs } = pullClaim(state, ws, event); + [CLAIM]: async (ws, event: PhoenixEvent) => { + const { runs } = await pullClaim(state, ws, event); state.events.emit(CLAIM, { payload: runs, state: clone(state), @@ -165,13 +166,13 @@ const createSocketAPI = ( // pull claim will try and pull a claim off the queue, // and reply with the response // the reply ensures that only the calling worker will get the run - function pullClaim( + async function 
pullClaim( state: ServerState, ws: DevSocket, evt: PhoenixEvent ) { const { ref, join_ref, topic } = evt; - const { queue } = state; + const { queue, options } = state; let count = 1; const runs: ClaimRun[] = []; @@ -184,9 +185,10 @@ const createSocketAPI = ( // TODO assign the worker id to the run // Not needed by the mocks at the moment const next = queue.shift(); - // TODO the token in the mock is trivial because we're not going to do any validation on it yet - // TODO need to save the token associated with this run - runs.push({ id: next!, token: 'x.y.z' }); + + const token = await generateRunToken(next!, options.runPrivateKey); + + runs.push({ id: next!, token }); count -= 1; startRun(next!); diff --git a/packages/lightning-mock/src/index.ts b/packages/lightning-mock/src/index.ts index 5f3b83d4f..94cc4ef21 100644 --- a/packages/lightning-mock/src/index.ts +++ b/packages/lightning-mock/src/index.ts @@ -1,2 +1,4 @@ import createLightningServer from './server'; export default createLightningServer; + +export { toBase64, generateKeys } from './util'; diff --git a/packages/lightning-mock/src/server.ts b/packages/lightning-mock/src/server.ts index c6ceac0db..a035d6da4 100644 --- a/packages/lightning-mock/src/server.ts +++ b/packages/lightning-mock/src/server.ts @@ -7,11 +7,12 @@ import createLogger, { LogLevel, Logger, } from '@openfn/logger'; +import type { StepId } from '@openfn/lexicon'; +import type { RunLogPayload, LightningPlan } from '@openfn/lexicon/lightning'; import createWebSocketAPI from './api-sockets'; import createDevAPI from './api-dev'; -import type { StepId } from '@openfn/lexicon'; -import type { RunLogPayload, LightningPlan } from '@openfn/lexicon/lightning'; +import { fromBase64 } from './util'; import type { DevServer } from './types'; type JobId = string; @@ -43,12 +44,17 @@ export type ServerState = { // event emitter for debugging and observability events: EventEmitter; + + options: LightningOptions; }; export type LightningOptions = { 
logger?: Logger; logLevel?: LogLevel; port?: string | number; + + // if passed, a JWT will be included in all claim responses + runPrivateKey?: string; }; export type RunId = string; @@ -57,6 +63,11 @@ export type RunId = string; const createLightningServer = (options: LightningOptions = {}) => { const logger = options.logger || createMockLogger(); + // decode the incoming private key from base 64 + const runPrivateKey = options.runPrivateKey + ? fromBase64(options.runPrivateKey) + : undefined; + const state = { credentials: {}, runs: {}, @@ -66,6 +77,11 @@ const createLightningServer = (options: LightningOptions = {}) => { queue: [] as RunId[], results: {}, events: new EventEmitter(), + + options: { + ...options, + runPrivateKey, + }, } as ServerState; const app = new Koa() as DevServer; diff --git a/packages/lightning-mock/src/tokens.ts b/packages/lightning-mock/src/tokens.ts new file mode 100644 index 000000000..cb3aa1608 --- /dev/null +++ b/packages/lightning-mock/src/tokens.ts @@ -0,0 +1,29 @@ +import * as jose from 'jose'; +import crypto from 'node:crypto'; + +export const generateRunToken = async ( + runId: string, + privateKey?: string +): Promise => { + if (privateKey) { + try { + const alg = 'RS256'; + + const key = crypto.createPrivateKey(privateKey); + + const jwt = await new jose.SignJWT({ id: runId }) + .setProtectedHeader({ alg }) + .setIssuedAt() + .setIssuer('Lightning') + .setExpirationTime('2h') + .sign(key); + return jwt; + } catch (e) { + console.error('ERROR IN MOCK LIGHTNING SERVER'); + console.error('Failed to generate JWT token for run ', runId); + console.error(e); + } + } + + return 'x.y.z'; +}; diff --git a/packages/lightning-mock/src/util.ts b/packages/lightning-mock/src/util.ts index 5f9c857b0..2ab35d4f8 100644 --- a/packages/lightning-mock/src/util.ts +++ b/packages/lightning-mock/src/util.ts @@ -1,9 +1,9 @@ import fss from 'fast-safe-stringify'; +import * as jose from 'jose'; export const RUN_PREFIX = 'run:'; -export const 
extractRunId = (topic: string) => - topic.substr(RUN_PREFIX.length); +export const extractRunId = (topic: string) => topic.substr(RUN_PREFIX.length); // This is copied out of ws-worker and untested here export const stringify = (obj: any): string => @@ -13,3 +13,18 @@ export const stringify = (obj: any): string => } return value; }); + +export const generateKeys = async () => { + const { publicKey, privateKey } = await jose.generateKeyPair('RS256'); + return { + // @ts-ignore export function + public: publicKey.export({ type: 'pkcs1', format: 'pem' }), + // @ts-ignore export function + private: privateKey.export({ type: 'pkcs1', format: 'pem' }), + }; +}; + +export const toBase64 = (key: string) => Buffer.from(key).toString('base64'); + +export const fromBase64 = (key: string) => + Buffer.from(key, 'base64').toString(); diff --git a/packages/lightning-mock/test/tokens.test.ts b/packages/lightning-mock/test/tokens.test.ts new file mode 100644 index 000000000..bf4bdc041 --- /dev/null +++ b/packages/lightning-mock/test/tokens.test.ts @@ -0,0 +1,56 @@ +import test from 'ava'; +import * as jose from 'jose'; +import crypto from 'node:crypto'; + +import { generateRunToken } from '../src/tokens'; +import { generateKeys } from '../src/util'; + +let keys = { public: '.', private: '.' 
}; + +// util function to verify a token against a public key +const verify = async (token: string, publicKey: string) => { + const key = crypto.createPublicKey(publicKey); + + const { payload } = await jose.jwtVerify(token, key); + + return payload; +}; + +test.before(async () => { + keys = await generateKeys(); +}); + +test('generate a placeholder token if no key passed', async (t) => { + const result = await generateRunToken('.'); + t.is(result, 'x.y.z'); +}); + +test('generate a real token if a key is passed', async (t) => { + const result = await generateRunToken('.', keys.private); + t.true(result.length > 100); +}); + +test('token should be verified with the public key', async (t) => { + const result = await generateRunToken('.', keys.private); + + // Basically testing that this doesn't throw + const payload = await verify(result, keys.public); + t.log(payload); + t.truthy(payload); +}); + +test('token claims should include the run id', async (t) => { + const result = await generateRunToken('23', keys.private); + + const { id } = await verify(result, keys.public); + t.is(id, '23'); +}); + +test('token claims should include the issuer: Lightning', async (t) => { + const result = await generateRunToken('23', keys.private); + + const { iss } = await verify(result, keys.public); + t.is(iss, 'Lightning'); +}); + +// TODO - claim should include exp and nbf diff --git a/packages/ws-worker/src/api/claim.ts b/packages/ws-worker/src/api/claim.ts index a54d7ebda..db4912984 100644 --- a/packages/ws-worker/src/api/claim.ts +++ b/packages/ws-worker/src/api/claim.ts @@ -1,14 +1,38 @@ +import crypto from 'node:crypto'; +import * as jose from 'jose'; import { Logger, createMockLogger } from '@openfn/logger'; import { ClaimPayload, ClaimReply } from '@openfn/lexicon/lightning'; + import { CLAIM } from '../events'; import type { ServerApp } from '../server'; const mockLogger = createMockLogger(); -// TODO: this needs standalone unit tests now that it's bene moved -const claim 
= (app: ServerApp, logger: Logger = mockLogger, maxWorkers = 5) => { +const verifyToken = async (token: string, publicKey: string) => { + const key = crypto.createPublicKey(publicKey); + + const { payload } = await jose.jwtVerify(token, key, { + issuer: 'Lightning', + }); + + if (payload) { + return true; + } +}; + +type ClaimOptions = { + maxWorkers?: number; +}; + +const claim = ( + app: ServerApp, + logger: Logger = mockLogger, + options: ClaimOptions = {} +) => { return new Promise((resolve, reject) => { + const { maxWorkers = 5 } = options; + const activeWorkers = Object.keys(app.workflows).length; if (activeWorkers >= maxWorkers) { return reject(new Error('Server at capacity')); @@ -31,7 +55,22 @@ const claim = (app: ServerApp, logger: Logger = mockLogger, maxWorkers = 5) => { return reject(new Error('No runs returned')); } - runs.forEach((run) => { + runs.forEach(async (run) => { + if (app.options?.runPublicKey) { + try { + await verifyToken(run.token, app.options.runPublicKey); + logger.debug('verified run token for', run.id); + } catch (e) { + logger.error('Error validating run token'); + logger.error(e); + reject(); + app.destroy(); + return; + } + } else { + logger.debug('skipping run token validation for', run.id); + } + logger.debug('starting run', run.id); app.execute(run); resolve(); diff --git a/packages/ws-worker/src/api/workloop.ts b/packages/ws-worker/src/api/workloop.ts index aadf0469d..ea7465826 100644 --- a/packages/ws-worker/src/api/workloop.ts +++ b/packages/ws-worker/src/api/workloop.ts @@ -17,10 +17,16 @@ const startWorkloop = ( const workLoop = () => { if (!cancelled) { - promise = tryWithBackoff(() => claim(app, logger, maxWorkers), { - min: minBackoff, - max: maxBackoff, - }); + promise = tryWithBackoff( + () => + claim(app, logger, { + maxWorkers, + }), + { + min: minBackoff, + max: maxBackoff, + } + ); // TODO this needs more unit tests I think promise.then(() => { if (!cancelled) { diff --git a/packages/ws-worker/src/server.ts 
b/packages/ws-worker/src/server.ts index 6bd6fb217..b1c5590fb 100644 --- a/packages/ws-worker/src/server.ts +++ b/packages/ws-worker/src/server.ts @@ -19,7 +19,7 @@ import type { Server } from 'http'; import type { RuntimeEngine } from '@openfn/engine-multi'; import type { Socket, Channel } from './types'; -type ServerOptions = { +export type ServerOptions = { maxWorkflows?: number; port?: number; lightning?: string; // url to lightning instance @@ -27,6 +27,7 @@ type ServerOptions = { noLoop?: boolean; // disable the worker loop secret?: string; // worker secret + runPublicKey?: string; // base64 encoded run public key backoff?: { min?: number; @@ -44,6 +45,7 @@ export interface ServerApp extends Koa { events: EventEmitter; server: Server; engine: RuntimeEngine; + options: ServerOptions; execute: ({ id, token }: ClaimRun) => Promise; destroy: () => void; @@ -152,12 +154,13 @@ function createServer(engine: RuntimeEngine, options: ServerOptions = {}) { router.get('/', healthcheck); + app.options = options || {}; + // TODO this probably needs to move into ./api/ somewhere app.execute = async ({ id, token }: ClaimRun) => { if (app.socket) { app.workflows[id] = true; - // TODO need to verify the token against LIGHTNING_PUBLIC_KEY const { channel: runChannel, plan, @@ -192,7 +195,9 @@ function createServer(engine: RuntimeEngine, options: ServerOptions = {}) { // Debug API to manually trigger a claim router.post('/claim', async (ctx) => { logger.info('triggering claim from POST request'); - return claim(app, logger, options.maxWorkflows) + return claim(app, logger, { + maxWorkers: options.maxWorkflows, + }) .then(() => { logger.info('claim complete: 1 run claimed'); ctx.body = 'complete'; diff --git a/packages/ws-worker/src/start.ts b/packages/ws-worker/src/start.ts index 62b710493..9d6e38d63 100644 --- a/packages/ws-worker/src/start.ts +++ b/packages/ws-worker/src/start.ts @@ -5,7 +5,7 @@ import createLogger, { LogLevel } from '@openfn/logger'; import createRTE from 
'@openfn/engine-multi'; import createMockRTE from './mock/runtime-engine'; -import createWorker from './server'; +import createWorker, { ServerOptions } from './server'; type Args = { _: string[]; @@ -15,6 +15,7 @@ type Args = { secret?: string; loop?: boolean; log: LogLevel; + lightningPublicKey?: string; mock: boolean; backoff: string; capacity?: number; @@ -26,6 +27,7 @@ type Args = { const { WORKER_BACKOFF, WORKER_CAPACITY, + WORKER_LIGHTNING_PUBLIC_KEY, WORKER_LIGHTNING_SERVICE_URL, WORKER_LOG_LEVEL, WORKER_MAX_RUN_DURATION_SECONDS, @@ -64,6 +66,11 @@ const args = yargs(hideBin(process.argv)) 'Worker secret. (comes from WORKER_SECRET by default). Env: WORKER_SECRET', default: WORKER_SECRET, }) + .option('lightning-public-key', { + description: + 'Base64-encoded public key. Used to verify run tokens. Env: WORKER_LIGHTNING_PUBLIC_KEY', + default: WORKER_LIGHTNING_PUBLIC_KEY, + }) .option('log', { description: 'Set the log level for stdout (default to info, set to debug for verbose output). 
Env: WORKER_LOG_LEVEL', @@ -133,7 +140,7 @@ const [minBackoff, maxBackoff] = args.backoff function engineReady(engine: any) { logger.debug('Creating worker server...'); - const workerOptions = { + const workerOptions: ServerOptions = { port: args.port, lightning: args.lightning, logger, @@ -146,7 +153,23 @@ function engineReady(engine: any) { }, maxWorkflows: args.capacity, }; - const { logger: _l, secret: _s, ...humanOptions } = workerOptions; + + if (args.lightningPublicKey) { + logger.info( + 'Lightning public key found: run tokens from Lightning will be verified by this worker' + ); + workerOptions.runPublicKey = Buffer.from( + args.lightningPublicKey, + 'base64' + ).toString(); + } + + const { + logger: _l, + secret: _s, + runPublicKey, + ...humanOptions + } = workerOptions; logger.debug('Worker options:', humanOptions); createWorker(engine, workerOptions); diff --git a/packages/ws-worker/src/util/worker-token.ts b/packages/ws-worker/src/util/worker-token.ts index 9438fb6a3..fe11b04af 100644 --- a/packages/ws-worker/src/util/worker-token.ts +++ b/packages/ws-worker/src/util/worker-token.ts @@ -26,8 +26,7 @@ const generateWorkerToken = async ( const jwt = await new jose.SignJWT(claims) .setProtectedHeader({ alg }) .setIssuedAt() - .setIssuer('urn:example:issuer') - .setAudience('urn:example:audience') + .setIssuer('urn:openfn:worker') .sign(encodedSecret); // .setExpirationTime('2h') // ?? 
diff --git a/packages/ws-worker/test/lightning.test.ts b/packages/ws-worker/test/lightning.test.ts index 6ba40dd66..1fe55c90a 100644 --- a/packages/ws-worker/test/lightning.test.ts +++ b/packages/ws-worker/test/lightning.test.ts @@ -3,11 +3,14 @@ */ import test from 'ava'; -import createLightningServer from '@openfn/lightning-mock'; import type { LightningPlan, RunCompletePayload, } from '@openfn/lexicon/lightning'; +import createLightningServer, { + generateKeys, + toBase64, +} from '@openfn/lightning-mock'; import { createRun, createEdge, createJob } from './util'; import createWorkerServer from '../src/server'; @@ -17,20 +20,31 @@ import createMockRTE from '../src/mock/runtime-engine'; let lng: any; let worker: any; +let keys = { private: '.', public: '.' }; + const urls = { worker: 'http://localhost:4567', lng: 'ws://localhost:7654/worker', }; test.before(async () => { + keys = await generateKeys(); + const engine = await createMockRTE(); - // TODO give lightning the same secret and do some validation - lng = createLightningServer({ port: 7654 }); + lng = createLightningServer({ + port: 7654, + runPrivateKey: toBase64(keys.private), + }); + worker = createWorkerServer(engine, { port: 4567, lightning: urls.lng, secret: 'abc', maxWorkflows: 1, + + // Note that if this is not passed, + // JWT verification will be skipped + runPublicKey: keys.public, }); }); @@ -91,6 +105,13 @@ test.serial( } ); +test.todo('worker should log when a run token is verified'); + +// Perhaps a workflow exception is the most responsible thing right now +test.todo( + 'worker throws or blow up or something when token verification fails' +); + test.serial( 'should run a run which returns an expression as JSON', async (t) => { From b9046140e40a6e91b79b151d492aea4f6af41c20 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:52:43 +0000 Subject: [PATCH 111/128] version lock pheonix to 1.7.10 1.7.11 introduces a compatability issue --- packages/ws-worker/package.json | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/ws-worker/package.json b/packages/ws-worker/package.json index 1200b2e3c..553e8244b 100644 --- a/packages/ws-worker/package.json +++ b/packages/ws-worker/package.json @@ -34,7 +34,7 @@ "koa": "^2.13.4", "koa-bodyparser": "^4.4.0", "koa-logger": "^3.2.1", - "phoenix": "^1.7.7", + "phoenix": "1.7.10", "ws": "^8.14.1" }, "devDependencies": { From 9f6c35d83d5c9d4f0fff1c651c4cf3fe6fbad785 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 11:07:10 +0000 Subject: [PATCH 112/128] logger: add proxy function to the mock --- .changeset/swift-panthers-divide.md | 5 +++++ packages/logger/src/mock.ts | 8 +++++++- 2 files changed, 12 insertions(+), 1 deletion(-) create mode 100644 .changeset/swift-panthers-divide.md diff --git a/.changeset/swift-panthers-divide.md b/.changeset/swift-panthers-divide.md new file mode 100644 index 000000000..e4b98338d --- /dev/null +++ b/.changeset/swift-panthers-divide.md @@ -0,0 +1,5 @@ +--- +'@openfn/logger': patch +--- + +Support proxy() on the mock logger diff --git a/packages/logger/src/mock.ts b/packages/logger/src/mock.ts index 14517788c..579e977d5 100644 --- a/packages/logger/src/mock.ts +++ b/packages/logger/src/mock.ts @@ -1,6 +1,6 @@ // Mock logger which doesn't log anything // TODO built in an API to return the history - very useful in unit tests -import createLogger, { Logger, LogFns, StringLog } from './logger'; +import createLogger, { Logger, LogFns, StringLog, JSONLog } from './logger'; import type { LogOptions, LogEmitter } from './options'; // Each log message is saved as the level, then whatever was actually logged @@ -56,6 +56,12 @@ const mockLogger = ( mock.break = () => {}; // do nothing + // @ts-ignore + mock.proxy = (obj: JSONLog) => { + // @ts-ignore + history.push(obj); + }; + // TODO should this use json? 
mock.print = (...out: any[]) => { if (opts.level !== 'none') { From e71d28b924b9feebd1314edfac5716253601133d Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 11:08:03 +0000 Subject: [PATCH 113/128] engine: don't send adaptor logs to stdout --- packages/engine-multi/src/api/lifecycle.ts | 6 +- .../engine-multi/test/api/lifecycle.test.ts | 78 +++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) diff --git a/packages/engine-multi/src/api/lifecycle.ts b/packages/engine-multi/src/api/lifecycle.ts index 2c8f84d01..f7c71101b 100644 --- a/packages/engine-multi/src/api/lifecycle.ts +++ b/packages/engine-multi/src/api/lifecycle.ts @@ -3,6 +3,10 @@ import * as externalEvents from '../events'; import * as internalEvents from '../worker/events'; import type ExecutionContext from '../classes/ExecutionContext'; +// Log events from the inner thread will be logged to stdout +// EXCEPT the keys listed here +const logsToExcludeFromStdout = /(job)|(ada)/i; + export const workflowStart = ( context: ExecutionContext, event: internalEvents.WorkflowStartEvent @@ -120,7 +124,7 @@ export const log = ( ) => { const { threadId } = event; - if (event.log.name !== 'JOB') { + if (!logsToExcludeFromStdout.test(event.log.name!)) { // Forward the log event to the engine's logger // Note that we may have to parse the serialized log string const proxy = { diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index c5892d890..c86c48670 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -218,6 +218,84 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { }); }); +test('logs get sent to stdout', (t) => { + const workflowId = 'a'; + + const stdout = createMockLogger(undefined, { json: true }); + + const context = createContext(workflowId); + context.logger = stdout; + + const event: w.LogEvent = { + type: w.LOG, + workflowId, + threadId: 'a', + log: { + 
level: 'info', + name: 'r/t', + message: ['oh hai'], + time: (Date.now() - 100).toString(), + }, + }; + + log(context, event); + + const last = stdout._last; + t.truthy(last); + t.is(last.message[0], 'oh hai'); + t.is(last.name, 'r/t'); +}); + +test('job logs do not get sent to stdout', (t) => { + const workflowId = 'a'; + + const stdout = createMockLogger(); + + const context = createContext(workflowId); + context.logger = stdout; + + const event: w.LogEvent = { + type: w.LOG, + workflowId, + threadId: 'a', + log: { + level: 'info', + name: 'job', + message: ['oh hai'], + time: (Date.now() - 100).toString(), + }, + }; + + log(context, event); + + t.is(stdout._history.length, 0); +}); + +test('adaptor logs do not get sent to stdout', (t) => { + const workflowId = 'a'; + + const stdout = createMockLogger(); + + const context = createContext(workflowId); + context.logger = stdout; + + const event: w.LogEvent = { + type: w.LOG, + workflowId, + threadId: 'a', + log: { + level: 'info', + name: 'ada', + message: ['oh hai'], + time: (Date.now() - 100).toString(), + }, + }; + + log(context, event); + + t.is(stdout._history.length, 0); +}); + // TODO not a very thorough test, still not really sure what I'm doing here test(`error: emits ${e.WORKFLOW_ERROR}`, (t) => { return new Promise((done) => { From 6ccf2c40a2905d90ecc98ff84c5da219b6f15f83 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 12:21:35 +0000 Subject: [PATCH 114/128] tests: add test for adaptor logs --- .../@openfn/test-adaptor_1.0.0/index.js | 6 +++ .../@openfn/test-adaptor_1.0.0/package.json | 7 +++ .../worker/dummy-repo/package.json | 3 +- .../worker/test/integration.test.ts | 51 ++++++++++++++++++- 4 files changed, 64 insertions(+), 3 deletions(-) create mode 100644 integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js create mode 100644 integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/package.json diff --git 
a/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js b/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js new file mode 100644 index 000000000..17c958136 --- /dev/null +++ b/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js @@ -0,0 +1,6 @@ +export const fn = (f) => (s) => f(s); + +export const log = (message) => (s) => { + console.log(message); + return s; +}; \ No newline at end of file diff --git a/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/package.json b/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/package.json new file mode 100644 index 000000000..53acc9736 --- /dev/null +++ b/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/package.json @@ -0,0 +1,7 @@ +{ + "name": "@openfn/test-adaptor", + "version": "1.0.0", + "type": "module", + "main": "index.js", + "private": true +} diff --git a/integration-tests/worker/dummy-repo/package.json b/integration-tests/worker/dummy-repo/package.json index 782ec5ed7..1bc78cb27 100644 --- a/integration-tests/worker/dummy-repo/package.json +++ b/integration-tests/worker/dummy-repo/package.json @@ -3,6 +3,7 @@ "private": true, "version": "1.0.0", "dependencies": { - "@openfn/stateful-test_1.0.0": "@npm:@openfn/stateful-test@1.0.0" + "@openfn/stateful-test_1.0.0": "@npm:@openfn/stateful-test@1.0.0", + "@openfn/test-adaptor_1.0.0": "@npm:@openfn/test-adaptor@1.0.0" } } diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index bbeef4757..48a855b4d 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -39,6 +39,15 @@ test.after(async () => { await worker.destroy(); }); +const createDummyWorker = () => { + const engineArgs = { + repoDir: path.resolve('./dummy-repo'), + maxWorkers: 1, + purge: false, + }; + return 
initWorker(lightningPort, engineArgs); +}; + test('should run a simple job with no compilation or adaptor', (t) => { return new Promise(async (done) => { lightning.once('run:complete', (evt) => { @@ -157,7 +166,7 @@ test("Don't send job logs to stdout", (t) => { lightning.once('run:complete', () => { const jsonLogs = engineLogger._history; - + console.log(jsonLogs); // The engine logger shouldn't print out any job logs const jobLog = jsonLogs.find((l) => l.name === 'JOB'); t.falsy(jobLog); @@ -166,7 +175,44 @@ test("Don't send job logs to stdout", (t) => { // But it SHOULD log engine stuff const runtimeLog = jsonLogs.find( - (l) => l.name === 'R/T' && l.message[0].match(/completed step/i) + (l) => l.name === 'R/T' && l.message[0].match(/completed workflow/i) + ); + t.truthy(runtimeLog); + done(); + }); + + lightning.enqueueRun(attempt); + }); +}); + +test("Don't send adaptor logs to stdout", (t) => { + return new Promise(async (done) => { + // We have to create a new worker with a different repo for this one + await worker.destroy(); + ({ worker, engineLogger } = await createDummyWorker()); + + const message = 've have been expecting you meester bond'; + const attempt = { + id: crypto.randomUUID(), + jobs: [ + { + adaptor: '@openfn/test-adaptor@1.0.0', + body: `import { log } from '@openfn/test-adaptor'; log("${message}")`, + }, + ], + }; + + lightning.once('run:complete', (evt) => { + const jsonLogs = engineLogger._history; + // The engine logger shouldn't print out any adaptor logs + const jobLog = jsonLogs.find((l) => l.name === 'ADA'); + t.falsy(jobLog); + const jobLog2 = jsonLogs.find((l) => l.message[0] === message); + t.falsy(jobLog2); + + // But it SHOULD log engine stuff + const runtimeLog = jsonLogs.find( + (l) => l.name === 'R/T' && l.message[0].match(/completed workflow/i) ); t.truthy(runtimeLog); done(); @@ -471,6 +517,7 @@ test('stateful adaptor should create a new client for each attempt', (t) => { return new Promise(async (done) => { // We want to 
create our own special worker here await worker.destroy(); + ({ worker } = await createDummyWorker()); const attempt1 = { id: crypto.randomUUID(), From 5f24294b0292a3d0368e4124a7bcd9399b7437cc Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 12:23:18 +0000 Subject: [PATCH 115/128] changeset --- .changeset/angry-plants-call.md | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 .changeset/angry-plants-call.md diff --git a/.changeset/angry-plants-call.md b/.changeset/angry-plants-call.md new file mode 100644 index 000000000..615c72785 --- /dev/null +++ b/.changeset/angry-plants-call.md @@ -0,0 +1,5 @@ +--- +'@openfn/engine-multi': patch +--- + +Don't log adaptor logs to stdout From c619dd26c1fcc3a51791c3dedd0c1123bc93c911 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 12:24:19 +0000 Subject: [PATCH 116/128] tests: remove logging --- integration-tests/worker/test/integration.test.ts | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 48a855b4d..b89bef1ed 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -166,7 +166,6 @@ test("Don't send job logs to stdout", (t) => { lightning.once('run:complete', () => { const jsonLogs = engineLogger._history; - console.log(jsonLogs); // The engine logger shouldn't print out any job logs const jobLog = jsonLogs.find((l) => l.name === 'JOB'); t.falsy(jobLog); @@ -202,7 +201,7 @@ test("Don't send adaptor logs to stdout", (t) => { ], }; - lightning.once('run:complete', (evt) => { + lightning.once('run:complete', () => { const jsonLogs = engineLogger._history; // The engine logger shouldn't print out any adaptor logs const jobLog = jsonLogs.find((l) => l.name === 'ADA'); @@ -303,7 +302,6 @@ test.skip('run a job with credentials', (t) => { const app = new Koa(); app.use(async (ctx, next) => { - 
console.log('GET!'); // TODO check basic credential ctx.body = '{ message: "ok" }'; ctx.response.headers['Content-Type'] = 'application/json'; @@ -431,13 +429,11 @@ test.skip('a timeout error should still call step-complete', (t) => { }); lightning.once('step:complete', (event) => { - console.log(event); t.is(event.payload.reason, 'kill'); t.is(event.payload.error_type, 'TimeoutError'); }); lightning.once('run:complete', () => { - console.log('DONE!'); done(); }); From c1512cc477581a77784a316acdd491d29cec3337 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 12:28:10 +0000 Subject: [PATCH 117/128] types --- packages/engine-multi/test/api/lifecycle.test.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index c86c48670..beca00e69 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -240,7 +240,7 @@ test('logs get sent to stdout', (t) => { log(context, event); - const last = stdout._last; + const last: any = stdout._last; t.truthy(last); t.is(last.message[0], 'oh hai'); t.is(last.name, 'r/t'); From f5738b8dbb487ac84073bfa5ecb34eb5e1a5fdc8 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 12:41:52 +0000 Subject: [PATCH 118/128] logger: rethink mock proxy. It's still not working. 
--- packages/logger/src/logger.ts | 1 + packages/logger/src/mock.ts | 8 +++-- packages/logger/test/logger.test.ts | 49 ++++++++++++++--------------- 3 files changed, 31 insertions(+), 27 deletions(-) diff --git a/packages/logger/src/logger.ts b/packages/logger/src/logger.ts index 74e59e486..05f2d3303 100644 --- a/packages/logger/src/logger.ts +++ b/packages/logger/src/logger.ts @@ -233,6 +233,7 @@ export default function (name?: string, options: LogOptions = {}): Logger { j = j as JSONLog; log(j.name, j.level, ...j.message); + return [j.name, j.level, ...j.message]; }; // print() will log without any metadata/overhead/santization diff --git a/packages/logger/src/mock.ts b/packages/logger/src/mock.ts index 579e977d5..be869c9eb 100644 --- a/packages/logger/src/mock.ts +++ b/packages/logger/src/mock.ts @@ -57,9 +57,13 @@ const mockLogger = ( mock.break = () => {}; // do nothing // @ts-ignore - mock.proxy = (obj: JSONLog) => { + const proxy = m.proxy; + // @ts-ignore + mock.proxy = (...args) => { + // @ts-ignore + const result = proxy(...args); // @ts-ignore - history.push(obj); + history.push(['proxy', result]); }; // TODO should this use json? 
diff --git a/packages/logger/test/logger.test.ts b/packages/logger/test/logger.test.ts index 53e9a7515..b98105a14 100644 --- a/packages/logger/test/logger.test.ts +++ b/packages/logger/test/logger.test.ts @@ -549,42 +549,41 @@ test('log an error object', (t) => { test('proxy a json argument to string', (t) => { const logger = createLogger('x'); logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); - - const { namespace, level, message } = logger._parse(logger._last); - t.is(namespace, 'y'); + const [_level, [name, level, message]] = logger._last; + t.is(name, 'y'); t.is(level, 'success'); - t.deepEqual(message, 'hello'); + t.is(message, 'hello'); }); test('proxy string arguments to string', (t) => { const logger = createLogger('x'); logger.proxy('y', 'success', ['hello']); - const { namespace, level, message } = logger._parse(logger._last); - t.is(namespace, 'y'); - t.is(level, 'success'); - t.deepEqual(message, 'hello'); -}); - -test('proxy a json argument to json', (t) => { - const logger = createLogger('x', { json: true }); - logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); - - const { name, level, message } = logger._last; + const [_level, [name, level, message]] = logger._last; t.is(name, 'y'); t.is(level, 'success'); - t.deepEqual(message, ['hello']); + t.deepEqual(message, 'hello'); }); -test('proxy string arguments to json', (t) => { - const logger = createLogger('x', { json: true }); - logger.proxy('y', 'success', ['hello']); - - const { name, level, message } = logger._last; - t.is(name, 'y'); - t.is(level, 'success'); - t.deepEqual(message, ['hello']); -}); +// test.only('proxy a json argument to json', (t) => { +// const logger = createLogger('x', { json: true }); +// logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); +// console.log(logger._last); +// const [_level, [name, level, message]] = logger._last; +// t.is(name, 'y'); +// t.is(level, 'success'); +// t.deepEqual(message, ['hello']); +// }); + +// 
test('proxy string arguments to json', (t) => { +// const logger = createLogger('x', { json: true }); +// logger.proxy('y', 'success', ['hello']); + +// const [_level, { name, level, message }] = logger._last; +// t.is(name, 'y'); +// t.is(level, 'success'); +// t.deepEqual(message, ['hello']); +// }); test('proxy should respect log levels', (t) => { const logger = createLogger('x', { level: 'default' }); From 968d6f9a365fc600382a8da9a77a8340c6c24672 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:10:05 +0000 Subject: [PATCH 119/128] logger: fix mock proxy function --- packages/logger/src/mock.ts | 12 +------ packages/logger/test/logger.test.ts | 49 ++++++++++++++--------------- 2 files changed, 25 insertions(+), 36 deletions(-) diff --git a/packages/logger/src/mock.ts b/packages/logger/src/mock.ts index be869c9eb..14517788c 100644 --- a/packages/logger/src/mock.ts +++ b/packages/logger/src/mock.ts @@ -1,6 +1,6 @@ // Mock logger which doesn't log anything // TODO built in an API to return the history - very useful in unit tests -import createLogger, { Logger, LogFns, StringLog, JSONLog } from './logger'; +import createLogger, { Logger, LogFns, StringLog } from './logger'; import type { LogOptions, LogEmitter } from './options'; // Each log message is saved as the level, then whatever was actually logged @@ -56,16 +56,6 @@ const mockLogger = ( mock.break = () => {}; // do nothing - // @ts-ignore - const proxy = m.proxy; - // @ts-ignore - mock.proxy = (...args) => { - // @ts-ignore - const result = proxy(...args); - // @ts-ignore - history.push(['proxy', result]); - }; - // TODO should this use json? 
mock.print = (...out: any[]) => { if (opts.level !== 'none') { diff --git a/packages/logger/test/logger.test.ts b/packages/logger/test/logger.test.ts index b98105a14..3acaf7522 100644 --- a/packages/logger/test/logger.test.ts +++ b/packages/logger/test/logger.test.ts @@ -549,8 +549,8 @@ test('log an error object', (t) => { test('proxy a json argument to string', (t) => { const logger = createLogger('x'); logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); - const [_level, [name, level, message]] = logger._last; - t.is(name, 'y'); + const [level, name, _icon, message] = logger._last; + t.is(name, '[y]'); t.is(level, 'success'); t.is(message, 'hello'); }); @@ -559,31 +559,30 @@ test('proxy string arguments to string', (t) => { const logger = createLogger('x'); logger.proxy('y', 'success', ['hello']); - const [_level, [name, level, message]] = logger._last; + const [level, name, _icon, message] = logger._last; + t.is(name, '[y]'); + t.is(level, 'success'); + t.is(message, 'hello'); +}); + +test('proxy a json argument to json', (t) => { + const logger = createLogger('x', { json: true }); + logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); + const { name, level, message } = logger._last; t.is(name, 'y'); t.is(level, 'success'); - t.deepEqual(message, 'hello'); -}); - -// test.only('proxy a json argument to json', (t) => { -// const logger = createLogger('x', { json: true }); -// logger.proxy({ name: 'y', level: 'success', message: ['hello'] }); -// console.log(logger._last); -// const [_level, [name, level, message]] = logger._last; -// t.is(name, 'y'); -// t.is(level, 'success'); -// t.deepEqual(message, ['hello']); -// }); - -// test('proxy string arguments to json', (t) => { -// const logger = createLogger('x', { json: true }); -// logger.proxy('y', 'success', ['hello']); - -// const [_level, { name, level, message }] = logger._last; -// t.is(name, 'y'); -// t.is(level, 'success'); -// t.deepEqual(message, ['hello']); -// }); + 
t.deepEqual(message, ['hello']); +}); + +test('proxy string arguments to json', (t) => { + const logger = createLogger('x', { json: true }); + logger.proxy('y', 'success', ['hello']); + + const { name, level, message } = logger._last; + t.is(name, 'y'); + t.is(level, 'success'); + t.deepEqual(message, ['hello']); +}); test('proxy should respect log levels', (t) => { const logger = createLogger('x', { level: 'default' }); From 3dd20755af923bac65c5a566baa65062cb9bc5d0 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:10:37 +0000 Subject: [PATCH 120/128] engine: fix tests --- packages/engine-multi/test/api/lifecycle.test.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/packages/engine-multi/test/api/lifecycle.test.ts b/packages/engine-multi/test/api/lifecycle.test.ts index beca00e69..329128e5d 100644 --- a/packages/engine-multi/test/api/lifecycle.test.ts +++ b/packages/engine-multi/test/api/lifecycle.test.ts @@ -221,7 +221,7 @@ test(`log: emits ${e.WORKFLOW_LOG}`, (t) => { test('logs get sent to stdout', (t) => { const workflowId = 'a'; - const stdout = createMockLogger(undefined, { json: true }); + const stdout = createMockLogger(undefined, { level: 'debug', json: true }); const context = createContext(workflowId); context.logger = stdout; @@ -249,7 +249,7 @@ test('logs get sent to stdout', (t) => { test('job logs do not get sent to stdout', (t) => { const workflowId = 'a'; - const stdout = createMockLogger(); + const stdout = createMockLogger(undefined, { level: 'debug' }); const context = createContext(workflowId); context.logger = stdout; @@ -274,7 +274,7 @@ test('job logs do not get sent to stdout', (t) => { test('adaptor logs do not get sent to stdout', (t) => { const workflowId = 'a'; - const stdout = createMockLogger(); + const stdout = createMockLogger(undefined, { level: 'debug' }); const context = createContext(workflowId); context.logger = stdout; From 21119c5c8835ee0d958fcac6cccfebe7cceee82a Mon Sep 17 00:00:00 
2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:24:01 +0000 Subject: [PATCH 121/128] tests: update tests --- .../@openfn/test-adaptor_1.0.0/index.js | 4 ++-- .../worker/dummy-repo/package.json | 1 + integration-tests/worker/src/init.ts | 10 ++++++---- .../worker/test/integration.test.ts | 19 ++++++++----------- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js b/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js index 17c958136..cedfb278f 100644 --- a/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js +++ b/integration-tests/worker/dummy-repo/node_modules/@openfn/test-adaptor_1.0.0/index.js @@ -2,5 +2,5 @@ export const fn = (f) => (s) => f(s); export const log = (message) => (s) => { console.log(message); - return s; -}; \ No newline at end of file + return s +} \ No newline at end of file diff --git a/integration-tests/worker/dummy-repo/package.json b/integration-tests/worker/dummy-repo/package.json index 1bc78cb27..1945fc510 100644 --- a/integration-tests/worker/dummy-repo/package.json +++ b/integration-tests/worker/dummy-repo/package.json @@ -3,6 +3,7 @@ "private": true, "version": "1.0.0", "dependencies": { + "@openfn/language-common_latest": "npm:@openfn/language-common@^1.12.0", "@openfn/stateful-test_1.0.0": "@npm:@openfn/stateful-test@1.0.0", "@openfn/test-adaptor_1.0.0": "@npm:@openfn/test-adaptor@1.0.0" } diff --git a/integration-tests/worker/src/init.ts b/integration-tests/worker/src/init.ts index fba523600..c121420d5 100644 --- a/integration-tests/worker/src/init.ts +++ b/integration-tests/worker/src/init.ts @@ -26,9 +26,9 @@ export const initWorker = async ( ) => { const workerPort = randomPort(); - const engineLogger = createMockLogger('engine', { + const engineLogger = createLogger('engine', { level: 'debug', - json: true, + // json: true, }); const engine = await createEngine({ @@ 
-38,13 +38,15 @@ export const initWorker = async ( }); const worker = createWorkerServer(engine, { - logger: createMockLogger(), - // logger: createLogger('worker', { level: 'debug' }), + // logger: createMockLogger(), + logger: createLogger('worker', { level: 'debug' }), port: workerPort, lightning: `ws://localhost:${lightningPort}/worker`, secret: crypto.randomUUID(), ...workerArgs, }); + console.log(' ***** ', worker.id); + return { engine, engineLogger, worker }; }; diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index b89bef1ed..85932d50b 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -43,7 +43,6 @@ const createDummyWorker = () => { const engineArgs = { repoDir: path.resolve('./dummy-repo'), maxWorkers: 1, - purge: false, }; return initWorker(lightningPort, engineArgs); }; @@ -174,7 +173,7 @@ test("Don't send job logs to stdout", (t) => { // But it SHOULD log engine stuff const runtimeLog = jsonLogs.find( - (l) => l.name === 'R/T' && l.message[0].match(/completed workflow/i) + (l) => l.name === 'engine' && l.message[0].match(/complete workflow/i) ); t.truthy(runtimeLog); done(); @@ -211,7 +210,7 @@ test("Don't send adaptor logs to stdout", (t) => { // But it SHOULD log engine stuff const runtimeLog = jsonLogs.find( - (l) => l.name === 'R/T' && l.message[0].match(/completed workflow/i) + (l) => l.name === 'engine' && l.message[0].match(/complete workflow/i) ); t.truthy(runtimeLog); done(); @@ -509,11 +508,13 @@ test('an OOM error should still call step-complete', (t) => { // TODO this test is a bit different now // I think it's worth keeping -test('stateful adaptor should create a new client for each attempt', (t) => { +test.only('stateful adaptor should create a new client for each attempt', (t) => { return new Promise(async (done) => { // We want to create our own special worker here await worker.destroy(); - ({ worker 
} = await createDummyWorker()); + // ({ worker } = await createDummyWorker()); + + console.log(' >> ', worker.id); const attempt1 = { id: crypto.randomUUID(), @@ -535,8 +536,10 @@ test('stateful adaptor should create a new client for each attempt', (t) => { let results = {}; lightning.on('run:complete', (evt) => { + console.log(evt.payload); const id = evt.runId; results[id] = lightning.getResult(id); + console.log(results[id]); if (id === attempt2.id) { const one = results[attempt1.id]; @@ -550,12 +553,6 @@ test('stateful adaptor should create a new client for each attempt', (t) => { } }); - const engineArgs = { - repoDir: path.resolve('./dummy-repo'), - maxWorkers: 1, - }; - await initWorker(lightningPort, engineArgs); - lightning.enqueueRun(attempt1); lightning.enqueueRun(attempt2); }); From 322813c648dc429f1d7ad110bc7e4ba09ffba50f Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:49:56 +0000 Subject: [PATCH 122/128] worker: fixed a tricky issue with server shutdown If a server is destroyed before the lightning connection returned, the workloop will still fire even if the server is technically destroyed --- integration-tests/worker/src/init.ts | 12 +++++------- integration-tests/worker/test/integration.test.ts | 10 ++-------- packages/ws-worker/src/api/destroy.ts | 7 ++++--- packages/ws-worker/src/server.ts | 10 ++++++++++ 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/integration-tests/worker/src/init.ts b/integration-tests/worker/src/init.ts index c121420d5..9e1c768a3 100644 --- a/integration-tests/worker/src/init.ts +++ b/integration-tests/worker/src/init.ts @@ -4,7 +4,7 @@ import crypto from 'node:crypto'; import createLightningServer, { toBase64 } from '@openfn/lightning-mock'; import createEngine from '@openfn/engine-multi'; import createWorkerServer from '@openfn/ws-worker'; -import createLogger, { createMockLogger } from '@openfn/logger'; +import { createMockLogger } from '@openfn/logger'; export const randomPort = () => 
Math.round(2000 + Math.random() * 1000); @@ -26,9 +26,9 @@ export const initWorker = async ( ) => { const workerPort = randomPort(); - const engineLogger = createLogger('engine', { + const engineLogger = createMockLogger('engine', { level: 'debug', - // json: true, + json: true, }); const engine = await createEngine({ @@ -38,15 +38,13 @@ export const initWorker = async ( }); const worker = createWorkerServer(engine, { - // logger: createMockLogger(), - logger: createLogger('worker', { level: 'debug' }), + logger: createMockLogger(), + // logger: createLogger('worker', { level: 'debug' }), port: workerPort, lightning: `ws://localhost:${lightningPort}/worker`, secret: crypto.randomUUID(), ...workerArgs, }); - console.log(' ***** ', worker.id); - return { engine, engineLogger, worker }; }; diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 85932d50b..ad8626170 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -506,15 +506,11 @@ test('an OOM error should still call step-complete', (t) => { // }); // }); -// TODO this test is a bit different now -// I think it's worth keeping -test.only('stateful adaptor should create a new client for each attempt', (t) => { +test('stateful adaptor should create a new client for each attempt', (t) => { return new Promise(async (done) => { // We want to create our own special worker here await worker.destroy(); - // ({ worker } = await createDummyWorker()); - - console.log(' >> ', worker.id); + ({ worker, engineLogger } = await createDummyWorker()); const attempt1 = { id: crypto.randomUUID(), @@ -536,10 +532,8 @@ test.only('stateful adaptor should create a new client for each attempt', (t) => let results = {}; lightning.on('run:complete', (evt) => { - console.log(evt.payload); const id = evt.runId; results[id] = lightning.getResult(id); - console.log(results[id]); if (id === attempt2.id) { const 
one = results[attempt1.id]; diff --git a/packages/ws-worker/src/api/destroy.ts b/packages/ws-worker/src/api/destroy.ts index 1b9b3b1d9..f5102bedf 100644 --- a/packages/ws-worker/src/api/destroy.ts +++ b/packages/ws-worker/src/api/destroy.ts @@ -30,6 +30,8 @@ const destroy = async (app: ServerApp, logger: Logger) => { await app.engine.destroy(); app.socket?.disconnect(); + logger.info('Server closed....'); + resolve(); }), ]); @@ -41,9 +43,7 @@ const waitForRuns = (app: ServerApp, logger: Logger) => new Promise((resolve) => { const log = () => { logger.debug( - `Waiting for ${ - Object.keys(app.workflows).length - } runs to complete...` + `Waiting for ${Object.keys(app.workflows).length} runs to complete...` ); }; @@ -61,6 +61,7 @@ const waitForRuns = (app: ServerApp, logger: Logger) => log(); app.events.on(INTERNAL_RUN_COMPLETE, onRunComplete); } else { + logger.debug('No active rns detected'); resolve(); } }); diff --git a/packages/ws-worker/src/server.ts b/packages/ws-worker/src/server.ts index b1c5590fb..2de810ab3 100644 --- a/packages/ws-worker/src/server.ts +++ b/packages/ws-worker/src/server.ts @@ -67,6 +67,12 @@ function connect(app: ServerApp, logger: Logger, options: ServerOptions = {}) { // A new connection made to the queue const onConnect = ({ socket, channel }: SocketAndChannel) => { + if (app.destroyed) { + // Fix an edge case where a server can be destroyed before it is + // even connnected + // If this has happened, we do NOT want to go and start the workloop! 
+ return; + } logger.success('Connected to Lightning at', options.lightning); // save the channel and socket @@ -111,6 +117,10 @@ function connect(app: ServerApp, logger: Logger, options: ServerOptions = {}) { // We failed to connect to the queue const onError = (e: any) => { + if (app.destroyed) { + return; + } + logger.error( 'CRITICAL ERROR: could not connect to lightning at', options.lightning From b7f01fdab45074b5f35f8e3033c030935664fc47 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:54:27 +0000 Subject: [PATCH 123/128] package lock --- pnpm-lock.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 179639c57..7e6cb90f8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -656,8 +656,8 @@ importers: specifier: ^3.2.1 version: 3.2.1 phoenix: - specifier: ^1.7.7 - version: 1.7.7 + specifier: 1.7.10 + version: 1.7.10 ws: specifier: ^8.14.1 version: 8.14.1 @@ -6165,6 +6165,10 @@ packages: through2: 2.0.5 dev: true + /phoenix@1.7.10: + resolution: {integrity: sha512-akfr/QvLPFRB8sORyc8FQFY/YoGwjWhka/YRcu45sKlBOZHvA80EkLYBUsYlW63UicxgrXABZdrjDkv54LTE+g==} + dev: false + /phoenix@1.7.7: resolution: {integrity: sha512-moAN6e4Z16x/x1nswUpnTR2v5gm7HsI7eluZ2YnYUUsBNzi3cY/5frmiJfXIEi877IQAafzTfp8hd6vEUMme+w==} dev: false From ceda2e9252150ed0bd0736c3f677f4ccaadd35ed Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 14:54:55 +0000 Subject: [PATCH 124/128] package lock --- pnpm-lock.yaml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 179639c57..7e6cb90f8 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -656,8 +656,8 @@ importers: specifier: ^3.2.1 version: 3.2.1 phoenix: - specifier: ^1.7.7 - version: 1.7.7 + specifier: 1.7.10 + version: 1.7.10 ws: specifier: ^8.14.1 version: 8.14.1 @@ -6165,6 +6165,10 @@ packages: through2: 2.0.5 dev: true + /phoenix@1.7.10: + resolution: {integrity: 
sha512-akfr/QvLPFRB8sORyc8FQFY/YoGwjWhka/YRcu45sKlBOZHvA80EkLYBUsYlW63UicxgrXABZdrjDkv54LTE+g==} + dev: false + /phoenix@1.7.7: resolution: {integrity: sha512-moAN6e4Z16x/x1nswUpnTR2v5gm7HsI7eluZ2YnYUUsBNzi3cY/5frmiJfXIEi877IQAafzTfp8hd6vEUMme+w==} dev: false From 184a60e614aa7a97116e3f96df5948973178ce37 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 15:20:27 +0000 Subject: [PATCH 125/128] tests: tweak output --- integration-tests/worker/test/autoinstall.test.ts | 1 - integration-tests/worker/test/benchmark.test.ts | 2 +- integration-tests/worker/test/integration.test.ts | 3 ++- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/integration-tests/worker/test/autoinstall.test.ts b/integration-tests/worker/test/autoinstall.test.ts index b2856fa24..d04b702c8 100644 --- a/integration-tests/worker/test/autoinstall.test.ts +++ b/integration-tests/worker/test/autoinstall.test.ts @@ -20,7 +20,6 @@ let worker; const run = async (attempt) => { return new Promise(async (done, reject) => { lightning.on('run:complete', (evt) => { - console.log('>', evt); if (attempt.id === evt.runId) { done(lightning.getResult(attempt.id)); } diff --git a/integration-tests/worker/test/benchmark.test.ts b/integration-tests/worker/test/benchmark.test.ts index 9c117e635..83990ccef 100644 --- a/integration-tests/worker/test/benchmark.test.ts +++ b/integration-tests/worker/test/benchmark.test.ts @@ -89,7 +89,7 @@ test.serial.skip('run 100 attempts', async (t) => { } lightning.on('step:complete', (evt) => { - // May want to disable this but it's nice feedback + // May want to disable this but it's nice feedback t.log('Completed ', evt.runId); if (evt.payload.reason !== 'success') { diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index ad8626170..8d0ce42ff 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -237,7 +237,8 @@ test('run a job 
with initial state (with data)', (t) => { lightning.addDataclip('s1', initialState); - lightning.once('run:complete', () => { + lightning.once('run:complete', (evt) => { + t.log(evt.payload); const result = lightning.getResult(attempt.id); t.deepEqual(result, { ...initialState, From c41f7f5b3c1f4dacce51312cae1323176fb84932 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 15:30:33 +0000 Subject: [PATCH 126/128] tests: run serially --- .../worker/test/integration.test.ts | 107 +++++++++--------- 1 file changed, 55 insertions(+), 52 deletions(-) diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 8d0ce42ff..10862417e 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -47,7 +47,7 @@ const createDummyWorker = () => { return initWorker(lightningPort, engineArgs); }; -test('should run a simple job with no compilation or adaptor', (t) => { +test.serial('should run a simple job with no compilation or adaptor', (t) => { return new Promise(async (done) => { lightning.once('run:complete', (evt) => { // This will fetch the final dataclip from the attempt @@ -70,7 +70,7 @@ test('should run a simple job with no compilation or adaptor', (t) => { }); }); -test('run a job with autoinstall of common', (t) => { +test.serial('run a job with autoinstall of common', (t) => { return new Promise(async (done) => { let autoinstallEvent; @@ -114,7 +114,7 @@ test('run a job with autoinstall of common', (t) => { }); // this depends on prior test! 
-test('run a job which does NOT autoinstall common', (t) => { +test.serial('run a job which does NOT autoinstall common', (t) => { return new Promise(async (done) => { lightning.once('run:complete', () => { try { @@ -151,7 +151,7 @@ test('run a job which does NOT autoinstall common', (t) => { }); }); -test("Don't send job logs to stdout", (t) => { +test.serial("Don't send job logs to stdout", (t) => { return new Promise(async (done) => { const attempt = { id: crypto.randomUUID(), @@ -183,7 +183,7 @@ test("Don't send job logs to stdout", (t) => { }); }); -test("Don't send adaptor logs to stdout", (t) => { +test.serial("Don't send adaptor logs to stdout", (t) => { return new Promise(async (done) => { // We have to create a new worker with a different repo for this one await worker.destroy(); @@ -220,7 +220,7 @@ test("Don't send adaptor logs to stdout", (t) => { }); }); -test('run a job with initial state (with data)', (t) => { +test.serial('run a job with initial state (with data)', (t) => { return new Promise(async (done) => { const attempt = { id: crypto.randomUUID(), @@ -255,7 +255,7 @@ test('run a job with initial state (with data)', (t) => { }); }); -test('run a job with initial state (no top level keys)', (t) => { +test.serial('run a job with initial state (no top level keys)', (t) => { return new Promise(async (done) => { const attempt = { id: crypto.randomUUID(), @@ -357,7 +357,7 @@ test.skip('run a job with credentials', (t) => { }); }); -test('run a job with bad credentials', (t) => { +test.serial('run a job with bad credentials', (t) => { return new Promise(async (done) => { const attempt = { id: crypto.randomUUID(), @@ -386,7 +386,7 @@ test('run a job with bad credentials', (t) => { }); }); -test('blacklist a non-openfn adaptor', (t) => { +test.serial('blacklist a non-openfn adaptor', (t) => { return new Promise(async (done) => { const attempt = { id: crypto.randomUUID(), @@ -441,7 +441,7 @@ test.skip('a timeout error should still call step-complete', (t) 
=> { }); }); -test('an OOM error should still call step-complete', (t) => { +test.serial('an OOM error should still call step-complete', (t) => { return new Promise(async (done) => { const attempt = { id: crypto.randomUUID(), @@ -472,7 +472,7 @@ test('an OOM error should still call step-complete', (t) => { }); }); -// test('run a job with complex behaviours (initial state, branching)', (t) => { +// test.serial('run a job with complex behaviours (initial state, branching)', (t) => { // const attempt = { // id: 'a1', // initialState: 's1 @@ -507,53 +507,56 @@ test('an OOM error should still call step-complete', (t) => { // }); // }); -test('stateful adaptor should create a new client for each attempt', (t) => { - return new Promise(async (done) => { - // We want to create our own special worker here - await worker.destroy(); - ({ worker, engineLogger } = await createDummyWorker()); - - const attempt1 = { - id: crypto.randomUUID(), - jobs: [ - { - adaptor: '@openfn/stateful-test@1.0.0', - // manual import shouldn't be needed but its not important enough to fight over - body: `import { fn, threadId, clientId } from '@openfn/stateful-test'; +test.serial( + 'stateful adaptor should create a new client for each attempt', + (t) => { + return new Promise(async (done) => { + // We want to create our own special worker here + await worker.destroy(); + ({ worker, engineLogger } = await createDummyWorker()); + + const attempt1 = { + id: crypto.randomUUID(), + jobs: [ + { + adaptor: '@openfn/stateful-test@1.0.0', + // manual import shouldn't be needed but its not important enough to fight over + body: `import { fn, threadId, clientId } from '@openfn/stateful-test'; fn(() => { return { threadId, clientId } })`, - }, - ], - }; - const attempt2 = { - ...attempt1, - id: crypto.randomUUID(), - }; - let results = {}; - - lightning.on('run:complete', (evt) => { - const id = evt.runId; - results[id] = lightning.getResult(id); - - if (id === attempt2.id) { - const one = 
results[attempt1.id]; - const two = results[attempt2.id]; - - // The two attempts should run in different threads - t.not(one.threadId, two.threadId); - t.not(one.clientId, two.clientId); + }, + ], + }; + const attempt2 = { + ...attempt1, + id: crypto.randomUUID(), + }; + let results = {}; + + lightning.on('run:complete', (evt) => { + const id = evt.runId; + results[id] = lightning.getResult(id); + + if (id === attempt2.id) { + const one = results[attempt1.id]; + const two = results[attempt2.id]; + + // The two attempts should run in different threads + t.not(one.threadId, two.threadId); + t.not(one.clientId, two.clientId); + + done(); + } + }); - done(); - } + lightning.enqueueRun(attempt1); + lightning.enqueueRun(attempt2); }); + } +); - lightning.enqueueRun(attempt1); - lightning.enqueueRun(attempt2); - }); -}); - -test('worker should exit if it has an invalid key', (t) => { +test.serial('worker should exit if it has an invalid key', (t) => { return new Promise(async (done) => { if (!worker.destroyed) { await worker.destroy(); From 854a586e1da1979e9178ec8ed38a81737d218767 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 15:38:58 +0000 Subject: [PATCH 127/128] tests: reorganise --- .../worker/test/integration.test.ts | 137 +++++++++--------- 1 file changed, 68 insertions(+), 69 deletions(-) diff --git a/integration-tests/worker/test/integration.test.ts b/integration-tests/worker/test/integration.test.ts index 10862417e..3781a3bdf 100644 --- a/integration-tests/worker/test/integration.test.ts +++ b/integration-tests/worker/test/integration.test.ts @@ -151,75 +151,6 @@ test.serial('run a job which does NOT autoinstall common', (t) => { }); }); -test.serial("Don't send job logs to stdout", (t) => { - return new Promise(async (done) => { - const attempt = { - id: crypto.randomUUID(), - jobs: [ - { - adaptor: '@openfn/language-common@latest', - body: 'fn((s) => { console.log("@@@"); return s })', - }, - ], - }; - - lightning.once('run:complete', () => { 
- const jsonLogs = engineLogger._history; - // The engine logger shouldn't print out any job logs - const jobLog = jsonLogs.find((l) => l.name === 'JOB'); - t.falsy(jobLog); - const jobLog2 = jsonLogs.find((l) => l.message[0] === '@@@'); - t.falsy(jobLog2); - - // But it SHOULD log engine stuff - const runtimeLog = jsonLogs.find( - (l) => l.name === 'engine' && l.message[0].match(/complete workflow/i) - ); - t.truthy(runtimeLog); - done(); - }); - - lightning.enqueueRun(attempt); - }); -}); - -test.serial("Don't send adaptor logs to stdout", (t) => { - return new Promise(async (done) => { - // We have to create a new worker with a different repo for this one - await worker.destroy(); - ({ worker, engineLogger } = await createDummyWorker()); - - const message = 've have been expecting you meester bond'; - const attempt = { - id: crypto.randomUUID(), - jobs: [ - { - adaptor: '@openfn/test-adaptor@1.0.0', - body: `import { log } from '@openfn/test-adaptor'; log("${message}")`, - }, - ], - }; - - lightning.once('run:complete', () => { - const jsonLogs = engineLogger._history; - // The engine logger shouldn't print out any adaptor logs - const jobLog = jsonLogs.find((l) => l.name === 'ADA'); - t.falsy(jobLog); - const jobLog2 = jsonLogs.find((l) => l.message[0] === message); - t.falsy(jobLog2); - - // But it SHOULD log engine stuff - const runtimeLog = jsonLogs.find( - (l) => l.name === 'engine' && l.message[0].match(/complete workflow/i) - ); - t.truthy(runtimeLog); - done(); - }); - - lightning.enqueueRun(attempt); - }); -}); - test.serial('run a job with initial state (with data)', (t) => { return new Promise(async (done) => { const attempt = { @@ -506,6 +437,74 @@ test.serial('an OOM error should still call step-complete', (t) => { // }); // }); // }); +test.serial("Don't send job logs to stdout", (t) => { + return new Promise(async (done) => { + const attempt = { + id: crypto.randomUUID(), + jobs: [ + { + adaptor: '@openfn/language-common@latest', + body: 'fn((s) 
=> { console.log("@@@"); return s })', + }, + ], + }; + + lightning.once('run:complete', () => { + const jsonLogs = engineLogger._history; + // The engine logger shouldn't print out any job logs + const jobLog = jsonLogs.find((l) => l.name === 'JOB'); + t.falsy(jobLog); + const jobLog2 = jsonLogs.find((l) => l.message[0] === '@@@'); + t.falsy(jobLog2); + + // But it SHOULD log engine stuff + const runtimeLog = jsonLogs.find( + (l) => l.name === 'engine' && l.message[0].match(/complete workflow/i) + ); + t.truthy(runtimeLog); + done(); + }); + + lightning.enqueueRun(attempt); + }); +}); + +test.serial("Don't send adaptor logs to stdout", (t) => { + return new Promise(async (done) => { + // We have to create a new worker with a different repo for this one + await worker.destroy(); + ({ worker, engineLogger } = await createDummyWorker()); + + const message = 've have been expecting you meester bond'; + const attempt = { + id: crypto.randomUUID(), + jobs: [ + { + adaptor: '@openfn/test-adaptor@1.0.0', + body: `import { log } from '@openfn/test-adaptor'; log("${message}")`, + }, + ], + }; + + lightning.once('run:complete', () => { + const jsonLogs = engineLogger._history; + // The engine logger shouldn't print out any adaptor logs + const jobLog = jsonLogs.find((l) => l.name === 'ADA'); + t.falsy(jobLog); + const jobLog2 = jsonLogs.find((l) => l.message[0] === message); + t.falsy(jobLog2); + + // But it SHOULD log engine stuff + const runtimeLog = jsonLogs.find( + (l) => l.name === 'engine' && l.message[0].match(/complete workflow/i) + ); + t.truthy(runtimeLog); + done(); + }); + + lightning.enqueueRun(attempt); + }); +}); test.serial( 'stateful adaptor should create a new client for each attempt', From 56be004065c063174caf357b865b6adeb8f0dc64 Mon Sep 17 00:00:00 2001 From: Joe Clark Date: Thu, 15 Feb 2024 15:47:53 +0000 Subject: [PATCH 128/128] version: worker@1.0.0 cli@1.0.0 --- .changeset/angry-plants-call.md | 5 ----- .changeset/dull-bags-punch.md | 6 ------ 
.changeset/grumpy-candles-applaud.md | 5 ----- .changeset/grumpy-pillows-tie.md | 12 ------------ .changeset/old-planes-sort.md | 5 ----- .changeset/pretty-spoons-beam.md | 9 --------- .changeset/real-snakes-begin.md | 5 ----- .changeset/sixty-snails-lie.md | 5 ----- .changeset/swift-panthers-divide.md | 5 ----- .changeset/ten-dingos-explode.md | 6 ------ .changeset/three-shrimps-approve.md | 5 ----- .changeset/twelve-ravens-listen.md | 5 ----- .changeset/two-pugs-burn.md | 6 ------ .changeset/unlucky-moose-greet.md | 5 ----- .changeset/yellow-peaches-melt.md | 5 ----- integration-tests/worker/CHANGELOG.md | 20 ++++++++++++++++++++ integration-tests/worker/package.json | 2 +- packages/cli/CHANGELOG.md | 23 +++++++++++++++++++++++ packages/cli/package.json | 2 +- packages/compiler/CHANGELOG.md | 9 +++++++++ packages/compiler/package.json | 2 +- packages/deploy/CHANGELOG.md | 10 ++++++++++ packages/deploy/package.json | 2 +- packages/engine-multi/CHANGELOG.md | 20 ++++++++++++++++++++ packages/engine-multi/package.json | 2 +- packages/lightning-mock/CHANGELOG.md | 25 +++++++++++++++++++++++++ packages/lightning-mock/package.json | 2 +- packages/logger/CHANGELOG.md | 12 ++++++++++++ packages/logger/package.json | 2 +- packages/runtime/CHANGELOG.md | 17 +++++++++++++++++ packages/runtime/package.json | 2 +- packages/ws-worker/CHANGELOG.md | 20 ++++++++++++++++++++ packages/ws-worker/package.json | 2 +- 33 files changed, 165 insertions(+), 98 deletions(-) delete mode 100644 .changeset/angry-plants-call.md delete mode 100644 .changeset/dull-bags-punch.md delete mode 100644 .changeset/grumpy-candles-applaud.md delete mode 100644 .changeset/grumpy-pillows-tie.md delete mode 100644 .changeset/old-planes-sort.md delete mode 100644 .changeset/pretty-spoons-beam.md delete mode 100644 .changeset/real-snakes-begin.md delete mode 100644 .changeset/sixty-snails-lie.md delete mode 100644 .changeset/swift-panthers-divide.md delete mode 100644 .changeset/ten-dingos-explode.md delete 
mode 100644 .changeset/three-shrimps-approve.md delete mode 100644 .changeset/twelve-ravens-listen.md delete mode 100644 .changeset/two-pugs-burn.md delete mode 100644 .changeset/unlucky-moose-greet.md delete mode 100644 .changeset/yellow-peaches-melt.md diff --git a/.changeset/angry-plants-call.md b/.changeset/angry-plants-call.md deleted file mode 100644 index 615c72785..000000000 --- a/.changeset/angry-plants-call.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/engine-multi': patch ---- - -Don't log adaptor logs to stdout diff --git a/.changeset/dull-bags-punch.md b/.changeset/dull-bags-punch.md deleted file mode 100644 index 968ee0eb8..000000000 --- a/.changeset/dull-bags-punch.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -'@openfn/logger': patch ---- - -In JSON mode, do not stringify emitted messages. -Better handling of error objects diff --git a/.changeset/grumpy-candles-applaud.md b/.changeset/grumpy-candles-applaud.md deleted file mode 100644 index c711f8b42..000000000 --- a/.changeset/grumpy-candles-applaud.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/lightning-mock': minor ---- - -Optionally mock the run token diff --git a/.changeset/grumpy-pillows-tie.md b/.changeset/grumpy-pillows-tie.md deleted file mode 100644 index c4b59e4db..000000000 --- a/.changeset/grumpy-pillows-tie.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -'@openfn/cli': major ---- - -The 1.0 Release of the CLI updates the language and input of the CLI to match the nomenclature of Lightning. - -See the readme for details of the new terminology. 
- -- Add support for execution plans -- Deprecate old workflow format (old workflows are supported and will be automatically converted into the new "execution plans") -- Update terminology across the codebase and docs -- Remove strict mode diff --git a/.changeset/old-planes-sort.md b/.changeset/old-planes-sort.md deleted file mode 100644 index 7d0a644c3..000000000 --- a/.changeset/old-planes-sort.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/deploy': patch ---- - -Log the result to success (not always) diff --git a/.changeset/pretty-spoons-beam.md b/.changeset/pretty-spoons-beam.md deleted file mode 100644 index 64ab0fd77..000000000 --- a/.changeset/pretty-spoons-beam.md +++ /dev/null @@ -1,9 +0,0 @@ ---- -'@openfn/runtime': major ---- - -The 1.0 release of the runtime updates the signatures and language of the runtime to match Lightning. It also includes some housekeeping. - -- Update main run() signature -- Remove strict mode options -- Integrate with lexicon diff --git a/.changeset/real-snakes-begin.md b/.changeset/real-snakes-begin.md deleted file mode 100644 index 2b3849e60..000000000 --- a/.changeset/real-snakes-begin.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/engine-multi': patch ---- - -Update handling of logs so that JSON messages are stringified diff --git a/.changeset/sixty-snails-lie.md b/.changeset/sixty-snails-lie.md deleted file mode 100644 index 59a13a900..000000000 --- a/.changeset/sixty-snails-lie.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/cli': major ---- - -Autoinstall adaptors by default (pass `--no-autoinstall` to disable) diff --git a/.changeset/swift-panthers-divide.md b/.changeset/swift-panthers-divide.md deleted file mode 100644 index e4b98338d..000000000 --- a/.changeset/swift-panthers-divide.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/logger': patch ---- - -Support proxy() on the mock logger diff --git a/.changeset/ten-dingos-explode.md b/.changeset/ten-dingos-explode.md deleted file mode 100644 index 703d5a20b..000000000 
--- a/.changeset/ten-dingos-explode.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -'@openfn/engine-multi': major -'@openfn/ws-worker': major ---- - -The 1.0 release updates the language and input of the CLI to match the nomenclature of Lightning. diff --git a/.changeset/three-shrimps-approve.md b/.changeset/three-shrimps-approve.md deleted file mode 100644 index 9086eb90b..000000000 --- a/.changeset/three-shrimps-approve.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/ws-worker': patch ---- - -Better error handling for invalid dataclips diff --git a/.changeset/twelve-ravens-listen.md b/.changeset/twelve-ravens-listen.md deleted file mode 100644 index 553e323a7..000000000 --- a/.changeset/twelve-ravens-listen.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/engine-multi': patch ---- - -Add a CredentialLoadError diff --git a/.changeset/two-pugs-burn.md b/.changeset/two-pugs-burn.md deleted file mode 100644 index 080b1307b..000000000 --- a/.changeset/two-pugs-burn.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -'@openfn/lightning-mock': major -'@openfn/logger': major ---- - -Symbolic 1.0 version release diff --git a/.changeset/unlucky-moose-greet.md b/.changeset/unlucky-moose-greet.md deleted file mode 100644 index 5b4d40748..000000000 --- a/.changeset/unlucky-moose-greet.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/ws-worker': patch ---- - -Update handling of logs to accept stringified messages diff --git a/.changeset/yellow-peaches-melt.md b/.changeset/yellow-peaches-melt.md deleted file mode 100644 index 66f8ce48c..000000000 --- a/.changeset/yellow-peaches-melt.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -'@openfn/ws-worker': minor ---- - -Validate the run token diff --git a/integration-tests/worker/CHANGELOG.md b/integration-tests/worker/CHANGELOG.md index 1d9079065..57ec04d55 100644 --- a/integration-tests/worker/CHANGELOG.md +++ b/integration-tests/worker/CHANGELOG.md @@ -1,5 +1,25 @@ # @openfn/integration-tests-worker +## 1.0.35 + +### Patch Changes + +- Updated dependencies 
[5f24294] +- Updated dependencies [649ca43] +- Updated dependencies [29bff41] +- Updated dependencies [823b471] +- Updated dependencies [9f6c35d] +- Updated dependencies [86dd668] +- Updated dependencies [a97eb26] +- Updated dependencies [ea6fc05] +- Updated dependencies [86dd668] +- Updated dependencies [823b471] +- Updated dependencies [29bff41] + - @openfn/engine-multi@1.0.0 + - @openfn/logger@1.0.0 + - @openfn/lightning-mock@2.0.0 + - @openfn/ws-worker@1.0.0 + ## 1.0.34 ### Patch Changes diff --git a/integration-tests/worker/package.json b/integration-tests/worker/package.json index a5544372c..b9be215c8 100644 --- a/integration-tests/worker/package.json +++ b/integration-tests/worker/package.json @@ -1,7 +1,7 @@ { "name": "@openfn/integration-tests-worker", "private": true, - "version": "1.0.34", + "version": "1.0.35", "description": "Lightning WOrker integration tests", "author": "Open Function Group ", "license": "ISC", diff --git a/packages/cli/CHANGELOG.md b/packages/cli/CHANGELOG.md index 1b3cb9dc5..d63fb5564 100644 --- a/packages/cli/CHANGELOG.md +++ b/packages/cli/CHANGELOG.md @@ -1,5 +1,28 @@ # @openfn/cli +## 1.0.0 + +### Major Changes + +- 86dd668: The 1.0 Release of the CLI updates the language and input of the CLI to match the nomenclature of Lightning. + + See the readme for details of the new terminology. 
+ + - Add support for execution plans + - Deprecate old workflow format (old workflows are supported and will be automatically converted into the new "execution plans") + - Update terminology across the codebase and docs + - Remove strict mode + +- 101f5a1: Autoinstall adaptors by default (pass `--no-autoinstall` to disable) + +### Patch Changes + +- Updated dependencies + - @openfn/logger@1.0.0 + - @openfn/deploy@0.4.2 + - @openfn/runtime@1.0.0 + - @openfn/compiler@0.0.40 + ## 0.4.16 ### Patch Changes diff --git a/packages/cli/package.json b/packages/cli/package.json index aeae1d2c4..1b1f72ad0 100644 --- a/packages/cli/package.json +++ b/packages/cli/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/cli", - "version": "0.4.16", + "version": "1.0.0", "description": "CLI devtools for the openfn toolchain.", "engines": { "node": ">=18", diff --git a/packages/compiler/CHANGELOG.md b/packages/compiler/CHANGELOG.md index 48300926a..c50e21489 100644 --- a/packages/compiler/CHANGELOG.md +++ b/packages/compiler/CHANGELOG.md @@ -1,5 +1,14 @@ # @openfn/compiler +## 0.0.40 + +### Patch Changes + +- Updated dependencies [649ca43] +- Updated dependencies [9f6c35d] +- Updated dependencies [86dd668] + - @openfn/logger@1.0.0 + ## 0.0.39 ### Patch Changes diff --git a/packages/compiler/package.json b/packages/compiler/package.json index 1b44313c6..45d5718a0 100644 --- a/packages/compiler/package.json +++ b/packages/compiler/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/compiler", - "version": "0.0.39", + "version": "0.0.40", "description": "Compiler and language tooling for openfn jobs.", "author": "Open Function Group ", "license": "ISC", diff --git a/packages/deploy/CHANGELOG.md b/packages/deploy/CHANGELOG.md index 46ce5ec5d..66e70c31d 100644 --- a/packages/deploy/CHANGELOG.md +++ b/packages/deploy/CHANGELOG.md @@ -1,5 +1,15 @@ # @openfn/deploy +## 0.4.2 + +### Patch Changes + +- 86dd668: Log the result to success (not always) +- Updated dependencies [649ca43] +- Updated 
dependencies [9f6c35d] +- Updated dependencies [86dd668] + - @openfn/logger@1.0.0 + ## 0.4.1 ### Patch Changes diff --git a/packages/deploy/package.json b/packages/deploy/package.json index 3eb36e86a..aef2ded1c 100644 --- a/packages/deploy/package.json +++ b/packages/deploy/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/deploy", - "version": "0.4.1", + "version": "0.4.2", "description": "Deploy projects to Lightning instances", "type": "module", "exports": { diff --git a/packages/engine-multi/CHANGELOG.md b/packages/engine-multi/CHANGELOG.md index e58b97b27..ef3d59bb6 100644 --- a/packages/engine-multi/CHANGELOG.md +++ b/packages/engine-multi/CHANGELOG.md @@ -1,5 +1,25 @@ # engine-multi +## 1.0.0 + +### Major Changes + +- 86dd668: The 1.0 release updates the language and input of the Engine to match the nomenclature of Lightning. + +### Patch Changes + +- 5f24294: Don't log adaptor logs to stdout +- 823b471: Update handling of logs so that JSON messages are stringified +- ea6fc05: Add a CredentialLoadError +- Updated dependencies [649ca43] +- Updated dependencies [86dd668] +- Updated dependencies [9f6c35d] +- Updated dependencies [86dd668] + - @openfn/logger@1.0.0 + - @openfn/runtime@1.0.0 + - @openfn/compiler@0.0.40 + - @openfn/lexicon@1.0.0 + ## 0.4.1 ### Patch Changes diff --git a/packages/engine-multi/package.json b/packages/engine-multi/package.json index 2a18c5e18..0b4c520cc 100644 --- a/packages/engine-multi/package.json +++ b/packages/engine-multi/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/engine-multi", - "version": "0.4.1", + "version": "1.0.0", "description": "Multi-process runtime engine", "main": "dist/index.js", "type": "module", diff --git a/packages/lightning-mock/CHANGELOG.md b/packages/lightning-mock/CHANGELOG.md index 1184018dd..edff55188 100644 --- a/packages/lightning-mock/CHANGELOG.md +++ b/packages/lightning-mock/CHANGELOG.md @@ -1,5 +1,30 @@ # @openfn/lightning-mock +## 2.0.0 + +### Major Changes + +- 86dd668: Symbolic 1.0 version 
release + +### Minor Changes + +- 29bff41: Optionally mock the run token + +### Patch Changes + +- Updated dependencies [5f24294] +- Updated dependencies [649ca43] +- Updated dependencies [86dd668] +- Updated dependencies [823b471] +- Updated dependencies [9f6c35d] +- Updated dependencies [86dd668] +- Updated dependencies [ea6fc05] +- Updated dependencies [86dd668] + - @openfn/engine-multi@1.0.0 + - @openfn/logger@1.0.0 + - @openfn/runtime@1.0.0 + - @openfn/lexicon@1.0.0 + ## 1.2.1 ### Patch Changes diff --git a/packages/lightning-mock/package.json b/packages/lightning-mock/package.json index 170ab1ebf..6d44a6698 100644 --- a/packages/lightning-mock/package.json +++ b/packages/lightning-mock/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/lightning-mock", - "version": "1.2.1", + "version": "2.0.0", "private": true, "description": "A mock Lightning server", "main": "dist/index.js", diff --git a/packages/logger/CHANGELOG.md b/packages/logger/CHANGELOG.md index 39c72e12a..e6ffd9de3 100644 --- a/packages/logger/CHANGELOG.md +++ b/packages/logger/CHANGELOG.md @@ -1,5 +1,17 @@ # @openfn/logger +## 1.0.0 + +### Major Changes + +- 86dd668: Symbolic 1.0 version release + +### Patch Changes + +- 649ca43: In JSON mode, do not stringify emitted messages. 
+ Better handling of error objects +- 9f6c35d: Support proxy() on the mock logger + ## 0.0.20 ### Patch Changes diff --git a/packages/logger/package.json b/packages/logger/package.json index e0b75aa14..1b202bf12 100644 --- a/packages/logger/package.json +++ b/packages/logger/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/logger", - "version": "0.0.20", + "version": "1.0.0", "description": "Cross-package logging utility", "module": "dist/index.js", "author": "Open Function Group ", diff --git a/packages/runtime/CHANGELOG.md b/packages/runtime/CHANGELOG.md index c993ebd55..a3f02e071 100644 --- a/packages/runtime/CHANGELOG.md +++ b/packages/runtime/CHANGELOG.md @@ -1,5 +1,22 @@ # @openfn/runtime +## 1.0.0 + +### Major Changes + +- 86dd668: The 1.0 release of the runtime updates the signatures and language of the runtime to match Lightning. It also includes some housekeeping. + + - Update main run() signature + - Remove strict mode options + - Integrate with lexicon + +### Patch Changes + +- Updated dependencies [649ca43] +- Updated dependencies [9f6c35d] +- Updated dependencies [86dd668] + - @openfn/logger@1.0.0 + ## 0.2.6 ### Patch Changes diff --git a/packages/runtime/package.json b/packages/runtime/package.json index 3c0dcfcb3..8c00db466 100644 --- a/packages/runtime/package.json +++ b/packages/runtime/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/runtime", - "version": "0.2.6", + "version": "1.0.0", "description": "Job processing runtime.", "type": "module", "exports": { diff --git a/packages/ws-worker/CHANGELOG.md b/packages/ws-worker/CHANGELOG.md index 9fda2dddf..d717a45cd 100644 --- a/packages/ws-worker/CHANGELOG.md +++ b/packages/ws-worker/CHANGELOG.md @@ -1,5 +1,25 @@ # ws-worker +## 1.0.0 + +### Major Changes + +- 86dd668: The 1.0 release updates the language and input of the Worker to match the nomenclature of Lightning. 
+ +### Minor Changes + +- 29bff41: Validate the run token + +### Patch Changes + +- a97eb26: Better error handling for invalid dataclips +- 823b471: Update handling of logs to accept stringified messages +- Updated dependencies + - @openfn/engine-multi@1.0.0 + - @openfn/logger@1.0.0 + - @openfn/runtime@1.0.0 + - @openfn/lexicon@1.0.0 + ## 0.8.1 ### Patch Changes diff --git a/packages/ws-worker/package.json b/packages/ws-worker/package.json index 553e8244b..82eb2be59 100644 --- a/packages/ws-worker/package.json +++ b/packages/ws-worker/package.json @@ -1,6 +1,6 @@ { "name": "@openfn/ws-worker", - "version": "0.8.1", + "version": "1.0.0", "description": "A Websocket Worker to connect Lightning to a Runtime Engine", "main": "dist/index.js", "type": "module",