From 4ab02b8338fa5d1f5c4995f971f5a297a1316a1a Mon Sep 17 00:00:00 2001 From: Taylor Downs Date: Wed, 1 Nov 2023 16:31:46 +0000 Subject: [PATCH] split legacy and lightning docs --- docusaurus.config.js | 15 + versioned_docs/version-legacy/CNAME | 1 + .../version-legacy/about-lightning.md | 387 ++++++ versioned_docs/version-legacy/about.md | 17 + .../version-legacy/build/credentials.md | 34 + .../version-legacy/build/example-build.md | 224 ++++ versioned_docs/version-legacy/build/inbox.md | 82 ++ versioned_docs/version-legacy/build/jobs.md | 802 +++++++++++ .../build/lightning-quick-start.md | 341 +++++ .../version-legacy/build/triggers.md | 258 ++++ .../version-legacy/build/troubleshooting.md | 132 ++ versioned_docs/version-legacy/cli.md | 1182 +++++++++++++++++ versioned_docs/version-legacy/core.md | 58 + .../version-legacy/deploy/options.md | 112 ++ .../version-legacy/deploy/requirements.md | 158 +++ .../design/design-quickstart.md | 193 +++ .../design/when-to-integrate.md | 8 + .../version-legacy/devtools/home.md | 361 +++++ versioned_docs/version-legacy/faqs.md | 262 ++++ versioned_docs/version-legacy/for-devs.md | 77 ++ .../commcare-project-walkthrough.md | 306 +++++ .../getting-started/glossary.md | 141 ++ .../implementation-checklist.md | 117 ++ .../integrating-using-openfn.md | 14 + .../getting-started/integration-toolkit.md | 108 ++ .../getting-started/security.md | 117 ++ .../so-you-want-to-integrate.mdx | 103 ++ .../getting-started/terminology.md | 196 +++ versioned_docs/version-legacy/gsoc.md | 90 ++ .../version-legacy/instant-openhie.md | 325 +++++ versioned_docs/version-legacy/intro.md | 106 ++ versioned_docs/version-legacy/jobs/each.md | 145 ++ .../version-legacy/jobs/editing_locally.md | 74 ++ versioned_docs/version-legacy/jobs/errors.md | 88 ++ .../version-legacy/jobs/job-design-intro.md | 61 + .../version-legacy/jobs/job-studio.md | 30 + versioned_docs/version-legacy/jobs/limits.md | 76 ++ .../jobs/multiple-operations.md | 44 + .../version-legacy/jobs/operations.md | 36 + versioned_docs/version-legacy/jobs/state.md | 42 + .../version-legacy/jobs/understanding.md | 139 ++ .../jobs/working_with_branches.md | 59 + .../version-legacy/manage/platform-mgmt.md | 1072 +++++++++++++++ .../troubleshooting-tips-on-platform.md | 214 +++ .../version-legacy/microservice/home.md | 181 +++ .../version-legacy/openfn-roadmap.md | 77 ++ .../version-legacy/portability-versions.md | 285 ++++ versioned_docs/version-legacy/portability.md | 445 +++++++ .../version-legacy/release-notes.md | 515 +++++++ versioned_docs/version-legacy/roadmap.md | 69 + versioned_docs/version-legacy/source-apps.md | 59 + .../standards/digital-public-goods.md | 19 + .../version-legacy/standards/global-goods.md | 20 + .../version-legacy/standards/openhie.md | 81 ++ versioned_docs/version-legacy/style-guide.md | 297 +++++ versioned_docs/version-legacy/writing-code.md | 28 + versioned_docs/version-legacy/writing-docs.md | 59 + .../version-legacy-sidebars.json | 134 ++ versions.json | 3 + 59 files changed, 10679 insertions(+) create mode 100644 versioned_docs/version-legacy/CNAME create mode 100644 versioned_docs/version-legacy/about-lightning.md create mode 100644 versioned_docs/version-legacy/about.md create mode 100644 versioned_docs/version-legacy/build/credentials.md create mode 100644 versioned_docs/version-legacy/build/example-build.md create mode 100644 versioned_docs/version-legacy/build/inbox.md create mode 100644 versioned_docs/version-legacy/build/jobs.md create mode 100644 
versioned_docs/version-legacy/build/lightning-quick-start.md create mode 100644 versioned_docs/version-legacy/build/triggers.md create mode 100644 versioned_docs/version-legacy/build/troubleshooting.md create mode 100644 versioned_docs/version-legacy/cli.md create mode 100644 versioned_docs/version-legacy/core.md create mode 100644 versioned_docs/version-legacy/deploy/options.md create mode 100644 versioned_docs/version-legacy/deploy/requirements.md create mode 100644 versioned_docs/version-legacy/design/design-quickstart.md create mode 100644 versioned_docs/version-legacy/design/when-to-integrate.md create mode 100644 versioned_docs/version-legacy/devtools/home.md create mode 100644 versioned_docs/version-legacy/faqs.md create mode 100644 versioned_docs/version-legacy/for-devs.md create mode 100644 versioned_docs/version-legacy/getting-started/commcare-project-walkthrough.md create mode 100644 versioned_docs/version-legacy/getting-started/glossary.md create mode 100644 versioned_docs/version-legacy/getting-started/implementation-checklist.md create mode 100644 versioned_docs/version-legacy/getting-started/integrating-using-openfn.md create mode 100644 versioned_docs/version-legacy/getting-started/integration-toolkit.md create mode 100644 versioned_docs/version-legacy/getting-started/security.md create mode 100644 versioned_docs/version-legacy/getting-started/so-you-want-to-integrate.mdx create mode 100644 versioned_docs/version-legacy/getting-started/terminology.md create mode 100644 versioned_docs/version-legacy/gsoc.md create mode 100644 versioned_docs/version-legacy/instant-openhie.md create mode 100644 versioned_docs/version-legacy/intro.md create mode 100644 versioned_docs/version-legacy/jobs/each.md create mode 100644 versioned_docs/version-legacy/jobs/editing_locally.md create mode 100644 versioned_docs/version-legacy/jobs/errors.md create mode 100644 versioned_docs/version-legacy/jobs/job-design-intro.md create mode 100644 versioned_docs/version-legacy/jobs/job-studio.md create mode 100644 versioned_docs/version-legacy/jobs/limits.md create mode 100644 versioned_docs/version-legacy/jobs/multiple-operations.md create mode 100644 versioned_docs/version-legacy/jobs/operations.md create mode 100644 versioned_docs/version-legacy/jobs/state.md create mode 100644 versioned_docs/version-legacy/jobs/understanding.md create mode 100644 versioned_docs/version-legacy/jobs/working_with_branches.md create mode 100644 versioned_docs/version-legacy/manage/platform-mgmt.md create mode 100644 versioned_docs/version-legacy/manage/troubleshooting-tips-on-platform.md create mode 100644 versioned_docs/version-legacy/microservice/home.md create mode 100644 versioned_docs/version-legacy/openfn-roadmap.md create mode 100644 versioned_docs/version-legacy/portability-versions.md create mode 100644 versioned_docs/version-legacy/portability.md create mode 100644 versioned_docs/version-legacy/release-notes.md create mode 100644 versioned_docs/version-legacy/roadmap.md create mode 100644 versioned_docs/version-legacy/source-apps.md create mode 100644 versioned_docs/version-legacy/standards/digital-public-goods.md create mode 100644 versioned_docs/version-legacy/standards/global-goods.md create mode 100644 versioned_docs/version-legacy/standards/openhie.md create mode 100644 versioned_docs/version-legacy/style-guide.md create mode 100644 versioned_docs/version-legacy/writing-code.md create mode 100644 versioned_docs/version-legacy/writing-docs.md create mode 100644 versioned_sidebars/version-legacy-sidebars.json 
create mode 100644 versions.json diff --git a/docusaurus.config.js b/docusaurus.config.js index ab76aef37a3..479dbc80a74 100644 --- a/docusaurus.config.js +++ b/docusaurus.config.js @@ -60,6 +60,10 @@ module.exports = { type: 'localeDropdown', position: 'right', }, + { + type: 'docsVersionDropdown', + position: 'right', + }, { href: 'https://github.com/openfn/docs', position: 'right', @@ -139,6 +143,17 @@ module.exports = { sidebarPath: require.resolve('./sidebars-main.js'), routeBasePath: '/documentation', editUrl: 'https://github.com/openfn/docs/edit/main', + lastVersion: 'legacy', + versions: { + current: { + banner: 'unreleased', + label: "Lightning 🚧" + }, + 'legacy': { + banner: 'none', + label: "Legacy" + }, + }, }, blog: { showReadingTime: true, diff --git a/versioned_docs/version-legacy/CNAME b/versioned_docs/version-legacy/CNAME new file mode 100644 index 00000000000..b2612e1869d --- /dev/null +++ b/versioned_docs/version-legacy/CNAME @@ -0,0 +1 @@ +docs.openfn.org \ No newline at end of file diff --git a/versioned_docs/version-legacy/about-lightning.md b/versioned_docs/version-legacy/about-lightning.md new file mode 100644 index 00000000000..54b7ab3423f --- /dev/null +++ b/versioned_docs/version-legacy/about-lightning.md @@ -0,0 +1,387 @@ +--- +title: Lightning (Beta) +sidebar_label: Lightning +--- + +## Introducing Lightning + +[OpenFn/Lightning](https://github.com/OpenFn/lightning/) is the v2 of the OpenFn +integration software: a _fully open source_ workflow automation platform +designed for governments and NGOs who need a flexible solution to integrate and +connect _any system_. + +##### Leveraging the tech powering the field-tested enterprise OpenFn platform... + +Lightning brings together the tried and tested technology which we have been +using since 2015 (the OpenFn +[Integration Toolkit](/documentation/getting-started/integration-toolkit)) to +manage the orchestration and execution of integrations in a stable, scalable and +secure way. + +##### ...and providing a fully open source web app with a user-friendly web interface. + +A fully open source web app, it can be deployed anywhere through Docker and +comes with a user-friendly, low-code interface with the full functionality +needed for organizations to build, run and audit their workflows all in one +place. + +![Lightning](/img/lightning_build_run_audit.png) + +### Build + +Empower more users in your organization to have a say in what gets automated and +how. Lightning’s visual interface makes workflows more intelligible to +non-technical users, bridging the gap between the IT specialists that build out +automations and program managers that are the real business/ program experts on +the processes that need automating. + +![Lightning build interface](/img/lightning_build.png) + +### Audit + +Treat every workflow run with the care and attention it deserves. In OpenFn, +each incoming request or transaction that gets processed is more than a piece of +data - it represents a vulnerable child in need of critical support, a farmer +managing their savings to make sure they can afford the next harvest. Lightning +provides users with a dashboard that allows them to monitor the health of their +integrations to make sure no request goes unprocessed. 
+ +![Lightning audit interface](/img/lightning_audit.png) + +## Features + +##### General + +- Deploy Lightning via docker +- Create and delete user accounts +- Create new projects and assign users with different access levels to these + projects (owner/admin/editor/viewer) +- Transfer credential ownership to another user +- View an audit trail of all credential changes (superuser role) +- Set up SSO via an identity provider +- Generate and revoke API tokens +- List projects, jobs and runs via JSON API + +##### Workflow builder + +- Create a new workflow with a webhook or cron trigger +- Create and configure jobs for a workflow with any OpenFn adaptor and operation +- Create credentials through a form +- View all available operations for a given adaptor +- View the metadata from your external system (DHIS2 and Salesforce) +- View the input and output from the last run of each job in a workflow +- Run a job manually + +##### Runs history + +- View all runs grouped by workflow +- Search and filter runs by status, workflow and run logs +- Retry a workflow run from the start (first job) + +##### Project settings + +- Get notified via email on run failure +- Receive a daily, weekly or monthly digest of project activity +- View collaborators for a project +- Update a project name and description + +## Roadmap + +See the [Lightning Roadmap](/documentation/openfn-roadmap) for a detailed list +of features that are in the backlog, planned, and/or in development for the +OpenFn Digital Public Good. + +_You can follow our progress and track delivered features in our +[changelog](https://github.com/OpenFn/Lightning/blob/main/CHANGELOG.md)._ + +## Try it out + +:::danger Please note + +Lightning is still in Beta. + +::: + +You have 3 options for exploring OpenFn/Lightning: + +1. For quick viewing, visit [demo.openfn.org](https://demo.openfn.org/) and log + into our demo account with username: `demo@openfn.org` password: + `welcome123`. (NOTE that any changes made here are lost when the demo resets + every 24 hours. I.e., don't build things you'd like to keep.) +2. To get your own account and start building non-production workflows, register + for an account at [app.openfn.org](https://app.openfn.org/). +3. To install and run Lightning locally follow the instructions in the + [github README](https://github.com/OpenFn/Lightning). + +Go through the +[self-paced user interview](/blog/2023/04/13/lightning-beta#take-15-minutes-to-carry-out-our-user-test) +to learn how OpenFn Lightning works _and_ help us out with feedback in just 15 +minutes. + +## Guiding principles + +Lightning is developed in line with the +[principles for digital development](https://digitalprinciples.org/principles/) +and under the guidance of it's Open Source Steering Committee which you can read +about [here](https://openfn.github.io/governance/OSSC.html). + +On top of this, Lightning follows 4 key principles which determine how it should +be developed: + +### 1. Standards and compliance matter + +Lightning is part of the OpenFn Integration Toolkit which is a certified Digital +Public Good. It is fully open source and even has an Open Source Steering +Committee to make sure our users can influence the roadmap. + +Lightning workflows can be used to automatically enforce and apply data exchange +standards, such as FHIR and ADX. Lightning's design and roadmap are driven by +open standards, and will therefore provide a GovStack- and OpenHIE-compliant +workflow engine. 
Learn more via the following resources: + +- [Watch this video](https://youtu.be/PTRRZBYtqyc) to learn how OpenFn is an + OpenHIE-compliant workflow engine. +- Check out OpenFn's entry in the + [OpenHIE Reference Technologies page](https://wiki.ohie.org/display/documents/Reference+Technologies). +- Explore the OpenFn-Instant OpenHIE + [reference demo implementation](/documentation/instant-openhie). +- Learn more about the GovStack + [Workflow Building Block](https://govstack.gitbook.io/bb-workflow/2-description) + specification. + +### 2. Interoperability is an ongoing process + +Anyone that has worked on integration projects in the past is well aware that +integrations do break. No matter how well designed they are, the fact is that +**they connect multiple systems that all change over time**: new API versions +get released, data models change, IDs, codes and mappings change, data standards +are updated and the processes themselves evolve. This is why Lightning will +include: + +Enhanced testing and debugging: + +- Save data from workflow runs as test data for robust workflow testing of edge + cases +- View the input and output for each step in a workflow to easily identify where + an error occurred +- Throw custom errors to improve API messages (adaptors) +- Add custom logic to handle a workflow step failure (fail triggers) + +First of class monitoring: + +- Get notified on run failures +- View the status of every run +- Search workflow runs by input/output data and logs +- Filter workflow runs by status, workflow name and date + +Re-processing functionality: + +- Bulk reprocess workflow runs after updating workflow steps to course-correct + if a workflow has been running with flawed logic + +### 3. Collaboration is key + +On one hand, the users that understand what processes need automating are (more +often than not) business analysts, not developers. They’re the experts on what +needs to happen when and where, and they’re very capable of planning out +integrations and putting together mapping specifications and bpmn flows. + +On the other hand, integrations often require custom logic that cannot be +simplified through low-code and therefore must be implemented by software +engineers. + +That’s why **the best integrations are built when non-technical users and +developers collaborate**. 
Lightning is being developed to bridge the gap between +non-technical and technical users through: + +Intuitive, user-friendly user interface for non-technical users: + +- Understand a workflow in a visual, human-readable format (abstract away from + code to make workflows understandable to non-technical users) +- Build credentials through a form interface (remove the need to read through + confusing API documentation) +- Build API requests through a form interface +- Save mappings used in workflows as constants so they can be easily viewed and + edited without needing to read code +- Clear documentation for users to learn how to plan and build integrations + +Projects-as-code and CLI for a developer interface: + +- Export, import and configure projects as code in the code editor of your + choice +- Run, test and deploy projects through a command line interface +- Review and track changes through version control + +Collaboration functionality: + +- Track changes through version control +- Rollback to a previous version +- Get notified when a workflow is changed +- Share a link to a specific workflow error on the runs history page +- Share a link to a specific workflow step within the builder +- Add collaborators as view-only user or editor to a project +- Audit all changes made to credentials + +### 4. It’s not "just" a request or a piece of data, it’s a person + +OpenFn specializes in integration tooling for the health and humanitarian +sector. This means that behind every piece of data which comes in through a +request lies a person in need of critical services. This is why Lightning +focusses on: + +Accountability: + +- Credential audit trail +- Version control + +Security: + +- Secure credential management (encrypted at REST, credential secrets are + scrubbed from logs, secure credential sharing +- Zero-retention pipelines +- Role-based project access +- Additional authentication rules for webhooks + +## Security + +OpenFn treats security as a top priority, and is trusted to handle information +of the most sensitive nature (for example UNICEF’s child case data). + +To increase transparency and accountability around security, as well to help +other digital public goods think through key aspects of their own organizations’ +security postures, below is a list of the **key aspects of our own security +program**. + +### Organizational security practices + +To ensure a positive security posture at OpenFn, we: + +- Conduct Employee IT security onboarding training & policy +- Run monthly security standups with the whole team +- Conduct an annual security review informed by the OWASP ASVS + +### DevSecOps + +To ensure best practices in our code we: + +- Monitor dependency vulnerabilities via Github’s + [dependabot](https://github.com/features/security) +- Perform static code analysis on each commit with + [Sobelow](https://sobelow.io/) +- Ensure code is clean and standardised through preflight checks +- Monitor code coverage of unit tests and integration tests with Codecov + +### Roles and permissions + +Lightning provides identity and access management for users via various roles +and permissions which determine what level of access they have for resources +across projects and instances (i.e., deployments). + +Lightning has 2 types of access levels: + +1. Instance-wide access levels are managed via an attribute on the `user` + object: + +- **Superusers** are the administrator of the Lightning instance. 
They can + manage projects and users, configure authentication providers and view the + audit trail. +- **Users** are normal Lightning users. They can manage their own account and + credentials, and have access to projects they are added to. + +2. Project-wide access levels + +- A project **viewer** can view the resources of a project in read-only mode and + configure their own project digest and failure alerts. +- A project **editor** can view, create and edit the jobs and workflows of a + project they have access to, as well as run and rerun jobs. +- A project **admin** has administration access to project members. They can + edit the name and description as well as delete a project. +- A project **owner** can delete a project. + +### Application security + +Lightning is designed to: + +- Scrub credential data from run logs +- Encrypt credentials at REST +- Track credential changes through an audit trail +- Encrypt passwords +- Enforce access controls with deny by default +- Allow users to differentiate between staging and production credentials +- Enable secure credential transfer across users +- Purge credentials and user data on account deletion +- Allow administrators to configure SSO through an identity provider + +### Data residency + +OpenFn Lightning is fully open source and can be deployed in any country. We +offer high-availability managed deployments that are localized to any GCP or AWS +location—guaranteeing that no data ever leaves the selected country. + +### Implementation guidance and recommendations + +To help our users adopt best practices when it comes to the design of their +integrations, we’ve published a +[Security Guidebook for data integration implementations](/documentation/getting-started/security). + +## Get involved + +We are building out in the open, follow our progress on +[Github](https://github.com/OpenFn/lightning) by clicking ‘Watch’ to track +updates and new releases. Ongoing discussions with our Open Source Steering +Committee about Lightning are documented on our +[community forum](https://community.openfn.org/c/ossc/15). Your feedback and +comments are welcome there. If you would like to become a beta user or learn +more about Lightning, book in a call with our product manager here: +https://calendly.com/amber-openfn/short-call. + +![Lightning preview](/img/lightning_preview.png) + +## Lightning FAQ + +#### I can see that Lightning was built recently, is it new? And if so, how can I trust it? + +The Lightning repository may be new, but the technology isn’t. We’ve built out +Lightning by porting the tried and tested code from our proprietary platform. In +other words, Lightning is built with code that has been used in production by +governments and NGOs since 2015 and already handles tens of millions of +transactions a year. Software becomes more robust over time - the more it’s +used, the more edge cases are uncovered and bugs fixed. Over the past 7 years, +every time a bug has come up in our platform, we’ve fixed it and added a test +for it. By bringing over the same tests from platform to Lightning, we’re +essentially guaranteeing the same level of robustness by taking into account +every single edge case or bug that we have ever encountered. + +#### If Lightning was built by open-sourcing code from the OpenFn platform, how is it different? + +Under the hood, Lightning is the same as the OpenFn platform. 
Integrations are +made up of the same building blocks of triggers, adaptors and job expressions; +requests are executed, retried and reprocessed in exactly the same way. + +What changes in Lightning is how users _build and monitor_ their integrations. + +#### Can I run anything from the OpenFn platform in Lightning? + +Yes, integrations built out on the OpenFn platform are fully compatible with +Lightning. + +#### Who is Lightning for? + +Lightning is for anyone in the government or NGO space that needs to integrate +different systems. + +#### What will I lose by switching from platform to Lightning? + +Right now: version control, authentication rules on webhooks, and the other +features in our roadmap (we’re still in beta). + +Later: nothing - if a feature has proven important to our platform users, it +will be available in Lightning. If there is any feature you require in Lightning +to be able to switch over to it, speak up ! You can reach out to our product +manager Amber via [email](mailto:amber@openfn.org) or even better book some time +with her through her [calendar](https://koalendar.com/e/amber-rignell-openfn). + +#### When will Lightning Beta be ready? + +Lightning is currently in private Beta. You can register for an account on +[app.openfn.org](https://app.openfn.org/). diff --git a/versioned_docs/version-legacy/about.md b/versioned_docs/version-legacy/about.md new file mode 100644 index 00000000000..cff27eec12a --- /dev/null +++ b/versioned_docs/version-legacy/about.md @@ -0,0 +1,17 @@ +--- +title: About +--- + +## Open Function Group + +Open Function Group is a team of ICT4D specialists that have been working +exclusively in data integration, automation and interoperability since 2014. + +We maintain OpenFn.org, the sector's leading integration platform as a service, +and a huge number of open-source workflow automation, data integration, and "ETL" tools which you can find on our [Github](https://www.github.com/openfn). + +The platform is trusted by some of the leading development organizations in the +world, including UNICEF, the WHO, the IRC, and Population Council. + +You can learn more about the people at Open Function Group +[here](https://www.openfn.org/leadership). diff --git a/versioned_docs/version-legacy/build/credentials.md b/versioned_docs/version-legacy/build/credentials.md new file mode 100644 index 00000000000..df94f9825c7 --- /dev/null +++ b/versioned_docs/version-legacy/build/credentials.md @@ -0,0 +1,34 @@ +--- +title: Credentials +--- + +## Credentials + +Credentials are used to authorize connections to destination systems. In the +future, our adaptors will use credentials to fetch meta-data from source and +destination applications and make the job writing process easier. + +Some systems (Salesforce, OpenMRS, DHIS2) require an instanceUrl, host, or +ApiUrl. Leave off the final "/" in these Urls: `https://login.salesforce.com` or +`http://demo.openmrs.org/openmrs` or `https://play.dhis2.org`. + +Credentials can only be viewed, or edited by a single user — their "owner" (or +the person that created that credential). All the collaborators on a particular +project can choose those credentials for use when defining a job. + +There are two special types of credentials, in addition to the myriad standard +application-specific and authentication protocol-specific credentials. + +### Raw Credentials + +Raw credentials are valid JSON documents which are passed into a job's runtime +state. 
Note that owners of these credentials will be able to view them, in their +entirety, in the clear. + +### Keychain Credentials + +Keychain credentials allow for a single job to make use of multiple credentials. +They work by inspecting the data in the job's runtime state (i.e., `state.data`) +and checking for the value of a predetermined identifier. Based on that value, +present in the data for a given source message, for example, _another_ +credential will be selected and applied for that particular job run. diff --git a/versioned_docs/version-legacy/build/example-build.md b/versioned_docs/version-legacy/build/example-build.md new file mode 100644 index 00000000000..b182ca7486e --- /dev/null +++ b/versioned_docs/version-legacy/build/example-build.md @@ -0,0 +1,224 @@ +--- +title: Platform Quick-Start (v1) +--- + +Learn how to set up a simple data integration using the OpenFn platform. If you +get stuck along the way, post a question to our +[community forum](https://community.openfn.org/) so we can give you a hand. + +In this walkthrough, we’ll connect a **KoboToolbox** form to **Google Sheets**. +If you don’t have a KoboToolbox account, we'll provide you with a demo account +you can use for the tutorial or you can create one for free. + +We’ll be completing the following steps: + +1. Identify your source and destination system +2. Create a project and send data from your source system to your OpenFn inbox +3. Create credentials to connect your destination system +4. Create a your job + +## 1. Identify your source and destination system + +The best way to figure out what an integration flow should look like is to +phrase it in the following way: When A happens **[in system 1]**, I want B to +happen **[in system 2]**. + +_When a ‘Case registration’ form is submitted [in KoboToolbox], I want the +response to be inserted into my ‘Kobo case registrations’ sheet [in Google +Sheets]._ + +This tells us that system 1 (KoboToolbox) is our source application, and system +2 (Google sheets) is our destination system. + +## 2. Create a project and send data from your source system to your OpenFn inbox + +First, create an OpenFn [account](https://www.openfn.org/signup) or +[login](https://www.openfn.org/login). Navigate to your **Project dashboard** - +you'll see that a sample project has been created for you. + +Create a new project called ‘Kobo case registrations’ by clicking on the blue + +icon at the bottom right hand corner of your dashboard. + +![new account dashboard](/img/2.1_new_account_dashboard.png 'Create a new project') + +When you click 'View' to enter your project space, you'll be taken to your +**inbox**. This is where you will receive **messages** - the data that gets sent +from your source system to OpenFn. Copy your **inbox url** to configure +KoboToolbox to send data to it. + +![inbox url](/img/2.2_inbox_url.png 'Copy your inbox URL') + +[Log into](https://kf.kobotoolbox.org/accounts/login/#/) our KoboToolbox demo +account with _username: openfn_demo and password: openfn_demo_. Select the form +you’d like to connect (if using our demo account this will be 'COVID 19 case +registration') and go to Settings -> REST services -> Register a new service. + +![kobo](/img/2.3_kobo_rest.png 'Register a REST service with Kobo') + +Set the service name to OpenFn and the URL to your project inbox url. 
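+
+(Optional) If you would like to confirm that your inbox endpoint is reachable
+before wiring up KoboToolbox, you could send a hand-made test request, for
+example from Node.js 18+ which ships with `fetch`. This is only a sketch: the
+UUID below is a placeholder, so swap in your own inbox URL. As described in the
+Inbox documentation, OpenFn replies with a `202` and the payload then appears
+as a message in your inbox.
+
+```js
+// Hypothetical smoke test: POST a fake submission to YOUR inbox URL.
+const inboxUrl = 'https://www.openfn.org/inbox/your-project-uuid-here';
+
+fetch(inboxUrl, {
+  method: 'POST',
+  headers: { 'Content-Type': 'application/json' },
+  body: JSON.stringify({ test: true, source: 'manual smoke test' }),
+}).then(response => console.log(response.status)); // expect 202
+```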
+ +![kobo](/img/2.4_kobo_rest.png 'Set the REST service URL to your OpenFn inbox URL') + +Your form should now be configured to send data to your OpenFn project inbox +whenever a response is submitted. We can test this out by submitting some form +responses at Form -> Open. + +![kobo form](/img/2.5_open_kobo_form.png 'Open a kobo form') + +Return to your project inbox. You should see a new message there, which contains +the data submitted in the KoboToolbox form response. + +![inbox](/img/2.6_inbox.png 'View inbound messages in your inbox') + +If you click on the message, and open up the **message body** you’ll see the +data that you submitted to the form. To view the entire message, open it in full +screen. + +![message body](/img/2.7_message.png 'Open up a message body') + +Once you can see the entire message, you need to identify a data point that will +be the same for every submission. In this case, we know that all of our messages +will have the same form ID. Save the snippet you have identified +(`"\_xform_id_string": "aDReHdA7UuNBYsiCXQBr43"`), you'll need it later to +create your trigger. + +![common data point](/img/2.8_common_data_point.png 'Identify a common snippet for all your messages') + +## 3. Create credentials to connect your destination system + +In order to connect to your destination system, you need to sign in through +OpenFn to create credentials. These will allow you to send data to your google +sheet. + +Head to the credentials section of your dashboard, and once again click the +blue + sign to create new credentials. + +![create credentials](/img/3.1_create_credentials.png 'Create credentials to connect your external system') + +You’ll see various apps you recognise - these are all of the systems that we can +handle credentials for. Select the `Sheets` one, and log into your google +account when you get the pop up window. You’ll get a confirmation message. Close +the window and give your new project access to these credentials. + +![select credential type](/img/3.2_select_credential_type.png 'Select a credential type') + +You’ve now created credentials that will allow you to perform operations in +google sheets from within your job. + +## 4. Create a new job + +A job is a series of operations that formats and transfers data at a given time. +It needs a trigger, which determines when these operations should happen, and an +expression, which determines what should be done with the incoming data and +where it should go. + +Navigate to the jobs section in your dashboard, then click the + icon to create +a new job. + +![new job](/img/4.1_new_job.png 'Create a new job') + +Give the job a name (we’ll make ours “Kobo to sheets”). + +### 4.1 Create a new trigger + +Every job needs a trigger, which determines when it should be run. A **message +filter** is a type of trigger which allows you to trigger a job when a specific +message comes into your inbox. + +In this example, you want your job to be triggered by any message that has come +from the COVID 19 registration KoboToolbox form. Therefore the inclusion +criteria is the id string of the form which we saved earlier on: +`{"\_xform_id_string": "aDReHdA7UuNBYsiCXQBr43"}`. _(Don’t forget to add curly +brackets "{}" around your inclusion criteria snippet.)_ This is found in the +message body sent by each submitted form response to your inbox. + +This message filter will trigger your job whenever a message which includes the +snippet comes into your inbox. 
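+
+To make the matching concrete, here is a sketch of a message body that _would_
+trigger this job. The sample values are invented and your form's question names
+will differ; the filter only checks for the `_xform_id_string` pair, and the
+rest is simply the submitted form data (which the job later reads from
+`state.data`):
+
+```js
+// Hypothetical inbox message body for one matching form submission
+const matchingMessage = {
+  _xform_id_string: 'aDReHdA7UuNBYsiCXQBr43',
+  National_ID: '12345',
+  First_Name_of_Patient: 'Amina',
+  Last_Name_of_Patient: 'Diallo',
+};
+```
+
+A submission from a different form would carry a different `_xform_id_string`,
+so it would still land in your inbox but would not trigger this job.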
+ +![new trigger](/img/4.2_new_trigger.png 'Create a new trigger') + +Save your trigger. You should see a confirmation message “Found x matching +messages”. To see the data from your last message inside the +[initial state](/documentation/jobs/state/#initial-state), drag the +**Expression** panel to the right. + +![trigger message](/img/4.3_trigger_message.png 'View a matching trigger message in initial state') + +### 4.2 Select an API adaptor + +Adaptors are preconfigured pieces of code that allow communication with +destination systems. + +In this example, you will send data collected from individual responses to your +kobo form (append values) to google sheets. + +Your API adaptor is therefore google sheets. + +### 4.3 Choose your adaptor operation + +Every adaptor allows you to perform different operations in your destination +system. These operations are functions specific to every API adaptor. + +Open up the inline documentation for the adaptor to see the available functions. +Copy the appendValues function, then paste it into your Expression editor. It +should look something like this. + +![adaptor operation](/img/4.4_adaptor_operation.png 'Choose an adaptor operation') + +### 4.4 Edit the function in your expression editor + +The function you copy pasted into your expression editor is a template that +shows you what your function should look like. This means the text in quotation +marks are just placeholders - they need to be replaced with the data entries you +want to send. + +First, get your spreadsheet ID from the URL of your google sheet (between `d/` +and `/edit`). + +![sheets ID](/img/4.5_sheets_id.png 'Find a google sheets ID') + +Copy and paste the ID into your `appendValues` operation to replace the +placeholder value for `spreadsheetId`. This ensures your values get appended to +the correct spreadsheet. + +Next, open up the initial state to select each form value you want to send. +Let’s start with the ‘National ID’, as this is the first column in your google +sheet. Select the desired input from the dropdown menu located in the initial +state window and paste it to replace the placeholder text ('From expression') +inside `values: []`. Repeat this for the following values, and remove line 7 as +this would add a second row to your sheet. + +![select values](/img/4.6_select_values.png 'Select values from initial state') + +Your operation should now look like this: + +```js +appendValues({ + spreadsheetId: '1zFcE05jGLYouXDpevdYQO81ejBWz7hn0ahEOg2gs9fw', + range: 'Sheet1!A1:E1', + values: [ + [ + dataValue('National_ID'), + dataValue('First_Name_of_Patient'), + dataValue('Last_Name_of_Patient'), + ], + ], +}); +``` + +Click `Save and run` to get a ‘Success!’ response in the `run logs` and see that +the data entries between the square brackets [ ] have been added to your google +sheet. + +![save and run](/img/4.7_save_and_run.png 'Save and run a job') + +## 5. Set autoprocess to true + +You have now written and tested your job. In order to run your job automatically +every time a message matches the trigger inclusion criteria, turn on +auto-process. + +![autoprocess](/img/5.1_autoprocess.png "Enabling 'autoprocess' for a job") + +You're all set! Try out your job by submitting another form response to see the +data automatically populate your google sheet. 
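+
+One last, optional refinement: if some of your form questions are not required,
+a submission may arrive without one of the values referenced above. The sketch
+below (adjust the paths to your own form) guards against that by wrapping each
+value in a function of `state`, the same pattern used elsewhere in these docs,
+and falling back to an empty string so the row still lines up with your
+columns:
+
+```js
+appendValues({
+  spreadsheetId: '1zFcE05jGLYouXDpevdYQO81ejBWz7hn0ahEOg2gs9fw',
+  range: 'Sheet1!A1:E1',
+  values: [
+    [
+      // Each entry is resolved against state, just like dataValue(...) above,
+      // but returns '' if the question was left blank.
+      state => dataValue('National_ID')(state) || '',
+      state => dataValue('First_Name_of_Patient')(state) || '',
+      state => dataValue('Last_Name_of_Patient')(state) || '',
+    ],
+  ],
+});
+```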
diff --git a/versioned_docs/version-legacy/build/inbox.md b/versioned_docs/version-legacy/build/inbox.md new file mode 100644 index 00000000000..389ae51b8d8 --- /dev/null +++ b/versioned_docs/version-legacy/build/inbox.md @@ -0,0 +1,82 @@ +--- +title: The Inbox +sidebar_label: Your Inbox +--- + +## How it works + +On the platform, each project has their own unique inbox URL, something like +`https://www.openfn.org/inbox/54804f1a-4a70-4392-97cb-1f350e98e9c8`. That big +string of numbers and letters is called a `UUID`. It's your address, and the +"place" on the web that you'll send data for processing by OpenFn if you're +doing real-time or "event-based" integration. + +Your project will always be listening, and whenever an HTTP request is received +at that URL, we'll respond with a `202/Accepted` and start processing the data +sent either in the `body` or the `parameters` of that request. + +## `202/Accepted vs 201/Created` + +You've probably heard of `200/OK` or other common "status codes", but the +difference between a `201` and a `202` is very interesting from an integration +perspective. + +The `201/Created` means that we've completed processing whatever data was sent +to us by the requester. Usually, this response is accompanied by a payload with +a new `id` for whatever resource what created. This is _not_ what OpenFn does, +instead we send a `202/Accepted` indicating that your request was acceptable and +we'll get to work. + +:::tip + +OpenFn sends a `202/Accepted` indicating that your request has passed our +initial validation (i.e. the data is valid `JSON` or parseable `XML` and the +inbox URL exists) and that we've enqueued it for processing. + +::: + +Behind the scenes, we've now a system of simple, durable queues that ensure we +don't "drop" this event at any point in time from here on forward. + +1. We'll load it into the database and soon it will appear as a new "message" + record in your "Inbox" page. +2. We'll check the triggers for all the active jobs in your project and if it + matches one of those triggers we'll send it to another queue for job running. +3. We'll make sure your project is configured properly and that you haven't + exceeded your usage limits. +4. We'll start executing a job run, which may itself may hundreds of unique HTTP + requests to other endpoints. +5. And finally we'll report back on the status of that run and soon it will + appear as a new "run" in your "Activity History" page. + +Depending on how many requests your job makes, how much data is being processed, +and the response time of your other systems, all of this could take quite some +time—anywhere from `200ms` to `20 minutes`! + +If the system that sends the data to OpenFn needs to know whether all the +operations in step 4 completed successfully (what do you count as a success with +these various custom actions, by the way?) you should consider implementing a +SAGA pattern, whereby after all this processing is complete you trigger another +request back to the initial system reporting on the downstream tasks. This can +be done in OpenFn with [Flow Triggers](/documentation/jobs/multiple-operations). + +## Synchronous vs. Asynchronous Processing + +On **OpenFn/platform**, processing is asynchronous by default. Multiple complex workflows may be initiated, error handling and notifications all happen downstream. +1. If you send data to OpenFn Inbox, you'll receive a `202` if successful (and `502` if we didn't receive your data/bad request). +2. 
We'll then load it into the database and soon it will appear as a new "message" + record in your "Inbox" page. +3. We'll check the triggers for all the active jobs in your project and if it + matches one of those triggers we'll send it to another queue for job running. +4. We'll make sure your project is configured properly and that you haven't + exceeded your usage limits. +5. We'll start executing a job run, which may itself make hundreds of unique HTTP + requests to other endpoints. +6. _If you want to then send an update back to the source system... you may configure another job to send requests and updates back to the triggering source system._ + +In **OpenFn/microservice** or using open-source tools, you could create a synchronous system. We've created a way to set up inbox endpoints as +"synchronous", meaning they'll actually hold a connection open _until_ all of +the processing above is completed, and then respond with a `2XX`, `4xx`, or +`5XX`. This is not recommended for high volume systems, but may be a requirement +for some implementations; the spirit of **OpenFn/microservice** is to give as much +control as possible to whoever is deploying it on their servers. diff --git a/versioned_docs/version-legacy/build/jobs.md b/versioned_docs/version-legacy/build/jobs.md new file mode 100644 index 00000000000..6ae92379689 --- /dev/null +++ b/versioned_docs/version-legacy/build/jobs.md @@ -0,0 +1,802 @@ +--- +title: Introduction to Jobs +--- + +A job defines the specific series of "operations" (think: tasks or database +actions) to be performed when a triggering message is received (event-based), +another run finishes (flow- or catch-based) or a pre-scheduled (and recurring) +time is reached. + +## The properties of a job + +- `Name` - a human-readable name describing the series of operations +- `Project` - the project the job belongs to +- `Trigger` - the trigger that is used to automatically initiate a run of the + job +- `Adaptor` - the adaptor that is used to provide tool-specific functionality + for this job (e.g., `language-dhis2` or `language-commcare`) +- `Auto-process?` - a true/false switch which controls whether the trigger + should be used to automatically run this job when its criteria are met +- `Expression` - the job "script" itself; a sequence of operations + +## Adaptors + +We've got a whole section on creating new +[Adaptors](/adaptors), but the critical thing to be aware of +when writing a job is that you've got to choose an **adaptor**, and an **adaptor +version**. + +All of the discussion below of helper functions like `create` or `findPatient` +requires some understanding of adaptors. When you run a job, you're borrowing a +layer of functionality that's been built to connect with some specific API, type +of API, or database. + +For example, `create` means one thing in `language-salesforce` and another thing +entirely in `language-dhis2`. For this reason, before you can begin writing a +job you have to decide which `adaptor` to work with. + +### Adaptor Versions + +Adaptors change over time. They're open source, and we encourage as much +contribution as possible—releasing new versions for use on OpenFn.org as soon as +they pass our security reviews. New features may be added and bugs may be fixed, +but in order to make sure that an existing integration is not broken, we +recommend that you select a specific version (rather than using the +"auto-upgrade" feature) when you choose an adaptor. The highest released version +is the default choice here. 
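+
+As a rough illustration of why the adaptor (and adaptor version) you pick
+matters, here is a sketch, using helper signatures that appear further down
+this page, of how the "same" step might be written against two different
+systems. Treat the IDs and paths as placeholders rather than working examples:
+
+```js
+// With language-salesforce, a job might create a Salesforce record:
+create('Patient__c', fields(field('Name', dataValue('form.surname'))));
+
+// With language-dhis2, the equivalent step might register a DHIS2 event
+// (program and orgUnit are illustrative IDs):
+event(
+  fields(
+    field('program', 'eBAyeGv0exc'),
+    field('orgUnit', 'DiszpKrYNg8'),
+    field('eventDate', dataValue('form.date'))
+  )
+);
+```
+
+The expression syntax is the same in both cases; what changes is the set of
+helper functions each adaptor, at each version, makes available.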
+ +:::tip + +The _first 4 lines_ in the log of any run on OpenFn will tell you what adaptor +you're running (as well as the version of core and Node.js). This is incredibly +important, particularly if you're trying to troubleshoot jobs in various +environments (like your own shell, OpenFn.org, OpenFn/microservice, etc.). + +::: + +Pay careful attention to which `version` you're using to write a job. Consider +the following run logs: + +```sh +╭───────────────────────────────────────────────╮ +│ ◲ ◱ @openfn/core#v1.3.12 (Node.js v12.20.1) │ +│ ◳ ◰ @openfn/language-http#v2.4.15 │ +╰───────────────────────────────────────────────╯ +...more logs here... + +Finished. +``` + +Note that here, OpenFn/core version `1.3.12` is running on Node.js `12.20.1` and +using `@openfn/language-http#v2.4.15` which might have very different helper +functions from `@openfn/language-http#v3.1.5`. + +:::info + +See [the npm section](/adaptors#install-on-platform-via-npm) +on the adaptors docs page to learn how to install an adaptor from `npm` while +using `platform`. + +::: + +### Upgrading to newer adaptor versions + +While it may be beneficial to upgrade as part of your routine maintenance, these +upgrades should be carefully tested. Most often, customers upgrade to a new +adaptor version for an existing job when they are making business-driven changes +to that job. Some business-driven changes may actually _require_ upgrading the +version in order to use a new feature from the adaptor. Even if those changes +don't require an upgrade, if the technical team must spend time testing +job-specific changes anyway, it may be an ideal opportunity to also test an +upgrade. + +Adaptors follow [SEMVER](https://semver.org/) so you can be reasonably assured +that upgrading from `x.1.z` to `x.2.z` will not lead to existing job code +failing, but an upgrade from `3.y.z` to `4.y.z` may—in SEMVER _major_ upgrades +(those that change the first number in the `x.y.z` version number) have +"breaking" or "non-backwards compatible" changes. + +## Composing job expressions + +In most cases, a job expression is a series of `create` or `upsert` actions that +are run after a message arrives, using data from that message. It could look +like this: + +### A basic expression + +```js +create( + 'Patient__c', + fields( + field('Name', dataValue('form.surname')), + field('Other Names', dataValue('form.firstName')), + field('Age__c', dataValue('form.ageInYears')), + field('Is_Enrolled__c', true), + field('Enrollment_Status__c', 3) + ) +); +``` + +That would create a new `Patient__c` in some other system. The patient's `Name` +will be determined by the triggering message (the value inside `form.surname`, +specifically) and the patient's `Is_Enrolled__c` will _always_ be `true`. See +how we hard coded it? + +What you see above is OpenFn's own syntax, and you've got access to dozens of +common "helper functions" like `dataValue(path)` and destination specific +functions like `create(object,attributes)`. While most cases are covered +out-of-the-box, jobs are **evaluated as Javascript**. This means that you can +write your own custom, anonymous functions to do whatever your heart desires: + +### dataValue + +The most commonly used "helper function" is `dataValue(...)`. This function +takes a single argument—the _path_ to some data that you're trying to access +inside the message that has triggered a particular run. 
In the above example, +you'll notice that `Is_Enrolled__c` is _always_ set to `true`, but `Name` will +change for each message that triggers the running of this job. It's set to +`dataValue('form.surname')` which means it will set `Name` to whatever value is +present at `state.data.form.surname` for the triggering message. It might be Bob +for one message, and Alice for another. + +:::note + +Note that for message-triggered jobs, `state` will always have it's `data` key +(i.e., `state.data`) set to the body of the triggering message (aka HTTP +request). + +I.e., `dataValue('some.path') === state.data.some.path`, as evaluated at the +time that the operation (`create` in the above expression) is executed. + +::: + +### An expression with custom Javascript + +```js +create( + 'Patient__c', + fields( + field('Name', state => { + console.log('Manipulate state to get your desired output.'); + return Array.apply(null, state.data.form.names).join(', '); + }), + field('Age__c', 7) + ) +); +``` + +Here, the patient's name will be a comma separated concatenation of all the +values in the `patient_names` array from our source message. + +## Available Javascript Globals + +For security reasons, users start with access to the following standard +Javascript globals, and can request more by opening an issue on Github: + +- [`Array`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array) +- [`console`](https://nodejs.org/api/console.html) +- [`JSON`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON) +- [`Number`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Number) +- [`Promise`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Promise) +- [`String`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/String) + +## Examples of adaptor-specific functions + +**N.B.: This is just a sample.** There are lots more available in the +language-packs. + +### language-common + +- `field('destination_field_name__c', 'value')` Returns a key, value pair in an + array. + [(source)](https://github.com/OpenFn/language-common/blob/master/src/index.js#L248) +- `fields(list_of_fields)` zips key value pairs into an object. + [(source)](https://github.com/OpenFn/language-common/blob/master/src/index.js#L258) +- `dataValue('JSON_path')` Picks out a single value from source data. + [(source)](https://github.com/OpenFn/language-common/blob/master/src/index.js#L71) +- `each(JSON_path, operation(...))` Scopes an array of data based on a JSONPath + [(source)](https://github.com/OpenFn/language-common/blob/master/src/index.js#L194). + See beta.each when using multiple each()'s in an expression. +- `each(merge(dataPath("CHILD_ARRAY[*]"),fields(field("metaId", dataValue("*meta-instance-id*")),field("parentId", lastReferenceValue("id")))), create(...))` + merges data into an array then creates for each item in the array + [(source)](https://github.com/OpenFn/language-common/blob/master/src/index.js#L272) +- `lastReferenceValue('id')` gets the sfID of the last item created + [(source)](https://github.com/OpenFn/language-common/blob/master/src/index.js#L96-L100) +- `function(state){return state.references[state.references.length-N].id})` gets + the sfID of the nth item created + +#### each() + +Read more about each here: [The each(...) 
operation](/documentation/jobs/each) + +```js +each( + dataPath('csvData[*]'), + upsertTEI( + 'aX5hD4qUpRW', //piirs uid + { + trackedEntityType: 'bsDL4dvl2ni', + orgUnit: dataValue('OrgUnit'), + attributes: [ + { + attribute: 'aX5hD4qUpRW', + value: dataValue('aX5hD4qUpRW'), + }, + { + attribute: 'MxQPuS9G7hh', + value: dataValue('MxQPuS9G7hh'), + }, + ], + }, + { strict: false } + ) +); +``` + +#### beta.each + +```js +beta.each(JSON_path, operation(...)) +``` + +Scopes an array of data based on a JSONPath but then returns to the state it was +given upon completion +[(source)](https://github.com/OpenFn/language-common/blob/master/src/beta.js#L44). +This is necessary if you string multiple `each(...)` functions together in-line +in the same expression. (E.g., Given data which has multiple separate 'repeat +groups' in a form which are rendered as arrays, you want to create new records +for each item inside the first repeat group, then _RETURN TO THE TOP LEVEL_ of +the data, and then create new records for each item in the second repeat group. +Using `beta.each(...)` lets you enter the first array, create your records, then +return to the top level and be able to enter the second array. + +### Salesforce + +- `create("DEST_OBJECT_NAME__C", fields(...))` Create a new object. Takes 2 + parameters: An object and attributes. + [(source)](https://github.com/OpenFn/language-salesforce/blob/master/src/Adaptor.js#L42-L63) +- `upsert("DEST_OBJECT_NAME__C", "DEST_OBJECT_EXTERNAL_ID__C", fields(...))` + Creates or updates an object. Takes 3 paraneters: An object, an ID field and + attributes. + [(source)](https://github.com/OpenFn/language-salesforce/blob/master/src/Adaptor.js#L65-L80) +- `relationship("DEST_RELATIONSHIP_NAME__r", "EXTERNAL_ID_ON_RELATED_OBJECT__C", "SOURCE_DATA_OR_VALUE")` + Adds a lookup or 'dome insert' to a record. + [(source)](https://github.com/OpenFn/language-salesforce/blob/master/src/sourceHelpers.js#L21-L40) + +### dhis2 + +- `event(...)` Creates an event. + [(source)](https://github.com/OpenFn/language-dhis2/blob/master/src/Adaptor.js#L31-L60) +- `dataValueSet(...)` Send data values using the dataValueSets resource + [(source)](https://github.com/OpenFn/language-dhis2/blob/master/src/Adaptor.js#L62-L82) + +### OpenMRS + +- `person(...)` Takes a payload of data to create a person + [(source)](https://github.com/OpenFn/language-openmrs/blob/master/src/Adaptor.js#L31-L60) +- `patient(...)` Takes a payload of data to create a patient + [(source)](https://github.com/OpenFn/language-openmrs/blob/master/src/Adaptor.js#L62-L90) + +## Snippets and samples + +Below you can find some examples of block code for different functions and data +handling contexts. + +### Job expression (for CommCare to SF) + +The following job expression will take a matching receipt and use data from that +receipt to upsert a `Patient__c` record in Salesforce and create multiple new +`Patient_Visit__c` (child to Patient) records. 
+ +```js +upsert( + 'Patient__c', + 'Patient_Id__c', + fields( + field('Patient_Id__c', dataValue('form.patient_ID')), + relationship('Nurse__r', 'Nurse_ID_code__c', dataValue('form.staff_id')), + field('Phone_Number__c', dataValue('form.mobile_phone')) + ) +), + each( + join('$.data.form.visits[*]', '$.references[0].id', 'Id'), + create( + 'Visit__c', + fields( + field('Patient__c', dataValue('Id')), + field('Date__c', dataValue('date')), + field('Reason__c', dataValue('why_did_they_see_doctor')) + ) + ) + ); +``` + +### Accessing the "data array" in Open Data Kit submissions + +Notice how we use "each" to get data from each item inside the "data array" in +ODK. + +```js +each( + '$.data.data[*]', + create( + 'ODK_Submission__c', + fields( + field('Site_School_ID_Number__c', dataValue('school')), + field('Date_Completed__c', dataValue('date')), + field('comments__c', dataValue('comments')), + field('ODK_Key__c', dataValue('*meta-instance-id*')) + ) + ) +); +``` + +### ODK to Salesforce: create parent record with many children from parent data + +Here, the user brings `time_end` and `parentId` onto the line items from the +parent object. + +```js +each( + dataPath('data[*]'), + combine( + create( + 'transaction__c', + fields( + field('Transaction_Date__c', dataValue('today')), + relationship( + 'Person_Responsible__r', + 'Staff_ID_Code__c', + dataValue('person_code') + ), + field('metainstanceid__c', dataValue('*meta-instance-id*')) + ) + ), + each( + merge( + dataPath('line_items[*]'), + fields( + field('end', dataValue('time_end')), + field('parentId', lastReferenceValue('id')) + ) + ), + create( + 'line_item__c', + fields( + field('transaction__c', dataValue('parentId')), + field('Barcode__c', dataValue('product_barcode')), + field('ODK_Form_Completed__c', dataValue('end')) + ) + ) + ) + ) +); +``` + +> **NB - there was a known bug with the `combine` function which has been +> resolved. `combine` can be used to combine two operations into one and is +> commonly used to run multiple `create`'s inside an `each(path, operation)`. +> The source code for combine can be found here: +> [language-common: combine](https://github.com/OpenFn/language-common/blob/master/src/index.js#L204-L222)** + +### Create many child records WITHOUT a repeat group in ODK + +```js +beta.each( + '$.data.data[*]', + upsert( + 'Outlet__c', + 'Outlet_Code__c', + fields( + field('Outlet_Code__c', dataValue('outlet_code')), + field('Location__Latitude__s', dataValue('gps:Latitude')), + field('Location__Longitude__s', dataValue('gps:Longitude')) + ) + ) +), + beta.each( + '$.data.data[*]', + upsert( + 'Outlet_Call__c', + 'Invoice_Number__c', + fields( + field('Invoice_Number__c', dataValue('invoice_number')), + relationship('Outlet__r', 'Outlet_Code__c', dataValue('outlet_code')), + relationship('RecordType', 'name', 'No Call Card'), + field('Trip__c', 'a0FN0000008jPue'), + relationship( + 'Sales_Person__r', + 'Sales_Rep_Code__c', + dataValue('sales_rep_code') + ), + field('Date__c', dataValue('date')), + field('Comments__c', dataValue('comments')) + ) + ) + ); +``` + +### Salesforce: perform an update + +```js +update("Patient__c", fields( + field("Id", dataValue("pathToSalesforceId")), + field("Name__c", dataValue("patient.first_name")), + field(...) 
+)); +``` + +### Salesforce: Set record type using 'relationship(...)' + +```js +create( + 'custom_obj__c', + fields( + relationship( + 'RecordType', + 'name', + dataValue('submission_type'), + field('name', dataValue('Name')) + ) + ) +); +``` + +### Salesforce: Set record type using record Type ID + +```js +each( + '$.data.data[*]', + create( + 'fancy_object__c', + fields( + field('RecordTypeId', '012110000008s19'), + field('site_size', dataValue('size')) + ) + ) +); +``` + +### Telerivet: Send SMS based on Salesforce workflow alert + +```js +send( + fields( + field( + 'to_number', + dataValue( + 'Envelope.Body.notifications.Notification.sObject.phone_number__c' + ) + ), + field('message_type', 'sms'), + field('route_id', ''), + field('content', function (state) { + return 'Hey there. Your name is '.concat( + dataValue('Envelope.Body.notifications.Notification.sObject.name__c')( + state + ), + '.' + ); + }) + ) +); +``` + +### HTTP: fetch but don't fail! + +```js +// ============= +// We use "fetchWithErrors(...)" so that when the +// SMS gateway returns an error the run does not "fail". +// It "succeeds" and then delivers that error message +// back to Salesforce with the "Update SMS Status" job. +// ============= +fetchWithErrors({ + getEndpoint: 'send_to_contact', + query: function (state) { + return { + msisdn: + state.data.Envelope.Body.notifications.Notification.sObject + .SMS__Phone_Number__c, + message: + state.data.Envelope.Body.notifications.Notification.sObject + .SMS__Message__c, + api_key: 'some-secret-key', + }; + }, + externalId: state.data.Envelope.Body.notifications.Notification.sObject.Id, + postUrl: 'https://www.openfn.org/inbox/another-secret-key', +}); +``` + +### Sample DHIS2 events API job: + +```js +event( + fields( + field('program', 'eBAyeGv0exc'), + field('orgUnit', 'DiszpKrYNg8'), + field('eventDate', dataValue('properties.date')), + field('status', 'COMPLETED'), + field('storedBy', 'admin'), + field('coordinate', { + latitude: '59.8', + longitude: '10.9', + }), + field('dataValues', function (state) { + return [ + { + dataElement: 'qrur9Dvnyt5', + value: dataValue('properties.prop_a')(state), + }, + { + dataElement: 'oZg33kd9taw', + value: dataValue('properties.prop_b')(state), + }, + { + dataElement: 'msodh3rEMJa', + value: dataValue('properties.prop_c')(state), + }, + ]; + }) + ) +); +``` + +### Sample DHIS2 data value sets API job: + +```js +dataValueSet( + fields( + field('dataSet', 'pBOMPrpg1QX'), + field('orgUnit', 'DiszpKrYNg8'), + field('period', '201401'), + field('completeData', dataValue('date')), + field('dataValues', function (state) { + return [ + { dataElement: 'f7n9E0hX8qk', value: dataValue('prop_a')(state) }, + { dataElement: 'Ix2HsbDMLea', value: dataValue('prop_b')(state) }, + { dataElement: 'eY5ehpbEsB7', value: dataValue('prop_c')(state) }, + ]; + }) + ) +); +``` + +### sample openMRS expression, creates a person and then a patient + +```js +person( + fields( + field('gender', 'F'), + field('names', function (state) { + return [ + { + givenName: dataValue('form.first_name')(state), + familyName: dataValue('form.last_name')(state), + }, + ]; + }) + ) +), + patient( + fields( + field('person', lastReferenceValue('uuid')), + field('identifiers', function (state) { + return [ + { + identifier: '1234', + identifierType: '8d79403a-c2cc-11de-8d13-0010c6dffd0f', + location: '8d6c993e-c2cc-11de-8d13-0010c6dffd0f', + preferred: true, + }, + ]; + }) + ) + ); +``` + +### merge many values into a child path + +```js +each( + merge( + 
dataPath("CHILD_ARRAY[*]"), + fields( + field("metaId", dataValue("*meta-instance-id*")), + field("parentId", lastReferenceValue("id")) + ) + ), + create(...) +) +``` + +### arrayToString + +```js +arrayToString(arr, separator_string); +``` + +### access an image URL from an ODK submission + +```js +// In ODK the image URL is inside an image object... +field("Photo_URL_text__c", dataValue("image.url")), +``` + +### alterState (alter state) to make sure data is in an array + +```js +// Here, we make sure CommCare gives us an array to use in each(merge(...), ...) +fn(state => { + const idCards = state.data.form.ID_cards_given_to_vendor; + if (!Array.isArray(idCards)) { + state.data.form.ID_cards_given_to_vendor = [idCards]; + } + return state; +}); + +// Now state has been changed, and we carry on... +each( + merge( + dataPath('form.ID_cards_given_to_vendor[*]'), + fields( + field('Vendor_Id', dataValue('form.ID_vendor')), + field('form_finished_time', dataValue('form.meta.timeEnd')) + ) + ), + upsert( + 'Small_Packet__c', + 'sp_id__c', + fields( + field('sp_id__c', dataValue('ID_cards_given_to_vendor')), + relationship('Vendor__r', 'Badge_Code__c', dataValue('Vendor_Id')), + field( + 'Small_Packet_Distribution_Date__c', + dataValue('form_finished_time') + ) + ) + ) +); +``` + +### Login in to a server with a custom SSL Certificate + +This snippet describes how you would connect to a secure server ignoring SSL +certificate verification. Set `strictSSL: false` in the options argument of the +`post` function in `language-http`. + +```js +post( + `${state.configuration.url}/${path}`, + { + headers: { 'content-type': 'application/json' }, + body: { + email: 'Luka', + password: 'somethingSecret', + }, + strictSSL: false, + }, + callback +); +``` + +## Anonymous Functions + +Different to [Named Functions](#examples-of-adaptor-specific-functions), +Anonymous functions are generic pieces of javascript which you can write to suit +your needs. Here are some examples of these custom functions: + +### Custom replacer + +```js +field('destination__c', state => { + console.log(something); + return dataValue('path_to_data')(state).toString().replace('cats', 'dogs'); +}); +``` + +This will replace all "cats" with "dogs" in the string that lives at +`path_to_data`. + +> **NOTE:** The JavaScript `replace()` function only replaces the first instance +> of whatever argument you specify. If you're looking for a way to replace all +> instances, we suggest you use a regex like we did in the +> [example](#custom-concatenation-of-null-values) below. + +### Custom arrayToString + +```js +field("target_specie_list__c", function(state) { + return Array.apply( + null, sourceValue("$.data.target_specie_list")(state) + ).join(', ') +}), +``` + +It will take an array, and concatenate each item into a string with a ", " +separator. + +### Custom concatenation + +```js +field('ODK_Key__c', function (state) { + return dataValue('metaId')(state).concat('(', dataValue('index')(state), ')'); +}); +``` + +This will concatenate two values. + +### Concatenation of null values + +This will concatenate many values, even if one or more are null, writing them to +a field called Main_Office_City_c. + +```js +... + field("Main_Office_City__c", function(state) { + return arrayToString([ + dataValue("Main_Office_City_a")(state) === null ? "" : dataValue("Main_Office_City_a")(state).toString().replace(/-/g, " "), + dataValue("Main_Office_City_b")(state) === null ? 
"" : dataValue("Main_Office_City_b")(state).toString().replace(/-/g, " "), + dataValue("Main_Office_City_c")(state) === null ? "" : dataValue("Main_Office_City_c")(state).toString().replace(/-/g, " "), + dataValue("Main_Office_City_d")(state) === null ? "" : dataValue("Main_Office_City_d")(state).toString().replace(/-/g, " "), + ].filter(Boolean), ',') + }) +``` + +> Notice how this custom function makes use of the **regex** `/-/g` to ensure +> that all instances are accounted for (g = global search). + +### Custom Nth reference ID + +If you ever want to retrieve the FIRST object you created, or the SECOND, or the +Nth, for that matter, a function like this will do the trick. + +```js +field('parent__c', function (state) { + return state.references[state.references.length - 1].id; +}); +``` + +See how instead of taking the id of the "last" thing that was created in +Salesforce, you're taking the id of the 1st thing, or 2nd thing if you replace +"length-1" with "length-2". + +### Convert date string to standard ISO date for Salesforce + +```js +field('Payment_Date__c', function (state) { + return new Date(dataValue('payment_date')(state)).toISOString(); +}); +``` + +> **NOTE**: The output of this function will always be formatted according to +> GMT time-zone. + +### Use external ID fields for relationships during a bulk load in Salesforce + +```js +array.map(item => { + return { + Patient_Name__c: item.fullName, + 'Account.Account_External_ID__c': item.account + 'Clinic__r.Unique_Clinic_Identifier__c': item.clinicId, + 'RecordType.Name': item.type, + }; +}); +``` + +### Bulk upsert with an external ID in salesforce + +```js +bulk( + 'Visit_new__c', + 'upsert', + { + extIdField: 'commcare_case_id__c', + failOnError: true, + allowNoOp: true, + }, + dataValue('patients') +); +``` diff --git a/versioned_docs/version-legacy/build/lightning-quick-start.md b/versioned_docs/version-legacy/build/lightning-quick-start.md new file mode 100644 index 00000000000..eecff033e2f --- /dev/null +++ b/versioned_docs/version-legacy/build/lightning-quick-start.md @@ -0,0 +1,341 @@ +--- +title: Lightning Quick-Start (v2) +--- + +This tutorial takes ~15 minutes to complete, and teaches you how to build +workfows with OpenFn Lightning. If you get stuck, post a question to our +[community forum](https://community.openfn.org/). + +## 1. Register + +Register for an account on +[app.openfn.org](https://app.openfn.org/users/register) and follow the link sent +to your inbox to confirm your email. + +\*If you already have an account, you can +[login](https://app.openfn.org/users/log_in). + +## 2. Understand the sample workflow + +Click on the 'sample workflow' created for you on registration. + +![lightning-workflows-page](/img/lightning-workflows-page.png) + +:::tip + +A **workflow** is a series of tasks to be carried out _automatically_ (i.e a +process that has been automated). + +::: + +The sample workflow pictured below formats and sends data from a source system +(KoboToolbox, a mobile data-collection app) to a destination system (DHIS2, a +health information management system). It automates patient registration by +taking a patient’s name and age and: + +1. checking if they are over 18 months old; +2. converting it to the same format as DHIS2; +3. uploading it to DHIS2. + +![lightning-sample-workflow](/img/lightning-sample-workflow.png) + +It is made up of 3 _jobs_. + +:::tip + +A **job** is an action to be carried out at a given point in time. 
It has a +trigger, an adaptor, a credential and a job expression which each define _when, +where, how_ and _what_ to do. + +::: + +Click on Job 3 to view more details about it in the setup and editor tab. + +### [SETUP TAB] + +The SETUP TAB is where you define the when, where and how of your job. + +![lightning_setup](/img/lightning_setup.png) + +**When: trigger** + +The trigger defines when an action should happen. It can be one of the +following: + +- When data is sent to OpenFn Lightning from an external system: **_webhook_** +- At a recurring point in time: **_cron_** +- When the job which comes before it in the workflow succeeds: **_on success_** +- When the job which comes before it in the workflow fails: **_on failure_** + +If you **never** want the job to run, you can disable it by unselecting the +'Enabled' checkbox. + +:::tip + +The trigger for the first job in a workflow will always be either a 'cron' or +'webhook' trigger. All the other jobs will have a trigger of 'on success' or 'on +failure'. + +::: + +**Where: adaptor** + +The adaptor is what helps you communicate with and perform actions in a +particular system. In OpenFn, you can carry out an action in the following +systems: + +- In OpenFn: OpenFn or common adaptors +- In an external system OpenFn has an adaptor for: commcare, DHIS2, google + sheets, kobotoolbox ... +- In any other external system which has an API: http adaptor + +**How: credential** + +Credentials define _how_ a Job is able to perform an action on your behalf, just +as you would need to cover logging in if you were explaining how to carry out an +action manually. + +:::tip + +If you are performing an action in an external system, you'll need to select +_the same_ credential type as your adaptor. + +::: + +### [INPUT TAB] + +The INPUT TAB is where you can see examples of data that has been sent to your +job during previous runs. + +In job 3, we'll be using the data values that are in `names` which are +`"Wycliffe"` and `"Orao"` in this example. Can you see them? + +![lightning_input_data](/img/lightning_input_data.png) + +:::tip + +The _input_ data of a job can be accessed through state. For example, if you +want the `names` values from an input, you can access it at `state.names`. + +::: + +### [EDITOR TAB] + +The EDITOR TAB is where you define _what_ the job should do and which data from +state (which contains your input) to use. + +:::tip + +When you need to use data that comes from your webhook trigger (data sent from +your external system), cron trigger, or a previous job you can find it in +`state`. Learn more [here](https://docs.openfn.org/documentation/jobs/state/). + +::: + +![lightning_editor_1](/img/lightning_editor_1.png) + +In this job, we're using the `names` data from state (which we saw in the Input +tab). + +**What: Job expression** + +The job expression defines what action to carry out and which data values to +use. + +It gets added from the adaptor documentation _below_ the editor as an example +operation, and is then configured to use specific values from the state input +data. (see image below for details) + +![lightning_editor](/img/lightning_editor.png) + +## 3. Run the sample workflow + +:::tip + +A workflow will run when the trigger from the first job (represented as the +first node on the canvas) is called. + +::: + +In the case of the Sample Workflow, this is when data is sent to the webhook +URL. There are three ways of doing this. + +Follow the instructions from _one_ of the options below to run your workflow. 
+ +### Option 1: Manually send data to your first job trigger + +Click on the first job in your workflow, then head to the input tab. Paste the +data below into the `custom input`, then click `run`. + +```json +{ + "data": { + "age_in_months": 19, + "name": "Wycliffe Gigiwe" + } +} +``` + +![lightning_manual_run](/img/lightning_manual_run.png) + +You should now be able to +[see your request on the history page](#4-check-your-request-got-processed-correctly). + +:::tip + +When a job is run, OpenFn adds the input into state (used to get data values in +the job expression), along with the credentials which get added to +configuration. + +::: + +### Option 2: Send data through a curl request + +You can also send data to a webhook URL by making a curl request in your +terminal. + +Copy your webhook URL by clicking on the first node of your workflow, then use +it to replace `YOUR_WEBHOOK_URL` in the command below and run it in your CLI. + +```sh +curl -H 'Content-Type: application/json' \ + -d '{"age_in_months": 19, "name": "Wycliffe Gigiwe"}' \ + -X POST \ + YOUR_WEBHOOK_URL +``` + +You should get a response that looks like this, and be able to +[see your request on the history page](#4-check-your-request-got-processed-correctly). + +```json +{ + "attempt_id": "3602a2e6-cd01-4b48-bfa9-5237e7393c90", + "run_id": "fdebd5a9-3578-4bfd-945e-12e0a24e8c6a", + "work_order_id": "b1899b6f-e420-479f-a6ae-8641189764cd" +} +``` + +### Option 3: Send data from your external system + +:::tip + +You can trigger a workflow from an external system by configuring it's REST +services to send data to your trigger webhook URL. + +::: + +In the case of our Sample Workflow, we're using KoboToolbox as an external +system. + +[Log into](https://kf.kobotoolbox.org/accounts/login/#/) our KoboToolbox demo +account with _username: openfn_demo and password: openfn_demo_. Select the form +you’d like to connect ('Lightning sample workflow') and go to Settings -> REST +services -> Register a new service. + +![kobo](/img/2.3_kobo_rest.png 'Register a REST service with Kobo') + +Set the service name to OpenFn and the URL to the webhook URL (you can copy is +from the first node on your workflow). + +![kobo](/img/2.4_kobo_rest.png 'Set the REST service URL to your OpenFn inbox URL') + +Your form should now be configured to send data to the webhook trigger for your +first job whenever a response is submitted. We can test this out by submitting +some form responses at Form -> Open. + +![kobo form](/img/2.5_open_kobo_form.png 'Open a kobo form') + +Once you've made a form submission, you should be able to +[see your request on the history page](#4-check-your-request-got-processed-correctly). + +## 4. Check your request got processed correctly + +:::tip + +The history page shows you each **work order** or _request for data to be +processed_. + +::: + +Now that you have run your workflow, head to the history page to see the work +order. You'll see it has a status of 'Success' which means it got processed +correctly. + +![lightning_history](/img/lightning_history.png) + +Click on the chevron next to the status to expand it and see each job run. + +![lightning-history_expanded](/img/lightning_history_expanded.png) + +## 5. Make a run that fails, then edit the job and rerun it to make it succeed + +From your workflow page, run the job manually with a patient that is 18 months +old using the data below. 
+
+```json
+{
+  "data": {
+    "age_in_months": 18,
+    "name": "Njoroge Orao"
+  }
+}
+```
+
+Head to the history page and see that the work order has a status of 'Failure'.
+This is because the patient is **not** older than 18 months.
+
+![lightning_history_failure](/img/lightning_history_failure.png)
+
+Let's say we made a mistake and _actually_ wanted to register any patient that
+is 18 months old _**and above**_. We want to edit the job logic and
+reprocess the request.
+
+Head to the Editor tab in Job 1 to update the logic by changing the if statement
+from `> 18` to `>= 18`.
+
+Your Job expression should now be the following:
+
+```js
+fn(state => {
+  if (state.data.age_in_months >= 18) {
+    console.log('Eligible for program.');
+    return state;
+  } else {
+    throw 'Error, patient ineligible.';
+  }
+});
+```
+
+Make sure to click save, then head back to your history page and find the work
+order you want to reprocess. You can search for "Njoroge Orao" in the search bar
+to find it.
+
+Expand the work order, and click the 'rerun' button next to the first job run.
+
+![lightning_retry](/img/lightning_retry.png)
+
+You'll see a new **attempt** created in the same work order, which now succeeds.
+The work order status also gets updated to the status of the last attempt to
+show 'Success'.
+
+![lightning_new_attempt](/img/lightning_new_attempt.png)
+
+Rerun the same work order, this time from 'Job 3 - Upload to DHIS2'. You'll see
+the runs for Job 1 and 2 get copied over to the new attempt, so that their
+output can be used for the input of Job 3.
+
+![lightning_rerun_downstream_job](/img/lightning_rerun_downstream_job.png)
+
+:::tip Note
+
+When you rerun a workflow from a downstream job, the previous job runs are
+copied over to the new attempt, so you can still see where the input from your
+downstream job came from.
+
+:::
+
+You're all set! If you made it to the end of this tutorial, you should be
+familiar with the key concepts you need to start building your own workflow.
+Give it a go, and don't forget to post on our
+[community forum](https://community.openfn.org/) if you get stuck - or to let us
+know what you built.
diff --git a/versioned_docs/version-legacy/build/triggers.md b/versioned_docs/version-legacy/build/triggers.md
new file mode 100644
index 00000000000..d6d2db598c6
--- /dev/null
+++ b/versioned_docs/version-legacy/build/triggers.md
@@ -0,0 +1,258 @@
+---
+title: Triggers
+---
+
+Triggers are responsible for starting job runs automatically. They come in 4
+types. The most common are "message filter" triggers, but there are also "cron"
+triggers, "flow" triggers, and "fail" triggers.
+
+## Trigger types
+
+### Message Filter Triggers
+
+Message Filter triggers watch for inbound messages and check to see if the data
+in those messages meet their **inclusion criteria** and _don't_ meet their
+**exclusion criteria**. If they pass these tests and if there are active jobs
+configured to use that trigger, a run will be started for each message/job
+combination.
+
+You, the user, specify the inclusion and exclusion criteria that determine
+which inbound messages should trigger job runs. Broadly speaking, if part of a
+message body **matches** the JSON you provide as the inclusion filter, and
+_doesn't_ match the JSON you provided as the exclusion filter, a job will run
+(assuming you created one with `autoprocess` turned on).
+
+The filter criteria takes the form of a string of valid JSON like this:
+`{"Name":"Aleksa Iwobi"}`. In an SQL query, this string will be used in the
+WHERE clause, making use of special `jsonb` operators like this:
+
+```sql
+SELECT * FROM messages
+  WHERE body::jsonb @> '{"Name":"Nicholas Pépé"}'::jsonb;
+```
+
+If you provide an exclusion criteria like `{"type": "fake-data"}` the resulting
+query will look something like this:
+
+```sql
+SELECT * FROM messages
+  WHERE body::jsonb @> '{"Name":"Nicholas Pépé"}'::jsonb
+  AND NOT (body::jsonb @> '{"type":"fake-data"}'::jsonb);
+```
+
+There is a more detailed explanation of filter matching
+[below](#filter-matching-in-detail).
+
+### Cron Triggers (formerly timers)
+
+Cron triggers run jobs based on a cron schedule. They can run as frequently as
+once every minute, or as infrequently as you desire, and can be scheduled on very
+specific dates or times. Each time a timed job succeeds, its `final_state` will
+be saved and used as the `initial_state` for its next run. See "Managing state"
+and "Keeping a cursor" below for implementation help.
+
+The best way to learn about `cron`, if you're not already familiar, is through
+the OpenFn interface or
+[crontab.guru](https://crontab.guru).
+
+### Flow Triggers
+
+Flow triggers will execute a job _after_ another specified job finishes
+successfully. E.g., a flow trigger which specifies the successful run of Job A
+can be used by Job B. Each time Job A succeeds, Job B will start to run with the
+`final_state` of Job A as its `initial_state`.
+
+### Fail Triggers
+
+Fail, or "catch", triggers work just like flow triggers, except that they watch
+for the failure, rather than the success, of a specified job. (E.g., Job A pays
+a CHW via MPESA. If Job A _fails_ we should initiate Job B, which sends an SMS
+to the district manager instructing them to manually pay the CHW.)
+
+## Processing cron jobs
+
+**On-demand processing for cron jobs.** If you’re leveraging cron triggers to
+run jobs at specific times, you can also run that cron-triggered job on demand.
+This way you don’t have to wait for the timer to expire before testing! Simply
+click the process/“play” button now available via the Job, Run, and Activity
+History pages.
+
+![Runs list run time trigger button](/img/timetriggerunslist.png)
+
+![Run history time trigger button](/img/runtimetrigger1.png)
+
+#### Keeping a cursor in `state` for timer Jobs
+
+Because many timer jobs require keeping some sort of record of their previous
+run to modify their later actions, `state` is passed between the runs. One
+example might be keeping a "cursor" to select only new records from a database.
+We'd expect the following logic:
+
+1. `job-1` fetches patients from the database
+2. `job-1` does something important with those patient records
+3. `job-1` saves the `id` of the last successfully processed patient to
+   `final_state`
+4. when `job-1` runs again, it fetches patients whose `id` is greater than the
+   `id` of the last successfully processed patient.
+
+To achieve this you might write:
+
+```js
+fetchPatient({ type: 'referral', offset: state.lastId }, state => {
+  // Assuming the system returned an array of patients at state.data.patients.
+  state.lastId = state.data.patients.sort((a, b) => b.id - a.id)[0].id;
+  return state;
+});
+```
+
+The initial offset will be `null`, but the subsequent runs will automatically
+only fetch "new" patients.
+
+### Managing the size of `state` for Timer Jobs
+
+Since state is passed between each run of a timer job, if your job adds
+something new to state each time it runs, it may quickly become too large to be
+practically handled.
Imagine if a server response were adding, via +`array.push(...)`, to `state.references` each time the job ran. OpenFn supports +up to 50,000 bytes (via Erlang's `byte_size`), though most `final_state` byte +sizes are between 100 and 1000. + +If the size of your `final_state` exceeds 10,000 bytes, OpenFn will send project +collaborators a warning email. If it exceeds 50,000 bytes, your run will still +succeed but its `final_state` will not be saved and the next time that job runs +it will inherit the previous, un-updated final state. (I.e., the last state that +was < 50,000 bytes.) + +### A quick fix for final state bloat + +Most often, final state bloat is due to improper handling of `state.references` +or `state.data`. This can be fixed by adding the following lines _either_ to the +callback of your language-package's operation (if it allows for one) or by +appending an `fn(...)` operation after your operation. + +```js +fn(state => { + state.custom = somethingIntentional; + state.data = {}; + state.references = []; + return state; +}); +``` + +## Filter Matching in Detail + +To illustrate filter matching, refer to the filters and message samples below. + +- Message "a" will match filter 1, but message "b" will not. +- Message "c" will match filter 2, but message "d" will not. + +### Filter 1, simple inclusion + +The inclusion criteria is `{ "formID": "patient_registration_v7" }` and the +exclusion criteria is left blank. + +#### Message "a" will match + +```json +{ + "submissionDate": "2016-01-15", + "formID": "patient_registration_v7", + "name": "Jack Wilshere", + "dob": "1986-05-16", + "medications": ["anaphlene", "zaradood", "morphofast"] +} +``` + +#### Message "b" will NOT match + +```json +{ + "submissionDate": "2016-01-16", + "formID": "patient_registration_v8", + "name": "Larry Bird", + "dob": "1982-03-21", + "medications": ["anaphlene", "zaradood", "morphofast"] +} +``` + +Message 'b' does not include `"formID":"patient_registration_v7"` and will not +match filter '1'. + +### Filter 2, inclusion _and_ exclusion + +The inclusion criteria is `{ "name": "john doe" }` and the exclusion criteria is +`{"allowedToShare": false}`. 
+ +#### Message "c" will match + +```json +{ + "submissionDate": "2016-01-15", + "name": "john doe", + "dob": "1986-05-16" +} +``` + +#### Message "d" will NOT match + +```json +{ + "submissionDate": "2016-01-15", + "name": "john doe", + "dob": "1986-05-16", + "allowedToShare": false +} +``` + +## More filter samples + +### Match messages `WHERE` the `formId` is `"Robot_Photo_21.04.2015"` + +| inclusion | exclusion | +| ---------------------------------------- | --------- | +| `{ "formId": "Robot_Photo_21.04.2015" }` | | + +### Match a message with two fragments inside an array called `data` + +(This is useful when gathering data via ODK) + +| inclusion | exclusion | +| --------------------------------------------------------------------- | --------- | +| `{ "data": [{ "outlet_call": "TRUE", "new_existing": "Existing" }] }` | | + +### Match a message `WHERE` this `AND` that are both included + +| inclusion | exclusion | +| ------------------------------------------------------------ | --------- | +| `{ "formId": "Robot_Photo_21.04.2015", "secret_number": 8 }` | | + +### Match a message using exclusion + +| inclusion | exclusion | +| ---------------------------------------- | ---------------------------- | +| `{ "formId": "Robot_Photo_21.04.2015" }` | `{ "safeToProcess": false }` | + +### Match a message with a fragment inside another object called `form` + +| inclusion | exclusion | +| ------------------------------------------------------------------------------------- | --------- | +| `{"form": {"@xmlns": "http://openrosa.org/formdesigner/F732194-3278-nota-ReAL-one"}}` | | + +## An exclusion demo + +Imagine that we had a filter which included messages with `form == 'bns_survey'` +but we then want to start _excluding_ those that have +`body.survey_type == 'practice'`. Our filter trigger would look need to like +this: + +| inclusion | exclusion | +| -------------------------- | --------------------------------------- | +| `{ "form": "bns_survey" }` | `{"body": {"survey_type": "practice"}}` | + +We'd set it up from the trigger form like this: + +![img](/img/exclusion.gif) + +And verify the result on the inbox: + +![img](/img/demo-exclusion.gif) diff --git a/versioned_docs/version-legacy/build/troubleshooting.md b/versioned_docs/version-legacy/build/troubleshooting.md new file mode 100644 index 00000000000..388b4cc9503 --- /dev/null +++ b/versioned_docs/version-legacy/build/troubleshooting.md @@ -0,0 +1,132 @@ +--- +title: Troubleshooting integrations +sidebar_label: Troubleshooting +--- + +Or, a penny for your thoughts during the debugging process. + + + +So, you've noticed that something isn't quite right. Here's a list of questions, +and complications, that might help you get to the bottom of it. + +## The Implementation Perspective + +First, keep this quick checklist at hand... answering these questions _in order_ +will ensure that you're spending as little time as possible getting to the cause +of the problem, whether that's big or small. + +### 1. What do you desire? + +This one might take a lifetime to answer, but in the context of debugging you +can limit the bounds a bit. We really can't move forward until you're clear on +what you want. + +### 2. How are you asking for it? + +Show me the issue, the specifications, the "requirement"! Let's make sure it's +clearly articulated and memorialized. If so, move to Q3! + +### 3. Is what you're asking for going to produce the effect you desire? + +This is a tricky one, and may involve the engineering team. 
(In fact, this is +often when engineering gets called in. There's a "bug", and before we take a +look at any code we need to figure out if what's being asked for—the +specification—will actually produce the desired outcomes.) + +### 4. Does the expression implement what you're asking for? + +So we're _certain_ that the spec will produce the effect we want? OK, great... +now let's look at the job expression. Does the job expression implement the +spec? How can you prove (with logs, assertions, etc.) that it does? Don't move +on until you're certain of this, or certain that it _can't_, given the adaptor +you're using! + +:::info Time check + +N.B., a change to the job expression takes as little as a couple of minutes. + +::: + +### 5. Does the adaptor support/enable the implementation in the expression? + +OK, if you're certain the expression is doing all it can with the spec... maybe +there's a bug in the adaptor! Something in how that helper function was +implemented may not be doing what the author of the adaptor intended—and this +could be producing the "bug". + +If you're starting work on the adaptor, you should have _already_ reduced the +problem to a **_GENERAL PROBLEM_**, leaving aside all specifics of this +implementation. You're starting to change the way that this adaptor interacts +with the target API. You've got out your API docs and you're CURLing requests +directly to various endpoints, setting up tests in the adaptor, etc. + +:::info Time check + +A change to the adaptor might take an hour, maybe a few. We're on the order of a +day, if you've got big changes and include the time required to deploy new +versions. + +::: + +### 6. Does the target API support/enable the implementation in the adaptor? + +Whoa... if you've made it down here you're in now "big serious" land. Tread +lightly! I'm guessing that you've found lots of Stack Overflow threads +describing the issue you're facing. What you're getting at is that _despite_ the +API docs we used to build this adaptor, there's something different about the +way the API is actually behaving. + +Maybe there's a new API version with a breaking change? + +Maybe there's a bug in the target system? + +Either way, when you get down to this level you're spending a LOT of time and +you're engaging with the broader open source community. You should post on at +least one forum before signing off for the day. + +:::info Time check + +Writing a new adaptor for a new API version, or fixing a bug in another +developer's system via a pull request... this stuff takes weeks and months and, +worse yet, the timelines are often outside of our control. + +::: + +## The Product Perspective + +To throw a spanner into the works _(embrace the complexity!)_ when I wear my +product hat I invert the pyramid here. While a problem might be solved in 15 +minutes by writing a new line in the `expression` (see Q4), is this a +generalizable problem and could I save _future implementers_ those 15 minutes by +making a change to the adaptor (see Q5) that would provide this fix/feature "out +of the box"? + +Better yet... could I make some change to the OpenFn platform (or to Primero, or +CommCare, or DHIS2?) that would enable easier/better/adaptors and solve this +problem with clicks, not code? + +:::tip + +Remember those jobs we used to write that did nothing (simply returning state) +if a condition was met? 
Well, using exactly this approach we delivered an +"exclusion filter" feature to OpenFn which allows a user to skip certain inbound +messages based on criteria, rather than having to evaluate those message in the +job. + +It took much more work than writing that one `fn(...)` block at the top of a +single client's job, but now it saves _everyone_ from writing that line in the +future. + +::: + +## Find balance, in the end + +These questions are always knocking around inside my head and I try to weigh +this product perspective against the implementation perspective. In the end, +it's always about balance (no surprise there) in how we _solve_ these problems, +but by following the implementation perspective in how you approach, understand, +debug, and produce estimates will get more information out onto the table faster +and enable a better "OK, how should we solve this given the current +chronological and commercial constraints" conversation between the +Implementation Team and the Engineering Team. diff --git a/versioned_docs/version-legacy/cli.md b/versioned_docs/version-legacy/cli.md new file mode 100644 index 00000000000..270cdfef440 --- /dev/null +++ b/versioned_docs/version-legacy/cli.md @@ -0,0 +1,1182 @@ +--- +title: The CLI +sidebar_label: CLI +slug: /cli +--- + +:::info What is this tutorial? + +- It's a _hands-on_ way to learn about the new OpenFn CLI. By following the + prompts and "challenges", a developer with a bit of Javascript experience + should be able to write, run, and debug complex, multi-step jobs with OpenFn, + using nothing but a text editor and their terminal. +- The estimated time to finish this developer challenge is 1 to 2 hours + (depending on your familiarity with the underlying concepts and tooling) +- If you are stuck and need help, please post in + [community.openfn.org](https://community.openfn.org/t/about-the-job-writing-category/11/1) + +::: + +## Intro to the OpenFn CLI + +The [@openfn/cli](https://github.com/OpenFn/kit/tree/main/packages/cli) is a +command line interface for running OpenFn workflows locally. It enables +developers to run, build, and test steps in an OpenFn workflow. + +This CLI replaces [@openfn/devtools](https://github.com/OpenFn/devtools) and +provides a new suite of features and improvements, including: + +- a new runtime and compiler for executing and creating runnable OpenFn jobs, +- customizable logging output, +- automatic installation of language adaptors, +- and support for the adaptors monorepo + ([@openfn/adaptors](https://github.com/OpenFn/adaptors)) where all OpenFn + adaptor source code and documentation lives. + +These features are designed to make it easier and more convenient for developers +to use and understand OpenFn. + +:::caution Looking for a way to execute jobs from OpenFn v1 locally? Use Core! + +If you're looking for a way to execute jobs running on the OpenFn v1 platform, +please see the documentation for **[@openfn/core](/documentation/core)** and +[Devtools](/documentation/devtools/home). + +::: + +## Prerequisites + +1. Ensure you have a code editor installed on your machine (e.g. + [VS Code](https://code.visualstudio.com/), + [Sublime](https://www.sublimetext.com/)) + +2. Install NodeJs **v18 is the minimum version required** + + - To install a specific version of Node.js (in this case, version 18) on + Linux, Windows, or macOS, you can use a version manager such as nvm (Node + Version Manager) or any multiple runtime version manager eg: + [asdf](https://github.com/asdf-vm/asdf). 
These tools allow you to install + and switch between multiple versions of Node.js on the same machine. See + below for instructions for different operating systems. + - Read this article to learn how to install NodeJs in your machine + [kinsta.com/blog/how-to-install-node-js/](https://kinsta.com/blog/how-to-install-node-js/) + +3. Have a basic understanding of OpenFn—check out jobs and adaptors, at least, + in the [OpenFn Concepts](getting-started/terminology) of this site. +4. Install the OpenFn CLI with `npm install -g @openfn/cli` + +## Walkthrough & Challenges + +### 1. Getting started with the CLI + +Let's start by running a simple command with the CLI. Type the following into +your terminal: + +```bash +openfn test +``` + +The word `openfn` will invoke the CLI. The word `test` will invoke the test +command. + +
+ You should see some output like this: + +```bash +[CLI] ℹ Versions: + ▸ node.js 18.12.1 + ▸ cli 0.0.39 + ▸ runtime 0.0.24 + ▸ compiler 0.0.32 +[CLI] ℹ Running test job... +[CLI] ℹ Workflow object: +[CLI] ℹ { + "start": "start", + "jobs": [ + { + "id": "start", + "data": { + "defaultAnswer": 42 + }, + "expression": "const fn = () => (state) => { console.log('Starting computer...'); return state; }; fn()", + "next": { + "calculate": "!state.error" + } + }, + { + "id": "calculate", + "expression": "const fn = () => (state) => { console.log('Calculating to life, the universe, and everything..'); return state }; fn()", + "next": { + "result": true + } + }, + { + "id": "result", + "expression": "const fn = () => (state) => ({ data: { answer: state.data.answer || state.data.defaultAnswer } }); fn()" + } + ] +} + +[CLI] ✔ Compilation complete +[R/T] ♦ Starting job start +[JOB] ℹ Starting computer... +[R/T] ℹ Operation 1 complete in 0ms +[R/T] ✔ Completed job start in 1ms +[R/T] ♦ Starting job calculate +[JOB] ℹ Calculating to life, the universe, and everything.. +[R/T] ℹ Operation 1 complete in 0ms +[R/T] ✔ Completed job calculate in 1ms +[R/T] ♦ Starting job result +[R/T] ℹ Operation 1 complete in 0ms +[R/T] ✔ Completed job result in 0ms +[CLI] ✔ Result: 42 + +``` + +
+ +What we've just done is executed a JavaScript expression, which we call a _job_. +The output prefixed with `[JOB]` comes directly from `console.log` statements in +our job code. All other output is the CLI trying to tell us what it is doing. + +
+What is a job? +A job is Javascript code which follows a particular set of conventions. +Typically a job has one or more operations which perform a particular +task (like pulling information from a database, creating a record, etc.) and +return state for the next operation to use. + +The test job we just ran looks like this: + +```js +const fn = () => state => { + console.log( + 'Calculating the answer to life, the universe, and everything...' + ); + return state * 2; +}; +export default [fn()]; +``` + +You can see this (and a lot more detail) by running the test command with +debug-level logging: + +```bash +openfn test --log debug +``` + +
+ +#### Tasks: + +:::info To get started with @openfn/cli + +1. Create a new folder for the repository you'll be working on by running the + following command: `mkdir devchallenge && cd devchallenge` + +2. While you can keep your job scripts anywhere, it's a good practice to store + `state.json` and `output.json` in a `tmp` folder. To do this, create a new + directory called `tmp` within your `devchallenge` folder: `mkdir tmp` + +3. Since `state.json` and `output.json` may contain sensitive configuration + information and project data, it's important to never upload them to Github. + To ensure that Github ignores these files, add the `tmp` directory to your + `.gitignore` file: `echo "tmp" >> .gitignore` +4. (Optional) Use the `tree` command to check that your directory structure + looks correct. Running `tree -a` in your `devchallenge` folder should display + a structure like this: + ```bash + devchallenge + ├── .gitignore + └── tmp + ├── state.json + └── output.json + ``` + +::: + +1. Create a file called `hello.js` and write the following code. + + ```js + console.log('Hello World!'); + ``` + +
+ What is console.log? + console.log is a core JavaScript language function which lets + us send messages to the terminal window. +
+ +1. Run the job using the CLI + + ```bash + openfn hello.js -o tmp/output.json + ``` + +
+ + View expected output + + ```bash + [CLI] ⚠ WARNING: No adaptor provided! + [CLI] ⚠ This job will probably fail. Pass an adaptor with the -a flag, eg: + openfn job.js -a common + [CLI] ✔ Compiled from helo.js + [R/T] ♦ Starting job job-1 + [JOB] ℹ Hello World! + [R/T] ✔ Completed job job-1 in 1ms + [CLI] ✔ State written to tmp/output.json + [CLI] ✔ Finished in 17ms ✨ + + ``` + +
+ +Note that our `console.log` statement was printed as `[JOB] Hello world!`. Using +the console like this is helpful for debugging and/or understanding what's +happening inside our jobs. + +#### 🏆 Challenge: Write a job that prints your name + +1. Modify `hello.js` to print your name. +2. Re-run the job by running `openfn hello.js -a common -o tmp/output.json`. +3. Validate that you receive the logs below: + +```bash +[CLI] ✔ Compiled job from hello.js +[JOB] ℹ My name is { YourName } +[R/T] ✔ Operation 1 complete in 0ms +[CLI] ✔ Writing output to tmp/output.json +[CLI] ✔ Done in 366ms! ✨ +``` + +### 2. Using adaptor helper functions + +Adaptors are Javascript or Typescript modules that provide OpenFn users with a +set of helper functions for simplifying communication with a specific external +system. Learn more about adaptors here: [docs.openfn.org/adaptors](/adaptors/) + +#### Basic usage: + +Let’s use +[@openfn/language-http](https://www.npmjs.com/package/@openfn/language-http) +adaptor to fetch a list of forms from +[https://jsonplaceholder.typicode.com/](https://jsonplaceholder.typicode.com/) + +:::info Understanding CLI arguments + +Use `-a` to specify the adaptor; use `-i` to auto-install the necessary adaptor + +Run `openfn help` to see the full list of CLI arguments. + +::: + +#### Tasks: + +1. Create a file called `getPosts.js` and write the following code + + ```jsx title=getPosts.js + get('https://jsonplaceholder.typicode.com/posts'); + fn(state => { + console.log(state.data[0]); + return state; + }); + ``` + +2. Run the job by running + +```bash +openfn getPosts.js -i -a http -o tmp/output.json +``` + +Since it is our first time using the `http` adaptor, we are installing the +adaptor using `-i` argument + +
+ 3. See expected CLI logs + +```bash + [CLI] ✔ Installing packages... + [CLI] ✔ Installed @openfn/language-http@4.2.8 + [CLI] ✔ Installation complete in 14.555s + [CLI] ✔ Compiled from getPosts.js + [R/T] ♦ Starting job job-1 + GET request succeeded with 200 ✓ + [JOB] ℹ { + userId: 1, + id: 1, + title: 'sunt aut facere repellat provident occaecati excepturi optio reprehenderit', + body: 'quia et suscipit\n' + + 'suscipit recusandae consequuntur expedita et cum\n' + + 'reprehenderit molestiae ut ut quas totam\n' + + 'nostrum rerum est autem sunt rem eveniet architecto' + } + [R/T] ✔ Completed job job-1 in 872ms + [CLI] ✔ State written to tmp/output.json + [CLI] ✔ Finished in 15.518s ✨ + +``` + +
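+
+Before moving on, it can help to see how a second `fn(...)` block is often used
+to trim a big API response down to just the fields you care about. This is an
+optional, illustrative variation on `getPosts.js` above (the `titles` key is
+just a name chosen for this sketch, not something required by the adaptor):
+
+```js
+// Fetch all posts, as in the task above.
+get('https://jsonplaceholder.typicode.com/posts');
+
+// Keep only the id and title of each post so later operations (and your logs)
+// stay easy to read. state.data holds the array returned by the GET above.
+fn(state => {
+  const titles = state.data.map(post => ({ id: post.id, title: post.title }));
+  console.log('First three posts:', titles.slice(0, 3));
+  // Return a new state object so the next operation can read state.titles.
+  return { ...state, titles };
+});
+```
+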
+ +#### 🏆 Challenge: Get and inspect data via HTTP + +Using the +[https://jsonplaceholder.typicode.com/users](https://jsonplaceholder.typicode.com/users) +API, get a list of users and print the first user object. + +1. Create file called `getUsers.js` and write your operation to fetch the user. +2. Run the job using the OpenFn/cli + `openfn getUsers.js -a http -o tmp/output.json`. +3. Validate that you receive this expected CLI logs: + +```bash +openfn getUsers.js -a http -o tmp/output.json +``` + +
+See expected CLI logs: + +``` +[CLI] ✔ Compiled job from hello.js GET request succeeded with 200 ✓ +[R/T] ✔ Operation 1 complete in 581ms +[JOB] ℹ { + id: 1, + name: 'Leanne Graham', + username: 'Bret', + email: 'Sincere@april.biz', + address: { + street: 'Kulas Light', + suite: 'Apt. 556', + city: 'Gwenborough', + zipcode: '92998-3874', + geo: { lat: '-37.3159', lng: '81.1496' } + }, + phone: '1-770-736-8031 x56442', + website: 'hildegard.org', + company: { + name: 'Romaguera-Crona', + catchPhrase: 'Multi-layered client-server neural-net', + bs: 'harness real-time e-markets' + } +} +[R/T] ✔ Operation 2 complete in 2ms +[CLI] ✔ Writing output to tmp/output.json [CLI] ✔ Done in 950ms! ✨ +``` + +
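+
+If you get stuck on the challenge, one possible shape for `getUsers.js` is
+sketched below; it simply mirrors the `getPosts.js` pattern with a different
+endpoint, and your own version may differ:
+
+```js
+// Fetch the list of users from the same demo API.
+get('https://jsonplaceholder.typicode.com/users');
+
+// Log only the first user object returned by the API.
+fn(state => {
+  console.log(state.data[0]);
+  return state;
+});
+```
+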
+ +### 3. Understanding `state` + +If a job expression is a set of instructions for a chef (a recipe?) then the +initial state is all of the ingredients they need tied up in a perfect little +bundle. See +["It all starts with state​"](/articles/2021/07/05/wrapping-my-head-around-jobs/#it-all-starts-with-state) +in the knowledge base for extra context. + +It usually looks something like this + +```json +{ + "configuration": { + "hostUrl": "https://moh.kenya.gov.ke/dhis2", + "username": "someone", + "password": "something-secret" + }, + "data": { + "type": "registration", + "patient": { + "age": 24, + "gender": "M", + "nationalId": "321cs7" + } + } +} +``` + +#### `state.configuration` + +This key is where we put credentials which are used to authorize connections to +any authenticated system that the job will interact with. (Note that this part +of `state` is usually overwritten at runtime with a real "credential" when using +the OpenFn platform, rather than the CLI.) + +:::warning Important + +Note that `console.log(state)` will display the whole state, including +`state.configuration` elements such as **username and password**. Remove this +log whenever you're done debugging to avoid accidentally exposing sensitive +information when the job is successfully deployed on production. + +The OpenFn platform has built in protections to "scrub" state from the logs, but +when you're using the CLI directly you're on your own! + +::: + +#### `state.data` + +This key is where we put data related to a specific job run. On the platform, +it's the work-order-specific data from a triggering HTTP request or some bit of +information that's passed from one job to another. + +Using CLI, `state.json` will be loaded automatically from the current directory + +Or you can specify the path to the state file by passing the option -s, +--state-path + +Specify a path to your `state.json` file with this command: + +```bash +openfn hello.js -a http -s tmp/state.json -o tmp/output.json +``` + +Expected CLI logs + +``` +[CLI] ✔ Compiled job from hello.js +GET request succeeded with 200 ✓ +[R/T] ✔ Operation 1 complete in 876ms +[R/T] ✔ Operation 2 complete in 0ms +[CLI] ✔ Writing output to tmp/output.json +[CLI] ✔ Done in 1.222s! ✨ +``` + +#### How can we use state? + +Each adaptor has a configuration schema that's recommended for use in your +`state.json`. [Here is an example](/adaptors/packages/http-configuration-schema) +of how to set up `state.configuration` for `language-http`. + +```json +{ + "username": "name@email", + "password": "supersecret", + "baseUrl": "https://jsonplaceholder.typicode.com" +} +``` + +#### Tasks: + +1. Update your `state.json` to look like this: + + ```json title=state.json + { + "configuration": { + "baseUrl": "https://jsonplaceholder.typicode.com" + } + } + ``` + + Since we have update our configuration in our `state.json` we can now use + `get()` helper function without the need to specify the **baseUrl**—i.e + `get('posts')` + +2. Update your `getPosts.js` job to look like this: + + ```js title="getPosts.js" + // Get all posts + get('posts'); + + fn(state => { + const posts = state.data; + console.log(posts[0]); + return state; + }); + ``` + +3. 
Now run the job using the following command + + ```bash + openfn getPosts.js -a http -s tmp/state.json -o tmp/output.json + ``` + + And validate that you see the expected CLI logs: + + ```bash + [CLI] ✔ Compiled job from getPosts.js + GET request succeeded with 200 ✓ + [R/T] ✔ Operation 1 complete in 120ms + [JOB] ℹ { + userId: 1, + id: 1, + title: 'sunt aut facere repellat provident occaecati excepturi optio reprehenderit', + body: 'quia et suscipit\n' + + 'suscipit recusandae consequuntur expedita et cum\n' + + 'reprehenderit molestiae ut ut quas totam\n' + + 'nostrum rerum est autem sunt rem eveniet architecto' + } + [R/T] ✔ Operation 2 complete in 0ms + [CLI] ✔ Writing output to tmp/output.json + [CLI] ✔ Done in 470ms! ✨ + ``` + +#### 🏆 Challenge: Fetch Covid-19 metadata + +1. Using the [disease.sh API](https://disease.sh/), write an operation that + returns all covid-19 metadata. + +:::tip + +`https://disease.sh/v3/covid-19/` as your **baseUrl** in `state.configuration` + +::: + +2. Validate your output: there are a lot of ways you might choose to format or + display this data. Share your results with your administrator for feedback. + +### 4. Additional arguments and commands + +#### 🏆 Challenge: Practice CLI arguments and commands + +Perform these tasks and submit answers to the discussion questions to your +administrator for feedback. + +1. Compile a openfn job (**hello.js**). + + > What's the difference between the job you wrote and the compiled job? + +2. Run a job without "strict mode" enabled. + + > What's the difference between the outputs when strict mode is enabled and + > disabled? + +3. Run a job with the log level set to `none`, and then run it again with the + log level set to `debug`. + + > When is it appropriate to use these different log levels? + +### 5. Manipulating data in a sequence of operations + +In most cases you need to manipulate, clean, or transform data at some step in +your workflow. For example after we get data from the +`https://jsonplaceholder.typicode.com` registry we might need to group the posts +by user id. The example below shows how we can: + +1. get all posts and return them in `state.data` +2. group returned posts by `userId` +3. log posts with userId 1 + +##### Example: + +```js title="getPosts.js" +// Get all posts +get('posts'); + +// Group posts by user id +fn(state => { + const posts = state.data; + + // Group posts by userId + const groupPostsByUserId = posts.reduce((acc, post) => { + const existingValue = acc[post.userId] || []; + return { ...acc, [post.userId]: [...existingValue, post] }; + }, {}); + + // console.log(groupPostsByUserId); + return { ...state, groupPostsByUserId }; +}); + +// Log posts where userId = 1 +fn(state => { + const { groupPostsByUserId } = state; + console.log('Post with userId 1', groupPostsByUserId[1]); + return state; +}); +``` + +
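+
+If the `reduce` call above looks unfamiliar, the same grouping pattern can be
+lifted out of the job and run as plain Node.js so you can inspect the shape it
+produces (the sample posts below are made up for illustration):
+
+```js
+// Standalone demo of the grouping pattern used in getPosts.js above.
+// Run with `node group-demo.js` (the filename is just a suggestion).
+const posts = [
+  { userId: 1, id: 1, title: 'first post' },
+  { userId: 1, id: 2, title: 'second post' },
+  { userId: 2, id: 11, title: 'a post by another author' },
+];
+
+const groupPostsByUserId = posts.reduce((acc, post) => {
+  const existingValue = acc[post.userId] || [];
+  return { ...acc, [post.userId]: [...existingValue, post] };
+}, {});
+
+console.log(groupPostsByUserId);
+// => { '1': [ { id: 1, ... }, { id: 2, ... } ], '2': [ { id: 11, ... } ] }
+```
+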
+What is array.reduce? +The reduce() method applies a function against an accumulator and +each value of the array (from left-to-right) to reduce it to a single value. + +Perhaps the easiest-to-understand case for reduce() is to return +the sum of all the elements in an array: + +##### JavaScript Demo: `Array.reduce()` + +``` +const array1 = [1, 2, 3, 4]; + +// 0 + 1 + 2 + 3 + 4 +const initialValue = 0; +const sumWithInitial = array1.reduce( + (accumulator, currentValue) => accumulator + currentValue, + initialValue +); + +console.log(sumWithInitial); +// Expected output: 10 + +``` + +You can learn more about `array.reduce` from +[this article](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/Reduce) + +
+ +> Expected CLI logs + +``` +[CLI] ✔ Compiled job from getPosts.js +GET request succeeded with 200 ✓ +[R/T] ✔ Operation 1 complete in 825ms +[R/T] ✔ Operation 2 complete in 0ms +[JOB] ℹ Post with userId 1 [ + //All of posts for userId 1 +] +[R/T] ✔ Operation 3 complete in 12ms +[CLI] ✔ Writing output to tmp/output.json +[CLI] ✔ Done in 1.239s! ✨ +``` + +#### 🏆 Challenge: extract names & emails + +Using +[https://jsonplaceholder.typicode.com/posts/1/comments](https://jsonplaceholder.typicode.com/posts/1/comments) +API fetch comments for post with id 1 and extract name and email from each +comment in that post + +1. Get post all comments for post id 1 +2. Extract name and email from comments +3. Log the extracted data from comments + +Discuss the results with your administrator. + +### 6. Debugging errors + +When debugging, it’s interesting to use log to have a visual representation of +the content of the manipulated objects (such as state). + +When you want to inspect the content of state in between operations, add an +`fn()` block with a `console.log`: + +```js +// firstOperation(...); + +fn(state => { + console.log(state); + return state; +}); + +// secondOperation(...); +``` + +##### Create **debug.js** and paste the code below + +```jsx title="debug.js" +// Get all posts +get('posts'); + +// Get post by index helper function +fn(state => { + // const getPostbyIndex = (index) => dataValue(index)(state); + console.log(dataValue(1)); + + return { ...state }; +}); +``` + +##### Run **openfn debug.js -a http** + +> Expected CLI logs + +```bash +[CLI] ✘ TypeError: path.match is not a function + at dataPath (/tmp/openfn/repo/node_modules/@openfn/language-common/dist/index.cjs:258:26) + at dataValue (/tmp/openfn/repo/node_modules/@openfn/language-common/dist/index.cjs:262:22) + at getPostbyIndex (vm:module(0):5:37) + at vm:module(0):18:36 + at /tmp/openfn/repo/node_modules/@openfn/language-common/dist/index.cjs:241:12 + at file:///home/openfn/.asdf/installs/nodejs/18.12.0/lib/node_modules/@openfn/cli/node_modules/@openfn/runtime/dist/index.js:288:26 + at process.processTicksAndRejections (node:internal/process/task_queues:95:5) + at async run (file:///home/openfn/.asdf/installs/nodejs/18.12.0/lib/node_modules/@openfn/cli/node_modules/@openfn/runtime/dist/index.js:269:18) + at async executeHandler (file:///home/openfn/.asdf/installs/nodejs/18.12.0/lib/node_modules/@openfn/cli/dist/process/runner.js:388:20) +``` + +As you can see from our logs that helper function `dataValue` has a TypeError, +To troubleshoot this you can go to the documentation for **dataValue -> +[docs.openfn.org/adaptors/packages/common-docs/#datavaluepath--operation](/adaptors/packages/common-docs/#datavaluepath--operation) +** + +According to the docs, dataValue take path which is a string type. But in our +operation we were passing an integer, that’s why we have a _TypeError_. 
You can +fix the error by passing a string in dataValue i.e `console.log(dataValue(“1”))` + +> Expected CLI logs + +```bash +[CLI] ✔ Compiled job from debug.js +GET request succeeded with 200 ✓ +[R/T] ✔ Operation 1 complete in 722ms +[JOB] ℹ [Function (anonymous)] +[R/T] ✔ Operation 2 complete in 1ms +[CLI] ✔ Writing output to tmp/output.json +[CLI] ✔ Done in 1.102s ✨ +``` + +If you need more information for debugging you can pass -l debug which will give +all information about the run + +i.e `openfn debug.js -a http -l debug` + +#### 🏆 Challenge: control error messages + +Debug what is causing an error on the following line of code and display the +error message + +```jsx +// Get post where id is 180 +get('posts/180'); +``` + +Discuss the results with your administrator. + +### 7. Each and array iteration + +We often have to perform the same operation multiple times for items in an +array. Most of the helper functions for data manipulation are inherited from +@openfn/language-common and are available in most of the adaptors. + +##### Modify getPosts.js to group posts by user-ID + +```js title="getPosts.js" +// Get all posts +get('posts'); + +// Group posts by user +fn(state => { + const posts = state.data; + + // Group posts by userId + const groupPostsByUserId = posts.reduce((acc, post) => { + const existingValue = acc[post.userId] || []; + return { ...acc, [post.userId]: [...existingValue, post] }; + }, {}); + + // console.log(groupPostsByUserId); + return { ...state, groupPostsByUserId }; +}); + +// Log posts where userId = 1 +fn(state => { + const { groupPostsByUserId } = state; + const posts = groupPostsByUserId[1]; + + // console.log("Post with userId 1", groupPostsByUserId[1]); + return { ...state, posts }; +}); + +each('posts[*]', state => { + console.log('Post', JSON.stringify(state.data, null, 2)); + return state; +}); +``` + +Notice how this code uses the `each` function, a helper function defined in +[language-common](/adaptors/packages/common-docs/#eachdatasource-operation--operation) +but accessed from this job that is using language-http. Most adaptors import and +export many functions from `language-common`. + +##### Run **openfn getPosts.js -a http -o tmp/output.json** + +> Expected CLI logs + +```bash +[CLI] ✔ Compiled job from getPosts.js +GET request succeeded with 200 ✓ +[R/T] ✔ Operation 1 complete in 730ms +[R/T] ✔ Operation 2 complete in 0ms +[R/T] ✔ Operation 3 complete in 0ms +[JOB] ℹ Posts [ +// Posts +] +[R/T] ✔ Operation 4 complete in 10ms +[CLI] ✔ Writing output to tmp/output.json +[CLI] ✔ Done in 1.091s! ✨ +``` + +#### 🏆 Challenge: Reduce, filter, and map + +Using Javascript globals i.e `Array.reduce`, `Array.filter` or `Array.map`, +build function that will get posts by user id. + +1. Create a file called job1.js +2. Add the 1st operation which is get all posts +3. Add 2nd operation which has a function that filter posts by id +4. Use the function from 2nd operation to get all post for user id 1 + +Discuss the results with your administrator. + +### 8. Running Workflows + +As of `v0.0.35` the `@openfn/cli` supports running not only jobs, but also +_workflows_. Running a workflow allows you to define a list of jobs and rules +for executing them. You can use a workflow to orchestrate the flow of data +between systems in a structured and automated way. + +_For example, if you have two jobs in your workflow (GET users from system A & +POST users to system B), you can set up your workflow to run all jobs in +sequence from start to finish. 
This imitates the
+[flow trigger patterns](https://docs.openfn.org/documentation/build/triggers#flow-triggers)
+on the OpenFn platform, where a second job runs after the first one succeeds,
+using the data returned from the first job._
+
+:::info tl;dr
+
+You won't have to assemble the initial state of the next job; the final state of
+the upstream job will automatically be passed down to the downstream job as the
+initial state.
+
+:::
+
+##### Workflow
+
+A workflow is the execution plan for running several jobs in a sequence. It is
+defined as a JSON object that consists of the following properties:
+
+- `start` (optional): The ID of the job that should be executed first (defaults
+  to jobs[0]).
+- `jobs` (required): An array of job objects, each of which represents a
+  specific task to be executed.
+  - `id` (required): A job name that is unique to the workflow and helps you ID
+    your job.
+  - `configuration` (optional): Specifies the configuration file associated with
+    the job.
+  - `data` (optional): A JSON object that contains the pre-populated data.
+  - `adaptor` (required): Specifies the adaptor used for the job (version
+    optional).
+  - `expression` (required): Specifies the JavaScript file associated with the
+    job. It can also be a string that contains a JavaScript function to be
+    executed as the job.
+  - `next` (optional): An object that specifies which jobs to call next. All
+    edges returning true will run. The object should have one or more key-value
+    pairs, where the key is the ID of the next job, and the value is a boolean
+    expression that determines whether the next job should be executed. If there
+    are no next edges, the workflow will end.
+
+###### Example of a workflow
+
+Here's an example of a simple workflow that consists of three jobs: + +```json title="workflow.json" +{ + "start": "getPatients", + "jobs": [ + { + "id": "getPatients", + "adaptor": "http", + "expression": "getPatients.js", + "configuration": "tmp/http-creds.json", + "next": { + "getGlobalOrgUnits": true + } + }, + { + "id": "getGlobalOrgUnits", + "adaptor": "common", + "expression": "getGlobalOrgUnits.js", + "next": { + "createTEIs": true + } + }, + { + "id": "createTEIs", + "adaptor": "dhis2", + "expression": "createTEIs.js", + "configuration": "tmp/dhis2-creds.json" + } + ] +} +``` + +
+ +
+ tmp/http-creds.json + +```json title="tmp/http-creds.json" +{ + "baseUrl": "https://jsonplaceholder.typicode.com/" +} +``` + +
+ +
+ tmp/dhis2-creds.json + +```json title="tmp/dhis2-creds.json" +{ + "hostUrl": "https://play.dhis2.org/2.39.1.2", + "password": "district", + "username": "admin" +} +``` + +
+ +
+ getPatients.js + +```js title="getPatients.js" +// Get users from jsonplaceholder +get('users'); + +// Prepare new users as new patients +fn(state => { + const newPatients = state.data; + return { ...state, newPatients }; +}); +``` + +
+ +
+ getGlobalOrgUnits.js + +```js title="getGlobalOrgUnits.js" +// Globals: orgUnits +fn(state => { + const globalOrgUnits = [ + { + label: 'Njandama MCHP', + id: 'g8upMTyEZGZ', + source: 'Gwenborough', + }, + { + label: 'Njandama MCHP', + id: 'g8upMTyEZGZ', + source: 'Wisokyburgh', + }, + { + label: 'Njandama MCHP', + id: 'g8upMTyEZGZ', + source: 'McKenziehaven', + }, + { + label: 'Njandama MCHP', + id: 'g8upMTyEZGZ', + source: 'South Elvis', + }, + { + label: 'Ngelehun CHC', + id: 'IpHINAT79UW', + source: 'Roscoeview', + }, + { + label: 'Ngelehun CHC', + id: 'IpHINAT79UW', + source: 'South Christy', + }, + { + label: 'Ngelehun CHC', + id: 'IpHINAT79UW', + source: 'Howemouth', + }, + { + label: 'Ngelehun CHC', + id: 'IpHINAT79UW', + source: 'Aliyaview', + }, + { + label: 'Baoma Station CHP', + id: 'jNb63DIHuwU', + source: 'Bartholomebury', + }, + { + label: 'Baoma Station CHP', + id: 'jNb63DIHuwU', + source: 'Lebsackbury', + }, + ]; + + return { ...state, globalOrgUnits }; +}); +``` + +
+ +
+ createTEIs.js + +```js title="createTEIs.js" +fn(state => { + const { newPatients, globalOrgUnits } = state; + + const getOrgUnit = city => + globalOrgUnits.find(orgUnit => orgUnit.source === city).id; + + const mappedEntities = newPatients.map(patient => { + const [firstName = 'Patient', lastName = 'Test'] = ( + patient.name || '' + ).split(' '); + + const orgUnit = getOrgUnit(patient.address.city); + + const attributes = [ + { attribute: 'w75KJ2mc4zz', value: firstName }, + { attribute: 'zDhUuAYrxNC', value: lastName }, + { attribute: 'cejWyOfXge6', value: 'Male' }, + ]; + + return { ...patient, attributes: attributes, orgUnit: orgUnit }; + }); + + return { ...state, mappedEntities }; +}); + +each( + 'mappedEntities[*]', + create('trackedEntityInstances', { + orgUnit: dataValue('orgUnit'), + trackedEntityType: 'nEenWmSyUEp', + attributes: dataValue('attributes'), + }) +); +``` + +
+ +Run `openfn [path/to/workflow.json]` to execute the workflow. + +
+ +For example if you created workflow.json in the root of your project directory, This is how your project will look like + + +```bash + devchallenge + ├── .gitignore + ├── getPatients.js + ├── createTEIs.js + ├── getGlobalOrgUnits.js + ├── workflow.json + └── tmp + ├── http-creds.json + ├── dhis2-creds.json + └── output.json +``` + +
+ +```bash +openfn workflow.json -o tmp/output.json +``` + +On execution, this workflow will first run the `getPatients.js` job. If is +successful, `getGlobalOrgUnits.js` will run using the final state of +`getPatients.js`. If `getGlobalOrgUnits.js` is successful, `createTEIs.js` will +run using the final state of `getGlobalOrgUnits.js`. + +Note that without the `-i` flag, you'll need to already have your adaptor +installed. To execute the workflow with the adaptor autoinstall option run this +command: + +```bash +openfn workflow.json -i -o tmp/output.json +``` + +On execution, this workflow will first auto-install the adaptors then run the +workflow + +:::danger Important + +When working with the `workflow.json` file, it is important to handle sensitive +information, such as credentials and initial input data, in a secure manner. To +ensure the protection of your sensitive data, please follow the guidelines +outlined below: + +1. Configuration Key: In the `workflow.json` file, specify a path to a git + ignored configuration file that will contain necessary credentials that will + be used to access the destination system. For example: + + ```json + { + ... + "configuration": "tmp/openMRS-credentials.json" + }, + ``` + +2. Data Key: Incase you need to pass initial data to your job, specify a path to + a gitignored data file + ```json + { + ... + "data": "tmp/initial-data.json", + } + ``` + +::: + +## CLI Usage - Key Commands + +You’ll learn about these commands in the following challenges, but please refer +to this section for the key commands used in working with the CLI. + +### Check the version + +```bash +openfn version +``` + +### Get help + +```bash +openfn help +``` + +### Run a job + +```bash +openfn path/to/job.js -ia {adaptor-name} +``` + +Note: You MUST specify which adaptor to use. Pass the `-i` flag to auto-install +that adaptor (it's safe to do this redundantly). + +You can find the list of publicly available adaptors [here](/adaptors). + +> Path is the job to load the job from (a .js file or a dir containing a job.js +> file) For example `openfn execute hello.js ` Reads hello.js, looks for state +> and output in foo + +```bash +-i, --autoinstall Auto-install the language adaptor +-a, --adaptors, --adaptor A language adaptor to use for the job +``` + +If an adaptor is already installed by auto install, you can use the command +without the `-i` options. i.e `openfn hello.js -a http` + +### Change log level + +You can pass `-l info` or `--log info` to get more feedback about what's +happening, or `--log debug` for more details than you could ever use. Below is +the list of different log levels + +```bash +openfn hello.js -a http -l none +``` + +| log level | description | +| ------------ | -------------------------------------------------------- | +| `-l none` | Quiet mode | +| `-l default` | Top level information of what is happening | +| `-l info` | Get more feedback on what is happening openfn | +| `-l debug` | Get information about runtime, cli, compiler and the job | + +### Compilation + +The CLI will attempt to compile your job code into normalized Javascript. It +will do a number of things to make your code robust, portable, and easier to +debug from a pure JS perspective. + +```bash +openfn compile [path] +``` + +Will compile the openfn job and print or save the resulting js. 
+ +Learn more about CLI +[github.com/OpenFn/kit/](https://github.com/OpenFn/kit/tree/main/packages/cli) diff --git a/versioned_docs/version-legacy/core.md b/versioned_docs/version-legacy/core.md new file mode 100644 index 00000000000..be0a0607041 --- /dev/null +++ b/versioned_docs/version-legacy/core.md @@ -0,0 +1,58 @@ +--- +title: Core +--- + +:::caution Core reaching EOL in 2023. + +OpenFn/core is currently still being used by the v1 platform (www.openfn.org) +but is reaching end-of-life in 2023. + +::: + +## What is core? + +Core is the central job processing program used in the OpenFn platform. It's +what actually executes `jobs` with `state` and `adaptors` to do work for +governments and NGOs all over the world. + +## Where is it used? + +Core is used in OpenFn v1 (the web platform) and by developers who want to test +job execution on their local machines. It's _not_ used in Lightning (OpenFn v2) +which instead makes use of the new runtime. For a local developer experience +using the new runtime, check out [CLI](/documentation/cli). + +## Why might I want to use it now? + +If you've got jobs running on OpenFn v1 and want to test them locally, core will +give you the exact same job running experience as you see on the web. This can +be incredibly helpful for debugging. + +:::tip Using the new CLI. + +If you're a new OpenFn user and want to build or test jobs for Lighting (v2) and beyond in 2023, use the new [CLI](/documentation/cli) instead! + +::: + +## How do I use it? + +Check out the official documentation on +[Github](https://github.com/OpenFn/core). + +The tl;dr: is that you execute jobs from the command line by passing in an +expression, state, and the path to an adaptor. + +```sh +npm install @openfn/core +core execute -l ../language-http.Adaptor -e ./some-exprsesion.js -s ./some-state.json +``` + +The full options are: + +```sh +-l, --language resolvable language/adaptor path [required] +-e, --expression target expression to execute [required] +-s, --state Path to initial state file. [required] +-o, --output Path to write result from expression +-t, --test Intercepts and logs all HTTP requests to console +``` diff --git a/versioned_docs/version-legacy/deploy/options.md b/versioned_docs/version-legacy/deploy/options.md new file mode 100644 index 00000000000..39841b8417b --- /dev/null +++ b/versioned_docs/version-legacy/deploy/options.md @@ -0,0 +1,112 @@ +--- +title: Planning +--- + +## Introduction + +OpenFn can be used as a secure, stable, scalable cloud-hosted offering or +deployed locally—with both managed and un-managed options. Regardless of which +path you choose, you can configure OpenFn so that no sensitive data is kept +outside your country's borders. + +:::success Portability + +Because of OpenFn's [portability specification](/portability.md) and open-source +deployment tools you can transition between these various pathways at any time. +We're committed to a **no vendor lock-in** experience. 
+ +::: + +| Pathway | Free | Standard | Dedicated | Do-it-yourself (DIY) | +| :-------------------: | :------------------------------------------------------------------------------------------: | :-----------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------: | +| Description | Go live today on OpenFn.org for small scale projects | Scale up and down, pay only for what you need | A dedicated, unrestricted OpenFn installation anywhere in the world on our servers or yours | Deploy and manage your own solutions with OpenFn | +| License | Free forever with usage limitations | **SaaS** [plans](https://www.openfn.org/pricing); contact enterprise@openfn.org for custom/invoice agreements | **SDaaS** includes deployment, maintenance, security patches, upgrades, and troubleshooting as a service; contact enterprise@openfn.org | LGPLv3 means use freely as part of any closed or open-source solution, but make all _derivative_ works open source | +| Location | Secure **global** cloud infrastructure | Secure **global** cloud infrastructure | **Local (in-country)** or **Global** infrastructure | Wherever you want | +| Deployment | **Click to start** at [OpenFn.org](https://www.openfn.org/signup) | **Click to start** at [OpenFn.org](https://www.openfn.org/signup) | **Contact** enterprise@openfn.org | Read this docs page and visit our [Github](https://www.github.com/OpenFn) | +| Setup & Configuration | **Your choice** to setup yourself, with a certified implementer, or with the OpenFn.org team | **Your choice** to setup yourself, with a certified implementer, or with the OpenFn.org team | **Your choice** to setup yourself, with a certified implementer, or with the OpenFn.org team | **Your choice** to setup yourself, with a certified implementer, or with the OpenFn.org team | +| Support | Give & receive support via the [community](https://community.openfn.org) | Various levels from support@openfn.org | Various levels from support@openfn.org | Give & receive support via the [community](https://community.openfn.org) | + +## Sample Local Deployment Plan + +:::info This is just an example + +Your requirements will vary, but this is an example plan for achieving a +large-scale, high-sensitivity local deployment. + +::: + +If you're considering a large-scale/high-sensitivity implementation of OpenFn on +local/government-managed servers, you might: + +1. **Run a proof-of-concept, prototype, or time-bound production solution** + using the cloud hosted offering while determining fit and value. (It's a more + secure, less expensive, and faster way to prove out the value and viability + of the solution itself.) +2. While the first phase is running, **assess the value and begin + preparations**: + 1. Assess the **value of the solution** itself—is it solving the problems you + thought it would? + 2. Asses your **data residency requirements**—do you need to run this + solution in country? + 3. Assess your team’s technical **DevOps capacity**—how are other local + deployments of DPGs going? + 4. Assess your countries computing, storage, and networking + infrastructure—what options\* are available for servers and network + connectivity? + 5. 
Determine if a **"zero-persistence"** cloud solution or a **locally + deployed** solution is best for your ministry—with the data above run a + cost-benefit analysis on both options. +3. Engage with OpenFn.org or a certified partner to **practice deployment**, + migration, rollback, restart, backup, etc. +4. Using OpenFn’s portability tools, **run a cloned local version** of your + cloud-hosted solution to assess the readiness of your local deployment. +5. Establish a **fail-over protocol** with OpenFn to “fail to cloud” for + critical systems. + 1. How often should the implementation configuration (not the sensitive data) + be backed up to the OpenFn.org hosted cloud? + 2. What credentials and/or test environments should the cloud backup have + access to? + 3. Establish a plan for switching between cloud and local. +6. Establish a **support retainer** with OpenFn-certified local vendors and/or + the OpenFn core team to help maintain the local deployment in case of issues. +7. **Transition fully to your local deployment** and maintain capacity to + support or redeploy your solution to other cloud or local servers. +8. **Monitor & adjust your strategy** as and when required by your country’s + usage and data sovereignty requirements evolve over time. + +\*Head over to the [Requirements](./requirements.md) page for more information +on recommended server specifications. + +## Moving from cloud to local (v1 or v2) + +For users that are planning a self-hosted implementation, we recommend +developing and testing the initial solution on the OpenFn SaaS (v1 or v2, +possibly on a free tier) and then exporting for use in Lightning (v2). + +This allows the implementer to focus on ironing out the business and technical +requirements of the automation before incurring deployment costs. Focus on the +solution, not the deployment. Your OpenFn solution can then be migrated to a +local deployment of Lightning once it has been piloted, its value has been +proven, and you're ready to scale it up. + +### A user journey for locally deployed OpenFn + +1. Build and test your workflows on [Openfn.org](https://www.openfn.org). +2. Export your OpenFn project _as code_ using the "export" button or deployment + CLI. +3. Deploy your local instance of OpenFn/Lightning. +4. Import your project (from step #2) to your local OpenFn/Lightning instance + using the deployment CLI. +5. Re-configure your credentials (credential secrets will _not_ be included in + the export). +6. Test your locally deployed project. + +## Technical Guidelines + +For detailed deployment documentation instructions, head to the Lightning +[developer docs page](https://openfn.github.io/Lightning/readme.html), paying +special attention to several sections: + +1. [Getting Started](https://openfn.github.io/Lightning/readme.html#getting-started) +2. [Deployment Considerations](https://openfn.github.io/Lightning/deployment.html) +3. [Benchmarking](https://openfn.github.io/Lightning/benchmarking.html) diff --git a/versioned_docs/version-legacy/deploy/requirements.md b/versioned_docs/version-legacy/deploy/requirements.md new file mode 100644 index 00000000000..365a7b333a1 --- /dev/null +++ b/versioned_docs/version-legacy/deploy/requirements.md @@ -0,0 +1,158 @@ +--- +title: Requirements +--- + +## Plan first + +Not sure where to start? Head back to the +["Planning"](/documentation/deploy/options) page to think about how you want to +scale up your OpenFn automation projects. 
+ +## Assess your capacity + +:::info Help your partner estimate up-front and ongoing costs + +Use these questions to start assessing capacity and technical resources so that +your deployment partner can better estimate your total cost of ownership. + +::: + +1. How do you currently deploy, monitor, and maintain cloud-based applications + at your organization/government? All deployment environments and institutions + are unique and OpenFn is flexible; based on your current dev-ops processes we + will recommend different deployment mechanisms. +2. What IT and DevOps staff resources are available to support OpenFn deployment + and maintenance? Do they have experience with Docker & Kubernetes? Do they + have experience with Postgres databases? +3. Will the deployment require high-availability? (i.e., if OpenFn will receive + requests in real-time from other applications rather than run cron-based + jobs, then at least two instances of OpenFn should be run simultaneously + behind a load-balancer, making use of “distributed Erlang” to ensure graceful + application redundancy; if OpenFn will not be responsible for receiving + requests and will only be responsible for making relatively time-independent + outbound requests on a cron schedule, the importance of maintaining a + zero-downtime system is slightly reduced.) + +## Knowledge Requirements + +| Skill | Relevance and reason | +| ---------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Erlang | The OpenFn **webapp/orchestration layer** is an Erlang OTP application. | +| Javascript | The OpenFn **job processing workers** and OpenFn workflows themselves are Javascript-based. With knowledge of how NodeJs works you can build workflows that do _anything_. | +| Postgres | The default **database** for OpenFn is PostgreSQL | +| Docker | We publish all **OpenFn [images](https://hub.docker.com/repository/docker/openfn/lightning/general)** on Docker Hub. Whether you're streamlining developer setup or using container orchestration technologies, understanding docker and containerized computing is helpful. | +| Kubernetes | For high-availability deployments, Kubernetes services provide **load balancing** and simplify **container management** on multiple hosts. They make it easy for an enterprise's apps to have greater scalability and be flexible, portable and more productive. | + +## Machine Requirements + +:::tip If you're going with "DIY", start simple + +Kubernetes is _NOT_ required, but it's recommended for high-availability +deployments. Consider docker or bare-metal deployments (Erlang OTP apps work +very well on Linux) for a simpler setup. + +::: + +The official OpenFn SaaS uses [Kubernetes](https://kubernetes.io/) for managed +deployments on Google Cloud and we recommend it for high-availability and +scalable deployments. With dynamic workloads, it is important (for stability and +cost reasons) to be able to scale the Erlang OTP app node pool & pods +independently of the Javascript worker node pool & pods. + +1. Use a scalable SQL service and keeping _at least_ two app nodes running with + the following specs will help prevent unwanted downtime. + 1. **GKE requests:** cpu@ "500m", memory@ "1024Mi" + 2. **GKE limits:** memory@ "2560Mi" +2. 
For a simple non-Kubernetes/HA deployment, the minimum recommended machines
+   are:
+   - **Application machine:** 2 vCPU (roughly a single core of a 2.6 GHz Intel
+     Xeon E5) with 3.75 GB memory and 15 GB of storage for the application.
+     1. Any Linux-based operating system that can run Docker (Ubuntu 20.04+ or
+        Debian 9+).
+     2. Docker (18 or greater).
+   - **Database machine:** 2 vCPU (roughly a single core of a 2.6 GHz Intel Xeon
+     E5) with 3.75 GB memory. Storage required for the DB varies by how many
+     days (if any) of message data you’d like to store on the app itself and
+     cannot be determined without estimates for message/run throughput. If
+     scaling physical storage is not difficult for your particular deployment,
+     start at 40 GB. Run a Postgres (at least v14.2) instance on a
+     _separate server_ from the application for greater stability.
+3. If both the application and database are hosted on the same machine (which is
+   not recommended), that machine should have roughly the sum of the requirements
+   above.
+4. **Note** that the application by default provides an HTTP endpoint (no
+   TLS/SSL). A reverse-proxy/load-balancer is expected to provide both HTTPS
+   (HTTP2 compliant) and load balancing between instances.
+   - _I.e. the application server provides no encryption for web access; a web
+     server in front of the application needs to be provided. Nginx is a good
+     start, provided with TLS certificates._
+5. While network architecture is up to the client, we strongly **recommend a
+   private subnet** for the application servers.
+6. The OpenFn application does not need to be deployed on the same machine as
+   any other services; however, network routing and firewall rules will need to
+   be provided in order for the integration to access the source and destination
+   systems if hosted on different servers.
+7. For **troubleshooting/external support**, administrators will need SSH access
+   to an unrestricted account (`sudo` for Ubuntu) if deployment maintenance
+   services are required.
+
+## Possible Configurations
+
+While your deployment strategy should be carefully considered with a DevOps
+specialist, the following sample configurations may provide useful starting
+points.
+
+### (a) Simple
+
+Deploy the application and database on the same machine.
+
+```mermaid
+flowchart TB
+  subgraph "Linux VM with Docker"
+    ex1-.-db1
+    direction TB
+    ex1(Erlang OTP App with JS Worker)
+    db1[(PostgreSQL)]
+  end
+```
+
+### (b) Recommended Minimum
+
+Deploy the application and database on separate machines.
+
+```mermaid
+flowchart TB
+  ex1-.-db1
+  subgraph "Linux VM with Docker"
+    direction LR
+    ex1(Erlang OTP App)-.-js1(NodeJs Worker App)
+  end
+  subgraph "Linux VM"
+    db1[(PostgreSQL)]
+  end
+```
+
+### (c) Ideal
+
+Auto-scale different optimized node pools for the Erlang orchestration app and
+the Javascript worker app. 
+
+```mermaid
+flowchart TB
+  ex1-.-db1
+  ex1-.-js1
+  lb1-->ex1
+  subgraph "Load Balancer"
+    lb1(Ingress)
+  end
+  subgraph "Pool of VMs for Erlang apps"
+    direction LR
+    ex1(Erlang OTP Apps)
+  end
+  subgraph "Pool of VMs for JS Worker Apps"
+    js1(NodeJs Worker Apps)
+  end
+  subgraph "Linux VM"
+    db1[(PostgreSQL)]
+  end
+```
diff --git a/versioned_docs/version-legacy/design/design-quickstart.md b/versioned_docs/version-legacy/design/design-quickstart.md
new file mode 100644
index 00000000000..21791e27d2d
--- /dev/null
+++ b/versioned_docs/version-legacy/design/design-quickstart.md
@@ -0,0 +1,193 @@
+---
+title: Integration Design
+---
+
+# Getting Started on Integration Design
+
+**Integration design begins with the functional or business requirements (not
+the technical bits).** Therefore, you do not need to be an IT consultant or
+software engineer to start designing an integration solution! (Although having
+those resources certainly helps when we get to the technical specifications...
+but more on that later).
+
+A clear understanding and _documentation_ of the business processes, functional
+requirements, and people interacting with your desired integration are the first
+critical step in integration design. As you're planning for your next
+integration project, start developing the following documentation to get started
+with your solution.
+
+## 1. Capture requirements as user stories
+
+Documenting _why_ the integration is needed and the driving requirements is
+important to making sure the priority needs are identified and that everyone is
+aligned on project expectations.
+
+_User stories_ are short, simple descriptions of a requirement told from the
+perspective of the person who desires the new functionality.
+
+> As a `<type of user>`, I want `<some functionality>` so that
+> `<some benefit or reason>`.
+
+Good user stories will capture 3 parts:
+
+1. _Who_ - who is using the solution?
+2. _What_ - what do they hope to achieve via the solution?
+3. _Why_ - why is this desire important? What are the business implications?
+
+If you capture these 3 elements, user stories can be an effective way of
+detailing integration requirements and starting discussions at your organization
+about which requirements are the priority.
+
+### Example user stories:
+
+- **Case Referrals:** As a caseworker, I want to automatically send referral
+  requests to my partner agency using another case management system, so that I
+  can securely share case information and quickly notify them when their
+  services are needed in a crisis situation.
+- **EMR - HIS:** As a clinic manager, I would like to integrate patient data
+  from the district clinic electronic medical record system with the national
+  DHIS2 health information system, so that I can securely and automatically
+  report on health outcomes for key indicators in my district.
+- **Kobo Toolbox - MSSQL Database:** As an M&E manager, I want to monitor Kobo
+  Toolbox survey responses in a central database in real-time, so that I can
+  better understand data collection activities and program performance across my
+  research partner sites.
+
+## 2. Diagram the business process
+
+Once the user stories have been identified, start to document the functional
+processes that are in place (or will need to be implemented) in order to achieve
+the desired requirements. These might be automated or human/manually-driven
+processes. This is the precursor step to mapping out the data flow (which
+details the technical steps for how connections will be made and data exchanged
+between systems). 
Again, process mapping is _business analysis_, not a technical
+exercise.
+
+For example, if you want to exchange information between your organization and
+another... how might this exchange work from a functional point of view? _What_
+information will be exchanged? With _whom_ (between which systems or users)?
+_When_ will the information be exchanged? And what are the human or automation
+steps that should facilitate and trigger this exchange? These business process
+questions are discussed in more detail on the
+[So, what is an integration?](/documentation/getting-started/so-you-want-to-integrate/)
+page.
+
+:::tip Capture the current & desired process
+
+Document _current and desired_ business processes in order to determine how
+information should be exchanged between your organization/system/users and
+others, and to ensure alignment of expectations and assumptions across partners.
+
+:::
+
+### Use BPMN for standardized documentation
+
+When documenting internal business procedures, consider using Business Process
+Model and Notation (BPMN) as a standard way to graphically document key
+business processes. BPMN (learn more about standard
+[BPMN 2.0](https://www.omg.org/spec/BPMN/2.0/)) has flowchart-like symbols and
+precise notation that can be translated to software process components.
+
+Check out these resources for learning & building your own BPMN diagrams:
+
+- `BPMN.io` open-source modeler: https://bpmn.io/
+- `Camunda BPMN Tool` includes a free tool and tutorial:
+  https://camunda.com/bpmn/
+- `LucidChart` provides a very user-friendly diagramming interface:
+  https://www.lucidchart.com/pages/bpmn
+
+Looking for a crash course? This video provides a quick overview of BPMN and how
+to use it: https://www.youtube.com/watch?v=BwkNceoybvA
+
+### OpenFn Examples of BPMN Diagrams
+
+See the example BPMN diagram below for the user story:
+
+> As a program manager, I want to extract beneficiary details ("tracked entity
+> instances") from my country's DHIS2 system, so that I can enroll them as
+> contacts in my SMS campaign configured on RapidPro to send them automated
+> alerts and program updates.
+
+
+
+## 3. Map data elements to be exchanged
+
+Once the business processes are documented, start to document the specific data
+elements to be exchanged. This exercise requires a lot of specificity to detail
+the individual "fields" or "attributes" to be exchanged.
+
+The output of this exercise is a `Data Element Mapping Specification`, which will
+serve as (1) documentation of the specific data elements agreed to be exchanged,
+as well as (2) a guide for how to translate meaning between partners and systems
+(e.g., `client` in one system might mean `patient` in another).
+
+If your organization already has a data management or sharing agreement, this
+might be the perfect starting point for identifying the specific data points to
+be exchanged.
+
+### Mapping Specification Template
+
+Collaborating on mapping specifications with implementing partners is an
+important exercise for documenting very specific requirements and building
+consensus on what data exactly will be exchanged, and how.
+
+:::tip Template for getting started
+
+[Use this template](https://docs.google.com/spreadsheets/d/1IqTIgOzyOztEevXbgY_4uE8Y8tiHXufZXx-IyJZase0/edit?usp=sharing)
+for drafting your own data element mapping specification.
+
+:::
+
+This template includes:
+
+1. 
Details on the source metadata such as field API name, data type, sample data + values and comments: + ![image](https://user-images.githubusercontent.com/80456839/130796010-fe900c03-1bff-40c0-9263-c29e22d9191f.png) +2. Similar details on the destination metadata: + ![image](https://user-images.githubusercontent.com/80456839/130796087-67b0359d-207a-4169-aa88-6609572b2561.png) +3. Notes on data transformations & cleaning required and comments for tracking + changes & questions for technical input: + + ![image](https://user-images.githubusercontent.com/80456839/130796170-2e29a997-9b41-44f7-ac60-79375d096cc9.png) + +### To build a complete mapping specification, you’ll need to... + +1. Define the data elements to be exchanged by connected systems (you may need + to export target system metadata to identify exact names) +2. Determine which data elements belong to the data source, and which belong to + the destination system +3. Agree on how the data elements should map between target systems +4. Analyze the structure and quality of the data to consider if data values will + need to be transformed, cleaned, or re-labeled in order to map + +This mapping will serve as a blueprint for the technical integration setup, and +will be an important artefact of the data sharing agreement between partners. + +### Tips for drafting mapping specifications + +- **Version mappings** to keep track of mapping change requests over time +- **Color coding:** Highlight any fields that require further discussion one + color and any newly added fields another color +- Use Google Sheets or shared document comments and email to notify appropriate + users when changes have been made to the mappings +- Include a **legend/README** that explains how to use the mappings template +- **Unique identifiers:** spend time evaluating available unique identifier + schemes and what options may already be implemented by related information + systems +- When mapping multiple choice questions, make sure to consider how the answer + choices should map to the source/destination system. +- Check out system-specific mapping tips by visiting the `Apps` section of the + Docs site (see sidebar). + +## Next Steps + +Once you have your (1) `user stories`, (2) `business process diagrams`, and (3) +`data element specifications` defined, you have produced a suite of functional +design documentation that will drive the technical design of your integration +solution, as well as memorialize the business decisions and agreements made by +implementing partners. + +Share this documentation with any technical implementation team, or check out +the [Build](/documentation/build/jobs) documentation section to learn how to +implement these design specifications using OpenFn. diff --git a/versioned_docs/version-legacy/design/when-to-integrate.md b/versioned_docs/version-legacy/design/when-to-integrate.md new file mode 100644 index 00000000000..2055f4d6450 --- /dev/null +++ b/versioned_docs/version-legacy/design/when-to-integrate.md @@ -0,0 +1,8 @@ +--- +Title: When to Integrate +--- + +This article is a stub... it's coming soon. + +In the meantime, check out Aleksa Krolls' +[Three Questions To Ask](articles/2020/06/24/three-questions-to-ask). 
diff --git a/versioned_docs/version-legacy/devtools/home.md b/versioned_docs/version-legacy/devtools/home.md new file mode 100644 index 00000000000..df6f670975b --- /dev/null +++ b/versioned_docs/version-legacy/devtools/home.md @@ -0,0 +1,361 @@ +--- +title: Devtools +--- + +:::caution Devtools is deprecated + +Please note that [OpenFn/devtools](https://github.com/OpenFn/devtools) are being +deprecated and replaced by [OpenFn/cli](/documentation/cli). Learn more about +CLI +[github.com/OpenFn/cli/](https://github.com/OpenFn/kit/tree/main/packages/cli) + +::: + +OpenFn/Devtools is a set of tools for writing & testing job expressions, +managing OpenFn projects, and developing new adaptors. It's how most people work +with OpenFn from their own command lines, outside of OpenFn.org, Microservice, +or Lightning. + +:::info Are you a developer? + +The [Devtools](https://github.com/OpenFn/devtools) repo is a collection of bash +and Node scripts, as well as a _suggested_ (but not necessary) directory +structure for working with OpenFn jobs and adaptors. + +To run OpenFn jobs locally, you only need [Core](/documentation/core) and at +least one adaptor, e.g. [language-http](https://github.com/OpenFn/language-http) +and you may prefer to install core globally via `npm install -g @openfn/core` + +::: + +## Up and running + +1. Install [git](https://git-scm.com/downloads) and + [Node.js](https://nodejs.org/en/download/) (version 14 or greater) + +2. Clone and install devtools to setup core, language-common, and language-http + using either SSH or HTTPS: + +```mdx-code-block +import CodeBlock from '@theme/CodeBlock'; + + + + + git clone git@github.com:OpenFn/devtools.git{'\n'} + cd devtools{'\n'} + ./install.sh ssh + + + + + git clone https://github.com/OpenFn/devtools.git{'\n'} + cd devtools{'\n'} + ./install.sh https + + + +``` + +_Note: If you get a "permission denied" message when running `./install.sh`, try +`run chmod +x ./install.sh ` then retry the install command._ + +## Usage + +Execute takes: + +1. `-l [language-package].Adaptor`: The adaptor being used +2. `-e [expression.js]:` The expression being tested +3. `-s [state.json]`: The message `data: {...}` and credential + `configuration: {...}` +4. `-o [output.json]`: The file to which the output will be written + +### Run a job using bash + +```sh +~/devtools/core/bin/core execute \ + -l ~/devtools/adaptors/language-http \ + -s ./tmp/state.json \ + -o ./tmp/output.json \ + -e ./tmp/expression.js +``` + +### More on Devtools + +```mdx-code-block +import ReactPlayer from 'react-player'; + + +``` + +### Install a specific adaptor version + +To install specific adaptors, run +`./install.sh ${ssh || https} language-${name}` + +When you install a new adaptor, the latest version will be enabled by default. +To switch the adaptor version when running jobs locally, in the root of the +adaptor directory, run: + +`git checkout tags/v2.4.15` (substitute `2.4.15` with the adaptor version you +want) + +### The `--test` option + +```sh +~/devtools/core/bin/core execute \ + -l ~/devtools/adaptors/language-http \ + -s ./tmp/state.json \ + -o ./tmp/output.json \ + -e ./tmp/expression.js \ + --test +``` + +This intercepts all HTTP requests and displays the request information for +debugging. + +#### `.FakeAdaptor` + +Adaptors may provide dummy modules for testing. `language-salesforce` has a +built-in `.FakeAdaptor` which allows a user to test expressions on data without +sending them to a real Salesforce server. 
+ +Instead of using `-l ./language-salesforce.Adaptor`, use +`-l./language-salesforce.FakeAdaptor` to test expressions offline: +`./core/bin/core execute -l ./language-salesforce.FakeAdaptor -s ./tmp/state.json -o ./tmp/output.json -e ./tmp/expression.js` + +#### Offline testing for other adaptors + +For most standard adaptors which make use of HTTP requests, you can add `--test` +to the execute command to intercept all HTTP requests and return a `200`. + +## Hands-on with devtools and the command line + +:::tip + +Check out this example workflow for using devtools in your day-to-day. + +::: + +1. `cd` in the folder containing the repo you're working on. +2. You can keep your job scripts anywhere, but store `state.json` and + `output.json` in a `tmp` folder. In our repos we always add the `tmp` + directory in our `.gitignore` file that tells Github to ignore the specified + paths. Make sure you have your `.gitignore` file and you know what's tracked + by Github and what's not. `state` and `config` may contain sensitive + configuration information and project data so never upload them to Github! +3. The devtools command is a mouthful. You can search your command line history + with `Ctl-r` and typing core to pull it up the devtools command. Notice that + it’s got line breaks and a flag for all the important bits… `-l` for + language-package (adaptor), `-s` for state, `-o` for output, and `-e` for + expression. You can also save your frequently used devtools commands in a + document and just copy-paste. +4. It's quick the change job names or the adaptor in the command. If you put all + your adaptors in the same folder `~/devtools/adaptors/language-_________` you + can quickly swap them in the command, as you can see in the video below. The + Backspace key deletes characters behind your cursor, Delete deletes them in + front. +5. You can use the TAB key to auto-complete the file path as you search for a + job. +6. Once you've changed a couple of characters for the adaptor and expression (in + the video `state` and `output` stayed the same because we're using the `tmp` + convention) press enter and see the results. + +![devtools](/img/devtools.gif) + +## Configure an OpenFn project + +The easiest way to configure a project is via the web interface (you can then +export or `openfn pull` the project as code) but you can also run +`./scripts/generate-project.js` helps you build a project config YAML +interactively, adding your triggers, credentials and jobs to the config. You can +read more about the config file +[here](https://openfn.github.io/microservice/readme.html#sample-configuration) + +If you choose `monolith` mode, all your job code will be included in the YAML. +In `URI` mode, you’ll get a config file with URI-s to your defined jobs. + +![Generate Project](/img/generate-project.gif) + +## Pre-Requisites + +1. [Node](https://nodejs.org/en/download/) is required to run jobs and use many + of the scripts in Devtools (e.g., `npm run build` is required after changes + to adaptors). + +2. A basic working knowledge of NodeJs, promises and asynchronous functions is + essential for writing adaptors. + +## Scripts + +Devtools comes with a collection of scripts to aid in setting up a development +environment for adaptor work, and include commands to quickly clone a large +number of adaptors, create tarballs of adaptors with only production +dependencies included, etc. 
+ +For the kitchen sink, run: + +```sh +./install ssh +./scripts/bootstrap npm-install +``` + +In order to run the scripts, ensure you have cd'd into the project directory and +enter `./scripts/` + +### bootstrap + +Installs all adaptors in `repos` file to the `/adaptors` directory and prepares +the working directory. This needs to be run before running any of the other +scripts. Pass `npm-install` to run npm install for each adaptor also. + +`./scripts/bootstrap npm-install` - to clone, set up hooks and npm install in +each `./scripts/bootstrap`- to clone and set up hooks in each + +### generate-project.js + +`./scripts/generate-project.js` interactively generates a YAML project +configuration file that can be used both on the OpenFn platform and in OpenFn +microservice to define projects. + +### generate-doclets + +Iterates overs all language pack folder names found in the `repos` list and +creates a doclet json file in the `doclets` directory. + +### analyse-doclets + +Iterates overs all doclets found in `doclets` and gives a tree view of the +doclet structure using [jsdoc-query](https://github.com/OpenFn/jsdoc-query). + +## Building adaptors for platform + +All adaptor releases are built inside a `docker container`. The importance of +running the build and release process through a container is to standardize the +build environment across the team. While adaptors can be built and run on lots +of different operating systems and architectures, when we run the platform on +Kubernetes it expects linux boxes running x86... so that's where we build these +official releases. + +Here's how to build and release adaptors: + +1. Reopen your package in **dev-container** by typing `ctrl+shift+p` (or + `cmd+shift+p` on mac) and choosing **Remote-Container: Rebuild and Reopen in + Container**. +2. After the build is finished, open a terminal in vscode and run + `openfn-devtools release .` to build, tag, and push to + [npm](https://www.npmjs.com/). +3. Run `openfn-devtools package-release .` to package everything with production + dependencies and push to [Github](https://github.com/openfn). + +Depending on how you've configured your local environment and your VSCode +installation, you might encounter access issues preventing connections to NPM +and GitHub. + +### Troubleshooting + +There are a number of issues that you may encounter related to sharing settings +that are responsible for passing ssh keys and local configurations from your +host machine into the VSCode container. + +### Git config issues + +An issue can pop up about git config not set, To solve this, you should probably +set your email and name globally using the commands below: + +```sh +git config --global user.email "youremail@something.com" +git config --global user.name "Your Name" +``` + +### SSH key issues + +You may find that you are unable to access your `ssh` keys from inside the +container. + +:::warning Error + +permission denied (publickey) + +::: + +To solve this, first make sure the `ssh agent` is +[up and running](https://code.visualstudio.com/docs/remote/containers#_sharing-git-credentials-with-your-container). +In MacOS, it is running by default. On Linux you can start the agent using the +command + +```sh +eval $(ssh-agent -s) +``` + +Then you can add these line your `~/.bash_profile` or `~/.zprofile` (for Zsh) to +make it run by default. 
+ +```sh +if [ -z "$SSH_AUTH_SOCK" ]; then + RUNNING_AGENT="`ps -ax | grep 'ssh-agent -s' | grep -v grep | wc -l | tr -d '[:space:]'`" + if [ "$RUNNING_AGENT" = "0" ]; then + # Launch a new instance of the agent + ssh-agent -s &> $HOME/.ssh/ssh-agent + fi + eval `cat $HOME/.ssh/ssh-agent` +fi +``` + +Next, run the command below to add your identity to the ssh agent: + +```mdx-code-block +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + + + ssh-add + + + + + ssh-add -A + + + +``` + +Finally, configure VSCode to share your local ssh keys with the dev container. +In VSCode, go to `Settings`, and in the search bar, type +`terminal.integrated.inherit`. You should see the option in the image below and +check it if it's unchecked. + +![vscode settings](/img/vscode-settings.png) + +### Github token sharing + +Our release process relies on a `GH_TOKEN` variable. Set up an +[access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token) +in Github. + +In your `~/.bash_profile` or `~/.zshrc` file, export the newly created token by +adding this line: + +```sh +export GH_TOKEN= +``` + +## Using a new adaptor in an OpenFn/platform instance + +1. Add your release to the `scripts/install-lp` script. +2. Add the version number to `priv/adaptors.json`. +3. Add the `bodySchema` to `CredentialView.js`. diff --git a/versioned_docs/version-legacy/faqs.md b/versioned_docs/version-legacy/faqs.md new file mode 100644 index 00000000000..3f5b54dd68b --- /dev/null +++ b/versioned_docs/version-legacy/faqs.md @@ -0,0 +1,262 @@ +--- +title: Frequently Asked Questions +sidebar_label: FAQs +--- + +Data integration, interoperability, and workflow automation can be confusing +subjects. Not to mention the fact that there are lots of different terms and +ways of talking about the same concepts. We get it. Here are a few questions +that come up a lot. + +## What is OpenFn? + +OpenFn is an **_integration platform as a service_**. This means our prime +directive is to move data quickly and securely between different software +systems. In most cases: + +1. A source application sends **messages** to your project’s **inbox** when + something happens. + +2. **Jobs** will be triggered, based on your **filters**, and use the data in + those messages to attempt specific actions in destination systems. + +3. The **logs** are recorded so you can see precisely what happened and when and + where it happened to take action in the event of a failed attempt—like + editing the job or even the source message and trying it again. + +## Who uses OpenFn? + +OpenFn is used by organizations big and small, but the individuals interacting +with the platform range from system administrators to Javascript developers. +With a basic understanding of Javascript, the flexibility of the platform is +almost limitless. + +## What is a `job`? + +OpenFn automation centers around jobs, which define the specific series of tasks +or database actions OpenFn should perform. They can be set to be activated +(triggered) at certain time intervals or when data matching specified criteria +is received. You can think of jobs as a set of instructions you might give a +data entry staff member (e.g., create a new Patient record in OpenMRS when a +form containing a newly registered client is received from CommCare, export data +to DHIS2 every week on Friday 11pm, send SMS with payment confirmation number +when payment confirmation message is received etc.). 
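+
+To make that concrete, a job is just a short script built from adaptor helper
+functions. A hypothetical job that registers a new patient in DHIS2 whenever a
+CommCare form is received might look like the sketch below (the form field
+paths and DHIS2 UIDs are purely illustrative):
+
+```js
+// Create a tracked entity instance from the incoming CommCare submission
+create('trackedEntityInstances', {
+  orgUnit: dataValue('form.facility_id'),
+  trackedEntityType: 'nEenWmSyUEp',
+  attributes: [
+    { attribute: 'w75KJ2mc4zz', value: dataValue('form.first_name') },
+    { attribute: 'zDhUuAYrxNC', value: dataValue('form.last_name') },
+  ],
+});
+```
+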
+ +:::note + +Jobs are fully configurable and reusable. They can also be chained together to +create [multi-step automation](jobs/multiple-operations) flows, two-way syncs. +and to keep data consistent between multiple applications (using multi-app Saga +patterns). You can read more on two-way synching below. + +::: + +## What is a `run`? + +A run is each individual execution of a job. Imagine that a job is configured to +create a new patient in OpenMRS whenever a case is opened in CommCare. Over the +next week, if 5 cases are opened in CommCare, you’ll see 5 different runs of +this one job in OpenFn. If 4 runs are successful and one has failed, you’ll see +4 new patients in OpenMRS, and your system administrator will have been notified +that one of those patients couldn’t be created (or whatever more robust +error-handling you’ve set up will take place.) + +Note that there’s not always a 1-to-1 mapping between runs and the real-world +things you’re working with. I might define a job that gets all updated event +data from DHIS2 for the last 2 weeks and publishes it to a public map using +CartoDB. This job will be triggered at specified time intervals, every 2 weeks +in this case, and after a month, we’ll only see 2 runs in OpenFn (that’s one run +every two weeks). Each run will have succeeded or failed, and each one might +have processed thousands of events from DHIS2. + +:::note + +For one last example, a single form submission in Open Data Kit might trigger a +job that creates new contacts and attendance records in Salesforce. In this +case, you’ll find a run for each ODK form submission, but each run will create +lots of different records in Salesforce—specifically, at least one contact and N +number of attendance records, corresponding to the number of items in your ODK +form’s “attendance repeat group”. + +::: + +## Is OpenFn open-source? + +OpenFn is a suite of different technologies with different licenses. We have +built and maintain dozens of open-source data transformation and API wrapper +software packages. Those are, for the most part, licensed under the **LGPL** and +can be used freely to extract, transform, and load data from a command line, or +as part of another software application. + +OpenFn also hosts a proprietary web-application that ties these tools together +(www.openfn.org) into an out-of-the-box integration management platform. This +platform is open-core, providing the powerful ETL tools that sit at the heart of +the proprietary OpenFn.org iPaaS as free and open-source software (FOSS). All of +the jobs running on OpenFn.org, as well as all of the underlying adaptors, can +be run offline using our FOSS tools. + +:::note + +OpenFn will also soon offer an enhanced FOSS implementation option called +[OpenFn/microservice](https://openfn.github.io/microservice/). This FOSS +microservice approach is currently in development with funding from the +[DIAL Open Source Center](https://www.osc.dial.community/), +[Digital Square](https://digitalsquare.org/), and the +[FCDO](https://www.gov.uk/government/organisations/foreign-commonwealth-development-office) +(formerly DFID). + +Please note that this pathway does not provide the entire OpenFn platform as +free and open source software (FOSS). 
In situations where a particular partner +or government is unable to use the proprietary platform (though it can be +deployed on local servers with an unlimited use license), this approach ensures +that all jobs, triggers, and project configuration can be exported from +OpenFn.org and used, in conjunction with OpenFn's FOSS ETL tools to deploy a +microservices-style implementation which incurs zero licence costs and provides +the basic data processing that OpenFn's platform does. While at the outset there +will be no web interface and no ability to reprocess messages, etc., these +features could be built by partners in time to replace the features of the +OpenFn platform. I.E., none of the initial investment in OpenFn will be lost if +the partners choose to build their own, fully open-source integration platform +based on our powerful open-source ETL tools. + +::: + +## How much does OpenFn cost? + +### Design & implementation costs + +OFG offers a range of packages to ensure successful first-time implementations, +which include integration consulting, design, configuration, and +capacity-building. Typical engagements take 1-5 days to complete, and our most +popular package is the Integration QuickStart, in which we spend 1 week to +design and configure ~5 integration flows end-to-end and provide administrative +training to your staff for $5,000. + +### Ongoing costs + +OpenFn.org offers a free plan for users seeking to trial the platform or +implement projects handling low data volumes (up to 100 runs/month). Usage of +OpenFn.org, the proprietary integration-platform-as-a-service (iPaaS), incurs +ongoing costs, which are largely dependent on the expected data volumes to be +processed. OpenFn offers monthly subscriptions, enterprise licenses for annual +and multi-year agreements, as well as unlimited and local deployment options. +Contact enterprise@openfn.org to learn more and for a tailored cost estimate. + +There are also available DIY options, as well as bespoke training services to +develop your capacity to implement and manage OpenFn independently. + +## Can I trial the platform? + +Yes. As a matter of fact, you can use it for free, forever. + +OpenFn.org offers a free plan to all users +([sign up here](https://www.openfn.org/signup)). Try it out using OpenFn Docs, +or contact our team for a free consultation and help getting started. Change +your OpenFn plan at any time (no lock-in!), or contact enterprise@openfn.org to +learn more about annual, enterprise, and unlimited licenses. + +:::tip + +At low volumes, or for prototyping, you can use the hosted platform for free +forever. + +::: + +## How reliable is the hosted service? + +OpenFn has harnessed the extreme stability and scalability of Erlang to +coordinate these actions and provide users with email alerts, project management +tools, and an online job writing IDE. + +We constantly monitor our own status with independent infrastructure at +[status.openfn.org](https://status.openfn.org). You can subscribe to +notifications there or follow [@openfnstatus](https://twitter.com/openfnstatus). + +We've been delivering this service continuously since 2014. + +## Can OpenFn integrate with my custom app? + +Yes, OpenFn can integrate with _any_ application. + +If your technology has a REST endpoint or webhooks service, it will likely work +right out of the box. This covers most web applications (e.g., CommCare, Kobo, +ODK, DHIS2, Salesforce, MS Dynamics, MPesa, etc.). 
OpenFn can also integrate +with most databases, like Postgres, MySql, and Mongo, custom applications, +legacy government systems, and can even parse CSV files–so long as these can be +accessed from an online location. Read more about +[connecting source applications](source-apps), or check out the Apps page for +applications widely implemented. + +We offer pre-built connectors (called "adaptors") for our users' most popular +apps to make the integration setup quicker and more user-friendly when +connecting with these tools. For example, users can implement language-http to +send basic HTTP requests to any web application, or implement language-dhis2 to +automatically handle DHIS2 authentication and access helper functions like +fetchData()to export DHIS2 datasets. + +## Does OpenFn support two-way syncing? + +Yes, OpenFn can support two-way syncing of applications. Utilizing +[Flow Triggers](build/triggers#flow-triggers), OpenFn jobs can be chained +together to facilitate real-time two-way data sync, +[multi-step automation](jobs/multiple-operations) and data cleaning processes, +and complex branching logic. Users can also implement bi-directional data syncs, +as well as complex Saga Patterns to implement a transaction that spans multiple +applications by configuring webhooks in their endpoint applications and +performing updates in both systems when events take place in either. + +## Do I need to know how to code? + +No, but it helps to have written a formula in MS Excel! Many OpenFn users are +familiar with data, not development, and quickly get comfortable with OpenFn +jobs. + +If your project is leveraging an OpenFn adaptor (e.g., `language-dhis2`), you +have access to pre-built helper functions (e.g., `getPatient`, `update`) so that +you don’t need to write custom code, and rather can use OpenFn documentation or +existing job scripts to write your own job. See OpenFn Github for inspiration +and open-source job code shared by OpenFn users. You’ll notice that these +functions work in the same way that functions do in Excel… `sum(A1, A2, A3)` + +Jobs can be written and extended using raw Javascript for advanced data cleaning +and manipulation. Therefore, you may want to implement Javascript to achieve +specific requirements or to extend existing OpenFn adaptors, which are +open-source! + +## Where is my data stored? + +OpenFn is a middleware provider rather than a data storage system. We move +information from system A to system B, and integrations can be set up to be +compliant with GDPR, HIPAA, and other policies. To make auditing and +reprocessing easy, OpenFn temporarily stores message data and job run history, +but we're not the single source of truth nor the final resting point for these +data. When organizations choose to use our hosted OpenFn platform at OpenFn.org, +no data processed by OpenFn is stored locally and our platform runs on the +Google Cloud Platform (GCP). Read more on our +[Compliance](https://www.openfn.org/compliance) page. + +OpenFn.org currently offers hosting on U.S. and Swiss-based cloud servers. +OpenFn local and in-country cloud deployments are also available upon request. +Contact enterprise@openfn.org to learn more. + +## Is my data secure? + +Yes, OpenFn prioritizes security, stability, and scalability (what we call +[S³](https://www.openfn.org/trust#s3)) above all else, and many of our users +implement OpenFn to comply with GDPR, HIPAA, and other policies. 
Read more on
+our [Trust](https://www.openfn.org/trust),
+[Compliance](https://www.openfn.org/compliance), and
+[Privacy](https://www.openfn.org/privacy) pages.
+
+OpenFn.org runs on the Google Cloud Platform, an infrastructure protected by
+more than 500 top experts in information, application, and network security. For
+organizations with specific compliance and data governance requirements, OpenFn
+can also be deployed on designated local or cloud infrastructure.
+
+## What if I have more questions?
+
+Open Function Group is a team of ICT4D and integration specialists waiting to
+help you get started. Click the chat icon in the bottom right-hand corner of
+this page to talk now, email our team at admin@openfn.org, chat with us on
+OpenFn.org, or post a question in our
+[Community Forum](https://community.openfn.org).
diff --git a/versioned_docs/version-legacy/for-devs.md b/versioned_docs/version-legacy/for-devs.md
new file mode 100644
index 00000000000..8569a5ccaca
--- /dev/null
+++ b/versioned_docs/version-legacy/for-devs.md
@@ -0,0 +1,77 @@
+---
+title: Developing Connected Applications
+sidebar_label: Building Compliant APIs
+---
+
+This section is for you if you are hoping to build or extend an existing
+application that can connect to OpenFn. We follow modern, web-standard JSON API
+guidelines.
+
+For your application to be a data provider (or "source") for OpenFn
+integrations, we highly recommend that you create a "notifications service"
+(sometimes called a "webhooks service" or "event-based push API"). This is
+preferable to using a REST API for two reasons: (1) a notifications service will
+give your clients the ability to set up real-time integrations, and (2) a
+notifications service is more efficient for both your servers and OpenFn—instead
+of having requests made and handled every X seconds, your servers and
+OpenFn's servers only do work when new data is available.
+
+For your application to be a consumer (or "destination") for OpenFn, you must
+either have a standard, JSON-based REST API or create a language-package that
+meets your API specifications.
+
+## Sending data to OpenFn
+
+To send data to OpenFn, your application must be able to make an HTTPS POST to
+an external URL with a valid JSON object as the post body. See the following
+example using cURL:
+
+```sh
+curl -X POST \
+  -H "Content-Type: application/json" \
+  -H "Cache-Control: no-cache" \
+  -d '{"foo":"bar", "baz":"qux"}' \
+  "https://staging.openfn.org/inbox/some-secret-api-key"
+```
+
+OpenFn will respond with a 200 and an empty JSON object in the event of a
+successful post. 400s mean that the user's external URL is wrong, and 500s mean
+that there is an application error on OpenFn. While 500s are rare, they could be
+due to invalid JSON in your POST body.
+
+If you cannot notify an external URL when some event takes place, you can still
+integrate with OpenFn if you have a JSON-based REST API. OpenFn users can make
+HTTP GET requests to your application and perform additional actions based on
+your response. You should allow either basic or token authentication and
+respond to a valid GET with JSON. There is no specific format for your
+response, as users can parse it any way they'd like, extracting relevant data
+and then performing other actions—like loading it into a destination system—with
+that data. See [language-http](https://www.github.com/openfn/language-http) for
+details on how users make these generic HTTP requests.
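+
+To make this concrete, here is a minimal, hypothetical job expression using the
+`get` helper from `language-http`. The URL, query parameter, and response field
+below are placeholders rather than a real API, and authentication would normally
+be handled by the credential configured for the job:
+
+```js
+// Illustrative sketch only: the endpoint and field names are invented.
+get(
+  'https://example.com/api/v1/records', // placeholder REST endpoint
+  {
+    query: { updated_since: '2022-01-01' }, // placeholder filter parameter
+  },
+  state => {
+    // The parsed JSON response is available on state.data for later operations.
+    console.log(`Fetched ${state.data.records.length} records.`);
+    return state;
+  }
+);
+```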
+ +### Payload sizing + +If you're using the platform, and you're not planning on using an enterprise +plan you'll have to consider the size of the data you're sending in each +payload. Run `state` is typically limited to `10MB` and you should therefore +keep your payloads well below that limit. + +## Receiving data from OpenFn + +To make it easy for users to connect to your application, it's highly +recommended that you create a language-package with your required authentication +and a set of simple, allowable actions nicely abstracted into "helper +functions". See [language-dhis2](https://www.github.com/openfn/language-dhis2) +for an example of a language-package which creates a simpler interface for a +traditional JSON-based REST api. Adaptors are written in Javascript and execute +in Node. You can convert OpenFn's JSON into XML, or any other format before +sending it to your application and you may make use of any node modules you'd +like. See +[language-postgresql](https://www.github.com/openfn/language-postgresql) for an +example of an adaptor that connects directly to PostgreSQL databases using a +popular NPM module called "pg". + +To receive data from OpenFn's generic `language-http` adaptor, your application +must allow either basic, token, or digest authenticated POST, PUT, or GET +requests. (Though it is not advisable to create an API that requires GET +requests to create or update data.) diff --git a/versioned_docs/version-legacy/getting-started/commcare-project-walkthrough.md b/versioned_docs/version-legacy/getting-started/commcare-project-walkthrough.md new file mode 100644 index 00000000000..c8c4e0d76aa --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/commcare-project-walkthrough.md @@ -0,0 +1,306 @@ +--- +title: + Walk-through - Syncing your CommCare form submissions to a PostgreSQL database +--- + +**Before starting this tutorial please make sure:** + +- You have signed up for [OpenFn.org](http://openfn.org) (it takes less than a + minute!) +- You have checked out our glossary and have an understanding of basic OpenFn + and API terminology. Check out the pages below to get started + - [OpenFn Concepts](/documentation/getting-started/terminology/) + - [A glossary for data integration](/documentation/getting-started/terminology/) +- You have a CommCare application with at least one form configured. This is + your source system. +- You have a PostgreSQL database configured. This is your destination system. + +**If you don’t have a CommCare application or PostgreSQL database setup, you can +also follow along with the prebuilt solution. Follow along at the links below:** + +1. [Mapping specifications document](https://docs.google.com/spreadsheets/d/1pi_oxImakhtaCCCIENkjTPZeuyWhpFEcNmH7hfvTBgo/edit?usp=sharing) +2. Commcare application to download: + - Username: testuser + - Password: 123 + +![install_cc_app](/img/install_cc_app.png) + +3. [OpenFn project](https://www.openfn.org/projects/commcare-demo/jobs) +4. [Public report that shows records in the PostgreSQL database](https://analytics.openfn.org/public/question/095449a9-5696-463c-a4fb-24614c9f08a5) + +## Getting started + +In this walkthrough, we will be setting up an **automatic data sync between +CommCare and a PostgreSQL database**. We will be syncing submissions coming from +a CommCare `Maternal and Newborn Health` application that has a +`Register a New Patient` form. 
+ +:::tip + +Whenever a CommCare user registers a new patient, the patient details will +automatically be synced to an already configured PostgreSQL database to enable +real-time monitoring and analytics on data collected in the field. For example, +this database can quickly be connected to a dashboard that collects aggregate +data on patients registered! + +::: + +![cc-postgres](/img/cc-postgres.png) + +**This integration can be broken up into two parts:** + +1. Getting data from your source system to your OpenFn inbox so you can inspect + the data structure to inform the job design for part two +2. Transforming and loading this data to your destination system + +… let’s get started! + +## Getting data from CommCare + +**There are two ways to get your CommCare form submissions in your OpenFn inbox +to inspect the data, and to later map it to your destination system.** + +### Option 1: Webhook to forward cases and/or forms in real-time from CommCare to OpenFn using REST service + +CommCareHQ has a native data forwarding feature that provides a webhook/REST +service that can be pointed to the destination of your choice (i.e., your OpenFn +Inbox). When a webhook is configured, any Commcare forms submitted are +**_automatically forwarded_** to the designated endpoint, such as your OpenFn +inbox. After data forwarding is set up, it happens automatically, **_in +real-time for all forms and cases_**. Learn more about configuring a webhook +[here](/adaptors/commcare#webhook-forward-cases-andor-forms-from-commcare-to-openfn-using-rest-service). + +![option1](/img/option1.png) + +### Option 2: Extracting Commcare data via the REST API + +CommCare provides a robust +[REST API](https://confluence.dimagi.com/display/commcarepublic/List+Forms) for +extracting and loading data. This second option involves configuring a job in +OpenFn to fetch CommCare submissions via a `GET` HTTP request with parameters to +filter your data query. Follow along for how to set this job up! + +1. **Create a new project space, or open up an existing one where you have Admin + access.** + +![create_new_project](/img/Create_new_project.gif) + +2. **Create a new “Cron” trigger to schedule this extract job. Consider how + frequently you want this job to run. Daily? Weekly? Every 1 hour?** + +![create_trigger_cc](/img/create_trigger_cc.gif) + +3. **Create a “Raw JSON” credential to input the authentication details for your + CommCare source application.** + +![add_new_cred](/img/add_new_cred.gif) + +In the credential `JSON Configuration`, add your credential as follows: + +```json +{ + "appId": "APPID", + "password": "PASSWORD", + "username": "USERNAME", + "applicationName": "APP NAME", + "hostUrl": "https://www.CommCarehq.org", + "openfnInboxUrl": "INBOXURL" +} +``` + +:::tip + +Check out [this](/documentation/getting-started/terminology/#inbox) docs page on +how to find your OpenFn inbox URL to fill in the configuration above. + +::: + +Now that you've configured the job Trigger and Credential to authenticate… + +4. **Configure a new job. Note that this job will use the HTTP adaptor in order + to connect with the CommCare REST API.** + +![configure_job_cc](/img/configure_job_cc.gif) + +5. **Writing the “FETCH” job expression:** You will want to write a job + expression that sends a `GET` HTTP request to CommCare’s List Forms API. + + `GET /https://www.CommCarecommcarehq.org/a/cc-demo-2/api/v0.5/form` + + We have included the code snippet for replicating this job below. 
Please + check out the + [CommCare API docs](https://confluence.dimagi.com/display/commcarepublic/List+Forms) + on how to adjust the request query parameters. + +```js +get( + 'https://www.CommCarehq.org/a/cc-demo-2/api/v0.5/form/', + { + query: { + //see API docs to adjust query parameters + limit: 1000, //max limit: 1000 + offset: + state.meta && state.meta.next + ? state.meta.limit + state.meta.offset + : 0, + received_on_start: '2022-02-16', + received_on_end: '2022-02-18', + xmlns: + 'http://openrosa.org/formdesigner/D771417E-354E-4906-A686-DF0BA230F16A', + }, + }, + state => { + //After the CommCare API responds to our GET request, we want to POST the data in the response to our OpenFn Inbox for further inspection + const { meta, objects } = state.data; + const { openfnInboxUrl } = state.configuration; + const forms = objects; + + state.configuration = { baseUrl: 'https://www.openfn.org' }; + console.log('Posting form submissions to OpenFn Inbox...'); + + return each(forms, state => { + return post(`/inbox/${openfnInboxUrl}`, { body: state.data }, state => ({ + ...state, + data: {}, + references: [], + }))(state); + })(state); + } +); +``` + +6. **Once you are finished configuring and writing your job, save and run it!** + +![save_run_job_cc](/img/save_run_job_cc.gif) + +7. **Check out the `Activity History` tab to see if your run succeeded.** If it + succeeded, you should see: + - Successful run log (look for the green!) + - New Messages in your `Inbox` containing data for any forms submitted in the + time frame specified in your query. + +![activity_history_cc](/img/activity_history_cc.png) + +:::info + +**What do do if your run fails:** + +1. Open the run to inspect the error message +2. Adjust the job to issue and re-run the transaction as needed by clicking the + play button in `Activity History` +3. Check out the [PostgreSQL common errors](/adaptors/postgresql/#common-errors) + page for more details! + +::: + +**If you want to replicate this setup and configure your own CommCare +integration, first consider your CommCare extraction options - remember that +there are 2:** + +1. Data forwarding webhook (native CommCare feature) +2. REST API (List Forms API - **_API access requires a paid CommCare plan_**) + + The main advantage of using the webhook is that your data is forwarded to the + destination system in real-time. However, the List Forms API is also + advantageous because it enables users to extract data in bulk on a scheduled + basis, for syncing historical data every month on the 30th, for example. + Deciding on which option to go with depends on your business requirements. + +## Transforming and loading CommCare data to a PostgreSQL database + +1. **You should have a database configured and a username provided for OpenFn to + read and write data in your target DB tables.** For this demo, we have + configured the database + [like this](https://docs.google.com/spreadsheets/d/1pi_oxImakhtaCCCIENkjTPZeuyWhpFEcNmH7hfvTBgo/edit?usp=sharing) + to capture the CommCare form data. Check out the + [design quickstart](/documentation/design/design-quickstart#3-map-data-elements-to-be-exchanged) + for how to create your own `mapping specification document` to map data + elements to be exchanged. + +![db_config](/img/db_config.png) + +2. **Create a new message filter trigger, to run our second job for every new + patient record received in the OpenFn inbox.** Learn more about message + filter triggers + [here](/documentation/build/triggers/#message-filter-triggers). 
+ +![create_new_trgger_db](/img/create_new_trgger_db.gif) + +3. **Create a PostgreSQL credential which will be used by the job to + authenticate with the database.** + +![add_credential_postgres](/img/add_credential_postgres.gif) + +4. **Create a new job with the `postgresql` adaptor for loading the CommCare + data into your destination database.** + +![configure_job_postgres](/img/configure_job_postgres.gif) + +**Writing the job:** For this job we will use the upsert operation to +insert/update records in the destination `patient` table and use `patient_id` as +the primary key. An `upsert` will update an existing row if a specified value +already exists in a table, and insert a new row if the specified value doesn't +already exist. + +```js +upsert('patient', 'ON CONSTRAINT patient_pk', { + patient_id: dataValue('data.patient_name'), + patient_name: dataValue('data.patient_name'), + village_name: dataValue('data.village_name'), + last_menstrual_period: dataValue('data.last_menstrual_period'), + expected_delivery_date: dataValue('data.expected_delivery_date'), + children_alive: dataValue('data.children_alive'), + living_children: dataValue('data.living_children'), + feeling_sick: dataValue('data.feeling_sick'), + total_children: dataValue('data.Total_children'), + risk_level: dataValue('data.Risk_level'), +}); +``` + +Feel free to modify the code above to reflect your CommCare and database +configuration according to your mapping specifications. Check out this +[page](/documentation/jobs/job-studio#job-studio-features) for how to copy the +dataValue for source data fields in the OpenFn job studio. + +:::tip + +Check out the +[design quickstart](/documentation/design/design-quickstart#3-map-data-elements-to-be-exchanged) +for how to create your own `mapping specification document` to map data elements +to be exchanged. + +::: + +6. **Save and turn on the job** + +![save_db_job](/img/save_db_job.gif) + +## Time to test! + +1. Submit a form in CommCare +2. If you have enabled data forwarding, refresh your OpenFn inbox +3. If you have not enabled data forwarding and set up a FETCH job instead, run + the job (ensure the `received_on_start` and `received_on_start` dates in the + FETCH are appropriate). +4. Run the FETCH job–if the fetch job passes, the “Load to DB” job should + automatically run +5. Check out the `Activity History` and ensure that both runs passed (look for + the green checks in the `Status/Action` column). + +![activity_history_final](/img/activity_history_final.png) + +6. **Finally, refresh your database and check out the new submission data!** + +![metabase](/img/metabase.png) + +While this guide is specifically for PostgreSQL databases, you can generally +follow these same steps for other database types (e.g., MS SQL or MySQL)—simply +leverage a different adaptor in your job configuration. + +**Other resources to check out:** + +1. OpenFn Job Library +2. OpenFn Docs ‘App’ pages for CommCare and Postgres + +**Any questions? Comments? New configuration ideas? 
Please reach out to us with +a post on the [OpenFn Community](https://community.openfn.org/) forum.** diff --git a/versioned_docs/version-legacy/getting-started/glossary.md b/versioned_docs/version-legacy/getting-started/glossary.md new file mode 100644 index 00000000000..d57aa6e204a --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/glossary.md @@ -0,0 +1,141 @@ +--- +sidebar_label: Glossary for Integration +title: A glossary for data integration +--- + +Now that we've got a basic understanding of what an integration is, it's +important to establish some of the foundational concepts we need to press +forward. This doesn't mean you can't use OpenFn if you don't know what any of +these words mean prior to reading our documentation, but it does mean that some +of the most important tasks along the OpenFn journey will assume at least a +basic understanding of each of these terms. In +some cases, we also link to further reading if you want a better +understanding of some part of your data integration picture. + +Note: This glossary is meant to be OpenFn-agnostic. The rest of the docs help +you to get a picture of the parts of OpenFn, what we call them, and why, but +this glossary is really meant as a prerequisite to all those other things to aid +users with no experience in this area. + +## API + +API is short for "application programming interface," and it's the part of some +software application that has chosen to make itself visible +(interface) to users outside the application itself. And it's doing that +in a programmatic way, in a way that allows developers of other +applications or data systems to use it the same way each time. + +## API Protocol + +There's no hard and fast rule about how an API gets developed, but over time, +standards have emerged to make it more straightforward for a new user to +interact with Platform X's API, by trying to ensure most applications use one of +a few different formats. That's what an API protocol is. A few of the big names +here are REST, SOAP, JSON, and GraphQL. Rather than reinvent the wheel, +[here's a good primer on how protocols differ, their data formats, and why that all matters.](https://frontend-digest.com/beginners-guide-to-apis-protocols-and-data-formats-f80cf7f30425]) + +## Database + +Any organized collection of data can probably be safely called a database. If +it's got a structure with which to reference all the stuff it's storing, and the +"stuff" is data, then it's a database. + +## Data source + +A data source is an application, database, or table that provides data to some +other platform. Nothing is always a data source. For example, Google +Sheets can be a data source, but it can also pull from data sources (individual +CSV uploads or manual user data entry). We just call it a source when it's doing +the job of sourcing data to some other place. Data sources are the starting +point, temporally, for any integration. + +## Data system + +Sometimes folks get confused about the distinction between a database, a data +source, an application, and a data system. A data system is a more +complex collection of these other things, usually one that allows a user to more +easily interact with all of the data they should have access to. The data system +often serves as an entry point to the myriad databases, applications, tables, +etc. that a user would otherwise have to go 12 different places to find. + +## Encryption + +In this day and age, security is everything. 
Encryption is the process of taking
+something that is readable to anyone and making it readable only to the people we
+want to read it. OpenFn ensures your data is encrypted every step of the way
+while it's in our platform.
+[For more on different kinds of encryption, you can look here.](https://ssd.eff.org/en/node/36)
+
+## File system
+
+A file system is to files what a data system is to data. It structures your
+files in a way that makes it easy for you to retrieve them in a standardized way
+(think of your home file system with its file paths on your home computer). File
+systems can exist in other contexts too, and sometimes you need to access them
+to retrieve a file (a Word doc, CSV, plain text file, etc. might all be relevant
+depending on your use case). The only real difference between file systems and
+data systems or databases is the kind of information stored: data vs. files.
+
+## ETL
+
+ETL stands for extract, transform, and load. These are often thought of as the
+three constituent parts of a data integration. First, we extract (push or pull
+data from a data source). Then, we transform (make any changes to the data to
+make it acceptable to the destination system or application). Then, we load
+(send it to the destination).
+
+## Integration platform
+
+An integration platform (e.g., OpenFn) is an application (or set of
+applications) that helps organizations set up, run, and maintain/manage the
+integrations between all of their various systems.
+
+### iPaaS
+
+You may also see the acronym "iPaaS". This stands for integration platform as a
+service and is a type of "software as a service" (or "SaaS"). SaaS is a software
+purchasing model in which software is paid for only as it is used (often
+month-to-month), rather than purchased up front or given away for free.
+
+## Metadata
+
+This is data that tells us about our data. In a table, for example, that's the
+names of the columns, the number of rows, etc. Metadata is often brought up in
+conversations about privacy—e.g., regulators might want to ensure that _only
+metadata_ is moved from Ministry A to Ministry B, as opposed to personally
+identifiable information (PII) about individuals themselves.
+
+## Push, pull, and streaming
+
+Pushing is when a triggering action in the data source causes it to send
+data to the destination. Pulling is the opposite: the destination
+system requests the data from the source based on some triggering action, rather
+than waiting for the source to send it on its own. Streaming is a bit
+different; it's when a data source is essentially constantly sending
+data to a destination system.
+
+## Webhook
+
+A [webhook](/documentation/source-apps#standard-webhook-configuration) (also called a web
+callback or HTTP push API — thanks
+[SendGrid](https://sendgrid.com/blog/whats-webhook/)!) is a feature of an
+application that allows pushing. It's often configured to notify some
+external URL when an event occurs. A system administrator might create a
+"webhook" which notifies an integration platform whenever a particular event
+occurs so that the iPaaS can start executing some complex workflow.
+
+## Structured and unstructured data
+
+Structured data is data that has metadata. Unstructured data has very little
+metadata (though it probably still has things like time of creation, update, etc.).
+Without metadata about the format of the data, unstructured data is more
+difficult to interact with programmatically.
We need different sorts of rules +when doing ETL on unstructured data to do it well, whereas structured data is an +easier starting point because we know what to expect from a column with a name, +data type, field size, and so on. + +## Writeback + +Refers to a destination system making a change in a data source. When my +destination application receives information from a data source and wants to do +something back to the source in response, that's writeback. diff --git a/versioned_docs/version-legacy/getting-started/implementation-checklist.md b/versioned_docs/version-legacy/getting-started/implementation-checklist.md new file mode 100644 index 00000000000..88793ae2348 --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/implementation-checklist.md @@ -0,0 +1,117 @@ +--- +sidebar_label: Implementation Checklist +title: Implementation Checklist for planning your next integration project +--- + +# Implementation Checklist + +This checklist draws from experience of implementing interoperability projects +with in-country government agencies (incl. UNICEF country offices, Ministry of +Social Services Cambodia, MoH Thailand) to offer an implementation & planning +guide covering key milestones in most interoperability and integration projects. + +While this checklist should be tailored for each implementation, the tasks +outlined here provide a template workplan that can help any organization prepare +for their upcoming implementation. + +:::tip + +Check out a real-world example See the UNICEF Cambodia repository for documented +outputs from this checklist from an interoperability project implemented for the +Cambodia Ministry of Social Affairs, Veterans, and Youth Rehabilitation and +partner NGOs: +[openfn.github.io/unicef-cambodia/](https://openfn.github.io/unicef-cambodia/) + +::: + +:::info + +The XLS version of this template can be found [here](https://docs.google.com/spreadsheets/d/1_XY0nx0OLNUsogrIHnRaSTyZ-KdcSXks-tqwm3ZfMc4/edit#gid=72612093). + +::: + + +## (1) Preparing for the Implementation + +- [ ] Point of contacts identified for each target system (incl. system administrators, folks who can speak to the functional and technical requirements) +- [ ] Data sharing agreement finalized (if required; common for cross-organization workflows) +- [ ] Business value assessed & documented +- [ ] High-level workflow requirements gathered & documented (in diagram) +- [ ] Technical feasibility assessment completed to verify integration approach, available connection points, and OpenFn deployment option and adaptors +- [ ] Capacity assessment completed + + +## (2) Discovery & Design - Functional Workflow Requirements + +- [ ] User stories documented to capture business value & desired outcomes + Learn more about user stories [here.](https://docs.openfn.org/documentation/design/design-quickstart#1-capture-requirements-as-user-stories) +- [ ] Workflow BPMN diagram capturing functional steps of the business process finalized + Learn more about diagrams & BPMN notation [here.](https://docs.openfn.org/documentation/design/design-quickstart#2-diagram-the-business-process) +- [ ] Request list of data elements from administrators of target systems + Read about mapping specs [here.](https://docs.openfn.org/documentation/design/design-quickstart#3-map-data-elements-to-be-exchanged). 
+- [ ] Data element mapping specifications finalized (functional/business-friendly version) +- [ ] Client sign-offs on workflow diagram & mapping specs +- [ ] Workflow assumptions documented (e.g., what human, manual steps does the workflow rely on; what are the unique identifiers) +- [ ] Testing scenarios drafted + +## (3) Discovery & Design - Technical Specifications + +- [ ] Documentation on APIs or target connection points secured +- [ ] Connection points & authentication methods confirmed +- [ ] Access secured to developer/sandbox environments for testing +- [ ] Authentication and authorization methods & credentials tested +- [ ] Target API endpoints determined based on functional specifications & review of API docs +- [ ] Target API endpoints tested to validate the functional data element specifications can be delivered +- [ ] Workflow BPMN diagram capturing the technical steps of the workflow finalized +- [ ] Technical version of data element mapping specifications created +- [ ] Workflow assumptions updated to include any technical considerations +- [ ] Test scenarios updated to include any technical considerations +- [ ] Project Security Configuration Checklist drafted to capture data security & compliance considerations +- [ ] Github repository created +- [ ] Job specifications written for developers + +## (4) Build + +- [ ] OpenFn platform: project space created & relevant users invited +- [ ] OpenFn platform: Jobs, triggers, and credentials configured +- [ ] OpenFn platform: Version control configured to connect Github repo +- [ ] Jobs written & pushed to branch on Github +- [ ] Job code review complete and merged to main branch on Github +- [ ] OpenFn platform: Github paths updated on each job to link to source file +- [ ] Test data created (if needed) +- [ ] Engineer updates mapping specifications (if needed) + +## (5) Testing + +- [ ] Testing Round 1: Developers run jobs locally with sample data provided +- [ ] Testing Round 2: Analysts complete Test Scenarios & run jobs on platform +- [ ] Iteration: Analysts submit feedback to developers & re-test +- [ ] UAT Round 1: Client completes Test Scenarios +- [ ] Iteration: Analysts submit feedback to developers & re-test +- [ ] UAT Round 2 (if needed): Client completes Test Scenarios +- [ ] Iteration: Analysts submit feedback to developers & re-test + +## (6) Training & Prep for Go-Live + +- [ ] Training materials drafted for client administrators +- [ ] Documentation drafted, and all project artefacts/docs linked +- [ ] Project Security Configuration Checklist reviewed to determine if any configuration changes or settings need to be implemented post-testing +- [ ] Confirm that production system(s) have been configured +- [ ] Production credentials secured & tested (authentication & authorization) +- [ ] OpenFn platform: "production" project created (cloned from "staging" project), job configuration migrated, & jobs connected to production credentials +- [ ] OpenFn administrator users & access levels confirmed and granted? +- [ ] Support POCs identified for each target system? 
+- [ ] Establish support structures & governance model for change management +- [ ] Training session delivered to designated OpenFn and target system administrators and any other ToTs + +### Rollout & Support + +- [ ] Go Live: Turn "on" OpenFn jobs in production platform project so that the workflow is now live in production systems +- [ ] Confirm administrators have OpenFn notifications turned on to "Each Time" so that they will receive failure notifications (see OpenFn Account Settings) +- [ ] Communicate to end users as needed about the go-live and its implications + +## Questions or feedback? + +If you have any inputs, comments, or questions–please contribute! Submit a pull +request to this documentation page or leave a comment in +[OpenFn Community](https://community.openfn.org/). diff --git a/versioned_docs/version-legacy/getting-started/integrating-using-openfn.md b/versioned_docs/version-legacy/getting-started/integrating-using-openfn.md new file mode 100644 index 00000000000..7042578e231 --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/integrating-using-openfn.md @@ -0,0 +1,14 @@ +--- +title: Integrating using OpenFn + +--- + +In [What's an Integration, Really?], you can read about the how, when, and why of integration design. Now, we take those general concerns and apply them to the OpenFn framework to help you get started with using the platform and knowing the lingo. + + + +When->Triggers +What->Jobs and runs +Why->not our problem/data mapping +How->All of this stuff +How safely->Credentials diff --git a/versioned_docs/version-legacy/getting-started/integration-toolkit.md b/versioned_docs/version-legacy/getting-started/integration-toolkit.md new file mode 100644 index 00000000000..e7828b605ea --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/integration-toolkit.md @@ -0,0 +1,108 @@ +--- +title: The Integration Toolkit +--- + +OpenFn's free and open-source Integration Toolkit gives governments and NGOs +around the world more flexibility and freedom to chose how they achieve success +in integration and interoperability projects. The Toolkit is both a recognized +[Digital Public Good](https://digitalpublicgoods.net/) ("DPG") and a +[Digital Square Global Good](https://digitalsquare.org/digital-health-global-goods). + +![DPG](/img/openfn_dpg.png) + +The Toolkit provides a suite of software tools and documentation to help users +design, build, and automate integrations. + +## About the Toolkit + +At the heart of the toolkit is the `project`—a set of jobs, triggers, and +credentials which allow organizations to flexibly define workflows and +integrations across their systems. + +Projects can be ported from the `platform` to `microservice` (the main +deployment pathway for the Integration Toolkit) and back again (see below) but +to really understand the toolkit you've got to first understand Open Function +Group and `platform`, the enterprise iPaaS. + +![Integration Toolkit](/img/integration-toolkit.png) + +Open Function Group has been building free and open source software (FOSS) for +data integration projects in the health, humanitarian, and international +development sectors since in 2014. Their software and services are now in use by +governments, NGOs, and impact-first businesses in over 40 countries. + +OFG's first integration platform was entirely FOSS, but they soon shifted to an +["open-core"](https://en.wikipedia.org/wiki/Open-core_model) (think GitLab) in +order to sustain their impact-focused integration work. 
Their main hosted
+offering, the OpenFn "platform", is _proprietary_ but makes extensive use of the
+open-source integration toolkit; in fact, the "platform" may be thought of as an
+enterprise/hosted layer running on top of the basic, open-source building blocks
+provided by the Integration Toolkit.
+
+### Why OFG is driving the development of the Integration Toolkit
+
+Our mission is to make health & humanitarian interventions more efficient &
+effective, and we see investment in the integration toolkit as strategic.
+
+We'll strive to preserve the integration toolkit as a healthy and bona fide open
+source project and to sustain its operations through business activities related
+to the toolkit and OFG's other proprietary and/or service offerings, until it
+grows legs of its own and is taken over by the broader community.
+
+We have designed the tools in the toolkit to be useful as standalone pieces of
+software _and_ as modules used by other applications. Because a substantial
+portion of OFG's revenue comes from contracts related to the platform, and
+because the platform relies on OpenFn/core, OpenFn/engine, and the OpenFn
+adaptors, we hope to ensure that OFG will always be incentivized to continue
+their investment in the integration toolkit.
+
+In other words, we're attempting to ensure that as OFG grows, they will continue
+enhancing the open source integration toolkit regardless of whether or not
+additional funders and/or stakeholders contribute to the project.
+
+## What's in the Integration Toolkit
+
+Separate from "the platform", the integration toolkit is the suite of
+applications and modules provided by OFG and the community which enable data
+integration, interoperability, and automation solutions via OpenFn-compliant
+jobs, triggers, and credentials. The key components of the toolkit are:
+
+1. OpenFn/docs
+2. OpenFn/core
+3. OpenFn/engine
+4. OpenFn/microservice
+5. OpenFn/devtools
+6. the OpenFn adaptors
+7. _OpenFn/lightning (coming soon...)_
+
+:::caution Microservice and devtools are being replaced by Lightning
+
+Please note that [OpenFn/microservice](https://github.com/OpenFn/microservice)
+and [OpenFn/devtools](https://github.com/OpenFn/devtools) are being deprecated
+and will be replaced by [OpenFn/Lightning](https://github.com/OpenFn/lightning)
+when Lightning is released.
+
+:::
+
+### Lightning, coming soon!
+
+Lightning is an upcoming addition to the Integration Toolkit. It is a _fully
+open source_ workflow automation platform designed for governments and NGOs who
+need a flexible solution to integrate and connect _any system_.
+
+You can read all about it [here](/documentation/about-lightning)!
+
+## Architecture for implementation
+
+![Lightning architecture](/img/lightning_architecture.png)
+
+## Open Source Steering Committee (OSSC)
+
+We've also initiated an Open Source Steering Committee (OSSC) to represent the
+OpenFn community of end users and implementers. It reviews and gives feedback on
+major roadmap decisions, new designs, specifications, features, and protocol
+changes.
+
+The OSSC's membership and decision-making process are defined in the
+[OSSC's internal governance policy](https://openfn.github.io/governance/OSSC.html).
+If you're interested in joining, we'd love to hear from you!
diff --git a/versioned_docs/version-legacy/getting-started/security.md b/versioned_docs/version-legacy/getting-started/security.md new file mode 100644 index 00000000000..f21e28065c3 --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/security.md @@ -0,0 +1,117 @@ +--- +sidebar_label: Security +title: Security considerations for data integration projects +--- + +# Security Guidelines for Data Integration Implementations + +Even if the technologies leveraged in your integration solution can be +considered secure, there are still many security risks in data integration, +especially during implementation. With support from Digital Square, we have +therefore developed a **Security Guidebook for Data Integration +Implementations**. + +Since 2014, we at Open Function Group (the primary custodians of OpenFn) have +helped implement nearly 100 data integration solutions for over 45 NGO and +government partners around the world. Through our engagements with security +teams at different partners, our own research and development, consultations +with security experts internal and external, and partnerships with other +communities of practice, we have developed a strong understanding of security +best practices and considerations for data integration projects that we would +like to share with the wider digital development community. + +**This Guidebook aims to help digital implementers in the Digital Public Good +and Global Goods communities better understand security risks and presents 23 +best practices for the various implementation phases of data integration +projects.** It also links to some open-sourced OFG resources our team uses in +our own implementation process for OpenFn projects. + +You can find a complete list of the 23 best practices on this page below. + +**To access the Guidebook, check out the below slides or click the link to share +& download:** +[https://bit.ly/security_guidebook](https://bit.ly/security_guidebook) + +

+ +

**Secure Data Integration: 23 Implementation Best Practices**
+
+**Core Tenets**
+
+1. Understand relevant policies specific to data sharing, storage, and protection
+2. Only extract & transfer essential data points
+3. Document, document, document
+
+**Analyze & Plan**
+
+4. Don't take API security for granted
+5. Budget time for security testing
+
+**Design**
+
+6. Resource: Mapping specification template
+7. Resource: Architecture data flow diagram
+8. Resource: Project Security Configuration & Go-Live Checklist
+9. Consider idempotency, unique identifiers, & "upsert" operations to ensure data integrity
+10. Design for failures & transaction reprocessing
+11. Consider data validation
+
+**Build**
+
+12. Use change tracking & version control
+13. Encrypt where possible
+14. Use strong authentication; don't talk to strangers
+15. Authorization scopes to limit access
+16. Log transactions for activity monitoring & control what information is logged
+
+**Deploy**
+
+17. Test again, especially credentials, before deployment
+18. Train users and system administrators on integration security
+19. Review your security requirements again before go-live
+20. Determine points of contact for reporting security issues
+
+**Ongoing Monitoring & Management**
+
+21. Consider governance models for ongoing management & changing requirements
+22. Train partners on change management
+23. Have a strategy for access management
+ +Read on for other resources and implementer communities to check out. + +### Resources referenced in the guidebook + +- [Principles of Digital Development Privacy and Security Guide](https://digitalprinciples.org/wp-content/uploads/PDD_Principle-AddressPrivacySecurity_v2.pdf) +- [UNICEF policy on personal data protection](https://www.unicef.org/supply/media/5356/file/Policy-on-personal-data-protection-July2020.pdf.pdf) +- [International Committee of the Red Cross Handbook on data protection in humanitarian action](https://www.icrc.org/en/data-protection-humanitarian-action-handbook) +- [GDPR Quick Guide](https://gdpr.eu/what-is-gdpr/) +- [Sanity.io A Rough Guide to Running a GDPR Compliant SaaS Business](https://www.sanity.io/blog/a-rough-guide-to-running-a-gdpr-compliant-saas-business) +- [OWASP API Security Project](https://owasp.org/www-project-api-security/) +- [GovStack Security & API Standards](https://www.govstack.global/wp-content/uploads/2021/08/Security_Building_Block_Definition_1.0.1.pdf) +- [Health Data Governance Principles](https://www.healthdataprinciples.org/) +- [CDC Health Data Privacy, Confidentiality, and Security Guidelines](https://gicsandbox.org/sandbox-cms/health-data-privacy-confidentiality-and-security-guidelines-development-toolkit#dd01fcf80d4d46f08a099b282bc23f16) + +### OpenFn Resources + +More implementation guidance can be found across this Docs site. For OpenFn +users, learn more about OpenFn security & compliance at +[openfn.org/trust](http://openfn.org/trust) and +[openfn.org/compliance](http://openfn.org/compliance). + +Here are the key OpenFn templates and resources referenced in the Guidebook: + +- [Mapping Specification Template](https://docs.google.com/spreadsheets/d/1IqTIgOzyOztEevXbgY_4uE8Y8tiHXufZXx-IyJZase0/edit#gid=1822444315) +- [Solution Architecture Diagram](https://lucid.app/lucidchart/1e997197-2d67-4393-8394-a532d83561b2/edit#?templateid=fb96ae05-e288-4d1f-b3fc-2cbf7641a7cc) +- [BPMN Diagram resources](/documentation/design/design-quickstart/#use-bpmn-for-standardized-documentation) +- [Project Security Configuration & Go-Live Checklist](https://docs.google.com/document/d/1CbQkN7SqNmXeqt3nMTYP4ioQlTuwF2LbDkkFqhp0zsU/edit?usp=sharing) + +### Communities of practice & other experts + +Here are some other communities you may consider following for more security +guidance. + +1. [OpenHIE Privacy & Security Working Group](https://wiki.ohie.org/display/resources/Privacy+and+Security+Working+Group+Call) +2. [GovStack](https://www.govstack.global/) +3. [DHIS2 Security Team & Community of Practice](https://dhis2.org/security/) +4. [Asia eHealth Information Network (AeHIN) Communities of Practice](https://www.asiaehealthinformationnetwork.org/communities-of-practice/) diff --git a/versioned_docs/version-legacy/getting-started/so-you-want-to-integrate.mdx b/versioned_docs/version-legacy/getting-started/so-you-want-to-integrate.mdx new file mode 100644 index 00000000000..6bd50e4bbe5 --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/so-you-want-to-integrate.mdx @@ -0,0 +1,103 @@ +--- +title: So, what is an integration? 🤔 +--- + +import Graph_CommCaretoSF from '/static/js/components/ccsf_graph'; +import Graph_Master_View from '/static/js/components/master_view_graph'; +import Graph_Data_Viz_Flow from '/static/js/components/data_viz_react_flow'; +import ReactFlowProvider from 'react-flow-renderer'; + +OpenFn is an integration platform. 
And if you found us, you likely came to the +conclusion at some moment prior that you want to integrate technology X with +technology Y (and maybe W and Z while you're at it). But not all of our users +come to OpenFn with a wealth of previous integrations under their belt. So if +this is your first go, this page can help you think through all the different +ways integrations can take shape so that you have a strong understanding of what +it is you really want _before_ you start writing +[(or borrowing)](/adaptors/library) a +[job](/documentation/jobs/job-design-intro/). + +There are plenty of different reasons to integrate your data systems. Maybe you +want one "master" view that you or your clients can trust as a source of truth. + +
+ +
+ +Maybe you want to automate some data viz that you currently have to do manually. + +
+ +
+ +Or maybe you just want to expose a small slice of data from one user group to a +different app used exclusively by some other part of your company. + +Regardless of the reason, what every integration boils down to is connecting two +or more disconnected applications. But as you can see, not all integrations look +alike. This basic structure comes in many shapes and sizes. There's plenty of +variety to be found: + +1. Perhaps the most important variation is **why** you move the data. + +This part boils down to end goals. Integration for integration's sake is a waste +of time. What's your reason for wanting X data in Y system? + +It's important to keep that ultimate business requirement in mind when designing +any integration and weigh potential outcomes of design decisions against that +ultimate goal. + +2. **When** you move the data. + +Usually, you can articulate the best case scenario here in plain English pretty +easily. + +> I want Salesforce to \_\_\_ **when** one of our field workers submits a new +> CommCare form. + +
+ +
+ +or + +> I want Postgres to \_\_\_ **every two weeks.** + +A crucial difference between these two **whens** is that the first turns on an +action, whereas the second is based on a set period of time, regardless of what +happens in that window. + +3. **How** you move the data, namely whether the destination system is pulling + or the source system is pushing (or some other pattern), what format the data + is being transferred in, and what protocol(s) that system is using to do it. + +1 and 2 are really about real world considerations. Sometimes technical +constraints from our source and destination systems can get in the way of our +ideals, but answering the questions in an ideal way doesn't require any serious +thought about the tech behind it. Now that we're at **how**, we have to think +more seriously about the underlying technology. + +There's not space here to explore all the different ways that a platform can +choose to set up to send or receive data, but it's important now to take note +that there are many options, and knowing which ones are available is an +important part of designing a strong integration. For a more in-depth look at +how to answer these questions based on the specifics of your project, check out +\_\_\_. + +4. A final variation to consider here is **how to move the data safely** (sorry + to break the pattern we had going). + +At OpenFn, data security is a first priority. That's also true of many of the +other systems our customers use. What that means is that we often can't just +grab data from one system and put it into another without first assuring each +system that we are someone who's allowed to be there. In general, we talk about +this slice of the world as **authentication**. + +These are all very important questions to consider when designing an +integration. Check out our docs on integration design to learn more about how we +begin to answer these questions and more: + +- **Integration design:** + https://docs.openfn.org/documentation/design/design-quickstart/ +- **Glossary for integration:** + https://docs.openfn.org/documentation/getting-started/glossary/ diff --git a/versioned_docs/version-legacy/getting-started/terminology.md b/versioned_docs/version-legacy/getting-started/terminology.md new file mode 100644 index 00000000000..4246c9ea78b --- /dev/null +++ b/versioned_docs/version-legacy/getting-started/terminology.md @@ -0,0 +1,196 @@ +--- +title: OpenFn Concepts +--- + +All across the OpenFn Integration Toolkit, the iPaaS, and this documentation +site you'll find some OpenFn-specific terminology that's important to +understand. This page is your reference guide—a glossary of the most important +_OpenFn-specific_ words and what they mean. + +:::tip Keep In Mind + +As you read about the terms below, remember that in order to make OpenFn perform +some sort of data integration work you'll always need to specify: + +- **What** to do (e.g., update patient data in some system) +- **When** to do it (e.g., at 7am every day) +- and **How** to log in (e.g., go to `example.com` and use `abc123` as the + security token) + +With this in mind, let's check out the key terms. + +::: + +Please note that if you're looking for a glossary for generic terms used in +data-integration (rather than this _OpenFn-specific_ stuff) head over to the +[Glossary for Integration](/documentation/getting-started/glossary) page in the +Design section. Otherwise, read on! + +## Project + +A project is an administrative grouping in OpenFn. 
In
+[OpenFn/microservice](/documentation/microservice/home/), it corresponds to a
+[`project.yaml`](/documentation/portability#proposal-v2-latest) file. On the
+platform, it's got an owner, a billing plan, and a bunch of
+collaborators—different users that have been granted access to the project. In
+either case, a project contains jobs, triggers, credentials, and everything you
+need to run an integration with OpenFn.
+
+## Job
+
+:::tip
+
+Jobs are the **"what to do"** part of automation!
+
+:::
+
+OpenFn automation centers around [jobs](/documentation/build/jobs), which define
+the specific series of operations (i.e., tasks) that OpenFn should perform. Jobs
+can be executed at certain times, when messages arrive from outside systems, or
+when _other jobs_ succeed or fail. Think of jobs as a set of instructions you
+might give a data entry staff member (e.g., please create a new Patient record
+in OpenMRS when a form containing a newly registered client is received from
+CommCare, export data to DHIS2 every week on Friday at 11pm, or send an SMS with
+the payment confirmation number when a payment confirmation message is received).
+
+:::note Jobs are Reusable
+
+Jobs are fully configurable and reusable. They can also be chained together to
+create [multi-step automation](/documentation/jobs/multiple-operations) flows,
+two-way syncs, and to keep data consistent between multiple applications (using
+multi-app Saga patterns). You can read more on two-way syncing below.
+
+:::
+
+### Adaptor
+
+OpenFn [adaptors](/adaptors) are open-source modules that
+provide your jobs with the features they need to communicate with a particular
+system's API. Some examples are `language-dhis2`, `language-commcare`,
+`language-salesforce`, `language-postgresql`, etc. There are more than 50 active
+adaptors at the moment, and anyone is free to build or enhance them.
+
+### Operation
+
+An [operation](/documentation/jobs/operations) is a sub-task inside a job. For
+example, a job for loading data to DHIS2 might include 3 separate operations:
+
+1. Create a new "program".
+2. Create many new "tracked entity instances".
+3. Enroll those tracked entity instances in the program.
+
+## Trigger
+
+:::tip
+
+Triggers are the **"when to do it"** part of automation!
+
+:::
+
+A [trigger](/documentation/build/triggers) determines **when** to run a job
+automatically. A trigger could be set up to run a job when a message arrives
+(this is known as a `message filter` trigger), on a cron schedule (a `cron`
+trigger), or based on the success or failure of _another_ job (a `flow` or `catch`
+trigger). A simple cron trigger might specify to run a job at "7am every
+weekday".
+
+## Credential
+
+:::tip
+
+Credentials are the **"how to log in"** part of automation!
+
+:::
+
+A [credential](/documentation/build/credentials) is used to log in to a
+destination system (e.g., Salesforce username, password & login URL) so that a
+job can run. Via OpenFn's security model, they are separated from the jobs
+themselves to ensure that stored usernames and passwords (which are all
+encrypted) do not get leaked or accessed by the wrong people.
+
+## Message
+
+A message is a chunk of data that's been received by your inbox. (Technically,
+it's an HTTP request.) It might trigger a job run, and it contains the `body`
+and `headers` of the HTTP request that was made to your inbox.
+
+### Inbox
+
+Your project's [inbox](/documentation/build/inbox) contains all of the messages
+that have been sent to your project.
Messages are stored payloads or data (e.g., +an incoming SMS, a submitted CommCare form) that were sent via HTTP post to your +inbox. + +:::info Inbox URL + +Click the link icon in the top right of the "Inbox" page to copy your inbox URL. +You can then use this URL to send data to OpenFn. + +::: + +## Run + +A run is each individual execution of a job. Imagine that a job is configured to +create a new patient in OpenMRS whenever a case is opened in CommCare. Over the +next week, if 5 cases are opened in CommCare, you’ll see 5 different runs of +this one job. If 4 runs are successful and one has failed, you’ll see 4 new +patients in OpenMRS, and your system administrator will have been notified that +one of those patients couldn’t be created (or whatever more robust +error-handling you’ve set up will take place.) + +Runs have start times, end times, logs and exit codes that indicate when they +took place, what they did, and whether or not they succeeded. + +:::note + +There’s not always a 1-to-1 mapping between runs and the real-world things +you’re working with. I might define a job that gets all updated event data from +DHIS2 for the last 2 weeks and publishes it to a public map using CartoDB. This +job will be triggered at specified time intervals, every 2 weeks in this case, +and after a month, we’ll only see 2 runs in OpenFn (that’s one run every two +weeks). Each run will have succeeded or failed, and each one might have +processed thousands of events from DHIS2. + +:::note + +### Activity History + +On the platform, the Activity History section provides a list of all of the runs +that have taken place in a project. ("Activity History" is to "Run" and "Inbox" +is to "Message".) + +## Related Runs and Messages + +Given the many-to-one relationship between `runs` and `messages`, OpenFn +provides an interface for viewing a messages **"job-state"**. This is a +calculation that can be useful for organizations that need to understand if a +given message has _eventually_ been handled successfully. + +A job state is defined as the result ("success", "failure", or "in progress") of +the _last_ run (ordered by the time it was finished, rather than when it was +inserted into the runs table) for a given message-job combination. + +If two runs for the same message-job combination finished at the same time, it's +ordered by their start time, and then finally by their primary key. In reality, +since the same message-job combination can only be used to create a run once +every 10 seconds, this will almost never occur. + +:::info A Job state example + +Consider a message which should trigger both a case referral job and a payment +job. Two runs will get created when the message arrives, with the referral +succeeding and the payment failing. Navigating to the inbox, you'd see two +"job-states" for that single message: + +1. Referral (success - run 1) +2. Payment (failure - run 2) + +If an administrator then made some sort of change, re-ran the failed payment job +for that message, and this third run succeeded, you'd still only see 2 +"job-states" in the inbox, but they'd both be successful: + +1. Referral (success - run 1) +2. Payment (success - run 3) + +Browsing to the receipt inspector would show all three runs for this single +message. 
diff --git a/versioned_docs/version-legacy/gsoc.md b/versioned_docs/version-legacy/gsoc.md new file mode 100644 index 00000000000..f102a018798 --- /dev/null +++ b/versioned_docs/version-legacy/gsoc.md @@ -0,0 +1,90 @@ +--- +title: Google Summer of Code +--- + +## Overview + +OpenFn provides data integration, automation, and interoperability tools that +are used to scale the world's most promising health and humanitarian +interventions. UNICEF, the World Health Organization, the IRC, and the Wildlife +Conservation Society are just a few of the many organizations that drive +efficiency via OpenFn software. With an open-core model, we've got hosted and +locally-deployed solutions in 40+ countries, and this summer you'll get the +chance to work on leading-edge ETL tools built in Elixir/Erlang, and NodeJs. If +learning about APIs, data transformation, and middleware/automation layers +excites you, OpenFn is the place to be. + +## Mentors + +This summer, you'll get the chance to work with some of the core team at OpenFn, +including [Chaiwa Berian](https://openfn.org/team#chaiwa), +[Mamadou Cissé](https://openfn.org/team#mamadou), +[Stu Corbishley](https://openfn.org/team#stuart), and +[Taylor Downs](https://openfn.org/team#taylor). They're based in Zambia, +Senegal, South Africa, and the United Kingdom, respectively. Between them, +they've got almost 50 years of experience working in software and... a PhD in +Computer Science. (Hey thanks, Mamadou 😉.) + +## Project Ideas + +### OpenFn/microservice Extension + +OpenFn projects (see [`project.yaml`](portability)) can be deployed on the +platform _or_ on microservice, a Phoenix web application. This summer, GSOC +interns will have the opportunity to build out the front-end for this community +supported web app. + +Difficulty level: `medium` + +You'll be working in [`Docker`](https://docs.docker.com/get-started/), +[`Phoenix`](https://www.phoenixframework.org/), +[`Elixir`](https://elixir-lang.org/) and [`Erlang`](https://www.erlang.org/). + +### OpenFn/engine Extension + +Engine is part of the common FOSS toolkit that is used both by `microservice` +and `platform`. It's the software which is responsible for actually executing +calls to `OpenFn/core` and doing neat things like streaming logs back to the +requester. If you're keen on really understanding how Elixir and Erlang work, +getting your hands dirty with OTP apps, engine is where you want to be working +this summer. + +Difficulty level: `high` + +You'll be working in [`Elixir`](https://elixir-lang.org/) and +[`Erlang`](https://www.erlang.org/). + +### OpenFn/core Metrics + +At the bottom of it all, whether we're providing secure patient data transfer +services for ministries of health or making child protection case referrals for +UNICEF, OpenFn relies on spinning up NodeVMs, executing code inside those VMs +safely, and then shutting the down. Welcome to the core. + +This summer you could have the chance to dig into that _sandboxed-VM-in-a-VM_ +magic, learn loads about NodeJs, and provide end-users with better metrics on +exactly what kinds of compute they're using to "get the job done". + +Difficulty level: `medium` + +You'll be working in [`NodeJs`](https://nodejs.dev/learn) and +[`Typescript`](https://www.typescriptlang.org/). + +### Adaptors 2.0 + +Adaptors are the API wrappers that allow OpenFn users to quickly and easily work +with the most common APIs in international development. They provide an +interface for connecting to DHIS2, ODK, CommCare, OpenMRS, etc., etc. 
+ +They're NodeJs modules, but in order to make the adaptor development and _use_ +process better, we want to bring them into the future with Typescript. Adaptors +should tell you how to use them while you use them. + +Check out this thread on +[community.openfn.org](https://community.openfn.org/t/discussion-regarding-adapter-2-0-project) +for more information. + +Difficulty level: `medium` + +You'll be working in [`NodeJs`](https://nodejs.dev/learn) and +[`Typescript`](https://www.typescriptlang.org/). diff --git a/versioned_docs/version-legacy/instant-openhie.md b/versioned_docs/version-legacy/instant-openhie.md new file mode 100644 index 00000000000..d883f026278 --- /dev/null +++ b/versioned_docs/version-legacy/instant-openhie.md @@ -0,0 +1,325 @@ +--- +title: Instant OpenHIE +--- + +:::caution Microservice and devtools are being replaced by Lightning + +Please note that OpenFn/microservice and OpenFn/devtools are being deprecated +and replaced by OpenFn/lightning, When Lighting is released, it may be used +within Instant OpenHIE (instead of microservice) as an OpenHIE-compliant workflow engine that can interface with the OpenHIE Interoperability Layer ([learn more](/documentation/about-lightning#standards-and-compliance-matter)). + +::: + +## Overview + +In partnership with [Digital Square][digitalsquare] and _FCDO COVIDaction_, +**OpenFn has been investing in its open source integration toolkit** to provide +robust integration solutions that can connect _any digital health system_ and be +rapidly implemented on any server, in any country, by any organization. + +**[OpenFn/microservice][openfnmicroservice]** is a fully [Instant +OpenHIE][instantopenhie] compliant component which can be used to drive +workflow, achieve compliance with standards, and integrate components of the +[OpenHIE stack][openhiestack]. + +We seek to enhance the value of the [Instant OpenHIE][instantopenhie] project by +developing a package that will include [OpenFn][openfn] as an integration +pathway for connecting with the [OpenHIE architecture][openhiearchitecture]. + +This package aims to enhance the value of [Instant OpenHIE][instantopenhie] by +providing another option for robust information processing, integration, and +business process (workflow) automation. When deploying [Instant +OpenHIE][instantopenhie], implementers now have the option to include +[OpenFn][openfn] as a component. + +[OpenFn][openfn] may also be used as a workflow engine to automate complex +business logic alongside [OpenHIM][openhim] and the [OpenHIE +stack][openhiestack]. Individual [jobs][jobs] in [OpenFn][openfn], sometimes +many in a single microservice deployment, may be used as _“mediators”_ ([see +OpenHIE library of existing mediators][mediators]) to quickly transform and map +data to the [OpenHIE architecture][openhiearchitecture]. + +To demonstrate a real-world use case for how [OpenFn][openfn] might be +implemented in the [OpenHIE architecture][openhiearchitecture], we met with +several community members to identify key use cases for a reference prototype +implementation. + +Visit the [demo repo here][demorepo]. + +## Use Cases for the Prototype Implementation + +We’ve seen that the most common integration use case is that health service +delivery providers, especially large community health worker (“CHW”) +interventions, need to integrate their data and programming into national +eHealth architectures. + +### User stories + +> 1. 
_As a community health implementer, I want to integrate my CommCare case +> management application used by CHWs with the national patient registry, so +> that I can develop a shared health record and automate reporting +> pipelines._ +> 2. _As a health services provider, I want to integrate my existing application +> with the national HIS, but I want to apply the FHIR standard to my data +> collected before sharing to adhere to compliance and reporting +> requirements._ + +- We therefore decided to build an integration solution that demonstrates how + existing **CHW** applications can be integrated with the national health + infrastructure and leverage a simple job on [OpenFn][openfn] as a + [mediator][mediators] to apply the [FHIR data standard][fhir] and other data + manipulation needed to integrate with [HAPI FHIR][hapifhir]. + +## Implementation Design + +In sum, the prototype sends patient case registration data from mobile data +collection apps ([CommCare][commcare], [KoboToolbox][kobo]) to +[OpenFn/microservice][openfnmicroservice]. [OpenFn][openfn] then transforms the +data and ensures that it adheres to the [FHIR][fhir] [patient][patientspec] and +[encounter][encouterspec] data standards, before sending it onwards to a [FHIR +channel][fhir] in the [OpenHIM][openhim]. [OpenHIM][openhim] is used as a +\_“channel”\_ here for the [OpenHIE architecture][openhiearchitecture] to +validate requests and forward them onto other systems in the **national eHealth +architecture**. In this case, we forward the case data onwards to register the +patients in a [HAPI FHIR][hapifhir] server. + +This implementation design was determined to be the highest value/most in-demand +because it leverages the core functionality of [OpenHIM][openhim] (providing a +reverse proxy and generating an audit trail) without requiring integrators to +build a new [mediator—a][mediators] process that is more complex than +configuring a [job][jobs] within an [OpenFn project][projects]. + +This prototype includes the following components: + +An [Instant OpenHIE][instantopenhie] instance can be spun up which contains +[HAPI FHIR][hapifhir], [OpenHIM][openhim], and a single +[OpenFn/microservice][openfnmicroservice] deployment (a +[project.yaml][projectyaml] file, exported from [OpenFn/platform][openfn]) with +2 different [jobs][jobs]. When data is forwarded to +[OpenFn/microservice][openfnmicroservice] from two distinct form submissions on +[CommCare][commcare] and [Kobo][kobo], it is processed and creates [FHIR][fhir] +patient resources via [OpenHIM][openhim] and [HAPI FHIR][hapifhir]. We’ve opted +for a single [OpenFn/microservice][openfnmicroservice] “project” with two +slightly different [jobs][jobs] and [triggers][triggers] to highlight the +versatility of [OpenFn projects][projects]. + +## Explore the Implementation + +Currently, there are two different ways to explore this demo. The first (the +more traditional _“Instant”_ way) is by **cloning the OpenFn/instant-demo +repo**. Once inside, users type _“yarn setup”_ to get everything up and running. +Running _“yarn test”_ will then demonstrate the +[Kobo][kobo]/[CommCare][commcare] to [OpenFn][openfn] to [OpenHIM][openhim] to +[FHIR][fhir] flows. + +They can explore the various [jobs][jobs], sample payloads, endpoints, and post +data to the various endpoints using either the data forwarding settings in +[CommCare][commcare] and [Kobo][kobo] or via [CURL][curl] (or their HTTP request +agent of choice.) 
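To make the transformation step more concrete, below is a minimal, hypothetical
job-expression sketch written in the style of the generic HTTP adaptor. The
field names, URL, and FHIR mapping here are illustrative assumptions only; the
actual jobs used by this prototype live in the [demo repo][demorepo].

```js
// Illustrative sketch only (field names, URL, and mappings are assumptions);
// see the OpenFn/instant-demo repository for the real job expressions.
fn(state => {
  const submission = state.data.form; // a CommCare form submission

  // Map the submission to a (minimal) FHIR Patient resource.
  const patient = {
    resourceType: 'Patient',
    identifier: [{ value: submission.case_id }],
    name: [
      {
        given: [submission.patient_first_name],
        family: submission.patient_surname,
      },
    ],
    gender: submission.gender,
  };

  // Post the resource to an OpenHIM channel, which relays it to HAPI FHIR.
  // Operations called inside fn(...) must be passed state explicitly.
  return post('https://openhim.example.org/fhir/Patient', {
    body: patient,
    headers: { 'content-type': 'application/fhir+json' },
  })(state);
});
```

In the actual implementation, two slightly different jobs with their own
triggers handle the CommCare and Kobo payloads, as described above.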
+ +Once running, users can see how standard [CommCare][commcare] and [Kobo][kobo] +submissions are transformed by the [OpenFn/microservice][openfnmicroservice] to +adhere to the [FHIR][fhir] specifications for [patients][patientspec] and +[encounters][encouterspec], and then that those subsequent resources are created +on the [HAPI FHIR][hapifhir] server, via a channel on the [OpenHIM][openhim]. + +The second (slightly less conventional way) to explore the [demo][instantdemo], +is via [OpenFn.org][openfn]. Since [OpenFn projects][projects] can be run in +[microservice][openfnmicroservice] or on the [hosted platform][openfn], we’ve +provided a project instance at [OpenFn.org][openfn] that allows users to explore +the configuration required to incorporate [OpenFn][openfn] in an [Instant +OpenHIE][instantopenhie] project. There are three [jobs][examplejobs] which can +be accessed with a **demo user** with _username: demo@openfn.org_ and +_password:guest123_. + +The three jobs will show: + +- How a [CommCare][commcare] submission is transformed and sent to [HAPI + FHIR][hapifhir]; +- How a [Kobo][kobo] submission is transformed and sent to [HAPI + FHIR][hapifhir]; +- And what the final resources that would be sent to [HAPI FHIR][hapifhir] look + like. + +It’s our hope that this will provide a valuable entry-point for [Instant +OpenHIE][instantopenhie] configuration with +[OpenFn/microservices][openfnmicroservice]. + +## About the Implementation Setup + +### Processes + +- We met with [OpenHIE community members][openhiecomm] to understand use cases, + and with [Jembi Health Systems][jembi] to learn about [Instant + OpenHIE][instantopenhie] packages, specifications, and compliance + requirements. + +- Identified sample data sources (real [CommCare][commcare] and [Kobo][kobo] + case registration forms - see here) that we could use to send data to the + **national eHealth architecture**. Here is a [sample submission payload from + CommCare][commcaresample] Here is a [sample submission payload from + Kobo][kobosample] + +- Reviewed [FHIR-HL7][fhir] documentation to determine data standard + requirements for patient data and encounter data. See [FHIR patient + spec][patientspec] and [FHIR encounter spec][encouterspec]. + +- Evaluated [OpenFn][openfn] vs. [OpenHIM][openhim] capabilities to determine + how to use. Determined that using an [OpenHIM channel][openhim] will leverage + the core audit trail functionality from [OpenHIM][openhim], but not require us + to build a [new mediator][mediators]. + +### Project Configuration Steps + +There are two ways to set up a [project.yaml][projectyaml] to run as a +[microservice][openfnmicroservice]. The first is to use the [OpenFn.org +platform][openfn], and the second way is to use [OpenFn/devtools][devtools]. + +These two methods are detailed below: + +1. **Configure a project using the OpenFn.org platform** + + - This option allows organisations to leverage [OpenFn.org][openfn]’s + built-in features for easy [project][projects] setup, [job writing][jobs] + and source code management. + - The [project.yaml][projectyaml] file generated from this project setup will + then be used as the base structure for the [OpenFn + Microservice][openfnmicroservice]. + - The steps to setup the [OpenFn Microservice][openfnmicroservice] project + using the [OpenFn.org platform][openfn] are as below: + + **A. 
Add [credentials][cred] to the project which will be used to connect + the OpenFn Microservice to OpenHIM.** + + - This is also an opportunity to add [credentials][cred] which [OpenFn + Microservice][openfnmicroservice] may use to connect to source systems + (such as [CommCare][commcare] or [KoBotoolbox][kobo]) . + + **B. Add [triggers][trig] to the project which will be used by the [OpenFn + Microservice][openfnmicroservice] to match payloads from source systems to + [OpenFn Microservice Jobs][jobs].** + + - Note that the [Microservice][openfnmicroservice] is configured to run a + [job][jobs] based on the shape of the incoming payload. + + - For example, a [trigger][trig] may be configured to match payloads, from + [CommCare][commcare], which contain the + `{"@name": "Register New Patient"}` message in their message body. + + - A given [job][jobs] will then match against this message, and will be + invoked by the [OpenFn Microservice][openfnmicroservice] to (a) create a + payload in the [FHIR standard][fhir] containing an [Encounter + Resource][encouterspec] and (b) send the [FHIR Standard][patientspec] + Payload to [OpenHIM][openhim] with instructions to load it to [HAPI + FHIR][hapifhir]. + + **C. Export the [project.yaml][projectyaml] file using the Export Wizard of + the [OpenFn.org][openfn]** + + - The [generated YAML][projectyaml] file will then be used by the [OpenFn + Microservice][openfnmicroservice] to execute the [jobs][jobs] for the + matching payloads. + +2. **Configure a project using the [OpenFn/devtools][devtools]** + + - This option allows organisations to configure the [project][projects] and + host [job expression][jobexpr] source files, for [OpenFn Microservice + projects][projects], without using the [OpenFn platform][openfn]. + - With this option, it is recommended that organisations use source + versioning tools and platforms such as `git` and `github` to manage the + [project][projects] and [job expression][jobexpr] source code/files. + - To configure the [OpenFn Microservice project][openfnmicroservice] using + [OpenFn/devtools][devtools], create a local folder or github repository to + host your project configuration files. Inside this folder, one would then + perform the following actions: + + - Create a credential.json file + - Add credentials as shown in the [sample credential here][samplecred] + - Create the [job expressions][jobexpr]. In this case, one would create the + [CommCare-to-OpenHIM][demoexpr] and [Kobo-to-OpenHIM][demoexpr] + expressions as shown in the demo expressions [here][demoexpr] + - Run the [OpenFn CLI][openfncli] to configure the rest of the project. The + [CLI][openfncli] will assemble the [project.yaml][projectyaml] file from + the different artifacts as provided. See detailed steps in the + documentation site [here][openfncli]. + + - The last step of the [CLI][openfncli] prompts will allow one to export + the [Project YAML file][projectyaml], which will then be used by the + [OpenFn Microservice][openfnmicroservice] to execute the [jobs][jobs] for + matching payloads. + +## Job writing notes + +[OpenFn][openfn] provides two ways of writing jobs: + +- Using the [OpenFn.org’s Job Studio][studio] as detailed in the documentation + site [here][jobs] + - With this option, if editing an existing [Job Expression][jobexpr], one + would be expected to use [OpenFn.org Project Export service][openfn] to + re-generate the [Project YAML][projectyaml] file for the [OpenFn + Microservice][openfnmicroservice]. +- Using [OpenFn/devtools][devtools]. 
+ - This option also allows organisations to write [job expressions][jobexpr] + without using the [OpenFn’s hosted service][openfn]. See detailed + documentation [here][devtools] + - With this option, if editing an existing [Job Expression][jobexpr], one + would be expected to run the [OpenFn CLI][openfncli], to re-generate the + [Project YAML file][projectyaml] for the [OpenFn + Microservice][openfnmicroservice]. + +## System Deployment Steps + +- [OpenFn] provides an automated deployment script that allows system admins to + setup and run the [OpenFn Microservice][openfnmicroservice]. +- For example, to run the [Instant-demo Microservice][instantdemo], the + following steps are recommended: + - Clone the [OpenFn/instant-demo repo][instantdemo] + - Overwrite the [sample “project.yaml”][sampleyaml] file with your newly + generated [project.yaml file][projectyaml], or use the existing [YAML + file][projectyaml] to deploy the demo project. Run the setup command as + described in the documentation [here][instantdemo] + - Verify the system is working by [curling][curl] data (or submitting forms on + [CommCare][commcare]/[Kobo][kobo]) matching their [triggers][triggers] to + the [microservice][openfnmicroservice] endpoint `(localhost:4001/inbox)` and + checking to see that resources are created in [HAPI FHIR][hapifhir]. + - Note how the [test.js file][testfile] handles this verification with the + [sample project.yaml][sampleyaml] + + +[openfn]: https://openfn.org/ +[instantopenhie]: https://wiki.ohie.org/display/resources/Instant+OpenHIE +[openhiestack]: https://openhim.readthedocs.io/en/latest/implementations/openhie.html +[openhiearchitecture]: https://wiki.ohie.org/pages/viewpage.action?pageId=8454157 +[openhim]: http://openhim.org/ +[jobs]: /documentation/build/jobs/ +[mediators]: http://openhim.org/mediator-library/ +[demorepo]: https://github.com/OpenFn/instant-demo +[openfnmicroservice]: /documentation/microservice/home/ +[digitalsquare]: https://digitalsquare.org/ +[fhir]: https://fhir.org/ +[hapifhir]: https://hapifhir.io/ +[commcare]: https://www.commcarehq.org/ +[kobo]: https://www.kobotoolbox.org/ +[projects]: /documentation/build/example-build/ +[projectyaml]: https://github.com/OpenFn/microservice/blob/main/project.yaml.example +[triggers]: /documentation/build/triggers/ +[commcaresample]: https://github.com/OpenFn/instant-demo/blob/main/fixtures/commcare_sample.json +[kobosample]: https://github.com/OpenFn/instant-demo/blob/main/fixtures/koboCaseRegistration.json +[patientspec]: https://www.hl7.org/fhir/patient-example.json.html +[encouterspec]: https://www.hl7.org/fhir/encounter-example.json.html +[openhiecomm]: https://ohie.org/tag/community/ +[jembi]: https://www.jembi.org/ +[cred]: /documentation/build/credentials/ +[trig]: /documentation/build/triggers/ +[devtools]: https://github.com/OpenFn/devtools +[testfile]: https://github.com/OpenFn/instant-demo/blob/main/test.js +[instantdemo]: https://github.com/OpenFn/instant-demo +[samplecred]: https://github.com/OpenFn/instant-demo/blob/main/openfn/docker/config/project.yaml#L165-L167 +[openfncli]: /documentation/devtools/home/#configure-an-openfn-project +[demoexpr]: https://github.com/OpenFn/instant-demo/tree/main/expressions +[jobexpr]: /documentation/build/jobs/#a-basic-expression +[sampleyaml]: https://github.com/OpenFn/instant-demo/blob/main/openfn/docker/config/project.yaml +[curl]: https://curl.se/ +[studio]: /documentation/jobs/job-studio/ +[examplejobs]: https://openfn.org/projects/p5pqx3/jobs + diff --git 
a/versioned_docs/version-legacy/intro.md b/versioned_docs/version-legacy/intro.md new file mode 100644 index 00000000000..c6c1f1b2a3f --- /dev/null +++ b/versioned_docs/version-legacy/intro.md @@ -0,0 +1,106 @@

---
title: About
sidebar_label: What is OpenFn?
slug: /
---

## What is OpenFn?

:::tip

OpenFn is software that makes it easier for governments and NGOs to _connect_
the different technologies they use, share data securely, drive critical
business processes, and scale their interventions via workflow automation and
real-time interoperability.

:::

OpenFn is a suite of data integration, interoperability, and business process
automation (i.e., workflow) tools that's used by governments, NGOs, and social
enterprises in the health and humanitarian sectors. It enables users to connect
any system, and comes with adaptors (i.e., connectors) for
[over 70 apps](https://www.openfn.org/apps).

Some OpenFn use cases are:
[Business process and workflow automations](https://www.dropbox.com/s/nb246sav7ozlmo1/OpenFn%20Business%20Process%20%26%20Workflow%20Automation%20Solutions.pdf?dl=0)
|
[Data Integration and Interoperability](https://www.dropbox.com/s/ij7n0fa2wosreod/OpenFn%20Data%20Integration%20%26%20Interoperability%20Solutions.pdf?dl=0)
|
[Data Cleaning Pipelines](https://www.dropbox.com/s/ffchww1niw46nlu/OpenFn%20Data%20Cleaning%20Pipeline%20Solutions.pdf?dl=0)
|
[Data Sharing and Reporting](https://www.dropbox.com/s/5oj5wqo84q29p5j/OpenFn%20Data%20Sharing%20%26%20Reporting%20Solutions.pdf?dl=0)
|
[Data Warehouse and Analytics](https://www.dropbox.com/s/e48z6a9x1kgmlbd/OpenFn%20Data%20Warehouse%20%26%20Analytics%20Solutions.pdf?dl=0)
|
[Legacy Systems Integration](https://www.dropbox.com/s/dqhgvjkqjrs9vzi/OpenFn%20Legacy%20Systems%20Integration%20%26%20Sync%20Solutions.pdf?dl=0)

## Who is it built by?

OpenFn products are built by the [Open Function Group](/documentation/about) and
a growing community of open-source contributors.

## Our products

OpenFn has several products, which are all fully interoperable. This gives our
users the freedom to switch between any and all of the OpenFn products.

All OpenFn products, other than the OpenFn iPaaS, are part of the free and
open-source
[**OpenFn Integration Toolkit**](/documentation/getting-started/integration-toolkit).
This Toolkit is a **Digital Public Good** (a "DPG") recognized in the
[DPG Registry](https://digitalpublicgoods.net/registry/) and Digital Square's
[Global Goods Guidebook](https://digitalsquare.org/resourcesrepository/global-goods-guidebook).

### OpenFn v1

[OpenFn](https://www.openfn.org/signup) v1 is an
_integration-platform-as-a-service_ or "iPaaS". It is enterprise-grade software,
hosted by OpenFn. We recommend starting out there - you can
[create an account for free](https://www.openfn.org/signup), or view our pricing
[here](https://openfn.org/pricing).

### OpenFn v2: Lightning ⚡

[OpenFn/Lightning](https://github.com/OpenFn/lightning/) is a _fully open
source_ workflow automation web application which can be deployed and run
anywhere. It's the v2 of our digital public good. It is designed for governments
and NGOs who want state-of-the-art workflow automation and data
integration/interoperability, with fully-fledged user management and auditing
capabilities, through a managed _or_ entirely self-hosted platform.

Lightning relies on the same tried-and-trusted core technology as the OpenFn
iPaaS and comes with an improved, visual interface for building integrations.

:::info OpenFn/Lightning is now in beta

Lightning is in beta, but will be validated for production use and publicly
released in the second half of 2023.

:::

If you have any questions about our products, please don't hesitate to email
[admin@openfn.org](mailto:admin@openfn.org).

### OpenFn developer tooling

#### [OpenFn/cli](/documentation/cli)

A set of CLI tools for writing & testing expressions, managing OpenFn projects,
and developing [adaptors](https://github.com/openfn/adaptors).

:::note

You can view the technical documentation and source code for OpenFn's FOSS
integration tools and adaptors in their respective repositories at
[Github.com/OpenFn](https://github.com/openfn), or see the
[Deploy](/documentation/deploy/options) section for an overview of the FOSS
options and additional docs.

:::

## The Community Forum

Finally, please make sure to check out our Discourse forum at
[community.openfn.org](https://community.openfn.org). Sign up and join the
conversation. Usually, that's the quickest way to get help if you've got
questions that aren't answered here.

diff --git a/versioned_docs/version-legacy/jobs/each.md b/versioned_docs/version-legacy/jobs/each.md new file mode 100644 index 00000000000..c47dbfb6b32 --- /dev/null +++ b/versioned_docs/version-legacy/jobs/each.md @@ -0,0 +1,145 @@

---
id: each
title: The each(...) operation
---

The `each` operation allows you to perform another operation on each item in an
array.

## Each takes two arguments

In other words, `each(arrayPath, operation)` will _do_ `operation` on each item
it finds in the `arrayPath` array. It takes just two arguments:

1. an arrayPath
2. an operation(...)

### arrayPath

Let's look at the first argument in `each`... the path to the array. Consider
the following code using the Salesforce adaptor:

```js
each(
  dataPath('form.participants[*]'),
  upsert(
    'Person__c',
    'Participant_Identification_Number_PID__c',
    fields(
      field('Participant_Identification_Number_PID__c', dataValue('pid')),
      relationship('RecordType', 'Name', 'Participant'),
      field('First_Name__c', dataValue('participant_first_name')),
      field('Surname__c', dataValue('participant_surname')),
      field('Mobile_Number_1__c', dataValue('mobile_number')),
      field('Sex__c', dataValue('gender'))
    )
  )
);
```

This will upsert a `Person__c` resource in Salesforce for each item found in the
`state.data.form.participants` array. You could specify this path in the
following ways:

- `'$.data.form.participants[*]'`
- `dataPath('form.participants[*]')`

Note the JSON path syntax.

### the operation

If there are 5 participants in there, it will execute the `upsert` operation on
all 5 items, in sequence. `upsert` takes whatever arguments it takes normally,
but it operates _inside_ the array. See below for more details on the _scope_ of
this operation.

## dataValue(...) _inside_ each(...)

Note that inside the `each(...)` operation, using `dataValue(path)` will
evaluate a path inside each item in the array.

## merge(...) and bringing data 'down' into an array

What if you want to access data in your `upsert` operation that does _not_ exist
in the array itself? You could use a data preparation step (see: `alterState`)
or make use of `merge(path, data)`, which allows you to merge data from the
initial scope down into your array and access it from the `upsert` operation.

```js
each(
  merge(
    dataPath('form.participants[*]'),
    fields(
      field('school_id', dataValue('form.school.id')),
      field('intervention_type', dataValue('form.type'))
    )
  ),
  upsert(
    'Person__c',
    'Participant_Identification_Number_PID__c',
    fields(
      field('Participant_Identification_Number_PID__c', dataValue('pid')),
      relationship('RecordType', 'Name', 'Participant'),
      field('First_Name__c', dataValue('participant_first_name')),
      field('Surname__c', dataValue('participant_surname')),
      field('Mobile_Number_1__c', dataValue('mobile_number')),
      field('Sex__c', dataValue('gender')),
      // new fields...
      field('School__c', dataValue('school_id')),
      field('Intervention_Type__c', dataValue('intervention_type'))
    )
  )
);
```

## beta.each

After using an `each(...)` operation, the scope of subsequent operations will be
inside the array at `arrayPath`. If you want to return to the top-level scope so
that you can iterate through another array (rather than continuing to work
inside the first array called with `each()`), you can use `beta.each`.

`beta.each(...)` will scope an array of data based on a JSONPath but then
**return** to the state it was given upon completion. See the
[source](https://github.com/OpenFn/language-common/blob/master/src/beta.js#L44)
here.

This is necessary if you string multiple `each(...)` functions together in-line
in the same expression. (E.g., given data which has multiple separate 'repeat
groups' in a form which are rendered as arrays, you want to create new records
for each item inside the first repeat group, then _RETURN TO THE TOP LEVEL_ of
the data, and then create new records for each item in the second repeat
group.) Using `beta.each(...)` lets you enter the first array, create your
records, then return to the top level so that you can enter the second array.

```js
// create some schools from the state.data.form.schools array...
beta.each(
  dataPath('form.schools[*]'),
  upsert(
    'School__c',
    'School_ID__c',
    fields(
      field('School_ID__c', dataValue('schoolId')),
      field('School_Name__c', dataValue('schoolName'))
    )
  )
);

// back up at the top level, we scope the next array with each...
beta.each(
  dataPath('form.participants[*]'),
  upsert(
    'Person__c',
    'Participant_Identification_Number_PID__c',
    fields(
      field('Participant_Identification_Number_PID__c', dataValue('pid')),
      relationship('RecordType', 'Name', 'Participant'),
      field('First_Name__c', dataValue('participant_first_name')),
      field('Surname__c', dataValue('participant_surname')),
      field('Mobile_Number_1__c', dataValue('mobile_number')),
      field('Sex__c', dataValue('gender'))
    )
  )
);
```

diff --git a/versioned_docs/version-legacy/jobs/editing_locally.md b/versioned_docs/version-legacy/jobs/editing_locally.md new file mode 100644 index 00000000000..c09eab88858 --- /dev/null +++ b/versioned_docs/version-legacy/jobs/editing_locally.md @@ -0,0 +1,74 @@

---
title: Editing jobs locally
---

To edit jobs, instead of using the OpenFn
[Job Studio](/documentation/jobs/job-studio) you can also use your favorite text
editor and make changes offline, committing and pushing to GitHub to deploy to
your OpenFn project using the
[version control feature](/documentation/manage/platform-mgmt/#github-version-control).
+ +First, make sure that version control is set up for your project and the job in +question. When that's all done, follow the steps below: + +1. Make sure you have + [git installed](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) + +2. Clone the repo from GitHub. Depending on how you're connecting, grab the + HTTPS or SSH URL of the repository. + +![GH Clone URL](/img/git_clone_url.png) + +:::tip + +You can connect to GitHub with username+password (HTTPS) or an SSH keypair you +generated. (You can check out the +[GitHub docs](https://docs.github.com/en/get-started/getting-started-with-git/about-remote-repositories) +for more info.) + +::: + +3. Then use it to + [clone the repo](https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository) + to your computer by running this command in a folder that you'd like to + contain your new repo: `git clone {repo URL}` (e.g., + `git clone https://github.com/OpenFn/Miracle-Feet.git`) + +4. To update your local copy with changes from GitHub, run `git pull` regularly + while you're editing. + +5. For this tutorial, we assume you're making changes on the `main` or `master` + branch: the one that is deployed as your production system to OpenFn. + +6. To edit your jobs, use a code editor such as + [Visual Studio Code](https://code.visualstudio.com/download). + +![VS Code](/img/edit_job_vscode.png) + +7. Make sure you install the + [Prettier VSCode Extension](https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode) + and set is as default formatter in Settings as seen below. This will apply + the correct code formatting to the files you change. + +![Prettier](/img/prettier.png) + +8. Once you're done, you can check which files you changed with `git status`. + +9. Then use `git add {filepath}` followed by `git commit -m {change notes}` to + prepare the changes to be merged into the repo. + +:::tip + +There's a lot to learn about git. +[Here's a good place to start](https://github.com/git-guides/git-commit). + +::: + +10. Then run `git push` to upload the files to the repo (see more on + [git docs](https://github.com/git-guides/git-push)). + +From there, the version control integration will update changed jobs in your +OpenFn project and you can test those changes on the platform. + +Once you're ready to start running jobs and testing your changes _locally_, head +over to the [The CLI](/documentation/cli) docs for guidance. diff --git a/versioned_docs/version-legacy/jobs/errors.md b/versioned_docs/version-legacy/jobs/errors.md new file mode 100644 index 00000000000..9bf0a9e6dc8 --- /dev/null +++ b/versioned_docs/version-legacy/jobs/errors.md @@ -0,0 +1,88 @@ +--- +title: Exit Codes & Errors +--- + +## About errors + +Errors are your friends. The most important thing you can do when encountering +an error in any step of a data integration project is to _read_ what's on the +screen. While this may sound obvious, it's very easy to see a big block of +technical language and switch off. + +Often, there are simple business reasons that a run is failing and they can be +identified from the error messages displayed in the logs by @openfn/core. + +## Standard Exit Codes + +### 0: Success + +Exit code `0` is a success (run succeeded, e.g. a destination system responded +with a `200`) + +### 1: Error + +Exit code `1` is a normally-handled error (run failed normally, e.g. a +destination system responded with a `4XX`, `5XX`, or some specialized +`RequiredFieldMissing` error.) 
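One way to take advantage of this "normally-handled" failure path is to raise
clear, descriptive errors yourself when an expected value is missing. Below is a
minimal sketch; the field name is hypothetical, and exactly which exit code you
see depends on how the error surfaces in your run.

```js
// Hypothetical check: failing loudly here makes the run logs explain exactly
// which business rule was violated, rather than failing later with a cryptic
// destination-system error.
fn(state => {
  if (!state.data.form || !state.data.form.patient_id) {
    throw new Error('RequiredFieldMissing: form.patient_id was not provided');
  }
  return state;
});
```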
### 2: Timeout

Exit code `2` means that your run timed out. It exceeded the timeout you set for
your job, or if you're using the hosted platform on a non-enterprise plan, it
exceeded `100` seconds.

### 10: Core Error

Exit code `10` means there was an error in `@openfn/core/cli.js execute`. It's
possible that your job can't be run as it's written, but that various validation
checks failed to warn you earlier.

### 134: Out of memory

This is a standard out of memory error from the NodeVM. See
[NodeVM memory limits](#nodevm-memory-limits) below.

## Special Exit Codes (Platform)

The OpenFn platform controls the execution of all of your jobs via the ErlangVM.
(Learn about Erlang [here](https://www.erlang.org/).) The following exit codes
are applied at the level of this controlling VM, rather than down inside your
run's NodeVM.

### 3: Failed to start

Exit code `3` means the run could not be started due to an Erlang error. This
could relate to network traffic, but is very rare, as an error which takes place
_before_ the run is started will be retried—with an exponential backoff—for a
very long time.

### 4: The NodeVM is unresponsive

Exit code `4` means that the NodeVM running your job became unresponsive. It
attempted to exit after the timeout but couldn't, so we shut down the process
from the controlling ErlangVM.

### 5: Elixir/ErlangVM Error

Exit code `5` means we encountered an unexpected error during the execution of
your job which could only be caught by the controlling ErlangVM. Errors like
this are very rare and trigger an internal audit by the OpenFn engineering team.

### 6: Aborted by project administrator

Exit code `6` means that a run was created and queued for execution but then
subsequently aborted by request of an administrator or owner of that project.

### 11: Node.js Error

Exit code `11` means that there was an error in the NodeVM execution but the
exit code itself couldn't be captured by our application.

## Limits

We've shifted this content to a dedicated [limits](/documentation/jobs/limits)
page.

### [NodeVM memory limits](/documentation/jobs/limits#nodevm-memory-limits)

### [Job state limits](/documentation/jobs/limits#job-state-limits)

diff --git a/versioned_docs/version-legacy/jobs/job-design-intro.md b/versioned_docs/version-legacy/jobs/job-design-intro.md new file mode 100644 index 00000000000..b5a6966dd92 --- /dev/null +++ b/versioned_docs/version-legacy/jobs/job-design-intro.md @@ -0,0 +1,61 @@

---
title: Designing a job
---

A job defines the specific series of tasks or database actions to be performed
when a triggering message is received (event-based) or a pre-scheduled (and
recurring) time is reached. It's the series of instructions for handling the
data coming from a source system and sending it to the destination system; in
other words, for mapping data elements from one system to the other.

Designing a job really just means clearly defining the "rules" for data element
mapping. We'll walk through the main steps using Kobo Toolbox as an example
source system and a Postgres database as the destination, but check out the
[integration design](/documentation/design/design-quickstart/) page for more
details on data flow diagrams and mappings.

Step 1: Map your data flows

1. Define your input(s). What data collection forms are used to collect data?
   How many forms? Are there different form versions?
2. Define your output(s). Where should the data be stored? In what format? What
   are your analysis requirements?

Step 2: Map your data elements

1. Export the metadata of your form (input) & destination DB (output).
2. Paste the metadata into an Excel spreadsheet to create a mapping sheet:

![Sample mapping sheet](/img/data-element-mapping.png)

3. Map data elements & define rules for data cleaning and transformation.
   a. How should the data collected be translated into your destination
   system's data model?
   b. Does your destination system have data input & validation requirements?

Step 3: Define your operations: insert, update, upsert...

1. Find out or create the unique identifiers you will use to insert and update
   data (form ID, answer ID, case or patient ID, etc.).
2. Determine operations: e.g., insert, update, upsert, upsertMany.
3. Check the adaptor for helper functions.
   a. Example from
   [language-postgresql](https://github.com/OpenFn/language-postgresql):
   - `insert(...)`, `insertMany(...)`
   - `update(...)`, `updateMany(...)`
   - `upsert(...)`, `upsertMany(...)` → update if the record exists or insert
     if it doesn't; references an external Id
   b. Example from
   [language-dhis2](https://github.com/OpenFn/language-dhis2) using Tracked
   Entity Instances (TEI):
   - `updateTEI(...)`
   - `upsertTEI(...)`

Example upsert job:

```js
upsert('mainDataTable', 'AnswerId', {
  AnswerId: dataValue('_id'), // external Id for upsert
  column: dataValue('firstQuestion'),
  LastUpdate: new Date().toISOString(),
  Participant: dataValue('participant'),
  Surveyor: dataValue('surveyor'),
  // ...additional columns
});
```

diff --git a/versioned_docs/version-legacy/jobs/job-studio.md b/versioned_docs/version-legacy/jobs/job-studio.md new file mode 100644 index 00000000000..ea2a11803f5 --- /dev/null +++ b/versioned_docs/version-legacy/jobs/job-studio.md @@ -0,0 +1,30 @@

---
title: The OpenFn Job Studio
---

The OpenFn Job Studio allows you to create and test jobs on the platform with
easy access to documentation of [adaptors](/documentation/build/jobs#adaptors),
matching [messages](/documentation/build/inbox), and real-time logs.

```mdx-code-block
import ReactPlayer from 'react-player';


```

## Job Studio Features

When editing a job, you can find messages in the inbox that match your job
trigger by clicking "Find messages matching this trigger". If you navigate to a
field within the message JSON, you can copy the exact path to the field and
paste it as a `dataValue` in your job.

You can find a list of available helper functions for your chosen adaptor in the
job writing panel heading. Clicking the function name copies the function code
so that you can paste it directly into the job.

![Job_Studio_Edit](/img/job_studio_edit.gif)

By clicking "Save and Run", run logs are shown in real time in the Output panel.

![Stream](/img/stream.gif)

diff --git a/versioned_docs/version-legacy/jobs/limits.md b/versioned_docs/version-legacy/jobs/limits.md new file mode 100644 index 00000000000..77ecf74aaa8 --- /dev/null +++ b/versioned_docs/version-legacy/jobs/limits.md @@ -0,0 +1,76 @@

---
title: Limits
---

Both microservice and OpenFn.org (the iPaaS) have some pre-configured limits to
ensure smooth operation. Read below for a detailed explanation of those defaults
and how to adjust the limits.

:::tip

If you don't feel like reading much, here's the tl;dr:

1. Don't send more than `8MB` to the `/inbox` API in a single request.
2.
Make sure your `expression.js` and your `state.json` don't total more than + `10MB` when you _start_ or _finish_ a run. +3. Keep memory usage below `512MB` _during_ the run. + +::: + +## NodeVM memory limits + +The default memory limit for a Node process is `512MB` and unless you've +customized it for use on core or have agreed to a high-memory plan on +OpenFn.org, job runs will be killed by Node (`JavaScript heap out of memory`) +when they reach that threshold. + +### Increase memory limit for an individual execution + +For a `1GB` limit for an individual job, use: + +```sh +node --max-old-space-size=1024 core execute ...arguments +``` + +### Increase memory limit for all jobs on that machine + +For a `4GB` limit for all jobs, set an environment variable with: + +```sh +export NODE_OPTIONS=--max_old_space_size=4096 +``` + +## Request body limit on inboxes + +The maximum size of the JSON body accepted on the `/inbox/your-uuid` endpoint is +`8MB`. Note that payloads should be kept below this limit in order to ensure +that [Job state limits](#job-state-limits) are not hit when a subsequent job run +is executed. + +:::note + +If you're using OpenFn for bulk data processing/periodic batch jobs, you can +either reconfigure these limits on your microservice deployment or contact +enterprise@openfn.org to have special provisions made for your OpenFn.org +project. + +::: + +## Job state limits + +When a job is to be sent for processing the total size of all job artifacts +(your job `expression` and the initial `state`) cannot exceed `10MB`. In other +words, your code (`expression.js`) plus your data and configuration +(`state.json`) cannot exceed `10MB` in total. + +Similarly, when state is saved to cloud storage/disk _after_ execution there is +a `10MB` limit to the size of a job's final `state`. + +If you're dealing with large amounts of data you've got to (a) keep your total +memory use below `512MB` during execution and (b) clean up state so that +whatever you're passing to the next job, whether via FLOW or via saved state for +a CRON job, remains below `10MB`. + +Sometimes, this necessitates streaming data from A -> B in the same job. Other +times it will necessitate storing data as local variables and then posting data +to your inbox in chunks before cleaning up state. diff --git a/versioned_docs/version-legacy/jobs/multiple-operations.md b/versioned_docs/version-legacy/jobs/multiple-operations.md new file mode 100644 index 00000000000..054caa53bab --- /dev/null +++ b/versioned_docs/version-legacy/jobs/multiple-operations.md @@ -0,0 +1,44 @@ +--- +title: Using multiple operations +--- + +You can do many things in sequence with OpenFn, whether using `core`, +`microservice`, or `platform`. + +## Flow jobs vs multiple operations in a single job vs posting back to the inbox + +### Reasons to use flow jobs + +- Each operation needs to use functions that are _only_ available in different + adaptors. +- You _must_ have different credentials for each operation +- You want to see success and failure at the level of each operation +- Each individual run takes a long time and you're worried about your NodeVM + being timed out. (On `platform`, this happens after 100s for non-enterprise + users; on `microservice` you've likely configured your own timeout duration.) + +### Reasons to use multiple operations in a single job + +- The job must be atomic, you want the whole thing to count as a failure if any + part of it fails. 
- You run jobs manually and you want a single button to click to retry the
  entire sequence of operations.
- You update a `cursor` in a series of operations that involve `GET` and `POST`.
  When the `POST` fails, you don't want to update the `cursor` for the
  subsequent job run which contains the `GET`.
- Your operations don't take too long (<100s in total for `platform`) and you
  want to reduce the number of executions.

### Reasons to post back to the inbox

You might decide to send data from a `GET` back to your inbox and let another
job be triggered by a message filter.

```js
get('somePath', { query: { after: '2020-10-12' } }, post('my-inbox-uuid'));
```

- You'd like to store the result of the first operation for later use or
  inspection and not have to retry that operation in order to reproduce the
  data.
- You don't care about small delays between the first and second job being run.

diff --git a/versioned_docs/version-legacy/jobs/operations.md b/versioned_docs/version-legacy/jobs/operations.md new file mode 100644 index 00000000000..b70fed343be --- /dev/null +++ b/versioned_docs/version-legacy/jobs/operations.md @@ -0,0 +1,36 @@

---
title: What's an operation?
---

An Operation is a function which returns a function which takes `state` and
returns a `Promise` or `state`.

The purpose of an Operation is to act as an unresolved unit of behaviour.

For example, when creating an expression, the code itself doesn't know what the
state is going to be, only what _it's going to do_.

Language packs all follow this convention, where the functions they provide all
return Operations.

```javascript
create('My_Custom_Object__c', {
  Custom_Field__c: dataValue('foo'),
});
```

In the snippet above, the `create` function doesn't know anything about
credentials, or any dynamic data that may be available at runtime.

```javascript
function create(objectName, data) {
  return function (state) {
    // expand the data argument using state
    // actually do the work
  };
}
```

This snippet is a simple example of what most functions in OpenFn look like.
The `create` function returns a function that takes state; this is an
`Operation`. The runtime, using `execute`, will call all Operations with
`state`.

diff --git a/versioned_docs/version-legacy/jobs/state.md b/versioned_docs/version-legacy/jobs/state.md new file mode 100644 index 00000000000..1bbc081488e --- /dev/null +++ b/versioned_docs/version-legacy/jobs/state.md @@ -0,0 +1,42 @@

---
title: Initial and final state for runs
---

## Initial state

Depending on what tools you're using and what triggered a given run, the initial
`state` for a job run might be generated in a number of different ways, and you
might even build `state` by hand. For `microservice`, `engine`, and `platform`,
however, there are strict rules around how `state` gets created and provided to
a runtime for execution of your operations. See the table below for details.

## Final state

The final state of a job run is determined by _you_. Job expressions are a
series of `operations`—they each take `state` and return `state`, after creating
any number of side effects.

### Final state after an error

If a job run fails, it will not produce a final state. The run itself will have
`log` information attached to it, along with its exit code, but there's not
necessarily a clean final `state` which can be serialized to `JSON`.
+ +:::info + +If you're making use of a `failure` triggered job, that job run will not get the +final state of the previous job run, as it failed and has no final state. It +will instead receive the initial state of the previous (failed) run, plus a new +`error` key that contains the stringified logs from the previous run. See below +for details. + +::: + +## States by job trigger type + +| Triggering Event | Initial State | +| ---------------- | ------------------------------------------------------------------------------------------------------ | +| http request | `{ data: httpRequest.body, configuration: job.credential.body }` | +| cron | `{ ...finalStateOfLastSuccessfulRun, configuration: job.credential.body }` | +| flow: success | `{ ...finalStateOfTriggeringRun, configuration: job.credential.body }` | +| flow: failure | `{ ...initialStateOfTriggeringRun, error: logsFromTriggeringRun, configuration: job.credential.body }` | diff --git a/versioned_docs/version-legacy/jobs/understanding.md b/versioned_docs/version-legacy/jobs/understanding.md new file mode 100644 index 00000000000..ae567b8e11a --- /dev/null +++ b/versioned_docs/version-legacy/jobs/understanding.md @@ -0,0 +1,139 @@ +--- +title: A closer look at jobs +--- + +This is technical documentation aimed at making complex custom jobs easier to +write. + +## Key Terms and Concepts + +1. **core** (https://github.com/openfn/core) is the Javascript program which + executes jobs for OpenFn in an emphemeral Node.js environment. +2. **state** is a .JSON file that is built and passed into the Node environment. + It contains at least two keys, `configuration` and `data`. Configuration will + be populated with your credential and it used by adaptors for authentication, + and data will be populated with message data if the job was triggered by an + incoming message. + +```json +{ + "configuration": { + "username": "taylor", + "password": "shhhhhh", + "loginUrl": "https://login.salesforce.com" + }, + "data": { + "a": 1, + "b": { + "x": [1, 2, 3] + } + } +} +``` + +3. **expressions** are sequences of operations to be executed. They are part of + "jobs", which also include a credential, a trigger, a label, and (sometimes) + a github filepath. +4. **operations** are named functions, exported for use by specific adaptors, + which take state and return state. + +## State is passed to operations. Operations Return state. + +This is a key concept. When you write: + +```js +create('object', fields( + field(...) +)); +``` + +The execute function in your language-package (e.g., `language-salesforce`) will +execute each operation with state, then return state. If you want to execute +operations inside another custom function, you must explicitly pass in state. + +```js +fn(state => { + return create('object', fields( + field(...) + ))(state) +}); +``` + +## Sequences of operations inside custom functions. + +Using `execute` you can string together several sequential operations inside a +custom function. + +```js +fn(state => { + const { userName } = state.data.form.meta; + + if (userName != 'tester') { + return execute( + upsert("person__c", "Name", fields( + field(...), + field(...) + )), + beta.each( + dataPath("form.array[*]"), + upsert("object", "Name", fields( + field(...) + )) + ) + )(state) + } + return state; +}); +``` + +## Controlling timing between operations with async functions. 
+ +To get really complex, you might want to execute a number of async functions +inside an `alterState` operation, but WAIT for those functions to resolve before +moving on to your next operation. If `execute` doesn't work for your use case, +you could use `Promise.all` and return an async function. + +```js +fn(state => { + console.log('Here we will await the result of a LOT of async operations.'); + console.log('First we define a bunch of different async functions.'); + const postClinics = async c => { + return post(state.configuration.inboxUrl, { + body: { clinics: c }, + })(state); + }; + + const postPatients = async p => { + return post(state.configuration.inboxUrl, { + body: { patients: p }, + })(state); + }; + + const postVisits = async v => { + return post(state.configuration.inboxUrl, { + body: { visits: v }, + })(state); + }; + + console.log( + 'Then we define a single function that wraps them all up and waits for all the individual functions to resolve.' + ); + async function makePosts() { + return Promise.all([ + ...state.data.clinicSets.map(item => postClinics(item)), + ...state.data.patientSets.map(item => postPatients(item)), + ...state.data.visitSets.map(item => postVisits(item)), + ]); + } + + console.log( + 'Then we return that function, forcing our next operation to await the result of this one.' + ); + return makePosts(); +}); + +fn(state => { + console.log('I get called AFTER those async functions are resolved.'); + return state; +}); +``` diff --git a/versioned_docs/version-legacy/jobs/working_with_branches.md b/versioned_docs/version-legacy/jobs/working_with_branches.md new file mode 100644 index 00000000000..b2e254a2a5c --- /dev/null +++ b/versioned_docs/version-legacy/jobs/working_with_branches.md @@ -0,0 +1,59 @@ +--- +title: Working with branches +--- + +In the [Editing jobs locally](/documentation/jobs/editing_locally) section we +walked through the process of creating and adding your changes to the `main` +branch of a project. + +However, most code change workflows involve sharing and reviewing changes before +deployment. You can do this by creating, testing and sharing your changes on a +new branch, then, once final, merging them into `main` for deployment. + +:::tip + +There are LOTS of different strategies for branching and reviewing code on Git. +(Like [GitHub Flow](https://guides.github.com/introduction/flow/) or +["That Famous @nvie Post"](https://nvie.com/posts/a-successful-git-branching-model/) +for example!) This guide is meant to give you a very brief introduction to +branches in Git, but it is not meant to dictate the "right way". + +::: + +Let's pick up the workflow when you `git pull` -ed the latest changes of the +repo to your local folder. + +1. Running `git checkout -b {branch_name}` will create and switch over to a new + branch. When you start editing your jobs, the changes will be kept on this + branch, managed separately from `main`. + +2. To test the changes locally, check out the [The CLI](/documentation/cli) + docs. + +3. Just as you've seen when working on `main`, when you're done check which + files you changed with `git status`. + +4. Then use `git add {filepath}` followed by `git commit -m {change notes}` to + prepare the changes to be merged into the repo. + +5. The following command will push your changes to the remote repository as a + separate, new branch: `git push --set-upstream origin {branch_name}`. + +6. On GitHub, you can create a Pull Request to get your changes reviewed and + approved. 
+ + ![PR-1](/img/pull-request.png) + + ![PR-2](/img/pull-request-2.png) + +7. As you keep working with branches, make sure you check which branch you're on + with `git status`. + +![git-status](/img/git-status.png) + +8. To keep your local copy up to date with the remote repo, switch to `main` + with `git checkout main` and hit `git pull` to pull any changes. + +9. If you're still working on your separate branch while `main` has been + updated, use `git checkout {branch_name}` followed by `git merge main` to + copy over new changes from `main` to your branch. diff --git a/versioned_docs/version-legacy/manage/platform-mgmt.md b/versioned_docs/version-legacy/manage/platform-mgmt.md new file mode 100644 index 00000000000..88ed8f59cf3 --- /dev/null +++ b/versioned_docs/version-legacy/manage/platform-mgmt.md @@ -0,0 +1,1072 @@ +--- +title: Project Management +--- + +:::important + +Currently, this section is specific to **OpenFn/platform**. + +::: + +## Jobs + +This section of the portal allows you to create and manage your jobs. + +### Searching jobs + +For a project with a number of jobs, finding a job can be easily achieved via +the search feature. + +To search for a given job: + +- From the application **menu**, click on **Jobs**. +- Find the **Search jobs** box and type the name of the job in the search box. +- The application will filter and show all jobs matching the portion of text + entered into the search box. + +### Switching on/off a job + +In OpenFn, a job is **off** by default. To **switch on** a given job, follow the +steps below: + +- From the application **menu**, click on **Jobs**. +- Find the job you would like to turn on. +- On the top-right corner of the job card, click on the **switch** button to + turn on/off the job. +- Once switched on, the job's **switch** button will change the color to + **blue**. + +:::info + +Note that once a job is **switched on**, OpenFn will run it automatically, as +[configured](/documentation/build/jobs). If you do not want a job to be run +automatically, by OpenFn, then turn it **off**. + +::: + +### Making a job private + +OpenFn allows you to share jobs to an **open source job library** that other +users can learn from or reuse. All jobs are available for sharing and inherit +project sharing settings, by default. If you do not want a given job to be +available for sharing to [OpenFn's Job Library](/adaptors/library), then you can +mark that job as **private**. To mark a job as private, follow the below steps: + +- From the application **menu**, click on **Jobs**. +- Find the job you would like to mark as **private**. +- On the bottom-left corner of the job card, click on the **View**. +- While on the details page for the selected job, click on the **eye** icon. + +:::info + +Note that once a job is marked as **private**, sharing will be blocked even if +its project is enrolled in the [OpenFn's Job Library](/adaptors/library). You +can toggle this setting back by clicking on the **eye** icon. + +::: + +### Archiving a job + +OpenFn allows you to **archive** a job if it is no longer needed or used. To +archive a job, follow the steps below: + +- From the application **menu**, click on **Jobs**. +- Find the job you would like to **archive**. +- On the bottom-left corner of the job card, click on the **View**. +- While on the details page for the selected job, click on the **archive** icon. +- Confirm archiving in the dialog that pops up after clicking the archive icon. 
+ +:::info + +Note that once **archived**, the job won't appear in your jobs list. Also +messages will not appear to match against it until you restore the job. Also +note that a job **cannot be deleted**, it can only be archived. + +::: + +### Restore archived job + +To restore an archived job, follow the steps below: + +- From the application **menu**, click on **Jobs**. +- While on the jobs list page, click on the **Show archived jobs** button. +- All archived jobs will be shown in the jobs list. +- Find the job you would like to **restore**. +- On the bottom-left corner of the job card, click on the **View**. +- While on the details page for the selected job, click on the **restore** icon. +- The job will now be shown in the list of available jobs. + +### Disabling console logs for a job + +OpenFn allows you to disable `console.log` statements for your job. Disabling +`console.log` ensures that sloppy or malicious code written in the job +expression does not expose sensitive data from the jobs. + +To disable `console.log` for a given job, follow the steps below: + +- From the application **menu**, click on **Jobs**. +- Find the job you would like to **disable console log** for. +- On the bottom-left corner of the job card, click on the **View**. +- While on the details page for the selected job, click on the **lock** icon. + +### Editing a job + +OpenFn allows you to edit or make changes to existing jobs. To edit a given job, +follow the steps below: + +- From the application **menu**, click on **Jobs**. +- Find the job you would like to **edit**. +- On the bottom-left corner of the job card, click on the **View**. +- While on the details page for the selected job, click on the **pencil** icon. +- See details about job editing in [Job Studio here](platform-mgmt#job-studio). + +### Job change history and reverting changes + +If your job is linked to a Github repo, changes made to a job expression can be +reverted to a given git commit. To revert changes made to a job expression, +follow the steps below: + +- From the application **menu**, click on **Jobs**. +- Find the job whose changes you would like to **revert**. +- On the bottom-left corner of the job card, click on the **View**. +- While on the details page for the selected job, scroll down to the bottom of + the job card and click on **View Change History**. +- Select a corresponding change history row. +- Accept the prompts to revert to a previous commit, in the revert dialog. + +:::info + +Note that after the revert dialog confirmation, the job expression will +instantly be reverted to a selected commit. No other jobs will be reverted. To +instantly revert all jobs in for a given project to a previous commit, +[resend the webhook from GitHub](./platform-mgmt#github-version-control). + +::: + +### Creating a new job + +To create a new job, follow the steps below: + +- From the application **menu**, click on **Jobs**. +- Find a **blue** floating button with **+** icon, and click on it. +- Clicking the **+** button will open **Job Studio** for you to enter details + for your new job. +- See details on how to use [Job Studio here](platform-mgmt#job-studio). + +### Job Studio + +**Job Studio** is OpenFn's **Job Editor**. It allows you to create a new job or +edit an exisiting one. It can be accessed by following the steps for +[editing an existing job](platform-mgmt#editing-a-job) or +[creating a new job](platform-mgmt#creating-a-new-job). The instructions below +assume you already know how to open Job Studio by either methods. 
+ +#### Changing Job Studio mode + +Job Studio comes in two editing modes, namely **Wizard mode** and **Fullscreen +mode**. By default, OpenFn JobStudio runs in **wizard mode**. Wizard mode allows +you to configure a job via a step-by-step configuration wizard. On the other +hand, **Fullscreen mode** allows you to quickly configure or edit the job +without the help of the wizard. + +To change from one **Job Studio mode** to another, follow the steps below: + +- While in Job Studio, in the top-right corner, click on the **fullscreen** + icon. +- Depending on the current Job Studio mode, clicking on the **fullscreen** icon + will toggle the editing mode to either **Wizard** or **Fullscreen**. + +:::info + +Note that once you toggle the editing mode, OpenFn updates your user settings +and saves this editing preference as your default Job Studio mode for subsequent +editing sessions. Note, however, that when creating **new jobs**, Job Studio +will always open in **Wizard mode**, regardless of your saved editing mode +preference. + +::: + +#### Configuring a job + +While in Job Studio, if in **Wizard mode**, you will see **four configuration +steps** and an **expression editor**. In **Fullscreen mode**, the **four +configuration steps** appear as regular fields, without a wizard. + +The **four configuration steps** include giving the job a name, defining what +[triggers](/documentation/build/triggers) its execution, selecting an +[adaptor](/adaptors), and providing +[authentication](/documentation/build/credentials) details. + +The **expression editor** is the area where you write your +[job expression](/documentation/build/jobs/#composing-job-expressions). Fill-in +all the details, and click on the **Save** icon in the top-right corner to save +your job's configuration changes. + +#### Inspecting job's initial state + +This feature allows you to view the +[initial state](/documentation/jobs/state/#initial-state) of a selected job. +Note that this feature is currently only available for +[message-triggered jobs](/documentation/build/triggers#message-filter-triggers). + +To view or inspect a job's initial state, click the expression pane splitter and +drag to the right. After dragging, you will see a `json tree` representation of +the matching initial state. To copy a path to a given node in the state, click +on the **_Copy to clipboard_** icon overlaid on the node. The path to that node +will be saved to clipboard, and can then be pasted inside the expression editor +as data path for the job's expression. + +#### Accessing inline adaptor documentation + +For a selected adaptor, OpenFn allows you to view documentation and code +examples for each [adaptor operation](/documentation/jobs/operations). + +To view adaptor documentation, click on the `documentation icon`(first icon) on +the top-right corner of the `Expression Pane`. + +Each adaptor operation has a short description and an example. You can click on +the example expression to copy and use it in your job's expression editor. + +Also note that expression examples or code snippets for adaptor operations can +be auto-generated through the expression editor's autocompletion feature. To +generate a code snippet for a given operation, type the first few letters of the +operation and press the `tab` key. + +#### Changing JobStudio theme + +OpenFn allows you to customize the feel and look of Job Studio. To change Job +Studio's theme from the default one, click the `color palette` icon, and select +a theme of your choice. 
+
+#### Installing an unreleased adaptor version
+
+In Job Studio, you can install adaptors that are not part of the recommended
+adaptors picklist directly from npm. See details
+[here](/adaptors#install-on-platform-via-npm) on how to install an unreleased
+adaptor version.
+
+#### Testing a job
+
+You can test your job without exiting Job Studio by clicking on the **Save and
+Run** button. You can find the **Save and Run** button in the bottom pane of
+Job Studio.
+
+After clicking on **Save and Run**, the job will be run and its logs will be
+streamed to the `Run logs` console.
+
+:::info
+
+Note that this feature is currently only available for **message-triggered
+jobs**.
+
+:::
+
+## Triggers
+
+This section of the portal allows you to create and manage your Triggers.
+
+### Searching triggers
+
+For a project with a number of jobs and a range of trigger criteria, finding a
+given trigger can be easily achieved via the search feature.
+
+Triggers can be filtered/searched by **name** or **criteria**. To search for a
+given trigger:
+
+- From the application **menu**, click on **Triggers**.
+- Find the **Search triggers** box and type, in the search box, the **trigger
+  criteria** for a **message trigger** (e.g., `{"test": "data"}`) or the
+  **name** of the trigger for any other type of trigger.
+- The application will filter and show all triggers matching the portion of text
+  entered into the search box.
+
+### Editing a trigger
+
+OpenFn allows you to edit or make changes to existing triggers. To edit a given
+trigger, follow the steps below:
+
+- From the application **menu**, click on **Triggers**.
+- Find the trigger you would like to **edit**.
+- On the bottom-left corner of the trigger card, click on **Edit**.
+- See details about types of triggers and other editing options
+  [here](/documentation/build/triggers).
+
+### Deleting a trigger
+
+OpenFn allows you to delete an existing trigger. To delete a given trigger,
+follow the steps below:
+
+- From the application **menu**, click on **Triggers**.
+- Find the trigger you would like to **delete**.
+- On the bottom-left corner of the trigger card, click on **Edit**.
+- While on the edit page for the selected trigger, click the **trash** icon.
+- The application will prompt you to confirm whether you would want to proceed
+  with deleting the given trigger.
+
+:::info
+
+Note that OpenFn will mark this trigger for deletion. You will not be able to
+access or edit the trigger once this is done. If there are any jobs linked to
+this trigger, they will not run successfully until you assign them new or other
+existing triggers.
+
+:::
+
+### Creating a trigger
+
+To create a new trigger, follow the steps below:
+
+- From the application **menu**, click on **Triggers**.
+- Find a **blue** floating button with **+** icon, and click on it.
+- Clicking the **+** button will open the **New Trigger Form** for you to enter
+  the details for your new trigger.
+- See details about types of triggers and other editing options
+  [here](/documentation/build/triggers).
+
+## Credentials
+
+This section of the portal allows you to create and manage your Credentials.
+
+### Searching Credentials
+
+For a project with a number of jobs and a range of credentials, finding a given
+credential can be easily achieved via the search feature.
+
+Credentials can be filtered/searched by **name**.
To search for a given
+credential:
+
+- From the application **menu**, click on
+  [**Credentials** or **My Credentials**](./platform-mgmt#credential-ownership-and-access).
+- Find the **Search credentials** box and type, in the search box, the **name**
+  of the credential.
+- The application will filter and show all credentials matching the portion of
+  text entered into the search box.
+
+:::info
+
+Note that if you are searching for all credentials you own, then find them via
+the **My Credentials** menu item; otherwise, you can find all credentials
+assigned to a given project via the **Credentials** menu item. Also note that
+not every credential you own is available to all the projects you are a member
+of. See details about credential ownership and access
+[here](./platform-mgmt#credential-ownership-and-access).
+
+:::
+
+### Credential ownership and access
+
+A credential is owned, by default, by the user who created it. To view all the
+credentials you own, follow the steps below:
+
+- From the application **menu**, click on **My Credentials**.
+- A list of all credentials you own will be displayed.
+
+You can assign a credential to a project, and all users with access to that
+project will be able to use it. However, note that a credential can be available
+to all users in a given project for use, but only the owner can edit it.
+
+To view credentials available to a given project, follow the steps below:
+
+- From the application **menu**, click on **Project Dashboard**.
+- Select the **project** for which you would like to see the credentials.
+- After the project loads, from the application **menu**, click on
+  **Credentials**.
+- A list of all credentials available to the selected project will be displayed.
+
+### Editing a credential
+
+OpenFn allows you to edit or make changes to existing credentials. To edit a
+given credential, follow the steps below:
+
+- From the application **menu**, click on **Credentials** or **My Credentials**.
+- Find the credential you would like to **edit**.
+- On the bottom-left corner of the credential card, click on **Edit**.
+- See details about types of credentials and other editing options
+  [here](/documentation/build/credentials).
+
+### Transferring credential ownership
+
+In OpenFn, a credential is owned, by default, by the user who created it.
+However, OpenFn allows you to change ownership of a credential to another user
+of the portal. To transfer credential ownership to another user of the OpenFn
+portal, follow the steps below:
+
+- From the application **menu**, click on **Credentials** or **My Credentials**.
+- Find the credential you would like to **transfer ownership** of.
+- On the bottom-left corner of the credential card, click on **Edit**.
+- While on the credential detail page, scroll down to the bottom left corner of
+  the page and click on **Ownership Transfer**.
+- Enter the **email address** and **user number** for the new credential owner.
+  This information can be found on the recipient's account settings page.
+- After entering the **email address** and **user number**, click on the
+  **Transfer Ownership** button.
+- OpenFn will prompt you to confirm whether to proceed with the transfer or not.
+
+:::info
+
+Note that once you proceed with credential ownership transfer, you will lose
+access to the credential immediately. The new owner may be able to view or
+modify personal or sensitive information stored on this credential. You will not
+be able to regain access to this credential without the new owner.
+ +However, you will still be able to use this credential for jobs in the projects +to which it has been shared until and unless the new owner revokes that +project's access to the credential. + +::: + +### Granting/revoking credential access to a project + +Note that, by default, a credential is available to the project the user had +loaded at the time the user was creating the credential. However, OpenFn allows +you to **grant** or **revoke** access to a credential for one or more projects. +To grant or revoke access to a credential for a project, follow the below steps: + +- From the application **menu**, click on **Credentials** or **My Credentials**. +- Find the credential you would like to **edit project access**. +- On the bottom-left corner of the credential card, click on **Edit**. +- While on the credential detail page, find the **Manage Access** section on the + right side of the page. +- You will see a list of projects that you are a member of. **Mark** the + `checkbox` to **grant** access or **un-mark** the `checkbox` to **revoke** + access for a given project. + +### Deleting a credential + +OpenFn allows you to delete an existing credential if you own it. To delete a +given credential, follow the steps below: + +- From the application **menu**, click on **Credentials**. +- Find the credential you would like to **delete**. +- On the bottom-left corner of the credential card, click on **Edit**. +- While on the edit page for the selected credential, click the **trash** icon + on the top right corner of the page. +- The application will prompt you to confirm whether you would want to proceed + with deleting the given credential. + +:::info + +Note that if you proceed with deleting a given credential, OpenFn will delete +this credential immediately for security reasons. You will not be able to +restore the credential once this is done, but you may create a new credential +with the same login information. If jobs are currently using this credential, +they may not run successfully until you add a new credential and assign it to +those jobs. + +::: + +### Creating a new credential + +To create a new credential, follow the steps below: + +- From the application **menu**, click on **Credentials** or **My Credentials**. +- Find a **blue** floating button with **+** icon, and click on it. +- Clicking the **+** button will prompt you to choose the **type of credential** + you would like to create. +- Note that **credentials** are meant to be used to connect to other systems. So + choose the type of credential that corresponds to the system you will be + integrating with via OpenFn. +- After choosing the type of credential, OpenFn will open the **New Credential + Form** for you to enter the details. +- See details about types of credentials and other editing options + [here](/documentation/build/credentials). + +## Activity + +In this section of the portal, you can view a list of all "runs" - i.e. +individual job runs. This list is essentially a compilation of all jobs, +messages and credentials flowing through your OpenFn account towards your +destination system(s). + +### Runs + +Runs are attempts made on a destination system by running a receipt through a +Job Description. Runs can be viewed and re-processed. Each submission has a +`success`, `started_at`, `finished_at`, `job_description_id`, and `receipt_id` +attribute. `Started_at` and `finished_at` are the timestamps when the submission +began and ended. 
+
+> **Note:** Some runs may take a really long time, particularly if they are
+> performing multiple actions in a destination system or if they are fetching
+> lots of data from a REST API at the start of a migration. They will appear as
+> red if they have failed. In the case of failure, refer to our
+> [Troubleshooting](/documentation/manage/troubleshooting-tips-on-platform)
+> section below.
+
+### Filter runs in the Activity view
+
+You can filter the run logs in the Activity View by:
+
+- **Text** - Remember to be patient, as a full log text search can take time to
+  process. Leverage this feature to search for runs with specific error messages
+  to support troubleshooting of any failed runs.
+
+- **Date** - Filter the view to only show runs that failed in the last few
+  hours/days/year – or a custom date range! Note that the default activity
+  history view shows runs from the last 30 days.
+
+### Bulk reprocess (retry) runs
+
+Need to re-process a series of runs? This could be helpful if you had multiple
+runs fail due to an error message.
+
+1. Generate a list of the runs that you want to reprocess by adjusting the
+   filters—be sure to specify an exact date range, job, status, etc.
+
+2. Simply click the **Reprocess** button and review the dialog that appears.
+   This dialog contains important information about the query that will be used
+   for reprocessing and gives you an approximate number of runs that will be
+   reprocessed.
+
+![Retry run button](/img/retrybutton.png)
+
+3. Click "Reprocess" when you're happy with the query. You'll get feedback on
+   the number of runs enqueued within seconds, and you should see your project
+   queue fill up then empty over time as the batch is processed.
+
+![Retry run button](/img/reprocess-runs.png)
+
+:::info
+
+Note that a filtered list of runs will include runs triggered by message
+filters, cron, and flow or catch triggers. When you select to reprocess runs
+from a filtered list, the runs in that list which can only be triggered by the
+successful or failed exit of _another_ run will not be included in the initial
+batch. Those jobs will, however, still get run if they are turned on and
+successful or failed runs in the batch trigger them. In other words, flow/catch
+triggers will behave normally even during a bulk reprocess order.
+
+:::
+
+:::note
+
+Remember that OpenFn plans are run-based, and you can monitor usage in **Project
+Settings** to ensure that you don’t hit any run limits when bulk reprocessing!
+
+:::
+
+### Export runs to CSV
+
+You can download and review OpenFn run data by exporting it to a CSV file.
+
+1. In your activity history view, filter the runs you’d like to export to CSV.
+   Choose to filter by text, date, job, and status.
+
+2. Click the **Export as CSV** button to review and confirm the desired export.
+
+![Export runs button](/img/exportruns.png)
+
+3. Click the "Export" button to submit the request. A link to download the file
+   will be sent to your email address shortly.
+
+![Retry run button](/img/export-runs.png)
+
+## Inbox
+
+Your inbox contains the history of all messages that have passed into your
+project, which may or may not have triggered a specific job. Messages are stored
+payloads or data that were sent via HTTP POST to your inbox. They can be viewed
+in formatted JSON, edited, or manually processed (if they did not match a filter
+when they were originally delivered).
+
+To edit a message, click the "pencil and paper" icon next to that receipt. Be
+careful, as no original copy will be persisted.
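+
+You can also send data to your inbox programmatically. As a minimal sketch (the
+inbox URL below is a placeholder; substitute your project's own
+`Unique Inbox URL` from **Project Settings**), a JSON payload can be posted
+with `curl`:
+
+```sh
+# Post a test payload to your project's inbox (placeholder URL and fields).
+curl -X POST -H "Content-Type: application/json" \
+  -d '{"surveyId": 37479, "patientId": "798123"}' \
+  https://www.openfn.org/inbox/your-inbox-uuid
+```
+
+A payload delivered this way appears as a message in your inbox and is matched
+against your message filter triggers just like data sent by an external system.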
+ +### Filter messages in your inbox + +To help you more quickly find relevant messages, you can now filter your inbox +by: + +- **Body Text** - Search your messages for specific text (e.g., find surveys + that contain “India” in the body). As individual projects may have millions of + messages containing tens of thousands of lines of JSON each, we’ve implemented + a “tsvector” search strategy. Please be patient and note that this text-based + search may take a moment to return results.. If you’re curious about how + tsvector works from a technical perspective, check out the + [official documentation](https://www.postgresql.org/docs/10/datatype-textsearch.html#DATATYPE-TSVECTOR). +- **Date** - Choose a relative date range (e.g., “Last 90 Days”) or define a + custom date range yourself. Note that the default inbox view shows “Last 30 + Days”. + +![Image of Inbox Filters](/img/inbox_filter.png) + +### Bulk reprocess messages + +Need to re-run a series of messages? If you had a job fail because of an error +for multiple messages, or need to re-process the data in OpenFn to re-send to a +destination application, then this feature will help you do so more quickly! + +1. Generate a list of the messages that you want to reprocess by adjusting the + filters—be sure to specify an exact date range, the matching trigger, etc. + +2. Simply click the **Reprocess** button and review the dialog that appears. + This dialog contains important information about the query that will be used + for reprocessing and gives you an approximate number of messages that will be + reprocessed. + +![Reprocess button](/img/reprocess_msgs.png) + +3. Click "Reprocess" when you're happy with the query. You'll get feedback on + the number of messages enqueued within seconds, and you should see your + project queue fill up then empty over time as the batch is processed. + +![Retry run button](/img/reprocess-messages.png) + +#### Note when bulk reprocessing messages + +- This simulates the chain of events that starts when messages first arrive in + your inbox. In other words, reprocessed messages will be handled by message + filter triggers for any jobs that have the `autoprocess` setting “on”. If + you've got messages that match certain triggers, but the associated jobs are + switched "off" they will not be run when those messages are reprocessed. + +- Remember that OpenFn plans are run-based, and you can monitor usage in + **Project Settings** to ensure that you don’t hit any run limits when bulk + reprocessing! ![Usage stats chart](/img/usage.png) + +### Export messages to CSV + +You can now download and review OpenFn message data by exporting to a CSV file. + +1. In your inbox, filter the messages you’d like to export to CSV. Choose to + filter by text, date, trigger, and run state. + +2. Click the **Export as CSV** button to review and confirm the desired export. + +![Export CSV button](/img/exportcsv.png) + +3. Click the "Export" button to submit the request. A link to download the file + will be sent to your email address shortly. + +![Retry run button](/img/export-messages.png) + +## Search Console + +The **Search Console** allows users to answer questions such as _Did patient +798123 get successfully referred or Did CommCare submission +123e4567-e89b-12d3-a456-426614174000 get loaded into DHIS2?_ via direct string +search. 
+
+Searches via [Inbox](./platform-mgmt#inbox) and
+[Activity History](./platform-mgmt#activity) rely on `JSONB matching` and
+`tsvector`, which are more powerful for traversing very large date ranges of
+messages or run logs but are less intuitive than string searches.
+
+The **Search Console** solves this challenge and allows the user to type the
+`string` of concern in a **Search Box** and press enter. OpenFn will search in
+`message bodies` and `run logs`, by default, and/or in `message headers` if
+otherwise specified.
+
+To use the **Search Console**, follow the below steps:
+
+1. On the left menu pane, click on the **Search Console** link.
+2. While on the **Search Console** page, select the **Date Range** and enter the
+   **text** matching your search in the **Search Box**.
+3. Press the **Enter Key** on the _Keyboard_ or click the **Search** button to
+   search.
+
+:::note
+
+OpenFn will limit the results of your search to a maximum of `10` records per
+specified search type (i.e. OpenFn will return a maximum of `10` results for
+matches found in `bodies`, `logs`, or `headers`). It is therefore recommended to
+refine your search to a very _specific string_ and _date range_ for which a
+matching result is expected.
+
+:::
+
+## Account Management
+
+### Add a credit card
+
+OpenFn's hosted iPaaS has a free-forever tier, but if your organization requires
+more jobs or runs each month, you can add a credit card and change to a paid
+tier. For comprehensive pricing information, please visit our
+[pricing page](https://openfn.org/pricing).
+
+To enter your credit card information, follow these steps:
+
+1. Log in to your OpenFn account.
+2. Click on your profile icon on the top right corner of the page and select
+   **Billing**.
+3. From the **Billing** page, select **Add Card** and enter your credit card
+   information.
+
+![Credit Card](/img/add_credit_card.gif)
+
+### Change plan
+
+Once your credit card information is entered, you can upgrade your plan by
+navigating to the Project Settings page and dragging the slider to the right or
+left.
+
+To do so, follow these steps:
+
+1. Log in to your OpenFn account.
+2. Click on the **Project Settings** link on the left-hand menu of the project
+   you'd like to modify. (_Or_ click your profile icon on the top right corner
+   of the page, select **Billing**, and select the project that you would like
+   to upgrade.)
+3. This will take you to the **Project Settings** menu.
+4. Scroll down on the **Project Settings** page and change plans using the
+   slider.
+5. Once you have selected the desired plan, click **Change to _[plan name]_**
+   and then confirm the change.
+
+![Change Plans](/img/change_plan.gif)
+
+### Lost password
+
+If at any time you forget the password for your OpenFn account, follow these
+steps to reset it:
+
+1. Visit https://openfn.org/login.
+2. Enter the email address associated with your account.
+3. Click on **Recover Password** (see gif below). This will trigger OpenFn to
+   send a recovery token to your associated email account.
+4. Check your email for the recovery token and make a copy of it.
+5. Enter your recovery token and a new password into the OpenFn "Reset Password"
+   page.
+
+![Password Reset](/img/recover_password.gif)
+
+## Project settings
+
+This section of the OpenFn platform allows you to view and update the project
+configuration and plan settings.
+
+### Project Configuration
+
+To view or update the following project configuration details, follow the steps
+below:
+
+- From the application **menu**, click on **Project Settings** and then find the
+  **Project Configuration** section.
+
+#### Changing Project Name
+
+- Changing your project name will update the URL and, after a 60-day deprecation
+  period, will break bookmarks or old links to the project. This won't affect
+  your project's inbox URL, but may impact users with lots of old run or message
+  links saved offline.
+
+#### Viewing Project Inbox URL
+
+- To view the `Inbox URL` for your project, click on the **eye** icon next to
+  the `Unique Inbox URL` label.
+
+#### Changing Project Description
+
+- The project description is optional and can be updated in this section.
+
+#### Changing Concurrency
+
+- **Concurrency** is the number of jobs that will be run at the same time.
+
+:::note
+
+Think of it as the number of workers or employees performing the same task at
+your organization. The task may be to convert an inbound patient record to meet
+the FHIR standard, then load it into OpenMRS. You could have 10 files waiting to
+be processed from separate deliveries. With a concurrency of 10, all ten files
+would start to be processed immediately. With a concurrency of 1, they'd be
+processed sequentially, the second only being started once your single worker
+finished working on the first.
+
+:::
+
+- Projects are set to a concurrency of "1" by default. This means that runs will
+  be processed one-at-a-time and that each subsequent run will be blocked until
+  the previous run is completed.
+
+- If your project is subscribed to a paid plan, you have the option of toggling
+  concurrency from the default "1" all the way up to a concurrency of "10".
+
+- To change the concurrency level for your project, follow these steps:
+
+1. Log in to your OpenFn account.
+2. Click on the **Project Settings** link on the left-hand menu of the project
+   you'd like to modify.
+3. This will take you to the **Project Settings** menu.
+4. On the **Project Settings** page, change concurrency to the appropriate level
+   using the slider.
+5. Once you have selected the desired concurrency, click **Update Project**.
+
+![Change Concurrency](/img/change_concurrency.gif)
+
+#### Changing Notification Threshold
+
+- By default, OpenFn sends a notification to all project collaborators when
+  **85%** of the project's allowed runs have been used in a given billing cycle.
+  You can change this setting by adjusting the **Notification Threshold Slider**
+  to your desired level.
+- Once you have selected the desired notification threshold, click **Update
+  Project**.
+
+#### Exporting Project Config
+
+- OpenFn allows you to run your project as a
+  [Microservice](/documentation/microservice/home/).
+- There are two options for exporting the project config used in OpenFn
+  Microservice. Exporting as `project.yaml` will provide you with a `YAML` file
+  that can be used to run this project with
+  [OpenFn/engine](/documentation/microservice/home/),
+  [OpenFn/microservice](/documentation/microservice/home/), or for use in
+  another OpenFn/platform space.
+- Exporting as `microservice.zip` will prepare a `ZIP` file with
+  `openfn/microservice:latest` (from hub.docker.com) and your `YAML` file
+  inside a pre-configured directory structure so that you can run this project
+  as a microservice via `docker run`.
In both cases, your project configuration
+  will be built asynchronously and you'll receive an email with a download link
+  when it's done.
+
+### Project Plan
+
+- In this section, you can view and change your project's pricing plan.
+
+#### Usage and Subscription
+
+- This section provides you with a graph that shows your project's current plan
+  usage limit and current usage pace.
+- To view a detailed report of the project's usage, find and click on the
+  **Historical Project Usage** button.
+
+#### Change Plan
+
+- By default, OpenFn sets your project's plan to **Free**.
+- Before changing your project's subscription plan, you must add a valid credit
+  card.
+- To change the project's plan, find the project plans slider and click on the
+  plan of your choice.
+- After selecting the project plan, click on the **Add Card** button, enter your
+  card details in the form, and save.
+
+### Job Library Sharing
+
+- The **OpenFn Job Library** is a project supported by the **OpenFn community**.
+  It's a collection of open-source job code from projects across **ICT4D**.
+- You will always be able to browse the library so that writing jobs is faster
+  and easier, but by enabling library contributions for this project, jobs not
+  marked as "private" will also be published to the library.
+- To contribute to the job library, find the **library** icon in the top-right
+  corner and click on it.
+- Click on the **Yes, contribute to the job library** button in the dialog box
+  that appears.
+
+:::note
+
+Data from **messages** or **runs** is **NEVER** shared. Your job expressions
+(which most OpenFn users already keep in public repositories on GitHub) and
+other non-sensitive metadata (e.g., adaptor and version, created/updated dates)
+will be made searchable to help other organizations and governments write jobs
+more quickly and easily if you enable this setting.
+
+:::
+
+## User Account Menu
+
+- You can view and modify your account settings by clicking on the **person**
+  icon on the top right corner of the **App bar**.
+
+### Changing User Account Settings
+
+- To change your user account settings, such as _name_, _IDE Style_, and
+  _theme_, click on **Account Settings** in the **User Account Menu**. This
+  action will take you to the **Account Settings** page.
+- While on the **Account Settings** page, make the necessary changes and click
+  on the **Save** button to save the changes.
+
+:::note
+
+- Note also that, while on the **Account Settings** page, you can access
+  additional features such as _Changing Email_, _Changing Password_, _connecting
+  to and disconnecting from GitHub_, _Billing Management_, and _Deleting
+  Account_.
+- To access these additional features, open the sub-menu by clicking on the
+  **three dots** on the top-right corner of the **Account Settings** page.
+
+:::
+
+### Viewing Billing
+
+- The **User Account Menu** also allows you to view details of all projects
+  billed to your account.
+- To view a list of all projects billed to your account, click on the
+  **Billing** menu item.
+
+### Logging out
+
+- You can log out of OpenFn by clicking on the **Logout** menu item of the
+  **User Account Menu**.
+
+  :::note
+
+  Also note that OpenFn will log you out of your current session after 24 hours
+  without warning! It also ensures that you are logged out of all browser tabs
+  once your current login session expires.
+
+  :::
+
+## Access & Security
+
+This section covers the **Access & Security** features each OpenFn project has.
+To explore these features, on the left-hand navigation ribbon, click on the
+**Access & Security** tab (#1).
+
+_Please refer to the screenshot below for help navigating the functionality of
+this page._
+
+![Access&Security Circled](/img/access_security1.png)
+
+### User Access
+
+OpenFn provides users with the ability to **add collaborator access**, **revoke
+collaborator access**, and, in the event you get stuck and need help from an
+implementation specialist, **grant OpenFn support access**.
+
+#### Add collaborator access
+
+To add collaborator access to your project from the **Access & Security** page:
+
+- Enter the e-mail address of your collaborator in the "Add collaborator by
+  email" field. Note that you will need to select "add as collaborator" or
+  "add as administrator" to add them to the project. See the screenshot above
+  for reference (#2).
+
+#### Revoke collaborator access
+
+To revoke collaborator access to your project from the **Access & Security**
+page:
+
+- Find the collaborator's name in the **User** list and, in the **Revoke**
+  column, click on the **Revoke** button. See the screenshot above for reference
+  (#3).
+
+#### Grant OpenFn support access
+
+To grant the OpenFn support team access to your project from the **Access &
+Security** page:
+
+- Enable the **Grant support access** toggle (#4).
+
+### Inbox Security
+
+OpenFn project administrators can choose to configure additional authentication
+for any inbound requests made to the project's inbox URL. On the "Access &
+Security" page of their OpenFn project, administrators can choose from API Key
+and Basic Auth types, which will prompt them to either generate an API token or
+to set up a username:password credential. Once this inbox authentication is
+configured, any HTTP requests made to the OpenFn Inbox URL must include either
+this `x-api-key` token or username:password in the request header.
+
+![inbox security](/img/inbox-security.png)
+
+#### Rotating auth methods
+
+Because more than one auth method may be accepted at a given time, some
+organizations choose to periodically rotate their auth methods for extra
+security and can do so without disrupting live production integrations. To
+rotate your inbox auth methods:
+
+1. Create a _second_ valid auth method with a new token or user:pass
+   combination.
+2. Provide that token to your external systems so that they can start using it
+   in their webhooks/requests to OpenFn.
+3. Once you are certain that all external services are now using the new auth
+   token, _revoke_ the old auth token.
+
+You can repeat this process as frequently as is required by your organization's
+internal security protocols.
+
+## GitHub version control
+
+Managing large numbers of jobs with multiple contributors is complicated. We
+developed the GitHub integration so that OpenFn projects can be linked to GitHub
+repositories and you can work collaboratively on your jobs, incorporating git
+flows for management.
+
+OK, you're ready to manage your jobs via GitHub, the leading hosted version
+control software on the web? Great, this section describes the steps necessary
+to get going.
+
+:::info tl;dr
+
+1. If a **commit** is made to a designated branch on GitHub,
+
+   ✅ OpenFn will automatically update the associated job's **expression** to
+   match the file on GitHub.
+
+2. 
If a job's **expression** or **GitHub filepath** is modified on the platform, + + ✅ OpenFn platform will automatically push a **commit** to your Github repo, + updating the linked file to match the expression. + +::: + +Note that if you change a file on GitHub that's _not_ related to any OpenFn +jobs, no update will be made on OpenFn. Likewise, if you edit a job on OpenFn +but _don't_ make any changes to the **expression** or **Github filepath**, no +commit will be made on GitHub. + +:::warning + +As soon as you enter a valid filepath for a job in a project with a connected +Github repo, all modifications made to that job on OpenFn will appear as Github +commits on that branch in that repo. + +Likewise, as soon as you make a commit on Github with a change to a file that is +linked to a job on OpenFn, the contents of that file will overwrite the existing +job on OpenFn. + +⚠️ **PLEASE note** that _before_ you connect Github, there is no version history +for OpenFn jobs on the platform. If you commit something you don't want (like an +empty file) to Github, `autodeploy` is on, and that file is linked to an OpenFn +job, you will **erase your existing job** and you may not be able to retrieve +it. ⚠️ + +For this reason, and because [**OpenFn/cli**](/documentation/cli) provides a +free, open-source, offline testing environment, it's recommended to create your +jobs using a Github repo and test them on your own machine _before_ linking them +to a project on OpenFn. + +::: + +### Setup Steps + +#### Linking your OpenFn account to your Github account + +1. OpenFn: [User Settings](https://www.openfn.org/account): Click the + three-button "action menu" (top right corner of the account card) and select + "Connect to GitHub". +2. GitHub: When prompted by GitHub, grant OpenFn read and write access to + your/your organizations repositories as needed. +3. OpenFn: Once redirected to OpenFn you may be asked to re-authenticate + depending on the domain you originally used to connect to OpenFn. +4. OpenFn: Ensure all changes you've made to your account are saved, and verify + that you see a bright blue check next to "Github OAuth". + +#### Linking projects and jobs to Github repos and files + +1. OpenFn: Project -> Version Control: Specify the repository owner, repository + name and branch for automatic deploys. You can also select to turn on or off + automatic deploys: when _on_ commits to the branch specified will + automatically be written to your jobs on OpenFn. +2. OpenFn: Project -> Jobs -> Job Edit: To link an individual job to a file in a + GitHub repo, edit that job and paste in the path to the job from the root of + your GitHub repo. If your repo looks like this, you'd type `sample_job_1.js` + or `some_folder/some_other_job.js` to link your OpenFn job to the select file + in your repo. + +:::info + +Automated GitHub version control is currently only available for enterprise +users. Contact [enterprise@openfn.org](mailto:enterprise@openfn.org) to build a +custom plan for your needs. + +::: + +### Advanced Version Control + +Using this GitHub integration, you can revert to previous versions of a job by +selecting that version (by its commit date and SHA) on the job view page. A new +commit will be made, updating the job to the state it was in at the time of the +old commit. 
diff --git a/versioned_docs/version-legacy/manage/troubleshooting-tips-on-platform.md b/versioned_docs/version-legacy/manage/troubleshooting-tips-on-platform.md
new file mode 100644
index 00000000000..c9ba921ac21
--- /dev/null
+++ b/versioned_docs/version-legacy/manage/troubleshooting-tips-on-platform.md
@@ -0,0 +1,214 @@
+---
+title: Troubleshooting Platform
+sidebar_label: Troubleshooting
+---
+
+:::important
+
+Currently, this section is specific to **OpenFn/platform**.
+
+:::
+
+## Runs
+
+One of the most helpful pages for troubleshooting on the OpenFn platform is the
+[Activity History](/documentation/getting-started/terminology/#activity-history).
+This page provides a list of all of the runs executed in a project and always
+marks any failed runs red or yellow. Project administrators can troubleshoot
+errors by clicking into the run to review the run details. Keep reading for all
+the important parts of a run and how to leverage them during your
+troubleshooting!
+
+### Exit codes
+
+Every run will have an exit code. The exit code is a way for OpenFn to classify
+the run status and can help you troubleshoot errors. Learn more about OpenFn
+exit codes and what each one means [here](/documentation/jobs/errors).
+
+### The time it took for the job to fail
+
+The run will also record how long it took before the job failed. This
+information helps users understand if the job is taking longer than it should
+and is especially helpful with errors that involve timeouts. You can use the run
+to determine at which operation the job is timing out and determine if the job
+performance can be optimized.
+
+### Run logs
+
+As jobs are developed, it is important to log details which will make testing
+and troubleshooting much easier in the future. Keep reading for the two most
+important parts of a run log!
+
+#### Mappings
+
+The logs should be written so that you can see exactly what was mapped between
+the source system and the destination system. In cases where data received from
+the source system is _not_ being posted to the OpenFn inbox (often for security
+reasons), it can even be helpful to log the data that was received. In summary,
+the log can have a **"Data received from source system"** section and a **"Data
+to be uploaded to destination system"** section.
+
+These logs can help admins verify that the source data and the data being
+uploaded to the destination system are correct. For example, seeing in the logs
+that a unique identifier is being mapped to `undefined` in the destination
+system can help you understand this Salesforce error message:
+
+`METHOD_NOT_ALLOWED: HTTP Method 'PATCH' not allowed. Allowed are GET,HEAD,POST at HttpApi.getError`.
+
+#### Error messages
+
+The run log should also tell us if an error has been thrown, and depending on
+the destination system, what the error message is. Sometimes the error message
+is very specific, like:
+
+`NOT_FOUND: Provided external ID field does not exist or is not accessible`
+
+This error from Salesforce usually indicates that `External ID` has not been
+checked in the field settings in Salesforce.
+
+Other error messages are not as clear and can take some time to debug:
+
+`TypeError [Error]: Cannot read property 'split' of undefined`
+
+**`TypeErrors`** usually indicate that the job received a part of the message
+that it wasn't expecting, or there is a syntax error in your job code. It means
+that the job needs to be updated to know how to handle the message.
In this
+case, the job received an old version of the CommCare form that was missing a
+field on which the job called the `split` function. You can determine this by
+reviewing the job to see which fields the `split` function is called on and
+checking that they are all present in the message.
+
+The more you test and troubleshoot with a particular system, the more familiar
+with its error messages you become.
+
+:::tip
+
+OpenFn has outlined several of the more common error messages specific to some
+of the systems that we have integrated in the past. Explore these systems and
+their error messages [here](/adaptors#connect-anything).
+
+:::
+
+## Leveraging search and filtering in OpenFn
+
+Leverage the various search functionalities in OpenFn to find the right messages
+and runs to support your troubleshooting. You can search in the Inbox, Activity
+History, and Search Console.
+
+1. **[Inbox](/documentation/manage/platform-mgmt/#inbox)** - The inbox contains
+   all the messages that have been sent to your project. Search your project
+   inbox for messages that contain a specific body text. You can also filter
+   these messages by date, run status, and trigger. You can even use filtering
+   to see all the messages whose last run failed, so you can get to
+   troubleshooting! Learn more about inbox filtering
+   [here](/documentation/manage/platform-mgmt/#inbox).
+
+2. **[Activity history](/documentation/getting-started/terminology/#activity-history)** -
+   As discussed above, the activity history records all runs for the current
+   project. It has similar search and filtering capabilities to the inbox,
+   except that it doesn't require valid JSON in the search box. Use the search
+   in activity history instead of the inbox when you want to search
+   **_run logs_** instead of messages.
+
+3. **[Search console](/documentation/manage/platform-mgmt/#search-console)** -
+   The search console will search in both message bodies **_and_** run logs for
+   any string that is entered!
+
+## Bulk reprocessing
+
+Sometimes you'll see several messages in the inbox whose last run failed because
+of the same error. Once you have resolved the error, you can test it by
+rerunning the transaction for one failed run. If this passes, you can then
+leverage **bulk reprocessing** in the OpenFn inbox and activity history to
+reprocess all messages in the current filtered query. This means OpenFn will
+rerun all of those transactions in the order they were received. Learn more
+about bulk reprocessing
+[here](/documentation/manage/platform-mgmt/#bulk-reprocess-messages).
+
+Sometimes you'll have so many messages with failed runs that it will be
+virtually impossible to open each one and inspect the run logs. In this case,
+you can resolve the errors and bulk reprocess as you go, each time reducing the
+number of failures in the inbox and generating a short list of the remaining
+errors.
+
+## Other tips
+
+- **Posting messages to the inbox** - You can post messages directly to the
+  OpenFn inbox by clicking the **plus** icon on the bottom left of the inbox.
+  This feature can come in handy when you have lengthy messages which include
+  several rows of data. You simply copy and paste one row of data from the
+  original message to a new message and post it to the inbox. This allows you to
+  troubleshoot individual rows of data.
+- **Editing messages** - Messages in the inbox can be edited by clicking the
+  pencil icon in the message. This is a quick way to update and test any mapping
+  fixes with data that is already in the inbox.
Once you verify that the run for
+  the updated message passes, you can make the appropriate updates to the source
+  system and post the correct data to the inbox moving forward.
+- **Organizing error messages** - When working with various jobs and error
+  messages, it is helpful to organize all errors in a spreadsheet which links to
+  the last run, error, and status.
+
+## Sign up for email alerts
+
+You can turn on notifications to receive email alerts when a job fails. When you
+receive an error email, you can click “inspect & take action” to be taken to the
+failed run and begin troubleshooting!
+
+## More
+
+> What happens if my survey data from ODK needs to link to existing records in
+> my Salesforce system but a respondent enters or selects an invalid
+> `external ID`?
+
+Great question, and don't worry, it happens all the time. Assuming you've
+already taken all possible measures to either pre-load external IDs in your ODK
+form or use more human-proof IDs (like barcodes and fingerprints), here's the
+flow of work:
+
+1. Read the email, and inspect the reason for failure.
+
+2. 99% of failed runs on OpenFn are due to `value mismatches`. The _collected_
+   `id` in ODK doesn't match the _expected_ `id` in Salesforce. You must now
+   choose to either:
+
+   A. Edit the source `id` in your `receipt` & retry the attempt.
+
+   B. Edit the related `id` in your destination system & retry the attempt.
+
+   C. Ignore the attempt—this source data will never reach your destination
+   system. (There have been reports of ODK Aggregate's JSON publisher sending
+   duplicate values. If that happens and your run fails due to "duplicate
+   values" on a particular unique field, you can safely ignore the run in
+   OpenFn.)
+
+Editing data in your destination system can be done through that system's
+interface. Many tools that act as `sources` (like ODK) do not allow for easy
+editing and re-submission of data. You can use OpenFn to edit the source data
+before retrying the attempt.
+
+### Common Error Messages
+
+The most common error messages with English explanations are:
+
+```sh
+DUPLICATE_VALUE: duplicate value found: ODK_uuid__c duplicates value on record with id: a0524000005wNw0
+The insert is blocked because you are attempting to create a new record with a
+unique field with the same value as an existing record.
+```
+
+```sh
+Required value missing
+```
+
+```sh
+ExternalId not found
+```
+
+```sh
+{ INVALID_FIELD_FOR_INSERT_UPDATE: Unable to create/update fields: Contact__c.
+Please check the security settings of this field and verify that it is
+read/write for your profile or permission set. }
+```
+
+This last one may arise if a master-detail relationship in Salesforce is not set
+as reparentable and the user attempts to run an upsert.
diff --git a/versioned_docs/version-legacy/microservice/home.md b/versioned_docs/version-legacy/microservice/home.md
new file mode 100644
index 00000000000..304981e2aa9
--- /dev/null
+++ b/versioned_docs/version-legacy/microservice/home.md
@@ -0,0 +1,181 @@
+---
+title: Microservice
+---
+
+:::caution Microservice and devtools are being replaced by Lightning
+
+Please note that [OpenFn/microservice](https://github.com/OpenFn/microservice)
+and [OpenFn/devtools](https://github.com/OpenFn/devtools) are being deprecated
+and replaced by [OpenFn/Lightning](https://github.com/OpenFn/lightning) when
+Lightning is released.
+ +::: + +## Intent + +OpenFn is used by numerous health and humanitarian organizations around the +world to scale their programs through real-time interoperability, systems +integration, and workflow automation. **OpenFn/microservice** makes use of +OpenFn's open-core technology—namely **OpenFn/core**, **OpenFn/engine**, and the +various OpenFn **adaptors**—to create standalone microservices which can be +deployed on any hardware. + +This microservice approach helps to ensure that governments and NGOs are never +locked-in to OpenFn's SaaS offering, and can port their existing jobs, triggers, +and credentials from [OpenFn.org](https://www.openfn.org) to their own +infrastructure easily. + +## Introduction + +Similar to `platform`, OpenFn/microservice runs on `project.yaml` files. This +means that when organizations or governments have an open-source license +requirement, all their jobs, credentials, and project configurations can be +exported from OpenFn's iPaaS and used to create a microservice deployment. + +While this approach doesn't provide the OpenFn platform front-end with its +various project management and configuration features, it's perfect for groups +with DevOps experience and 100% compatible with the platform. You can even build +and test entire projects on `platform` and then export the `project.yaml` file +to run on your own servers using `microservice`. + +This microservice approach provides flexibility to governments and NGOs, so they +are never locked-in to OpenFn's SaaS platform offering. At any time, an +organization can port their existing jobs, triggers, and credentials from +OpenFn.org to run with our FOSS integration toolkit, using their own +infrastructure. + +## Prerequisites + +Familiarity with other elements of OpenFn's open source integration toolkit is +helpful when considering the microservice approach. + +- [OpenFn/docs](https://docs.openfn.org/) +- [OpenFn/engine](https://github.com/openfn/engine) +- [OpenFn/core](https://github.com/openFn/core) +- [OpenFn/devtools](https://openfn.github.io/devtools/) + +## Docker up and running + +Assuming you've got an `.env` and a sample project at `./sample-project` +directory with a `project.yaml` spec: + +```sh +docker-compose up +``` + +You can configure either the compose file or the .env, or run the container +using `docker run`: + +```sh +docker run -v :/home/microservice/ \ + --env-file \ + --network host \ + openfn/microservice:v0.3.2 +``` + +## Development up and running guide + +- Clone this repo with `git clone git@github.com:OpenFn/microservice.git` +- Enter the directory with `cd microservice` +- Install dependencies with `mix setup` +- Run the tests with `mix test` +- Make a project directory to hold your project artifacts with + `mkdir sample-project` +- Create a new project specification with + `cp project.yaml.example ./sample-project/project.yaml` +- Create a `.env` file with `cp .env.example .env` +- Install necessary adaptors via + `npm install @openfn/language-http --prefix priv/openfn/runtime/node_modules --no-save --no-package-lock --global-style` +- Start your microservice server with + `env $(cat .env | grep -v "#" | xargs ) iex -S mix phx.server` + +### Up and running inside Docker + +- Build a docker image with `docker build -t openfn/microservice:v0.3.0 .` +- Run with the [docker run command](#Docker-run) + +## Project configuration + +You can configure the jobs, triggers, credentials and language packs used in +your microservice in the `project.yaml` config file. 
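+
+As a rough, illustrative sketch of that structure (the key names below are
+assumptions for illustration only; treat `project.yaml.example` in the
+repository, described in the next section, as the authoritative reference), you
+could scaffold a config from the shell like this:
+
+```sh
+# Hypothetical sketch only: key names are assumptions, not the documented
+# schema. Consult project.yaml.example for the real structure.
+cat > sample-project/project.yaml <<'EOF'
+jobs:
+  job-1:
+    expression: 'alterState(state => { console.log(state.data); return state; });'
+    language_pack: '@openfn/language-common'
+    trigger: trigger-1
+
+triggers:
+  trigger-1:
+    criteria: '{"number": 2}'
+  every-minute:
+    cron: '* * * * *'
+
+credentials:
+  my-credential:
+    username: 'someone@example.org'
+    password: 'change-me'
+EOF
+```
+
+The sample file shipped with the repository covers the same ideas (a
+message-filter trigger, a cron trigger, and flow/catch links between jobs), so
+starting from that file and editing it is the safer path.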
+ +### First setup using the sample config + +The +[sample project configuration file](https://github.com/OpenFn/microservice/blob/main/project.yaml.example) +describes an example project setup to help you get acquainted with this +structure. + +By default microservice is configured with 4 sample jobs: + +1. `job-1` is triggered when a matching message arrives to the inbox (see + `trigger-1`). +2. `recurring-job` is a timed job scheduled to run every minute and is linked to + the `every-minute` cron trigger. +3. `flow-job` and `catch-job` run after the `success` and `failure` of job-1, + respectively. + +All of the jobs are configured with the language pack `openfn/language-common`. + +In the default sample configuration a new message posted to +`localhost:4000/inbox` that matches `trigger-1` (i.e. the message contains +`"number":2`) is greeted with an asynchronous acknowledgement receipt +(`HTTP 202` `Data accepted and processing has begun`) and will trigger `job-1` +to run. + +You can try this out with the following snippet: + +```sh +curl -X POST -H "Content-Type: application/json" \ + -d '{ + "number":2, + "surveyId": 37479 +}' \ + http://localhost:4000/inbox +``` + +Posting a message not matching any of the triggers (e.g. `“number”:3`) equally +prompts an acknowledgement but doesn’t trigger any jobs. + +Example message post for this non-match scenario: + +```sh +curl -X POST -H "Content-Type: application/json" \ + -d '{ + "number":3, + "surveyId": 37479 +}' \ + http://localhost:4000/inbox +``` + +HTTP `post` requests made to +[`localhost:4000/inbox`](http://localhost:4000/inbox) will be processed by the +`Receiver`, according to the `credential`, `expression`, and `adaptor` defined +in the project configuration `YAML` file. + +Time-based jobs will be run by `Engine` according to the `credential`, +`expression`, and `adaptor` defined in your `project.yaml` file. + +### Setup from your existing OpenFn platform project + +If you have a project configured on OpenFn, you have two ways for exporting your +config on the Project Settings page and running your project in microservice. + +1. If you export as `project.yaml`, you can download your settings in `yaml` + format from your platform project Download page or from a link in the + auto-generated email sent to your address. You can plug this file into your + environment as set up using the + [Development Up and Running Guide](#Development-up-and-running-guide). + +2. If you export as `microservice.zip`, you'll get your microservice folder + ready to run with `docker`, containing + +- a `docker-compose.yaml` config file +- a project folder containing `project.yaml` +- `.env` file with the default environment variables for docker +- a `Readme` file + +`cd` into the folder and run the project with `docker-compose up`. If you don't +have the docker image, it will be auto-pulled from `hub.docker.com`. + +![Export Microservice Zip](/img/microservice_zip_export.gif) diff --git a/versioned_docs/version-legacy/openfn-roadmap.md b/versioned_docs/version-legacy/openfn-roadmap.md new file mode 100644 index 00000000000..c5a53b6f4e1 --- /dev/null +++ b/versioned_docs/version-legacy/openfn-roadmap.md @@ -0,0 +1,77 @@ +--- +title: OpenFn Roadmap +sidebar_label: OpenFn Roadmap +--- + +## OpenFn Roadmap + +This page details the planned roadmaps for the key products in the OpenFn +product suite. This includes OpenFn/`Lightning`, `Adaptors`, and `Docs`. 
+ +### Key for the Roadmap `Status` Values + +| Status Value | Definition | +| ------------- | ----------------------------------------------------------------------- | +| `Not started` | Issue in the backlog but still in need of being designed and scoped out | +| `Planned` | Scoped and ready to be picked up by an engineer | +| `In dev` | Currently being worked on by an engineer | + +### Lightning Roadmap + +OpenFn/Lightning is the fully open-source workflow automation platform at the +core of the OpenFn Digital Public Good (learn more about the product +[here](/documentation/about-lightning)). + +| **Feature** | **`Status`** | **Target Timeline** | **Related Links** | **Description** | +| ---------------------------------------------------------------------- | ------------ | ------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1. Delete a project (as a project owner or a superuser) | Delivered | Q2 '23 | [Github issue 757](https://github.com/OpenFn/Lightning/issues/757) & [issue 746](https://github.com/OpenFn/Lightning/issues/746) | Project owners can delete a project. This will schedule the project for deletion after the configured time period. Also, superusers can schedule a project for deletion, cancel a project's deletion and delete a project immediately. | +| 2. Bulk reprocess work orders (from start) | Delivered | Q2 '23 | [Issue 659](https://github.com/OpenFn/Lightning/issues/659) | Select and reprocess multiple work orders to rerun them from the start with updated job logic. | +| 3. Add filter expressions to triggers | In dev | Q2 '23 | [Issue 778](https://github.com/OpenFn/Lightning/issues/778) | Triggers can take expressions to determine whether to run the job or not. This is similar to 'message filters' in OpenFn platform (v1) | +| 4. Import and export projects through the command line interface (CLI) | In dev | Q3 '23 | [Issues 641](https://github.com/OpenFn/Lightning/issues/641), [249](https://github.com/OpenFn/Lightning/issues/249), & [288](https://github.com/OpenFn/Lightning/issues/288) | Import and export a project as code, to save a local version or edit a job in your own code editor. | +| 5. Handle [Oauth](https://oauth.net/2/) authentication flow | In dev | Q3 '23 | [Issue 646](https://github.com/OpenFn/Lightning/issues/646) | Handle jobs with Oauth credentials by periodically refreshing the token. | +| 6. Fully decoupled workflow execution manager (RTM/RTM-server) | In dev | Q3 '23 | [Issue 52](https://github.com/OpenFn/kit/issues/52) | Handle extremely long-running workflows. Better manage resource consumption and load by scaling “workflow execution workers” independently from the webapp/orchestration layer. | +| 7. Automatic github version control | Planned | Q3 '23 | [Issues 289](https://github.com/OpenFn/Lightning/issues/289) & [250](https://github.com/OpenFn/Lightning/issues/250) | Use Github Version Control to track and review changes to your workflow. | +| 8. 
View key project metrics (number of runs, failures, workflows) | Planned | Q3 '23 | [Issue 755](https://github.com/OpenFn/Lightning/issues/755) | View the success rate and number of failed work orders for your workflows. | +| 9. Add authentication to webhook triggers | Not started | Q4 '23 | [Issue 245](https://github.com/OpenFn/Lightning/issues/245) | Add option to configure API keys and authentication for HTTP requests sent to OpenFn webhook triggers. | +| 10. Zero-retention pipeline | Not started | Q4 '23 | [Issue 752](https://github.com/OpenFn/Lightning/issues/752) | Feature to ensure OpenFn will persist _zero_ data processed via OpenFn workflows to ensure compliance with data security and residency requirements. | +| 11. Get notified of run limits via email | Not started | Q4 '23 | [Issues 755](https://github.com/OpenFn/Lightning/issues/755) & [556](https://app.zenhub.com/workspaces/product-engineering-6305f22ada44914905485ab1/issues/gh/openfn/lightning/556) | Email alerts to help admins monitor usage. | +| 12. Set up multi-factor authentication (MFA) | Planned | Q4 '23 | [Issue 364](https://github.com/OpenFn/Lightning/issues/364) | Feature to allow admins to enable MFA for OpenFn users. | +| 13. Disable console.logs | Not started | Q4 '23 | [Issue 276](https://github.com/OpenFn/kit/issues/276) | Disable console.logs from showing up in the job logs, for data privacy once workflows are handling production data. | +| 14. Expanded Audit Trail and Node Authentication (ATNA) functionality | Planned | Q4 '23 | [Issues 271](https://github.com/OpenFn/Lightning/issues/271) | Extend audit trail functionality to cover more aspects of ATNA, reference [OpenHIE IOL requirement IOLWF-1](https://guides.ohie.org/arch-spec/openhie-component-specifications-1/openhie-interoperability-layer-iol#openhie-iol-workflow-requirements). | +| 15. Enhanced user experience for workflow testing | Not started | 2024 | [Issue 311](https://github.com/OpenFn/Lightning/issues/311) | Name and save job inputs (for example as sample data for testing) and allow users to execute jobs with these saved inputs. | +| 16. Bulk reprocess work orders (from a specific job) | Not started | 2024 | [Issue 833](https://github.com/OpenFn/Lightning/issues/833) | Bulk execute multiple work orders from a specific job to avoid rerunning non idempotent jobs and duplicating resources. | + +_You can follow Lightning's progress and track delivered features in the +[Lightning Changelog](https://github.com/OpenFn/Lightning/blob/main/CHANGELOG.md)._ + +### Adaptors Roadmap + +OpenFn's open-source adaptors can connect any application, including web APIs, +databases, and even raw data files, enabling interoperability with any +information system ([read more](/adaptors/)). Adaptors, alongside OpenFn's +workflow engine, enable automated workflows that cut across digital systems. 
+ +| **Feature** | **`Status`** | **Target Timeline** | **Related Links** | **Description** | +| ------------------------------------------------------------------------------------------------- | ------------ | ------------------- | ------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| 1. Enhancements to [`FHIR`](http://www.hl7.org/fhir/) & [`OpenHIM`](http://openhim.org/) adaptors | Not Started | Q3 2023 | See existing adaptors for [FHIR](/adaptors/packages/fhir-docs) and [OpenHIM](/adaptors/packages/openhim-docs) | To rebuild the existing 2021 [OpenFn Instant-OpenHIE reference demo](/documentation/instant-openhie) to highlight the exchange of data between existing non-FHIR digital health tools and a HAPI FHIR server. (OpenFn Lightning is OpenHIE-compliant and can be used as a workflow engine for the OpenHIE Interoperability layer - [learn more here](/documentation/about-lightning#standards-and-compliance-matter).) We also want to demonstrate data exchange between existing non-FHIR digital health tools and key components of Google’s [Open Health Stack](https://developers.google.com/open-health-stack) and [Cloud Healthcare API](https://cloud.google.com/healthcare-api/docs/concepts/fhir) | +| 2. Add "magic" functions to existing, in-demand adaptors | Not started | Q3 2023 | [Issue 243](https://github.com/OpenFn/adaptors/issues/243) | Add functions, dynamic lists, and shortcuts to fast-track workflow configuration for key adaptors including HTTP, [DHIS2](https://dhis2.org/), [CommCare](https://www.dimagi.com/commcare/), & [OpenMRS](https://openmrs.org/) | +| 3. New [`OpenMRS`](https://openmrs.org/) adaptor version | Not started | Q3 2023 | [See existing adaptor docs](/adaptors/packages/openmrs-readme) | To ensure compliance with OpenMRS v3 | +| 4. Enhancements to the [`OCL`](https://openconceptlab.org/) adaptor | Not started | Q3 2023 | [See existing adaptor docs](/adaptors/packages/ocl-readme) | To ensure that mappings stored in OCLs can be more easily access and processed as inputs in OpenFn/Lightning workflows | + +### Docs Roadmap + +| **Feature** | **`Status`** | **Target Timeline** | **Related Links** | **Description** | +| ----------------------------------------------------------- | ------------ | ------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| 1. 
OpenFn and the [OpenHIE](https://ohie.org/) architecture | Planned | Q2 2023 | [See current docs](/documentation/about-lightning#standards-and-compliance-matter) | New page dedicated to how OpenFn aligns with the OpenHIE architecture; expansion of the existing small section on standards | +| 2. New Lightning User Guidance | Not started | Q3 2023 | To be hosted on docs.openfn.org | New documentation, videos, and other user guidance on how to use OpenFn/Lightning and how to migrate existing OpenFn/platform projects to Lightning (the new OpenFn "v2") | +| 3. Template [FHIR](http://www.hl7.org/fhir/) Workflows | Planned | Q3 2023 | To be hosted on demo.openfn.org | OpenFn can already help achieve FHIR compliance, but we will build and document reference/template workflows to demonstrate how OpenFn/Lightning can automate data exchange, registration, and/or reporting workflows between non-FHIR data systems and FHIR APIs. | +| 4. Template Alerting Workflows | Not started | Q4 2023 | [See OpenHIE docs](https://guides.ohie.org/arch-spec/introduction/alerting-sending-reminders-or-information); to be hosted on demo.openfn.org | To demonstrate how OpenFn can facilitate one-way communication to a client or provider listed in the HIE (from the OpenHIE standard spec) | +| 5. Template Shared Health Record Workflows | Not started | Q4 2023 | [See OpenHIE docs](https://guides.ohie.org/arch-spec/introduction/shared-health-record); to be hosted on demo.openfn.org | To demonstrate how OpenFn can allow external systems to automatically save and retrieve information from the HIE (from the OpenHIE standard spec) | +| 6. Template Aggregate Reporting Workflows | Not started | 2024 | [See OpenHIE docs](https://guides.ohie.org/arch-spec/introduction/aggregate-reporting-workflows); to be hosted on demo.openfn.org | To demonstrate how OpenFn can support aggregate data exchange of health indicators, leveraging the [ADX](https://wiki.ihe.net/index.php/Aggregate_Data_Exchange) data standard | + +### Questions? Feedback? Ideas? + +Post on the OpenFn Community at +[community.openfn.org](https://community.openfn.org), or consider contributing +to the OpenFn software, adaptors, or documentation (learn more in the +[Contributing section](/documentation/writing-code)). diff --git a/versioned_docs/version-legacy/portability-versions.md b/versioned_docs/version-legacy/portability-versions.md new file mode 100644 index 00000000000..f824370d7ba --- /dev/null +++ b/versioned_docs/version-legacy/portability-versions.md @@ -0,0 +1,285 @@ +--- +title: Versions of the Portability Proposal +--- + +OpenFn is currently designing a portable project configuration schema that can +be used to import or export projects between OpenFn/platform and OpenFn/engine. + +## Proposal v4 + +The portability specification v4 defines how entire projects (groups of +workflows with their associated triggers, credentials and jobs) can be +represented as code. This specification has been written for +[Lightning](/documentation/getting-started/integration-toolkit/#lightning-coming-soon), +the fully open source web app which extends the OpenFn DPG. It aims to (a) +improve developer experience, allowing developers to build and test workflows locally; +(b) enable version control and an audit trail of project changes; and (c) enable +users to port existing workflows from the OpenFn platform to Lightning. + +This new specification has been designed and documented thanks to support from a +Digital Square Global Goods grant.
+ +The `project.zip` structure and files: + +``` +/globals + sample-clinic-map.json + sample-translations.json +/workflow-a + job-1.js + job-2.js + job-3.js +/workflow-b + job-4.js +project.yaml +project.state.yaml +``` + +The `project.yaml`: + +```yaml +name: "My Project" # The project name + +globals: # All global constants accessible to this project + clinic-map: file://./globals/clinic-map.json + project-expense-codes: file://./globals/project-expense-codes.json + service-codes: + body: + m126: Medical Referral + g01: General Checkup + ps: Psycho-social Support + +workflows: # All workflows in a project + CommCare-to-OpenMRS: #The workflow name. Workflow names won't have spaces + jobs: # All jobs/steps in a workflow + Coerce-to-FHIR: # The job/step name + trigger: webhook #webhook urls are uids so are not included + adaptor: language-fhir + enabled: true + credential: my-fihr-credential #looks up credential in state by its name + # when running locally, the credentials values are taken from the overrides file + # cli run workflow "CommCare-to-OpenMRS" --overrides ./keys-and-values.yaml + body: "file://./CommCare-to-OpenMRS/Coerce-to-FHIR.js" # each job job-body is stored in a separate file, within a folder for the whole workflow + + Load-to-openmrs: + trigger: + on-success: Coerce-to-FHIR + adaptor: language-openmrs + credential: my-other-credential + enabled: true + body: + # no "include", but pathlike doesn't work: if you're doing a uri you need to be explicit about it + # default to local fs -- no numbering because too complicated if users change the order + "file://./CommCare-to-OpenMRS/Load-to-openmrs.js" + + Send-Wrap-Up-Reports: + trigger: + on-success: Load-to-openmrs + enabled: true + adaptor: language-mailgun + globals: + - service-codes + - clinic-map + body: > + # this triggers a new workflow + fn(state => state) + sendEmail(state => state.emailContent) + + Kobo-to-DHIS2: #This is a second workflow + Fetch-Kobo-Submissions: + trigger: + cron: * 5 * * * + enabled: true + adaptor: language-kobotoolbox + body: "file://./Kobo-to-DHIS2/Fetch-Kobo-Submissions.js" + + Upload-to-DHIS2: + trigger: + on-success: Fetch-Kobo-Submissions + adaptor: language-kobotoolbox + enabled: false + body: "file://./Kobo-to-DHIS2/Upload-to-DHIS2.js" +``` + +The `project.state.yaml`: + +```yaml +project: + - id: '45bffee' + key: 'My Project' + +globals: + - id: 'sj23n36' + key: 'clinic-map' + - id: 'bss522g' + key: 'project-expense-codes' + - id: '22aa4st' + key: 'service-codes' + +workflows: + - id: 'cfd7c68' + key: 'CommCare-to-OpenMRS' # this is the NAME and the KEY + - id: 'd1ecc4f' + key: 'Kobo-to-DHIS2' + +jobs: + - id: 'ns6yw54' + key: 'Coerce-to-FHIR' + - id: '12bs52j' + key: 'Load-to-openmrs' + - id: 'lk81hs6' + key: 'Send-Wrap-Up-Reports' + + - id: 'sn26sh2' + key: 'Fetch-Kobo-Submissions' + - id: 'sk1722h' + key: 'Upload-to-DHIS2' + +credentials: + - id: '12ms62y' + key: 'My FHIR Credential' +``` + +## Proposal v3 + +v3 introduces +[URI schemes](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier) +`file://`, `https://`, `gcs://` + +```yaml +jobs: + job-1: + expression: 'file://my-job.js' # URIs may be used (e.g., https://raw.githubusercontent.com/org/repo/my-job.js) + adaptor: '@openfn/language-common' + trigger: trigger-1 + credential: my-secret-credential + recurring-job: + expression: > + fn(state => { + console.log("Hi there!") + return state; + }) + adaptor: '@openfn/language-common' + trigger: every-minute + flow-job: + expression: > + fn(state => { + state.data.number = 
state.data.number * 3 + return state; + }) + adaptor: '@openfn/language-common' + trigger: after-j1 + catch-job: + expression: > + fn(state => { + state.message = "handled it." + return state; + }) + adaptor: '@openfn/language-common' + trigger: j1-fails + +triggers: + trigger-1: + criteria: '{"number":2}' + every-minute: + cron: '* * * * *' + after-j1: + success: job-1 + j1-fails: + failure: job-1 + +credentials: + my-secret-credential: + username: '******' # Credential keys get exported, but values must be manually reentered + password: '******' + my-other-credential: 'file://gcp_credential.json' # And URIs may be specified directly for the credential body +``` + +## Proposal v2 + +```yaml +jobs: + job-1: + expression: > + registerPatient({ + patient-id: state.data.id, + dob: state.data.birth + }) + adaptor: '@openfn/language-openmrs' + trigger: trigger-1 + credential: my-secret-credential + recurring-job: + expression: > + fn(state => { + console.log("Hi there!") + return state; + }) + adaptor: '@openfn/language-common' + trigger: every-minute + flow-job: + expression: > + fn(state => { + state.data.number = state.data.number * 3 + return state; + }) + adaptor: '@openfn/language-common' + trigger: after-j1 + catch-job: + expression: > + fn(state => { + state.message = "handled it." + return state; + }) + adaptor: '@openfn/language-common' + trigger: j1-fails + +triggers: + trigger-1: + criteria: '{"number":2}' + every-minute: + cron: '* * * * *' + after-j1: + success: job-1 + j1-fails: + failure: job-1 + +# Note that credential keys get copied, but values must be manually entered +# after the export is completed. +credentials: + my-secret-credential: + username: '******' + password: '******' +``` + +## Proposal v1 + +```js +const project = { + async: true, + triggers: { + uniqueTriggerId: { + // trigger properties + }, + otherTrigger: { + // other trigger properties + }, + }, + credentials: { + // for now, credentials will not be synced // + // secret1: { + // username: 'mamadou', + // pass: 'shhh', + }, + staticData: { + // static objects that can be accessed from any job + }, + jobs: { + payHealthWorker: { trigger: 'otherTrigger' }, + syncToSalesforce: { + expression: 'uri://github.com/jobs/expresion.js', + trigger: 'uniqueTriggerId', + credential: 'secret1', + }, + }, +}; +``` diff --git a/versioned_docs/version-legacy/portability.md b/versioned_docs/version-legacy/portability.md new file mode 100644 index 00000000000..066700122d0 --- /dev/null +++ b/versioned_docs/version-legacy/portability.md @@ -0,0 +1,445 @@ +--- +title: Portability +--- + +## Intent + +The portability specification allows for the representations of entire workflow +projects "as code", lets user move between various deployment pathways (cloud, +local, DIY, etc.) and proposes a globally-applicable way of **_specifying +workflow automation_** and **_systems integration_** that might be applied +across workflow-engines/integration platforms across the sector. Nothing about +the spec _must_ be specific to OpenFn or any one of our individual products. We +envision a future in which software built with Lightning, the OpenFn Integration +Toolkit, and entirely new and different integration/workflow tools can adopt +this specification. + +If you're interested in contributing to the specification, reach out to OpenFn +via the [community forum](https://community.openfn.org), write to us, or suggest +changes by submitting a pull request here. 
+ +## "Projects as code" + +The portability specification v4 defines how entire projects (groups of +workflows with their associated triggers, edges, credentials and jobs) can be +represented as code. It improves the OpenFn developer experience, allowing +workflows to be built and tested locally; (b) enables project version control +and an audit trail of project changes; and (c) allows users to port existing +workflows from OpenFn v1 to v2, as well as between instances or deployments of +Lightning. + +### The project "spec" + +The project specification (or "spec") is often saved as a `project.yaml` file. + +```yaml +name: openhie-project +description: Some sample +# credentials: +# globals: +workflows: + OpenHIE-Workflow: + name: OpenHIE Workflow + jobs: + FHIR-standard-Data-with-change: + name: FHIR-standard-Data-with-change + adaptor: '@openfn/language-http@latest' + enabled: true + # credential: + # globals: + body: | + fn(state => { + console.log("hello github integration") + return state + }); + + Send-to-OpenHIM-to-route-to-SHR: + name: Send-to-OpenHIM-to-route-to-SHR + adaptor: '@openfn/language-http@latest' + enabled: true + # credential: + # globals: + body: | + fn(state => state); + + Notify-CHW-upload-successful: + name: Notify-CHW-upload-successful + adaptor: '@openfn/language-http@latest' + enabled: true + # credential: + # globals: + body: | + fn(state => state); + + Notify-CHW-upload-failed: + name: Notify-CHW-upload-failed + adaptor: '@openfn/language-http@latest' + enabled: true + # credential: + # globals: + body: | + fn(state => state); + + triggers: + webhook: + type: webhook + edges: + webhook->FHIR-standard-Data-with-change: + source_trigger: webhook + target_job: FHIR-standard-Data-with-change + condition: always + FHIR-standard-Data-with-change->Send-to-OpenHIM-to-route-to-SHR: + source_job: FHIR-standard-Data-with-change + target_job: Send-to-OpenHIM-to-route-to-SHR + condition: on_job_success + Send-to-OpenHIM-to-route-to-SHR->Notify-CHW-upload-successful: + source_job: Send-to-OpenHIM-to-route-to-SHR + target_job: Notify-CHW-upload-successful + condition: on_job_success + Send-to-OpenHIM-to-route-to-SHR->Notify-CHW-upload-failed: + source_job: Send-to-OpenHIM-to-route-to-SHR + target_job: Notify-CHW-upload-failed + condition: on_job_failure +``` + +### The project "state" + +The project state is a representation of a particular project as _on a specific +Lightning instance_. It is often saved as `projectState.json` and contains UUIDs +for resources on a particular Lightning deployment. 
+ +```json +{ + "workflows": { + "OpenHIE-Workflow": { + "id": "27ae2937-0959-48b8-a597-b1646aae8c14", + "name": "OpenHIE Workflow", + "jobs": { + "Transform-data-to-FHIR-standard": { + "id": "e44f65bb-5038-4e17-8d93-b63cbe95254a", + "delete": true + }, + "Send-to-OpenHIM-to-route-to-SHR": { + "id": "977b87ff-f347-42b5-832f-6ae2ca726f32", + "name": "Send-to-OpenHIM-to-route-to-SHR", + "adaptor": "@openfn/language-http@latest", + "body": "fn(state => state);\n", + "enabled": true + }, + "Notify-CHW-upload-successful": { + "id": "86b743a3-fd00-4629-b9fb-d5f38fb56d0b", + "name": "Notify-CHW-upload-successful", + "adaptor": "@openfn/language-http@latest", + "body": "fn(state => state);\n", + "enabled": true + }, + "Notify-CHW-upload-failed": { + "id": "be85df30-0abd-4f8e-be17-501f67e18b8d", + "name": "Notify-CHW-upload-failed", + "adaptor": "@openfn/language-http@latest", + "body": "fn(state => state);\n", + "enabled": true + }, + "FHIR-standard-Data": { + "id": "55016dda-42e3-4ee1-8a9c-24e3f23d42f1", + "delete": true + }, + "FHIR-standard-Data-with-change": { + "id": "28dd0846-a6ae-40c0-8ab4-3e0a6b487afe", + "name": "FHIR-standard-Data-with-change", + "adaptor": "@openfn/language-http@latest", + "body": "fn(state => state);\n", + "enabled": true + } + }, + "triggers": { + "webhook": { + "id": "530cde0b-0de4-4f68-8834-0a4356a2fe53", + "type": "webhook" + } + }, + "edges": { + "webhook->Transform-data-to-FHIR-standard": { + "id": "b2c7407b-0ae9-4ca5-9d6b-ee624976fa54", + "delete": true + }, + "Transform-data-to-FHIR-standard->Send-to-OpenHIM-to-route-to-SHR": { + "id": "d22ed6f4-26a2-4c85-b261-cc110a6851e6", + "delete": true + }, + "Send-to-OpenHIM-to-route-to-SHR->Notify-CHW-upload-successful": { + "id": "26c12f7f-7806-4008-87cd-6747998f95f4", + "condition": "on_job_success", + "source_job_id": "977b87ff-f347-42b5-832f-6ae2ca726f32", + "source_trigger_id": null, + "target_job_id": "86b743a3-fd00-4629-b9fb-d5f38fb56d0b" + }, + "Send-to-OpenHIM-to-route-to-SHR->Notify-CHW-upload-failed": { + "id": "0630ac96-4f67-4de7-8c3d-0bf3f89f80d9", + "condition": "on_job_failure", + "source_job_id": "977b87ff-f347-42b5-832f-6ae2ca726f32", + "source_trigger_id": null, + "target_job_id": "be85df30-0abd-4f8e-be17-501f67e18b8d" + }, + "webhook->FHIR-standard-Data": { + "id": "5ce3a8ed-b9eb-464a-a2cd-ba55adc393c2", + "delete": true + }, + "FHIR-standard-Data->Send-to-OpenHIM-to-route-to-SHR": { + "id": "5f459cd9-2882-4a61-a2cc-ec45e58d4837", + "delete": true + }, + "webhook->FHIR-standard-Data-with-change": { + "id": "75e7f7d8-274b-410d-9600-730bbd535229", + "condition": "always", + "source_job_id": null, + "source_trigger_id": "530cde0b-0de4-4f68-8834-0a4356a2fe53", + "target_job_id": "28dd0846-a6ae-40c0-8ab4-3e0a6b487afe" + }, + "FHIR-standard-Data-with-change->Send-to-OpenHIM-to-route-to-SHR": { + "id": "1e5ba385-2c49-4241-8cd2-042c99a810ec", + "condition": "on_job_success", + "source_job_id": "28dd0846-a6ae-40c0-8ab4-3e0a6b487afe", + "source_trigger_id": null, + "target_job_id": "977b87ff-f347-42b5-832f-6ae2ca726f32" + } + } + } + }, + "id": "8deff39d-8189-4bd7-9dc7-f9f08e7f2c60", + "name": "openhie-project" +} +``` + +### Using the CLI to deploy or describe projects projects as code + +The project spec and project state can be used for a variety of reasons, e.g. +one could generate the state and spec as backups of the project or one could +generate these files and use them for auditing and record keeping, etc. 
The +OpenFn [CLI](https://github.com/OpenFn/kit/tree/main/packages/cli) comes with +commands that can be used to pull project configurations down from a running +Lightning server, and to deploy or push updates to existing projects on that +server. + +:::info Don't have the CLI yet? + +Install it by running `npm install -g @openfn/cli` + +::: + +Before using the CLI, configure it either by passing in environment variables: + +``` +OPENFN_ENDPOINT=https://app.openfn.org +OPENFN_API_KEY=yourSecretApiToken +``` + +Or through a `config.json` file: + +```json +{ +  // Required, can be overridden or set with `OPENFN_API_KEY` env var +  "apiKey": "***", + +  // Optional: can be set using -p, defaults to project.yaml +  "specPath": "project.yaml", + +  // Optional: can be set using -s, defaults to .state.json +  "statePath": ".state.json", + +  // Optional: defaults to OpenFn.org's API, can be overridden or set with +  // `OPENFN_ENDPOINT` env var +  "endpoint": "https://app.openfn.org" +} +``` + +More details on the CLI can be found +[here](https://github.com/OpenFn/kit/tree/main/packages/cli#basic-usage). + +### `openfn pull` to generate a project spec and state + +To generate the spec and state files for an existing project, use: + +```sh +openfn pull {YOUR-PROJECT-UUID} -c ./config.json +``` + +This command will save (or overwrite) a project spec and state file based on the +path you've set in your configuration. + +### `openfn deploy` to create a project on a Lightning instance + +To deploy a new project to a Lightning instance from a project spec (without a +project state) file use: + +```sh +openfn deploy -c config.json +``` + +### `openfn deploy` to update an existing project + +With a valid project state defined in your `config.json`, the same +`openfn deploy` command will beam up your changes as described by the difference +between your project spec and what's found on the server. + +```sh +openfn deploy -c config.json +Checking https://demo.openfn.org/api/provision/4adf2644-ed4e-4f97-a24c-ab35b3cb1efa for existing project. +Project found. +[CLI] ♦ Changes: + { + workflows: [ + { + jobs: [ + { +- body: "fn(state => {\n console.log(\"ok\")\n return state\n});" ++ body: "fn(state => {\n console.log(\"some changes here!\")\n return state\n});\n" + } + ... + ... + ... + ] + } + ] + } + +? Deploy? yes +[CLI] ♦ Deployed. +``` + +### Automated Version Control with GitHub and Lightning + +Representations of projects as code and pull/deploy functionality allow you to +check your whole project into a version control system such as GitHub. + +Lightning comes with a GitHub App that enables users to sync projects from an +instance to GitHub using the `openfn pull` command and vice versa +using `openfn deploy`. + +To set up version control: + +1. Create a project repo connection to a GitHub repository in **Project Settings + -> Sync to Github**. +2. Follow the instructions to install the Lightning GitHub app in your desired + repository. +3. Once you have created a connection, set up `pull` and `deploy` workflows + that use the OpenFn GitHub actions shown below. +4. Add `OPENFN_API_KEY` and `OPENFN_PROJECT_ID` repository secrets to your + GitHub repo as described below. +5. Add a `.config.json` file to your repository which specifies your endpoint + and paths to project spec and state files (see the example below). +6. Click the sync to Github button to initiate a sync from Lightning to GitHub. +7. Push a change to your selected branch to push changes from GitHub to + Lightning.
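+
+For reference, a minimal `.config.json` checked into the repository might look
+like the example below (illustrative only; adjust the endpoint and paths to
+match your Lightning instance and repository layout, and keep your API key in
+the repository secrets rather than in this file):
+
+```json
+{
+  "endpoint": "https://app.openfn.org",
+  "specPath": "project.yaml",
+  "statePath": ".state.json"
+}
+```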
+ +#### Github Repository Secrets + +The workflows that interact with the OpenFn actions will need the repository set +up with two secrets used in the github actions: + +- OPENFN_API_KEY: This is your API Key as generated from Lightning and will be + needed for authentication +- OPENFN_PROJECT_ID: This is your Project ID from Lightning this will be used to + pull from the lightning instance + +#### Github Repository Structure + +Here you can do pretty much what you want, so long as you've got a `config.json` +pointing to your project spec, state, and Lightning endpoint. + +#### Example [Deploy Workflow](https://github.com/OpenFn/demo-openhie/blob/main/.github/workflows/deploy.yml) for GitHub + +See https://docs.github.com/en/actions/quickstart#creating-your-first-workflow +for more help here. + +```yml +on: + push: + branches: + - main + +jobs: + deploy-to-lightning: + runs-on: ubuntu-latest + name: A job to deploy to Lightning + steps: + - name: openfn deploy + uses: OpenFn/cli-deploy-action@v0.1.11 + with: + secret_input: ${{ secrets.OPENFN_API_KEY }} +``` + +#### Example [Pull Workflow](https://github.com/OpenFn/demo-openhie/blob/main/.github/workflows/pull.yml) for GitHub + +See https://docs.github.com/en/actions/quickstart#creating-your-first-workflow +for more help here. + +```yml +on: [repository_dispatch] + +jobs: + pull-from-lightning: + runs-on: ubuntu-latest + permissions: + contents: write + name: A job to pull changes from Lightning + steps: + - name: openfn pull and commit + uses: OpenFn/cli-pull-action@v0.7.0 + with: + secret_input: ${{ secrets.OPENFN_API_KEY }} + project_id_input: ${{ secrets.OPENFN_PROJECT_ID }} + commit_message_input: + 'user ${{ github.event.client_payload.message }}' +``` + +The Lightning [demo instance](https://demo.openfn.org) is currently connected to +[this repo](https://github.com/OpenFn/demo-openhie/). Feel free to play around +with it. + +#### Using version control + +##### Lightning to GitHub + +Once you have configured version control for a project and a related repository +branch, you can sync changes to GitHub by pressing the "Initiate Sync" button on +the version control page and the Lightning GitHub app will run a `openfn pull` +action to update the versioned representation of your project as code. + +##### Github to Lightning + +Assuming you've configured a deploy action, any time there are changes made to +that branch in your GitHub repo, those changes will be pushed to your Lightning +project via `openfn deploy`. + +### Getting Help with the cli + +The cli package comes with an inbuilt `help`. Adding `--help` to a command such +as `openfn deploy --help` will result in a help message describing the command +and the options available when using this command. See an example below + +```sh +openfn deploy --help +openfn deploy + +Deploy a project's config to a remote Lightning instance + +Options: + --version Show version number [boolean] + --help Show help [boolean] + -c, --config, --config-path The location of your config file [default: "./.config.json"] + --no-confirm Skip confirmation prompts (e.g. 
'Are you sure?') [boolean] + --describe                    Downloads the project yaml from the specified instance [boolean] + -l, --log                    Set the log level [string] + --log-json                   Output all logs as JSON objects [boolean] + -p, --project-path           The location of your project.yaml file [string] + -s, --state-path             Path to the state file +``` + +## Other Versions + +- [Portability Proposal v4](portability-versions#proposal-v4) +- [Portability Proposal v3](portability-versions#proposal-v3) +- [Portability Proposal v2](portability-versions#proposal-v2) (`@latest` for + platform-app/microservice compatibility.) +- [Portability Proposal v1](portability-versions#proposal-v1) diff --git a/versioned_docs/version-legacy/release-notes.md b/versioned_docs/version-legacy/release-notes.md new file mode 100644 index 00000000000..b2bb0ef2bd6 --- /dev/null +++ b/versioned_docs/version-legacy/release-notes.md @@ -0,0 +1,515 @@ +--- +title: Release Notes +--- + +Release notes for **OpenFn/platform** + +## Version 1.94.87 (2021-07-05) + +New features: + +- **Full text search with Search Console**: Users can now search across message + bodies, message headers, and run logs via full-text string search. + +## Version 1.94.80 (2021-05-25) + +New features: + +- **Enhanced date selection for inbox and activity history:** Users can now type + a date string `YYYY-MM-DD HH:mm:ss` directly into the date field as a filter, + or select it as before from the date picker. +- **Inbox and Activity History speed enhancements:** Via database structure + changes, the speed for loading lists of messages and runs has been improved + significantly. +- **New Export and Reprocess Interfaces:** A unified + "what-you-see-is-what-you-get" interface has been implemented for exporting + and reprocessing messages and runs. Now, the bulk query is taken from the + current filters, and a confirmation dialog which displays the query is + presented to the user. This allows users to have a clear understanding of what + will be exported or reprocessed before executing the bulk request. + +## Version 1.92.44 (2021-05-07) + +New features: + +- **Exclusion filters:** Users can now specify message filter triggers to + _exclude_ messages matching a certain signature via the same matching rules as + the standard _inclusion_ filter. Read more in the + [docs](/documentation/build/triggers#message-filter-triggers). + +## Version 1.90.30 (2021-03-31) + +New features: + +- **Streaming logs:** Users can now view individual log lines as they are + emitted by their NodeVM during the execution of a run. +- **Click to find paths:** Users can now click on a node in a sample message in + the job studio to copy the path to that node. It can then be pasted into the + expression. +- **Run directly from job studio:** While writing a job in the job studio, it's + now possible to start a run for that job with the sample message, and the logs + will automatically be streamed back to the Job Studio. + +## Version 1.89.29 (2021-03-24) + +New features: + +- **Sample message explorer:** When writing a job, users can view a selection of + matching messages which would cause the job to run. Now, that sample + `state.json` view is more easily browseable as a tree, and by clicking + individual nodes in the tree users can determine the required `path` for + accessing source data. For example, clicking on a node at + `state.data.patient.lastVisitDate` will copy + `dataValue('patient.lastVisitDate')` to your clipboard for pasting into the + job expression in the adjacent panel.
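+
+For illustration, a job expression might use that copied path roughly like this
+(a hypothetical sketch using `fn` and `dataValue` from `language-common`; the
+field names are only examples):
+
+```js
+fn(state => {
+  // dataValue(path) returns a function of state, so call it with state
+  // to read the value out of the message body.
+  const lastVisit = dataValue('patient.lastVisitDate')(state);
+  console.log('Last visit date:', lastVisit);
+  return state;
+});
+```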
+ +Bug Fixes: + +- Addressed an issue with the archive service which resulted in incomplete + archiving of expired data. This impacted users with 365 day retention plans + and Google Cloud Storage `.zip` archiving for data past its platform expiry. + _Data stored on the platform itself was unaffected_ but automatic archives for + data that expired from August 1st, 2019 to March 31st, 2020 only contains a + subset of the messages and runs from that period. + +## Version 1.89.0 (2021-03-15) + +New features: + +- **Export as yaml or microservice.zip:** Users can now export their project + configurations (triggers, jobs, credentials) as a `.yaml` file which can be + run with `OpenFn/microservice` or as a `.zip` file with a prepared + microservice implementation which can be extracted and started with + `docker-compose up`. + +## Version 1.88.0 (2021-02-22) + +New features: + +- **Typeahead snippets:** When writing jobs on OpenFn.org, users are now + prompted with a typeahead (or "autocomplete") dialog which shows them all + available helper functions and provides rich templates which can be accessed + at a keystroke. The result is a simpler, more intuitive job writing experience + that is more tightly coupled to the adaptors themselves. +- **Better IDE mode:** When editing a job, users are now provided with either a + step-by-step "wizard" mode or a full-screen integrated development environment + (IDE) which gives them a flexible workspace for viewing sample receipts, + drafting jobs, and exploring documentation. +- **Direct links to source code:** In the documentation drawer, users now find + direct links to the source code for each _version_ of the adaptor they're + using so that they don't need to take extra steps once on GitHub. +- **Save and share searches and filters:** On the **Inbox** and **Activity + History** pages the current filter (e.g., "Only show messages from the last 7 + days matching trigger X with associated job runs in a failed state") is now + stored in the URL, allowing users to bookmark particular searches and share + those searches/filters with colleagues. + +## Version 1.87.8 (2021-01-28) + +New features: + +- **Improved logging on timeouts:** We've enhanced job timeouts significantly, + splitting into two separate types of timeouts. Exit code `2` is a standard + timeout which allows us to display the full Javascript logs up to the moment + of the timeout instead of a mysterious "we timed out your run" message. Exit + code `4` will be used for when the NodeVM fails to time itself out and becomes + unresponsive. This second case is extremely rare. + +## Version 1.75.0 (2020-07-14) + +New features: + +- **Scheduled jobs replace timer jobs:** In the past "interval trigger." behaved + like sand timers, executing your jobs every `x` seconds. This functionality + has been completely overhauled, giving users much greater control over when + jobs get executed by introducing `cron` expressions. Learn about `cron` via + the OpenFn UI, or at + crontab.guru. + +## Version 1.72.17 (2020-06-21) + +New features: + +- **Credential sharing:** Users can now share credentials across multiple + projects _and_ transfer credential ownership to other users. +- **Enhanced credential security:** Project owners can now _disable_ `console` + for particular jobs in their projects. This can be used to stop unintentional + or malicious printing of credential data to the logs. 
+- **Test mode for HTTP jobs:** Users can turn on "test mode" for jobs, which + will intercept all outbound HTTP requests, print the request parameters to the + log, and provide a 200 OK response. This can be used to test integrations for + systems that are still in development. +- **Enhanced debugging/open-source integration:** Now using a new version of + OpenFn/core which displays the current langauge-package version and Node JS + version for better debugging and a tighter integration between our + offline/open-source tools and the platform. +- **Enhanced monitoring:** Queue size monitor allows users to see approximately + how many runs are in their queue—this is useful for estimating time to + completing for big bulk reprocessing jobs. + +## Version 1.37.0 (2019-10-21) + +New features: + +- Submit ODK Collect forms (or any OpenRosa compliant form) directly to an + OpenFn inbox, rather than to ODK Aggregate or some other server before + forwarding. + +## Version 1.36.0 (2019-10-01) + +New features: + +- Allow messages to be deleted (in accordance with plan retention periods) + despite having more recent runs related to them. We set the message to "null" + for these younger runs, but the run logs will still be available until they're + past the retention period. This allows sensitive data in the initial message + payload to be purged with the retention period, while less sensitive data in + the run logs is still kept. +- Added more specific exit codes to runs for non-standard exits. Note that exit + codes above 2 are _very_ rare. See below for new codes from `v1.36.0` onwards. + +Enhanced Error Codes: + +- `0`: success (run succeeded, e.g. a destination system responded with a `200`) +- `1`: error (run failed normally, e.g. a destination system responded with a + `4XX`, `5XX`, or some specialized `RequiredFieldMissing` error.) +- `2`: run timed out (runs >100s only supported in enterprise plans) +- `3`: run could not start due to error (could relate to network traffic, but + very rare as an error _before_ the run is started will be retried from Redis + with an exponential backoff for a very long time) +- `5`: unexpected error during job execution +- `10`: error in `core/cli.js execute` + +## Version 1.35.0 (2019-10-01) + +New features: + +- Default navigation drawer to open and grouped nav items for easier access +- Various UI bug-fixes +- Added new indexes on messages and runs for faster search and filter + performance. +- Enhanced bulk-retry feature for runs +- Enhanced bulk-reprocess feature for messages +- Added user-warning when connecting a job to a GitHub filepath: the contents at + that filepath will overwrite your current OpenFn job on the next GitHub commit +- Added historical project usage view +- Added activity cleaning, as per www.openfn.org/pricing#plans to better comply + with data protection regulations and improve UI performance + +## Version 1.22.0 (2019-03-10) + +New features: + +- Allow **filtering by run status (any, success, failed) for bulk retrying + runs** in the Run Retry modal. +- Hovering over a message/run **displays the full date-time** at which it was + received/started as well as the relative time (i.e., how long ago) of that + action. + +## Version 1.21.0 (2019-03-09) + +New features: + +- Added buttons to the Job, Run, and Activity History pages that allow a user to + **run a time triggered job on demand** so that they don't have to wait for the + timer to expire to test. 
+ +## Version 1.20.0 (2019-03-07) + +New features: + +- **We've been busy, but negligent on release notes. To explain all that's + changed we've broken the new features list into multiple sections.** + +Messages & the "Inbox" view: + +- **Filter messages by body text**. (Be patient, doing `tsvector` searching + across millions of payloads.) +- Filter by date. (Default inbox view shows last 30 days.) +- **Export messages** as a CSV, based on your currently applied filters. +- **Bulk reprocess messages** in a series. +- All projects on paid plans now have their own job running queues. +- Partial loading to address inbox view performance issues: messages are loaded + first on the inbox view, and then their related "run states" are calculated + and loaded in a second action. + +Runs & the "Activity History" view: + +- **Filter runs by log text**. (This is a full text search and may take some + time.) +- Filter by date. (Default activity history view shows last 30 days.) +- **Bulk retry runs** in a series. (With the ability to limit retries to a + certain job.) +- **Export runs** as a CSV, based on your currently applied filters. + +Authentication & Security: + +- Require basic auth or token auth to make HTTP requests to a project inbox. + +Project Settings: + +- View the "inbox URL" as text with a click-to-reveal button. +- Show "pace" of estimated usage to determine when a plan upgrade will be + necessary. + +Jobs: + +- Ability to create a job, and a trigger all from the same "Wizard" view. + +Triggers: + +- Ability to create "success" and "failure" triggers so that jobs can be run + based on the success or failure of another job run. + +## Version 1.10.0 (2017-05-04) + +New features: + +- GitHub integration now generally available for enterprise users. Self-setup + interface completed. + +## Version 1.9.0 (2017-03-07) + +New features: + +- View matching messages in the job writing interface when a message filter + trigger is selected. +- "Tree view" exposed for job expression viewing. With valid syntax, you're able + to see your expression as a syntax tree as we step slowly towards a more + point-and-click interface. + +## Version 1.75.0 (2016-12-08) + +New features: + +- Hold control while clicking on navigation buttons to open the target in a new + window. +- Filter messages in your inbox by their content by selecting a message-filter + trigger. + +## Version 1.7.0 (2016-12-05) + +#### _1.7 is all about user experience!_ + +New features: + +- Material design—more whitespace and cleaner lines. +- Goto page on inbox and activity tables—save time when processing errors. +- Change number of items per page on inbox and activity table—care with this one + on slow connections! +- Go to next or previous message or run—makes working through an audit trail + easier +- Change user profile settings without changing password +- Select syntax style for code editors in user settings—clouds midnight is my + new favorite +- Filter projects, jobs, triggers by name—on the fly for quick navigation +- Add adaptor logos to credentials list—quick identification +- Specify connection types on "Apps" list—seems there was some confusion about + this. I know we're missing plenty of apps that have good APIs. Will consider + logging API documentation as well. +- Shift second top-nav to a collapseable "side nav"—better use of screen + real-esate. +- Use 'masonry' packing module for jobs, triggers, credentials, and project + settings boxes—more efficient use of space +- Add material design to _this_ documentation page! 
+ +## Version 1.6.0 (2016-11-24) + +New features: + +- Updated payment receipts to include project names. +- Added `update(...)` to Salesforce adaptor v0.3.0 +- Added `fetchWithErrors` to HTTP adaptor v0.3.1 + +**New Salesforce helper function `update(...)`:** It takes an object and, so +long as you include the "Id", updates only the fields you provide. + +```js +update("Patient__c", fields( + field("Id", dataValue("pathToSalesforceId")), + field("Name__c", dataValue("patient.first_name")), + field(...) +)) +``` + +**New http helper function `fetchWithErrors(...)`:** This function will perform +a GET request on an endpoint and return the response to another endpoint, +regardless of whether the first GET succeeded or failed. It's currently being +used to send message receipt confirmations back to a system of origin that uses +OpenFn as an intermediary between it and an SMS gateway. If the SMS message +doesn't get delivered because the phone number is invalid, we'd like that +information to return all the way to Salesforce, rather than erroring out and +staying in OpenFn. + +```js +// ============= +// We use "fetchWithErrors(...)" so that when the +// SMS gateway returns an error the run does not "fail". +// It "succeeds" and then delivers that error message +// back to Salesforce with the "Update SMS Status" job. +// ============= +fetchWithErrors({ + getEndpoint: 'send_to_contact', + query: function (state) { + return { + msisdn: + state.data.Envelope.Body.notifications.Notification.sObject + .SMS__Phone_Number__c, + message: + state.data.Envelope.Body.notifications.Notification.sObject + .SMS__Message__c, + api_key: 'some-secret-key', + }; + }, + externalId: state.data.Envelope.Body.notifications.Notification.sObject.Id, + postUrl: 'https://www.openfn.org/inbox/another-secret-key', +}); +``` + +## Version 1.5.0 (2016-10-05) + +New features: + +- Delete credentials +- Delete triggers +- Archive jobs +- Continual testing from status.openfn.org + +**Delete credentials and triggers:** Users can now delete credentials and +triggers. + +**Archive jobs:** Users can now archive jobs, rendering them inactive. Click +"view archived jobs" to see and restore old jobs. + +**status.openfn.org** is now live, providing continual testing of key OpenFn +services. We run both message-filter-based and timer-trigger-based jobs every +five minutes to ensure availability, as well as measuring the round-trip time +(in ms) that it takes for a server in a different geographical location to send +valid JSON to OpenFn and then receive and process the 200 response. (This time will +vary according to the location of your servers, but it's important to note that +we test the full round trip. Our servers typically send out 200s in about 5-6ms, +but you can expect the round trip to complete in closer to 750ms.) + +## Version 1.4.0 (2016-09-26) + +New features: + +- Run "matches" directly from your inbox view. +- Always display the latest notification, dismiss to scroll back in time. +- Login and signup server responses + +**Run "matches" from inbox:** Users can now run matches in a single click from +their inbox, getting notifications that runs have successfully started without +having to navigate to the Message Inspector page for a given message. Look for +the blue "play" button next to each match. Simply click to start running that +job with the message in question. + +**Latest notifications:** User notifications will now be displayed +_newest-on-top_ and when there are multiple stacked notifications users will +be... well... notified.
Click the small "x" to dismiss the latest notification, +moving backwards in time until all have been read. + +**Login/signup errors:** Until now, invalid login messages and duplicate singup +emails had been only displayed in your brower's logs. (That's our fault.) You'll +now see a handy "invalid credentials" or "email already registered" message when +trying to log in or sign up. + +## Version 1.3.0 (2016-09-20) + +- New version of language-salesforce allows users to `alterState` with a custom + function. + +**alterState:** +[documentation](https://github.com/OpenFn/docs/blob/master/generate-library/alter-state-before-operations.js) + +## Version 1.2.0 (2016-09-15) + +- Users can now select specific adaptor versions for their jobs. +- Jobs will "auto-upgrade" unless locked to a specific version. + +**Adaptor versions:** This means that the code beneath your job, once saved with +a specific adaptor version, will never change. This is an important step forward +for the whole community, as it enables more rapid progress—especially +considering the growing number of outside contributors—without risking +introducing instability to existing jobs. + +Each new version of an adaptor will have release notes introducing the new +features or changes to helper functions. To allow easy upgrades, we will still +mandate that all new versions are backwards compatible. + +## Version 1.1.0 (2016-08-29) + +New features: + +- Users can now run jobs based on **timers** as well as filters. +- Users can now view logs for all runs, not just the most recent. +- Jobs are "aware" of their last running state. +- `get(...)` and `post(...)` are now supported using the language-http adaptor, + allowing users to make their own HTTP calls in jobs. + +**Timer triggers:** On the triggers tab, users can set the trigger type to +"timer" and input a whole number of seconds for the "interval". Any "active" +jobs associated with this trigger will run periodically after the interval +elapses. + +**View logs for all runs:** By clicking on an individual run from either the +Activity tab or the Message Inspector, users can view the full logs for that +run, regardless of whether or not a more recent run took place with the same job +and message. + +**Job state:** When a job runs based on a timer, not an incoming message, it +will preserve it's state for the next run. This feature is commonly used by +language packs like language-surveycto, language-odk, and others to create a +"cursor" to offset or limit database queries. + +> For example, `fetchSubmissions(...)` in the language-surveycto adaptor takes +> three arguments: `formId`, `afterDate`, and `postUrl`. The first time this job +> runs it will only fetch submissions _after_ the `afterDate`. If any +> submissions are received, it will take the last submission from the array (by +> date) and persist it in the `job_state` as `lastSubmissionDate`. The next time +> this job runs, say, 300 seconds (5 minutes) later, it will ignore `afterDate` +> and instead fetch submissions after `lastSubmissionDate`. While this +> particular helper function is very abstract (it does this one thing well) it's +> possible to write a job that simply alters the final "state" before +> completing, passing whatever data you'd like from _THIS RUN_ to the _NEXT RUN_ +> of the job. + +**get(...) and post(...):** Have a look at this complex job using language-http. +See how it is possible to provide a query and a callback for `get` while `post` +takes a url and a body object. 
At the end, the user is setting +state.lastSubmissionDate to `submissions[submissions.length-1].SubmissionDate`. + +See the functions themselves at +[language-http](https://github.com/OpenFn/language-http/blob/master/src/Adaptor.js). + +```js +get('forms/data/wide/json/someForm', { + query: function (state) { + return { date: state.lastSubmissionDate || 'Aug 29, 2016 4:44:26 PM' }; + }, + callback: function (state) { + // Pick submissions out in order to avoid `post` overwriting `response`. + var submissions = state.response.body; + // return submissions + return submissions + .reduce(function (acc, item) { + // tag submissions as part of the "someForm" form + item.formId = 'someForm'; + return acc.then( + post('https://www.openfn.org/inbox/some-inbox-uuid', { body: item }) + ); + }, Promise.resolve(state)) + .then(function (state) { + if (submissions.length) { + state.lastSubmissionDate = + submissions[submissions.length - 1].SubmissionDate; + } + return state; + }) + .then(function (state) { + delete state.response; + return state; + }); + }, +}); +``` diff --git a/versioned_docs/version-legacy/roadmap.md b/versioned_docs/version-legacy/roadmap.md new file mode 100644 index 00000000000..d99bee57dd9 --- /dev/null +++ b/versioned_docs/version-legacy/roadmap.md @@ -0,0 +1,69 @@ +--- +title: Documentation Roadmap +--- + +## Visit the public [documentation roadmap](https://github.com/orgs/OpenFn/projects/1?card_filter_query=repo%3Aopenfn%2Fdocs) on github. + +We're constantly working to improve the documentation on OpenFn, particularly +around the open source integration toolkit. Visit the documentation roadmap on +Github to see what we're working on. + +:::tip + +Visit +[**the documentation roadmap**](https://github.com/orgs/OpenFn/projects/1?card_filter_query=repo%3Aopenfn%2Fdocs) +to view the documentation roadmap. + +Below is merely a static list that will be updated far less frequently than the +actual docs issues on Github. + +::: + +## Planned Sections/Enhancements + +### Foundational Concepts + +A page or set of pages that reviews concepts that are essentially prerequisite +to using OpenFn or another interoperability platform. + +### The anatomy of an integration (title subject to change) + +Envisioned as a primarily visual page that outlines the constituent parts of +every integration to equip OpenFn users with a solid grounding of what an +integration really is + +### Integrating using OpenFn + +Either a subpage of the structure page or its own page subsequent to; this maps +the foundational concepts above to jobs, triggers, runs, etc. We will only +introduce OpenFn-specific terminology after we establish a common frame of +reference. In some ways this is just an edit of the current Getting Started +page. + +### What Does it Mean to be Ready? + +Detail our OFG consulting team’s implementation process, including the various +places that data mapping is referenced to do a step-by-step “pre-job-writing” +walkthrough + +### Creation of documentation/tutorial “pathways” + +Presenting guides and for new users and new devs from existing documentation +resources to flatten the learning curve + +### Quickstart + +Rewrite to be dev-centered; should read as a page for users who understand the +above already and are ready to dive into technical setup + +### Project walk-through + +Revise to incorporate the below app-specific tutorials in a cohesive manner + +### How to integrate CommCare with Salesforce + +The Project walkthrough, but specific to these two tools. 
+ +### How to integrate DHIS2 and CommCare + +### How to integrate Kobo Toolbox and a custom Postgres database diff --git a/versioned_docs/version-legacy/source-apps.md b/versioned_docs/version-legacy/source-apps.md new file mode 100644 index 00000000000..5895f4afd0a --- /dev/null +++ b/versioned_docs/version-legacy/source-apps.md @@ -0,0 +1,59 @@ +--- +title: Generic Data Sources +sidebar_label: Generic Sources +--- + +## Standard webhook configuration + +This section describes how to enable push notifications from selected source +applications or how to configure pull jobs to fetch data from those sources. If +you don't see yours in the alphabetical list below feel free to add it with a +pull request. + +Every OpenFn project has a unique **Inbox URL** address that can be used as an +endpoint for any JSON webhook. To set up a data source, configure that source to +make a POST over HTTPS to your Inbox URL. See [Your Inbox](/build/inbox.md). + +To connect an application with standard JSON webhooks, copy your inbox URL from +the "Inbox" page or your "Project Settings" screen and use it as the destination +URL on your source application. Unless you have specifically configured it on +the "Access & Security" page, no authentication is required. + +**_N.B.: This is by no means an exhaustive list._** It is merely a list of +common sources that external contributors have added. Remember that anything +with a REST api or a JSON-based notification service can be used with OpenFn. + +## How webhooks enable real time integration + +Webhooks services (sometimes called “REST Services”) are services that your +users can configure on your application which make posts to other REST +endpoints. The most common example we’ll come across is a form, submission, or +case forwarding service that will send a copy of a submission to an external +API. + +## Providing a UI for your webhook? + +This is likely the most end-user interactive part of your API, and you’ll +probably want a feature in your user-interface that allows them to turn on and +off these various services. See the below example from Kobo Toolbox (left) and +CommCare (right). + +![kobo_to_commcare](/img/webhooks1.png) + +## When to send? + +Consider whether to set up watches or triggers at the DB level (this seems like +overkill but is provided by some databases relatively inexpensively) or at +several key interfaces in your application. What types of +updates/submissions/changes in your application might other applications need to +be notified of in real time? A new submission is the most common, but updates to +a “case”, changes to UAM, or any other events could be valuable. + +## What to send? + +The whole resource, please. This anticipates our thoughts on sector-wide data +standards slightly, but (within reason) it makes sense to expose everything your +end-user will need to run the next step in their logic. Some interfaces allow +the user to control which fields (and even which related resources) are sent in +a given payload, but often the default is to send everything and let them pick +and choose what they want to use. 
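+
+For example, a source application configured to forward form submissions might
+POST the whole resource to a project inbox with a request like the one below
+(the inbox UUID and payload fields are purely illustrative):
+
+```sh
+# Replace your-inbox-uuid with the Inbox URL shown on your project settings page.
+curl -X POST -H "Content-Type: application/json" \
+  -d '{
+    "formId": "patient_registration",
+    "submission": {
+      "id": "abc-123",
+      "patient_name": "Jane Doe",
+      "visit_date": "2021-05-07"
+    }
+  }' \
+  https://www.openfn.org/inbox/your-inbox-uuid
+```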
diff --git a/versioned_docs/version-legacy/standards/digital-public-goods.md b/versioned_docs/version-legacy/standards/digital-public-goods.md
new file mode 100644
index 00000000000..558cfbe2994
--- /dev/null
+++ b/versioned_docs/version-legacy/standards/digital-public-goods.md
@@ -0,0 +1,19 @@
+---
+title: Digital Public Goods
+---
+
+OpenFn is recognised by the
+[Digital Public Goods Alliance](https://digitalpublicgoods.net/) as a Digital
+Public Good, or "DPG".
+
+:::info Digital Public Goods Definition
+
+Open-source software, open data, open AI models, open standards, and open
+content that adhere to privacy and other applicable best practices, do no harm
+by design and are of high relevance for attainment of the United Nations 2030
+Sustainable Development Goals (SDGs)
+
+:::
+
+You can read more about the DPG standard
+[here](https://digitalpublicgoods.net/standard/).
diff --git a/versioned_docs/version-legacy/standards/global-goods.md b/versioned_docs/version-legacy/standards/global-goods.md
new file mode 100644
index 00000000000..00fa3bd279c
--- /dev/null
+++ b/versioned_docs/version-legacy/standards/global-goods.md
@@ -0,0 +1,20 @@
+---
+title: Global Goods
+---
+
+OpenFn is one of 36 software applications that have been recognised as a Digital
+Square Global Good for Health.
+
+:::info Global Goods for Health Definition
+
+A mature digital health software global good is software that is Free and Open
+Source Software (FOSS), is supported by a strong community, has a clear
+governance structure, is funded by multiple sources, has been deployed at
+significant scale, is used across multiple countries, has demonstrated
+effectiveness, is designed to be interoperable, and is an emergent standard
+application.
+
+:::
+
+You can read more about Global Goods for Health
+[here](https://digitalsquare.org/digital-health-global-goods).
diff --git a/versioned_docs/version-legacy/standards/openhie.md b/versioned_docs/version-legacy/standards/openhie.md
new file mode 100644
index 00000000000..92e6926f12e
--- /dev/null
+++ b/versioned_docs/version-legacy/standards/openhie.md
@@ -0,0 +1,81 @@
+---
+title: OpenHIE
+---
+
+_This section assumes you are familiar with the OpenHIE specification, a
+reference framework that makes sharing health data across information systems
+possible through a Health Information Exchange ("HIE"). To learn more, check out
+[OpenHIE docs](https://guides.ohie.org/arch-spec/) and
+[community](https://ohie.org/)._
+
+## OpenFn Lightning and OpenHIE
+
+OpenFn Lightning is an OpenHIE-compliant **_workflow engine_** used to (1)
+automate complex business processes that cut across digital systems (including
+OpenHIE components _and_ point of care systems), and to (2) handle data mapping
+and transformation.
+
+If your organization is implementing the OpenHIE standard architecture, then
+OpenFn provides a workflow engine that interfaces with your interoperability
+layer component ("IOL"). OpenFn can be implemented to automate:
+
+- Workflows between point of service systems;
+- Workflows across core HIE components;
+- Data transformation steps required to prepare data before routing it to other
+  HIE components via the IOL. (Note that OpenFn workflows serve as a
+  web-UI-accessible and manageable alternative to OpenHIM "mediators"; a sketch
+  of one such transformation step is shown below.)
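+
+For illustration only, here is a minimal sketch of the kind of transformation
+step described above, written in the legacy job style used elsewhere in these
+docs with the `post()` helper from language-http. The OpenHIM channel URL and
+field names are placeholders rather than a real mapping; an actual workflow
+would map to the receiving HIE component's data model and use a stored
+credential.
+
+```js
+// Sketch only: reshape an incoming point-of-care payload and route it to the
+// IOL (e.g., an OpenHIM channel). The URL and field names are illustrative.
+post('https://openhim.example.org/channels/disease-surveillance', {
+  body: function (state) {
+    // Map the source system's fields onto the shape the HIE component expects.
+    return {
+      facility: state.data.facility_code,
+      period: state.data.reporting_period,
+      suspectedCases: state.data.suspected_cases,
+    };
+  },
+});
+```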
+
+OpenFn supports the
+[functional requirements](https://guides.ohie.org/arch-spec/openhie-component-specifications-1/openhie-interoperability-layer-iol#openhie-iol-functional-requirements)
+of the OpenHIE IOL, so some organizations also use OpenFn as their
+central interoperability layer. That said, please note that OpenFn cannot yet be
+used as a fully OpenHIE-compliant **_interoperability layer_** because it does
+not leverage the IHE ATNA profile (see
+[requirement IOL-WF1](https://guides.ohie.org/arch-spec/openhie-component-specifications-1/openhie-interoperability-layer-iol#openhie-iol-workflow-requirements)).
+
+![openhie_architecture](/img/openhie_architecture.png)
+
+_For an overview of OpenFn Lightning and how it fits into OpenHIE, see our
+[introduction for the OpenHIE showcase](https://www.youtube.com/watch?v=PTRRZBYtqyc)
+or read on for more context._
+
+## Context
+
+### The Interoperability layer (IOL):
+
+- Sits between the OpenHIE components and point-of-care systems
+- Serves as a single point of entry and secure gateway to the OpenHIE
+- Complies with requirements around transaction routing and auditing
+
+_OpenFn Lightning satisfies the functional requirements of the IOL, but is not
+fully OpenHIE-compliant since it does not yet leverage the IHE ATNA profile_
+
+### The workflow engine:
+
+- Provides out-of-box interfaces to connect to point of care systems
+- Handles complex data mapping and transformation to reformat data for receipt
+  by a destination system (e.g., map data from a point of care system to the
+  data model of an OpenHIE component, and/or map non-FHIR data to FHIR profiles)
+- Routes data to the interoperability layer
+- Can keep track of the long-running state of a patient's care and perform
+  actions based on this context (such as sending alerts) to improve patient
+  care.
+
+_OpenFn Lightning is an OpenHIE-compliant workflow engine_
+
+## Case study: Using OpenFn to quickly deliver a new OpenHIM mediator in Nigeria
+
+In Nigeria, as part of the
+[ALMANACH project](https://articles.nigeriahealthwatch.com/almanach-revolutionising-the-management-of-childhood-illnesses-in-adamawa-state/),
+SwissTPH used OpenFn to automate data mapping and exchange between CommCare and
+DHIS2 for disease surveillance. The workflow ran on OpenFn's cloud for several
+years and, in preparation for handover and scale-up, the team at SwissTPH then
+prepared a deep integration with OpenHIM for local deployment.
+
+SwissTPH took their existing OpenFn workflow and built it into their OpenHIM
+instance as a "mediator", ensuring that all data is routed via this IOL. They
+continued to leverage OpenFn's out-of-box DHIS2 adaptor and reusable workflow
+templates to quickly develop automation that reformats data received from
+CommCare and maps it to the DHIS2 data model.
+
+![swisstph](/img/swisstph.png)
diff --git a/versioned_docs/version-legacy/style-guide.md b/versioned_docs/version-legacy/style-guide.md
new file mode 100644
index 00000000000..cffd4830b3d
--- /dev/null
+++ b/versioned_docs/version-legacy/style-guide.md
@@ -0,0 +1,297 @@
+---
+id: style-guide
+title: Style Guide
+sidebar_label: Style Guide
+slug: /style-guide
+---
+
+You can write content using
+[GitHub-flavored Markdown syntax](https://github.github.com/gfm/).
+
+:::tip
+
+We use a `.prettierrc` file to enforce standard styles via the "Prettier" code
formatter.
If you are using VsCode, you can install prettier via +https://marketplace.visualstudio.com/items?itemName=esbenp.prettier-vscode + +Make sure to format you work before opening a PR. + +::: + +## Markdown Syntax + +To serve as an example page when styling markdown based Docusaurus sites. + +## Headers + +# H1 - Create the best documentation + +## H2 - Create the best documentation + +### H3 - Create the best documentation + +#### H4 - Create the best documentation + +##### H5 - Create the best documentation + +###### H6 - Create the best documentation + +--- + +## Emphasis + +Emphasis, aka italics, with _asterisks_ or _underscores_. + +Strong emphasis, aka bold, with **asterisks** or **underscores**. + +Combined emphasis with **asterisks and _underscores_**. + +Strikethrough uses two tildes. ~~Scratch this.~~ + +--- + +## Lists + +1. First ordered list item +1. Another item + - Unordered sub-list. +1. Actual numbers don't matter, just that it's a number + 1. Ordered sub-list +1. And another item. + +- Unordered list can use asterisks + +* Or minuses + +- Or pluses + +--- + +## Links + +[I'm an inline-style link](https://www.google.com/) + +[I'm an inline-style link with title](https://www.google.com/ "Google's Homepage") + +[I'm a reference-style link][arbitrary case-insensitive reference text] + +[You can use numbers for reference-style link definitions][1] + +Or leave it empty and use the [link text itself]. + +URLs and URLs in angle brackets will automatically get turned into links. +http://www.example.com/ or and sometimes example.com +(but not on GitHub, for example). + +Some text to show that the reference links can follow later. + +[arbitrary case-insensitive reference text]: https://www.mozilla.org/ +[1]: http://slashdot.org/ +[link text itself]: http://www.reddit.com/ + +--- + +## Images + +Here's our logo (hover to see the title text): + +Inline-style: +![alt text](https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png 'Logo Title Text 1') + +Reference-style: ![alt text][logo] + + +[logo]: https://github.com/adam-p/markdown-here/raw/master/src/common/images/icon48.png + 'Logo Title Text 2' + + +Images from any folder can be used by providing path to file. Path should be +relative to markdown file. + +![img](/img/undraw_Portfolio_update_re_jqnp.svg) + +### Image sizing/styling + +Images can be sized using inline HTML. + + + +--- + +## Gifs + +Gifs are helpful for demonstrating short sequences of user behaviour. + +![img](/img/how-to-gif.gif) + +There are many tools that will help you create GIFs: + +- [Peek](https://github.com/phw/peek) +- [Capture to a Gif](https://chrome.google.com/webstore/detail/capture-to-a-gif/eapecadlmfblmnfnojebefkbginhggeh) +- [Chrome Capture](https://chrome.google.com/webstore/detail/chrome-capture-screenshot/ggaabchcecdbomdcnbahdfddfikjmphe) + +:::note + +Please note that if you're using an animated "cursor dot" and "show/click +animation", the hex code we use is **#B53F48**. + +::: + +--- + +## Code + +```javascript +var s = 'JavaScript syntax highlighting'; +alert(s); +``` + +```python +s = "Python syntax highlighting" +print(s) +``` + +``` +No language indicated, so no syntax highlighting. +But let's throw in a tag. +``` + +```js {2} +function highlightMe() { + console.log('This line can be highlighted!'); +} +``` + +--- + +## Tables + +Colons can be used to align columns. 
+ +| Tables | Are | Cool | +| ------------- | :-----------: | -----: | +| col 3 is | right-aligned | \$1600 | +| col 2 is | centered | \$12 | +| zebra stripes | are neat | \$1 | + +There must be at least 3 dashes separating each header cell. The outer pipes (|) +are optional, and you don't need to make the raw Markdown line up prettily. You +can also use inline Markdown. + +| Markdown | Less | Pretty | +| -------- | --------- | ---------- | +| _Still_ | `renders` | **nicely** | +| 1 | 2 | 3 | + +--- + +## Blockquotes + +> Blockquotes are very handy in email to emulate reply text. This line is part +> of the same quote. + +Quote break. + +> This is a very long line that will still be quoted properly when it wraps. Oh +> boy let's keep writing to make sure this is long enough to actually wrap for +> everyone. Oh, you can _put_ **Markdown** into a blockquote. + +--- + +## Inline HTML + +
+
Definition list
+
Is something people use sometimes.
+ +
Markdown in HTML
+
Does *not* work **very** well. Use HTML tags.
+
+ +--- + +## Line Breaks + +Here's a line for us to start with. + +This line is separated from the one above by two newlines, so it will be a +_separate paragraph_. + +This line is also a separate paragraph, but... This line is only separated by a +single newline, so it's a separate line in the _same paragraph_. + +--- + +## Admonitions + +:::note + +This is a note + +::: + +:::tip + +This is a tip + +::: + +:::important + +This is important + +::: + +:::caution + +This is a caution + +::: + +:::warning + +This is a warning + +::: + +## Tabs + +```mdx-code-block +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +``` + +Note how we import tabs first, then use them as below: + +```jsx +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + + + This is an apple 🍎 + This is an orange 🍊 + This is a banana 🍌 +; +``` + +```mdx-code-block + + This is an apple 🍎 + This is an orange 🍊 + This is a banana 🍌 + +``` diff --git a/versioned_docs/version-legacy/writing-code.md b/versioned_docs/version-legacy/writing-code.md new file mode 100644 index 00000000000..74c19bd587c --- /dev/null +++ b/versioned_docs/version-legacy/writing-code.md @@ -0,0 +1,28 @@ +--- +title: Writing Code +--- + +## Introduction + +This section is intended to provide developers with a basic introduction to +contributing to OpenFn's open-source applications. + +There are three ways you can contribute to the OpenFn DPG: + +### 1. Build or extend OpenFn adaptors + +- Requires knowledge of Javascript and Typescript +- See the [README.md](https://github.com/OpenFn/lightning#contribute-to-this-project) to learn how to contribute + +### 2. Add or improve a feature on the OpenFn Lightning platform +- Requires knowledge of Elixir and Pheonix Liveview +- See the [README.md](https://github.com/OpenFn/adaptors#contributing) to learn how to contribute + +### 3. Add to or improve our documentation + +Please feel free to point out [issues](https://github.com/openfn/docs/issues) +with the OpenFn documentation or, if you can't find the right repo, issues with the +tools themselves. (The more feedback the better!). If you want to propose some +new language for the documentation, you can make those changes by clicking the +**"Edit this page"** link at the bottom of any page and submit a pull request! + diff --git a/versioned_docs/version-legacy/writing-docs.md b/versioned_docs/version-legacy/writing-docs.md new file mode 100644 index 00000000000..277a4d7a272 --- /dev/null +++ b/versioned_docs/version-legacy/writing-docs.md @@ -0,0 +1,59 @@ +--- +title: Writing Docs +--- + +Please feel free to point out [issues](https://github.com/openfn/docs/issues) +with this documentation or, if you can't find the right repo, issues with the +tools themselves. (The more feedback the better!). If you want to propose some +new language for the documentation, you can make those changes by clicking the +**"Edit this page"** link at the bottom of any page and submit a pull request! + +## Intro + +This document is meant to be a guide for OpenFn’s documentation. Remember the +goal is to treat “docs like code” and to create a docs portal that makes using +OpenFn's tools a fairly self-service experience. Feel free to contribute to this +document. + +## What are docs + +When we say docs, we mean streamlined, tightly phrased, and fast-moving +information that helps citizen integrators using OpenFn understand the +platform’s complex application interfaces. What does treating docs like code +mean? 
Store the doc source files in a version control system. Build the doc +artifacts automatically. Ensure that a trusted set of reviewers meticulously +reviews the docs. Publish the artifacts without much human intervention. + +(Source: Anne Gentle’s book +_[Docs Like Code](https://www.docslikecode.com/about/)_.) + +## Goals for these docs + +### Promote collaboration + +Collaborate with contributors efficiently by keeping docs in the same system as +code with deliverables generated from source files. + +### Get long tail contributions + +Split deliverables into parts that encourage small but mighty contributions. One +person no longer needs to own an entire deliverable of documentation. + +### Track doc bugs like code bugs + +When you fix a doc bug, you reference that bug in the commit message to help +reviewers judge whether the doc fix solves the stated problem. + +### Get prompt and good quality reviews from team members + +Trust team members to value docs, ensure technical accuracy and consistency, +respect end users’ needs, and advocate for the best doc deliverables for +consumers. + +### Make beautiful docs + +Design is important. Create beautiful and modern looking docs. + +### Use developer tools and workflows + +Automate the process as much as possible, so we can focus on content creation. diff --git a/versioned_sidebars/version-legacy-sidebars.json b/versioned_sidebars/version-legacy-sidebars.json new file mode 100644 index 00000000000..250652bf5d7 --- /dev/null +++ b/versioned_sidebars/version-legacy-sidebars.json @@ -0,0 +1,134 @@ +{ + "docs": [ + { + "type": "category", + "label": "Introduction", + "items": [ + "intro", + "getting-started/integration-toolkit", + "about-lightning", + "getting-started/so-you-want-to-integrate" + ] + }, + { + "type": "category", + "label": "Getting Started", + "items": [ + "build/lightning-quick-start", + "getting-started/terminology", + "getting-started/implementation-checklist", + "getting-started/security" + ] + }, + { + "type": "category", + "label": "Design", + "items": [ + "design/design-quickstart", + "getting-started/glossary" + ] + }, + { + "type": "category", + "label": "Build", + "items": [ + { + "type": "category", + "label": "Jobs", + "items": [ + "build/jobs", + "jobs/job-design-intro", + "jobs/understanding", + "jobs/operations", + "jobs/multiple-operations", + "jobs/state", + "jobs/each", + "jobs/job-studio", + "jobs/editing_locally", + "jobs/working_with_branches" + ] + }, + "build/triggers", + "build/credentials", + { + "type": "link", + "label": "Adaptors", + "href": "/adaptors" + }, + { + "type": "category", + "label": "Live Data", + "items": [ + "build/inbox", + "source-apps" + ] + }, + "cli", + "build/troubleshooting", + { + "type": "category", + "label": "Old Versions", + "items": [ + "core", + "devtools/home", + "microservice/home" + ] + } + ] + }, + { + "type": "category", + "label": "Deploy", + "items": [ + "deploy/options", + "deploy/requirements", + "portability", + "instant-openhie" + ] + }, + { + "type": "category", + "label": "Manage", + "items": [ + "manage/platform-mgmt", + "manage/troubleshooting-tips-on-platform", + "jobs/errors", + "jobs/limits", + "release-notes" + ] + }, + { + "type": "category", + "label": "Standards", + "items": [ + "standards/digital-public-goods", + "standards/global-goods", + "standards/openhie" + ] + }, + { + "type": "doc", + "id": "faqs" + }, + { + "type": "category", + "label": "Contributing", + "items": [ + "openfn-roadmap", + "for-devs", + "gsoc", + "writing-docs", + "style-guide" + ] 
+ }, + { + "type": "doc", + "id": "about" + }, + { + "type": "link", + "label": "Community Forum", + "href": "https://community.openfn.org" + } + ] +} diff --git a/versions.json b/versions.json new file mode 100644 index 00000000000..2c0f623f9b1 --- /dev/null +++ b/versions.json @@ -0,0 +1,3 @@ +[ + "legacy" +]