From 66a6248f328a068d55c4361adcb6e826d9a8c4c4 Mon Sep 17 00:00:00 2001 From: Stephen James Date: Fri, 20 Oct 2023 17:27:53 +0100 Subject: [PATCH 1/2] Pages reviewed and updated where appropriate. --- ...-record-architecture-decisions.html.md.erb | 2 +- ...or-device-domain-naming-system.html.md.erb | 4 +- ...loud-platform-to-host-dhcp-dns.html.md.erb | 6 +- ...container-service-for-dhcp-dns.html.md.erb | 4 +- ...-use-aws-codepiplines-for-cicd.html.md.erb | 2 +- ...regration-platform-for-logging.html.md.erb | 6 +- ...ws-parameter-store-for-secrets.html.md.erb | 8 +- ...d-grafana-for-metrics-alerting.html.md.erb | 4 +- ...aws-elastic-container-registry.html.md.erb | 4 +- ...aws-sso-for-aws-account-access.html.md.erb | 8 +- ...-for-monitoring-infrastructure.html.md.erb | 14 +- ...1-use-github-actions-for-ci-cd.html.md.erb | 2 +- .../012-use-techdocs-for-adrs.html.md.erb | 6 +- .../documentation/adrs/adr-index.html.md.erb | 2 +- .../our-alliance.html.md.erb | 2 +- .../products/backups.html.md.erb | 11 +- .../documentation/products/dhcp.html.md.erb | 14 +- source/documentation/products/dns.html.md.erb | 10 +- .../documentation/products/nacs.html.md.erb | 12 +- .../best-practices/github.html.md.erb | 10 +- .../best-practices/use-aws-sso.html.md.erb | 215 +++++++++--------- source/index.html.md.erb | 3 +- 22 files changed, 178 insertions(+), 171 deletions(-) diff --git a/source/documentation/adrs/000-record-architecture-decisions.html.md.erb b/source/documentation/adrs/000-record-architecture-decisions.html.md.erb index c74b123..b4ab376 100644 --- a/source/documentation/adrs/000-record-architecture-decisions.html.md.erb +++ b/source/documentation/adrs/000-record-architecture-decisions.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 000 - Record architecture decisions -last_reviewed_on: 2023-10-05 +last_reviewed_on: 2023-10-20 review_in: 3 months --- diff --git a/source/documentation/adrs/001-use-bind-for-device-domain-naming-system.html.md.erb b/source/documentation/adrs/001-use-bind-for-device-domain-naming-system.html.md.erb index ae40907..de4a9f3 100644 --- a/source/documentation/adrs/001-use-bind-for-device-domain-naming-system.html.md.erb +++ b/source/documentation/adrs/001-use-bind-for-device-domain-naming-system.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 001 - Use BIND DNS for device name resolution -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -17,7 +17,7 @@ Staff devices e.g. laptops and desktops connected to our network will need [DNS] There is a requirement that this service is able to automatically scale (both up and down) to cope with varying load levels during the course of the day. -There is a limitation around using the fully managed AWS Route53 DNS service as it does not support DNS forwarding. +There is a limitation around using the fully managed AWS Route53 DNS service as it does not support DNS forwarding. **Dec 2021 Update** Route53 can now forward DNS requests e.g. 
[PDNS](https://www.ncsc.gov.uk/information/pdns) diff --git a/source/documentation/adrs/002-use-cloud-platform-to-host-dhcp-dns.html.md.erb b/source/documentation/adrs/002-use-cloud-platform-to-host-dhcp-dns.html.md.erb index 56cf1b0..0cb5f39 100644 --- a/source/documentation/adrs/002-use-cloud-platform-to-host-dhcp-dns.html.md.erb +++ b/source/documentation/adrs/002-use-cloud-platform-to-host-dhcp-dns.html.md.erb @@ -1,14 +1,14 @@ --- owner_slack: "#nvvs-devops" title: 002 - Use Cloud Platform to host DHCP and DNS -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- # 002 - Use Cloud Platform to host DHCP and DNS Date: 2020-05-22 -## Status +## Status ❌ Rejected ## Context @@ -21,7 +21,7 @@ After [investigations](https://github.com/ministryofjustice/cloud-platform/issue **Update 6th January 2021** -The Cloud Platform `live` cluster is now running on Kubernetes 1.20 which should allow TCP and UDP on the network load balancer +The Cloud Platform `live` cluster is now running on Kubernetes 1.20 which should allow TCP and UDP on the network load balancer ([see issue here](https://github.com/ministryofjustice/cloud-platform/issues/1897#issuecomment-1006539120)) diff --git a/source/documentation/adrs/003-use-aws-elastic-container-service-for-dhcp-dns.html.md.erb b/source/documentation/adrs/003-use-aws-elastic-container-service-for-dhcp-dns.html.md.erb index d8f1860..6be24da 100644 --- a/source/documentation/adrs/003-use-aws-elastic-container-service-for-dhcp-dns.html.md.erb +++ b/source/documentation/adrs/003-use-aws-elastic-container-service-for-dhcp-dns.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 003 - Use AWS Elastic Container Service for DHCP DNS -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -29,4 +29,4 @@ Less administrative overhead than running virtual machines e.g. EC2 and less com ### Disadvantages -Still need to provision the service, require CI/CD tooling, operational documentation and forever maintaining those things. \ No newline at end of file +Still need to provision the service, require CI/CD tooling, operational documentation and forever maintaining those things. 
diff --git a/source/documentation/adrs/004-use-aws-codepiplines-for-cicd.html.md.erb b/source/documentation/adrs/004-use-aws-codepiplines-for-cicd.html.md.erb index eadf65b..436496b 100644 --- a/source/documentation/adrs/004-use-aws-codepiplines-for-cicd.html.md.erb +++ b/source/documentation/adrs/004-use-aws-codepiplines-for-cicd.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 004 - Use AWS CodePipelines for CI/CD -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- diff --git a/source/documentation/adrs/005-use-log-aggregration-platform-for-logging.html.md.erb b/source/documentation/adrs/005-use-log-aggregration-platform-for-logging.html.md.erb index d539935..cf849db 100644 --- a/source/documentation/adrs/005-use-log-aggregration-platform-for-logging.html.md.erb +++ b/source/documentation/adrs/005-use-log-aggregration-platform-for-logging.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 005 - Use Log Aggregation Platform -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -74,9 +74,9 @@ The Operational Security Logging Platform is ready to accept these logs and the ### Advantages -- We don't need to stand up our own logging infrastructure +- We don't need to stand up our own logging infrastructure - Availability of logs from different sources in one location. ### Disadvantages -- Reliant on another team which means we may need to wait sometime before we get an aggregated view of our logs. \ No newline at end of file +- Reliant on another team which means we may need to wait sometime before we get an aggregated view of our logs. diff --git a/source/documentation/adrs/006-use-aws-parameter-store-for-secrets.html.md.erb b/source/documentation/adrs/006-use-aws-parameter-store-for-secrets.html.md.erb index d3c2df4..5147db1 100644 --- a/source/documentation/adrs/006-use-aws-parameter-store-for-secrets.html.md.erb +++ b/source/documentation/adrs/006-use-aws-parameter-store-for-secrets.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 006 - Use AWS Parameter Store for Secrets -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -17,14 +17,14 @@ There is a need to store infrastructure secrets securely in the [PTTP](https://m ## Decision -Use AWS SSM Parameter Store. +Use AWS SSM Parameter Store. - Aligned with [MoJ Security Guidance](https://security-guidance.service.justice.gov.uk/secrets-management/#application--infrastructure-secrets) - Compatible with AWS services e.g. [CodePipelines](https://docs.aws.amazon.com/codebuild/latest/userguide/build-spec-ref.html#build-spec-ref-example) - The use of AWS Secrets Manager can easily be extended if required. -### Alternative Considerations: +### Alternative Considerations: #### AWS Secrets Manager AWS Secrets Manager has ability to automatically rotate secrets for AWS RDS access. AWS Secrets Manager has a higher cost than AWS SSM Parameter Store. #### HashiCorp Vault -HashiCorp Vault is an open-source secret management solution. In order to use it we would have to host and manage an instance of the service ourselves. The cost of hosting, as well as the time to ensure data has appropriate backups, gives this service a high maintenance cost and overhead. \ No newline at end of file +HashiCorp Vault is an open-source secret management solution. In order to use it we would have to host and manage an instance of the service ourselves. 
The cost of hosting, as well as the time to ensure data has appropriate backups, gives this service a high maintenance cost and overhead. diff --git a/source/documentation/adrs/007-use-prometheus-and-grafana-for-metrics-alerting.html.md.erb b/source/documentation/adrs/007-use-prometheus-and-grafana-for-metrics-alerting.html.md.erb index 6d43df7..5271e1b 100644 --- a/source/documentation/adrs/007-use-prometheus-and-grafana-for-metrics-alerting.html.md.erb +++ b/source/documentation/adrs/007-use-prometheus-and-grafana-for-metrics-alerting.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 007 - Use Prometheus and Grafana for metrics and alerting -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -33,4 +33,4 @@ Use [Prometheus](https://prometheus.io/) for metrics and [Grafana](https://grafa - Prometheus [Exporters](https://prometheus.io/docs/instrumenting/exporters/) allow collection of metrics from network devices using [SNMP](https://github.com/prometheus/snmp_exporter), as well as the many [native](https://prometheus.io/docs/instrumenting/exporters/#software-exposing-prometheus-metrics) applications - Grafana to visualise a [wide variety](https://grafana.com/docs/grafana/latest/datasources/) of sources. - Grafana can send notifications when a custom metric thresholds. Can be easily integrated into Slack (when availble ServiceNow) -- Can be deployed into our existing CI/CD pipelines used for DHCP/DNS. \ No newline at end of file +- Can be deployed into our existing CI/CD pipelines used for DHCP/DNS. diff --git a/source/documentation/adrs/008-use-aws-elastic-container-registry.html.md.erb b/source/documentation/adrs/008-use-aws-elastic-container-registry.html.md.erb index e2f0c49..86ec373 100644 --- a/source/documentation/adrs/008-use-aws-elastic-container-registry.html.md.erb +++ b/source/documentation/adrs/008-use-aws-elastic-container-registry.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 008 - Use AWS Elastic Container Registry -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -22,4 +22,4 @@ Created issue [here](https://github.com/ministryofjustice/nvvs-devops/issues/96) ## Decision We will use AWS Elastic Container Registry to store our images. -- It integrates with CodePipelines and existing workflows and will remove the limits we have been hitting.. \ No newline at end of file +- It integrates with CodePipelines and existing workflows and will remove the limits we have been hitting.. diff --git a/source/documentation/adrs/009-use-aws-sso-for-aws-account-access.html.md.erb b/source/documentation/adrs/009-use-aws-sso-for-aws-account-access.html.md.erb index 3fe536e..994f836 100644 --- a/source/documentation/adrs/009-use-aws-sso-for-aws-account-access.html.md.erb +++ b/source/documentation/adrs/009-use-aws-sso-for-aws-account-access.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 009 - Use AWS SSO for AWS Account Access -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -14,13 +14,13 @@ Date: 2021-05-01 ## Context -We need to use Single Sign On to access all our AWS accounts. +We need to use Single Sign On to access all our AWS accounts. We currently use AzureAD for securing access to many of our services. 
## Decision We will use the [Modernisation Platforms](https://github.com/ministryofjustice/modernisation-platform) implementation of [AWS Single Sign On](https://user-guide.modernisation-platform.service.justice.gov.uk/concepts/environments/single-sign-on.html#single-sign-on). It is being used by many teams already so means less development time forour growing team. It does require the use of a MoJ Org GitHub account, but that requirement only further facilitates using [infrastructure as code](https://en.wikipedia.org/wiki/Infrastructure_as_code) within our AWS accounts. -### Alternative Considerations: +### Alternative Considerations: #### AzureAD -AzureAD is currently managed externally, this means that automating user and groups is not possible which limits its potential. \ No newline at end of file +AzureAD is currently managed externally, this means that automating user and groups is not possible which limits its potential. diff --git a/source/documentation/adrs/010-use-aws-eks-for-monitoring-infrastructure.html.md.erb b/source/documentation/adrs/010-use-aws-eks-for-monitoring-infrastructure.html.md.erb index 629394a..d83c392 100644 --- a/source/documentation/adrs/010-use-aws-eks-for-monitoring-infrastructure.html.md.erb +++ b/source/documentation/adrs/010-use-aws-eks-for-monitoring-infrastructure.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 010 - Use AWS EKS for monitoring infrastructure -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -13,20 +13,20 @@ Date: 2021-03-22 ## Context -The infrastructure monitoring and alerting platform consists of several services deployed as docker containers. So far these containers have been running on ECS via Fargate, chosen because of the relative ease with which it allows us to get instances provisioned. +The infrastructure monitoring and alerting platform consists of several services deployed as docker containers. So far these containers have been running on ECS via Fargate, chosen because of the relative ease with which it allows us to get instances provisioned. -As the solution has grown, and the interactions between new services have become more complex, we have found that we are running up against Fargate's limitations and require finer-grained control over our deployments. +As the solution has grown, and the interactions between new services have become more complex, we have found that we are running up against Fargate's limitations and require finer-grained control over our deployments. Kubernetes is the industry standard platform for orchestrating and running container based workloads and provides considerably more flexibility in comparison to ECS and Fargate. - + ## Decision Starting with Prometheus and Thanos, we are migrating our services over to AWS's managed Kubernetes offering - [Amazon Elastic Kubernetes Service (EKS)](https://aws.amazon.com/eks/). ## Consequences -While it has the potential to be more complicated due to its increased flexibility, we believe that in the long run, Kubernetes will simplify the operation, maintenance, and improvement of the IMA platform. +While it has the potential to be more complicated due to its increased flexibility, we believe that in the long run, Kubernetes will simplify the operation, maintenance, and improvement of the IMA platform. 
It offers several advantages over Fargate: - Better networking support out of the box enabling: @@ -36,6 +36,6 @@ It offers several advantages over Fargate: - faster development cycle - Simpler and clearer configuration - Less reliance on specific infrastructure (could conceivably run on any Kubernetes cluster, regardless of the provider) - - Reduced overall costs as the team can share the same development Kubernetes cluster + - Reduced overall costs as the team can share the same development Kubernetes cluster - More aligned with common DevOps approaches in wider industry -- The infrastructure will be ready to migrate to another hosting platform like Cloud Platform in the future. ([see issue here](https://github.com/ministryofjustice/cloud-platform/issues/3454)) \ No newline at end of file +- The infrastructure will be ready to migrate to another hosting platform like Cloud Platform in the future. ([see issue here](https://github.com/ministryofjustice/cloud-platform/issues/3454)) diff --git a/source/documentation/adrs/011-use-github-actions-for-ci-cd.html.md.erb b/source/documentation/adrs/011-use-github-actions-for-ci-cd.html.md.erb index 7c6a620..ea8d28b 100644 --- a/source/documentation/adrs/011-use-github-actions-for-ci-cd.html.md.erb +++ b/source/documentation/adrs/011-use-github-actions-for-ci-cd.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 011 - Use GitHub Actions for CI/CD -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- diff --git a/source/documentation/adrs/012-use-techdocs-for-adrs.html.md.erb b/source/documentation/adrs/012-use-techdocs-for-adrs.html.md.erb index 6f9eddb..d57d64e 100644 --- a/source/documentation/adrs/012-use-techdocs-for-adrs.html.md.erb +++ b/source/documentation/adrs/012-use-techdocs-for-adrs.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: 012 - Use Tech-Docs for ADRs -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -12,7 +12,7 @@ Date: 2021-03-22 ✅ Accepted ## Context -We want to make sure our architectural design records are reviewed reguarly. +We want to make sure our architectural design records are reviewed reguarly. If we move our ADRs to tech-docs we can take advantage of [Tech Docs Monitor](https://github.com/ministryofjustice/tech-docs-monitor) ## Decision @@ -20,4 +20,4 @@ It has been decided that we use [TechDocs](https://github.com/ministryofjustice/ ### Advantages -- We will use [Tech Docs Monitor](https://github.com/ministryofjustice/tech-docs-monitor) to remind team to review ADRs and promote knowledge transfer and discussion. \ No newline at end of file +- We will use [Tech Docs Monitor](https://github.com/ministryofjustice/tech-docs-monitor) to remind team to review ADRs and promote knowledge transfer and discussion. 
diff --git a/source/documentation/adrs/adr-index.html.md.erb b/source/documentation/adrs/adr-index.html.md.erb index 7f95419..88c0222 100644 --- a/source/documentation/adrs/adr-index.html.md.erb +++ b/source/documentation/adrs/adr-index.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: Architecture Decision Records index -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- diff --git a/source/documentation/general-information/our-alliance.html.md.erb b/source/documentation/general-information/our-alliance.html.md.erb index eacf1bc..0f3e0ad 100644 --- a/source/documentation/general-information/our-alliance.html.md.erb +++ b/source/documentation/general-information/our-alliance.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: Our Alliance -last_reviewed_on: 2023-06-21 +last_reviewed_on: 2023-10-20 review_in: 3 months --- diff --git a/source/documentation/products/backups.html.md.erb b/source/documentation/products/backups.html.md.erb index f0de849..db484a6 100644 --- a/source/documentation/products/backups.html.md.erb +++ b/source/documentation/products/backups.html.md.erb @@ -1,7 +1,7 @@ --- owner_slack: "#nvvs-devops" title: Backups -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 6 months --- @@ -15,10 +15,10 @@ Product | Type of backup | Retention ---|---|---| DHCP/DNS Admin Portal | RDS (MySQL) | 30 Days DHCP KEA | RDS (MySQL) | 30 Days | -IMA | RDS (Postgres) | 7 Days | -NACS Admin | RDS (MySQL) | 30 Days | +IMA | RDS (Postgres) | 7 Days | +NACS Admin | RDS (MySQL) | 30 Days | -## Backup configuration +## Backup configuration The backup retention is defined as code in the following locations. @@ -26,5 +26,4 @@ Product | variable | database setting ---|---|---| DHCP/DNS Admin Portal | https://github.com/ministryofjustice/staff-device-dns-dhcp-infrastructure/blob/main/variables.tf#L52) | https://github.com/ministryofjustice/staff-device-dns-dhcp-infrastructure/blob/main/modules/admin/db.tf#L18 | DHCP KEA | https://github.com/ministryofjustice/staff-device-dns-dhcp-infrastructure/blob/main/variables.tf | https://github.com/ministryofjustice/staff-device-dns-dhcp-infrastructure/blob/main/modules/dhcp/mysql.tf#L10 | -NACS Admin | https://github.com/ministryofjustice/network-access-control-infrastructure/blob/main/variables.tf#L48 | https://github.com/ministryofjustice/network-access-control-infrastructure/blob/main/modules/admin/db.tf#L15 | - +NACS Admin | https://github.com/ministryofjustice/network-access-control-infrastructure/blob/main/variables.tf#L48 | https://github.com/ministryofjustice/network-access-control-infrastructure/blob/main/modules/admin/db.tf#L15 | diff --git a/source/documentation/products/dhcp.html.md.erb b/source/documentation/products/dhcp.html.md.erb index 9af38a0..8f5468f 100644 --- a/source/documentation/products/dhcp.html.md.erb +++ b/source/documentation/products/dhcp.html.md.erb @@ -1,6 +1,6 @@ --- title: DHCP Overview -last_reviewed_on: 2023-04-11 +last_reviewed_on: 2023-10-20 review_in: 3 months --- @@ -18,14 +18,14 @@ Allows Public internet connectivity for prison staff and enables modern devices. Enable onsite support staff to manage local devices e.g. [DHCP reservation](https://kb.isc.org/docs/what-are-host-reservations-how-to-use-them) using GOV.UK Design System styles and patterns. 
-[Use cloud first](https://www.gov.uk/guidance/use-cloud-first) To meet point 5 of the [Technology Code of Practice](https://www.gov.uk/guidance/the-technology-code-of-practice) (TCoP) and the government’s cloud first policy. +[Use cloud first](https://www.gov.uk/guidance/use-cloud-first) To meet point 5 of the [Technology Code of Practice](https://www.gov.uk/guidance/the-technology-code-of-practice) (TCoP) and the government’s cloud first policy. -[Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) provides a complete audit of changes, versioning of cloud infrastructure and DNS server application, automated testing and redeployment of the service in the event of disaster. +[Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) provides a complete audit of changes, versioning of cloud infrastructure and DNS server application, automated testing and redeployment of the service in the event of disaster. ## Tools The DHCP service uses [ISC KEA](https://www.isc.org/kea/) containers running on [AWS ECS Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html). -We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) to provide a complete audit of changes, versioning of components and the DNS server application, automated testing and redeployment of the service in the event of disaster. +We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) to provide a complete audit of changes, versioning of components and the DNS server application, automated testing and redeployment of the service in the event of disaster. ## Diagram ![High level diagram](../../images/dhcp-hld-diagram.jpeg) @@ -33,7 +33,7 @@ We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code]( ## Repositories -| Repository | Description | +| Repository | Description | | --- | --- | | [DHCP admin portal](https://github.com/ministryofjustice/staff-device-dns-dhcp-admin#readme) | Admin Portal for managing staff device DNS forwarders and zone configuration. | | [DHCP server](https://github.com/ministryofjustice/staff-device-dhcp-server#readme) | This repository contains the Dockerfile to create the ISC DHCP server Docker image. The configuration for this server is managed in the Admin Portal. | @@ -43,10 +43,8 @@ We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code]( ## Useful links -| Link | Description | +| Link | Description | | --- | --- | | [DHCP admin portal](https://dhcp-dns-admin.staff.service.justice.gov.uk/dhcp) | Admin Portal for managing staff device DNS forwarders and zone configuration. *Please not you need to be a member of the AzureAD group `MoJO-EntApp-DNSDHCP_Viewer` to view and `MoJO-EntApp-DNSDHCP_Editor` to edit.* | | [Monitoring and alerting guide](product-monitoring-alerting.html) | List Grafana dashboards for health of the products and slack channels in use for alerts. 
| | [Transit gateway ](https://github.com/ministryofjustice/deployment-tgw) | Connects the service to wider MoJ networks as a virtual WAN | - - diff --git a/source/documentation/products/dns.html.md.erb b/source/documentation/products/dns.html.md.erb index b48f511..6acbc8a 100644 --- a/source/documentation/products/dns.html.md.erb +++ b/source/documentation/products/dns.html.md.erb @@ -1,6 +1,6 @@ --- title: DNS Overview -last_reviewed_on: 2022-08-12 +last_reviewed_on: 2023-10-20 review_in: 3 months --- @@ -24,14 +24,14 @@ Allow our staff access to modern devices by unlocking public internet. Prevent access to malware, ransomware, phishing attacks, viruses, malicious sites, and spyware. [NCSC protective DNS service](https://ncsc.gov.uk/information/pdns). -[Use cloud first](https://www.gov.uk/guidance/use-cloud-first) To meet point 5 of the [Technology Code of Practice](https://www.gov.uk/guidance/the-technology-code-of-practice) (TCoP) and the government’s cloud first policy. +[Use cloud first](https://www.gov.uk/guidance/use-cloud-first) To meet point 5 of the [Technology Code of Practice](https://www.gov.uk/guidance/the-technology-code-of-practice) (TCoP) and the government’s cloud first policy. [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) provides full audit of changes, automated testing, redeployment of the service in the event of failure or disaster. ## Tools The DNS service uses [BIND ISC](https://www.isc.org/bind/) containers running on [AWS ECS Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html). -We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code). This provides an audit of changes, versioning of components and automated testing and redeployment of the service in the event of disaster. +We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code). This provides an audit of changes, versioning of components and automated testing and redeployment of the service in the event of disaster. ## Diagram ![High level diagram](../../images/dns-hld-diagram.jpeg) @@ -39,7 +39,7 @@ We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code]( ## Repositories -| Repository | Description | +| Repository | Description | | --- | --- | | [DNS admin portal](https://github.com/ministryofjustice/staff-device-dns-dhcp-admin#readme) | Admin Portal for managing staff device DNS forwarders and zone configuration. | | [DNS server](https://github.com/ministryofjustice/staff-device-dns-server#readme) | This repository contains the Dockerfile to create the BIND DNS server Docker image. The configuration for this server is managed in the Admin Portal. | @@ -49,7 +49,7 @@ We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code]( ## Useful links -| Link | Description | +| Link | Description | | --- | --- | | [DNS admin portal](https://dhcp-dns-admin.staff.service.justice.gov.uk/dns) | Admin Portal for managing staff device DNS forwarders and zone configuration. *Please not you need to be a member of the AzureAD group `MoJO-EntApp-DNSDHCP_Viewer` and `MoJO-EntApp-DNSDHCP_Editor` to edit.* | | [Monitoring and alerting guide](product-monitoring-alerting.html) | List Grafana dashboards for health of the products and slack channels in use for alerts. 
| diff --git a/source/documentation/products/nacs.html.md.erb b/source/documentation/products/nacs.html.md.erb index 2b6c889..552c873 100644 --- a/source/documentation/products/nacs.html.md.erb +++ b/source/documentation/products/nacs.html.md.erb @@ -1,6 +1,6 @@ --- title: NACS Overview -last_reviewed_on: 2023-05-09 +last_reviewed_on: 2023-10-20 review_in: 3 months --- @@ -16,15 +16,15 @@ Provides access to MOJ networks from suitably configured devices. Enable onsite support staff to manage access of local devices such as printers e.g. [Printer Mac Address reservation](https://kb.isc.org/docs/what-are-host-reservations-how-to-use-them). -[Use cloud first](https://www.gov.uk/guidance/use-cloud-first) To meet point 5 of the [Technology Code of Practice](https://www.gov.uk/guidance/the-technology-code-of-practice) (TCoP) and the government’s cloud first policy. +[Use cloud first](https://www.gov.uk/guidance/use-cloud-first) To meet point 5 of the [Technology Code of Practice](https://www.gov.uk/guidance/the-technology-code-of-practice) (TCoP) and the government’s cloud first policy. -[Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) provides a complete audit of changes, versioning of cloud infrastructure and DNS server application, automated testing and redeployment of the service in the event of disaster. +[Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) provides a complete audit of changes, versioning of cloud infrastructure and DNS server application, automated testing and redeployment of the service in the event of disaster. ## Tools The NACS service uses [Freeradius](https://www.freeradius.org) containers running on [AWS ECS Fargate](https://docs.aws.amazon.com/AmazonECS/latest/userguide/what-is-fargate.html), as well as a seperate ruby on rails admin portal. -We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) to provide a complete audit of changes, versioning of components, automated testing and redeployment of the service in the event of disaster. +We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code](https://en.wikipedia.org/wiki/Infrastructure_as_code) to provide a complete audit of changes, versioning of components, automated testing and redeployment of the service in the event of disaster. ## Diagram ![High level diagram](../../images/nacs-hld-diagram.png) @@ -32,7 +32,7 @@ We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code]( ## Repositories -| Repository | Description | +| Repository | Description | | --- | --- | | [NACS admin portal](https://github.com/ministryofjustice/network-access-control-admin#readme) | Admin Portal for configuring clients, sites, policies and rules. | | [NACS server](https://github.com/ministryofjustice/network-access-control-server#readme) | This repository contains the neccesary code to build the freeradius docker image. | @@ -42,7 +42,7 @@ We use [Terraform](https://www.terraform.io/intro) and [Infrastructure as Code]( ## Useful links -| Link | Description | +| Link | Description | | --- | --- | | [NACS Admin Portal(Live)](https://admin.network-access-control.service.justice.gov.uk/sign_in) | Admin Portal for managing clients, sites, policies and rules. 
*Please note you need to be a member of the AzureAD group `moj-[ENVIRONMENT_NAME]-network-access-control-admin-azure-app`| | [NACS Radius Server Docs] (https://github.com/ministryofjustice/network-access-control-server/tree/main/docs) | NACS Radius Server documentation | diff --git a/source/documentation/team-guide/best-practices/github.html.md.erb b/source/documentation/team-guide/best-practices/github.html.md.erb index 0de1f1c..1b617b5 100644 --- a/source/documentation/team-guide/best-practices/github.html.md.erb +++ b/source/documentation/team-guide/best-practices/github.html.md.erb @@ -1,12 +1,12 @@ --- owner_slack: "#nvvs-devopss" title: GitHub -last_reviewed_on: 2022-08-23 +last_reviewed_on: 2023-10-20 review_in: 3 months --- # Github -The NVVS Dev Ops team follows a GitOps style workflow. Any changes to the system should be made via a pull request against the appropriate repository. +The NVVS Dev Ops team follows a GitOps style workflow. Any changes to the system should be made via a pull request against the appropriate repository. ## All changes via Pull Request @@ -16,12 +16,12 @@ All work undertaken by a member of the NVVS Dev Ops team will take place in a br If you're asking yourself "should I commit now"? then the answer is probably yes! Committing regularly helps with avoiding conflicts. It is especially helpful when you're pulled away to another task and need a little reminder of where you were. -## Main Branch +## Main Branch As GitHubs default practice our main branch is named 'main' and not 'master'. More information [here](https://github.com/github/renaming). ## Gitmoji 😎 -Yes, it made it to our best practices! [Gitmoji](https://gitmoji.dev/) is a great way to tag commits according to what they're doing. It's a great suppliment to your commit messages and can help identify key commits quickly. +Yes, it made it to our best practices! [Gitmoji](https://gitmoji.dev/) is a great way to tag commits according to what they're doing. It's a great suppliment to your commit messages and can help identify key commits quickly. -Most of all - [have fun 🥚](https://www.youtube.com/watch?v=dQw4w9WgXcQ) . \ No newline at end of file +Most of all - [have fun 🥚](https://www.youtube.com/watch?v=dQw4w9WgXcQ) . diff --git a/source/documentation/team-guide/best-practices/use-aws-sso.html.md.erb b/source/documentation/team-guide/best-practices/use-aws-sso.html.md.erb index dec28bf..76ab8bc 100644 --- a/source/documentation/team-guide/best-practices/use-aws-sso.html.md.erb +++ b/source/documentation/team-guide/best-practices/use-aws-sso.html.md.erb @@ -1,163 +1,172 @@ --- owner_slack: "#nvvs-devops" title: Re-configure AWS Vault -last_reviewed_on: 2023-06-22 +last_reviewed_on: 2023-10-20 review_in: 3 month --- # Configure AWS Vault -Follow this guide to re-configure your [AWS Vault](https://github.com/99designs/aws-vault) to work with [AWS SSO](https://aws.amazon.com/single-sign-on/). +Follow this guide to re-configure your [AWS Vault](https://github.com/99designs/aws-vault) to work with [AWS SSO](https://aws.amazon.com/single-sign-on/). -> **Note:** +> **Note:** > -> This guide is for re-configuring AWS Vault running on **Ubuntu**. +> This guide is for re-configuring AWS Vault running on **Ubuntu**. +> Some details are different on MacOS ## Prerequisites -To be able to follow this guide to setup the AWS SSO, you need to have the following already: +To be able to follow this guide to setup the AWS SSO, you need to have the following already: -- [GPG](https://gnupg.org/index.html) installed. 
-- [AWS Vault](https://github.com/99designs/aws-vault#installing) set up. -- Access to [Moj AWS SSO](https://moj.awsapps.com/start#/). +- [GPG](https://gnupg.org/index.html) installed. +- [AWS Vault](https://github.com/99designs/aws-vault#installing) set up. +- Access to [Moj AWS SSO](https://moj.awsapps.com/start#/). -## Generate a GPG key -We will need a GPG key to encrypt credentials in a password store later. So, let's start by generating a GPG key. Run the following command: -`gpg --full-generate-key` +## Generate a GPG key -Press `Enter` for `Please select what kind of key you want:`, to select the default value. +We will need a GPG key to encrypt credentials in a password store later. So, let's start by generating a GPG key. Run the following command: -Press `Enter` again for `What keysize do you want? (3072)`, to select the given size. +`gpg --full-generate-key` -Press `Enter` again for `Key is valid for? (0)`, to set the key to never expire. +Press `Enter` for `Please select what kind of key you want:`, to select the default value. -Confirm this by typing `Y` for `Is this correct? (y/N)` in next step and then press `Enter`. +Press `Enter` again for `What keysize do you want? (3072)`, to select the given size. -Provide a ID for the key in the next step as below: +Press `Enter` again for `Key is valid for? (0)`, to set the key to never expire. -Type ` Password Storage Key` for `Real name:` +Confirm this by typing `Y` for `Is this correct? (y/N)` in next step and then press `Enter`. -You may leave `Email address:` and `Comment:` blank. +Provide a ID for the key in the next step as below: -Finally, confirm all by typing `O` to accept Okay. +Type ` Password Storage Key` for `Real name:` -It will prompt for a `Passphrase`. Create a new passphrase to protect the GPG key. +You may leave `Email address:` and `Comment:` blank. -A new GPG key is now created. +Finally, confirm all by typing `O` to accept Okay. -## Export GPG_TTY variable +It will prompt for a `Passphrase`. Create a new passphrase to protect the GPG key. -If you do not already have, add the below in your `.bashrc` or `.zshrc` file: +A new GPG key is now created. -```` -# gpg -export GPG_TTY=$(tty) -```` +## Export GPG_TTY variable -## Install and initialise pass - -We will use [pass](https://www.passwordstore.org/) as the `backend` for `aws-vault` later. So, let's install it. - -### Install +If you do not already have, add the below in your `.bashrc` or `.zshrc` file: -Run the below command: +```` +# gpg +export GPG_TTY=$(tty) +```` -`sudo apt-get install pass` +## Install and initialise pass -### Setting it up +(Not required on MacOS as it uses the keychain to create a vault) -To begin, run the following command: +We will use [pass](https://www.passwordstore.org/) as the `backend` for `aws-vault` later. So, let's install it. -`pass init " Password Storage Key"` +### Install -Here, ` Password Storage Key` is the ID of the GPG key that you created in the previous step. +Run the below command: -A new Password Store is now created in `~/.password-store`. +`sudo apt-get install pass` -## Finally, update the config file +### Setting it up -Add to / replace with the below, your `~/.aws/config` file and save. 
+To begin, run the following command: -```` -[default] -region=eu-west-2 -output=json - -[profile mojo-shared-services] -sso_start_url = https://moj.awsapps.com/start -sso_region = eu-west-2 -sso_account_id = -sso_role_name = AdministratorAccess -region = eu-west-2 -output = json +`pass init " Password Storage Key"` -[profile mojo-development] -sso_start_url = https://moj.awsapps.com/start -sso_region = eu-west-2 -sso_account_id = -sso_role_name = AdministratorAccess -region = eu-west-2 -output = json +Here, ` Password Storage Key` is the ID of the GPG key that you created in the previous step. -[profile mojo-pre-production] -sso_start_url = https://moj.awsapps.com/start -sso_region = eu-west-2 -sso_account_id =
   
-sso_role_name = AdministratorAccess  
-region = eu-west-2  
-output = json  
+A new Password Store is now created in `~/.password-store`.
 
-[profile mojo-production]  
-sso_start_url = https://moj.awsapps.com/start  
-sso_region = eu-west-2  
-sso_account_id =    
-sso_role_name = AdministratorAccess  
-region = eu-west-2  
-output = json  
+## Finally, update the config file
 
-[profile mojo-shared-services-cli]  
-region = eu-west-2  
-credential_process = /usr/local/bin/aws-vault exec mojo-shared-services --json --backend=pass --prompt=pass  
+On macOS, use the config file from [here](https://github.com/ministryofjustice/provision-ubuntu2004-on-wsl2/blob/master/templates/aws-cli-config.j2).
+It omits ` --backend=pass --prompt=pass` from the config.
 
-[profile mojo-development-cli]  
-credential_process = /usr/local/bin/aws-vault exec mojo-development --json --backend=pass --prompt=pass  
 
-[profile mojo-pre-production-cli]  
-credential_process = /usr/local/bin/aws-vault exec mojo-pre-production --json --backend=pass --prompt=pass  
+Add to / replace with the below, your `~/.aws/config` file and save.
 
-[profile mojo-production-cli]  
-credential_process = /usr/local/bin/aws-vault exec mojo-production --json --backend=pass --prompt=pass  
-````  
+````
+[default]
+region=eu-west-2
+output=json
+
+[profile mojo-shared-services]
+sso_start_url = https://moj.awsapps.com/start
+sso_region = eu-west-2
+sso_account_id = 
+sso_role_name = AdministratorAccess
+region = eu-west-2
+output = json
+
+[profile mojo-development]
+sso_start_url = https://moj.awsapps.com/start
+sso_region = eu-west-2
+sso_account_id = 
+sso_role_name = AdministratorAccess
+region = eu-west-2
+output = json
+
+[profile mojo-pre-production]
+sso_start_url = https://moj.awsapps.com/start
+sso_region = eu-west-2
+sso_account_id = 
+sso_role_name = AdministratorAccess
+region = eu-west-2
+output = json
+
+[profile mojo-production]
+sso_start_url = https://moj.awsapps.com/start
+sso_region = eu-west-2
+sso_account_id = 
+sso_role_name = AdministratorAccess
+region = eu-west-2
+output = json
+
+[profile mojo-shared-services-cli]
+region = eu-west-2
+credential_process = /usr/local/bin/aws-vault exec mojo-shared-services --json --backend=pass --prompt=pass
+
+[profile mojo-development-cli]
+credential_process = /usr/local/bin/aws-vault exec mojo-development --json --backend=pass --prompt=pass
+
+[profile mojo-pre-production-cli]
+credential_process = /usr/local/bin/aws-vault exec mojo-pre-production --json --backend=pass --prompt=pass
+
+[profile mojo-production-cli]
+credential_process = /usr/local/bin/aws-vault exec mojo-production --json --backend=pass --prompt=pass
+````
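+
+As an optional sanity check (a minimal sketch, assuming AWS CLI v2 is installed), you can confirm the new profiles have been picked up before requesting any credentials:
+
+````
+aws configure list-profiles
+````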
 
-## Test your AWS Cli  
+## Test your AWS Cli
 
-Run the below, to set the `AWS_PROFILE` to use the shared services account:  
+Run the command below to set the `AWS_PROFILE` to the shared services account:
 
-`export AWS_PROFILE=mojo-shared-services-cli`  
+`export AWS_PROFILE=mojo-shared-services-cli`
 
-Then, run the below aws command:  
+Then, run the following AWS CLI command:
 
-`aws sts get-caller-identity`  
+`aws sts get-caller-identity`
 
-When prompted, provide the passphrase for GPG key you created earlier.  
+When prompted, provide the passphrase for the GPG key you created earlier.
 
-You will then see web page on your browser to authorise a request:  
+You will then see a web page in your browser asking you to authorise the request:
 
-![AWS SSO authorisation prompt](images/aws_sso_auth_prompt.png)  
+![AWS SSO authorisation prompt](images/aws_sso_auth_prompt.png)
 
-Click `Allow`.  
+Click `Allow`.
 
 You should see the below in the terminal:
 
 ````
-{  
-    "UserId": "@digital.justice.gov.uk",  
-    "Account": "",  
-    "Arn": "arn:aws:sts:::assumed-role/@digital.justice.gov.uk"  
-}  
-````  
+{
+    "UserId": "@digital.justice.gov.uk",
+    "Account": "",
+    "Arn": "arn:aws:sts:::assumed-role/@digital.justice.gov.uk"
+}
+````
 
 Congratulations, you have successfully configured your AWS Vault to work with AWS SSO.
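+
+To work against a different account, export the matching `-cli` profile instead (a minimal sketch; the profile names assume the config above):
+
+````
+export AWS_PROFILE=mojo-development-cli
+aws sts get-caller-identity
+````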
 
@@ -165,7 +174,7 @@ In order for the SSO to work you wil need access to MOJ gihub org. Please contac
 
 ## Forgotten GPG Key Password
 
-In the event you forget the password of your GPG Key you will need to delete it and create another. 
+In the event you forget the password of your GPG Key, you will need to delete it and create another.
 
 List the key. You will need the string under 'pub' later.
 
@@ -194,7 +203,7 @@ Delete this key from the keyring? (y/N) y
 This is a secret key! - really delete? (y/N) y
 ```
 
-Now delete the key itself: 
+Now delete the key itself:
 
 ```
 username@laptop:~$ gpg  --delete-keys DDE8C445795E88ABB4CBCE49C3F2562BD
@@ -218,6 +227,6 @@ gpg: checking the trustdb
 gpg: no ultimately trusted keys found
 ```
 
-Now you must generate a new GPG Key as per the documentation above. 
+Now you must generate a new GPG Key as per the documentation above.
 
-### 
\ No newline at end of file
+###
diff --git a/source/index.html.md.erb b/source/index.html.md.erb
index 1f80c68..d409d72 100644
--- a/source/index.html.md.erb
+++ b/source/index.html.md.erb
@@ -1,7 +1,7 @@
 ---
 owner_slack: "#nvvs-devops"
 title: NVVS DevOps
-last_reviewed_on: 2023-06-21
+last_reviewed_on: 2023-10-20
 review_in: 3 months
 ---
 
@@ -59,6 +59,7 @@ This documentation is for anyone interested in the NVVS DevOps team and its core
 | Repo | Description |
 | --- | --- |
 | [All repositories](https://github.com/ministryofjustice/nvvs-devops#core-repositories) | All repositories that NVVS DevOps maintain
+| [GitHub Team](https://github.com/orgs/ministryofjustice/teams/nvvs-devops-admins/repositories) | Repositories that NVVS DevOps can access, as listed by GitHub
 
 ## Contact Us
 ## Questions and Queries

From a7984197d535e0b9a844cbd7c1c1b0d42b679f67 Mon Sep 17 00:00:00 2001
From: Stephen James 
Date: Fri, 20 Oct 2023 18:05:13 +0100
Subject: [PATCH 2/2] Added a reporting script

Script for quickly reporting outdated pages.
Added as a target to the Makefile.

The Makefile now has a simple help feature, added as the default target.
---
 makefile                                |  14 ++-
 report-for-daniel-the-manual-spaniel.sh | 118 ++++++++++++++++++++++++
 2 files changed, 129 insertions(+), 3 deletions(-)
 create mode 100755 report-for-daniel-the-manual-spaniel.sh

diff --git a/makefile b/makefile
index 379a01a..6d49f0a 100644
--- a/makefile
+++ b/makefile
@@ -1,10 +1,18 @@
+.DEFAULT_GOAL := help
 IMAGE := ministryofjustice/tech-docs-github-pages-publisher:1.4
 
-# Use this to run a local instance of the documentation site, while editing
-.PHONY: preview
-preview:
+.PHONY: preview report help
+
+preview: ## Run a local instance of the documentation site, while editing
 	docker run --rm \
 		-v $$(pwd)/config:/app/config \
 		-v $$(pwd)/source:/app/source \
 		-p 4567:4567 \
 		-it $(IMAGE) /publishing-scripts/preview.sh
+
+
+report: ## Review which pages have expired
+	 ./report-for-daniel-the-manual-spaniel.sh
+
+help: ## Show this help
+	@grep -h -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
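+
+# Example usage (a sketch, run from the repository root):
+#   make            # list the available targets (help is the default goal)
+#   make preview    # serve the docs locally on http://localhost:4567
+#   make report     # run report-for-daniel-the-manual-spaniel.sh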
diff --git a/report-for-daniel-the-manual-spaniel.sh b/report-for-daniel-the-manual-spaniel.sh
new file mode 100755
index 0000000..82b3e1d
--- /dev/null
+++ b/report-for-daniel-the-manual-spaniel.sh
@@ -0,0 +1,118 @@
+#!/usr/bin/env bash
+
+## Simple script to quickly review which pages have expired and which are due
+## to expire in the next three weeks.
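+##
+## Usage (either form; the make target assumes the accompanying makefile change):
+##   ./report-for-daniel-the-manual-spaniel.sh
+##   make report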
+
+function check_dependencies () {
+
+  if ! command -v ag >/dev/null 2>&1; then
+    echo -e "\nThe silver searcher is required to run this script"
+    echo -e "https://github.com/ggreer/the_silver_searcher \n"
+    exit 1
+  fi
+
+  if ! command -v datediff >/dev/null 2>&1; then
+    echo -e "\ndatediff is required to run this script"
+    echo "it is part of 'dateutils' - install with brew or os package manager"
+    echo -e "https://github.com/hroptatyr/dateutils \n"
+    exit 1
+  fi
+}
+
+function readlines () {
+    local N="$1"
+    local line
+    local rc="1"
+
+    # Read at most N lines
+    for i in $(seq 1 $N)
+    do
+        # Try reading a single line
+        read -r line
+        if [ $? -eq 0 ]
+        then
+            # Output line
+            echo "$line"
+            rc="0"
+        else
+            break
+        fi
+    done
+
+    # Return 1 if no lines were read
+    return $rc
+}
+
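+# Walk the ag output four lines at a time (file path, last_reviewed_on line,
+# review_in line, separator), work out each page's expiry date and report it
+# as fine, due to expire within 21 days, or already expired.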
+run_report () {
+  local report
+  local today
+  local expiring_pages
+  local expired_pages
+
+  local last_reviewed_on
+  local review_in
+  local expiry
+  local expiry_diff
+
+  report=$(ag last_reviewed_on -A 1 --ignore "*.txt" --ignore "*.sh" --group)
+  today=$(date '+%Y-%m-%d')
+  expiring_pages="Following pages expiring in the next 3 weeks:\n"
+  expired_pages="Following pages have expired:\n"
+
+  while chunk=$(readlines 4)
+  do
+      echo "******************************************************************************************"
+      echo "$chunk"
+      echo ""
+
+      last_reviewed_on="$(echo "$chunk" | grep "last_reviewed_on" | cut -d " " -f 2)"
+      echo "last_reviewed_on: ${last_reviewed_on}"
+
+      review_in="$(echo "$chunk" | grep "review_in" | cut -d " " -f 2)"
+      echo "review_in: ${review_in}"
+
+      review_in_days=$(( review_in * 30 ))
+      echo "review_in_days: ${review_in_days}"
+
+      expiry=$(date -d "${last_reviewed_on}+${review_in_days}days" '+%Y-%m-%d')
+      echo "expiry: ${expiry}"
+
+      expiry_diff=$(datediff ${today} ${expiry})
+      echo "expiry_diff: ${expiry_diff}"
+
+      page="$(echo "$chunk" | grep "source" | cut -d "/" -f1-)"
+
+      if [[ ${expiry_diff} -gt 0 ]];then
+        echo "****** fine ${page}  *****"
+
+        if [[ ${expiry_diff} -lt 21 ]];then
+          echo "****** due ${page}  *****"
+          expiring_pages+="$(echo -e "\nExpiring in ${expiry_diff} days: ${page}")"
+        fi
+      else
+        echo "****** review ${page}  *****"
+        expired_pages+="$(echo -e "\nExpired ${expiry_diff} days ago: ${page}")"
+      fi
+
+      echo "******************************************************************************************"
+      echo ""
+      echo ""
+  done  <<<"${report}"
+
+  echo ""
+  echo ""
+  echo -e "${expiring_pages}"
+
+  echo ""
+  echo ""
+  echo -e "${expired_pages}"
+  echo ""
+  echo ""
+}
+
+main() {
+  check_dependencies
+  run_report
+}
+
+main