From 3312b57bd2be41cfccb1d9f608b044e2f972ebdc Mon Sep 17 00:00:00 2001 From: Jim Grady Date: Mon, 25 Mar 2024 14:42:10 -0400 Subject: [PATCH] Create script to install The Combine on a standalone system (#2976) * Set Kubernetes version in ansible configs * Add service to write wired ethernet address to external device * Remove backend init container * Check for login credentials when a non-released version is requested * Install on localhost * Remove microk8s, fix lookup on user/group on localhost * create playbook for desktop installation * Comment out docker installation * Set link_kubeconfig to true for localhost * Fix regex for IP address * Add local.thecombine.app to list of certificates for offline use (#2825) * Add role to install helm * Add initial helm repo setup * Setup makeself to build installation self-extractor * Add combinectl command to installation * Add desktop install target * Rename "docker_install" role to "container_engine" * Separate hostname from DNS name for The Combine for local install * Skip test of WiFi access point on desktop * Move install of base helm charts to installation script * Make Kubeconfig file only readable to owner * Update repo for installing kubectl * Add input arguments; resume from last install step * Build combinectl from template * Create readme for installation process * Specify network management tool * Add KUBECONFIG to user profile * Migrate from netplan to network-manager * Fix pattern for WiFi interfaces * Assert that there is only one WiFi interface present * Add state to wait for combine cluster to come up * Update setting of ~/.kube/config to support local & remote access * Set k8s_user to current for ansible install; remove debugging output * Remove unhelpful messages from output when performing a standard installation * Add script for uninstall * Remove template for sudoer file - no longer used * Add ability to print or customize the WiFi passphrase * Add ability to print/set WiFi Password * Convert 
README.md to README.pdf * Add Desktop configuration for The Combine charts --- .gitignore | 5 + README.md | 29 ++ deploy/ansible/group_vars/nuc/main.yml | 14 +- deploy/ansible/group_vars/server/main.yml | 11 + deploy/ansible/host_vars/localhost/main.yml | 62 ++++ deploy/ansible/hosts.yml | 5 + deploy/ansible/playbook_desktop_setup.yaml | 54 ++++ deploy/ansible/playbook_dev_tools.yaml | 35 --- ...e_install.yaml => playbook_nuc_setup.yaml} | 20 +- .../roles/container_engine/defaults/main.yml | 3 + .../tasks/main.yml | 26 +- .../roles/docker_install/defaults/main.yml | 6 - .../roles/docker_install/handlers/main.yml | 3 - .../roles/helm_install/defaults/main.yml | 5 + .../ansible/roles/helm_install/tasks/main.yml | 32 +++ .../roles/k8s_config/defaults/main.yml | 4 - .../ansible/roles/k8s_config/tasks/main.yml | 58 ---- .../roles/k8s_install/defaults/main.yml | 9 +- .../ansible/roles/k8s_install/tasks/k3s.yml | 30 +- .../k8s_install/tasks/k8s_remote_access.yml | 53 ++++ .../ansible/roles/k8s_install/tasks/main.yml | 44 ++- .../roles/monitor_hardware/handlers/main.yml | 1 + .../files/01-network-manager-all.yaml | 4 + .../roles/network_config/tasks/main.yml | 68 ++--- .../roles/package_install/defaults/main.yml | 5 - .../roles/package_install/tasks/main.yml | 15 - .../roles/support_tools/defaults/main.yml | 8 + .../roles/support_tools/files/combinectl.sh | 196 +++++++++++++ .../support_tools/files/display-eth-addr.sh | 30 ++ .../roles/support_tools/handlers/main.yml | 10 + .../roles/support_tools/tasks/main.yml | 40 +++ .../templates/display-eth.service.j2 | 10 + .../templates/display-eth.timer.j2 | 9 + .../ansible/roles/wifi_ap/defaults/main.yml | 5 +- .../ansible/roles/wifi_ap/tasks/install.yml | 19 +- deploy/ansible/roles/wifi_ap/tasks/main.yml | 11 +- .../roles/wifi_ap/templates/create_ap.conf.j2 | 2 +- .../roles/wifi_ap/templates/etc_hosts.j2 | 12 +- deploy/ansible/templates/sudoer.j2 | 1 - deploy/ansible/vars/packages.yml | 8 - 
.../backend/templates/deployment-backend.yaml | 8 - deploy/scripts/aws_env.py | 1 - deploy/scripts/install-combine.sh | 266 ++++++++++++++++++ deploy/scripts/setup_combine.py | 17 +- .../scripts/setup_files/cluster_config.yaml | 6 +- .../scripts/setup_files/combine_config.yaml | 11 + .../scripts/setup_files/profiles/desktop.yaml | 35 +++ deploy/scripts/setup_target.py | 2 +- deploy/scripts/uninstall-combine | 53 ++++ installer/README.md | 160 +++++++++++ installer/make-combine-installer.sh | 10 + 51 files changed, 1287 insertions(+), 244 deletions(-) create mode 100644 deploy/ansible/host_vars/localhost/main.yml create mode 100644 deploy/ansible/playbook_desktop_setup.yaml delete mode 100644 deploy/ansible/playbook_dev_tools.yaml rename deploy/ansible/{playbook_kube_install.yaml => playbook_nuc_setup.yaml} (70%) create mode 100644 deploy/ansible/roles/container_engine/defaults/main.yml rename deploy/ansible/roles/{docker_install => container_engine}/tasks/main.yml (73%) delete mode 100644 deploy/ansible/roles/docker_install/defaults/main.yml delete mode 100644 deploy/ansible/roles/docker_install/handlers/main.yml create mode 100644 deploy/ansible/roles/helm_install/defaults/main.yml create mode 100644 deploy/ansible/roles/helm_install/tasks/main.yml delete mode 100644 deploy/ansible/roles/k8s_config/defaults/main.yml delete mode 100644 deploy/ansible/roles/k8s_config/tasks/main.yml create mode 100644 deploy/ansible/roles/k8s_install/tasks/k8s_remote_access.yml create mode 100644 deploy/ansible/roles/network_config/files/01-network-manager-all.yaml delete mode 100644 deploy/ansible/roles/package_install/defaults/main.yml delete mode 100644 deploy/ansible/roles/package_install/tasks/main.yml create mode 100644 deploy/ansible/roles/support_tools/defaults/main.yml create mode 100755 deploy/ansible/roles/support_tools/files/combinectl.sh create mode 100755 deploy/ansible/roles/support_tools/files/display-eth-addr.sh create mode 100644 
deploy/ansible/roles/support_tools/handlers/main.yml create mode 100644 deploy/ansible/roles/support_tools/tasks/main.yml create mode 100644 deploy/ansible/roles/support_tools/templates/display-eth.service.j2 create mode 100644 deploy/ansible/roles/support_tools/templates/display-eth.timer.j2 delete mode 100644 deploy/ansible/templates/sudoer.j2 delete mode 100644 deploy/ansible/vars/packages.yml create mode 100755 deploy/scripts/install-combine.sh create mode 100644 deploy/scripts/setup_files/profiles/desktop.yaml create mode 100755 deploy/scripts/uninstall-combine create mode 100644 installer/README.md create mode 100755 installer/make-combine-installer.sh diff --git a/.gitignore b/.gitignore index 76d0e9555f..add9aefeb1 100644 --- a/.gitignore +++ b/.gitignore @@ -81,6 +81,11 @@ src/resources/dictionaries/*.txt deploy/scripts/semantic_domains/json/*.json database/semantic_domains/* +# Combine installer +installer/combine-installer.run +installer/makeself-* +installer/README.pdf + # Kubernetes Configuration files **/site_files/ **/charts/*.tgz diff --git a/README.md b/README.md index 21994cb538..f834298763 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,7 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th 6. [Inspect Database](#inspect-database) 7. [Add or Update Dictionary Files](#add-or-update-dictionary-files) 8. [Cleanup Local Repository](#cleanup-local-repository) + 9. [Generate Installer Script for The Combine](#generate-installer-script-for-the-combine-linux-only) 3. [Setup Local Kubernetes Cluster](#setup-local-kubernetes-cluster) 1. [Install Rancher Desktop](#install-rancher-desktop) 2. [Install Docker Desktop](#install-docker-desktop) @@ -127,6 +128,17 @@ A rapid word collection tool. See the [User Guide](https://sillsdev.github.io/Th `dotnet tool update --global dotnet-reportgenerator-globaltool --version 5.0.4` 11. 
[dotnet-project-licenses](https://github.com/tomchavakis/nuget-license) `dotnet tool update --global dotnet-project-licenses` +12. Tools for generating the self installer (Linux only): + + - [makeself](https://makeself.io/) - a tool to make self-extracting archives in Unix + - [pandoc](https://pandoc.org/installing.html#linux) - a tool to convert Markdown documents to PDF. + - `weasyprint` a PDF engine for `pandoc`. + + These can be installed on Debian-based distributions by running: + + ```console + sudo apt install -y makeself pandoc weasyprint + ``` ### Prepare the Environment @@ -505,6 +517,23 @@ of development setup errors. Run from within a Python virtual environment. python scripts/cleanup_local_repo.py ``` +### Generate Installer Script for The Combine (Linux only) + +To generate the installer script, run the following commands starting in the project top level directory: + +```console +cd installer +./make-combine-installer.sh combine-release-number +``` + +where `combine-release-number` is the Combine release to be installed, e.g. `v1.2.0`. + +To update the PDF copy of the installer README.md file, run the following from the `installer` directory: + +```console +pandoc --pdf-engine=weasyprint README.md -o README.pdf +``` + ## Setup Local Kubernetes Cluster This section describes how to create a local Kubernetes cluster using either _Rancher Desktop_ or _Docker Desktop_. 
diff --git a/deploy/ansible/group_vars/nuc/main.yml b/deploy/ansible/group_vars/nuc/main.yml index 5119969493..1c85395b17 100644 --- a/deploy/ansible/group_vars/nuc/main.yml +++ b/deploy/ansible/group_vars/nuc/main.yml @@ -19,7 +19,17 @@ image_pull_secret: aws-login-credentials app_namespace: thecombine k8s_user: sillsdev -k8s_group: sillsdev + +################################################ +# Helm Installation +################################################ +install_helm: no + +################################################ +# Support Tool Settings +################################################ +install_ip_viewer: yes +install_combinectl: yes ####################################### # Ingress configuration @@ -30,7 +40,7 @@ ingress_namespace: ingress-nginx # The server name will direct traffic to the production # server since it is used to get the certificates for the # NUC. -public_dns_name: "{{ ansible_hostname }}" +k8s_dns_name: "{{ ansible_hostname }}" ################################################ # Ethernet settings diff --git a/deploy/ansible/group_vars/server/main.yml b/deploy/ansible/group_vars/server/main.yml index 5e2035b3fa..8bc8a5189f 100644 --- a/deploy/ansible/group_vars/server/main.yml +++ b/deploy/ansible/group_vars/server/main.yml @@ -19,6 +19,17 @@ create_namespaces: [] # k8s namespaces app_namespace: thecombine +################################################ +# Helm Installation +################################################ +install_helm: no + +################################################ +# Support Tool Settings +################################################ +install_ip_viewer: no +install_combinectl: no + ####################################### # Ingress configuration ingress_namespace: ingress-nginx diff --git a/deploy/ansible/host_vars/localhost/main.yml b/deploy/ansible/host_vars/localhost/main.yml new file mode 100644 index 0000000000..7df72dd2a2 --- /dev/null +++ b/deploy/ansible/host_vars/localhost/main.yml @@ 
-0,0 +1,62 @@ +--- +################################################# +# Host specific configuration items for localhost +################################################ + +################################################ +# Configure Kubernetes cluster +################################################ + +# Specify which Kubernetes engine to install - +# one of k3s or none. +k8s_engine: k3s + +image_pull_secret: aws-login-credentials + +# k8s namespaces +app_namespace: thecombine + +k8s_user: "{{ ansible_user_id }}" + +################################################ +# Helm Installation +################################################ +install_helm: yes + +################################################ +# Support Tool Settings +################################################ +install_ip_viewer: no +install_combinectl: yes + +####################################### +# Ingress configuration +ingress_namespace: ingress-nginx + +# For the NUCs we want to use the ansible host name +# since that is how we can connect on the local network +# The server name will direct traffic to the production +# server since it is used to get the certificates for the +# NUC. 
+k8s_dns_name: "local" + +################################################ +# Ethernet settings +################################################ +eth_optional: yes + +################################################ +# WiFi access point settings +################################################ +has_wifi: yes +ap_domain: thecombine.app +ap_ssid: "thecombine_ap" +ap_passphrase: "Combine2020" +ap_gateway: "10.10.10.1" +ap_hostname: "local" +test_wifi: false +################################################ +# hardware monitoring settings +################################################ +include_hw_monitoring: no +history_days: 60 diff --git a/deploy/ansible/hosts.yml b/deploy/ansible/hosts.yml index a18e607101..74e4a4245b 100644 --- a/deploy/ansible/hosts.yml +++ b/deploy/ansible/hosts.yml @@ -1,6 +1,11 @@ --- all: hosts: + localhost: + ansible_connection: local + kubecfgdir: local + combine_server_name: local.thecombine.app + link_kubeconfig: true children: nuc: hosts: diff --git a/deploy/ansible/playbook_desktop_setup.yaml b/deploy/ansible/playbook_desktop_setup.yaml new file mode 100644 index 0000000000..caedfb6a3d --- /dev/null +++ b/deploy/ansible/playbook_desktop_setup.yaml @@ -0,0 +1,54 @@ +--- +############################################################## +# Playbook: playbook_desktop_setup.yml +# +# playbook_desktop_setup.yml installs the packages and +# configuration files that are required to run TheCombine +# as containers managed by a Kubernetes cluster on localhost. 
+# +############################################################## + +- name: Configure hardware for The Combine + hosts: localhost + gather_facts: yes + become: yes + + vars_files: + - "vars/config_common.yml" + + tasks: + - name: Update packages + apt: + update_cache: yes + upgrade: "yes" + + - name: Setup WiFi Access Point + import_role: + name: wifi_ap + when: has_wifi + + - name: Enable hardware monitoring + import_role: + name: monitor_hardware + when: include_hw_monitoring + + - name: Configure Network Interfaces + import_role: + name: network_config + + - name: Install Container Engine + import_role: + name: container_engine + + - name: Install Kubernetes + import_role: + name: k8s_install + + - name: Install Helm + import_role: + name: helm_install + when: install_helm + + - name: Setup Support Tool + import_role: + name: support_tools diff --git a/deploy/ansible/playbook_dev_tools.yaml b/deploy/ansible/playbook_dev_tools.yaml deleted file mode 100644 index d0d916fe2f..0000000000 --- a/deploy/ansible/playbook_dev_tools.yaml +++ /dev/null @@ -1,35 +0,0 @@ ---- -############################################################## -# playbook_dev_tools.yml installs some packages that may be -# useful for developers. In addition, the apt cache is -# updated and existing packages are upgraded. 
-############################################################## - -- name: Install development tools - hosts: all - gather_facts: yes - become: yes - - vars: - developer: "{{ k8s_user | default('none') }}" - tasks: - - name: Update cache and upgrade existing packages - apt: - update_cache: yes - upgrade: "yes" - - - name: Install packages for development - apt: - name: - - emacs-nox - - yaml-mode - - net-tools - - - name: Skip sudo password for {{ developer }} - template: - src: sudoer.j2 - dest: /etc/sudoers.d/{{ developer }} - owner: root - group: root - mode: 0440 - when: developer != 'none' diff --git a/deploy/ansible/playbook_kube_install.yaml b/deploy/ansible/playbook_nuc_setup.yaml similarity index 70% rename from deploy/ansible/playbook_kube_install.yaml rename to deploy/ansible/playbook_nuc_setup.yaml index 2dcb2022d7..a2bd3ce5c6 100644 --- a/deploy/ansible/playbook_kube_install.yaml +++ b/deploy/ansible/playbook_nuc_setup.yaml @@ -1,21 +1,21 @@ --- ############################################################## -# Playbook: playbook_kube_install.yml +# Playbook: playbook_nuc_setup.yml # -# playbook_kube_install.yml installs the packages and +# playbook_nuc_setup.yml installs the packages and # configuration files that are required to run TheCombine -# as Docker containers managed by a Kubernetes cluster. +# as Docker containers managed by a Kubernetes cluster on +# a target PC, such as an Intel NUC. 
# ############################################################## - name: Configure hardware for The Combine - hosts: all + hosts: nuc gather_facts: yes become: yes vars_files: - "vars/config_common.yml" - - "vars/packages.yml" tasks: - name: Update packages @@ -37,16 +37,14 @@ import_role: name: network_config - - name: Install Docker Subsystem + - name: Install Container Engine import_role: - name: docker_install + name: container_engine - name: Install Kubernetes Tools import_role: name: k8s_install - - name: Get Kubernetes Configuration + - name: Setup Support Tool import_role: - name: k8s_config - tags: - - kubeconfig + name: support_tools diff --git a/deploy/ansible/roles/container_engine/defaults/main.yml b/deploy/ansible/roles/container_engine/defaults/main.yml new file mode 100644 index 0000000000..802a83f543 --- /dev/null +++ b/deploy/ansible/roles/container_engine/defaults/main.yml @@ -0,0 +1,3 @@ +--- +container_packages: + - containerd.io diff --git a/deploy/ansible/roles/docker_install/tasks/main.yml b/deploy/ansible/roles/container_engine/tasks/main.yml similarity index 73% rename from deploy/ansible/roles/docker_install/tasks/main.yml rename to deploy/ansible/roles/container_engine/tasks/main.yml index a4fcc6d2f3..3f19a7bf00 100644 --- a/deploy/ansible/roles/docker_install/tasks/main.yml +++ b/deploy/ansible/roles/container_engine/tasks/main.yml @@ -1,16 +1,13 @@ --- ############################################################## -# Role: docker_install +# Role: container_engine # -# Install the Docker Engine, Docker Compose and all their -# pre-requisite packages. +# Install the container engine and pre-requisite packages. # -# The Docker Engine is installed by adding the repo from +# The container engine is installed by adding the repo from # docker.com to our apt sources and installing the relevant -# package. +# packages. 
# -# Docker Compose is installed by downloading the ZIP package -# from GitHub and extracting it to /usr/local/bin ############################################################## - name: Update apt cache. apt: @@ -27,7 +24,6 @@ - gnupg - lsb-release state: present - notify: reboot target - name: Create keyring directory file: @@ -50,6 +46,16 @@ - name: Install Docker Packages apt: - name: "{{ docker_packages }}" + name: "{{ container_packages }}" update_cache: yes - notify: reboot target + +- name: Check if reboot is required + stat: + path: /var/run/reboot-required + register: reboot_required + +- name: Reboot + reboot: + when: + - reboot_required.stat.exists + - ansible_connection != "local" diff --git a/deploy/ansible/roles/docker_install/defaults/main.yml b/deploy/ansible/roles/docker_install/defaults/main.yml deleted file mode 100644 index bdbc2bb4a9..0000000000 --- a/deploy/ansible/roles/docker_install/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -install_credential_helper: false -credential_helper_version: v0.6.3 - -docker_packages: - - containerd.io diff --git a/deploy/ansible/roles/docker_install/handlers/main.yml b/deploy/ansible/roles/docker_install/handlers/main.yml deleted file mode 100644 index 708664f6f8..0000000000 --- a/deploy/ansible/roles/docker_install/handlers/main.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -- name: reboot target - reboot: diff --git a/deploy/ansible/roles/helm_install/defaults/main.yml b/deploy/ansible/roles/helm_install/defaults/main.yml new file mode 100644 index 0000000000..5e6d43b831 --- /dev/null +++ b/deploy/ansible/roles/helm_install/defaults/main.yml @@ -0,0 +1,5 @@ +--- +helm_version: v3.13.2 +helm_arch: linux-amd64 + +helm_download_dir: /opt/helm-{{ helm_version }}-{{ helm_arch }} diff --git a/deploy/ansible/roles/helm_install/tasks/main.yml b/deploy/ansible/roles/helm_install/tasks/main.yml new file mode 100644 index 0000000000..6956f09f06 --- /dev/null +++ b/deploy/ansible/roles/helm_install/tasks/main.yml @@ 
-0,0 +1,32 @@ +--- +- name: Create working directory + file: + path: "{{ helm_download_dir }}" + state: directory + owner: root + group: root + mode: 0755 + +- name: Get Latest Release + get_url: + # https://get.helm.sh/helm-v3.13.2-linux-amd64.tar.gz + url: "https://get.helm.sh/helm-{{ helm_version }}-{{ helm_arch }}.tar.gz" + dest: "{{ helm_download_dir }}/helm.tar.gz" + owner: root + group: root + mode: 0755 + +- name: Unpack helm tarball + command: + cmd: "tar -zxvf {{ helm_download_dir }}/helm.tar.gz" + chdir: "{{ helm_download_dir }}" + creates: "{{ helm_download_dir }}/{{ helm_arch }}/helm" + +- name: Link to extracted helm file + file: + src: "{{ helm_download_dir }}/{{ helm_arch }}/helm" + path: /usr/local/bin/helm + state: link + owner: root + group: root + mode: 0755 diff --git a/deploy/ansible/roles/k8s_config/defaults/main.yml b/deploy/ansible/roles/k8s_config/defaults/main.yml deleted file mode 100644 index b51b2332a9..0000000000 --- a/deploy/ansible/roles/k8s_config/defaults/main.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -# Used to setup the certificate for kubectl -# Can be overridden by specific groups/hosts -public_dns_name: "{{ combine_server_name }}" diff --git a/deploy/ansible/roles/k8s_config/tasks/main.yml b/deploy/ansible/roles/k8s_config/tasks/main.yml deleted file mode 100644 index 2587c57ade..0000000000 --- a/deploy/ansible/roles/k8s_config/tasks/main.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: Get home directory for {{ k8s_user }} - shell: > - getent passwd {{ k8s_user }} | awk -F: '{ print $6 }' - register: k8s_user_home - changed_when: false - -- name: Save kubectl configuration on host - fetch: - src: "{{ k8s_user_home.stdout }}/.kube/config" - dest: "{{ kubecfg }}" - flat: yes - -- name: Restrict permissions to kubeconfig to owner - delegate_to: localhost - become: false - file: - path: "{{ kubecfg }}" - state: file - mode: 0600 - - # The kubeconfig file that is generated by k3s on the target - # system identifies the server by the 
IP address. This updates - # the file when it has been copied to the host to replace the - # IP address with the server name. This is needed in the a - # cloud environment where the IP address seen on the host is not - # the public IP address. For example: - # server: 10.0.0.40:6443 - # is changed to: - # server: nuc2:6443 - # (kubectl communicates with the cluster over port 16443 or 6443) -- name: Replace server IP with DNS name in site_files copy - delegate_to: localhost - become: false - lineinfile: - state: present - path: "{{ kubecfg }}" - regexp: '^(\s+server: https:\/\/)[.0-9]+:(1?6443)' - backrefs: yes - line: '\1{{ public_dns_name }}:\2' - -- name: Replace 'default' cluster, user, etc with {{ kubecfgdir }} - delegate_to: localhost - become: false - replace: - path: "{{ kubecfg }}" - regexp: "^(.*)default(.*)$" - replace: '\1{{ kubecfgdir }}\2' - -- name: Link ~/.kube/config to {{ kubecfg }} - delegate_to: localhost - become: false - file: - state: link - src: "{{ kubecfg }}" - dest: "{{ lookup('env', 'HOME') }}/.kube/config" - mode: 0600 - when: link_kubeconfig | default(false) diff --git a/deploy/ansible/roles/k8s_install/defaults/main.yml b/deploy/ansible/roles/k8s_install/defaults/main.yml index d1445c42d6..450ab1fade 100644 --- a/deploy/ansible/roles/k8s_install/defaults/main.yml +++ b/deploy/ansible/roles/k8s_install/defaults/main.yml @@ -1,7 +1,7 @@ --- # Used to setup the certificate for kubectl # Can be overridden by specific groups/hosts -public_dns_name: "{{ combine_server_name }}" +k8s_dns_name: "{{ combine_server_name }}" k8s_required_pkgs: - apt-transport-https @@ -11,8 +11,11 @@ k8s_required_pkgs: # Options for installing the k3s engine k3s_options: - --write-kubeconfig-mode - - 644 + - 600 - --disable - traefik - --tls-san - - "{{ public_dns_name }}" + - "{{ k8s_dns_name }}" + +k3s_version: "v1.25.14+k3s1" +kubectl_version: "v1.29" diff --git a/deploy/ansible/roles/k8s_install/tasks/k3s.yml b/deploy/ansible/roles/k8s_install/tasks/k3s.yml 
index 94d3aac9b6..b605f045fa 100644 --- a/deploy/ansible/roles/k8s_install/tasks/k3s.yml +++ b/deploy/ansible/roles/k8s_install/tasks/k3s.yml @@ -6,10 +6,10 @@ ################################################ - name: Install k3s shell: - cmd: curl -sfL https://get.k3s.io | sh -s - {{ k3s_options | join(' ') }} + cmd: curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="{{ k3s_version }}" sh -s - {{ k3s_options | join(' ') }} creates: /usr/local/bin/k3s -# Change KillMode from "process" to "mixed" to eliminate 90 wait for k3s containers +# Change KillMode from "process" to "mixed" to eliminate 90s wait for k3s containers # to exit. This limits the ability to upgrade k3s in-place without stopping the # current containers but that is not needed for the NUC use case. - name: Patch k3s service @@ -27,6 +27,12 @@ register: k8s_user_home changed_when: false +- name: Get user group id for {{ k8s_user }} + shell: > + getent passwd {{ k8s_user }} | awk -F: '{ print $4 }' + register: k8s_user_group_id + changed_when: false + - name: Create .kube directories file: path: "{{ item.home }}/.kube" @@ -37,19 +43,23 @@ loop: - home: "{{ k8s_user_home.stdout }}" owner: "{{ k8s_user }}" - group: "{{ k8s_group }}" + group: "{{ k8s_user_group_id.stdout }}" - home: /root owner: root group: root -- name: Set link .kube/config to /etc/rancher/k3s/k3s.yaml - file: - src: /etc/rancher/k3s/k3s.yaml - path: "{{ item }}/.kube/config" - state: link +- name: Copy /etc/rancher/k3s/k3s.yaml to .kube/config + shell: | + cp /etc/rancher/k3s/k3s.yaml {{ item.home }}/.kube/config + chown {{ item.owner }}:{{ item.group }} {{ item.home }}/.kube/config + chmod 600 {{ item.home }}/.kube/config loop: - - "{{ k8s_user_home.stdout }}" - - /root + - home: "{{ k8s_user_home.stdout }}" + owner: "{{ k8s_user }}" + group: "{{ k8s_user_group_id.stdout }}" + - home: /root + owner: root + group: root - name: List contexts command: kubectl --kubeconfig=/etc/rancher/k3s/k3s.yaml config get-contexts diff --git 
a/deploy/ansible/roles/k8s_install/tasks/k8s_remote_access.yml b/deploy/ansible/roles/k8s_install/tasks/k8s_remote_access.yml new file mode 100644 index 0000000000..c23ef7e510 --- /dev/null +++ b/deploy/ansible/roles/k8s_install/tasks/k8s_remote_access.yml @@ -0,0 +1,53 @@ +--- +# +# Setup a kubeconfig file on the host machine so that users can +# connect to the target system when the host is not the target +# system. +# +- name: Save {{ k8s_user_home.stdout }}/.kube/config on host + fetch: + src: "{{ k8s_user_home.stdout }}/.kube/config" + dest: "{{ kubecfg }}" + flat: yes + +- name: Update kubeconfig file + delegate_to: localhost + become: false + block: + - name: Restrict permissions to kubeconfig to owner + file: + path: "{{ kubecfg }}" + state: file + mode: 0600 + + # The kubeconfig file that is generated by k3s on the target + # system identifies the server by the IP address. This updates + # the file when it has been copied to the host to replace the + # IP address with the server name. This is needed in the a + # cloud environment where the IP address seen on the host is not + # the public IP address. 
For example: + # server: 10.0.0.40:6443 + # is changed to: + # server: nuc2:6443 + # (kubectl communicates with the cluster over port 16443 or 6443) + - name: Replace server IP with {{ k8s_dns_name }} in {{ kubecfg }} + lineinfile: + state: present + path: "{{ kubecfg }}" + regexp: '^(\s+server: https:\/\/)[.0-9]+:(1?6443)' + backrefs: yes + line: '\1{{ k8s_dns_name }}:\2' + + - name: Replace 'default' cluster, user, etc with {{ kubecfgdir }} + replace: + path: "{{ kubecfg }}" + regexp: "^(.*)default(.*)$" + replace: '\1{{ kubecfgdir }}\2' + + - name: Link ~/.kube/config to {{ kubecfg }} + file: + state: link + src: "{{ kubecfg }}" + dest: "{{ lookup('env', 'HOME') }}/.kube/config" + mode: 0600 + when: link_kubeconfig | default(false) diff --git a/deploy/ansible/roles/k8s_install/tasks/main.yml b/deploy/ansible/roles/k8s_install/tasks/main.yml index 19db968ab9..533e1f6899 100644 --- a/deploy/ansible/roles/k8s_install/tasks/main.yml +++ b/deploy/ansible/roles/k8s_install/tasks/main.yml @@ -9,18 +9,52 @@ file: "{{ k8s_engine }}.yml" when: k8s_engine != "none" -- name: Download the Google Cloud public signing key +- name: Create keyring directory if necessary + file: + path: /etc/apt/keyrings + state: directory + owner: root + group: root + mode: "0755" + +- name: Download the Kubernetes public signing key shell: cmd: > - curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg - | gpg --dearmor -o /etc/apt/keyrings/kubernetes-archive-keyring.gpg - creates: /etc/apt/keyrings/kubernetes-archive-keyring.gpg + curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.29/deb/Release.key + | gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg + creates: /etc/apt/keyrings/kubernetes-apt-keyring.gpg + +- name: Set signing key permissions + file: + name: /etc/apt/keyrings/kubernetes-apt-keyring.gpg + mode: 0644 + state: file - name: Add repository apt_repository: - repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-archive-keyring.gpg] https://apt.kubernetes.io/ 
kubernetes-xenial main" + repo: "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.29/deb/ /" filename: kubernetes + mode: 0644 - name: Install kubectl apt: name: kubectl + +- name: Get home directory for {{ k8s_user }} + shell: > + getent passwd {{ k8s_user }} | awk -F: '{ print $6 }' + register: k8s_user_home + changed_when: false + +- name: update profile + lineinfile: + state: present + path: "{{ k8s_user_home.stdout }}/.profile" + line: "export KUBECONFIG=${HOME}/.kube/config" + mode: 0600 + when: ansible_connection == "local" + +- name: Setup remote access to cluster + include_tasks: + file: k8s_remote_access.yml + when: ansible_connection != "local" diff --git a/deploy/ansible/roles/monitor_hardware/handlers/main.yml b/deploy/ansible/roles/monitor_hardware/handlers/main.yml index 62bf80bebf..f0e8eaa537 100644 --- a/deploy/ansible/roles/monitor_hardware/handlers/main.yml +++ b/deploy/ansible/roles/monitor_hardware/handlers/main.yml @@ -6,6 +6,7 @@ - name: restart target reboot: + when: ansible_connection != "local" - name: restart sysstat service service: diff --git a/deploy/ansible/roles/network_config/files/01-network-manager-all.yaml b/deploy/ansible/roles/network_config/files/01-network-manager-all.yaml new file mode 100644 index 0000000000..4a8fd08590 --- /dev/null +++ b/deploy/ansible/roles/network_config/files/01-network-manager-all.yaml @@ -0,0 +1,4 @@ +# Let NetworkManager manage all devices on this system +network: + version: 2 + renderer: NetworkManager diff --git a/deploy/ansible/roles/network_config/tasks/main.yml b/deploy/ansible/roles/network_config/tasks/main.yml index 7c5cf2da2d..153991b42c 100644 --- a/deploy/ansible/roles/network_config/tasks/main.yml +++ b/deploy/ansible/roles/network_config/tasks/main.yml @@ -1,50 +1,50 @@ --- ################################## -# Update the netplan configuration files -# for the ethernet interface to mark it -# as optional +# Install network-manager if +# 
necessary and setup dummy +# ethernet connection for +# air gap operation. ################################## -- name: List netplan ethernet configuration files - shell: grep -l "{{ eth_if_pattern }}" /etc/netplan/*.yaml - when: eth_optional - register: net_config - changed_when: false - failed_when: false +- name: Install network-manager + apt: + name: network-manager -- name: Set Ethernet I/F as optional - lineinfile: - path: "{{ item }}" - state: present - insertafter: "^ {{ eth_if_pattern }}" - line: " optional: true" - when: eth_optional - loop: "{{ net_config.stdout_lines }}" - notify: Apply netplan - -- name: Configure resolv.conf to use {{ ap_gateway }} as DNS nameserver - template: - src: resolved.conf.j2 - dest: /etc/systemd/resolved.conf +- name: Set network-manager as renderer + copy: + src: 01-network-manager-all.yaml + dest: /etc/netplan/01-network-manager-all.yaml owner: root group: root - mode: 0644 - notify: Restart resolved - when: has_wifi + mode: 0600 + notify: Apply netplan + +- meta: flush_handlers ### # Create a virtual network interface so that k3s can run # when no ethernet connection is attached. 
### -- name: Create virtual network I/F +- name: Create virtual IP connection + community.general.nmcli: + conn_name: "dummy-{{ virtual_if }}" + ifname: "{{ virtual_if }}" + autoconnect: yes + ip4: 172.16.1.23/16 + gw4: 172.16.1.23 + method4: "manual" + route_metric4: 999 + method6: "link-local" + route_metric6: 999 + state: "present" + type: "dummy" + +- name: Configure resolv.conf to use {{ ap_gateway }} as DNS nameserver template: - src: "{{ item }}.j2" - dest: /etc/systemd/network/{{ virtual_if }}.{{ item }} + src: resolved.conf.j2 + dest: /etc/systemd/resolved.conf owner: root group: root mode: 0644 - with_items: - - netdev - - network - when: k8s_engine != "none" - notify: Restart networkd + notify: Restart resolved + when: has_wifi diff --git a/deploy/ansible/roles/package_install/defaults/main.yml b/deploy/ansible/roles/package_install/defaults/main.yml deleted file mode 100644 index 11a2fee572..0000000000 --- a/deploy/ansible/roles/package_install/defaults/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -required_packages: - - apt-transport-https - - aptitude - - software-properties-common diff --git a/deploy/ansible/roles/package_install/tasks/main.yml b/deploy/ansible/roles/package_install/tasks/main.yml deleted file mode 100644 index f0bc1a54e9..0000000000 --- a/deploy/ansible/roles/package_install/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -################################################### -# Role to update/upgrade all current packages and -# install packages listed in "required_packages" -################################################### - -- name: Upgrade all current packages - apt: - update_cache: "yes" - upgrade: "yes" - -- name: install required packages dependencies - apt: - name: "{{ required_packages }}" - state: present diff --git a/deploy/ansible/roles/support_tools/defaults/main.yml b/deploy/ansible/roles/support_tools/defaults/main.yml new file mode 100644 index 0000000000..76e1a42aca --- /dev/null +++ 
b/deploy/ansible/roles/support_tools/defaults/main.yml @@ -0,0 +1,8 @@ +--- +eth_update_period: 5s +eth_update_program: /usr/local/bin/display-eth-addr + +install_ip_viewer: no +install_combinectl: no + +wifi_interfaces: "{{ ansible_facts.interfaces | select('search', '^wl[op][0-9]+[a-z][a-z0-9]+') }}" diff --git a/deploy/ansible/roles/support_tools/files/combinectl.sh b/deploy/ansible/roles/support_tools/files/combinectl.sh new file mode 100755 index 0000000000..b1bb7eae34 --- /dev/null +++ b/deploy/ansible/roles/support_tools/files/combinectl.sh @@ -0,0 +1,196 @@ +#! /usr/bin/env bash + +usage () { + cat << .EOM + Usage: + combinectl COMMAND [parameters] + + Commands: + help: Print this usage message. + start: Start the combine services. + stop: Stop the combine services. + status: List the status for the combine services. + cert: Print the expiration date for the web certificate. + update release-number: + Update the version of The Combine to the "release-number" + specified. You can see the number of the latest release + at https://github.com/sillsdev/TheCombine/releases. + + Note that not all releases can be updated this way. If + The Combine does not run properly, download and run the + updated install package. + wifi [wifi-passphrase]: + If no parameters are provided, display the wifi + passphrase. If a new passphrase is provided, the + wifi passphrase is updated to the new phrase. + If your passphrase has spaces or special characters, + it is best to enclose your pass phrase in quotation marks (""). + + If the command is omitted or unrecognized, this usage message is + printed. +.EOM +} + +# Get the name of the first wifi interface. In general, +# this script assumes that there is a single WiFi interface
+get-wifi-if () { + IFS=$'\n' WIFI_DEVICES=( $(nmcli d | grep "^wl") ) + if [[ ${#WIFI_DEVICES[@]} -gt 0 ]] ; then + IFS=' ' read -r -a IFNAME <<< "${WIFI_DEVICES[0]}" + echo "${IFNAME[0]}" + else + echo "" + fi +} + +# Restart a WiFi connection that was saved previously +restore-wifi-connection () { + if [ -f "${CACHED_WIFI_CONN}" ] ; then + WIFI_CONN=`cat ${CACHED_WIFI_CONN}` + if [ "$WIFI_CONN" != "--" ] ; then + echo "Restoring connection ${WIFI_CONN}" + sudo nmcli c up "${WIFI_CONN}" + fi + fi +} + +# Save the current WiFi connection and then shut it down +save-wifi-connection () { + # get the name of the WiFi Connection + WIFI_CONN=`nmcli d show "$WIFI_IF" | grep "^GENERAL.CONNECTION" | sed "s|^GENERAL.CONNECTION: *||"` + # save it so we can restore it later + echo "$WIFI_CONN" > ${CACHED_WIFI_CONN} + if [ "$WIFI_CONN" != "--" ] ; then + sudo nmcli c down "$WIFI_CONN" + fi +} + +# Print the expiration date of the TLS Certificate +combine-cert () { + SECRET_NAME=`kubectl -n thecombine get secrets --field-selector type=kubernetes.io/tls -o name` + CERT_DATA=`kubectl -n thecombine get $SECRET_NAME -o "jsonpath={.data['tls\.crt']}"` + echo $CERT_DATA | base64 -d | openssl x509 -enddate -noout| sed -e "s/^notAfter=/Web certificate expires at /" +} + +# Start The Combine services +combine-start () { + echo "Starting The Combine." + if ! systemctl is-active --quiet create_ap ; then + save-wifi-connection + sudo systemctl start create_ap + sudo systemctl restart systemd-resolved + fi + if ! systemctl is-active --quiet k3s ; then + sudo systemctl start k3s + fi +} + +# Stop The Combine services and restore the WiFI +# connection if needed. +combine-stop () { + echo "Stopping The Combine." + if systemctl is-active --quiet k3s ; then + sudo systemctl stop k3s + fi + if systemctl is-active --quiet create_ap ; then + sudo systemctl stop create_ap + restore-wifi-connection + sudo systemctl restart systemd-resolved + fi +} + +# Print the status of The Combine services. 
If the combine is +# "up" then also print that status of the deployments in +# "thecombine" namespace. +combine-status () { + if systemctl is-active --quiet create_ap ; then + echo "WiFi hotspot is Running." + else + echo "WiFi hotspot is Stopped." + fi + if systemctl is-active --quiet k3s ; then + echo "The Combine is Running." + kubectl -n thecombine get deployments + else + echo "The Combine is Stopped." + fi +} + +# Update the image used in each of the deployments in The Combine. This +# is akin to our current update process for Production and QA servers. It +# does *not* update any configuration files or secrets. +combine-update () { + echo "Updating The Combine to $1" + IMAGE_TAG=$1 + while [[ ! $IMAGE_TAG =~ ^v[0-9]+\.[0-9]+\.[0-9]+ ]] ; do + echo "$IMAGE_TAG doesn't look like a valid version." + read -p "Enter a new release number, for example, v1.2.0: " IMAGE_TAG + done + kubectl -n thecombine set image deployment/database database="public.ecr.aws/thecombine/combine_database:$IMAGE_TAG" + kubectl -n thecombine set image deployment/backend backend="public.ecr.aws/thecombine/combine_backend:$IMAGE_TAG" + kubectl -n thecombine set image deployment/frontend frontend="public.ecr.aws/thecombine/combine_frontend:$IMAGE_TAG" + kubectl -n thecombine set image deployment/maintenance maintenance="public.ecr.aws/thecombine/combine_maint:$IMAGE_TAG" +} + +# Print the current password for the WiFi Access point +combine-wifi-list-password () { + WIFI_PASSWD=`grep PASSPHRASE ${WIFI_CONFIG} | sed "s/PASSPHRASE=//g"` + echo "WiFi Password is \"${WIFI_PASSWD}\"" +} + +# Set the password for the WiFi Access point +combine-wifi-set-password () { + # Check that the passphrase is at least 8 characters long + if [[ ${#1} -ge 8 ]] ; then + sudo sed -i "s/PASSPHRASE=.*/PASSPHRASE=$1/" ${WIFI_CONFIG} + if systemctl is-active --quiet create_ap ; then + sudo systemctl restart create_ap + sudo systemctl restart systemd-resolved + fi + combine-wifi-list-password + else + echo "Wifi 
password must be at least 8 characters long." + fi +} + +# Main script entrypoint +WIFI_IF=$(get-wifi-if) +WIFI_CONFIG=/etc/create_ap/create_ap.conf +export KUBECONFIG=${HOME}/.kube/config +COMBINE_CONFIG=${HOME}/.config/combine +CACHED_WIFI_CONN=${COMBINE_CONFIG}/wifi-connection.txt + +# Make sure config directory exists +mkdir -p "${COMBINE_CONFIG}" + +# Print usage if command is missing +if [[ $# -eq 0 ]] ; then + usage + exit 0 +fi + +case "$1" in + help) + usage;; + start) + combine-start;; + stop) + combine-stop;; + stat*) + combine-status;; + cert*) + combine-cert;; + update) + combine-update $2;; + wifi) + if [[ $# -eq 1 ]] ; then + combine-wifi-list-password + else + combine-wifi-set-password $2 + fi + ;; + *) + echo -e "Unrecognized command: \"$1\".\n" + usage;; +esac diff --git a/deploy/ansible/roles/support_tools/files/display-eth-addr.sh b/deploy/ansible/roles/support_tools/files/display-eth-addr.sh new file mode 100755 index 0000000000..653b92a741 --- /dev/null +++ b/deploy/ansible/roles/support_tools/files/display-eth-addr.sh @@ -0,0 +1,30 @@ +#! 
/usr/bin/env bash + +TTY_DEV="${1:-/dev/ttyACM0}" +CLEAR_SCREEN="\\xfe\\x58" +TO_ORIGIN="\\xfe\\x48" +SET_BACKGROUND="\\xfe\\xd0" +RED="\\xff\\x1f\\x1f" +GREEN="\\x3f\\xff\\x7f" + +if [ -e "$TTY_DEV" ] ; then + echo -en "${CLEAR_SCREEN}"> ${TTY_DEV} + # Move cursor to origin + echo -en "${TO_ORIGIN}" > ${TTY_DEV} + ETH_ADD_STR=`ip -4 -br address | grep "^en"` + if [ -n "${ETH_ADD_STR}" ] ; then + ETH_IP=`echo ${ETH_ADD_STR} | sed -E -e 's%^[0-9a-z]+\s+[A-Z]+\s+([0-9\.]+)/.*%\1%'` + ETH_STATUS=`echo ${ETH_ADD_STR} | sed -E -e 's%^[0-9a-z]+\s+([A-Z]+)\s+[0-9\.]+/.*%\1%'` + # set green background + echo -en "${SET_BACKGROUND}${GREEN}" > ${TTY_DEV} + # Print IP address + echo "IP: ${ETH_IP} " > ${TTY_DEV} + # Print I/F Status + echo -n "Status: ${ETH_STATUS} " > ${TTY_DEV} + else + # set red background + echo -en "${SET_BACKGROUND}${RED}" > ${TTY_DEV} + echo " NO ETHERNET" > ${TTY_DEV} + echo -n " CONNECTED" > ${TTY_DEV} + fi +fi diff --git a/deploy/ansible/roles/support_tools/handlers/main.yml b/deploy/ansible/roles/support_tools/handlers/main.yml new file mode 100644 index 0000000000..865e2d0722 --- /dev/null +++ b/deploy/ansible/roles/support_tools/handlers/main.yml @@ -0,0 +1,10 @@ +--- +- name: start display eth + systemd: + name: "{{ item }}" + state: started + enabled: true + daemon_reload: true + with_items: + - display-eth.service + - display-eth.timer diff --git a/deploy/ansible/roles/support_tools/tasks/main.yml b/deploy/ansible/roles/support_tools/tasks/main.yml new file mode 100644 index 0000000000..28c4a0aaa9 --- /dev/null +++ b/deploy/ansible/roles/support_tools/tasks/main.yml @@ -0,0 +1,40 @@ +--- +- name: Install program to write wired ethernet address + copy: + src: display-eth-addr.sh + dest: "{{ eth_update_program }}" + owner: root + group: root + mode: 0755 + when: install_ip_viewer + +- name: Setup service to update display + template: + src: "{{ item }}.j2" + dest: /usr/lib/systemd/system/{{ item }} + owner: root + group: root + mode: 0644 + 
with_items: + - display-eth.service + - display-eth.timer + notify: start display eth + when: install_ip_viewer + +- name: Verify that there is a single WiFi interface + assert: + that: wifi_interfaces|length == 1 + success_msg: "Setup WiFi Interface: {{ wifi_interfaces }}" + fail_msg: | + Only a single WiFi interface is supported. + Found the following interfaces: + {{ ansible_facts.interfaces }} + +- name: Install combinectl tool + copy: + src: combinectl.sh + dest: /usr/local/bin/combinectl + owner: root + group: root + mode: 0755 + when: install_combinectl diff --git a/deploy/ansible/roles/support_tools/templates/display-eth.service.j2 b/deploy/ansible/roles/support_tools/templates/display-eth.service.j2 new file mode 100644 index 0000000000..dd31bfbd1e --- /dev/null +++ b/deploy/ansible/roles/support_tools/templates/display-eth.service.j2 @@ -0,0 +1,10 @@ +[Unit] +Description=Display ethernet address on external device +Wants=display-eth.timer + +[Service] +Type=oneshot +ExecStart={{ eth_update_program }} + +[Install] +WantedBy=multi-user.target diff --git a/deploy/ansible/roles/support_tools/templates/display-eth.timer.j2 b/deploy/ansible/roles/support_tools/templates/display-eth.timer.j2 new file mode 100644 index 0000000000..cf1e79103e --- /dev/null +++ b/deploy/ansible/roles/support_tools/templates/display-eth.timer.j2 @@ -0,0 +1,9 @@ +[Unit] +Description=Timer to trigger display-eth.service + +[Timer] +OnUnitActiveSec={{ eth_update_period }} +OnBootSec={{ eth_update_period }} + +[Install] +WantedBy=timers.target diff --git a/deploy/ansible/roles/wifi_ap/defaults/main.yml b/deploy/ansible/roles/wifi_ap/defaults/main.yml index 9b943cb034..e9a410ea31 100644 --- a/deploy/ansible/roles/wifi_ap/defaults/main.yml +++ b/deploy/ansible/roles/wifi_ap/defaults/main.yml @@ -2,8 +2,11 @@ ap_ssid: "{{ ansible_hostname }}_ap" ap_passphrase: "Set a new passphrase in your config file."
ap_gateway: "10.10.10.1" -ap_domain: example.com +ap_domain: thecombine.app ap_hostname: "{{ ansible_hostname }}" +wifi_if_name: "{{ ansible_interfaces | join(' ') | regex_replace('^.*\\b(wl[a-z0-9]+).*$', '\\1') }}" ap_hosts_config: /etc/create_ap/create_ap.hosts create_ap_config: /etc/create_ap/create_ap.conf + +test_wifi: true diff --git a/deploy/ansible/roles/wifi_ap/tasks/install.yml b/deploy/ansible/roles/wifi_ap/tasks/install.yml index 2ad92970be..1db948b4c7 100644 --- a/deploy/ansible/roles/wifi_ap/tasks/install.yml +++ b/deploy/ansible/roles/wifi_ap/tasks/install.yml @@ -51,13 +51,22 @@ mode: 0644 notify: Restart create_ap -- name: Set /etc/hosts to redirect thecombine to WiFi address - template: - src: etc_hosts.j2 - dest: /etc/hosts +- name: Update localhost name + lineinfile: + path: /etc/hosts + regexp: ^127\.0\.0\.1 + state: present + line: 127.0.0.1 localhost {{ ansible_hostname }} owner: root group: root - mode: 0644 + mode: "0644" + +- name: Redirect traffic for The Combine to the AP gateway + lineinfile: + path: /etc/hosts + regexp: ^{{ ap_gateway.replace(".", "\.") }} + state: present + line: "{{ ap_gateway }} {{ ap_hostname }}.{{ ap_domain }} {{ ap_hostname }}" - name: Install hosts lists for access point template: diff --git a/deploy/ansible/roles/wifi_ap/tasks/main.yml b/deploy/ansible/roles/wifi_ap/tasks/main.yml index 4d2feb07d7..74d8c089a9 100644 --- a/deploy/ansible/roles/wifi_ap/tasks/main.yml +++ b/deploy/ansible/roles/wifi_ap/tasks/main.yml @@ -1,10 +1,11 @@ --- - name: Install WiFi Access point - include: install.yml + include_tasks: + file: install.yml tags: - install - meta: flush_handlers -- name: Set WiFi Access point - include: test.yml - tags: - - test +- name: Test WiFi Access point + include_tasks: + file: test.yml + when: test_wifi diff --git a/deploy/ansible/roles/wifi_ap/templates/create_ap.conf.j2 b/deploy/ansible/roles/wifi_ap/templates/create_ap.conf.j2 index 53a22e5156..98af9aa14c 100644 --- 
a/deploy/ansible/roles/wifi_ap/templates/create_ap.conf.j2 +++ b/deploy/ansible/roles/wifi_ap/templates/create_ap.conf.j2 @@ -21,7 +21,7 @@ FREQ_BAND=2.4 NEW_MACADDR= DAEMONIZE=0 NO_HAVEGED=0 -WIFI_IFACE={{ ansible_interfaces | join(" ") | regex_replace('^.*\\b(wl[a-z0-9]+).*$', '\\1') }} +WIFI_IFACE={{wifi_if_name}} INTERNET_IFACE={{ ansible_interfaces | join(" ") | regex_replace('^.*\\b(e[nt][a-z0-9]+).*$', '\\1') }} SSID={{ ap_ssid }} PASSPHRASE={{ ap_passphrase }} diff --git a/deploy/ansible/roles/wifi_ap/templates/etc_hosts.j2 b/deploy/ansible/roles/wifi_ap/templates/etc_hosts.j2 index 5b2c511514..cebb4294e2 100644 --- a/deploy/ansible/roles/wifi_ap/templates/etc_hosts.j2 +++ b/deploy/ansible/roles/wifi_ap/templates/etc_hosts.j2 @@ -1,9 +1,9 @@ -127.0.0.1 localhost -127.0.1.1 {{ ansible_hostname }} +127.0.0.1 localhost +127.0.1.1 {{ ansible_hostname }} # The following lines are desirable for IPv6 capable hosts -::1 localhost ip6-localhost ip6-loopback -ff02::1 ip6-allnodes -ff02::2 ip6-allrouters +::1 localhost ip6-localhost ip6-loopback +ff02::1 ip6-allnodes +ff02::2 ip6-allrouters -{{ ap_gateway }} {{ ap_hostname }}.{{ ap_domain }} {{ ap_hostname }} +{{ ap_gateway }} {{ ap_hostname }}.{{ ap_domain }} {{ ap_hostname }} rancher.{{ ap_domain }} diff --git a/deploy/ansible/templates/sudoer.j2 b/deploy/ansible/templates/sudoer.j2 deleted file mode 100644 index e2b42d0826..0000000000 --- a/deploy/ansible/templates/sudoer.j2 +++ /dev/null @@ -1 +0,0 @@ -{{ developer }} ALL=(ALL) NOPASSWD: ALL diff --git a/deploy/ansible/vars/packages.yml b/deploy/ansible/vars/packages.yml deleted file mode 100644 index 926764a778..0000000000 --- a/deploy/ansible/vars/packages.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -# Packages required for targets -required_packages: - - python3 - - python3-pip - - apt-transport-https - - aptitude - - software-properties-common diff --git a/deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml 
b/deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml index aec75369c4..2d4043a1e9 100644 --- a/deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml +++ b/deploy/helm/thecombine/charts/backend/templates/deployment-backend.yaml @@ -24,14 +24,6 @@ spec: labels: combine-component: backend spec: - initContainers: - - name: chwonit - image: busybox:stable - command: ["sh","-c","chown -R 999:999 /myvol"] - volumeMounts: - - name: backend-data - mountPath: /myvol/combine-files - imagePullPolicy: IfNotPresent containers: - name: backend image: {{ include "backend.containerImage" . }} diff --git a/deploy/scripts/aws_env.py b/deploy/scripts/aws_env.py index 054d9e0ad9..ed7d983324 100755 --- a/deploy/scripts/aws_env.py +++ b/deploy/scripts/aws_env.py @@ -15,7 +15,6 @@ def aws_version() -> Optional[int]: try: result = run_cmd(["aws", "--version"], check_results=False, chomp=True) except FileNotFoundError: - print("AWS CLI version 2 is not installed.") return None else: if result.returncode == 0: diff --git a/deploy/scripts/install-combine.sh b/deploy/scripts/install-combine.sh new file mode 100755 index 0000000000..5c9d3e7fa7 --- /dev/null +++ b/deploy/scripts/install-combine.sh @@ -0,0 +1,266 @@ +#! /usr/bin/env bash + +# Set the environment variables that are required by The Combine. +# In addition, the values are stored in a file so that they do not +# need to be re-entered on subsequent installations. +set-combine-env () { + if [ ! -f "${CONFIG_DIR}/env" ] ; then + # Generate JWT Secret Key + COMBINE_JWT_SECRET_KEY=`LC_ALL=C tr -dc 'A-Za-z0-9*\-_@!' 
${CONFIG_DIR}/env + export COMBINE_JWT_SECRET_KEY="${COMBINE_JWT_SECRET_KEY}" + export AWS_DEFAULT_REGION="us-east-1" + export AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID}" + export AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY}" + export COMBINE_SMTP_USERNAME="nobody" +.EOF + chmod 600 ${CONFIG_DIR}/env + fi + source ${CONFIG_DIR}/env +} + +# Create the virtual environment needed by the Python installation +# scripts +create-python-venv () { + cd $INSTALL_DIR + # Install required packages + sudo apt install -y python3-pip python3-venv + + ##### + # Setup Python to run ansible + python3 -m venv venv + source venv/bin/activate + python -m pip install --upgrade pip pip-tools + python -m piptools sync requirements.txt +} + +# Install Kubernetes engine and other supporting +# software +install-kubernetes () { + # Let the user know what to expect + cat << .EOM + + The next step sets up the software environment and WiFi Access Point + for The Combine. You will be prompted for your password with the prompt: + + BECOME password: + +.EOM + ##### + # Setup Kubernetes environment and WiFi Access Point + cd ${INSTALL_DIR}/ansible + + ansible-playbook playbook_desktop_setup.yaml -K -e k8s_user=`whoami` +} + +# Set the KUBECONFIG environment variable so that the cluster can +# be reached by the installation scripts. It also starts the k3s +# service if it is not already running. +set-k3s-env () { + ##### + # Setup kubectl configuration file + K3S_CONFIG_FILE=${HOME}/.kube/config + if [ ! -e ${K3S_CONFIG_FILE} ] ; then + echo "Kubernetes (k3s) configuration file is missing." >&2 + exit 1 + fi + export KUBECONFIG=${K3S_CONFIG_FILE} + ##### + # Start k3s if it is not running + if ! 
systemctl is-active --quiet k3s ; then + sudo systemctl start k3s + fi +} + +# Install the public charts used by The Combine, specifically, cert-manager +# and nginx-ingress-controller +install-required-charts () { + set-k3s-env + ##### + # Install base helm charts + helm repo add stable https://charts.helm.sh/stable + helm repo add bitnami https://charts.bitnami.com/bitnami + + ##### + # Setup required cluster services + cd ${INSTALL_DIR} + . venv/bin/activate + cd ${INSTALL_DIR}/scripts + ./setup_cluster.py + deactivate +} + +# Install The Combine +install-the-combine () { + ##### + # Setup The Combine + cd ${INSTALL_DIR} + . venv/bin/activate + cd ${INSTALL_DIR}/scripts + set-combine-env + set-k3s-env + ./setup_combine.py --tag ${COMBINE_VERSION} --repo public.ecr.aws/thecombine --target desktop + deactivate +} + +# Wait until all the combine deployments are "Running" +wait-for-combine () { + # Wait for all combine deployments to be up + while true ; do + combine_status=`kubectl -n thecombine get deployments` + # assert the The Combine is up; if any components are not up, + # set it to false + combine_up=true + for deployment in frontend backend database maintenance ; do + deployment_status=$(echo ${combine_status} | grep "${deployment}" | sed "s/^.*\([0-9]\)\/1.*/\1/") + if [ "$deployment_status" == "0" ] ; then + combine_up=false + break + fi + done + if [ ${combine_up} != true ] ; then + sleep 5 + else + break + fi + done +} + +# Set the next value for STATE and record it in +# the STATE_FILE +next-state () { + STATE=$1 + if [[ "${STATE}" == "Done" && -f "${STATE_FILE}" ]] ; then + rm ${STATE_FILE} + else + echo -n ${STATE} > ${STATE_FILE} + fi +} + +##### +# Setup initial variables +INSTALL_DIR=`pwd` +# Create directory for configuration files +CONFIG_DIR=${HOME}/.config/combine +mkdir -p ${CONFIG_DIR} + +# See if we need to continue from a previous install +STATE_FILE=${CONFIG_DIR}/install-state +if [ -f ${STATE_FILE} ] ; then + STATE=`cat ${STATE_FILE}` 
+else + STATE=Pre-reqs +fi + +# Parse arguments to customize installation +while (( "$#" )) ; do + OPT=$1 + case $OPT in + clean) + next-state "Pre-reqs" + if [ -f ${CONFIG_DIR}/env ] ; then + rm ${CONFIG_DIR}/env + fi + ;; + restart) + next-state "Pre-reqs" + ;; + uninstall) + next-state "Uninstall-combine" + ;; + update|u) + next-state "Install-combine" + ;; + v*) + if [[ $OPT =~ ^v[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9-]+\.[0-9]+)?$ ]] ; then + COMBINE_VERSION="$OPT" + else + echo "Invalid version number, $OPT" + exit 1 + fi + ;; + *) + echo "Unrecognized option: $OPT" >&2 + ;; + esac + shift +done + +# Check that we have a COMBINE_VERSION +if [ -z "${COMBINE_VERSION}" ] ; then + echo "Combine version is not specified." + exit 1 +fi + +# Step through the installation stages +while [ "$STATE" != "Done" ] ; do + case $STATE in + Pre-reqs) + create-python-venv + install-kubernetes + next-state "Restart" + ;; + Restart) + next-state "Base-charts" + if [ -f /var/run/reboot-required ] ; then + echo -e "***** Restart required *****\n" + echo -e "Rerun combine-installer.run after the system has been restarted.\n" + read -p "Restart now? (Y/n) " RESTART + if [[ -z $RESTART || $RESTART =~ ^[yY].* ]] ; then + sudo reboot + else + # We don't call next-state because we don't want the $STATE_FILE + # removed - we want the install script to resume with the recorded + # state. + STATE=Done + fi + fi + ;; + Base-charts) + install-required-charts + next-state "Install-combine" + ;; + Install-combine) + install-the-combine + next-state "Wait-for-combine" + ;; + Wait-for-combine) + # Wait until all the combine deployments are up + echo "Waiting for The Combine components to download." + echo "This may take some time depending on your Internet connection." + echo "Press Ctrl-C to interrupt." 
+ wait-for-combine + next-state "Shutdown-combine" + ;; + Shutdown-combine) + # Shut down the combine services + combinectl stop + # Disable combine services from starting at boot time + sudo systemctl disable create_ap + sudo systemctl disable k3s + # Print the current status + combinectl status + next-state "Done" + ;; + Uninstall-combine) + ${INSTALL_DIR}/scripts/uninstall-combine + next-state "Done" + ;; + *) + echo "Unrecognized STATE: ${STATE}" + rm ${STATE_FILE} + exit 1 + ;; + esac +done diff --git a/deploy/scripts/setup_combine.py b/deploy/scripts/setup_combine.py index 26b9d382bb..bf3d532377 100755 --- a/deploy/scripts/setup_combine.py +++ b/deploy/scripts/setup_combine.py @@ -19,6 +19,7 @@ The script also adds value definitions from a profile specific configuration file if it exists. """ import argparse +import logging import os from pathlib import Path import sys @@ -143,8 +144,8 @@ def create_secrets( else: missing_env_vars.append(item["env_var"]) if len(missing_env_vars) > 0: - print("The following environment variables are not defined:") - print(", ".join(missing_env_vars)) + logging.debug("The following environment variables are not defined:") + logging.debug(", ".join(missing_env_vars)) if not env_vars_req: return secrets_written sys.exit(ExitStatus.FAILURE.value) @@ -170,7 +171,7 @@ def get_target(config: Dict[str, Any]) -> str: try: return input("Enter the target name (Ctrl-C to cancel):") except KeyboardInterrupt: - print("Exiting.") + logging.info("Exiting.") sys.exit(ExitStatus.FAILURE.value) @@ -223,6 +224,16 @@ def add_profile_values( def main() -> None: args = parse_args() + # Setup the logging level. 
The command output will be printed on stdout/stderr + # independent of the logging facility + if args.debug: + log_level = logging.DEBUG + elif args.quiet: + log_level = logging.WARNING + else: + log_level = logging.INFO + logging.basicConfig(format="%(levelname)s:%(message)s", level=log_level) + # Lookup the cluster configuration with open(args.config) as file: config: Dict[str, Any] = yaml.safe_load(file) diff --git a/deploy/scripts/setup_files/cluster_config.yaml b/deploy/scripts/setup_files/cluster_config.yaml index 59f060af00..db6cd382f6 100644 --- a/deploy/scripts/setup_files/cluster_config.yaml +++ b/deploy/scripts/setup_files/cluster_config.yaml @@ -30,7 +30,7 @@ nginx-ingress-controller: name: ingress-controller reference: ingress-nginx/ingress-nginx namespace: ingress-nginx - wait: true + wait: false rancher-ui: repo: @@ -40,10 +40,10 @@ rancher-ui: name: rancher reference: rancher-stable/rancher namespace: cattle-system - version: 2.6.5 + version: 2.7.6 wait: true override: - hostname: rancher.local + hostname: rancher.thecombine.app bootstrapPassword: admin ingress: extraAnnotations: diff --git a/deploy/scripts/setup_files/combine_config.yaml b/deploy/scripts/setup_files/combine_config.yaml index 4e8384fd55..3ce315b835 100644 --- a/deploy/scripts/setup_files/combine_config.yaml +++ b/deploy/scripts/setup_files/combine_config.yaml @@ -18,6 +18,14 @@ targets: thecombine: global: serverName: thecombine.localhost + desktop: + profile: desktop + env_vars_required: false + override: + # override values for 'thecombine' chart + thecombine: + global: + serverName: local.thecombine.app nuc1: profile: nuc env_vars_required: false @@ -99,6 +107,9 @@ profiles: dev: # Profile for local development machines charts: - thecombine + desktop: # Profile for installing The Combine on Ubuntu Desktop (or derivative). 
+ charts: + - thecombine nuc: # Profile for a NUC or a machine whose TLS certificate will be created by another # system and is downloaded from AWS S3 # Container images must be stored in AWS ECR Public repositories diff --git a/deploy/scripts/setup_files/profiles/desktop.yaml b/deploy/scripts/setup_files/profiles/desktop.yaml new file mode 100644 index 0000000000..7a30eaab80 --- /dev/null +++ b/deploy/scripts/setup_files/profiles/desktop.yaml @@ -0,0 +1,35 @@ +--- +################################################ +# Profile specific configuration items +# +# Profile: desktop +################################################ + +charts: + thecombine: + # Disable AWS Login - only run released images from + # public.ecr.aws/thecombine + aws-login: + enabled: false + global: + awsS3Location: local.thecombine.app + combineSmtpUsername: "" + imagePullPolicy: IfNotPresent + pullSecretName: None + frontend: + configOffline: true + configEmailEnabled: false + maintenance: + localLangList: + - "ar" + - "en" + - "es" + - "fr" + - "pt" + - "zh" + + cert-proxy-client: + enabled: true + schedule: "*/5 * * * *" + certManager: + enabled: false diff --git a/deploy/scripts/setup_target.py b/deploy/scripts/setup_target.py index ebf4ebecf8..3bce8d35c7 100755 --- a/deploy/scripts/setup_target.py +++ b/deploy/scripts/setup_target.py @@ -29,7 +29,7 @@ def parse_args() -> argparse.Namespace: def update_hosts_file(tgt_ip: str, tgt_name: str, hosts_filename: Path) -> None: """Map tgt_name to tgt_ip in the specified hosts_filename.""" - match = re.search(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\..(\d{1,3})$", tgt_ip) + match = re.search(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})$", tgt_ip) if match is not None: ip_pattern = tgt_ip.replace(".", r"\.") else: diff --git a/deploy/scripts/uninstall-combine b/deploy/scripts/uninstall-combine new file mode 100755 index 0000000000..aa27e2f99d --- /dev/null +++ b/deploy/scripts/uninstall-combine @@ -0,0 +1,53 @@ +#! 
/usr/bin/env bash + +delete-files () { + for file in "$@" ; do + if [[ -f "$file" ]] ; then + echo "Removing $file" + sudo rm -rf $file >/dev/null 2>&1 + fi + done +} + +kill-service () { + # Stops and disables the specified service + if systemctl is-active $1 ; then + echo "Stopping service $1" + sudo systemctl stop $1 2>&1 >/dev/null + fi + if systemctl is-enabled $1 ; then + echo "Disabling service $1" + sudo systemctl disable $1 + fi +} + +# Stop & disable combine services +echo "Stopping combine services" +kill-service k3s +kill-service create_ap + +# Delete $HOME/thecombine; $HOME/.config/combine +delete-files ${HOME}/thecombine ${HOME}/.config/combine + +# Remove support tool + +kill-service display-eth.service +kill-service display-eth.timer +delete-files /lib/systemd/system/display-eth.service /lib/systemd/system/display-eth.timer + +# Remove combine management tool +delete-files /usr/local/bin/combinectl + + +# Uninstall k3s +if [[ -x /usr/local/bin/k3s-uninstall.sh ]] ; then + /usr/local/bin/k3s-uninstall.sh +fi + +# Remove network configurations +if nmcli c show dummy-vip >/dev/null 2>&1 ; then + sudo nmcli c delete dummy-vip +fi + +# delete create_ap files +delete-files /etc/create_ap /usr/lib/systemd/system/create_ap.service diff --git a/installer/README.md b/installer/README.md new file mode 100644 index 0000000000..06ab5c9cf9 --- /dev/null +++ b/installer/README.md @@ -0,0 +1,160 @@ +# How to Install _The Combine_ + +This README describes how to install _The Combine_ Rapid Word Collection tool on a laptop or desktop PC. + +## Contents + +1. [System Requirements](#system-requirements) +2. [Install _The Combine_](#install-the-combine) +3. [Running _The Combine_](#running-the-combine) +4. 
[Advanced Installation Options](#advanced-installation-options) + +## System Requirements + +_The Combine_ can be installed on a PC that meets the following requirements: + +- Debian-based Linux Operating system +- 6 GB of memory; +- WiFi interface that supports creating a WiFi Hotspot; +- a wired-ethernet connection to the Internet +- User account that can run as `root` with `sudo`. + +The installation script has been tested on _Ubuntu 22.04_ and _Wasta Linux 22.04_. + +## Install _The Combine_ + +1. Plug in the wired ethernet connection to the Internet. +2. Make sure WiFi is "on"; it does not need to be connected to a network. +3. Update all of the existing software packages through your OS's _Software Updater_ application or by running: + + ```bash + sudo apt update && sudo apt upgrade -y + ``` + + This step is optional but will make the installation process go more smoothly. Restart the PC if requested. + +4. Download the installation script from + [https://s3.amazonaws.com/software.thecombine.app/combine-installer.run](https://s3.amazonaws.com/software.thecombine.app/combine-installer.run) +5. Open a terminal window (Ctrl-Alt-T) and make the script executable: + + ```console + cd [path where installer was downloaded] + chmod +x combine-installer.run + ``` + +6. Run the script: + + ```console + cd [path where installer was downloaded] + ./combine-installer.run + ``` + + Notes: + + - The installation script requires elevated privileges to run most of its tasks. You may be prompted for your + password in two ways: + + `[sudo] password for {your username}:` + + or + + `BECOME password:` + + - The first time you run the installation script, it will prompt you for an `AWS_ACCESS_KEY_ID` and an + `AWS_SECRET_ACCESS_KEY`. To get the values to enter here, send a request to the team at + [The Combine](https://software.sil.org/thecombine/#contact) + - When run with no options, ./combine-installer.run will install the current version of _The Combine_. 
+ - If the previous installation did not run to completion, it will resume where the previous installation left off. + +_The Combine_ will not be running when installation is complete. + +## Running _The Combine_ + +### Start _The Combine_ + +To start _The Combine_, open a terminal window and run: + +```console +combinectl start +``` + +### Connecting to _The Combine_ + +Once _The Combine_ has been started it will create a WiFi hotspot for users to access _The Combine_ using any WiFi +enabled device with a browser. It can also be accessed using the browser directly on the machine where _The Combine_ is +running. + +#### Connecting to the WiFi Hotspot + +The wireless network name will be `thecombine_ap`. You can connect your device to this network using the passphrase +`Combine2020`. + +If you would like to change the WiFi passphrase, see the options described in [combinectl Tool](#combinectl-tool). + +#### Connecting to the App + +Open a web browser and navigate to [local.thecombine.app](https://local.thecombine.app). + +If your browser tries to do a web search, add the `https://` to the beginning, that is, +[https://local.thecombine.app](https://local.thecombine.app) + +### Shutting Down _The Combine_ + +To shutdown _The Combine_, open a terminal window and run: + +```console +combinectl stop +``` + +### combinectl Tool + +Once installation is complete, you can use the `combinectl` command to manage the installation. 
The `combinectl` command +is entered in a terminal window as `combinectl COMMAND [parameters]` The possible commands are: + +| Command | Parameters | Description | +| ------- | ----------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| help | N/A | Print a usage message. | +| start | N/A | Start the combine services. | +| stop | N/A | Stop the combine services. | +| status | N/A | List the status for the combine services. | +| cert | N/A | Print the expiration date for the web certificate. | +| update | release-number | Update the version of The Combine to the "release-number" specified. You can see the number of the latest release at [The Combine on GitHub](https://github.com/sillsdev/TheCombine/releases). Note that not all releases can be updated this way. If The Combine does not run properly, download and run the updated install package. | +| wifi | [wifi-passphrase] | If no wifi-passphrase is provieded, the current wifi passphrase is printed. If a new passphase is provided, the wifi passphrase is updated to the new phrase. If your passphrase has spaces or special characters, it is best to enclose your pass phrase in quotation marks (""). | + +If the command is omitted or unrecognized, the help message is printed. + +### Maintaining _The Combine's_ Web Interface + +_The Combine_ requires a web site certificate to be able to provide a secure connection between _The Combine_ and the +web browsers used to enter and cleanup the data. Having a secure connection prevents the browsers from asking the users +to override their security settings. + +_The Combine_ refreshes its certificate when it is connected to the Internet via a wired Ethernet connection. 
A +certificate will be valid for a time between 60 and 90 days. You can use `combinectl` to view when your current +certificate will expire, for example: + +```console + +``` + +## Advanced Installation Options + +To run `combine-installer.run` with options, the option list must be started with `--`. + +`combine-installer.run` supports the following options: + +| option | description | +| -------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| clean | Remove the previously saved environment (AWS Access Key, admin user info) before performing the installation. | +| restart | Run the installation from the beginning; do not resume a previous installation. | +| uninstall | Remove software installed by this script. | +| update | Update _The Combine_ to the version number provided. This skips installing the support software that was installed previously. | +| version-number | Specify a version to install instead of the current version. A version number will have the form `vn.n.n` where `n` represents an integer value, for example, `v1.20.0`. | + +### Examples + +| Command | Effect | +| ------------------------------------------ | ------------------------------------------------------------ | +| `./combine-installer.run -- v1.16.0` | Install version `v1.16.0` of _The Combine_. | +| `./combine-installer.run -- update v2.1.0` | Update an existing Combine installation to version `v2.1.0` | +| `./combine-installer.run -- restart` | Restart the current installation process from the beginning. | diff --git a/installer/make-combine-installer.sh b/installer/make-combine-installer.sh new file mode 100755 index 0000000000..81ad3fdfdd --- /dev/null +++ b/installer/make-combine-installer.sh @@ -0,0 +1,10 @@ +#! 
/usr/bin/env bash + +if [[ $# -gt 0 ]] ; then + COMBINE_VERSION=$1 +fi +if [ -z "${COMBINE_VERSION}" ] ; then + echo "COMBINE_VERSION is not set." + exit 1 +fi +makeself --tar-quietly ../deploy ./combine-installer.run "Combine Installer" scripts/install-combine.sh ${COMBINE_VERSION}