diff --git a/.gitignore b/.gitignore index ebc8481..212d893 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,3 @@ +.vscode +.cache/* www/* -.cache - diff --git a/content/antora.yml b/content/antora.yml index b83b3e7..108be9f 100644 --- a/content/antora.yml +++ b/content/antora.yml @@ -1,27 +1,21 @@ --- name: modules -title: Sample Module Title +title: Experience OpenShift Virtualization version: master nav: - - modules/ROOT/nav.adoc +- modules/ROOT/nav.adoc asciidoc: attributes: - lab_name: Red Hat OpenShift Virtualization Roadshow + lab_name: Experience OpenShift Virtualization release-version: master page-pagination: true - my_var: "foo" - guid: my-guid - ssh_user: devops - ssh_password: devops - ssh_command: ssh devops@bastion.{guid}.example.opentlc.com page-links: - - url: https://redhat.com - text: Red Hat - - url: https://www.redhat.com/en/summit - text: Summit + - url: https://redhat.com + text: Red Hat + - url: https://www.redhat.com/en/summit + text: Summit extensions: - - ./content/lib/tab-block.js - - ./content/lib/remote-include-processor.js - + - ./content/lib/tab-block.js + - ./content/lib/remote-include-processor.js diff --git a/content/modules/ROOT/nav.adoc b/content/modules/ROOT/nav.adoc index 4056d81..cd74b80 100644 --- a/content/modules/ROOT/nav.adoc +++ b/content/modules/ROOT/nav.adoc @@ -1,45 +1,44 @@ * xref:index.adoc[Introduction] -* xref:module-01-intro.adoc[1. Virtual Machine Management ] +* xref:module-01-intro.adoc[Virtual Machine Management ] ** xref:module-01-intro.adoc#create_project[Create a New Project] ** xref:module-01-intro.adoc#create_vm[Create a Linux Virtual Machine] ** xref:module-01-intro.adoc#admin_vms[Administering Virtual Machines] ** xref:module-01-intro.adoc#vm_state[Controlling Virtual Machine State] ** xref:module-01-intro.adoc#live_migrate[Live Migrate a Virtual Machine] -* xref:module-02-mtv.adoc[2. Migrating Existing Virtual Machines] +* xref:module-02-mtv.adoc[Migrating Existing Virtual Machines] ** xref:module-02-mtv.adoc#prerequisites[Prerequisites for Migrations] ** xref:module-02-mtv.adoc#migrating_vms[Migrating Virtual Machines from VMware] -* xref:module-03-baremetal.adoc[3. Bare Metal Infrastructure Management ] -** xref:module-03-baremetal.adoc#review_nodes[Review Nodes and Machines] -** xref:module-03-baremetal.adoc#review_hosts[Review Bare Metal Hosts] -** xref:module-03-baremetal.adoc#scaling_cluster[Scaling the Cluster with a New Bare Metal Host] - -* xref:module-04-storage.adoc[4. Storage Management] +* xref:module-04-storage.adoc[Storage Management] ** xref:module-04-storage.adoc#examine_pvc[Examine the PVC for a VM] ** xref:module-04-storage.adoc#managing_snapshots[Managing Snapshots] ** xref:module-04-storage.adoc#clone_vm[Clone a Virtual Machine] -* xref:module-05-bcdr.adoc[5. Backup and Recovery for Virtual Machines] +* xref:module-05-bcdr.adoc[Backup and Recovery for Virtual Machines] ** xref:module-05-bcdr.adoc#review_operator[Review the OADP Operator] ** xref:module-05-bcdr.adoc#create_backup[Create a Virtual Machine Backup] ** xref:module-05-bcdr.adoc#restore_backup[Restore From a Backup] -* xref:module-06-network.adoc[6. Network Management] -** xref:module-06-network.adoc#create_netattach[Create a Network Attachment Definition] -** xref:module-06-network.adoc#connect_external_net[Connect a Virtual Machine to an External Network] -** xref:module-06-network.adoc#multinetwork_policy[Using a Multinetwork Policy] - -* xref:module-07-tempinst.adoc[7. 
Template and InstanceType Management] +* xref:module-07-tempinst.adoc[Template and InstanceType Management] ** xref:module-07-tempinst.adoc#clone_customize_template[Clone and Customize a Template] ** xref:module-07-tempinst.adoc#create_win[Create a Windows VM Template] ** xref:module-07-tempinst.adoc#instance_types[Introduction to Instance Types] -* xref:module-08-workingvms.adoc[8. Working with Virtual Machines and Applications] +* xref:module-08-workingvms.adoc[Working with Virtual Machines and Applications] ** xref:module-08-workingvms.adoc#lb_concepts[Load Balancer Concepts] ** xref:module-08-workingvms.adoc#lb_config[Load Balancer Configuration] ** xref:module-08-workingvms.adoc#service_route[Exposing an Application with a Service/Route] ** xref:module-08-workingvms.adoc#expose_db[Expose the Database Externally] +// * xref:module-03-baremetal.adoc[Bare Metal Management ] +// ** xref:module-03-baremetal.adoc#review_nodes[Review Nodes and Machines] +// ** xref:module-03-baremetal.adoc#review_hosts[Review Bare Metal Hosts] +// ** xref:module-03-baremetal.adoc#scaling_cluster[Scaling the Cluster with a New Bare Metal Host] + +// * xref:module-06-network.adoc[6. Network Management] +// ** xref:module-06-network.adoc#create_netattach[Create a Network Attachment Definition] +// ** xref:module-06-network.adoc#connect_external_net[Connect a Virtual Machine to an External Network] +// ** xref:module-06-network.adoc#multinetwork_policy[Using a Multinetwork Policy] diff --git a/content/modules/ROOT/pages/index.adoc b/content/modules/ROOT/pages/index.adoc index 106c6db..1a45a1a 100644 --- a/content/modules/ROOT/pages/index.adoc +++ b/content/modules/ROOT/pages/index.adoc @@ -1,4 +1,4 @@ -= Welcome to the {lab_name} ! += Welcome to {lab_name}! [%hardbreaks] == Introduction @@ -14,28 +14,27 @@ In this event we will explore many common management activities that virtualizat *Virtual Infrastructure Administrators* -- Those responsible for the physical infrastructure hosting the OpenShift Virtualization solution. These users will be responsible for physical hardware, storage, and networking changes to the environment, that will affect the day to day operations of the running virtual machines. - == What Content Is Covered In The Roadshow? -These are the *eight* main sections that will be covered: +// These are the *eight* main sections that will be covered: +These are the *six* main sections that will be covered: * _Virtual Machine Management_: In this section we will provide a review of virtual machine management fundamentals, including creating a virtual machine, and modifying it's allotted resources. * _Migrating Existing Virtual Machines_: In this section, we will use the Migration Toolkit for Virtualization (MTV) to migrate a VM from an existing VMware vSphere environment to OpenShift Virtualization. -* _Bare Metal Infrastructure Management_: In this section, an administrator will learn how to scale their OpenShift environment by adding in an additional worker node to host virtualized workloads. +// * _Bare Metal Infrastructure Management_: In this section, an administrator will learn how to scale their OpenShift environment by adding in an additional worker node to host virtualized workloads. * _Storage Management_: The storage paradigm familiar to many administrators changes with OpenShift Virtualization. This section will explore many actions related to storage management for virtual machines. 
* _Backup and Recovery for Virtual Machines_: This unit introduces and demonstrates additional concepts around backing up VMs to external sites and restoring them in the event of a disaster. -* _Network Management_: By default VMs are connected to the pod network in OpenShift. In this section we will explore creating new L2 network mappings, and configuring microsegmentation policies for multiple networks. +// * _Network Management_: By default VMs are connected to the pod network in OpenShift. In this section we will explore creating new L2 network mappings, and configuring microsegmentation policies for multiple networks. -* _Template and InstanceType Management_: In order to streamline deployment of virtual machines, administrators will often create Templates or define InstanceTypes to ease deployment operations. This section will focus on those processes. +* _Template and InstanceType Management_: In order to streamline deployment of virtual machines, administrators will often create Templates or define InstanceTypes to ease deployment operations. This section will focus on those processes. * _Working with Virtual Machines and Applications_: In this section we will perform several day-2 activities with our imported virtual machines, including setting up a load-balancer, and exposing our VM hosted applications through services and routes. - == What is OpenShift Virtualization? * OpenShift Virtualization is a feature of Red Hat OpenShift; it is not an add-on or a separate product and is included with all entitlements. @@ -45,11 +44,10 @@ These are the *eight* main sections that will be covered: * OpenShift Virtualization leverages the RHEL KVM hypervisor and allows the VM to be managed by Kubernetes and KubeVirt. An OpenShift Virtualization VM uses Kubernetes scheduling, network, and storage infrastructure. * OpenShift Virtualization includes entitlements for **unlimited virtual RHEL guests**. Guest licensing for other supported operating systems will need to be purchased separately. * OpenShift Virtualization is SVVP certified with Microsoft for Windows guest support per the same rules that apply to Red Hat’s other KVM virtualization offerings. -* OpenShift Virtualization is currently only supported on bare metal physical servers, typically on-premises or through dedicated hosting. Support for other topologies (OpenShift deployed on virtualized infrastructure like RHV or vSphere) is not available at this time. +* OpenShift Virtualization is currently only supported on bare metal physical servers, typically on-premises or through dedicated hosting. Support for other topologies (OpenShift deployed on virtualized infrastructure like RHV or vSphere) is not available at this time. * Support for https://www.redhat.com/en/blog/managing-virtual-machines-and-containers-as-code-with-openshift-virtualization-on-red-hat-openshift-service-on-aws[AWS / ROSA^] has already been announced, and we are in the process of adding support for OpenShift Virtualization to additional Managed OpenShift Cloud services. * OpenShift Virtualization allows OpenShift to deploy, manage, and connect virtual machines to an OpenShift cluster. This includes the ability to connect to and manage those VMs using Kubernetes-native methods and take advantage of OpenShift features like Pipelines, GitOps, Service Mesh, and more. - == Why switch from a traditional VM platform? 
**Adopt cloud-native development and/or cloud-native operations:** @@ -77,28 +75,28 @@ If you would like to learn more about OpenShift Virtualization, please visit the == Requirements for the Lab Environment * Participant needs to have their own computer with browser and internet access. -* Chromium based browsers are recommended as some copy/paste functions don't work in Firefox for the time being. +* Chromium based browsers are recommended as some copy/paste functions don't work in Firefox for the time being. * Remote access console uses the US keyboard layout by default, so it's good to know where special characters reside for other country's layouts, or to use the copy/paste function in a supported browser. === Credentials for the OpenShift Console -Your OpenShift cluster console is available {openshift_web_console}[here^]. +Your OpenShift cluster console is available {openshift_cluster_console_url}[here^]. Administrator login is available with: -* *User:* {openshift_admin_user} -* *Password:* {openshift_admin_password} +* *User:* {openshift_cluster_admin_username} +* *Password:* {openshift_cluster_admin_password} -=== Bastion Access +//// +=== Command Line Access -A RHEL bastion host is available with common utilities pre-installed and OpenShift command line access pre-configured. +To the right you see a Terminal window. Out of the box you are logged in as a service account - that can't do much. However you can log into OpenShift as the cluster administrator using the following command (hit `y` when prompted if you want to connect to an insecure server): -For SSH access to the bastion execute the following: - -[source,sh,role=execute,subs="attributes"] +[source,sh,subs="attributes",role=execute] ---- -sudo ssh root@192.168.123.100 +oc login -u {openshift_cluster_admin_username} -p {openshift_cluster_admin_password} {openshift_api_server_url} ---- +//// === vCenter Access @@ -106,6 +104,5 @@ In the migration chapter of the lab, you will be asked to login and examine a ht For access, please use the following credentials: -* *vcenter_user:* {vcenter_user} +* *vcenter_user:* {vcenter_full_user} * *vcenter_password:* {vcenter_password} - diff --git a/content/modules/ROOT/pages/module-01-intro.adoc b/content/modules/ROOT/pages/module-01-intro.adoc index c5930b9..93a5f07 100644 --- a/content/modules/ROOT/pages/module-01-intro.adoc +++ b/content/modules/ROOT/pages/module-01-intro.adoc @@ -1,9 +1,8 @@ = Virtual Machine Management - == Introduction -The beginning section of this lab will introduce you to the basics of creating and managing VMs in OpenShift Virtualization. You will see how the web console guides you through the whole process of creating a virtual machine from a pre-defined template. We will then review the properties of that VM, do some basic customizations, and perform actions like live migration, that are often expected of virtual machine administrators. +The beginning section of this lab will introduce you to the basics of creating and managing VMs in OpenShift Virtualization. You will see how the web console guides you through the whole process of creating a virtual machine from a pre-defined template. We will then review the properties of that VM, do some basic customizations, and perform actions like live migration, that are often expected of virtual machine administrators. 
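Under the hood, every VM you create in this lab is stored as a Kubernetes `VirtualMachine` custom resource that the web console builds for you. The snippet below is a minimal, illustrative sketch of such a resource, assuming a Fedora VM named *fedora01* in the *vmexamples* project; the sizing and the referenced DataVolume name are placeholders rather than the exact YAML the template wizard generates.

[source,yaml]
----
apiVersion: kubevirt.io/v1
kind: VirtualMachine
metadata:
  name: fedora01                      # placeholder name used for illustration
  namespace: vmexamples
spec:
  running: true                       # toggled when you start or stop the VM
  template:
    spec:
      domain:
        cpu:
          cores: 1
        memory:
          guest: 2Gi
        devices:
          disks:
            - name: rootdisk
              disk:
                bus: virtio
      volumes:
        - name: rootdisk
          dataVolume:
            name: fedora01-rootdisk   # assumed DataVolume cloned from the template source disk
----

Every property you edit in the console during this module (CPU, memory, disks, network interfaces) ultimately maps back to fields on a resource like this one.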
.*Goals* @@ -14,12 +13,12 @@ The beginning section of this lab will introduce you to the basics of creating a As a reminder, here are your credentials for the OpenShift Console: -Your OpenShift cluster console is available {openshift_web_console}[here^]. +Your OpenShift cluster console is available {openshift_cluster_console_url}[here^]. Administrator login is available with: -* *User:* {openshift_admin_user} -* *Password:* {openshift_admin_password} +* *User:* {openshift_cluster_admin_username} +* *Password:* {openshift_cluster_admin_password} [[create_project]] == Create a New Project @@ -52,13 +51,13 @@ image::module-01-intro/03_Create_Project.png[link=self, window=blank, width=100% [[create_vm]] == Create a Linux Virtual Machine -. From the Virtual Machines inventory, click on the *Create VirtualMachine* button and select *From template* from the drop-down menu. +. From the Virtual Machines inventory, click on the *Create VirtualMachine* button and select *From template* from the drop-down menu. + NOTE: VMs can also be created from an InstanceType wizard as well as created by entering a custom YAML definition, but for this current lab scenario we are going to stick with creating VMs based on existing templates. + image::module-01-intro/04_Create_VM_Button.png[link=self, window=blank, width=100%] -. The wizard will appear showing the available pre-defined VM templates. +. The wizard will appear showing the available pre-defined VM templates. + Reviewing the list of available templates you’ll notice that some have a blue badge which indicates "Source available". These are templates which are using automatically downloaded and stored template source disks. If you were deploying in your own environment, you have the option of preventing these from being created and/or removing those source disks, followed by creating and uploading custom disks for your organization. + @@ -72,7 +71,6 @@ image::module-01-intro/06_Create_VM_Quick.png[link=self, window=blank, width=100 + image::module-01-intro/07_Create_VM_Quick_Name.png[link=self, window=blank, width=100%] -+ . After a few seconds, expect to see the VM is *Running*. During this time, the storage provider has cloned the template disk so that it can be used by the newly created virtual machine. The amount of time this takes can vary based on the storage provider being used to create the boot disk. + image::module-01-intro/08_Fedora_Running.png[link=self, window=blank, width=100%] @@ -95,7 +93,7 @@ image::module-01-intro/10_Fedora_Details.png[link=self, window=blank, width=100% Administering and using virtual machines is more than simply creating and customizing their configuration. As the platform administrator, we also need to be able to control the VM states and trigger live migrations so that we can balance resources, perform maintenance tasks, and reconfigure nodes. -. Click the *Configuration* tab, this is the entry point to obtain information about the resources of the Virtual Machine. +. Click the *Configuration* tab, this is the entry point to obtain information about the resources of the Virtual Machine. + image::module-01-intro/11_Configuration_Tab_Nav.png[link=self, window=blank, width=100%] + @@ -109,13 +107,13 @@ image::module-01-intro/12_Configuration_Tab.png[link=self, window=blank, width=1 * *Scheduling*: This tab includes advanced configuration options indicating where the VM should run and the strategy to follow for eviction. 
This tab is used to configure (anti)affinity rules, configure node selectors and tolerations, and other behaviors that affect which cluster nodes the VM can be scheduled to. * *SSH*: This tab allows you to configure remote access to the machine by creating an SSH service on a configured load-balancer, or by injecting public SSH keys if the feature is enabled. * *Initial run*: This tab allows us to configure _cloud-init_ for Linux or _sys-prep_ for Microsoft Windows, including setting the commands to be executed on the first boot, such as the injection of SSH keys, installation of applications, network configuration, and more. -* *Metadata*: This tab shows current Labels and Annotations applied to the virtual machine. Modifying these values can help us tag our machines for specific purposes, or help us enable automated workflows by uniquely identifying machines. +* *Metadata*: This tab shows current Labels and Annotations applied to the virtual machine. Modifying these values can help us tag our machines for specific purposes, or help us enable automated workflows by uniquely identifying machines. . List the disks associated with the VM by clicking on the *Storage* tab: + image::module-01-intro/13_Storage_Tab.png[link=self, window=blank, width=100%] + -In this environment, the default StorageClass, which defines the source and type of storage used for the disk, is called *ocs-storagecluster-ceph-rbd-virtualization*. This storage is the default type provided by OpenShift Data Foundation (ODF) for running virtual machines. Each storage provider has different storage classes that define the characteristics of the storage backing the VM disk. +In this environment, the default StorageClass, which defines the source and type of storage used for the disk, is called *ocs-external-storagecluster-ceph-rbd*. This storage is the default type provided by OpenShift Data Foundation (ODF) for running virtual machines. Each storage provider has different storage classes that define the characteristics of the storage backing the VM disk. . Examine the network interfaces attached to the VM by clicking on the *Network interfaces* subtab: + @@ -145,7 +143,7 @@ image::module-01-intro/16_VM_Actions_Menu.png[link=self, window=blank, width=100 . Press the *Stop* button and wait until the Virtual Machine is in state *Stopped*. + image::module-01-intro/17_VM_Stopped.png[link=self, window=blank, width=100%] -. Clicking on *Actions*, the option *Start* appears, and the options *Restart* and *Pause* are greyed out. +. Clicking on *Actions*, the option *Start* appears, and the options *Restart* and *Pause* are greyed out. + image::module-01-intro/18_VM_Actions_List_Stopped.png[link=self, window=blank, width=100%] @@ -160,7 +158,7 @@ image::module-01-intro/19_VM_Actions_Paused.png[link=self, window=blank, width=1 [[live_migrate]] == Live Migrate a Virtual Machine -In this section, we will migrate the VM from one OpenShift node to another without shutting down the VM. Live migration requires *ReadWriteMany* (RWX) storage so that the VM disks can be mounted on both the source and destination nodes at the same time. OpenShift Virtualization, unlike other virtualization solutions, does not use monolithic datastores mounted to each cluster member that hold many VM disks for many different VMs. Instead, each VM disk is stored in its own volume that is only mounted when and where it's needed. +In this section, we will migrate the VM from one OpenShift node to another without shutting down the VM. 
Live migration requires *ReadWriteMany* (RWX) storage so that the VM disks can be mounted on both the source and destination nodes at the same time. OpenShift Virtualization, unlike other virtualization solutions, does not use monolithic datastores mounted to each cluster member that hold many VM disks for many different VMs. Instead, each VM disk is stored in its own volume that is only mounted when and where it's needed. . Navigate to the *Overview* tab to see where the worker node is running: + diff --git a/content/modules/ROOT/pages/module-02-mtv.adoc b/content/modules/ROOT/pages/module-02-mtv.adoc index a87e5fc..1e6662a 100644 --- a/content/modules/ROOT/pages/module-02-mtv.adoc +++ b/content/modules/ROOT/pages/module-02-mtv.adoc @@ -7,7 +7,7 @@ This portion of our lab uses the https://access.redhat.com/documentation/en-us/m * Cold migration turns off the source virtual machine before starting the migration. This is the default migration type. * Warm migration copies data while the source virtual machine continues to run. Once the bulk of data has been migrated, the VM is shutdown and the final data is copied to the destination. The new VM can then be started, resulting in a much shorter period of downtime for the VM-hosted application. -NOTE: The migration toolkit has already been deployed to your cluster using the Operator available in OperatorHub. +NOTE: The migration toolkit has already been deployed to your cluster using the Operator available in OperatorHub. Documentation for how to install and configure the Operator can be found https://access.redhat.com/documentation/en-us/migration_toolkit_for_virtualization/2.6/html/installing_and_using_the_migration_toolkit_for_virtualization/installing-the-operator_mtv[here^]. @@ -24,7 +24,6 @@ If you would like to learn more about how to configure the Migration Toolkit for * Create a Migration Plan * Migrate VMs into OpenShift Virtualization - [[prerequisites]] == Prerequisites for Migrations @@ -62,13 +61,13 @@ To help understand the process of mapping resources, such as datastores and port . Navigate to VMware vCenter: https://{vcenter_console}[vCenter Console^] . Login with the following credentials: -- *User:* {vcenter_user} +- *User:* {vcenter_full_user} - *Password:* {vcenter_password} . By default you'll land in the *Inventory* view at the top of the navigation tree. Click the *Workloads* icon and expand the navigation tree until you see the folder that matches your username, and the 4 VMs under it. Click the *VMs* tab at the top of the screen to view the VM details. + image::module-02-mtv/00_Workload_VM_List.png[link=self, window=blank, width=100%] -+ + . Change to the *Networks* view, then expand the tree to view the port group used by the virtual machines. Note that the name is *segment-migrating-to-ocpvirt*. + image::module-02-mtv/01_vSphere_Network.png[link=self, window=blank, width=100%] @@ -92,95 +91,21 @@ MTV 2.4 and later are project/namespace aware and do not require administrator p . By default, there is a provider called *host* which represents *OpenShift Virtualization* as a target platform. + image::module-02-mtv/04_MTV_Provider_List.png[link=self, window=blank, width=100%] -+ -. The lab is already configured with the VMWare provider named *vmware* and it is marked as a migration source. - -//// -However, you will need to register the source vCenter system to the Migration Toolkit for Virtualization as a new provider. - -. 
By default, there is a provider called *host* which represents *OpenShift Virtualization* as a target platform -+ -image::module-02-mtv/04_MTV_Provider_list.png[link=self, window=blank, width=100%] - -. Press *Create Provider* button in the top right. A dialog it will appear. -+ -image::module-02-mtv/05_MTV_Create_Provider.png[link=self, window=blank, width=100%] -+ -. Select *VMware* on the *Provider type* dropdown and fill the following data: -.. *Name*: *vmware* -.. *vCenter host name or IP address*: *portal.vc.opentlc.com* -.. *vCenter user name*: {vcenter_user} -.. *vCenter password*: {vcenter_password} -.. *VDDK init image*: *image-registry.openshift-image-registry.svc:5000/openshift/vddk:latest* -.. *SHA-1 fingerprint*: *70:2D:52:D2:D1:A5:A2:75:58:8F:3D:07:D5:7E:E9:73:81:BC:88:A2* -+ -image::module-02-mtv/06_MTV_Fill_Dialog.png[link=self, window=blank, width=100%] -. Press *Create* and wait till the *Status* column is changed to *Ready* -+ -image::module-02-mtv/07_MTV_Provider_Added.png[link=self, window=blank, width=100%] - -Now MTV knows about your VMware vSphere environment and can connect to it. -//// -//// - -2.6 MADE THIS YAML ONLY, SKIPPING IN LAB FOR NOW, CONTENT STILL HERE IF NEEDED LATER. - -=== Create storage and network mappings - -Storage and networking resources are managed differently in VMware vSphere and Red Hat OpenShift Virtualization. Therefore it is necessary to create mappings from the source datastores and networks in VMware vSphere to the equivalent resources in OpenShift so that the migration toolkit understands how to connect and place virtual machines after they are imported. - -These only need to be configured once and are then reused in subsequent VM Migration Plans. - -. Navigate in the left menu to *Migration* -> *NetworkMaps for virtualization* and click on the *Create NetworkMap* button. -+ -image::module-02-mtv/08_MTV_NetworkMaps.png[link=self, window=blank, width=100%] - -. Fill in the following information in the appeared dialog. Press *Create*. -.. *Name*: *mapping-segment* -.. *Source provider*: *vmware* -.. *Target provider*: *host* -.. Click *Add* -.. *Source networks*: *segment-migrating-to-ocpvirt* -.. *Target network*: *Pod network (default)* -+ -image::module-02-mtv/09_Add_VMWARE_Mapping_Network.png[link=self, window=blank, width=100%] - -. Ensure the created mapping has the *Status* *Ready*. -+ -image::module-02-mtv/10_List_VMWARE_Mapping_Network.png[link=self, window=blank, width=100%] - -. Navigate in the left menu to *Migration* -> *StorageMaps for virtualization* and click on the *Create StorageMap* button. -+ -image::module-02-mtv/11_MTV_StorageMaps.png[link=self, window=blank, width=100%] - -. Fill in the following information. Press *Create*. -.. *Name*: *mapping-datastore* -.. *Source provider*: *vmware* -.. *Target provider*: *host* -.. Click *Add* -.. *Source storage*: *WorkloadDatastore* -.. *Target storage classs*: *ocs-storagecluster-ceph-rbd-virtualization* -+ -image::module-02-mtv/12_Add_VMWARE_Mapping_Storage.png[link=self, window=blank, width=100%] - -. Ensure the created mapping has the *Status* *Ready*. -+ -image::module-02-mtv/13_List_VMWARE_Mapping_Storage.png[link=self, window=blank, width=100%] -//// +. The lab is already configured with the VMWare provider named *vmware* and it is marked as a migration source. === Create a Migration Plan Now that we have reviewed our environment, and have our providers created, it is time for us to create a Migration Plan. 
This plan selects which VMs to migrate from VMware vSphere to Red Hat OpenShift Virtualization and specifics about how to execute the migration. -. Navigate in the left menu to *Migration* -> *Plans for virtualization* and press *Create plan*. +. Navigate in the left menu to *Migration* -> *Plans for virtualization* and press *Create Plan*. + image::module-02-mtv/14_Create_VMWARE_Plan.png[link=self, window=blank, width=100%] -+ + . You will be asked to select the source provider that you intend to migrate from. Click on the *VMware* tile. + image::module-02-mtv/16_VMware_Source_Provider.png[link=self, window=blank, width=100%] -+ + . On the next page select the three VMs you would like to move: * database @@ -190,57 +115,57 @@ image::module-02-mtv/16_VMware_Source_Provider.png[link=self, window=blank, widt . Click *Next*. + image::module-02-mtv/17_VM_Select_VMWARE_Plan.png[link=self, window=blank, width=100%] -+ -. On the next screen you will be tasked with providing details for your migration plan. Several details will already be filled in for you, but you will have to make a few minor modifications to ensure that the VMs land in the correct namespace, and that the networks and storage options map correctly. + +. On the next screen you will be tasked with providing details for your migration plan. Several details will already be filled in for you, but you will have to make a few minor modifications to ensure that the VMs land in the correct namespace, and that the networks and storage options map correctly. + Please fill in your migration plan with the following values: * Plan name: *move-webapp-vmware* * Target namespace: *vmexamples* * Network map: *Pod Networking* -* Storage map: *ocs-storagecluster-ceph-rbd-virtualization* -+ -NOTE: Both the Network and Storage map will automatically detect the Network and Datastore that the discovered virtual machines currently make use of on the source provider. You will just need to make sure that their respective values are set correctly on the OpenShift side. +* Storage map: *ocs-external-storagecluster-ceph-rbd* + +NOTE: Both the Network and Storage map will automatically detect the Network and Datastore that the discovered virtual machines currently make use of on the source provider. You will just need to make sure that their respective values are set correctly on the OpenShift side. + . Click *Create migration plan*. + image::module-02-mtv/18_Create_Migration_Plan.png[link=self, window=blank, width=100%] -+ + . You will be taken to a new screen where you will see that the plan for migration is being made ready. + image::module-02-mtv/19_Migration_Plan_Unready.png[link=self, window=blank, width=100%] -+ + . After a few moments the plan will become *Ready*, click on the green "Play" button in the center of the window to start the migration process. + image::module-02-mtv/20_Migration_Plan_Ready.png[link=self, window=blank, width=100%] -+ + . You will be presented with a confirmation box to begin the migration, click on the *Start* button. + image::module-02-mtv/21_Confirm_Migrate_Start.png[link=self, window=blank, width=100%] -+ + . A progress bar will appear in the center of the screen along with the status of *0 of 3 VMs migrated*. + image::module-02-mtv/22_VMs_Migrating.png[link=self, window=blank, width=100%] -+ + . Click on the *0 of 3 VMs migrated* link and you will be presented with a page with more details about the migration process. + image::module-02-mtv/23_VMs_Migrating_Details.png[link=self, window=blank, width=100%] -+ + . 
You can click the drop-down arrow next to the name of each VM being migrated to get additional details about the stages of the migration process. + image::module-02-mtv/24_VM_Migration_Stages.png[link=self, window=blank, width=100%] + -IMPORTANT: Having many participants performing the same task in parallel in a simulated lab environment can cause this task to perform much slower than in a real environment. For this lab instance we have limited the number of in-flight VMs to 1 at a time. Please be patient with this process as it completes. You may continue with other sections in the roadshow as the migrations complete. -+ +*IMPORTANT:* Having many participants performing the same task in parallel in a simulated lab environment can cause this task to perform much slower than in a real environment. For this lab instance we have limited the number of in-flight VMs to 2 at a time. Please be patient with this process as it completes. You may continue with other sections in the roadshow as the migrations complete. + . After several minutes the migration has completed. + image::module-02-mtv/25_Completed_VMWARE_Plan.png[link=self, window=blank, width=100%] -+ -. The selected VMs have now been migrated and can be started on OpenShift Virtualization. + +. The selected VMs have now been migrated and can be started on OpenShift Virtualization. == Summary -In this section we explored the Migration Toolkit for Virtualization, and used it to assist with the migration of existing virtual machines from a VMware vSphere environment to OpenShift Virtualization. In addition to the Migration Toolkit for Virtualization, there are three other migration toolkits. The combination of these can be used to move many types of workloads into and within OpenShift clusters depending on your organization's needs. +In this section we explored the Migration Toolkit for Virtualization, and used it to assist with the migration of existing virtual machines from a VMware vSphere environment to OpenShift Virtualization. In addition to the Migration Toolkit for Virtualization, there are three other migration toolkits. The combination of these can be used to move many types of workloads into and within OpenShift clusters depending on your organization's needs. * https://developers.redhat.com/products/mtr/overview[Migration Toolkit for Runtimes^] - Assist and accelerate Java application modernization and migration. * https://access.redhat.com/documentation/en-us/migration_toolkit_for_applications/[Migration Toolkit for Applications^] - Accelerate large-scale application modernization efforts to containers and Kubernetes. diff --git a/content/modules/ROOT/pages/module-03-baremetal.adoc b/content/modules/ROOT/pages/module-03-baremetal.adoc index a9d6859..552877c 100644 --- a/content/modules/ROOT/pages/module-03-baremetal.adoc +++ b/content/modules/ROOT/pages/module-03-baremetal.adoc @@ -10,14 +10,13 @@ In this section, you will review the physical environment used during this works * Review the physical characteristics of the lab cluster. * Discover a new baremetal machine, and use it to scale the cluster. - === Supported Platforms Today, OpenShift virtualization is fully supported in the following environments that provide bare metal resources: * Self-managed _bare metal servers_ on-premises or at a hosted site that provides bare metal resources. The lab you're using today is an OpenShift cluster deployed to an Equinix colocation facility. 
-* Amazon Web Services (AWS) bare metal instances, as well as ROSA (Red Hat OpenShift Services on AWS) with bare metal instances, are fully supported. See link:https://www.redhat.com/en/blog/managing-virtual-machines-and-containers-as-code-with-openshift-virtualization-on-red-hat-openshift-service-on-aws[OpenShift Virtualization on ROSA^]. +* Amazon Web Services (AWS) bare metal instances, as well as ROSA (Red Hat OpenShift Services on AWS) with bare metal instances, are fully supported. See link:https://www.redhat.com/en/blog/managing-virtual-machines-and-containers-as-code-with-openshift-virtualization-on-red-hat-openshift-service-on-aws[OpenShift Virtualization on ROSA^]. * IBM Cloud Bare Metal Servers are currently _tech preview_. See link:https://access.redhat.com/articles/6738731[Deploy OpenShift Virtualization on IBM Cloud Bare Metal Nodes^] for details. @@ -46,7 +45,7 @@ Before you install OpenShift Virtualization for Red Hat OpenShift Container Plat Review the documentation https://docs.openshift.com/container-platform/4.15/virt/install/preparing-cluster-for-virt.html[here^] for specific requirements and guidance on hardware for use with OpenShift Virtualization. -NOTE: If your cluster uses worker nodes with different CPUs, e.g. Intel and AMD, live migration failures can occur because different CPUs have different capabilities. +NOTE: If your cluster uses worker nodes with different CPUs, e.g. Intel and AMD, live migration failures can occur because different CPUs have different capabilities. === Review Red Hat OpenShift Cluster @@ -72,7 +71,7 @@ A node is a virtual or bare metal machine in an OpenShift cluster. Worker nodes image::module-03-baremetal/02_Worker0_Information.png[link=self, window=blank, width=100%] + The *Overview* tab is showing useful information about the utilization of the resources, such as CPU and memory. It also shows all the applications (*Pods*) that are running inside on this node. - ++ NOTE: At least one physical node is required for OpenShift Virtualization, "nesting" and emulation are not supported. However, other nodes in the cluster can be virtual machines, for example control plane and infrastructure nodes. . Navigate to the tab *Details* to obtain more information about the operating system. @@ -110,33 +109,33 @@ To begin this process we are going to return to the bare metal hosts screen we r . Select *Compute* -> *Bare Metal Hosts*: + image::module-03-baremetal/04_BMHosts.png[link=self, window=blank, width=100%] -+ + . Click on the *Add Host* button in the upper right corner, and select the *New with Dialog* option. + image::module-03-baremetal/06_Add_Host_Red.png[link=self, window=blank, width=100%] -+ + . The dialog menu to add a bare metal host will ask you for the following information: + * Host Name: *worker4* * Boot MAC Address: *de:ad:be:ef:00:07* * BMC Address: *ipmi://192.168.123.1:6237* * BMC Username: *admin* -* BMC Password: *redhat* -+ +* BMC Password: *redhat* + . With this information filled out, click the *Create* button at the bottom of the dialog page. + image::module-03-baremetal/07_Create_Host_Red.png[link=self, window=blank, width=100%] -+ + . You will then be presented with the summary screen for *worker4*, and you will see the status update as it attempts to contact the machine and make it available as a host. + image::module-03-baremetal/08_Worker4_Summary_1.png[link=self, window=blank, width=100%] + NOTE: This step may take several minutes to update as it powers up the host, and collects hardware information. -+ + . 
When host discovery and hardware inspection is complete you will see that it shows it's status as *Available*. + image::module-03-baremetal/09_Worker4_Summary_2.png[link=self, window=blank, width=100%] -+ + . Because this lab is being hosted in a virtualized environment we need to make a small configuration change before continuing. Click on the *YAML* tab at the top, and add the following two lines to the end of the *spec:* section to modify the type of hard disk present on the machine. Click the *Save* button. + [source,yaml,role=execute] @@ -146,44 +145,43 @@ image::module-03-baremetal/09_Worker4_Summary_2.png[link=self, window=blank, wid ---- + image::module-03-baremetal/09a_Worker4_Yaml_Edit.png[link=self, window=blank, width=100%] -+ + . Once a host has been physically discovered the next step is to add it as a machine to be used by OpenShift. Click on the menu for *MachineSets* on the left under *Compute*. + image::module-03-baremetal/10_Machinesets.png[link=self, window=blank, width=100%] -+ + . Click on the *three-dot* menu on the top-right side, and select *Edit Machine count* from the dropdown menu. + image::module-03-baremetal/11_Edit_Machine_Count.png[link=self, window=blank, width=100%] -+ + . A new menu will appear showing the current machine count of *3*, click the plus (+) sign to increase the machine count to *4*. + image::module-03-baremetal/12_Edit_Machine_Count_4.png[link=self, window=blank, width=100%] -+ + . You will be returned to the MachineSets page, and you can now see that the count of machines is 3/4 machines. + image::module-03-baremetal/13_Machine_Count_3_4.png[link=self, window=blank, width=100%] -+ -. Next, click on the *Machines* button on the left under *Compute* to see a list of all the machines, and you should see worker4 in the *Provisioning* state. + +. Next, click on the *Machines* button on the left under *Compute* to see a list of all the machines, and you should see worker4 in the *Provisioning* state. + NOTE: This step can take a few minutes to complete, as the node reboots several times during the installation process, please feel free to continue with next roadshow module and come back and check on the status of this step at your leisure. + image::module-03-baremetal/14_Worker_4_Provisioning.png[link=self, window=blank, width=100%] -+ + . Once provisioning is complete you will see the node listed with it's Phase set to *Provisioned as node*. + image::module-03-baremetal/15_Provisioned_As_Node.png[link=self, window=blank, width=100%] -+ + . Since our new host has now been added to the machineset and provisioned as a node, we can now see it available if we click on the *Nodes* menu on the left. + image::module-03-baremetal/16_All_Nodes.png[link=self, window=blank, width=100%] -+ + . We can also click directly on *worker4* under the *Name* column to see it's current status. + image::module-03-baremetal/17_Worker_4_Details.png[link=self, window=blank, width=100%] -+ + . The details screen for worker4 is now populated with hardware information including CPU and Memory utilization, as well as the number of Pods assigned to our new worker node. == Summary In this lab, you became familiar with your Red Hat OpenShift cluster and the hardware that makes up the environment. You also used the web console to expand your cluster by discovering an additional bare metal node, and adding it to the cluster machineset used to scale the number of worker nodes that are available. 
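For reference, the host added through the *Add Host* dialog is stored by the cluster as a `BareMetalHost` resource in the *openshift-machine-api* namespace, together with a Secret holding the BMC credentials. The sketch below is an illustrative approximation built from the dialog values used above; the Secret name is an assumption (the console generates its own), and the extra disk-hint lines added on the YAML tab are intentionally not shown here.

[source,yaml]
----
apiVersion: v1
kind: Secret
metadata:
  name: worker4-bmc-secret            # assumed name; the console creates this Secret for you
  namespace: openshift-machine-api
type: Opaque
stringData:
  username: admin
  password: redhat
---
apiVersion: metal3.io/v1alpha1
kind: BareMetalHost
metadata:
  name: worker4
  namespace: openshift-machine-api
spec:
  online: true
  bootMACAddress: "de:ad:be:ef:00:07"
  bmc:
    address: ipmi://192.168.123.1:6237
    credentialsName: worker4-bmc-secret
----

Scaling the MachineSet afterwards lets the machine API pick up an *Available* BareMetalHost such as this one and provision it into a worker node, which is the behavior you observed above.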
- diff --git a/content/modules/ROOT/pages/module-04-storage.adoc b/content/modules/ROOT/pages/module-04-storage.adoc index 807dbbe..1b08bba 100644 --- a/content/modules/ROOT/pages/module-04-storage.adoc +++ b/content/modules/ROOT/pages/module-04-storage.adoc @@ -12,20 +12,20 @@ image::module-04-storage/00_disk_concepts.png[link=self, window=blank, width=100 == Examine the PVC for a VM -In this lab, we are going to take a closer look at the storage behind the virtual machine we just created *fedora01*. +In this lab, we are going to take a closer look at the storage behind the virtual machine we just created *fedora01*. . Start by clicking on the left menu for *Storage* -> *Persistent Volume Claims*. Make sure you are in the *vmexamples* namespace, you should see the *fedora01* VM from the previous section listed. + //add image + -. Click on the *fedora01* VM and you will be presented with a screen that shows additional details about the storage volume backing the VM. +. Click on the *fedora01* VM and you will be presented with a screen that shows additional details about the storage volume backing the VM. + . Notice the following information about the persistent volume claim: .. The PVC is currently bound successfuly .. The PVC has a requested capacity and size of 30GiB .. The Access mode of the PVC is ReadWriteMany (RWX) .. The Volume mode of the PVC is Block -.. The volume is using the *ocs-storagecluster-ceph-rbd-virtualization* storage class. +.. The volume is using the *ocs-external-storagecluster-ceph-rbd* storage class. + image::module-04-storage/02_Fedora01_PVC_Details.png[link=self, window=blank, width=100%] @@ -55,11 +55,11 @@ With the VM snapshots feature, cluster administrators and application developers . Navigate back to *Virtualization* -> *VirtualMachines* and select the virtual machine, *fedora01* in the project *vmexamples*. + image::module-04-storage/03_VM_Overview.png[link=self, window=blank, width=100%] -+ -. Notice there are currently no snapshots of this VM listed on the overview page. + +. Notice there are currently no snapshots of this VM listed on the overview page. + image::module-04-storage/04_Snapshots_Overview.png[link=self, window=blank, width=100%] -+ + . Navigate to the *Snapshots* tab. + image::module-04-storage/05_Snapshot_Menu.png[link=self, window=blank, width=100%] @@ -79,37 +79,37 @@ image::module-04-storage/07_VM_Snapshot_Taken.png[link=self, window=blank, width + image::module-04-storage/08_VM_Restore_Disabled.png[link=self, window=blank, width=100%] -. Next, switch to the *Console* tab. We are going to login and perform a modification that prevents the VM from being able to boot. +. Next, switch to the *Console* tab. We are going to login and perform a modification that prevents the VM from being able to boot. + image::module-04-storage/09_Console_Login.png[link=self, window=blank, width=100%] + . Click on the *Guest login credentials* dropdown to gather the username and password to log into your console. + NOTE: There is a *Copy to clipboard* button and a *Paste* button available here, which makes the login process much easier. -+ -. Once you are logged in, execute the following command: + +. Once you are logged in, execute the following command: + [source,sh,role=execute] ---- sudo rm -rf /boot/grub2; sudo shutdown -r now ---- + -. The virtual machine will no longer be able to boot. -+ +. The virtual machine will no longer be able to boot. 
+ image::module-04-storage/10_Bootloader_Broken.png[link=self, window=blank, width=100%] + IMPORTANT: In the previous step, the operating system was shutdown from within the guest. However, OpenShift Virtualization will restart it automatically by default. This behavior can be changed globally or on a per-VM basis. -+ -. Using the *Actions* dropdown menu or the shortcut button in the top right corner, *Stop* the VM. This process can take a long time since it attempts a graceful shutdown and the machine is in an unstable state. If you click on the *Actions* dropdown menu again you will have the option to *Force stop*. Please make use of this option in order to continue with the lab. -+ + +. Using the *Actions* dropdown menu or the shortcut button in the top right corner, *Stop* the VM. This process can take a long time since it attempts a graceful shutdown and the machine is in an unstable state. If you click on the *Actions* dropdown menu again you will have the option to *Force stop*. Please make use of this option in order to continue with the lab. + . You can click on the *Overview* tab to confirm that the VM has stopped. You can also see the snapshot we recently took listed in the *Snapshots* tile. (You may need to Force Stop the VM via the dropdown. This is fine as we are about to restore the snapshot.) + -image::module-04-storage/11_VM_Stopped_Snapshot.png[link=self, window=blank, width=100%] -+ +image::module-04-storage/11_VM_Stopped_Snapshot.png[link=self, window=blank, width=100%] + . Navigate back to the *Snapshots* tab, click the three-dot menu, and with the VM stopped, you will find *Restore* is no longer greyed out. Click it. + image::module-04-storage/12_VM_Restore.png[link=self, window=blank, width=100%] -+ + . In the dialog shown, press *Restore*. + image::module-04-storage/13_VM_Restore_Dialog.png[link=self, window=blank, width=100%] @@ -117,12 +117,12 @@ image::module-04-storage/13_VM_Restore_Dialog.png[link=self, window=blank, width . Wait until the VM is restored, the process should be fairly quick. + image::module-04-storage/14_VM_Restored.png[link=self, window=blank, width=100%] -+ + . Return to *Overview* tab, and start the VM. + image::module-04-storage/15_VM_Start.png[link=self, window=blank, width=100%] -+ -. Click on the console tab to confirm that the VM has now restarted successfully. + +. Click on the console tab to confirm that the VM has now restarted successfully. + image::module-04-storage/16_VM_Running.png[link=self, window=blank, width=100%] @@ -134,22 +134,22 @@ Cloning creates a new VM that uses it's own disk image for storage, but most of . Return to the *Overview* screen, and click the *Actions* dropdown menu to see the option to clone the VM. + image::module-04-storage/17_Overview_Actions_Clone.png[link=self, window=blank, width=100%] + . Press *Clone* from the *Actions* menu, and a dialog will open. Name the cloned VM *fedora02*, and select the check box to *Start VirtualMachine on clone*. + image::module-04-storage/18_VM_Clone_Dialog.png[link=self, window=blank, width=100%] -+ + . A new VM is created, the disks are cloned and automatically the portal will redirect you to the new VM, and you can see the *Created* time as very recently. + image::module-04-storage/19_VM_Cloned.png[link=self, window=blank, width=100%] + IMPORTANT: The cloned VM will have the same identity as the source VM, which may cause conflicts with applications and other clients interacting with the VM. 
Use caution when cloning a VM connected to an external network or in the same project. -+ + . Click on the *YAML* menu at the top of the screen, you will see that the name of the VM is *fedora02*, however there are labels that remain from the *fedora01* source VM that will need to be manually updated. + image::module-04-storage/20_Cloned_VM_YAML.png[link=self, window=blank, width=100%] -+ -. Modify the the *app* and *kubevirt.io/domain* values in the YAML so that they are set to *fedora02* then click the *Save* button at the bottom, this will allow us to work with this VM in future modules much more easily. +. Modify the *app* and *kubevirt.io/domain* values in the YAML so that they are set to *fedora02*, then click the *Save* button at the bottom; this will allow us to work with this VM in future modules much more easily. == Summary diff --git a/content/modules/ROOT/pages/module-05-bcdr.adoc b/content/modules/ROOT/pages/module-05-bcdr.adoc index e320290..53beeba 100644 --- a/content/modules/ROOT/pages/module-05-bcdr.adoc +++ b/content/modules/ROOT/pages/module-05-bcdr.adoc @@ -2,7 +2,7 @@ == Introduction -Data Protection is a major topic of conversation when it comes to any enterprise workload, and there are no shortage of options currently available for backup and recovery of virtual machines with OpenShift Virtualization. Many of these solutions function in the same manner at which they protect pods in OpenShift. They do this by taking a backup of the virtual machine, or a namespace containing multiple virtual machines and store it remotely in an object storage bucket. These backups usually also include the persistent storage volume, alongside the metadata and custom resources that define the virtual machine. +Data Protection is a major topic of conversation when it comes to any enterprise workload, and there is no shortage of options currently available for backup and recovery of virtual machines with OpenShift Virtualization. Many of these solutions function in the same manner in which they protect pods in OpenShift. They do this by taking a backup of the virtual machine, or of a namespace containing multiple virtual machines, and storing it remotely in an object storage bucket. These backups usually also include the persistent storage volume, alongside the metadata and custom resources that define the virtual machine. Red Hat Solutions Include: @@ -35,11 +35,11 @@ image::module-05-bcdr/01_Overview.png[link=self, window=blank, width=100%] + image::module-05-bcdr/02_DPA.png[link=self, window=blank, width=100%] -. Click on *oadp-dpa* to see the details of the _DataProtectionApplication_ and then click on the *YAML* button at the top to see how it is configured. +. Click on *oadp-dpa* to see the details of the _DataProtectionApplication_ and then click on the *YAML* button at the top to see how it is configured. + image::module-05-bcdr/03_OADP_YAML.png[link=self, window=blank, width=100%] + -Notice that *OADP* has been configured by adding the *kubevirt* plugin and it has been configured to use the internal object storage bucket provided by *Red Hat OpenShift Data Foundation. +Notice that *OADP* has been configured with the *kubevirt* plugin enabled and uses the internal object storage bucket provided by a *Minio* instance running on your cluster.
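If you are curious how the pieces described above fit together, the sketch below shows a plausible `DataProtectionApplication` with the *kubevirt* plugin enabled and an S3-compatible (Minio-style) backup location. It is illustrative only: the bucket name, endpoint URL, and credentials Secret are assumptions made for this example, not the exact values found in the lab's *oadp-dpa* resource.

[source,yaml]
----
apiVersion: oadp.openshift.io/v1alpha1
kind: DataProtectionApplication
metadata:
  name: oadp-dpa
  namespace: openshift-adp
spec:
  configuration:
    velero:
      defaultPlugins:
        - openshift
        - csi
        - kubevirt                            # adds VM-aware backup and restore support
  backupLocations:
    - velero:
        provider: aws                         # S3-compatible API, served here by Minio
        default: true
        objectStorage:
          bucket: oadp-backups                # assumed bucket name
          prefix: velero
        config:
          region: minio                       # placeholder region for S3-compatible storage
          s3ForcePathStyle: "true"
          s3Url: http://minio.minio.svc:9000  # assumed in-cluster Minio endpoint
        credential:
          name: cloud-credentials             # assumed Secret containing the S3 access keys
          key: cloud
----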
IMPORTANT: For the sake of convenience our lab is setup to perform the backups to a local object bucket, however in a production environment you would want to ensure that backups are directed to an external storage system, or a cloud-based object storage bucket. @@ -77,16 +77,15 @@ spec: storageLocation: oadp-dpa-1 ttl: 720h0m0s ---- -+ -. Click the *Create* button at the bottom. +. Click the *Create* button at the bottom. + Note that the content of this YAML indicates that any object with the labels *app: fedora02* in the namespace *vmexamples* will be backed up to the location specified in the *DataProtectionApplication* configuration. + image::module-05-bcdr/05_Create_Backup_YAML.png[link=self, window=blank, width=100%] + -NOTE: If you did not complete the previous storage section, and you do not have the *fedora02* VM, change the label selectors in the YAML above to match a virtual machine in your inventory. -+ +NOTE: If you did not complete the previous section, and you do not have the *fedora02* VM, change the label selectors in the YAML above to match a virtual machine in your inventory. + . Wait until the *Status* column changes to *Completed*. This indicates that the virtual machine has been successfully backed up. + image::module-05-bcdr/06_Backup_Completed.png[link=self, window=blank, width=100%] @@ -94,16 +93,15 @@ image::module-05-bcdr/06_Backup_Completed.png[link=self, window=blank, width=100 [[restore_backup]] == Restore From a Backup -. Navigate to *Virtualization* -> *VirtualMachines*, click on the three-dot menu to the right of the *fedora02* VM and select *Delete* from the menu that appears. +. Navigate to *Virtualization* -> *VirtualMachines*, click on the three-dot menu to the right of the *fedora02* VM and select *Delete* from the menu that appears (you may need to switch back to the *vmexamples* project). + image::module-05-bcdr/07_Delete_VM.png[link=self, window=blank, width=100%] -+ + . When prompted, click the red *Delete* button to confirm deleting the virtual machine. + image::module-05-bcdr/08_Confirm_Delete.png[link=self, window=blank, width=100%] -+ -. Go back to *Operators* -> *Installed Operators* and select *OADP Operator*. +. Go back to *Operators* -> *Installed Operators* and select *OADP Operator* (you may need to switch back to the *openshift-adp* project). . Use the horizontal navigation bar to locate the the *Restore* tab, click the *Restore* tab, and then press *Create Restore*. + image::module-05-bcdr/09_Restore_Tab.png[link=self, window=blank, width=100%] @@ -119,7 +117,7 @@ metadata: namespace: openshift-adp spec: backupName: backup-fedora02 - includedResources: [] + includedResources: [] excludedResources: - nodes - events @@ -128,20 +126,19 @@ spec: - restores.velero.io restorePVs: true ---- -+ + . Press the *Create* button at the bottom. + image::module-05-bcdr/10_Create_Restore_YAML.png[link=self, window=blank, width=100%] -+ + . Wait until you see that the *Status* column changes to *Completed*. + image::module-05-bcdr/11_Restore_Completed.png[link=self, window=blank, width=100%] -. Navigate back to *Virtualization* -> *Virtual Machines* and confirm that the *fedora02* virtual machine was restored. +. Navigate back to *Virtualization* -> *Virtual Machines* and confirm that the *fedora02* virtual machine was restored (in the *vmexamples* project). + image::module-05-bcdr/12_VM_Restored.png[link=self, window=blank, width=100%] - -== Summary +== Summary Protecting virtual machines is a critical aspect of a virtualization platform. 
OpenShift Virtualization provides multiple methods that enable native protection, for example using OADP, or allowing storage and backup partners to integrate their offerings. If you have questions about how to protect virtual machines, please don't hesitate to ask the proctors for the workshop or reach out to your vendor to determine their compatibility with OpenShift Virtualization. diff --git a/content/modules/ROOT/pages/module-06-network.adoc b/content/modules/ROOT/pages/module-06-network.adoc index 57a78f8..c8f7528 100644 --- a/content/modules/ROOT/pages/module-06-network.adoc +++ b/content/modules/ROOT/pages/module-06-network.adoc @@ -19,7 +19,6 @@ The OpenShift environment has already been configured with an OVS Bridge on each * Connect a VM to the external network [[create_netattach]] - == Create a Network Attachment Definition In order to use the OVS Bridge with your VM you need to create a *Network Attachment Definition*. This is what tells OpenShift about the network and allows the virtual machines to connect to it. Network Attachment Definitions are specific to the project/namespace they're created in, unless they're created in the *default* project. This gives you, the administrator, the ability to control which networks are and aren't available to users who have access to manage their own VMs. Once the Network Attachment Definition has been created, it can then be used by virtual machines when configuring their network adapters. @@ -34,11 +33,11 @@ To manage an OpenShift node's network configuration you use a tool, available as . Navigate to *Networking* -> *Network Attachment Definitions* and click *Create network attachment definition*: + image::module-06-network/01_NAD_Dashboard.png[link=self, window=blank, width=100%] -+ + . Click the *Edit YAML* button at the top of the page. + image::module-06-network/02_NAD_Create.png[link=self, window=blank, width=100%] -+ + . Paste in the following yaml snippet, and click the Create button: + [source,yaml,role=execute] @@ -64,7 +63,7 @@ spec: image::module-06-network/03_NAD_YAML.png[link=self, window=blank, width=100%] + NOTE: In most cases a single OVS bridge can support many Network Attachment Definitions each with their own designated *VLAN Tag Number*. In this lab we use an untagged network, so no VLAN number is required here, as such our Network Attachment Definition is labeled as vlan0. -+ + . Examine the details of the network attachment definition. Because this was created in the *vmexamples* project, it will be available only to attach to VMs that are in that project. + image::module-06-network/04_NAD_Created.png[link=self, window=blank, width=100%] @@ -77,25 +76,23 @@ image::module-06-network/04_NAD_Created.png[link=self, window=blank, width=100%] image::module-06-network/05_VM_Network_Tab.png[link=self, window=blank, width=100%] + NOTE: Notice that the VM is currently using a single interface *default* which is connected to the *Pod networking* network. We can choose to modify this existing connection or add a new interface to the VM. Either action we choose currently requires a VM restart. -+ + . Click the three-dot menu at the end of the *default* network adapter line, and click on edit in the drop down menu. + image::module-06-network/06_Edit_Default.png[link=self, window=blank, width=100%] -+ + . Click the dropdown menu for the *Network* field, and select the vmexamples/vlan0 network attachment definition that we created. Click on *Save*. 
+ image::module-06-network/07_VM_Net_Modify.png[link=self, window=blank, width=100%] -+ + . Use the *Actions* menu or icon in the upper right corner to restart the VM. After rebooting, navigate to the *Overview* tab: -+ . Once the machine restarts, you can see in the *Network Interfaces* section of the *Overview* screen that the *default* interface obtains a DHCP IP address from the flat network (*192.168.3.x/24*). + image::module-06-network/08_New_IP_Address.png[link=self, window=blank, width=100%] -IMPORTANT: Before the next section of this lab, please repeat the actions to attach the fedora02 VM to the same vlan0 network. +IMPORTANT: Before the next section of this lab, please repeat the actions to attach the *fedora02* VM to the same vlan0 network. [[multinetwork_policy]] - == Using a MultiNetwork Policy A multinetwork policy allows you to configure network access to a namespace and to define granular rules allowing ingress and egress from the namespace to enhance security of the applications and VMs that are running in the namespace. @@ -130,7 +127,7 @@ spec: . Once the file is saved, start a ping to the IP address of the *fedora01* virtual machine to confirm that you can currently connect. + image::module-06-network/11_Bastion_Ping.png[link=self, window=blank, width=100%] -+ + . Apply the multinetwork policy with the following syntax: + [source,sh,role=execute] @@ -139,23 +136,23 @@ oc apply -f deny-all.yaml -n vmexamples ---- + image::module-06-network/12_Deny_All_Applied.png[link=self, window=blank, width=100%] -+ + . Now try again to ping the IP address of the fedora01 virtual machine, your ping attempts should now fail. + image::module-06-network/13_Bastion_Ping_Fail.png[link=self, window=blank, width=100%] -+ + . Return to your OpenShift console, and click on *Virtualization -> VirtualMachines* and select your *fedora02* machine. + image::module-06-network/14_Fedora02_Overview.png[link=self, window=blank, width=100%] -+ + . Click on the button to open it's web console, and login with the provided credentials. + image::module-06-network/15_Fedora02_Console.png[link=self, window=blank, width=100%] -+ + . Attempt to ping the ip address for the *fedora01* virtual machine, notice that it is also blocked, even though we are on the same subnet, in the same namespace. *Leave the ping running.* + image::module-06-network/16_Fedora02_Ping_Fail.png[link=self, window=blank, width=100%] -+ + . Return to the bastion host console, and create a new file called allow-host.yaml, and paste in the following content: + [source,yaml,role=execute] @@ -179,25 +176,24 @@ spec: IMPORTANT: Make sure that you substitute the correct IP from the Fedora02 VM. + image::module-06-network/17_Allow_Host_Syntax.png[link=self, window=blank, width=100%] -+ + . Apply the policy using the following syntax: + [source,sh,role=execute] ---- oc apply -f allow-host.yaml -n vmexamples ---- - + image::module-06-network/18_Allow_Host_Applied.png[link=self, window=blank, width=100%] -+ + . Attempt to ping from the bastion host. This attempt should still fail as we have not explictly allowed it. + image::module-06-network/19_Bastion_Still_Blocked.png[link=self, window=blank, width=100%] -+ + . Return to your *fedora02* VM console, you should find that the ping has now resumed successfully. + image::module-06-network/20_Fedora02_Ping_Allowed.png[link=self, window=blank, width=100%] -+ + . Let's clean up the policies for the next section. 
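+
One straightforward way to do this, sketched here on the assumption that the deny-all.yaml and allow-host.yaml files created earlier are still in your working directory on the bastion host, is to delete the same manifests you applied:
+
[source,sh,role=execute]
----
# Remove the two MultiNetworkPolicy objects created in this section
oc delete -f deny-all.yaml -n vmexamples
oc delete -f allow-host.yaml -n vmexamples
----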
+ IMPORTANT: For the next lab, it's important that we clear out the two network policies we created. diff --git a/content/modules/ROOT/pages/module-07-tempinst.adoc b/content/modules/ROOT/pages/module-07-tempinst.adoc index 0cf80a8..7051afb 100644 --- a/content/modules/ROOT/pages/module-07-tempinst.adoc +++ b/content/modules/ROOT/pages/module-07-tempinst.adoc @@ -19,15 +19,15 @@ By default pre-configured templates provided by Red Hat OpenShift Virtualization . To begin, navigate to *Virtualization* -> *Templates* and select *All projects* + image::module-07-tempinst/01_Template_List.png[link=self, window=blank, width=100%] -+ -. In the search bar type in *centos9* and in the list of templates that appear find the template for *centos-stream9-server-large*. + +. In the search bar type in *centos9* and in the list of templates that appear find the template for *centos-stream9-server-large*. + image::module-07-tempinst/02_Search_Centos9.png[link=self, window=blank, width=100%] -+ + . Select the three-dot menu to the far right, and click on the option to *Clone* from the menu that pops out. + image::module-07-tempinst/03_Clone_Template.png[link=self, window=blank, width=100%] -+ + . A new menu called *Clone template* will appear, fill in the the following values, and when finished click on the *Clone* button. + * *Template name:* centos-stream9-server-db-xl @@ -35,57 +35,61 @@ image::module-07-tempinst/03_Clone_Template.png[link=self, window=blank, width=1 * *Template display name:* CentOS Stream 9 VM - Database XL Template + image::module-07-tempinst/04_Clone_Template_Options.png[link=self, window=blank, width=100%] -+ + . This will take you to the *Details* page for the template where we will be able to customize some options. Start by finding the CPU and Memory near the bottom of the page, and click on the pencil icon to edit it. + image::module-07-tempinst/05_Clone_Details.png[link=self, window=blank, width=100%] -+ + . A new window will pop out where you can edit the amount of CPU and Memory. For our XL template set the value of CPUs to 4, and Memory to 16 GiB, and click the *Save* button. + image::module-07-tempinst/06_Edit_CPU_Mem.png[link=self, window=blank, width=100%] -+ -. Navigate to the *Network Interfaces* tab, to the right of the *default* interface, select the three-dot menu and the edit option. -+ -image::module-07-tempinst/07_Edit_Network.png[link=self, window=blank, width=100%] -+ -. When the *Edit network interface* menu appears, set the *Network* value to the *vmexamples/vlan0* network, and click the *Save* button. -+ -image::module-07-tempinst/08_Edit_Net_Int.png[link=self, window=blank, width=100%] -+ + +// Commenting out that next section makes the numbers restart at 1... Remove the section later +// . Navigate to the *Network Interfaces* tab, to the right of the *default* interface, select the three-dot menu and the edit option. +// + +// image::module-07-tempinst/07_Edit_Network.png[link=self, window=blank, width=100%] + +// . When the *Edit network interface* menu appears, set the *Network* value to the *vmexamples/vlan0* network, and click the *Save* button. +// + +// image::module-07-tempinst/08_Edit_Net_Int.png[link=self, window=blank, width=100%] + . Next click on the *Scripts* tab at the top, and in the section called *Cloud-init* click the *Edit* button. + image::module-07-tempinst/09_Scripts_CloudInit.png[link=self, window=blank, width=100%] -+ + . 
When the *Cloud-init* dialog opens, click the radio button to *Configure via: Script* then add the following YAML snippet to the end of the script. + [source,yaml,role=execute] ---- packages: - - mariadb-server + - mariadb-server runcmd: - - systemctl enable mariadb - - systemctl start mariadb + - systemctl enable mariadb + - systemctl start mariadb ---- + image::module-07-tempinst/10_Cloud_Init_Script.png[link=self, window=blank, width=100%] -+ + . Click the *Save* button, followed by the *Apply* button. . Now click on the *Virtualization -> Catalog* menu on the left, select the *Template catalog* option and click on *User templates*. You should see our created template available as a tile. + image::module-07-tempinst/11_User_Templates.png[link=self, window=blank, width=100%] -+ + . Click on the tile and you will be prompted with the VM startup screen. Click the *Quick create VirtualMachine* button. + image::module-07-tempinst/12_Quick_Create_Template.png[link=self, window=blank, width=100%] -+ -. When the virtual machine boots you can see on the *Overview* page that it was created from our template, and has the system resources we defined, and is already attached to the *vmexamples/vlan0* network with an assigned IP address. + +. When the virtual machine boots you can see on the *Overview* page that it was created from our template, and has the system resources we defined, and is already attached to the +Pod +// *vmexamples/vlan0* +network with an assigned IP address. + image::module-07-tempinst/13_VM_From_Template.png[link=self, window=blank, width=100%] -+ + . Click on the *Console* tab at the top and use the *Guest login credentials* option to login to the console of the virtual machine. + image::module-07-tempinst/14_VM_Console.png[link=self, window=blank, width=100%] -+ + . Once you are logged into the virtual machine, run the following command to test the install of MariaDB. + [source,sh,role=execute] @@ -95,6 +99,8 @@ sudo mysql -u root + image::module-07-tempinst/15_MariaDB_Login.png[link=self, window=blank, width=100%] +. Hit `Ctrl-D` twice to log out of the VM. + [[create_win]] == Create a Windows VM Template @@ -105,11 +111,11 @@ This process can be streamlined after the initial operating system installation . From the left menu, navigate to *Virtualization* -> *Catalog*, and click on the *Template catalog* tab near the top.. + image::module-07-tempinst/16_Template_Catalog.png[link=self, window=blank, width=100%] -+ + . Scroll down until you find the *Microsoft Windows Server 2019 VM* tile. + image::module-07-tempinst/17_Windows_2k19_Tile.png[link=self, window=blank, width=100%] -+ + . A dialog will appear showing the default configuration related to the template. + NOTE: Notice that there is intially no option to quick create this VM, and we must customize the VM to fit our needs. @@ -120,23 +126,22 @@ image::module-07-tempinst/18_Windows_2k19_Dialog.png[link=self, window=blank, wi .. Specify the name *windows* .. Enable the checkbox *Boot from CD* .. Choose URL *(creates PVC)* from the drop-down menu -.. Specify the url: http://192.168.123.100:81/Windows2019.iso +.. Specify the url: https://www.opentlc.com/download/ocp4_virt_foundations/Windows2019.iso .. Reduce the CD disk size to *5 GiB* .. Keep the *Disk source* size disk to the default value *60 GiB* .. Ensure the *Mount Windows drivers disk* checkbox is enabled. 
**This is required to install Windows systems, which will provide the drivers for VirtIO.** + image::module-07-tempinst/19_Windows_2k19_Parameters.png[link=self, window=blank, width=100%] -+ + . With the options filled out, we want to click on the *Customize VirtualMachine* button at the bottom to continue configuring our Template. -+ . On the *Customize and create VirtualMachine screen, click on the *Scripts* tab, and then scroll down to the *Sysprep* section and click on the *Edit* button. + image::module-07-tempinst/20_Customize_Scripts.png[link=self, window=blank, width=100%] -+ + . A new window will pop up for you to create *Sysprep* actions for your new template. + image::module-07-tempinst/21_Sysprep.png[link=self, window=blank, width=100%] -+ + . Copy and paste the following code block into the *autounattend.xml* section: + [source,xml,role=execute] @@ -277,40 +282,39 @@ image::module-07-tempinst/21_Sysprep.png[link=self, window=blank, width=100%] ---- -+ + . Once the code is pasted, click the *Save* button on the dialog. + image::module-07-tempinst/22_Windows_2k19_Sysprep.png[link=self, window=blank, width=100%] -+ + . You will be returned to the *Customize and create Virtual Machine* screen, Click on the *Disks* tab, and locate the line for the *installation-CDROM*, click the three-dot menu on the right, and select *Edit*. + image::module-07-tempinst/23_Edit_Boot_Media.png[link=self, window=blank, width=100%] -+ + . Select the option for *Use this disk as a boot source*, and click on the *Save* button at the bottom. + image::module-07-tempinst/24_Boot_Source.png[link=self, window=blank, width=100%] -+ + . When you return to the *Disks* tab, click on the *Create VirtualMachine* button at the bottom. . The Virtual Machine will start the provisioning process by downloading the ISO image, configuring, and starting the instance. + image::module-07-tempinst/25_Windows_2k19_Provisioning.png[link=self, window=blank, width=100%] -+ + . After a few minutes, the Virtual VM will be in *Running* status. Switch to the *Console* tab: + image::module-07-tempinst/26_Windows_2k19_Console.png[link=self, window=blank, width=100%] + -IMPORTANT: The VM is marked as "Not migratable" because a CD-ROM disk is attached. -+ +IMPORTANT: The VM is marked as "Not migratable" because a CD-ROM disk is attached. + . Once the VM installation process is complete, go ahead and power it off with the stop button. We can now take a snapshot of the root disk, and use that to create a bootable volume template that we can use to boot other Windows 2k19 machines. -+ . Click on *Storage* -> *PersistentVolumeClaims* to see a list of PVCs available in the vmexamples namespace. + image::module-07-tempinst/27_Windows_PVC.png[link=self, window=blank, width=100%] -+ + . Find the *windows* PVC that is listed and using the three-dot menu on the right select *Clone PVC*. On the menu that pops up, name the new PVC *windows-2k19-template*. + image::module-07-tempinst/28_Clone_Windows_PVC.png[link=self, window=blank, width=100%] -+ + . Once this is saved, you can return to the *Virtualization -> Catalog* menu, and use this cloned PVC as a future boot source for creating new Virtual Machines by selecting the option for *PVC (clone PVC)* as the *Disk source*, and selecting the *Windows-2k19-Template* PVC as the *PVC name* to clone. + image::module-07-tempinst/29_Windows_Template.png[link=self, window=blank, width=100%] @@ -323,13 +327,13 @@ In order to simplify the deployment process for virtual machines, starting with . 
To get started click on *Virtualization* -> *Catalog* on the left-side menu. You will see the default catalog item is *InstanceType*. + image::module-07-tempinst/30_Left_Menu_Catalog.png[link=self, window=blank, width=100%] -+ -. The first step to using an instance type is to select a volume to boot from. Similar to the templates that provide boot sources, those boot sources are available to use for an instance type, or you can upload your own with the *Add volume* button. + +. The first step to using an instance type is to select a volume to boot from. Similar to the templates that provide boot sources, those boot sources are available to use for an instance type, or you can upload your own with the *Add volume* button. . Click on *rhel9* to create a VM with that boot source. + image::module-07-tempinst/31_Volume_Boot.png[link=self, window=blank, width=100%] -+ + . Next you can select the instance type you would like to use. There are Red Hat provided instance types by default, or you can create your own. If you hover over a provided instance type you can see a description of it's intended use. + image::module-07-tempinst/32_Select_InstanceType.png[link=self, window=blank, width=100%] @@ -340,18 +344,18 @@ image::module-07-tempinst/32_Select_InstanceType.png[link=self, window=blank, wi ** gn1: Dedicated for VMs making use of the nVidia GPU operator. ** n1: Designed for network intensive workloads like VNF ** m1: Designed for memory intensive workloads. -+ + . Click on the *U series* tile to see a dropdown list of defined resources for general instance types. The default option here is *medium: 1 CPUs, 4 GiB Memory*. Select it. + image::module-07-tempinst/33_InstanceType_Resources.png[link=self, window=blank, width=100%] -+ + . The last section that needs to be completed to provision using an instance type is similar to the template section. You need to provide a name for the virtual machine, and select the storage class to be used for a backing disk. By default, a name will be generated for the VM, and the default virtualization storage class will be selected. When you are satisfied, click the *Create VirtualMachine* button. + image::module-07-tempinst/34_VM_Details.png[link=self, window=blank, width=100%] -+ + . You will be directed to the virtual machine overview page, and see that the VM provisioned using an instance type is now up and running. + -image::module-07-tempinst/35_VM_Overview.png[link=self, window=blank, width=100%] +image::module-07-tempinst/35_VM_Overview.png[link=self, window=blank, width=100%] == Summary diff --git a/content/modules/ROOT/pages/module-08-workingvms.adoc b/content/modules/ROOT/pages/module-08-workingvms.adoc index 6d5db53..19d420a 100644 --- a/content/modules/ROOT/pages/module-08-workingvms.adoc +++ b/content/modules/ROOT/pages/module-08-workingvms.adoc @@ -2,7 +2,7 @@ == Introduction -This section of our lab is dedicated to the Day-2 operations that many administrators would need to perform when working with virtual machines in their OpenShift Virtualization environment. We will make use of the understanding we have developed throughout this roadshow of how VMs operate in an OpenShift environment, and use those skills to complete the tasks in this section. 
In this particular case, we are going to work with the three virtual machines that we imported from VMware vSphere earlier in this roadshow, and we are going to make some minor configuration changes to enable the applications hosted on those servers to be accessed as they now run in OpenShift Virtualization. To accomplish this, we will install and configure a loadbalancer, and we will expose our applications using the service/route method that is the default when making use of the OpenShift SDN pod network so that the application is reachable from outside of the cluster. +This section of our lab is dedicated to the Day-2 operations that many administrators would need to perform when working with virtual machines in their OpenShift Virtualization environment. We will make use of the understanding we have developed throughout this roadshow of how VMs operate in an OpenShift environment, and use those skills to complete the tasks in this section. In this particular case, we are going to work with the three virtual machines that we imported from VMware vSphere earlier in this roadshow, and we are going to make some minor configuration changes to enable the applications hosted on those servers to be accessed as they now run in OpenShift Virtualization. To accomplish this, we will install and configure a loadbalancer, and we will expose our applications using the service/route method that is the default when making use of the OpenShift SDN pod network so that the application is reachable from outside of the cluster. [[lb_concepts]] == MetalLB concepts @@ -11,7 +11,7 @@ In this portion of the lab, we will review the MetalLB operator and understand h Using MetalLB is valuable when you have a bare-metal cluster or a virtual infrastructure that is treated like bare-metal, and you want to ensure that there is fault-tolerant access to an application through an external IP address. -For MetalLB to meet this need, you must configure your networking infrastructure to ensure that the network traffic for the external IP address is routed from clients to the host network for the cluster. +For MetalLB to meet this need, you must configure your networking infrastructure to ensure that the network traffic for the external IP address is routed from clients to the host network for the cluster. It can operate in two modes: @@ -118,45 +118,33 @@ To begin, we'll add labels to the virtual machines by modifying their definition + image::module-08-workingvms/11_Imported_VMs_List.png[link=self, window=blank, width=100%] + - NOTE: Ensure you select the correct project, *vmexamples* if you completed the *Migrating Existing Virtual Machines* module or *vmimported* if you did not. -+ . Select to the *winweb01* VM and navigate to the *YAML* tab. - . Find the *spec:* section and under the *template.metadata* add the following lines to *labels* section in the VM resources: + - [source,yaml,role=execute] ---- env: webapp ---- + - IMPORTANT: Make sure to get the indentation exactly right - just like in the screenshot below. + - image::module-08-workingvms/12_Imported_VMs_YAML.png[link=self, window=blank, width=100%] -+ . *Repeat* the process for the VM *winweb02*. - . Start, or restart if already running, the *winweb01* and *winweb02* virtual machines. + - NOTE: Ensure the VMs are properly working by accessing to the console tab of each VM. === Create the Service -. Navigate to *Networking* -> *Services* and press *Create Service*. +. Navigate to *Networking* -> *Services* and press *Create Service*. 
+ - image::module-08-workingvms/13_Navigate_Service.png[link=self, window=blank, width=100%] -+ . Replace the YAML with the following definition + - [source,yaml,role=execute] ---- apiVersion: v1 @@ -168,29 +156,22 @@ spec: selector: env: webapp ports: - - protocol: TCP - port: 80 - targetPort: 80 + - protocol: TCP + port: 80 + targetPort: 80 ---- + - IMPORTANT: Ensure the namespace with your virtual machines, *vmexamples* or *vmimported*, is the one used in the Service YAML. + - image::module-08-workingvms/14_Service_YAML.png[link=self, window=blank, width=100%] -+ . Press *Create*. - . From the details page for the newly created *webapp* Service, locate *Pod selector* link and click it. + - image::module-08-workingvms/15_Imported_VMs_PodSelector.png[link=self, window=blank, width=100%] -+ . Verify the two Windows VMs are properly identified and targeted by the Service. + - image::module-08-workingvms/16_Imported_VMs_Pods.png[link=self, window=blank, width=100%] === Create the Route @@ -199,9 +180,7 @@ Now the Windows IIS servers are accessible from within the OpenShift cluster. Ot . Navigate to *Networking* -> *Routes* in the left navigation menu, verify that you're using the correct project name. Press *Create Route*. + - image::module-08-workingvms/17_Route_Navigation.png[link=self, window=blank, width=100%] -+ . Fill the form using the information below, press *Create* when done. + @@ -215,22 +194,16 @@ image::module-08-workingvms/18_Create_Route.png[link=self, window=blank, width=1 . Navigate to the address shown in *Location* field + - image::module-08-workingvms/19_Route_Access.png[link=self, window=blank, width=100%] -+ . When the page loads, you will see an error. This is because the Windows web servers are not able to currently connect to the database VM after it's migration. + - image::module-08-workingvms/20_WebApp_Error.png[link=self, window=blank, width=100%] + - NOTE: To fix the connectivity issue, we need to create a Service for the database VM so that it can be accessed by the web servers. -+ . Once again, navigate to *Networking* -> *Services* and press *Create Service*. Replace the YAML with the following definition: + - [source,yaml,role=execute] ---- apiVersion: v1 @@ -242,27 +215,21 @@ spec: selector: vm.kubevirt.io/name: database ports: - - protocol: TCP - port: 3306 - targetPort: 3306 + - protocol: TCP + port: 3306 + targetPort: 3306 ---- + - NOTE: This time we used the name of the virtual machine to attach it to the service we are creating, since there is only one VM named *database* in the namespace with this name it is safe to do so without having to customize the YAML of the VM or rebooting the guest. + - image::module-08-workingvms/21_Database_YAML.png[link=self, window=blank, width=100%] -+ . When the YAML is pasted, click the *Create* button. + - IMPORTANT: Ensure the namespace with your virtual machines, *vmexamples* or *vmimported* is the one used in the Service YAML. + - . Reload the webapp URL and expect to get the proper result + - image::module-08-workingvms/22_WebApp_Success.png[link=self, window=blank, width=100%] [[expose_db]] @@ -272,16 +239,12 @@ If you completed the *Exposing an Application with a Service/Route* module, the . 
Navigate to *Networking* -> *Services* and select the project *vmexamples* + - image::module-08-workingvms/23_Create_Service_LB.png[link=self, window=blank, width=100%] + - IMPORTANT: If you did not complete the module *Migrating Existing Virtual Machines* you can use pre-existing virtual machines in the *vmimported* project. If you are using the pre-imported virtual machines, please replace all instances of *vmexamples* namespace with *vmimported*. -+ . Press *Create Service* and fill the form with the following code snippet: + - [source,yaml,role=execute] ---- apiVersion: v1 @@ -294,42 +257,32 @@ spec: selector: vm.kubevirt.io/name: database ports: - - protocol: TCP - port: 3306 - targetPort: 3306 + - protocol: TCP + port: 3306 + targetPort: 3306 ---- + - NOTE: Notice the *type* indicated is *LoadBalancer*. Since this cluster has MetalLB installed, it will result in the specified port(s) exposed using that. There are other load balancer options available from partners such as F5, Nginx, and more. + - image::module-08-workingvms/24_Database_LB_YAML.png[link=self, window=blank, width=100%] -+ . Press *Create* and review the *Service* created. Notice the IP address assigned to the load balancer is from the range specified earlier in the lab. + - image::module-08-workingvms/25_Database_LB_Service_Created.png[link=self, window=blank, width=100%] -+ . To verify connectivity to the database service via the external IP, use the terminal to the right authenticate to the database at it's exposed LB address with the following credentials - .. *Username*: *root* .. *Password*: *R3dh4t1!* + - -[source,bash,role=execute] +[source,sh,role=execute] ---- -mysql -u root -p -h 192.168.123.202 +mysql -u root -p -h 192.168.123.202 ---- + - NOTE: Be sure to replace the ip address in the sample above with the address being advertised by MetalLB. + - When successful, you should be able to login to the database and view the mysql prompt. + - image::module-08-workingvms/26_DB_Login.png[link=self, window=blank, width=100%] == Summary diff --git a/default-site.yml b/default-site.yml index 3d10c89..b57bfa2 100644 --- a/default-site.yml +++ b/default-site.yml @@ -1,17 +1,16 @@ --- site: - title: "Experience Red Hat OpenShift Virtualization" - # url: https://redhat-scholars.github.io/course-template + title: "Experience Red Hat OpenShift Virtualization" start_page: modules::index.adoc content: sources: - - url: . - start_path: content + - url: . + start_path: content ui: bundle: - url: https://github.com/rhpds/showroom_theme_summit/releases/download/v0.0.1/ui-bundle.zip + url: https://github.com/rhpds/showroom_theme_rhdp/releases/download/rhdp-v0.0.2/ui-bundle.zip runtime: cache_dir: ./.cache/antora @@ -21,6 +20,6 @@ output: antora: extensions: - - id: dev-mode - require: ./content/lib/dev-mode.js - enabled: false + - id: dev-mode + require: ./content/lib/dev-mode.js + enabled: false diff --git a/demo-site.yml b/demo-site.yml deleted file mode 100644 index 0f5271f..0000000 --- a/demo-site.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -site: - title: "Experience Red Hat OpenShift Virtualization" - # url: https://redhat-scholars.github.io/course-template - start_page: modules::index.adoc - -content: - sources: - - url: . 
- start_path: content - -ui: - bundle: - url: https://github.com/rhpds/showroom_theme_rhdp/releases/download/rhdp-v0.0.2/ui-bundle.zip - -runtime: - cache_dir: ./.cache/antora - -output: - dir: ./www - -antora: - extensions: - - id: dev-mode - require: ./content/lib/dev-mode.js - enabled: false diff --git a/dev-mode.png b/dev-mode.png deleted file mode 100644 index 4e7195b..0000000 Binary files a/dev-mode.png and /dev/null differ diff --git a/ui-links.png b/ui-links.png deleted file mode 100644 index fb77e9c..0000000 Binary files a/ui-links.png and /dev/null differ
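With demo-site.yml removed, default-site.yml is the single remaining playbook for local preview builds. A minimal sketch of building the site against it, assuming the Antora CLI and site generator packages are installed in the repository (they are not added by this change):

[source,sh,role=execute]
----
# Install the Antora tooling locally, then build the site from the remaining playbook.
# The generated site is written to the output directory configured in the playbook.
npm i -D @antora/cli @antora/site-generator
npx antora default-site.yml
----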