
[WIP] replace fog-kubevirt with kubeclient #265

Open · wants to merge 5 commits into base: master
48 changes: 32 additions & 16 deletions app/models/manageiq/providers/kubevirt/inventory/collector.rb
@@ -27,27 +27,43 @@ def initialize_for_targeted_refresh
name = @target.name
@nodes = {}

@manager.with_provider_connection(:namespace => @target.location) do |connection|
if @target.template?
@templates = [connection.template(name)]
else
@vms = [connection.vm(name)]
begin
@vm_instances = [connection.vm_instance(name)]
rescue
# target refresh of a vm might fail if it has no vm instance
_log.debug("The is no running vm resource for '#{name}'")
end
if @target.template?
@templates = [openshift_template_client.get_template(name, @target.location)]
else
@vms = [kubevirt_client.get_virtual_machine(name, @target.location)]
begin
@vm_instances = [kubevirt_client.get_virtual_machine_instance(name, @target.location)]
rescue Kubeclient::ResourceNotFoundError
# target refresh of a vm might fail if it has no vm instance
_log.debug("The is no running vm resource for '#{name}'")
end
end
end

def initialize_for_full_refresh
@manager.with_provider_connection do |connection|
@nodes = connection.nodes
@vms = connection.vms
@vm_instances = connection.vm_instances
@templates = connection.templates
@nodes = kube_client.get_nodes
@vms = kubevirt_client.get_virtual_machines
@vm_instances = kubevirt_client.get_virtual_machine_instances
@templates = openshift_template_client.get_templates
Comment on lines +44 to +47 (Member): More out of curiosity, and not for this PR, but could we collect these from the clients in parallel?

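A minimal sketch of that suggestion, not part of this PR, using plain Ruby threads and assuming the Kubeclient instances tolerate concurrent reads:

def initialize_for_full_refresh
  # Hypothetical sketch only: run the four list calls concurrently.
  nodes        = Thread.new { kube_client.get_nodes }
  vms          = Thread.new { kubevirt_client.get_virtual_machines }
  vm_instances = Thread.new { kubevirt_client.get_virtual_machine_instances }
  templates    = Thread.new { openshift_template_client.get_templates }

  # Thread#value joins each thread and returns its block's result.
  @nodes        = nodes.value
  @vms          = vms.value
  @vm_instances = vm_instances.value
  @templates    = templates.value
end
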
end

def kube_client(api_group = nil)
api_path, api_version = api_group&.split("/")

options = {:service => "kubernetes"}
if api_path
options[:path] = "/apis/#{api_path}"
options[:version] = api_version
end

@manager.parent_manager.connect(options)
end

def kubevirt_client
@kubevirt_client ||= kube_client("kubevirt.io/v1")
end

def openshift_template_client
@openshift_template_client ||= kube_client("template.openshift.io/v1")
end
end
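
For context, kube_client builds its connection through @manager.parent_manager.connect, but the options map onto a plain kubeclient construction roughly like this sketch. The host, port, and token are the placeholder values from config/secrets.defaults.yml below, and the SSL mode mirrors the spec's ssl-without-validation; none of this is the provider's actual connect logic:

require 'kubeclient'
require 'openssl'

# Rough standalone equivalent of kube_client("kubevirt.io/v1"):
# the API group becomes the /apis/<group> path, "v1" the version.
kubevirt = Kubeclient::Client.new(
  "https://host.example.com:8443/apis/kubevirt.io",
  "v1",
  :auth_options => {:bearer_token => "theToken"},
  :ssl_options  => {:verify_ssl => OpenSSL::SSL::VERIFY_NONE}
)

# Entity methods such as get_virtual_machines are generated by API discovery:
kubevirt.get_virtual_machines(:namespace => "default")
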
59 changes: 35 additions & 24 deletions app/models/manageiq/providers/kubevirt/inventory/parser.rb
@@ -54,25 +54,31 @@ def process_nodes(objects)

def process_node(object)
# Get the basic information:
uid = object.uid
name = object.name
uid = object.metadata.uid
name = object.metadata.name

addresses = object.status.addresses.index_by(&:type)
hostname = addresses["Hostname"]&.address
ipaddress = addresses["InternalIP"]&.address

# Add the inventory object for the host:
host_object = host_collection.find_or_build(uid)
host_object.connection_state = 'connected'
host_object.ems_cluster = cluster_collection.lazy_find(CLUSTER_ID)
host_object.ems_ref = uid
host_object.hostname = object.hostname
host_object.ipaddress = object.ip_address
host_object.hostname = hostname
host_object.ipaddress = ipaddress
host_object.name = name
host_object.uid_ems = uid

node_info = object.status.nodeInfo

# Add the inventory object for the operating system details:
os_object = os_collection.find_or_build(host_object)
os_object.name = object.hostname
os_object.product_name = object.os_image
os_object.product_type = object.operating_system
os_object.version = object.kernel_version
os_object.name = hostname
os_object.product_name = node_info.osImage
os_object.product_type = node_info.operatingSystem
os_object.version = node_info.kernelVersion

# Find the storage:
storage_object = storage_collection.lazy_find(STORAGE_ID)
@@ -92,10 +98,13 @@ def process_vms(objects)

def process_vm(object)
# Process the domain:
vm_object = process_domain(object.namespace, object.memory, object.cpu_cores, object.uid, object.name)
spec = object.spec.template.spec
domain = spec.domain

vm_object = process_domain(object.metadata.namespace, domain.resources&.requests&.memory, domain.cpu, object.metadata.uid, object.metadata.name)

# Add the inventory object for the OperatingSystem
process_os(vm_object, object.labels, object.annotations)
process_os(vm_object, object.metadata.labels, object.metadata.annotations)

# The power status is initially off, it will be set to on later if the virtual machine instance exists:
vm_object.raw_power_state = 'Succeeded'
Expand All @@ -109,23 +118,25 @@ def process_vm_instances(objects)

def process_vm_instance(object)
# Get the basic information:
uid = object.uid
name = object.name
uid = object.metadata.uid
name = object.metadata.name

# Get the identifier of the virtual machine from the owner reference:
unless object.owner_name.nil?
owner_references = object.metadata.ownerReferences&.first
if owner_references&.name
# seems like valid use case for now
uid = object.owner_uid
name = object.owner_name
uid = owner_references.uid
name = owner_references.name
end

# Process the domain:
vm_object = process_domain(object.namespace, object.memory, object.cpu_cores, uid, name)
process_status(vm_object, object.ip_address, object.node_name)
vm_object = process_domain(object.metadata.namespace, object.spec.domain.memory&.guest, object.spec.domain.cpu&.cores, uid, name)

process_status(vm_object, object.status.interfaces.first&.ipAddress, object.status.nodeName)

vm_object.host = host_collection.lazy_find(object.node_name, :ref => :by_name)
vm_object.host = host_collection.lazy_find(object.status.nodeName, :ref => :by_name)

vm_object.raw_power_state = object.status
vm_object.raw_power_state = object.status.phase
end

def process_domain(namespace, memory, cores, uid, name)
@@ -189,25 +200,25 @@ def process_templates(objects)

def process_template(object)
# Get the basic information:
uid = object.uid
uid = object.metadata.uid
vm = vm_from_objects(object.objects)
return if vm.nil?

# Add the inventory object for the template:
template_object = template_collection.find_or_build(uid)
template_object.connection_state = 'connected'
template_object.ems_ref = uid
template_object.name = object.name
template_object.name = object.metadata.name
template_object.raw_power_state = 'never'
template_object.template = true
template_object.uid_ems = uid
template_object.location = object.namespace
template_object.location = object.metadata.namespace

# Add the inventory object for the hardware:
process_hardware(template_object, object.parameters, object.labels, vm.dig(:spec, :template, :spec, :domain))
process_hardware(template_object, object.parameters, object.metadata.labels, vm.dig(:spec, :template, :spec, :domain))

# Add the inventory object for the OperatingSystem
process_os(template_object, object.labels, object.annotations)
process_os(template_object, object.metadata.labels, object.metadata.annotations)
end

def vm_from_objects(objects)
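
The parser changes above all follow one pattern: instead of fog-kubevirt's flattened accessors (object.uid, object.hostname), the code now traverses the raw Kubernetes resource that kubeclient returns. A hand-built sketch of the node shape process_node reads; the field values are invented for illustration, and index_by comes from ActiveSupport:

require 'kubeclient'
require 'active_support/core_ext/enumerable' # provides index_by

node = Kubeclient::Resource.new({
  :metadata => {:uid => "248af02e-7da9-49a4-b026-1dd1a341b0de", :name => "node01"},
  :status   => {
    :addresses => [
      {:type => "Hostname",   :address => "node01"},
      {:type => "InternalIP", :address => "192.0.2.10"}
    ],
    :nodeInfo  => {:osImage => "Fedora 38", :operatingSystem => "linux", :kernelVersion => "6.4.7"}
  }
}, :recurse_over_arrays => true)

addresses = node.status.addresses.index_by(&:type)
addresses["Hostname"]&.address # => "node01"
node.status.nodeInfo.osImage   # => "Fedora 38"
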
@@ -44,6 +44,6 @@ def parse
private

def get_object_ids(objects)
objects.map { |o| o.uid }.uniq
objects.map { |o| o.metadata.uid }.uniq
end
end
8 changes: 8 additions & 0 deletions config/secrets.defaults.yml
@@ -0,0 +1,8 @@
---
test:
kubevirt_defaults: &kubevirt_defaults
hostname: host.example.com
token: theToken
port: "8443"
kubevirt:
<<: *kubevirt_defaults
18 changes: 0 additions & 18 deletions spec/fixtures/files/collectors/one_node.json

This file was deleted.

48 changes: 0 additions & 48 deletions spec/fixtures/files/collectors/one_vm.json

This file was deleted.

1 change: 1 addition & 0 deletions spec/fixtures/files/template-without-parameters.yml
@@ -3,6 +3,7 @@ kind: Template
metadata:
name: template-without-parameters
namespace: default
uid: 7e6fb1ac-00ef-11e8-8840-525400b2cba8
annotations:
description: "OpenShift KubeVirt Cirros VM template"
tags: "kubevirt,openshift,template,linux"
@@ -0,0 +1,93 @@
describe ManageIQ::Providers::Kubevirt::InfraManager::Refresher do
context '#refresh' do
let(:ems) do
host = Rails.application.secrets.kubevirt[:hostname]
token = Rails.application.secrets.kubevirt[:token]
port = Rails.application.secrets.kubevirt[:port]
zone = EvmSpecHelper.local_miq_server.zone

FactoryBot.create(:ems_kubevirt,
:name => "Kubevirt Virtualization Manager",
:zone => zone).tap do |ems|
ems.parent_manager.authentications << FactoryBot.create(:authentication, {:authtype => :bearer,
:type => "AuthToken",
:auth_key => token,
:password => nil,
:userid => "_"})
ems.parent_manager.default_endpoint.update!(:role => :default,
:hostname => host,
:port => port,
:security_protocol => "ssl-without-validation")
end
end

it 'works correctly with one node' do
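# Run the refresh twice against the same cassette: the second pass should
# yield identical inventory (a basic idempotency check).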
2.times do
VCR.use_cassette(described_class.name.underscore) do
EmsRefresh.refresh(ems)
end

assert_counts
assert_specific_vm
assert_specific_host
assert_specific_cluster
assert_specific_storage
end
end

def assert_counts
expect(ems.vms.count).to eq(1)
expect(ems.hosts.count).to eq(6)
expect(ems.clusters.count).to eq(1)
expect(ems.storages.count).to eq(1)
end

def assert_specific_vm
vm = ems.vms.find_by(:name => "fedora-gold-porcupine-50")
expect(vm).to have_attributes(
:ems_ref => "50c54ad2-c2a6-44ae-89f5-14d2f313882c",
:name => "fedora-gold-porcupine-50",
:type => "ManageIQ::Providers::Kubevirt::InfraManager::Vm",
:uid_ems => "50c54ad2-c2a6-44ae-89f5-14d2f313882c",
:vendor => "kubevirt",
:power_state => "on",
:connection_state => "connected"
)
end

def assert_specific_host
host = ems.hosts.find_by(:ems_ref => "248af02e-7da9-49a4-b026-1dd1a341b0de")
expect(host).to have_attributes(
:connection_state => "connected",
:ems_ref => "248af02e-7da9-49a4-b026-1dd1a341b0de",
:type => "ManageIQ::Providers::Kubevirt::InfraManager::Host",
:uid_ems => "248af02e-7da9-49a4-b026-1dd1a341b0de",
:vmm_product => "KubeVirt",
:vmm_vendor => "kubevirt",
:vmm_version => "0.1.0",
:ems_cluster => ems.ems_clusters.find_by(:ems_ref => "0")
)
end

def assert_specific_cluster
cluster = ems.ems_clusters.find_by(:ems_ref => "0")
expect(cluster).to have_attributes(
:ems_ref => "0",
:name => "Kubevirt Virtualization Manager",
:uid_ems => "0",
:type => "ManageIQ::Providers::Kubevirt::InfraManager::Cluster"
)
end

def assert_specific_storage
storage = ems.storages.find_by(:ems_ref => "0")
expect(storage).to have_attributes(
:name => "Kubevirt Virtualization Manager",
:total_space => 0,
:free_space => 0,
:ems_ref => "0",
:type => "ManageIQ::Providers::Kubevirt::InfraManager::Storage"
)
end
end
end
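
The cassette name above comes from described_class.name.underscore. The spec assumes a VCR setup along these lines; the configure block is a sketch (the cassette directory and webmock hook are assumptions, while filter_sensitive_data keeps the real bearer token out of recorded cassettes):

require 'vcr'

VCR.configure do |config|
  config.cassette_library_dir = "spec/vcr_cassettes" # assumed location
  config.hook_into :webmock
  # Replace the real token with the placeholder from secrets.defaults.yml:
  config.filter_sensitive_data("theToken") do
    Rails.application.secrets.kubevirt[:token]
  end
end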