diff --git a/new-docs/404.html b/new-docs/404.html index d74ac568a..aba71ab37 100644 --- a/new-docs/404.html +++ b/new-docs/404.html @@ -1,2 +1,2 @@ 404 Page not found :: XAPI Toolstack Developer Documentation

404

Not found

Whoops. Looks like this page doesn't exist ¯\_(ツ)_/¯.

Go to homepage

\ No newline at end of file diff --git a/new-docs/categories/index.html b/new-docs/categories/index.html index 9c9fe0999..6f4021a22 100644 --- a/new-docs/categories/index.html +++ b/new-docs/categories/index.html @@ -1,10 +1,10 @@ Categories :: XAPI Toolstack Developer Documentation -

Categories

\ No newline at end of file diff --git a/new-docs/design/RDP/index.html b/new-docs/design/RDP/index.html index 020ebc6aa..f2285ba89 100644 --- a/new-docs/design/RDP/index.html +++ b/new-docs/design/RDP/index.html @@ -1,12 +1,12 @@ RDP control :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/aggr-storage-reboots/index.html b/new-docs/design/aggr-storage-reboots/index.html index f604564e0..1605da8af 100644 --- a/new-docs/design/aggr-storage-reboots/index.html +++ b/new-docs/design/aggr-storage-reboots/index.html @@ -1,12 +1,12 @@ Aggregated Local Storage and Host Reboots :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/archival-redesign/index.html b/new-docs/design/archival-redesign/index.html index 2cd07ae2c..7cb705c5a 100644 --- a/new-docs/design/archival-redesign/index.html +++ b/new-docs/design/archival-redesign/index.html @@ -1,5 +1,5 @@ RRDD archival redesign :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/backtraces/index.html b/new-docs/design/backtraces/index.html index 84ab16ece..66c95f9a3 100644 --- a/new-docs/design/backtraces/index.html +++ b/new-docs/design/backtraces/index.html @@ -1,5 +1,5 @@ Backtrace support :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/bonding-improvements/index.html b/new-docs/design/bonding-improvements/index.html index 10b341989..741c5ff44 100644 --- a/new-docs/design/bonding-improvements/index.html +++ b/new-docs/design/bonding-improvements/index.html @@ -1,5 +1,5 @@ Bonding Improvements design :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/coverage/index.html b/new-docs/design/coverage/index.html index 15ac2f9ad..9045f33c3 100644 --- a/new-docs/design/coverage/index.html +++ b/new-docs/design/coverage/index.html @@ -1,5 +1,5 @@ Code Coverage Profiling :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/cpu-levelling-v2/index.html b/new-docs/design/cpu-levelling-v2/index.html index cac71bca2..2baf0b721 100644 --- a/new-docs/design/cpu-levelling-v2/index.html +++ b/new-docs/design/cpu-levelling-v2/index.html @@ -1,5 +1,5 @@ CPU feature levelling 2.0 :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/distributed-database/index.html b/new-docs/design/distributed-database/index.html index fae277d8c..a40a50294 100644 --- a/new-docs/design/distributed-database/index.html +++ b/new-docs/design/distributed-database/index.html @@ -1,5 +1,5 @@ Distributed database :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/emergency-network-reset/index.html b/new-docs/design/emergency-network-reset/index.html index b8f7452a2..8acdea9dc 100644 --- a/new-docs/design/emergency-network-reset/index.html +++ b/new-docs/design/emergency-network-reset/index.html @@ -1,5 +1,5 @@ Emergency Network Reset Design :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/emulated-pci-spec/index.html b/new-docs/design/emulated-pci-spec/index.html index 3e35537fa..8585db52f 100644 --- a/new-docs/design/emulated-pci-spec/index.html +++ b/new-docs/design/emulated-pci-spec/index.html @@ -1,12 +1,12 @@ Specifying Emulated PCI Devices :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/fcoe-nics/index.html b/new-docs/design/fcoe-nics/index.html index ba7f65b37..9f9dbcb27 100644 --- a/new-docs/design/fcoe-nics/index.html +++ b/new-docs/design/fcoe-nics/index.html @@ -1,13 +1,13 @@ FCoE capable NICs :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/gpu-passthrough/index.html b/new-docs/design/gpu-passthrough/index.html index 0398f2eb4..8392a9991 100644 --- a/new-docs/design/gpu-passthrough/index.html +++ b/new-docs/design/gpu-passthrough/index.html @@ -1,5 +1,5 @@ GPU pass-through support :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/gpu-support-evolution/index.html b/new-docs/design/gpu-support-evolution/index.html index 4bd352a7d..668b140c6 100644 --- a/new-docs/design/gpu-support-evolution/index.html +++ b/new-docs/design/gpu-support-evolution/index.html @@ -1,5 +1,5 @@ GPU support evolution :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/heterogeneous-pools/index.html b/new-docs/design/heterogeneous-pools/index.html index cc2b54c95..a377eb33b 100644 --- a/new-docs/design/heterogeneous-pools/index.html +++ b/new-docs/design/heterogeneous-pools/index.html @@ -1,5 +1,5 @@ Heterogeneous pools :: XAPI Toolstack Developer Documentation -
\ No newline at end of file diff --git a/new-docs/design/index.html b/new-docs/design/index.html index b3a75d9f2..9bc97893d 100644 --- a/new-docs/design/index.html +++ b/new-docs/design/index.html @@ -1,5 +1,5 @@ Design Documents :: XAPI Toolstack Developer Documentation -


Design Documents

Key: Revision Proposed Confirmed Released (vA.B) @@ -120,9 +120,9 @@ released (xenserver 6.5 sp1)
\ No newline at end of file + 
\ No newline at end of file diff --git a/new-docs/design/index.print.html b/new-docs/design/index.print.html index a611ce217..e7933e744 100644 --- a/new-docs/design/index.print.html +++ b/new-docs/design/index.print.html @@ -1,5 +1,5 @@ Design Documents :: XAPI Toolstack Developer Documentation -


Design Documents

Key: Revision Proposed Confirmed Released (vA.B) @@ -383,8 +383,8 @@ produce a summary of annotated code that highlights what part of a codebase was executed.

BisectPPX has several desirable properties:

  • a robust code base that is well tested
  • it is easy to integrate into the compilation pipeline (see below)
  • it is specific to the OCaml language; an expression-oriented language like OCaml doesn’t fit traditional statement coverage well
  • it is actively maintained
  • it generates useful reports for interactive and non-interactive use that help to improve code coverage

Coverage Analysis

Red parts indicate code that wasn’t executed whereas green parts were. Hovering over a dark green spot reveals how often that point was executed.

The individual steps of instrumenting code with BisectPPX are greatly abstracted by OCamlfind (OCaml’s library manager) and OCamlbuild @@ -527,16 +527,16 @@ If we placed our host and VM metadata in git then we could commit changes and pull and push them between replicas. The Irmin library provides an easy programming interface on top of git which we could link with the Xapi database layer.

Proposed new architecture

Pools of one

The diagram above shows two hosts: one a master and the other a regular host. The XenAPI client has sent a request to the wrong host; normally this would result in a HOST_IS_SLAVE error being sent to the client. In the new world, the host is able to process the request, only contacting the master if it is necessary to acquire a lock. Starting a VM would require a lock; but rebooting or migrating an existing VM would not. Assuming the lock can be acquired, the operation is then executed locally with all state updates being made to a git topic branch.

Topic branches

Roughly we would have 1 topic branch per pending XenAPI Task. Once the Task completes successfully, the topic branch (containing the new VM state) is merged back into master. Separately each @@ -977,8 +977,8 @@ disable_on_reboot.

Disabling dom0 access will modify the xen commandline (using the xen-cmdline tool) such that dom0 will not be able to access the GPU on next boot.

Calling host.disable_display will modify the xen and dom0 commandlines such that neither will attempt to send console output to the system display device.

A state diagram for the fields PGPU.dom0_access and host.display is shown below:

host.integrated_GPU_passthrough flow diagram

While it is possible for these two fields to be modified independently, a client must disable both the host display and dom0 access to the system display device before that device can be passed through to a guest.

Note that when a client enables or disables either of these fields, the change can be cancelled until the host is rebooted.

Handling vga_arbiter

Currently, xapi will not create a PGPU object for the PCI device with address @@ -1102,8 +1102,8 @@ any additional device models will be dealt with entirely by xenopsd.

Design document
Revision v1
Status proposed

OCFS2 storage

OCFS2 is a (host-)clustered filesystem which runs on top of a shared raw block device. Hosts using OCFS2 form a cluster using a combination of network and storage heartbeats and host fencing to avoid split-brain.

    The following diagram shows the proposed architecture with xapi:

Proposed architecture

    Please note the following:

    • OCFS2 is configured to use global heartbeats rather than per-mount heartbeats because we quite often have many SRs and therefore many mountpoints
    • The OCFS2 global heartbeat should be collocated on the same SR as the XenServer HA SR so that we depend on fewer SRs (the storage is a single point of failure for OCFS2)
    • The OCFS2 global heartbeat should itself be a raw VDI within an LVHDSR.
    • Every host can be in at-most-one OCFS2 cluster i.e. the host cluster membership @@ -1247,18 +1247,18 @@ probably need to do this to avoid fencing.

      Walk-through: adding OCFS2 storage

Assume you have an existing Pool of 2 hosts. First the client will set up the O2CB cluster, choosing where to put the global heartbeat volume. The client should check that the I/O paths have all been set up correctly with bonding and multipath and prompt the user to fix any obvious problems.

The client enables O2CB and then creates an SR

Internally within Pool.enable_o2cb Xapi will set up the cluster metadata on every host in the pool:

Xapi creates the cluster configuration and each host updates its metadata

At this point all hosts have in-sync cluster.conf files but all cluster services are disabled. We also have requires_maintenance=true on all Membership entries and the global Cluster has enabled=false. The client will now try to enable the cluster with Cluster.enable:

Xapi enables the cluster software on all hosts

      Now all hosts are in the cluster and the SR can be created using the standard SM APIs.

      Walk-through: remove a host

Assume you have an existing Pool of 2 hosts with o2cb clustering enabled and at least one ocfs2 filesystem mounted. If the host is online then XenAPI:Pool.eject will:

Xapi ejects a host from the pool

        Note that:

        • All hosts will have modified their o2cb cluster.conf to comment out the former host
        • The Membership table still remembers the node number of the ejected host – this cannot be re-used until the SR is taken down for maintenance.
        • All hosts can see the difference between their current cluster.conf and the one they would use if they restarted the cluster service, so all @@ -1539,8 +1539,8 @@ using the XenAPI from within a plugin, which is racy, difficult to get right, unscalable and makes component testing impossible.

        • the protocol expects plugin authors to have a deep knowledge of the Xen storage datapath (tapdisk, blkback etc) and the storage.

        • the protocol is undocumented.

We shall create a new revision of the protocol (“SMAPIv3”) to address these problems.

          The following diagram shows the new control plane:

Storage control plane

          Requests from xapi are filtered through the existing storage_access layer which is responsible for managing the mapping between VM VBDs and VDIs.

          Each plugin is represented by a named queue, with APIs for

          • querying the state of each queue
          • explicitly cancelling or replying to messages

          Legacy SMAPIv1 plugins will be processed via the existing storage_access.SMAPIv1 module. Newer SMAPIv3 plugins will be handled by a new xapi-storage-script @@ -1552,8 +1552,8 @@ plugin, and see whether

          • the queue is being served or not (perhaps the xapi-storage-script has crashed)
          • there are unanswered messages (perhaps one of the messages has caused a deadlock in the implementation?)

          It will be possible to

          • delete/clear queues/messages
          • download a message-sequence chart of the last N messages for inclusion in bugtools.

          Anatomy of a plugin

          The following diagram shows what a plugin would look like:

Anatomy of a plugin

The SMAPIv3

Please read the current SMAPIv3 documentation.

Design document
Revision v1
Status proposed

Specifying Emulated PCI Devices

Background and goals

At present (early March 2015) the datamodel defines a VM as having a “platform” string-string map, in which two keys are interpreted as specifying a PCI device which should be emulated for the VM. Those keys are “device_id” and “revision” (with int values represented as decimal strings).

Limitations:

  • Hardcoded defaults are used for the vendor ID and all other parameters except device_id and revision.
  • Only one emulated PCI device can be specified.

When instructing qemu to emulate PCI devices, qemu accepts twelve parameters for each device.

Future guest-agent features rely on additional emulated PCI devices. We cannot know in advance the full details of all the devices that will be needed, but we can predict some.

We need a way to configure VMs such that they will be given additional emulated PCI devices.

Design

In the datamodel, there will be a new type of object for emulated PCI devices.

Tentative name: “emulated_pci_device”

Fields to be passed through to qemu are the following, all static read-only, and all ints except devicename:

  • devicename (string)
  • vendorid
  • deviceid
  • command
  • status
  • revision
  • classcode
  • headertype
  • subvendorid
  • subsystemid
  • interruptline
  • interruptpin

We also need a “built_in” flag: see below.

Allow creation of these objects through the API (and CLI).

(It would be nice, but by no means essential, to be able to create one by specifying an existing one as a basis, along with one or more altered fields, e.g. “Make a new one just like that existing one except with interruptpin=9.”)

Create some of these devices to be defined as standard in XenServer, along the same lines as the VM templates. Those ones should have built_in=true.

Allow destruction of these objects through the API (and CLI), but not if they are in use or if they have built_in=true.

A VM will have a list of zero or more of these emulated-pci-device objects. (OPEN QUESTION: Should we forbid having more than one of a given device?)

Provide API (and CLI) commands to add and remove one of these devices from a VM (identifying the VM and device by uuid or other identifier such as name).

The CLI should allow performing this on multiple VMs in one go, based on a selector or filter for the VMs. We have this concept already in the CLI in commands such as vm-start.

In the function that adds an emulated PCI device to a VM, we must check if this is the first device to be added, and must refuse if the VM’s Virtual Hardware Platform Version is too low. (Or should we just raise the version automatically if needed?)

When starting a VM, check its list of emulated pci devices and pass the details through to qemu (via xenopsd).
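As a sketch of the add-to-VM check described above, using plain dictionaries in place of real datamodel objects (the threshold constant, helper name and example field values are hypothetical):

MIN_VHP_VERSION_FOR_EXTRA_PCI = 1  # assumed threshold, cf. the Virtual Hardware Platform Version design

def add_emulated_pci_device(vm, dev):
    if not vm["emulated_pci_devices"]:
        # First device being added: refuse if the VM's Virtual Hardware
        # Platform Version is too low (or, per the open question, raise it).
        if vm["hardware_platform_version"] < MIN_VHP_VERSION_FOR_EXTRA_PCI:
            raise ValueError("Virtual Hardware Platform Version too low")
    vm["emulated_pci_devices"].append(dev)

vm = {"hardware_platform_version": 1, "emulated_pci_devices": []}
add_emulated_pci_device(vm, {"devicename": "example", "vendorid": 0x5853,
                             "deviceid": 0x0002, "revision": 1})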

Design document
Revision v11
Status confirmed
Review #139
Revision history
v1: Initial version
v2: Added details about the VDI's binary format and size, and the SR capability name.
v3: Tar was not needed after all!
v4: Add details about discovering the VDI using a new vdi_type.
v5: Add details about the http handlers and interaction with xapi's database
v6: Add details about the framing of the data within the VDI
v7: Redesign semantics of the rrd_updates handler
v8: Redesign semantics of the rrd_updates handler (again)
v9: Magic number change in framing format of vdi
v10: Add details of new APIs added to xapi and xcp-rrdd
v11: Remove unneeded API calls

SR-Level RRDs

Introduction

Xapi has RRDs to track VM- and host-level metrics. There is a desire to have SR-level RRDs as a new category, because SR stats are not specific to a certain VM or host. Examples are size and free space on the SR. While recording SR metrics is relatively straightforward within the current RRD system, the main question is where to archive them, which is what this design aims to address.

Stats Collection

All SR types, including the existing ones, should be able to have RRDs defined for them. Some RRDs, such as a “free space” one, may make sense for multiple (if not all) SR types. However, the way to measure something like free space will be SR specific. Furthermore, it should be possible for each type of SR to have its own specialised RRDs.

It follows that each SR will need its own xcp-rrdd plugin, which runs on the SR master and defines and collects the stats. For the new thin-lvhd SR this could be xenvmd itself. The plugin registers itself with xcp-rrdd, so that the latter records the live stats from the plugin into RRDs.
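Schematically, such a plugin might look like the sketch below; the register/report API is a stand-in for the real xcp-rrdd plugin interface, and the five-second period is arbitrary:

import time

def free_space_bytes(sr):
    # SR-specific measurement; for thin-lvhd this might come from xenvmd.
    return sr["size"] - sr["used"]

def run_stats_plugin(rrdd, sr):
    rrdd.register("sr_" + sr["uuid"])      # stand-in registration call
    while True:
        rrdd.report({"free_space": free_space_bytes(sr)})
        time.sleep(5)                      # xcp-rrdd records the values into RRDs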

Archiving

SR-level RRDs will be archived in the SR itself, in a VDI, rather than in the local filesystem of the SR master. This way, we don’t need to worry about master failover.

The VDI will be 4MB in size. This is a little more space than we would need for the RRDs we have in mind at the moment, but will give us enough headroom for the foreseeable future. It will not have a filesystem on it for simplicity and performance. There will only be one RRD archive file for each SR (possibly containing data for multiple metrics), which is gzipped by xcp-rrdd, and can be copied onto the VDI.

There will be a simple framing format for the data on the VDI. This will be as follows:

Offset  Type                       Name     Comment
0       32-bit network-order int   magic    Magic number = 0x7ada7ada
4       32-bit network-order int   version  1
8       32-bit network-order int   length   Length of payload
12      gzipped data               data
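A minimal Python sketch of this framing (the layout follows the table above; the helper names and use of the gzip module are illustrative):

import gzip
import struct

MAGIC = 0x7ada7ada
VERSION = 1

def frame_rrd(payload):
    # Three 32-bit network-order (big-endian) ints, then the gzipped payload.
    data = gzip.compress(payload)
    return struct.pack("!III", MAGIC, VERSION, len(data)) + data

def unframe_rrd(blob):
    # Check the header and return the uncompressed RRD data.
    magic, version, length = struct.unpack("!III", blob[:12])
    if magic != MAGIC:
        raise ValueError("bad magic %#x" % magic)
    if version != VERSION:
        raise ValueError("unsupported framing version %d" % version)
    return gzip.decompress(blob[12:12 + length])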

Xapi will be in charge of the lifecycle of this VDI, not the plugin or xcp-rrdd, which will make it a little easier to manage them. Only xapi will attach/detach and read from/write to this VDI. We will keep xcp-rrdd as simple as possible, and have it archive to its standard path in the local file system. Xapi will then copy the RRDs in and out of the VDI.

A new value "rrd" in the vdi_type enum of the datamodel will be defined, and the VDI.type of the VDI will be set to that value. The storage backend will write the VDI type to the LVM metadata of the VDI, so that xapi can discover the VDI containing the SR-level RRDs when attaching an SR to a new pool. This means that SR-level RRDs are currently restricted to LVM SRs.

Because we will not write plugins for all SRs at once, and therefore do not need xapi to set up the VDI for all SRs, we will add an SR “capability” for the backends to be able to tell xapi whether it has the ability to record stats and will need storage for them. The capability name will be: SR_STATS.

Management of the SR-stats VDI

The SR-stats VDI will be attached/detached on PBD.plug/unplug on the SR master.

  • On PBD.plug on the SR master, if the SR has the stats capability, xapi:

    • Creates a stats VDI if not already there (search for an existing one based on the VDI type).
    • Attaches the stats VDI if it did already exist, and copies the RRDs to the local file system (standard location in the filesystem; asks xcp-rrdd where to put them).
    • Informs xcp-rrdd about the RRDs so that it will load the RRDs and add newly recorded data to them (needs a function like push_rrd_local for VM-level RRDs).
    • Detaches stats VDI.
  • On PBD.unplug on the SR master, if the SR has the stats capability, xapi:

    • Tells xcp-rrdd to archive the RRDs for the SR, which it will do to the local filesystem.
    • Attaches the stats VDI, copies the RRDs into it, detaches VDI.
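Schematically, the plug/unplug handling above might look as follows; every name here (the xapi and rrdd handles, the helper methods) is hypothetical and stands in for internal xapi code:

SR_STATS = "SR_STATS"

def on_pbd_plug_master(sr, xapi, rrdd):
    # PBD.plug on the SR master for a stats-capable SR.
    if SR_STATS not in sr["capabilities"]:
        return
    vdi = sr.get("stats_vdi")               # found via the new "rrd" vdi_type
    if vdi is None:
        sr["stats_vdi"] = xapi.create_vdi(sr, type="rrd", size=4 * 1024 * 1024)
    else:
        xapi.attach(vdi)
        rrds = xapi.copy_rrds_from(vdi)     # to the standard local path
        rrdd.push_sr_rrds(rrds)             # cf. push_rrd_local for VM RRDs
        xapi.detach(vdi)

def on_pbd_unplug_master(sr, xapi, rrdd):
    # PBD.unplug: archive locally, then copy the archives into the VDI.
    if SR_STATS not in sr["capabilities"]:
        return
    rrds = rrdd.archive_sr_rrds(sr)         # archives to the local filesystem
    vdi = sr["stats_vdi"]
    xapi.attach(vdi)
    xapi.copy_rrds_to(vdi, rrds)
    xapi.detach(vdi)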

Periodic Archiving

Xapi’s periodic scheduler regularly triggers xcp-rrdd to archive the host and VM RRDs. It will need to do this for the SR ones as well. Furthermore, xapi will need to attach the stats VDI and copy the RRD archives into it (as on PBD.unplug).

Exporting

There will be a new handler for downloading an SR RRD:

http://<server>/sr_rrd?session_id=<SESSION HANDLE>&uuid=<SR UUID>
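For example, assuming a session reference obtained through the XenAPI client library, a client might fetch the archive like this (the host name, credentials and use of the _session attribute are illustrative):

import urllib.request
import XenAPI

session = XenAPI.Session("http://xshost")
session.login_with_password("username", "password")
url = ("http://xshost/sr_rrd?session_id=%s&uuid=%s"
       % (session._session, "<SR UUID>"))
with urllib.request.urlopen(url) as response:
    sr_rrd = response.read()  # the SR RRD archive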
 

RRD updates are handled via a single handler for the host, VM and SR UUIDs @@ -1594,8 +1594,8 @@ in the common-case; in particular there is no network RPC needed

  • when the resource pool master host has failed, allocations can still continue, up to some limit, allowing time for the master host to be recovered; in particular there is no need for very low HA timeouts.
  • we can (in future) support in-kernel block allocation through the device mapper dm-thin target.

    The following diagram shows the “Allocation plane”:

Allocation plane

All VM disk writes are channelled through tapdisk which keeps track of the remaining reserved space within the device mapper device. When the free space drops below a “low-water mark”, tapdisk sends a message to a local per-SR daemon called local-allocator and requests more @@ -1630,8 +1630,8 @@ from “block for more than 120 seconds” issue due to slow I/O. This known issue is that slow I/O during dirty-page writeback/flush may cause memory starvation, and then other userland processes or kernel threads would be blocked.

    The following diagram shows the control-plane:

control plane

    When thin-provisioning is enabled we will be modifying the LVM metadata at an increased rate. We will cache the current metadata in the xenvmd process and funnel all queries through it, rather than “peeking” at the metadata on-disk. Note it will still be possible to peek at the on-disk metadata but it @@ -2220,4 +2220,4 @@ create a new VGPU_type object.

    Design document
Revision v1
Status released (7.0)

    Virtual Hardware Platform Version

    Background and goal

    Some VMs can only be run on hosts of sufficiently recent versions.

    We want a clean way to ensure that xapi only tries to run a guest VM on a host that supports the “virtual hardware platform” required by the VM.

    Suggested design

    • In the datamodel, VM has a new integer field “hardware_platform_version” which defaults to zero.
    • In the datamodel, Host has a corresponding new integer-list field “virtual_hardware_platform_versions” which defaults to a list containing a single zero element (i.e. [0] or [0L] in OCaml notation). The zero represents the implicit version supported by older hosts that lack the code to handle the Virtual Hardware Platform Version concept.
    • When a host boots it populates its own entry from a hardcoded value, currently [0; 1] i.e. a list containing the two integer elements 0 and 1. (Alternatively this could come from a config file.)
      • If this new version-handling functionality is introduced in a hotfix, at some point the pool master will have the new functionality while at least one slave does not. An old slave-host that does not yet have software to handle this feature will not set its DB entry, which will therefore remain as [0] (maintained in the DB by the master).
    • The existing test for whether a VM can run on (or migrate to) a host must include a check that the VM’s virtual hardware platform version is in the host’s list of supported versions.
    • When a VM is made to start using a feature that is available only in a certain virtual hardware platform version, xapi must set the VM’s hardware_platform_version to the maximum of that version-number and its current value (i.e. raise if needed).

    For the version we could consider some type other than integer, but a strict ordering is needed.
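A minimal sketch of the two rules above, using plain dictionaries in place of datamodel objects:

def vm_can_run_on(vm, host):
    # The VM's version must be one the host explicitly supports.
    return vm["hardware_platform_version"] in host["virtual_hardware_platform_versions"]

def require_version(vm, version):
    # Raise the VM's version when a newly used feature demands it.
    vm["hardware_platform_version"] = max(vm["hardware_platform_version"], version)

vm = {"hardware_platform_version": 0}
old_host = {"virtual_hardware_platform_versions": [0]}
new_host = {"virtual_hardware_platform_versions": [0, 1]}
require_version(vm, 1)              # the VM starts using the version-1 feature
assert vm_can_run_on(vm, new_host)
assert not vm_can_run_on(vm, old_host)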

    First use-case

    Version 1 denotes support for a certain feature:

    When a VM starts, if a certain flag is set in VM.platform then XenServer will provide an emulated PCI device which will trigger the guest Windows OS to seek drivers for the device, or updates for those drivers. Thus updated drivers can be obtained through the standard Windows Update mechanism.

    If the PCI device is removed, the guest OS will fail to boot. A VM using this feature must not be migrated to or started on a XenServer that lacks support for the feature.

    Therefore at VM start, we can look at whether this feature is being used; if it is, then if the VM’s Virtual Hardware Platform Version is less than 1 we should raise it to 1.

    Limitation

    Consider a VM that requires version 1 or higher. Suppose it is exported, then imported into an old host that does not support this feature. Then the host will not check the versions but will attempt to run the VM, which will then have difficulties.

    The only way to prevent this would be to make a backwards-incompatible change to the VM metadata (e.g. a new item in an enum) so that the old hosts cannot read it, but that seems like a bad idea.

    Design document
Revision v2
Status proposed

    XenPrep

    Background

    Windows guests should have XenServer-specific drivers installed. As of mid-2015 these have always been installed and upgraded by an essentially manual process involving an ISO carrying the drivers. We have a plan to enable automation through the standard Windows Update mechanism. This will involve a new additional virtual PCI device being provided to the VM, to trigger Windows Update to fetch drivers for the device.

    There are many existing Windows guests that have drivers installed already. These drivers must be uninstalled before the new drivers are installed (and ideally before the new PCI device is added). To make this easier, we are planning a XenAPI call that will cause the removal of the old drivers and the addition of the new PCI device.

    Since this is only to help with updating old guests, the call may well be removed at some point in the future.

    Brief high-level design

    The XenAPI call will be called VM.xenprep_start. It will update the VM record to note that the process has started, and will insert a special ISO into the VM’s virtual CD drive.

    That ISO will contain a tool which will be set up to auto-run (if auto-run is enabled in the guest). The tool will:

    1. Lock the CD drive so other Windows programs cannot eject the disc.
    2. Uninstall the old drivers.
    3. Eject the CD to signal success.
    4. Shut down the VM.

    XenServer will interpret the ejection of the CD as a success signal, and when the VM shuts down without the special ISO in the drive, XenServer will:

    1. Update the VM record:
    • Remove the mark that shows that the xenprep process is in progress
    • Give it the new PCI device: set VM.auto_update_drivers to true.
    • If VM.virtual_hardware_platform_version is less than 2, then set it to 2.
    2. Start the VM.

    More details of the xapi-project parts

    (The tool that runs in the guest is out of scope for this document.)

    Start

    The XenAPI call VM.xenprep_start will throw a power-state error if the VM is not running. For RBAC roles, it will be available to “VM Operator” and above.

    It will:

    1. Insert the xenprep ISO into the VM’s virtual CD drive.
    2. Write VM.other_config key xenprep_progress=ISO_inserted to record the fact that the xenprep process has been initiated.

    If xenprep_start is called on a VM already undergoing xenprep, the call will return successfully but will not do anything.

    If the VM does not have an empty virtual CD drive, the call will fail with a suitable error.

    Cancellation

    While xenprep is in progress, any request to eject the xenprep ISO (except from inside the guest) will be rejected with a new error “VBD_XENPREP_CD_IN_USE”.

    There will be a new XenAPI call VM.xenprep_abort which will:

    1. Remove the xenprep_progress entry from VM.other_config.
    2. Make a best-effort attempt to eject the CD. (The guest might prevent ejection.)

    This is not intended for cancellation while the xenprep tool is running, but rather for use before it starts, for example if auto-run is disabled or if the VM has a non-Windows OS.

    Completion

    Aim: when the guest shuts down after ejecting the CD, XenServer will start the guest again with the new PCI device.

    Xapi works through the queue of events it receives from xenopsd. It is possible that by the time xapi processes the cd-eject event, the guest might have shut down already.

    When the shutdown (not reboot) event is handled, we shall check whether we need to do anything xenprep-related. If

    • The VM other_config map has xenprep_progress as either of ISO_inserted or shutdown, and
    • The xenprep ISO is no longer in the drive

    then we must (in the specified order)

    1. Update the VM record:
    • In VM.other_config set xenprep_progress=shutdown
    • If VM.virtual_hardware_platform_version is less than 2, then set it to 2.
    • Give it the new PCI device: set VM.auto_update_drivers to true.
    2. Initiate VM start.
    3. Remove xenprep_progress from VM.other_config

    The most relevant code is probably the update_vm function in ocaml/xapi/xapi_xenops.ml in the xen-api repo (or in some function called from there).
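An illustrative sketch of that completion check, following the ordered steps above (the function and its parameters are hypothetical):

def on_vm_shutdown(vm, iso_still_inserted, start_vm):
    # Only act if xenprep was in progress and the tool ejected the ISO.
    if vm["other_config"].get("xenprep_progress") not in ("ISO_inserted", "shutdown"):
        return
    if iso_still_inserted(vm):
        return
    vm["other_config"]["xenprep_progress"] = "shutdown"
    if vm["virtual_hardware_platform_version"] < 2:
        vm["virtual_hardware_platform_version"] = 2
    vm["auto_update_drivers"] = True    # provides the new PCI device
    start_vm(vm)
    del vm["other_config"]["xenprep_progress"]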

    \ No newline at end of file diff --git a/new-docs/design/integrated-gpu-passthrough/index.html b/new-docs/design/integrated-gpu-passthrough/index.html index 40e85ab67..47ccbc5fd 100644 --- a/new-docs/design/integrated-gpu-passthrough/index.html +++ b/new-docs/design/integrated-gpu-passthrough/index.html @@ -1,5 +1,5 @@ Integrated GPU passthrough support :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/local-database/index.html b/new-docs/design/local-database/index.html index db88250a3..aedd40d37 100644 --- a/new-docs/design/local-database/index.html +++ b/new-docs/design/local-database/index.html @@ -1,5 +1,5 @@ Local database :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/management-interface-on-vlan/index.html b/new-docs/design/management-interface-on-vlan/index.html index a821dbe5b..925f8c033 100644 --- a/new-docs/design/management-interface-on-vlan/index.html +++ b/new-docs/design/management-interface-on-vlan/index.html @@ -1,5 +1,5 @@ Management Interface on VLAN :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/multiple-cluster-managers/index.html b/new-docs/design/multiple-cluster-managers/index.html index b0e0e0c86..ad01b68eb 100644 --- a/new-docs/design/multiple-cluster-managers/index.html +++ b/new-docs/design/multiple-cluster-managers/index.html @@ -1,13 +1,13 @@ Multiple Cluster Managers :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/multiple-device-emulators/index.html b/new-docs/design/multiple-device-emulators/index.html index 652f5e4e9..024c2b154 100644 --- a/new-docs/design/multiple-device-emulators/index.html +++ b/new-docs/design/multiple-device-emulators/index.html @@ -1,5 +1,5 @@ Multiple device emulators :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/ocfs2/index.html b/new-docs/design/ocfs2/index.html index c45b5de4f..6183a5e86 100644 --- a/new-docs/design/ocfs2/index.html +++ b/new-docs/design/ocfs2/index.html @@ -1,10 +1,10 @@ OCFS2 storage :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/patches-in-vdis/index.html b/new-docs/design/patches-in-vdis/index.html index 5454eefa7..653d439ed 100644 --- a/new-docs/design/patches-in-vdis/index.html +++ b/new-docs/design/patches-in-vdis/index.html @@ -1,5 +1,5 @@ patches in VDIs :: XAPI Toolstack Developer Documentation -
    Design document
Revision v1
Status proposed

    patches in VDIs

    “Patches” are signed binary blobs which can be queried and applied. They are stored in the dom0 filesystem under /var/patch. Unfortunately the patches can be quite large – imagine a repo full of RPMs – and @@ -33,9 +33,9 @@ to apply a patch on that host.

    \ No newline at end of file + 
    \ No newline at end of file diff --git a/new-docs/design/pci-passthrough/index.html b/new-docs/design/pci-passthrough/index.html index 4db0e5172..af353990e 100644 --- a/new-docs/design/pci-passthrough/index.html +++ b/new-docs/design/pci-passthrough/index.html @@ -1,5 +1,5 @@ PCI passthrough support :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/pif-properties/index.html b/new-docs/design/pif-properties/index.html index 791d24fda..14ea417ba 100644 --- a/new-docs/design/pif-properties/index.html +++ b/new-docs/design/pif-properties/index.html @@ -1,5 +1,5 @@ GRO and other properties of PIFs :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/plugin-protocol-v2/index.html b/new-docs/design/plugin-protocol-v2/index.html index 0c7a21f57..310061fb0 100644 --- a/new-docs/design/plugin-protocol-v2/index.html +++ b/new-docs/design/plugin-protocol-v2/index.html @@ -1,5 +1,5 @@ RRDD plugin protocol v2 :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/plugin-protocol-v3/index.html b/new-docs/design/plugin-protocol-v3/index.html index e51b20861..82c8393ba 100644 --- a/new-docs/design/plugin-protocol-v3/index.html +++ b/new-docs/design/plugin-protocol-v3/index.html @@ -1,5 +1,5 @@ RRDD plugin protocol v3 :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/pool-certificates/index.html b/new-docs/design/pool-certificates/index.html index 8a536801c..20648aa40 100644 --- a/new-docs/design/pool-certificates/index.html +++ b/new-docs/design/pool-certificates/index.html @@ -1,5 +1,5 @@ TLS vertification for intra-pool communications :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/pool-wide-ssh/index.html b/new-docs/design/pool-wide-ssh/index.html index 8dadc2fc8..66e60a8f3 100644 --- a/new-docs/design/pool-wide-ssh/index.html +++ b/new-docs/design/pool-wide-ssh/index.html @@ -1,5 +1,5 @@ Pool-wide SSH :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/schedule-snapshot/index.html b/new-docs/design/schedule-snapshot/index.html index 0ab6fbb45..da4bc3b37 100644 --- a/new-docs/design/schedule-snapshot/index.html +++ b/new-docs/design/schedule-snapshot/index.html @@ -1,12 +1,12 @@ Schedule Snapshot Design :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/smapiv3/index.html b/new-docs/design/smapiv3/index.html index 6583f09e9..397fe5dd5 100644 --- a/new-docs/design/smapiv3/index.html +++ b/new-docs/design/smapiv3/index.html @@ -1,12 +1,12 @@ SMAPIv3 :: XAPI Toolstack Developer Documentation -
    Design document
Revision v1
Status released (7.6)

    SMAPIv3

    Xapi accesses storage through “plugins” which currently use a protocol called “SMAPIv1”. This protocol has a number of problems:

    1. the protocol has many missing features, and this leads to people using the XenAPI from within a plugin, which is racy, difficult to get right, unscalable and makes component testing impossible.

    2. the protocol expects plugin authors to have a deep knowledge of the Xen storage datapath (tapdisk, blkback etc) and the storage.

    3. the protocol is undocumented.

    We shall create a new revision of the protocol (“SMAPIv3”) to address these problems.

    The following diagram shows the new control plane:

    Storage control plane

    Requests from xapi are filtered through the existing storage_access layer which is responsible for managing the mapping between VM VBDs and VDIs.

    Each plugin is represented by a named queue, with APIs for

    • querying the state of each queue
    • explicitly cancelling or replying to messages
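A toy model of such a named queue with the two operations above (purely illustrative; the real queues are provided by the message-switch):

class PluginQueue:
    # Toy model: one named queue per storage plugin.

    def __init__(self, name):
        self.name = name
        self.pending = {}  # message id -> (request, reply_callback)

    def query(self):
        # "querying the state of each queue"
        return {"name": self.name, "unanswered": len(self.pending)}

    def cancel(self, msg_id):
        # "explicitly cancelling ... messages"
        self.pending.pop(msg_id, None)

    def reply(self, msg_id, result):
        # "... or replying to messages"
        _request, callback = self.pending.pop(msg_id)
        callback(result)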

    Legacy SMAPIv1 plugins will be processed via the existing storage_access.SMAPIv1 module. Newer SMAPIv3 plugins will be handled by a new xapi-storage-script @@ -18,13 +18,13 @@ plugin, and see whether

    • the queue is being served or not (perhaps the xapi-storage-script has crashed)
    • there are unanswered messages (perhaps one of the messages has caused a deadlock in the implementation?)

    It will be possible to

    • delete/clear queues/messages
    • download a message-sequence chart of the last N messages for inclusion in bugtools.

    Anatomy of a plugin

    The following diagram shows what a plugin would look like:

    Anatomy of a plugin

    The SMAPIv3

    Please read the current SMAPIv3 documentation.

    \ No newline at end of file diff --git a/new-docs/design/snapshot-revert/index.html b/new-docs/design/snapshot-revert/index.html index c2c706de2..a240f41bd 100644 --- a/new-docs/design/snapshot-revert/index.html +++ b/new-docs/design/snapshot-revert/index.html @@ -1,5 +1,5 @@ Improving snapshot revert behaviour :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/sr-level-rrds/index.html b/new-docs/design/sr-level-rrds/index.html index 62b737d0d..82d3c3ae2 100644 --- a/new-docs/design/sr-level-rrds/index.html +++ b/new-docs/design/sr-level-rrds/index.html @@ -1,5 +1,5 @@ SR-Level RRDs :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/thin-lvhd/index.html b/new-docs/design/thin-lvhd/index.html index e22bac872..b4bfc0373 100644 --- a/new-docs/design/thin-lvhd/index.html +++ b/new-docs/design/thin-lvhd/index.html @@ -1,5 +1,5 @@ thin LVHD storage :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/tunnelling/index.html b/new-docs/design/tunnelling/index.html index 2fcd6408e..8d280906f 100644 --- a/new-docs/design/tunnelling/index.html +++ b/new-docs/design/tunnelling/index.html @@ -1,5 +1,5 @@ Tunnelling API design :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/user-certificates/index.html b/new-docs/design/user-certificates/index.html index 41451ad3b..0052f8eb6 100644 --- a/new-docs/design/user-certificates/index.html +++ b/new-docs/design/user-certificates/index.html @@ -1,5 +1,5 @@ User-installable host certificates :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/vgpu-type-identifiers/index.html b/new-docs/design/vgpu-type-identifiers/index.html index b9c32c701..50e814bed 100644 --- a/new-docs/design/vgpu-type-identifiers/index.html +++ b/new-docs/design/vgpu-type-identifiers/index.html @@ -1,5 +1,5 @@ VGPU type identifiers :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/virt-hw-platform-vn/index.html b/new-docs/design/virt-hw-platform-vn/index.html index 92381651d..487c139d1 100644 --- a/new-docs/design/virt-hw-platform-vn/index.html +++ b/new-docs/design/virt-hw-platform-vn/index.html @@ -1,12 +1,12 @@ Virtual Hardware Platform Version :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/design/xenopsd_events/index.html b/new-docs/design/xenopsd_events/index.html index 067e458e7..08c006a91 100644 --- a/new-docs/design/xenopsd_events/index.html +++ b/new-docs/design/xenopsd_events/index.html @@ -1,5 +1,5 @@ Process events from xenopsd in a timely manner :: XAPI Toolstack Developer Documentation -
    Design document
Revision v1
Status proposed

    Process events from xenopsd in a timely manner

    Background

    There is a significant delay between the VM being unpaused and XAPI reporting it as started during a bootstorm. It can happen that the VM is able to send UDP packets already, but XAPI still reports it as not started for minutes.

    XAPI currently processes all events from xenopsd in a single thread, the unpause @@ -15,9 +15,9 @@ and that a tag with a lot of items does not starve the execution of other tags.

    The XAPI side of the changes will look like this:

    Known limitations: the active per-VM events should be a small number; this is already ensured in the push_with_coalesce / should_keep code on the xenopsd side. Events to XAPI from xenopsd should already arrive coalesced.
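The fairness idea (queue events per tag and serve the tags round-robin, so that a tag with many items cannot starve other tags) can be sketched as follows; this is an illustration in Python, not the actual OCaml implementation:

from collections import OrderedDict, deque

queues = OrderedDict()  # tag (e.g. a VM id) -> queue of pending events

def push(tag, event):
    queues.setdefault(tag, deque()).append(event)

def process_round_robin(handle):
    # One event per tag per pass, so no tag starves the others.
    while queues:
        for tag in list(queues):
            q = queues[tag]
            handle(tag, q.popleft())
            if not q:
                del queues[tag]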

    \ No newline at end of file + 
    \ No newline at end of file diff --git a/new-docs/design/xenprep/index.html b/new-docs/design/xenprep/index.html index d396b3b50..dd89ac325 100644 --- a/new-docs/design/xenprep/index.html +++ b/new-docs/design/xenprep/index.html @@ -1,13 +1,13 @@ XenPrep :: XAPI Toolstack Developer Documentation -
    \ No newline at end of file diff --git a/new-docs/index.html b/new-docs/index.html index 489412440..599e101ee 100644 --- a/new-docs/index.html +++ b/new-docs/index.html @@ -1,14 +1,14 @@ XAPI Toolstack Developer Guide :: XAPI Toolstack Developer Documentation -

    XAPI Toolstack Developer Guide

    The XAPI Toolstack:

    \ No newline at end of file diff --git a/new-docs/index.print.html b/new-docs/index.print.html index c1926b181..1499961a0 100644 --- a/new-docs/index.print.html +++ b/new-docs/index.print.html @@ -1,5 +1,5 @@ XAPI Toolstack Developer Guide :: XAPI Toolstack Developer Documentation -

      XAPI Toolstack Developer Guide

      The XAPI Toolstack:

      • Forms the control plane of both XenServer and xcp-ng,
      • manages clusters of Xen hosts with shared storage and networking,
      • has a full-featured API, used by clients such as XenCenter and Xen Orchestra.

      The XAPI Toolstack is an open-source project developed by the xapi project, a sub-project of the Linux @@ -7,8 +7,8 @@ behalf of clients such as XenCenter and Xen Orchestra.

      The most fundamental concept is of a Resource pool: the whole cluster managed as a single entity. The following diagram shows a cluster of hosts running xapi, all sharing some storage:

      A Resource Pool

      At any time, at most one host is known as the pool coordinator (formerly known as “master”) and is responsible for coordination and locking resources within the pool. When a pool is first created a coordinator host is chosen. The coordinator role can be transferred

      • on user request in an orderly fashion (xe pool-designate-new-master)
      • on user request in an emergency (xe pool-emergency-transition-to-master)
      • automatically if HA is enabled on the cluster.

      All hosts expose an HTTP, XML-RPC and JSON-RPC interface running on port 80 and @@ -22,8 +22,8 @@ done by xapi and hence it is not possible to share this kind of storage between resource pools.

      The following diagram shows the software running on a single host. Note that all hosts run the same software (although not necessarily the same version, if we are in the middle of a rolling update).

      A Host

      The XAPI Toolstack expects the host to be running Xen on x86. The Xen hypervisor partitions the host into Domains, some of which can have privileged hardware access, and the rest are unprivileged guests. The XAPI Toolstack normally runs all of its components in the privileged initial domain, @@ -42,8 +42,8 @@ component called xapi-idl.

      • Abstracts communication between daemons over the message-switch using JSON/RPC.
      • Contains the definition of the interfaces exposed by the daemons (except xapi).
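Schematically, a JSON/RPC exchange between two daemons might look as follows; the method name and payload are invented for illustration:

import json

request = json.dumps({
    "method": "VM.stat",                   # invented method name
    "params": [{"dbg": "task-context", "id": "<vm id>"}],
    "id": 42,
})

response = json.dumps({"result": {"power_state": "Running"}, "id": 42})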

      Subsections of Features

      Disaster Recovery

      The HA feature will restart VMs after hosts have failed, but what happens if a whole site (e.g. datacenter) is lost? A disaster recovery configuration is shown in the following diagram:

      Disaster recovery maintaining a secondary site

      We rely on the storage array’s built-in mirroring to replicate (synchronously or asynchronously: the admin’s choice) between the primary and the secondary site. When DR is enabled the VM disk data and VM metadata are written to the storage server and mirrored. The secondary site contains the other side @@ -86,12 +86,12 @@ VMs etc).

    Given a choice between polling states and receiving events when the state changes, we should in general opt for receiving events in the code in order to avoid adding bottlenecks in dom0 that will prevent the scalability of XenServer to many VMs and virtual devices.

    Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them

    Xapi

    Sending events from the xenapi

    A xenapi user client, such as XenCenter, the xe-cli or a python script, can register to receive events from XAPI for specific objects in the XAPI DB. XAPI will generate events for those registered clients whenever the corresponding XAPI DB object changes.

    Sending events from the xenapi

    This small Python script shows how to register a simple event watch loop for XAPI:

    import XenAPI
    session = XenAPI.Session("http://xshost")
    session.login_with_password("username", "password")
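The diff truncates the script at this point; a minimal continuation using the classic event.register/event.next calls might look like this (illustrative, not part of the original snippet):

    session.xenapi.event.register(["*"])       # watch all object classes
    while True:
        for event in session.xenapi.event.next():  # blocks until events arrive
            print(event["class"], event["operation"])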
    @@ -131,11 +131,11 @@
     run xapi_xenops.update_vgpu() to query xenopsd about its state.
  • Task id: something changed in this VM, run xapi_xenops.update_task() to query xenopsd about its state. The function update_task() will update the progress of the task in the xapi DB using the information of the task in xenopsd.

    Receiving events from xenopsd

    All the xapi_xenops.update_X() functions above will call Xenopsd_client.X.stat() functions to obtain the current state of X from xenopsd:

    Obtaining current state

    There are a couple of optimisations while processing the events in xapi_xenops.events_watch():

    • if an event X=(vm_id,dev_id) (eg. Vbd dev_id) has already been processed in a barrier_events, it’s not processed again. A typical value for X is eg. “<vm_uuid>.xvda” for a VBD.
    • if Events_from_xenopsd.are_supressed X, then this event @@ -191,16 +191,16 @@ the dom0 backend changed the state of a virtual device), it creates a signal for the corresponding object (VM_check_state, VBD_check_state etc) and sends it up to xapi. Xapi will then process this event in its xapi_xenops.events_watch() function.

      Sending events to xapi

      These signals may need to wait a long time to be processed if the single-threaded xapi_xenops.events_watch() function is having difficulties (i.e. taking a long time) processing previous signals in the UPDATES queue from xenopsd.

      Receiving events from xenstore

      Xenopsd watches a number of keys in xenstore, both in dom0 and in each guest. Xenstore is responsible for sending watch events to xenopsd whenever the watched keys change state. Xenopsd uses a xenstore client library to make it easier to create a callback function that is called whenever xenstore sends these events.

      Receiving events from xenstore

      Xenopsd also sometimes needs to complement these watch events with polling of some values. An example is the @introduceDomain event in xenstore (handled in xenopsd/xc/xenstore_watch.ml), which indicates that a new VM has been created. This event unfortunately does not @@ -220,8 +220,8 @@ the following may happen:

      • during the night someone spills a cup of coffee over an FC switch; then
      • VMs running on the affected hosts will lose access to their storage; then
      • business-critical services will go down; then
      • monitoring software will send a text message to an off-duty admin; then
      • the admin will travel to the office and fix the problem by restarting the VMs elsewhere.

      With HA the following will happen:

      • during the night someone spills a cup of coffee over an FC switch; then
      • VMs running on the affected hosts will lose access to their storage; then
      • business-critical services will go down; then
      • the HA software will determine which hosts are affected and shut them down; then
      • the HA software will restart the VMs on unaffected hosts; then
      • services are restored; then on the next working day
      • the admin can arrange for the faulty switch to be replaced.

      HA is designed to handle an emergency and allow the admin time to fix failures properly.

      Example

The following diagram shows an HA-enabled pool, before and after a network link between two hosts fails.

High-Availability in action

When HA is enabled, all hosts in the pool

• exchange periodic heartbeat messages over the network
• send heartbeats to a shared storage device
• attempt to acquire a “master lock” on the shared storage.
      HA is designed to recover as much as possible of the pool after a single failure i.e. it removes single points of failure. When some subset of the pool suffers a failure then the remaining pool members

      • figure out whether they are in the largest fully-connected set (the “liveset”);
    • if they are not in the largest set then they “fence” themselves (i.e. immediately reboot);
    • the statefile allows hosts to distinguish between a temporary storage failure and a permanent HA disable.
    • the heartbeat SR can be created on expensive low-latency high-reliability storage and made as small as possible (to minimise infrastructure cost), safe in the knowledge that if HA enables successfully once, it won’t run out of space and fail to enable in the future.

The Xapi-to-Xapi communication looks as follows:

Configuring HA around the Pool

The Xapi Pool master calls Host.ha_join_liveset on all hosts in the pool simultaneously. Each host runs the ha_start_daemon script which starts Xhad. Each Xhad starts exchanging heartbeats over the network and storage defined in the xhad.conf.

Joining a liveset

Starting up a host

      The Xhad instances exchange heartbeats and decide which hosts are in the “liveset” and which have been fenced.

After joining the liveset, each host clears the “excluded” flag which would have been set if the host had been shutdown cleanly before. Since HA is enabled and there is a master already, this node will be expected to stand unopposed. Later when HA notices that the master host has been fenced, all remaining hosts will stand for election and one of them will be chosen.

Shutting down a host

Shutting down a host

When a host is to be shutdown cleanly, it can be safely “excluded” from the pool such that a future failure of the storage heartbeat will not cause all pool hosts to self-fence (see survival rule 2 above). When a host is “excluded” all other hosts know that the host is down deliberately rather than due to a problem. Obviously this API should only be used if the admin is totally sure that HA has been disabled.

      Disabling HA

There are 2 methods of disabling HA: one for the “normal” case when the statefile is available; and the other for the “emergency” case when the statefile has failed and can’t be recovered.

Disabling HA cleanly

Disabling HA cleanly

HA can be shut down cleanly when the statefile is working, i.e. when hosts are alive because of survival rule 1. First the master Xapi tells the local Xhad to mark the pool state as “invalid” using ha_set_pool_state. Every xhad instance will notice this state change the next time it performs a storage heartbeat. The master Xapi then calls Host.ha_disable_failover_decisions on each host, which sets ha_disable_failover_decisions in the local database. This prevents the node rebooting, gaining statefile access, acquiring the master lock and restarting VMs when other hosts have disabled their fencing (i.e. a “split brain”).

Disabling HA uncleanly

Once the master is sure that no host will suddenly start recovering VMs, it is safe to call Host.ha_disarm_fencing which runs the script ha_disarm_fencing and then shuts down the Xhad with ha_stop_daemon.

      Add a host to the pool

      We assume that adding a host to the pool is an operation the admin will perform manually, so it is acceptable to disable HA for the duration and to re-enable it afterwards. If a failure happens during this operation then the admin will take care of it by hand.

    NUMA

    NUMA in a nutshell

Systems that contain more than one CPU socket are typically built on a Non-Uniform Memory Architecture (NUMA). In a NUMA system each node has fast, lower latency access to local memory.

hwloc

In the diagram above we have 4 NUMA nodes:

• 2 of those are due to 2 separate physical packages (sockets)
• a further 2 are due to Sub-NUMA-Clustering (aka Nodes Per Socket for AMD) where the L3 cache is split

The L3 cache is shared among multiple cores, but cores 0-5 have lower latency access to one part of it than cores 6-11, and this is also reflected by splitting memory addresses into 4 31GiB ranges in total.

In the diagram the closer the memory is to the core, the lower the access latency:

• per-core caches: L1, L2
• per-package shared cache: L3 (local part), L3 (remote part)
• local NUMA node (to a group of cores, e.g. L#0 P#0), node 0
• remote NUMA node in same package (L#1 P#2), node 1
• remote NUMA nodes in other packages (L#2 P#1 and L#3 P#3), nodes 2 and 3

The NUMA distance matrix

Accessing a remote NUMA node in the other package has to go through a shared interconnect, which has lower bandwidth than the direct connections, and is also a bottleneck if both cores have to access remote memory: the bandwidth for a single core is effectively at most half.

    This is reflected in the NUMA distance/latency matrix. The units are arbitrary, and by convention access latency to the local NUMA node is given distance ‘10’.

    Relative latency matrix by logical indexes:

index   0    2    1    3
0       10   21   11   21
2       21   10   21   11
1       11   21   10   21
3       21   11   21   10

    This follows the latencies described previously:

    • fast access to local NUMA node memory (by definition), node 0, cost 10
    • slightly slower access latency to the other NUMA node in same package, node 1, cost 11
    • twice as slow access latency to remote NUMA memory in the other physical package (socket): nodes 2 and 3, cost 21
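For illustration, the matrix above can be used to rank nodes by access cost from a given “home” node. This is a standalone OCaml sketch, not xenopsd code; the matrix below is the same data in plain logical order 0-3.

let distance = [|
  [| 10; 11; 21; 21 |];  (* from node 0 *)
  [| 11; 10; 21; 21 |];  (* from node 1 *)
  [| 21; 21; 10; 11 |];  (* from node 2 *)
  [| 21; 21; 11; 10 |];  (* from node 3 *)
|]

(* nodes sorted by increasing access cost from [home] *)
let nodes_by_distance home =
  let nodes = List.init (Array.length distance) (fun i -> i) in
  List.sort (fun a b -> compare distance.(home).(a) distance.(home).(b)) nodes

let () =
  nodes_by_distance 0
  |> List.iter (fun n -> Printf.printf "node %d (cost %d)\n" n distance.(0).(n))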

There is also I/O NUMA, where a cost is similarly associated with where a PCIe device is plugged in, but exploring that is future work (it requires exposing NUMA topology to the Dom0 kernel to benefit from it), and for simplicity the diagram above does not show it.

    Advantages of NUMA

    NUMA does have advantages though: if each node accesses only its local memory, then each node can independently achieve maximum throughput.

    For best performance, we should:

    • minimize the amount of interconnect bandwidth we are using
    • run code that accesses memory allocated on the closest NUMA node
    • maximize the number of NUMA nodes that we use in the system as a whole

    If a VM’s memory and vCPUs can entirely fit within a single NUMA node then we should tell Xen to prefer to allocate memory from and run the vCPUs on a single NUMA node.
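A toy version of that fit check follows. This is an illustrative sketch, not xenopsd's actual placement logic; the node record fields are assumptions made for the example.

type node = { id : int; free_mib : int; free_pcpus : int }

(* does the VM fit entirely on this node? *)
let fits vm_mib vm_vcpus n = vm_mib <= n.free_mib && vm_vcpus <= n.free_pcpus

(* among the nodes that fit, pick the one with the most free memory *)
let best_node nodes ~vm_mib ~vm_vcpus =
  List.filter (fits vm_mib vm_vcpus) nodes
  |> List.fold_left
       (fun best n ->
         match best with
         | Some b when b.free_mib >= n.free_mib -> best
         | _ -> Some n)
       None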

    Xen vCPU soft-affinity

    The Xen scheduler supports 2 kinds of constraints:

    • hard pinning: a vCPU may only run on the specified set of pCPUs and nowhere else
    • soft pinning: a vCPU is preferably run on the specified set of pCPUs, but if they are all busy then it may run elsewhere

Hard pinning can be used to partition the system, but it can potentially leave part of the system idle while another part is bottlenecked by many vCPUs competing for the same limited set of pCPUs.

Xen does not migrate workloads between NUMA nodes on its own (the Linux kernel does), although it is possible to achieve a similar effect with explicit migration. However, migration introduces additional delays and is best avoided for entire VMs.

    Therefore, soft pinning is preferred: Running on a potentially suboptimal pCPU that uses remote memory could still be better than not running it at all until a pCPU is free to run it.

    Xen will also allocate memory for the VM according to the vCPU (soft) pinning: If the vCPUs are pinned to NUMA nodes A and B, Xen allocates memory from NUMA nodes A and B in a round-robin way, resulting in interleaving.

    Current default: No vCPU pinning

    By default, when no vCPU pinning is used, Xen interleaves memory from all NUMA nodes. This averages the memory performance, but individual tasks’ performance may be significantly higher or lower depending on which NUMA node the application may have “landed” on. As a result, restarting processes will speed them up or slow them down as address space randomization picks different memory regions inside a VM.

This uses the memory bandwidth of all memory controllers and distributes the load across all nodes.

In Xapi, disk snapshots are first-class disks. This is in contrast with some storage arrays in which snapshots are “second class” objects which are automatically deleted when the original disk is deleted.

Disks are implemented in Xapi via “Storage Manager” (SM) plugins. The SM plugins conform to an API (the SMAPI) which has operations including

    • vdi_create: make a fresh disk, full of zeroes
    • vdi_snapshot: create a snapshot of a disk
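As an illustration only, the two operations named above could be expressed as an OCaml module type. The real SMAPI is much larger and its signatures differ; this sketch just captures the shape of the interface.

module type SMAPI = sig
  type vdi

  (* vdi_create: make a fresh disk, full of zeroes *)
  val vdi_create : size_bytes:int64 -> vdi

  (* vdi_snapshot: create a snapshot of a disk *)
  val vdi_snapshot : vdi -> vdi
end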

    File-based vhd implementation

The existing “EXT” and “NFS” file-based Xapi SM plugins store disk data in trees of .vhd files as in the following diagram:

Relationship between VDIs and vhd files

    From the XenAPI point of view, we have one current VDI and a set of snapshots, each taken at a different point in time. These VDIs correspond to leaf vhds in a tree stored on disk, where the non-leaf nodes contain all the shared blocks.

    The vhd files are always thinly-provisioned which means they only allocate new blocks on an as-needed basis. The snapshot leaf vhd files only contain vhd @@ -710,28 +710,28 @@ contains only the vhd metadata and therefore is very small (a few KiB) and will only grow when the VM writes blocks.

    File-based vhd implementations are a good choice if a “gold image” snapshot is going to be cloned lots of times.

    Block-based vhd implementation

The existing “LVM”, “LVMoISCSI” and “LVMoHBA” block-based Xapi SM plugins store disk data in trees of .vhd files contained within LVM logical volumes:

Relationship between VDIs and LVs containing vhd data

    Non-snapshot VDIs are always stored full size (a.k.a. thickly-provisioned). When parent nodes are created they are automatically shrunk to the minimum size needed to store the shared blocks. The LVs corresponding with snapshot VDIs only contain vhd metadata and by default consume 8MiB. Note: this is different to VDI.clones which are stored full size.

    Block-based vhd implementations are not a good choice if a “gold image” snapshot is going to be cloned lots of times, since each clone will be stored full size.

    Hypothetical LUN implementation

A hypothetical Xapi SM plugin could use LUNs on an iSCSI storage array as VDIs, and the array’s custom control interface to implement the “snapshot” operation:

Relationship between VDIs and LUNs on a hypothetical storage target

    From the XenAPI point of view, we have one current VDI and a set of snapshots, each taken at a different point in time. These VDIs correspond to LUNs on the same iSCSI target, and internally within the target these LUNs are comprised of blocks from a large shared copy-on-write pool with support for dedup.

    Reverting disk snapshots

    There is no current way to revert in-place a disk to a snapshot, but it is possible to create a writable disk by “cloning” a snapshot.

    VM snapshots

Let’s say we have a VM, “VM1”, that has 2 disks. Concentrating only on the VM, VBDs and VDIs, we have the following structure:

VM objects

When we take a snapshot, we first ask the storage backends to snapshot all of the VDIs associated with the VM, producing new VDI objects. Then we copy all of the metadata, producing a new ‘snapshot’ VM object, complete with its own VBDs copied from the original, but now pointing at the snapshot VDIs. We also copy the VIFs and VGPUs but for now we will ignore those.

This process leads to a set of objects that look like this:

VM and snapshot objects

    We have fields that help navigate the new objects: VM.snapshot_of, and VDI.snapshot_of. These, like you would expect, point to the relevant other objects.

    Deleting VM snapshots

When a snapshot is deleted Xapi calls the SM API vdi_delete. The Xapi SM plugins which use vhd format data do not reclaim space immediately; instead a background coalesce process checks whether any parent nodes have only one child, i.e. the “shared” blocks are only “shared” with one other node. In the following example the snapshot delete leaves such a parent node and the coalesce process copies blocks from the redundant parent’s only child into the parent:

We coalesce parent blocks into grand parent nodes

    Note that if the vhd data is being stored in LVM, then the parent node will have had to be expanded to full size to accommodate the writes. Unfortunately this means the act of reclaiming space actually consumes space itself, which means it is important to never completely run out of space in such an SR.

Once the blocks have been copied, we can now cut one of the parents out of the tree by relinking its children into their grandparent:

Relink children into grand parent

Finally the garbage collector can remove unused vhd files / LVM LVs:

Clean up

    Reverting VM snapshots

    The XenAPI call VM.revert overwrites the VM metadata with the snapshot VM metadata, deletes the current VDIs and replaces them with clones of the snapshot VDIs. Note there is no “vdi_revert” in the SMAPI.

    Revert implementation details

This is the process by which we revert a VM to a snapshot.

NVIDIA’s GRID K1 and K2 cards support vGPU, aimed at boosting graphics performance within virtual machines.

    The K1 has four GK104 GPUs and the K2 two GK107 GPUs. Each of these will be exposed through Xapi so a host with a single K1 card will have access to four independent PGPUs.

Each of the GPUs can then be subdivided into vGPUs. For each type of PGPU, there are a few options of vGPU type which consume different amounts of the PGPU. For example, K1 and K2 cards can currently be configured in the following ways:

Possible VGX configurations

Note: this diagram is not to scale; the PGPU resource required by each vGPU type is as follows:

vGPU type  PGPU kind  vGPUs / PGPU
k100       GK104      8
k140Q      GK104      4
k200       GK107      8
k240Q      GK107      4
k260Q      GK107      2

Currently each physical GPU (PGPU) only supports homogeneous vGPU configurations but different configurations are supported on different PGPUs across a single K1/K2 card. This means that, for example, a host with a K1 card can run several different vGPU types at once.

A dedicated vgpu process presents the graphics device to the guest. The vgpu binary is responsible for handling the VGX-capable GPU and, once it has been successfully passed through, the in-guest drivers can be installed in the same way as when it detects new hardware.

The diagram below shows the relevant parts of the architecture for this project.

XenServer’s vGPU architecture

      Relevant code

      • In Xenopsd: Xenops_server_xen is where Xenopsd gets the vGPU information from the values passed from Xapi;
      • In Xenopsd: Device.__start is where the vgpu process is started, if necessary, before Qemu.

      Xapi’s API and data model

A lot of work has gone into the toolstack to handle the creation and management of VMs with vGPUs. We revised our data model, introducing a semantic link between VGPU and PGPU objects to help with utilisation tracking; we maintained the GPU_group concept as a pool-wide abstraction of PGPUs available for VMs; and we added VGPU_types which are configurations for VGPU objects.

Xapi’s vGPU datamodel

      Aside: The VGPU type in Xapi’s data model predates this feature and was synonymous with GPU-passthrough. A VGPU is simply a display device assigned to a VM which may be a vGPU (this feature) or a whole GPU (a VGPU of type passthrough).

VGPU_types can be enabled/disabled on a per-PGPU basis. The types enabled on, and the vGPUs resident on, a PGPU can be listed with param-name=enabled-vgpu-types and param-name=resident-vgpus respectively. Or, alternatively, you can use the following command to list all the parameters for the PGPU, from which you can get the types supported or enabled for a given PGPU:

      $ xe pgpu-list uuid=... params=all

    Xapi Storage Migration

    The Xapi Storage Migration (XSM) also known as “Storage Motion” allows

• a running VM to be migrated within a pool, between different hosts and different storage simultaneously;
• a running VM to be migrated to another pool;
• a disk attached to a running VM to be moved to another SR.

The following diagram shows how XSM works at a high level:

Xapi Storage Migration

    The slowest part of a storage migration is migrating the storage, since virtual disks can be very large. Xapi starts by taking a snapshot and copying that to the destination as a background task. Before the datapath connecting the VM to the disk is re-established, xapi tells tapdisk to start mirroring all @@ -858,8 +858,8 @@ across. Once the VM memory image has been received, the destination VM is complete and the original can be safely destroyed.

    Xapi

    Xapi is the xapi-project host and cluster manager.

    Xapi is responsible for:

    • providing a stable interface (the XenAPI)
    • allowing one client to manage multiple hosts
    • hosting the “xe” CLI
    • authenticating users and applying role-based access control
    • locking resources (in particular disks)
    • allowing storage to be managed through plugins
    • planning and coping with host failures (“High Availability”)
    • storing VM and host configuration
    • generating alerts
    • managing software patching

    Principles

    1. The XenAPI interface must remain backwards compatible, allowing older clients to continue working
2. Xapi delegates all Xenstore/libxc/libxl access to Xenopsd, so Xapi could be run in an unprivileged helper domain
3. Xapi delegates the low-level storage manipulation to SM plugins.
4. Xapi delegates setting up host networking to xcp-networkd.
5. Xapi delegates monitoring performance counters to xcp-rrdd.

Overview

The following diagram shows the internals of Xapi:

Internals of xapi

The top of the diagram shows the XenAPI clients: XenCenter, XenOrchestra, OpenStack and CloudStack using XenAPI and HTTP GET/PUT over ports 80 and 443 to talk to xapi. These XenAPI (JSON-RPC or XML-RPC over HTTP POST) and HTTP GET/PUT requests are always authenticated using either PAM (by default using the local passwd and group files) or Active Directory.

• The third configuration shows xapi with the Metadata-on-LUN feature using a healthy LUN to which all database writes can be successfully flushed.

• The fourth configuration shows xapi with the Metadata-on-LUN feature using an inaccessible LUN for which all database writes fail.

Impact of feature on xapi database-writing performance

    Testing strategy

The section above shows how xapi performance is affected by this feature. The sections below describe the dev-testing which has already been undertaken.

Xapi samples the host memory overheads at system boot time and models the per-VM overheads.

    Host overhead

The host overhead is not managed by xapi, instead it is sampled. After the host boots and before any VMs start, xapi asks Xen how much memory the host has in total, and how much memory is currently free. Xapi subtracts the free from the total and stores this as the host overhead.

VM overhead

The inputs to the model are

• VM.memory_static_max: the maximum amount of RAM the domain will be able to use
• VM.HVM_shadow_multiplier: allows the shadow memory to be increased
• VM.VCPUs_max: the maximum number of vCPUs the domain will be able to use

First the shadow memory is calculated, in MiB

Shadow memory in MiB

Second the VM overhead is calculated, in MiB

Memory overhead in MiB

Memory required to start a VM

    If ballooning is disabled, the memory required to start a VM is the same as the VM overhead above.

    If ballooning is enabled then the memory calculation above is modified to use the VM.memory_dynamic_max rather than the VM.memory_static_max.

    Memory required to migrate a VM

    If ballooning is disabled, the memory required to receive a migrating VM is the same as the VM overhead above.
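The start/migrate rules above can be summarised in a sketch. The overhead function is deliberately left abstract here because the exact shadow/overhead formulas are given only as diagrams; the record fields mirror the inputs listed earlier.

type vm = {
  memory_static_max : int64;
  memory_dynamic_max : int64;
  hvm_shadow_multiplier : float;
  vcpus_max : int;
}

(* per the text: the requirement equals the modelled VM overhead; with
   ballooning enabled, dynamic_max is used in place of static_max *)
let memory_required ~ballooning ~overhead vm =
  if ballooning then
    overhead { vm with memory_static_max = vm.memory_dynamic_max }
  else
    overhead vm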

If ballooning is enabled, then the VM will first be ballooned down to VM.memory_dynamic_min before the migration proceeds.

  • Be debuggable: Xenopsd will expose diagnostic APIs and tools to allow its internal state to be inspected and modified.
Subsections of Xenopsd

    Architecture

Xenopsd instances run on a host and manage VMs on behalf of clients. This picture shows 3 different Xenopsd instances: 2 named “xenopsd-xc” and 1 named “xenopsd-xenlight”.

Where xenopsd fits on a host

    Each instance is responsible for managing a disjoint set of VMs. Clients should never ask more than one Xenopsd to manage the same VM. Managing a VM means:

    • handling start/shutdown/suspend/resume/migrate/reboot
    • allowing devices (disks, nics, PCI cards, vCPUs etc) to be manipulated
    • providing updates to clients when things change (reboots, console becomes available, guest agent says something etc).

    For a full list of features, consult the features list.

    Each Xenopsd instance has a unique name on the host. A typical name is

    • org.xen.xcp.xenops.classic
    • org.xen.xcp.xenops.xenlight

A higher-level tool, such as xapi, decides which Xenopsd instance should manage each VM. Xenopsd communicates via the xcp-idl library, which offers a choice of:

• message framing: by default HTTP, but a binary framing format is also available
• message encoding: by default we use JSON but XML is also available
• RPCs over Unix domain sockets and persistent queues.

This library allows the communication details to be changed without having to change all the Xapi clients and servers.

Xenopsd has a number of “backends” which perform the low-level VM operations such as (on Xen) “create domain”, “hotplug disk”, “destroy domain”. These backends contain all the hypervisor-specific code including

• connecting to Xenstore
• opening the libxc /proc/xen/privcmd interface
• initialising libxl contexts

The following diagram shows the internal structure of Xenopsd:

Inside xenopsd

At the top of the diagram two client RPCs have been sent: one to start a VM and the other to fetch the latest events. The RPCs are all defined in xcp-idl/xen/xenops_interface.ml. The RPCs are received by the Xenops_server module and decomposed into “micro-ops”.

Squeezed is the xapi toolstack’s host memory ballooning daemon.

    Principles

    1. Avoid wasting host memory: unused memory should be put to use by returning it to VMs.
    2. Memory should be shared in proportion to the configured policy.
    3. Operate entirely at the level of domains (not VMs), and be independent of Xen toolstack.

    Subsections of Squeezed

    Architecture

Squeezed is responsible for managing the memory on a single host. Squeezed “balances” memory between VMs according to a policy written to Xenstore.

The following diagram shows the internals of Squeezed:

Internals of squeezed

    At the center of squeezed is an abstract model of a Xen host. The model includes:

    • The amount of already-used host memory (used by fixed overheads such as Xen and the crash kernel).
• Per-domain memory policy, specifically dynamic-min and dynamic-max, which together describe a range within which the domain’s actual used memory should lie.

Memory reservations are tracked with an associated reservation id. Note this is an internal Squeezed concept and Xen is completely unaware of it. When the daemon is moving memory between domains, it always aims to keep

host free memory >= s + sum_i(reservation_i)

where s is the size of the “slush fund” (currently 9MiB) and reservation_i is the amount corresponding to the ith reservation.

As an aside: earlier versions of Squeezed always associated memory with a Xen domain, which proved problematic. Squeezed instructs each balloon driver by writing its domain’s memory/target key; this can be used to dynamically cap the amount of memory a domain can use.

If all balloon drivers are responsive then the Squeezed daemon allocates memory proportionally, so that each domain has the same value of:

(target - min) / (max - min)

So:

      • if memory is plentiful then all domains will have memory/target=memory/dynamic-max

      • if memory is scarce then all domains will have memory/target=memory/dynamic-min
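The proportional rule can be written down directly. This is an illustrative sketch, not the Squeezed source: fraction is the host-wide value of (target - min) / (max - min), chosen so that the resulting targets fit the available memory.

type domain = { dynamic_min : int64; dynamic_max : int64 }

let ideal_target fraction d =
  let range = Int64.sub d.dynamic_max d.dynamic_min in
  Int64.add d.dynamic_min (Int64.of_float (fraction *. Int64.to_float range))

(* fraction = 1.0 when memory is plentiful (target = dynamic-max);
   fraction = 0.0 when memory is scarce    (target = dynamic-min) *)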

Note that the values of memory/target suggested by the policy are ideal values; in many real-life situations (e.g. when a balloon driver responds slowly) they cannot be reached immediately.

A domain which is being built will not yet have been instructed to balloon. Since such a domain will have 0 <= totpages <= reservation, Squeezed computes

unused(i) = reservation(i) - totpages

and subtracts this from its model of the host’s free memory, ensuring that it doesn’t accidentally reallocate this memory for some other purpose.

In the diagram below each domain is shown with its adjusted-totpages, and the arrow indicates the direction of the memory/target. For the host the square box indicates total free memory. Note the highlighted state where the host’s free memory is temporarily exhausted.

Two phase target setting

In the initial state (at the top of the diagram), there are two domains, one which has been requested to use more memory and the other requested to use less memory. In effect the memory is to be transferred from one domain to the other. The second diagram shows the result of computing ideal target values and the third diagram shows the result after targets have been set and the balloon drivers have responded.

calculation

      The scenario above includes 3 domains (domain 1, domain 2, domain 3) on a host. Each of the domains has a non-ideal adjusted-totpages value.

Recall we also have the policy constraint that: dynamic-min <= target <= dynamic-max. If we use the default built-in proportional policy then, since all domains have the same dynamic-min and dynamic-max, each gets the same fraction of this free memory, which we call g:

definition of g

For each domain, the ideal balloon target is now target = dynamic-min + g. Squeezed does not set all the targets at once: this would allow the host’s free memory to become temporarily exhausted, so it uses the two-phase approach described above.

BisectPPX instruments OCaml code in order to produce a summary of annotated code that highlights what part of a codebase was executed.

      BisectPPX has several desirable properties:

      • a robust code base that is well tested
      • it is easy to integrate into the compilation pipeline (see below)
      • it is specific to the OCaml language; traditional statement coverage doesn’t fit an expression-oriented language like OCaml well
      • it is actively maintained
      • it generates useful reports for interactive and non-interactive use that help to improve code coverage

Coverage Analysis

    Red parts indicate code that wasn’t executed whereas green parts were. Hovering over a dark green spot reveals how often that point was executed.

The individual steps of instrumenting code with BisectPPX are greatly abstracted by OCamlfind (OCaml’s library manager) and OCamlbuild (OCaml’s build system).

If we placed our host and VM metadata in git then we could commit changes and pull and push them between replicas. The Irmin library provides an easy programming interface on top of git which we could link with the Xapi database layer.

Proposed new architecture

Pools of one

The diagram above shows two hosts: one a master and the other a regular host. The XenAPI client has sent a request to the wrong host; normally this would result in a HOST_IS_SLAVE error being sent to the client. In the new world, the host is able to process the request, only contacting the master if it is necessary to acquire a lock. Starting a VM would require a lock; but rebooting or migrating an existing VM would not. Assuming the lock can be acquired, the operation is executed locally with all state updates being made to a git topic branch.

Topic branches

Roughly we would have 1 topic branch per pending XenAPI Task. Once the Task completes successfully, the topic branch (containing the new VM state) is merged back into master.

The fields PGPU.dom0_access and host.display take values including enabled, enable_on_reboot, disabled and disable_on_reboot.

    Disabling dom0 access will modify the xen commandline (using the xen-cmdline tool) such that dom0 will not be able to access the GPU on next boot.

    Calling host.disable_display will modify the xen and dom0 commandlines such that neither will attempt to send console output to the system display device.

A state diagram for the fields PGPU.dom0_access and host.display is shown below:

host.integrated_GPU_passthrough flow diagram

    While it is possible for these two fields to be modified independently, a client must disable both the host display and dom0 access to the system display device before that device can be passed through to a guest.

    Note that when a client enables or disables either of these fields, the change can be cancelled until the host is rebooted.

    Handling vga_arbiter

Currently, xapi will not create a PGPU object for the PCI device whose address is claimed by the VGA arbiter.

Any additional device models will be dealt with entirely by xenopsd.

    Design document
Revision: v1
Status: proposed

    OCFS2 storage

OCFS2 is a (host-)clustered filesystem which runs on top of a shared raw block device. Hosts using OCFS2 form a cluster using a combination of network and storage heartbeats and host fencing to avoid split-brain.

The following diagram shows the proposed architecture with xapi:

Proposed architecture

Please note the following:

      • OCFS2 is configured to use global heartbeats rather than per-mount heartbeats because we quite often have many SRs and therefore many mountpoints
      • The OCFS2 global heartbeat should be collocated on the same SR as the XenServer HA SR so that we depend on fewer SRs (the storage is a single point of failure for OCFS2)
      • The OCFS2 global heartbeat should itself be a raw VDI within an LVHDSR.
      • Every host can be in at-most-one OCFS2 cluster, i.e. the host cluster membership is global to the host rather than per-SR.

        Walk-through: adding OCFS2 storage

Assume you have an existing Pool of 2 hosts. First the client will set up the O2CB cluster, choosing where to put the global heartbeat volume. The client should check that the I/O paths have all been set up correctly with bonding and multipath and prompt the user to fix any obvious problems.

The client enables O2CB and then creates an SR

Internally within Pool.enable_o2cb Xapi will set up the cluster metadata on every host in the pool:

Xapi creates the cluster configuration and each host updates its metadata

At this point all hosts have in-sync cluster.conf files but all cluster services are disabled. We also have requires_maintenance=true on all Membership entries and the global Cluster has enabled=false. The client will now try to enable the cluster with Cluster.enable:

Xapi enables the cluster software on all hosts

        Now all hosts are in the cluster and the SR can be created using the standard SM APIs.

        Walk-through: remove a host

Assume you have an existing Pool of 2 hosts with o2cb clustering enabled and at least one ocfs2 filesystem mounted. If the host is online then XenAPI:Pool.eject will:

Xapi ejects a host from the pool

Note that:

          • All hosts will have modified their o2cb cluster.conf to comment out the former host
          • The Membership table still remembers the node number of the ejected host– this cannot be re-used until the SR is taken down for maintenance.
          • All hosts can see the difference between their current cluster.conf and the one they would use if they restarted the cluster service.

          • the protocol requires the use of the XenAPI from within a plugin, which is racy, difficult to get right, unscalable and makes component testing impossible.

          • the protocol expects plugin authors to have a deep knowledge of the Xen storage datapath (tapdisk, blkback etc) and the storage.

          • the protocol is undocumented.

We shall create a new revision of the protocol (“SMAPIv3”) to address these problems.

The following diagram shows the new control plane:

Storage control plane

            Requests from xapi are filtered through the existing storage_access layer which is responsible for managing the mapping between VM VBDs and VDIs.

            Each plugin is represented by a named queue, with APIs for

            • querying the state of each queue
            • explicitly cancelling or replying to messages

Legacy SMAPIv1 plugins will be processed via the existing storage_access.SMAPIv1 module. Newer SMAPIv3 plugins will be handled by a new xapi-storage-script service. It will be possible to inspect the queue associated with each plugin, and see whether

            • the queue is being served or not (perhaps the xapi-storage-script has crashed)
            • there are unanswered messages (perhaps one of the messages has caused a deadlock in the implementation?)

            It will be possible to

            • delete/clear queues/messages
            • download a message-sequence chart of the last N messages for inclusion in bugtools.

Anatomy of a plugin

The following diagram shows what a plugin would look like:

Anatomy of a plugin

The SMAPIv3

Please read the current SMAPIv3 documentation.

Design document
Revision: v1
Status: proposed

    Specifying Emulated PCI Devices

    Background and goals

    At present (early March 2015) the datamodel defines a VM as having a “platform” string-string map, in which two keys are interpreted as specifying a PCI device which should be emulated for the VM. Those keys are “device_id” and “revision” (with int values represented as decimal strings).

    Limitations:

    • Hardcoded defaults are used for the vendor ID and all other parameters except device_id and revision.
    • Only one emulated PCI device can be specified.

    When instructing qemu to emulate PCI devices, qemu accepts twelve parameters for each device.

    Future guest-agent features rely on additional emulated PCI devices. We cannot know in advance the full details of all the devices that will be needed, but we can predict some.

    We need a way to configure VMs such that they will be given additional emulated PCI devices.

    Design

    In the datamodel, there will be a new type of object for emulated PCI devices.

    Tentative name: “emulated_pci_device”

    Fields to be passed through to qemu are the following, all static read-only, and all ints except devicename:

    • devicename (string)
    • vendorid
    • deviceid
    • command
    • status
    • revision
    • classcode
    • headertype
    • subvendorid
    • subsystemid
    • interruptline
    • interruptpin

    We also need a “built_in” flag: see below.
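Purely as an illustration, the proposed object could look like the following OCaml record. The field names follow the list above, but this is not the actual xapi datamodel code.

type emulated_pci_device = {
  devicename : string;
  vendorid : int;
  deviceid : int;
  command : int;
  status : int;
  revision : int;
  classcode : int;
  headertype : int;
  subvendorid : int;
  subsystemid : int;
  interruptline : int;
  interruptpin : int;
  built_in : bool; (* standard devices shipped as part of the product *)
}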

    Allow creation of these objects through the API (and CLI).

    (It would be nice, but by no means essential, to be able to create one by specifying an existing one as a basis, along with one or more altered fields, e.g. “Make a new one just like that existing one except with interruptpin=9.”)

    Create some of these devices to be defined as standard in XenServer, along the same lines as the VM templates. Those ones should have built_in=true.

    Allow destruction of these objects through the API (and CLI), but not if they are in use or if they have built_in=true.

    A VM will have a list of zero or more of these emulated-pci-device objects. (OPEN QUESTION: Should we forbid having more than one of a given device?)

    Provide API (and CLI) commands to add and remove one of these devices from a VM (identifying the VM and device by uuid or other identifier such as name).

    The CLI should allow performing this on multiple VMs in one go, based on a selector or filter for the VMs. We have this concept already in the CLI in commands such as vm-start.

    In the function that adds an emulated PCI device to a VM, we must check if this is the first device to be added, and must refuse if the VM’s Virtual Hardware Platform Version is too low. (Or should we just raise the version automatically if needed?)

    When starting a VM, check its list of emulated pci devices and pass the details through to qemu (via xenopsd).

    Design document
Revision: v11
Status: confirmed
Review: #139

Revision history
v1   Initial version
v2   Added details about the VDI's binary format and size, and the SR capability name.
v3   Tar was not needed after all!
v4   Add details about discovering the VDI using a new vdi_type.
v5   Add details about the http handlers and interaction with xapi's database
v6   Add details about the framing of the data within the VDI
v7   Redesign semantics of the rrd_updates handler
v8   Redesign semantics of the rrd_updates handler (again)
v9   Magic number change in framing format of vdi
v10  Add details of new APIs added to xapi and xcp-rrdd
v11  Remove unneeded API calls

    SR-Level RRDs

    Introduction

    Xapi has RRDs to track VM- and host-level metrics. There is a desire to have SR-level RRDs as a new category, because SR stats are not specific to a certain VM or host. Examples are size and free space on the SR. While recording SR metrics is relatively straightforward within the current RRD system, the main question is where to archive them, which is what this design aims to address.

    Stats Collection

    All SR types, including the existing ones, should be able to have RRDs defined for them. Some RRDs, such as a “free space” one, may make sense for multiple (if not all) SR types. However, the way to measure something like free space will be SR specific. Furthermore, it should be possible for each type of SR to have its own specialised RRDs.

    It follows that each SR will need its own xcp-rrdd plugin, which runs on the SR master and defines and collects the stats. For the new thin-lvhd SR this could be xenvmd itself. The plugin registers itself with xcp-rrdd, so that the latter records the live stats from the plugin into RRDs.

    Archiving

    SR-level RRDs will be archived in the SR itself, in a VDI, rather than in the local filesystem of the SR master. This way, we don’t need to worry about master failover.

    The VDI will be 4MB in size. This is a little more space than we would need for the RRDs we have in mind at the moment, but will give us enough headroom for the foreseeable future. It will not have a filesystem on it for simplicity and performance. There will only be one RRD archive file for each SR (possibly containing data for multiple metrics), which is gzipped by xcp-rrdd, and can be copied onto the VDI.

    There will be a simple framing format for the data on the VDI. This will be as follows:

Offset  Type                      Name     Comment
0       32-bit network-order int  magic    Magic number = 0x7ada7ada
4       32-bit network-order int  version  1
8       32-bit network-order int  length   length of payload
12      gzipped data              data
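Writing the header described in the table is straightforward; the sketch below is illustrative OCaml (requiring OCaml >= 4.08 for Bytes.set_int32_be), not the actual xcp-rrdd or xapi code, and it assumes the gzipped payload is already in hand.

let write_frame oc gzipped_payload =
  let header = Bytes.create 12 in
  Bytes.set_int32_be header 0 0x7ada7adal;  (* magic *)
  Bytes.set_int32_be header 4 1l;           (* version *)
  Bytes.set_int32_be header 8 (Int32.of_int (String.length gzipped_payload)); (* length *)
  output_bytes oc header;
  output_string oc gzipped_payload          (* gzipped data *)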

    Xapi will be in charge of the lifecycle of this VDI, not the plugin or xcp-rrdd, which will make it a little easier to manage them. Only xapi will attach/detach and read from/write to this VDI. We will keep xcp-rrdd as simple as possible, and have it archive to its standard path in the local file system. Xapi will then copy the RRDs in and out of the VDI.

    A new value "rrd" in the vdi_type enum of the datamodel will be defined, and the VDI.type of the VDI will be set to that value. The storage backend will write the VDI type to the LVM metadata of the VDI, so that xapi can discover the VDI containing the SR-level RRDs when attaching an SR to a new pool. This means that SR-level RRDs are currently restricted to LVM SRs.

    Because we will not write plugins for all SRs at once, and therefore do not need xapi to set up the VDI for all SRs, we will add an SR “capability” for the backends to be able to tell xapi whether it has the ability to record stats and will need storage for them. The capability name will be: SR_STATS.

    Management of the SR-stats VDI

    The SR-stats VDI will be attached/detached on PBD.plug/unplug on the SR master.

    • On PBD.plug on the SR master, if the SR has the stats capability, xapi:

      • Creates a stats VDI if not already there (search for an existing one based on the VDI type).
      • Attaches the stats VDI if it did already exist, and copies the RRDs to the local file system (standard location in the filesystem; asks xcp-rrdd where to put them).
      • Informs xcp-rrdd about the RRDs so that it will load the RRDs and add newly recorded data to them (needs a function like push_rrd_local for VM-level RRDs).
      • Detaches stats VDI.
    • On PBD.unplug on the SR master, if the SR has the stats capability xapi:

      • Tells xcp-rrdd to archive the RRDs for the SR, which it will do to the local filesystem.
      • Attaches the stats VDI, copies the RRDs into it, detaches VDI.
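The PBD.plug-time steps above can be sketched as follows. All of the function parameters are illustrative stand-ins, not the real xapi internals.

let on_pbd_plug ~sr_has_stats ~find_stats_vdi ~create_stats_vdi
    ~attach ~detach ~copy_rrds_to_local ~push_rrd_local =
  if sr_has_stats then begin
    let vdi, existed =
      match find_stats_vdi () with
      | Some v -> (v, true)
      | None -> (create_stats_vdi (), false)
    in
    attach vdi;
    if existed then copy_rrds_to_local vdi; (* restore archived RRDs *)
    push_rrd_local ();                      (* tell xcp-rrdd to load them *)
    detach vdi
  end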

    Periodic Archiving

    Xapi’s periodic scheduler regularly triggers xcp-rrdd to archive the host and VM RRDs. It will need to do this for the SR ones as well. Furthermore, xapi will need to attach the stats VDI and copy the RRD archives into it (as on PBD.unplug).

    Exporting

    There will be a new handler for downloading an SR RRD:

    http://<server>/sr_rrd?session_id=<SESSION HANDLE>&uuid=<SR UUID>
     

RRD updates are handled via a single handler for the host, VM and SR UUIDs.

  • block allocations are handled locally in the common-case; in particular there is no network RPC needed

  • when the resource pool master host has failed, allocations can still continue, up to some limit, allowing time for the master host to be recovered; in particular there is no need for very low HA timeouts.
  • we can (in future) support in-kernel block allocation through the device mapper dm-thin target.

The following diagram shows the “Allocation plane”:

Allocation plane

All VM disk writes are channelled through tapdisk which keeps track of the remaining reserved space within the device mapper device. When the free space drops below a “low-water mark”, tapdisk sends a message to a local per-SR daemon called local-allocator and requests more space.

Note: slow I/O can trigger the kernel’s “block for more than 120 seconds” warning: slow I/O during dirty-page writeback/flush may cause memory starvation, and then other userland processes or kernel threads would be blocked.
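The low-water-mark idea reduces to a simple threshold check. The names below (low_water_mark, request_more_space) are illustrative, not the real tapdisk/local-allocator interface.

let low_water_mark = 64 * 1024 * 1024  (* bytes; an illustrative threshold *)

(* called after each write with the space still reserved for this device *)
let after_write ~remaining_reserved ~request_more_space =
  if remaining_reserved < low_water_mark then request_more_space ()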

The following diagram shows the control-plane:

control plane

When thin-provisioning is enabled we will be modifying the LVM metadata at an increased rate. We will cache the current metadata in the xenvmd process and funnel all queries through it, rather than “peeking” at the metadata on-disk. Note it will still be possible to peek at the on-disk metadata, but it may be out of date.

pre-commit provides a dedicated, clearly defined and always consistent Python environment. The easiest way to run all tests and checks is to simply run pre-commit. The example commands below assume that you have Python3 in your PATH. Currently, Python 3.11 is required for it:


    pip3 install pre-commit
     pre-commit run -av
     # Or, to just run the pytest hook:
     pre-commit run -av pytest

    Note: By default, CentOS 8 provides Python 3.6, whereas some tests need Python >= 3.7

Alternatively, you can of course run tests in any suitable environment, given that you install the supported versions of all dependencies. You can find the dependencies in the list additional_dependencies of the pytest hook in the pre-commit configuration file .pre-commit-config.yaml.

For development, pytest can also run just one test:

    To run a specific pytest command, run pytest and pass the test case to it (example):

    pytest python3/tests/test_perfmon.py
    coverage run -m pytest python3/tests/test_perfmon.py && coverage report

    RRDD

    The xcp-rrdd daemon (hereafter simply called “rrdd”) is a component in the xapi toolstack that is responsible for collecting metrics, storing them as “Round-Robin Databases” (RRDs) and exposing these to clients.

    The code is in ocaml/xcp-rrdd.

    Subsections of RRDD

    Design document
Revision: v1
Status: released (7.0)

    RRDD archival redesign

    Introduction

    Current problems with rrdd:

    • rrdd stores knowledge about whether it is running on a master or a slave

This determines the host to which rrdd will archive a VM’s rrd when the VM’s …

… other_config map of the newly-created VM. This field can be updated by calling its getter (other_config <- VM.get_other_config(session, new_vm_ref)) and then its setter (VM.set_other_config(session, new_vm_ref, other_config)) with the modified other_config map.

  • At this stage the object referred to by new_vm_ref is still a template (just like the VM object referred to by t_ref, from which it was cloned). To make new_vm_ref into a VM object we need to call VM.provision(session, new_vm_ref). When this call returns the new_vm_ref object will have had its is_a_template field set to false, indicating that new_vm_ref now refers to a regular VM ready for starting.

Note

The provision operation may take a few minutes, as it is during this call that the template’s disk images are created. In the case of the Debian template, the newly created disks are also at this stage populated with a Debian root filesystem.
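
Putting the clone, other_config and provision steps together, here is a minimal sketch using the Python XenAPI bindings; the “disks” key shown is illustrative and template-specific, and session, t_ref and target_sr_uuid are assumed to exist already:

new_vm_ref = session.xenapi.VM.clone(t_ref, "my-new-vm")

# Read, modify and write back the other_config map; Debian-style
# templates describe where to create their disks under a "disks" key
# (treat this key and its format as an assumption, not a stable API).
other_config = session.xenapi.VM.get_other_config(new_vm_ref)
other_config["disks"] = other_config["disks"].replace(
    'sr=""', 'sr="%s"' % target_sr_uuid)  # target_sr_uuid: placeholder
session.xenapi.VM.set_other_config(new_vm_ref, other_config)

# Turn the cloned template into a real VM; creates and populates disks.
session.xenapi.VM.provision(new_vm_ref)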

    Taking the VM through a start/suspend/resume/stop cycle

Now that we have an object reference representing our newly-installed VM, it is trivial to take it through a few lifecycle operations (a short sketch follows this list):

    • To start our VM we can just call VM.start(session, new_vm_ref)

    • After it’s running, we can suspend it by calling VM.suspend(session, new_vm_ref),

    • and then resume it by calling VM.resume(session, new_vm_ref).

    • We can call VM.shutdown(session, new_vm_ref) to shutdown the VM cleanly.
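
Through the Python bindings the whole cycle is a handful of one-liners; a sketch (the two booleans on start and resume are the start_paused and force flags, and the bindings expose the clean shutdown as VM.clean_shutdown):

session.xenapi.VM.start(new_vm_ref, False, False)    # boot the VM
session.xenapi.VM.suspend(new_vm_ref)                # save its state to disk
session.xenapi.VM.resume(new_vm_ref, False, False)   # restore it
session.xenapi.VM.clean_shutdown(new_vm_ref)         # ask the guest to halt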

    Logging out

    Once an application is finished interacting with a XenServer Host it is good practice to call Session.logout(session). This invalidates the session reference (so it cannot be used in subsequent API calls) and simultaneously deallocates server-side memory used to store the session object.

Although inactive sessions will eventually time out, the server has a hardcoded limit of 500 concurrent sessions for each username or originator. Once this limit has been reached fresh logins will evict the session objects that have been used least recently, causing their associated session references to become invalid. For successful interoperability with other applications concurrently accessing the server, the best policy is:

    • Choose a string that identifies your application and its version.

    • Create a single session at start-of-day, using that identifying string for the originator parameter to Session.login_with_password.

    • Use this session throughout the application (note that sessions can be used across multiple separate client-server network connections) and then explicitly logout when possible.

If a poorly written client leaks sessions or otherwise exceeds the limit, then as long as the client uses an appropriate originator argument, it will be easily identifiable from the XenServer logs and XenServer will destroy the longest-idle sessions of the rogue client only; this may cause problems for that client but not for other clients. If the misbehaving client did not specify an originator, it would be harder to identify and would cause the premature destruction of sessions of any clients that also did not specify an originator.
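
Following the recommended pattern with the Python bindings might look like this (the host, credentials and originator string are invented for the example):

import XenAPI

session = XenAPI.Session("https://xenserver.example.com")
# One start-of-day login, tagged with an identifying originator string.
session.xenapi.login_with_password(
    "root", "password", "1.0", "example-inventory/1.2")
try:
    vms = session.xenapi.VM.get_all_records()  # ... use the session ...
finally:
    session.xenapi.session.logout()  # frees the server-side session object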

    Install and start example: summary

    We have seen how the API can be used to install a VM from a XenServer template and perform a number of lifecycle operations on it. You will note that the number of calls we had to make in order to affect these operations was small:

    • One call to acquire a session: Session.login_with_password()

    • One call to query the VM (and template) objects present on the XenServer installation: VM.get_all_records(). Recall that we used the information returned from this call to select a suitable template to install from.

  • Four calls to install a VM from our chosen template: VM.clone(), followed by the getter and setter of the other_config field to specify where to create the disk images of the template, and then VM.provision().

    • One call to start the resultant VM: VM.start() (and similarly other single calls to suspend, resume and shutdown accordingly)

    • And then one call to logout Session.logout()

    The take-home message here is that, although the API as a whole is complex and fully featured, common tasks (such as creating and performing lifecycle operations on VMs) are very straightforward to perform, requiring only a small number of simple API calls. Keep this in mind while you study the next section which may, on first reading, appear a little daunting!

    Object Model Overview

    This section gives a high-level overview of the object model of the API. A more detailed description of the parameters and methods of each class outlined here can be found in the XenServer API Reference document.

    We start by giving a brief outline of some of the core classes that make up the API. (Don’t worry if these definitions seem somewhat abstract in their initial presentation; the textual description in subsequent sections, and the code-sample walk through in the next Chapter will help make these concepts concrete.)

    • VM: A VM object represents a particular virtual machine instance on a XenServer Host or Resource Pool. Example methods include start, suspend and pool_migrate; example parameters include power_state, memory_static_max, and name_label. (In the previous section we saw how the VM class is used to represent both templates and regular VMs.)
    • Host: A host object represents a physical host in a XenServer pool. Example methods include reboot and shutdown. Example parameters include software_version, hostname, and [IP] address.
    • VDI: A VDI object represents a Virtual Disk Image. Virtual Disk Images can be attached to VMs, in which case a block device appears inside the VM through which the bits encapsulated by the Virtual Disk Image can be read and written. Example methods of the VDI class include “resize” and “clone”. Example fields include “virtual_size” and “sharable”. (When we called VM.provision on the VM template in our previous example, some VDI objects were automatically created to represent the newly created disks, and attached to the VM object.)
    • SR: An SR (Storage Repository) aggregates a collection of VDIs and encapsulates the properties of physical storage on which the VDIs’ bits reside. Example parameters include type (which determines the storage-specific driver a XenServer installation uses to read/write the SR’s VDIs) and physical_utilisation; example methods include scan (which invokes the storage-specific driver to acquire a list of the VDIs contained within the SR and the properties of these VDIs) and create (which initializes a block of physical storage so it is ready to store VDIs).
    • Network: A network object represents a layer-2 network that exists in the environment in which the XenServer Host instance lives. Since XenServer does not manage networks directly this is a lightweight class that serves merely to model physical and virtual network topology. VM and Host objects that are attached to a particular Network object (by virtue of VIF and PIF instances – see below) can send network packets to each other.

    At this point, readers who are finding this enumeration of classes rather terse may wish to skip to the code walk-throughs of the next chapter: there are plenty of useful applications that can be written using only a subset of the classes already described! For those who wish to continue this description of classes in the abstract, read on.

    On top of the classes listed above, there are 4 more that act as connectors, specifying relationships between VMs and Hosts, and Storage and Networks. The first 2 of these classes that we will consider, VBD and VIF, determine how VMs are attached to virtual disks and network objects respectively:

    • VBD: A VBD (Virtual Block Device) object represents an attachment between a VM and a VDI. When a VM is booted its VBD objects are queried to determine which disk images (VDIs) should be attached. Example methods of the VBD class include “plug” (which hot plugs a disk device into a running VM, making the specified VDI accessible therein) and “unplug” (which hot unplugs a disk device from a running guest); example fields include “device” (which determines the device name inside the guest under which the specified VDI will be made accessible).
    • VIF: A VIF (Virtual network InterFace) object represents an attachment between a VM and a Network object. When a VM is booted its VIF objects are queried to determine which network devices should be created. Example methods of the VIF class include “plug” (which hot plugs a network device into a running VM) and “unplug” (which hot unplugs a network device from a running guest).

    The second set of “connector classes” that we will consider determine how Hosts are attached to Networks and Storage.

    • PIF: A PIF (Physical InterFace) object represents an attachment between a Host and a Network object. If a host is connected to a Network (over a PIF) then packets from the specified host can be transmitted/received by the corresponding host. Example fields of the PIF class include “device” (which specifies the device name to which the PIF corresponds – e.g. eth0) and “MAC” (which specifies the MAC address of the underlying NIC that a PIF represents). Note that PIFs abstract both physical interfaces and VLANs (the latter distinguished by the existence of a positive integer in the “VLAN” field).
    • PBD: A PBD (Physical Block Device) object represents an attachment between a Host and an SR (Storage Repository) object. Fields include “currently-attached” (which specifies whether the chunk of storage represented by the specified SR object is currently available to the host) and “device_config” (which specifies storage-driver-specific parameters that determine how the low-level storage devices are configured on the specified host – e.g. in the case of an SR rendered on an NFS filer, device_config may specify the host-name of the filer and the path on the filer in which the SR files live).

Graphical overview of API classes for managing VMs, Hosts, Storage and Networking

    The figure above presents a graphical overview of the API classes involved in managing VMs, Hosts, Storage and Networking. From this diagram, the symmetry between storage and network configuration, and also the symmetry between virtual machine and host configuration is plain to see.

    Working with VIFs and VBDs

    In this section we walk through a few more complex scenarios, describing informally how various tasks involving virtual storage and network devices can be accomplished using the API.

    Creating disks and attaching them to VMs

    Let’s start by considering how to make a new blank disk image and attach it to a running VM. We will assume that we already have ourselves a running VM, and we know its corresponding API object reference (e.g. we may have created this VM using the procedure described in the previous section, and had the server return its reference to us.) We will also assume that we have authenticated with the XenServer installation and have a corresponding session reference. Indeed in the rest of this chapter, for the sake of brevity, we will stop mentioning sessions altogether.

    Creating a new blank disk image

    The first step is to instantiate the disk image on physical storage. We do this by calling VDI.create(). The VDI.create call takes a number of parameters, including:

    • name_label and name_description: a human-readable name/description for the disk (e.g. for convenient display in the UI etc.). These fields can be left blank if desired.

    • SR: the object reference of the Storage Repository representing the physical storage in which the VDI’s bits will be placed.

    • read_only: setting this field to true indicates that the VDI can only be attached to VMs in a read-only fashion. (Attempting to attach a VDI with its read_only field set to true in a read/write fashion results in error.)

    Invoking the VDI.create call causes the XenServer installation to create a blank disk image on physical storage, create an associated VDI object (the datamodel instance that refers to the disk image on physical storage) and return a reference to this newly created VDI object.

    The way in which the disk image is represented on physical storage depends on the type of the SR in which the created VDI resides. For example, if the SR is of type “lvm” then the new disk image will be rendered as an LVM volume; if the SR is of type “nfs” then the new disk image will be a sparse VHD file created on an NFS filer. (You can query the SR type through the API using the SR.get_type() call.)

    Note

    Some SR types might round up the virtual-size value to make it divisible by a configured block size.
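
A sketch of the call through the Python bindings; the exact set of record fields required can vary with the API version, so treat the record below as indicative (sr_ref is assumed to be an SR reference you already hold):

vdi_record = {
    "name_label": "scratch-disk",
    "name_description": "blank disk created via the API",
    "SR": sr_ref,
    "virtual_size": str(8 * 1024 ** 3),  # 8 GiB; int64 values travel as strings
    "type": "user",
    "sharable": False,
    "read_only": False,
    "other_config": {},
}
vdi_ref = session.xenapi.VDI.create(vdi_record)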

    Attaching the disk image to a VM

    So far we have a running VM (that we assumed the existence of at the start of this example) and a fresh VDI that we just created. Right now, these are both independent objects that exist on the XenServer Host, but there is nothing linking them together. So our next step is to create such a link, associating the VDI with our VM.

    The attachment is formed by creating a new “connector” object called a VBD (Virtual Block Device). To create our VBD we invoke the VBD.create() call. The VBD.create() call takes a number of parameters including:

    • VM - the object reference of the VM to which the VDI is to be attached

    • VDI - the object reference of the VDI that is to be attached

    • mode - specifies whether the VDI is to be attached in a read-only or a read-write fashion

    • userdevice - specifies the block device inside the guest through which applications running inside the VM will be able to read/write the VDI’s bits.

    • type - specifies whether the VDI should be presented inside the VM as a regular disk or as a CD. (Note that this particular field has more meaning for Windows VMs than it does for Linux VMs, but we will not explore this level of detail in this chapter.)

    Invoking VBD.create makes a VBD object on the XenServer installation and returns its object reference. However, this call in itself does not have any side-effects on the running VM (that is, if you go and look inside the running VM you will see that the block device has not been created). The fact that the VBD object exists but that the block device in the guest is not active, is reflected by the fact that the VBD object’s currently_attached field is set to false.

A VM object with 2 associated VDIs

    For expository purposes, the figure above presents a graphical example that shows the relationship between VMs, VBDs, VDIs and SRs. In this instance a VM object has 2 attached VDIs: there are 2 VBD objects that form the connections between the VM object and its VDIs; and the VDIs reside within the same SR.

    Hotplugging the VBD

    If we rebooted the VM at this stage then, after rebooting, the block device corresponding to the VBD would appear: on boot, XenServer queries all VBDs of a VM and actively attaches each of the corresponding VDIs.

    Rebooting the VM is all very well, but recall that we wanted to attach a newly created blank disk to a running VM. This can be achieved by invoking the plug method on the newly created VBD object. When the plug call returns successfully, the block device to which the VBD relates will have appeared inside the running VM – i.e. from the perspective of the running VM, the guest operating system is led to believe that a new disk device has just been hot plugged. Mirroring this fact in the managed world of the API, the currently_attached field of the VBD is set to true.

    Unsurprisingly, the VBD plug method has a dual called “unplug”. Invoking the unplug method on a VBD object causes the associated block device to be hot unplugged from a running VM, setting the currently_attached field of the VBD object to false accordingly.
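
A sketch of the create/plug/unplug sequence through the Python bindings (vm_ref and vdi_ref as in the earlier examples; the record fields are indicative):

vbd_record = {
    "VM": vm_ref,
    "VDI": vdi_ref,
    "userdevice": "1",    # device slot inside the guest
    "mode": "RW",
    "type": "Disk",
    "bootable": False,
    "empty": False,
    "other_config": {},
    "qos_algorithm_type": "",
    "qos_algorithm_params": {},
}
vbd_ref = session.xenapi.VBD.create(vbd_record)  # currently_attached is false
session.xenapi.VBD.plug(vbd_ref)    # block device appears in the running VM
session.xenapi.VBD.unplug(vbd_ref)  # and is hot-unplugged again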

    Creating and attaching Network Devices to VMs

    The API calls involved in configuring virtual network interfaces in VMs are similar in many respects to the calls involved in configuring virtual disk devices. For this reason we will not run through a full example of how one can create network interfaces using the API object-model; instead we will use this section just to outline briefly the symmetry between virtual networking device and virtual storage device configuration.

    The networking analogue of the VBD class is the VIF class. Just as a VBD is the API representation of a block device inside a VM, a VIF (Virtual network InterFace) is the API representation of a network device inside a VM. Whereas VBDs associate VM objects with VDI objects, VIFs associate VM objects with Network objects. Just like VBDs, VIFs have a currently_attached field that determines whether or not the network device (inside the guest) associated with the VIF is currently active or not. And as we saw with VBDs, at VM boot-time the VIFs of the VM are queried and a corresponding network device for each created inside the booting VM. Similarly, VIFs also have plug and unplug methods for hot plugging/unplugging network devices in/out of running VMs.
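
To underline that symmetry, here is a VIF version of the earlier VBD sketch (the record fields are indicative, and an empty MAC asks the server to generate one):

vif_record = {
    "VM": vm_ref,
    "network": network_ref,  # a Network reference
    "device": "0",           # interface slot inside the guest
    "MAC": "",
    "MTU": "1500",
    "other_config": {},
    "qos_algorithm_type": "",
    "qos_algorithm_params": {},
}
vif_ref = session.xenapi.VIF.create(vif_record)
session.xenapi.VIF.plug(vif_ref)  # hot-plug the network device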

    Host configuration for networking and storage

    We have seen that the VBD and VIF classes are used to manage configuration of block devices and network devices (respectively) inside VMs. To manage host configuration of storage and networking there are two analogous classes: PBD (Physical Block Device) and PIF (Physical [network] InterFace).

    Host storage configuration: PBDs

    Let us start by considering the PBD class. A PBD_create() call takes a number of parameters including:

    • host: the physical machine on which the PBD is available
    • SR: the Storage Repository that the PBD connects to
    • device_config: a string-to-string map that is provided to the host’s SR-backend-driver, containing the low-level parameters required to configure the physical storage device(s) on which the SR is to be realized. The specific contents of the device_config field depend on the type of the SR to which the PBD is connected. (Executing xe sm-list will show a list of possible SR types; the configuration field in this enumeration specifies the device_config parameters that each SR type expects.)

    For example, imagine we have an SR object s of type “nfs” (representing a directory on an NFS filer within which VDIs are stored as VHD files); and let’s say that we want a host, h, to be able to access s. In this case we invoke PBD.create() specifying host h, SR s, and a value for the device_config parameter that is the following map:

    ("server", "my_nfs_server.example.com"), ("serverpath", "/scratch/mysrs/sr1")

    This tells the XenServer Host that SR s is accessible on host h, and further that to access SR s, the host needs to mount the directory /scratch/mysrs/sr1 on the NFS server named my_nfs_server.example.com.

    Like VBD objects, PBD objects also have a field called currently_attached. Storage repositories can be attached and detached from a given host by invoking PBD.plug and PBD.unplug methods respectively.
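
The NFS example above, expressed as a sketch through the Python bindings (h and s are the host and SR references from the example):

device_config = {
    "server": "my_nfs_server.example.com",
    "serverpath": "/scratch/mysrs/sr1",
}
pbd_record = {
    "host": h,
    "SR": s,
    "device_config": device_config,
    "other_config": {},
}
pbd_ref = session.xenapi.PBD.create(pbd_record)
session.xenapi.PBD.plug(pbd_ref)  # sets currently_attached to true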

    Host networking configuration: PIFs

    Host network configuration is specified by virtue of PIF objects. If a PIF object connects a network object, n, to a host object h, then the network corresponding to n is bridged onto a physical interface (or a physical interface plus a VLAN tag) specified by the fields of the PIF object.

For example, imagine a PIF object exists connecting host h to a network n, and that the device field of the PIF object is set to eth0. This means that all packets on network n are bridged to the NIC in the host corresponding to host network device eth0.

    XML-RPC notes

    Datetimes

    The API deviates from the XML-RPC specification in handling of datetimes. The API appends a “Z” to the end of datetime strings, which is meant to indicate that the time is expressed in UTC.
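
For example, a wire value such as 20240321T10:15:30Z (an invented timestamp in the usual XML-RPC dateTime shape plus the trailing “Z”) can be parsed like this on Python 3.7+:

from datetime import datetime

stamp = "20240321T10:15:30Z"
parsed = datetime.strptime(stamp, "%Y%m%dT%H:%M:%S%z")  # %z accepts "Z"
print(parsed.isoformat())  # 2024-03-21T10:15:30+00:00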

    API evolution

All APIs evolve as bugs are fixed, new features added and features are removed; the XenAPI is no exception. This document lists policies describing how the XenAPI evolves over time.

    The goals of XenAPI evolution are:

    • to allow bugs to be fixed efficiently;
    • to allow new, innovative features to be added easily;
    • to keep old, unmodified clients working as much as possible; and
    • where backwards-incompatible changes are to be made, publish this information early to enable affected parties to give timely feedback.

    Background

In this document, the term XenAPI refers to the XMLRPC-derived wire protocol used by xapi. The XenAPI has objects which each have fields and …

[VM lifecycle state diagram: transitions include running to halted via clean shutdown or hard shutdown, running to paused via pause, and halted to destroyed via destroy]

The figure above shows the states that a VM can be in and the API calls that can be used to move the VM between these states.

      XenCenter

      XenCenter uses some conventions on top of the XenAPI:

      Internationalization for SR names

      The SRs created at install time now have an other_config key indicating how their names may be internationalized.

      other_config["i18n-key"] may be one of

      • local-hotplug-cd

      • local-hotplug-disk

      • local-storage

      • xenserver-tools

      Additionally, other_config["i18n-original-value-<field name>"] gives the value of that field when the SR was created. If XenCenter sees a record where SR.name_label equals other_config["i18n-original-value-name_label"] (that is, the record has not changed since it was created during XenServer installation), then internationalization will be applied. In other words, XenCenter will disregard the current contents of that field, and instead use a value appropriate to the user’s own language.

      If you change SR.name_label for your own purpose, then it no longer is the same as other_config["i18n-original-value-name_label"]. Therefore, XenCenter does not apply internationalization, and instead preserves your given name.
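
A sketch of the check XenCenter effectively performs (my_translations is a hypothetical lookup table mapping i18n keys to localized names):

name_label = session.xenapi.SR.get_name_label(sr_ref)
other_config = session.xenapi.SR.get_other_config(sr_ref)

if name_label == other_config.get("i18n-original-value-name_label"):
    # Unchanged since install: localise using the i18n key.
    display_name = my_translations[other_config["i18n-key"]]
else:
    display_name = name_label  # the user renamed it, keep their name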

      Hiding objects from XenCenter

      Networks, PIFs, and VMs can be hidden from XenCenter by adding the key HideFromXenCenter=true to the other_config parameter for the object. This capability is intended for ISVs who know what they are doing, not general use by everyday users. For example, you might want to hide certain VMs because they are cloned VMs that shouldn’t be used directly by general users in your environment.
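
For example, hiding a cloned VM via the Python bindings (vm_ref is assumed to be a VM reference you already hold):

session.xenapi.VM.add_to_other_config(vm_ref, "HideFromXenCenter", "true")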

In XenCenter, hidden Networks, PIFs, and VMs can be made visible using the View menu.


        \ No newline at end of file diff --git a/new-docs/python/index.html b/new-docs/python/index.html index d000f7cac..a7ad4fba3 100644 --- a/new-docs/python/index.html +++ b/new-docs/python/index.html @@ -1,5 +1,5 @@ Python :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/python/index.print.html b/new-docs/python/index.print.html index 250d7a53c..e2559d48b 100644 --- a/new-docs/python/index.print.html +++ b/new-docs/python/index.print.html @@ -1,5 +1,5 @@ Python :: XAPI Toolstack Developer Documentation -

        Python

        Introduction

        Most Python3 scripts and plugins shall be located below the python3 directory. The structure of the directory is as follows:

        • python3/bin: This contains files installed in /opt/xensource/bin and are meant to be run by users
        • python3/libexec: This contains files installed in /opt/xensource/libexec and are meant to only be run by xapi and other daemons.
        • python3/packages: Contains files to be installed in python’s site-packages …

        … It provides a dedicated, clearly defined and always consistent Python environment. The easiest way to run all tests and checks is to simply run pre-commit. The example commands below assume that you have Python3 in your PATH. Currently, Python 3.11 is required for it:

          pip3 install pre-commit
           pre-commit run -av
           # Or, to just run the pytest hook:
           pre-commit run -av pytest

          Note: By default, CentOS 8 provides Python 3.6, whereas some tests need Python >= 3.7

Alternatively, you can of course run tests in any suitable environment, given that you install the supported versions of all dependencies. You can find the dependencies in the list additional_dependencies of the pytest hook in the pre-commit configuration file .pre-commit-config.yaml.

For development, pytest can also run only one test:

        To run a specific pytest command, run pytest and pass the test case to it (example):

        pytest python3/tests/test_perfmon.py
        coverage run -m pytest python3/tests/test_perfmon.py && coverage report
        \ No newline at end of file diff --git a/new-docs/squeezed/architecture/index.html b/new-docs/squeezed/architecture/index.html index ca762f524..f0be3b4fc 100644 --- a/new-docs/squeezed/architecture/index.html +++ b/new-docs/squeezed/architecture/index.html @@ -1,7 +1,7 @@ Architecture :: XAPI Toolstack Developer Documentation -

        Architecture

Squeezed is responsible for managing the memory on a single host. Squeezed “balances” memory between VMs according to a policy written to Xenstore.

The following diagram shows the internals of Squeezed:

Internals of squeezed

        At the center of squeezed is an abstract model of a Xen host. The model includes:

        • The amount of already-used host memory (used by fixed overheads such as Xen and the crash kernel).
        • Per-domain memory policy, specifically dynamic-min and dynamic-max, which together describe a range within which the domain’s actual used memory …

… domain cannot allocate).
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/squeezed/design/index.html b/new-docs/squeezed/design/index.html index c6a4bc3c6..7e4f42180 100644 --- a/new-docs/squeezed/design/index.html +++ b/new-docs/squeezed/design/index.html @@ -1,5 +1,5 @@ Design :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/squeezed/index.html b/new-docs/squeezed/index.html index f81b077d3..1b99154cf 100644 --- a/new-docs/squeezed/index.html +++ b/new-docs/squeezed/index.html @@ -1,5 +1,5 @@ Squeezed :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/squeezed/index.print.html b/new-docs/squeezed/index.print.html index a323bfd1c..eca0aeb0b 100644 --- a/new-docs/squeezed/index.print.html +++ b/new-docs/squeezed/index.print.html @@ -1,11 +1,11 @@ Squeezed :: XAPI Toolstack Developer Documentation -

        Squeezed

        Squeezed is the XAPI Toolstack’s host memory manager (aka balloon driver). Squeezed uses ballooning to move memory between running VMs, to avoid wasting host memory.

        Principles

        1. Avoid wasting host memory: unused memory should be put to use by returning it to VMs.
        2. Memory should be shared in proportion to the configured policy.
        3. Operate entirely at the level of domains (not VMs), and be independent of Xen toolstack.

        Subsections of Squeezed

        Architecture

Squeezed is responsible for managing the memory on a single host. Squeezed “balances” memory between VMs according to a policy written to Xenstore.

The following diagram shows the internals of Squeezed:

Internals of squeezed
        At the center of squeezed is an abstract model of a Xen host. The model includes:

        • The amount of already-used host memory (used by fixed overheads such as Xen and the crash kernel).
        • Per-domain memory policy, specifically dynamic-min and dynamic-max, which together describe a range within which the domain’s actual used memory …

… with an associated reservation id. Note this is an internal Squeezed concept and Xen is completely unaware of it. When the daemon is moving memory between domains, it always aims to keep

host free memory >= s + sum_i(reservation_i)

where s is the size of the “slush fund” (currently 9MiB) and reservation_i is the amount corresponding to the ith reservation.

As an aside: Earlier versions of Squeezed always associated memory with a Xen domain. Unfortunately …

… to set memory/target. This can be used to dynamically cap the amount of memory a domain can use.

If all balloon drivers are responsive then the Squeezed daemon allocates memory proportionally, so that each domain has the same value of:

(target - min) / (max - min)

So (a small worked example follows this list):
          • if memory is plentiful then all domains will have memory/target=memory/dynamic-max

          • if memory is scarce then all domains will have memory/target=memory/dynamic-min
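
A toy model of this proportional policy (all numbers invented) makes the behaviour concrete:

domains = {
    # name: (dynamic_min, dynamic_max) in MiB
    "dom1": (512, 2048),
    "dom2": (1024, 4096),
}
host_free = 2000  # MiB spare above the domains' dynamic-min values

total_range = sum(dmax - dmin for dmin, dmax in domains.values())
fraction = min(1.0, host_free / total_range)  # the same for every domain
for name, (dmin, dmax) in domains.items():
    target = dmin + fraction * (dmax - dmin)
    print(name, int(target))  # dom1 ~1178 MiB, dom2 ~2357 MiB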

Note that the values of memory/target suggested by the policy are ideal values. In many real-life situations (e.g. when a balloon driver …

… since the domain will not be instructed to balloon. Since a domain which is being built will have 0 <= totpages <= reservation, Squeezed computes

unused(i) = reservation(i) - totpages

and subtracts this from its model of the host’s free memory, ensuring that it doesn’t accidentally reallocate this memory for some other purpose.

The Squeezed …

… adjusted-totpages and the arrow indicates the direction of the memory/target. For the host the square box indicates total free memory. Note the highlighted state where the host’s free memory is temporarily exhausted.

Two phase target setting

          In the initial state (at the top of the diagram), there are two domains, one which has been requested to use more memory and the other requested to use less memory. In effect the memory is to be transferred from one @@ -313,8 +313,8 @@ memory free than desired. The second diagram shows the result of computing ideal target values and the third diagram shows the result after targets have been set and the balloon drivers have -responded.

calculation
          The scenario above includes 3 domains (domain 1, domain 2, domain 3) on a host. Each of the domains has a non-ideal adjusted-totpages value.

Recall we also have the policy constraint that: dynamic-min <= target <= dynamic-max …

… use the default built-in proportional policy then, since all domains have the same dynamic-min and dynamic-max, each gets the same fraction of this free memory which we call g:

definition of g

For each domain, the ideal balloon target is now target = dynamic-min + g. Squeezed does not set all the targets at once: this would allow the …

… free 1 byte (or maybe 1 page) every 5s.

        • Likewise, declaring a domain “uncooperative” only if it has been inactive for 20s means that a domain could alternate between inactive for 19s and active for 1s and not be declared “uncooperative”.

        Document history

        • 0.2 (10th Nov 2014): Update to markdown
        • 0.1 (9th Nov 2009): Initial version
        \ No newline at end of file diff --git a/new-docs/squeezed/squeezer/index.html b/new-docs/squeezed/squeezer/index.html index d71b6ec06..e635003ef 100644 --- a/new-docs/squeezed/squeezer/index.html +++ b/new-docs/squeezed/squeezer/index.html @@ -1,5 +1,5 @@ Overview of the memory squeezer :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/tags/index.html b/new-docs/tags/index.html index 60054ffe4..bdb9e8277 100644 --- a/new-docs/tags/index.html +++ b/new-docs/tags/index.html @@ -1,10 +1,10 @@ Tags :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/toolstack/features/DR/index.html b/new-docs/toolstack/features/DR/index.html index 6a619a510..44e568f55 100644 --- a/new-docs/toolstack/features/DR/index.html +++ b/new-docs/toolstack/features/DR/index.html @@ -1,8 +1,8 @@ Disaster Recovery :: XAPI Toolstack Developer Documentation -

        Disaster Recovery

The HA feature will restart VMs after hosts have failed, but what happens if a whole site (e.g. datacenter) is lost? A disaster recovery configuration is shown in the following diagram:

Disaster recovery maintaining a secondary site

We rely on the storage array’s built-in mirroring to replicate (synchronously or asynchronously: the admin’s choice) between the primary and the secondary site. When DR is enabled the VM disk data and VM metadata are written to the storage server and mirrored. The secondary site contains the other side …

… and the VMs can be moved back.

        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/toolstack/features/HA/index.html b/new-docs/toolstack/features/HA/index.html index aaf0d7965..2630dd239 100644 --- a/new-docs/toolstack/features/HA/index.html +++ b/new-docs/toolstack/features/HA/index.html @@ -1,12 +1,12 @@ High-Availability :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/toolstack/features/NUMA/index.html b/new-docs/toolstack/features/NUMA/index.html index b1da05a54..4ab292417 100644 --- a/new-docs/toolstack/features/NUMA/index.html +++ b/new-docs/toolstack/features/NUMA/index.html @@ -1,8 +1,8 @@ NUMA :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/toolstack/features/VGPU/index.html b/new-docs/toolstack/features/VGPU/index.html index 863527ff1..edbb6793d 100644 --- a/new-docs/toolstack/features/VGPU/index.html +++ b/new-docs/toolstack/features/VGPU/index.html @@ -1,13 +1,13 @@ vGPU :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/toolstack/features/XSM/index.html b/new-docs/toolstack/features/XSM/index.html index 1c56fd724..d48a12d7d 100644 --- a/new-docs/toolstack/features/XSM/index.html +++ b/new-docs/toolstack/features/XSM/index.html @@ -1,7 +1,7 @@ Xapi Storage Migration :: XAPI Toolstack Developer Documentation -

        Xapi Storage Migration

        The Xapi Storage Migration (XSM) also known as “Storage Motion” allows

        • a running VM to be migrated within a pool, between different hosts and different storage simultaneously;
        • a running VM to be migrated to another pool;
        • a disk attached to a running VM to be moved to another SR.

        The following diagram shows how XSM works at a high level:

Xapi Storage Migration

The slowest part of a storage migration is migrating the storage, since virtual disks can be very large. Xapi starts by taking a snapshot and copying that to the destination as a background task. Before the datapath connecting the VM to the disk is re-established, xapi tells tapdisk to start mirroring all …

… complete and the original can be safely destroyed.

        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/toolstack/features/events/index.html b/new-docs/toolstack/features/events/index.html index 16162bb2d..00d84903d 100644 --- a/new-docs/toolstack/features/events/index.html +++ b/new-docs/toolstack/features/events/index.html @@ -1,5 +1,5 @@ Event handling in the Control Plane - Xapi, Xenopsd and Xenstore :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/toolstack/features/index.html b/new-docs/toolstack/features/index.html index 7f036911b..4a83c3432 100644 --- a/new-docs/toolstack/features/index.html +++ b/new-docs/toolstack/features/index.html @@ -1,10 +1,10 @@ Features :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/toolstack/features/index.print.html b/new-docs/toolstack/features/index.print.html index f545df65f..1caa4888e 100644 --- a/new-docs/toolstack/features/index.print.html +++ b/new-docs/toolstack/features/index.print.html @@ -1,8 +1,8 @@ Features :: XAPI Toolstack Developer Documentation -

        Subsections of Features

        Disaster Recovery

        The HA feature will restart VMs after hosts have failed, but what +

        Subsections of Features

        Disaster Recovery

        The HA feature will restart VMs after hosts have failed, but what happens if a whole site (e.g. datacenter) is lost? A disaster recovery -configuration is shown in the following diagram:

        Disaster recovery maintaining a secondary site -Disaster recovery maintaining a secondary site

        We rely on the storage array’s built-in mirroring to replicate (synchronously +configuration is shown in the following diagram:

        Disaster recovery maintaining a secondary site +Disaster recovery maintaining a secondary site

        We rely on the storage array’s built-in mirroring to replicate (synchronously or asynchronously: the admin’s choice) between the primary and the secondary site. When DR is enabled the VM disk data and VM metadata are written to the storage server and mirrored. The secondary site contains the other side @@ -45,12 +45,12 @@ VMs etc).

Given a choice between polling states and receiving events when the state changes, we should in general opt for receiving events in the code in order to avoid adding bottlenecks in dom0 that will prevent the -scalability of XenServer to many VMs and virtual devices.

        Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them -Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them

        Xapi

        Sending events from the xenapi

        A xenapi user client, such as XenCenter, the xe-cli or a python script, +scalability of XenServer to many VMs and virtual devices.

        Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them +Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them

        Xapi

        Sending events from the xenapi

        A xenapi user client, such as XenCenter, the xe-cli or a python script, can register to receive events from XAPI for specific objects in the XAPI DB. XAPI will generate events for those registered clients whenever -the corresponding XAPI DB object changes.

        Sending events from the xenapi -Sending events from the xenapi

        This small python scripts shows how to register a simple event watch +the corresponding XAPI DB object changes.

        Sending events from the xenapi +Sending events from the xenapi

This small Python script shows how to register a simple event watch loop for XAPI:

        import XenAPI
         session = XenAPI.Session("http://xshost")
         session.login_with_password("username","password")
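The diff context cuts the script short here; a minimal continuation, assuming the classic blocking event API (event.register followed by an event.next loop), would look something like:

session.xenapi.event.register(["*"])          # watch all object classes
while True:
    # event.next() blocks until XAPI generates new events for this session
    for event in session.xenapi.event.next():
        print(event["class"], event["operation"])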
        @@ -90,11 +90,11 @@
         run xapi_xenops.update_vgpu() to query xenopsd about its state.
• Task id: something changed in this task, run xapi_xenops.update_task() to query xenopsd about its state. The function update_task() will update the progress of the task in -the xapi DB using the information of the task in xenopsd.
      • Receiving events from xenopsd -Receiving events from xenopsd

        All the xapi_xenops.update_X() functions above will call +the xapi DB using the information of the task in xenopsd.

        Receiving events from xenopsd +Receiving events from xenopsd

        All the xapi_xenops.update_X() functions above will call Xenopsd_client.X.stat() functions to obtain the current state of X from -xenopsd:

        Obtaining current state -Obtaining current state

        There are a couple of optimisations while processing the events in +xenopsd:

        Obtaining current state +Obtaining current state

        There are a couple of optimisations while processing the events in xapi_xenops.events_watch():

• if an event X=(vm_id,dev_id) (e.g. Vbd dev_id) has already been processed in a barrier_events, it’s not processed again. A typical value for X is e.g. “<vm_uuid>.xvda” for a VBD.
• if Events_from_xenopsd.are_supressed X, then this event @@ -150,16 +150,16 @@ the dom0 backend changed the state of a virtual device), it creates a signal for the corresponding object (VM_check_state, VBD_check_state etc) and sends it up to xapi. Xapi will then process this event in its -xapi_xenops.events_watch() function.

          Sending events to xapi -Sending events to xapi

          These signals may need to wait a long time to be processed if the +xapi_xenops.events_watch() function.

          Sending events to xapi +Sending events to xapi

These signals may need to wait a long time to be processed if the single-threaded xapi_xenops.events_watch() function is having difficulty (i.e. taking a long time) processing previous signals in the UPDATES queue from xenopsd.

          Receiving events from xenstore

Xenopsd watches a number of keys in xenstore, both in dom0 and in each guest. Xenstore is responsible for sending watch events to xenopsd whenever the watched keys change state. Xenopsd uses a xenstore client library to make it easier to create a callback function that is called whenever -xenstore sends these events.

          Receiving events from xenstore -Receiving events from xenstore

          Xenopsd also needs to complement sometimes these watch events with +xenstore sends these events.

          Receiving events from xenstore +Receiving events from xenstore

Xenopsd also sometimes needs to complement these watch events with polling of some values. An example is the @introduceDomain event in xenstore (handled in xenopsd/xc/xenstore_watch.ml), which indicates that a new VM has been created. This event unfortunately does not @@ -179,8 +179,8 @@ the following may happen:

          • during the night someone spills a cup of coffee over an FC switch; then
          • VMs running on the affected hosts will lose access to their storage; then
          • business-critical services will go down; then
          • monitoring software will send a text message to an off-duty admin; then
          • the admin will travel to the office and fix the problem by restarting the VMs elsewhere.

          With HA the following will happen:

          • during the night someone spills a cup of coffee over an FC switch; then
          • VMs running on the affected hosts will lose access to their storage; then
          • business-critical services will go down; then
          • the HA software will determine which hosts are affected and shut them down; then
          • the HA software will restart the VMs on unaffected hosts; then
          • services are restored; then on the next working day
          • the admin can arrange for the faulty switch to be replaced.

          HA is designed to handle an emergency and allow the admin time to fix failures properly.

          Example

          The following diagram shows an HA-enabled pool, before and after a network -link between two hosts fails.

          High-Availability in action -High-Availability in action

          When HA is enabled, all hosts in the pool

          • exchange periodic heartbeat messages over the network
          • send heartbeats to a shared storage device.
          • attempt to acquire a “master lock” on the shared storage.

          HA is designed to recover as much as possible of the pool after a single failure +link between two hosts fails.

          High-Availability in action +High-Availability in action

          When HA is enabled, all hosts in the pool

          • exchange periodic heartbeat messages over the network
          • send heartbeats to a shared storage device.
          • attempt to acquire a “master lock” on the shared storage.

          HA is designed to recover as much as possible of the pool after a single failure i.e. it removes single points of failure. When some subset of the pool suffers a failure then the remaining pool members

          • figure out whether they are in the largest fully-connected set (the “liveset”);
            • if they are not in the largest set then they “fence” themselves (i.e. @@ -555,13 +555,13 @@ distinguish between a temporary storage failure and a permanent HA disable.
            • the heartbeat SR can be created on expensive low-latency high-reliability storage and made as small as possible (to minimise infrastructure cost), safe in the knowledge that if HA enables successfully once, it won’t run -out of space and fail to enable in the future.
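As an aside, the “largest fully-connected set” rule above can be made concrete with a small sketch (illustrative only; heard maps each host to the set of hosts it can exchange heartbeats with, and Xhad’s real protocol is considerably more involved):

from itertools import combinations

def liveset(heard):
    # heard[h] = set of hosts h can heartbeat with (including itself)
    hosts = sorted(heard)
    for size in range(len(hosts), 0, -1):
        for group in combinations(hosts, size):
            if all(set(group) <= heard[h] for h in group):
                return set(group)   # largest fully-connected subset
    return set()

# Host 3 has lost its network link to host 2:
heard = {1: {1, 2, 3}, 2: {1, 2}, 3: {1, 3}}
print(liveset(heard))  # {1, 2}: host 3 is not in the liveset and fences itself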

            The Xapi-to-Xapi communication looks as follows:

            Configuring HA around the Pool -Configuring HA around the Pool

            The Xapi Pool master calls Host.ha_join_liveset on all hosts in the +out of space and fail to enable in the future.

          The Xapi-to-Xapi communication looks as follows:

          Configuring HA around the Pool +Configuring HA around the Pool

          The Xapi Pool master calls Host.ha_join_liveset on all hosts in the pool simultaneously. Each host runs the ha_start_daemon script which starts Xhad. Each Xhad starts exchanging heartbeats over the network -and storage defined in the xhad.conf.

          Joining a liveset

          Starting up a host -Starting up a host

          The Xhad instances exchange heartbeats and decide which hosts are in +and storage defined in the xhad.conf.

          Joining a liveset

          Starting up a host +Starting up a host

          The Xhad instances exchange heartbeats and decide which hosts are in the “liveset” and which have been fenced.

          After joining the liveset, each host clears the “excluded” flag which would have been set if the host had been shutdown cleanly before – this is only @@ -572,8 +572,8 @@ enabled and there is a master already, this node will be expected to stand unopposed. Later when HA notices that the master host has been fenced, all remaining hosts will stand for election and one of them will -be chosen.

          Shutting down a host

          Shutting down a host -Shutting down a host

          When a host is to be shutdown cleanly, it can be safely “excluded” +be chosen.

          Shutting down a host

          Shutting down a host +Shutting down a host

          When a host is to be shutdown cleanly, it can be safely “excluded” from the pool such that a future failure of the storage heartbeat will not cause all pool hosts to self-fence (see survival rule 2 above). When a host is “excluded” all other hosts know that the host does not @@ -597,8 +597,8 @@ problem. Obviously this API should only be used if the admin is totally sure that HA has been disabled.

          Disabling HA

          There are 2 methods of disabling HA: one for the “normal” case when the statefile is available; and the other for the “emergency” case when the -statefile has failed and can’t be recovered.

          Disabling HA cleanly

          Disabling HA cleanly -Disabling HA cleanly

          HA can be shutdown cleanly when the statefile is working i.e. when hosts +statefile has failed and can’t be recovered.

          Disabling HA cleanly

          Disabling HA cleanly +Disabling HA cleanly

HA can be shut down cleanly when the statefile is working i.e. when hosts are alive because of survival rule 1. First the master Xapi tells the local Xhad to mark the pool state as “invalid” using ha_set_pool_state. Every xhad instance will notice this state change the next time it performs @@ -608,15 +608,15 @@ which sets ha_disable_failover_decisions in the local database. This prevents the node rebooting, gaining statefile access, acquiring the master lock and restarting VMs when other hosts have disabled their -fencing (i.e. a “split brain”).

          Disabling HA uncleanly -Disabling HA uncleanly

          Once the master is sure that no host will suddenly start recovering VMs +fencing (i.e. a “split brain”).

          Disabling HA uncleanly +Disabling HA uncleanly

          Once the master is sure that no host will suddenly start recovering VMs it is safe to call Host.ha_disarm_fencing which runs the script ha_disarm_fencing and then shuts down the Xhad with ha_stop_daemon.

          Add a host to the pool

          We assume that adding a host to the pool is an operation the admin will perform manually, so it is acceptable to disable HA for the duration and to re-enable it afterwards. If a failure happens during this operation then the admin will take care of it by hand.

        NUMA

        NUMA in a nutshell

        Systems that contain more than one CPU socket are typically built on a Non-Uniform Memory Architecture (NUMA) 12. -In a NUMA system each node has fast, lower latency access to local memory.

        hwloc -hwloc

        In the diagram 3 above we have 4 NUMA nodes:

        • 2 of those are due to 2 separate physical packages (sockets)
        • a further 2 is due to Sub-NUMA-Clustering (aka Nodes Per Socket for AMD) where the L3 cache is split

        The L3 cache is shared among multiple cores, but cores 0-5 have lower latency access to one part of it, than cores 6-11, and this is also reflected by splitting memory addresses into 4 31GiB ranges in total.

        In the diagram the closer the memory is to the core, the lower the access latency:

        • per-core caches: L1, L2
        • per-package shared cache: L3 (local part), L3 (remote part)
        • local NUMA node (to a group of cores, e.g. L#0 P#0), node 0
        • remote NUMA node in same package (L#1 P#2), node 1
        • remote NUMA node in other packages (L#2 P#1 and ‘L#3P#3’), node 2 and 3

        The NUMA distance matrix

        Accessing remote NUMA node in the other package has to go through a shared interconnect, which has lower bandwidth than the direct connections, and also a bottleneck if both cores have to access remote memory: the bandwidth for a single core is effectively at most half.

        This is reflected in the NUMA distance/latency matrix. +In a NUMA system each node has fast, lower latency access to local memory.

        hwloc +hwloc

        In the diagram 3 above we have 4 NUMA nodes:

        • 2 of those are due to 2 separate physical packages (sockets)
• a further 2 are due to Sub-NUMA-Clustering (aka Nodes Per Socket for AMD) where the L3 cache is split

The L3 cache is shared among multiple cores, but cores 0-5 have lower latency access to one part of it than cores 6-11, and this is also reflected by splitting memory addresses into four 31GiB ranges in total.

        In the diagram the closer the memory is to the core, the lower the access latency:

        • per-core caches: L1, L2
        • per-package shared cache: L3 (local part), L3 (remote part)
        • local NUMA node (to a group of cores, e.g. L#0 P#0), node 0
        • remote NUMA node in same package (L#1 P#2), node 1
• remote NUMA node in other packages (L#2 P#1 and L#3 P#3), nodes 2 and 3

        The NUMA distance matrix

Accessing a remote NUMA node in the other package has to go through a shared interconnect, which has lower bandwidth than the direct connections and is also a bottleneck if both cores have to access remote memory: the bandwidth for a single core is effectively at most half.

        This is reflected in the NUMA distance/latency matrix. The units are arbitrary, and by convention access latency to the local NUMA node is given distance ‘10’.

        Relative latency matrix by logical indexes:

index | 0  | 2  | 1  | 3
0     | 10 | 21 | 11 | 21
2     | 21 | 10 | 21 | 11
1     | 11 | 21 | 10 | 21
3     | 21 | 11 | 21 | 10

        This follows the latencies described previously:

        • fast access to local NUMA node memory (by definition), node 0, cost 10
        • slightly slower access latency to the other NUMA node in same package, node 1, cost 11
        • twice as slow access latency to remote NUMA memory in the other physical package (socket): nodes 2 and 3, cost 21
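To make the matrix concrete, here is a small sketch (values copied from the table above) that ranks the nodes reachable from a given node by access cost:

# NUMA distance matrix from the table above, keyed by node id.
DISTANCE = {
    0: {0: 10, 1: 11, 2: 21, 3: 21},
    1: {0: 11, 1: 10, 2: 21, 3: 21},
    2: {0: 21, 1: 21, 2: 10, 3: 11},
    3: {0: 21, 1: 21, 2: 11, 3: 10},
}

def nodes_by_distance(node):
    # Cheapest first: the local node, then the same package, then remote ones
    return sorted(DISTANCE[node], key=lambda other: DISTANCE[node][other])

print(nodes_by_distance(0))  # [0, 1, 2, 3]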

There is also I/O NUMA where a cost is similarly associated with where a PCIe device is plugged in, but exploring that is future work (it requires exposing NUMA topology to the Dom0 kernel to benefit from it), and for simplicity the diagram above does not show it.

        Advantages of NUMA

        NUMA does have advantages though: if each node accesses only its local memory, then each node can independently achieve maximum throughput.

        For best performance, we should:

        • minimize the amount of interconnect bandwidth we are using
        • run code that accesses memory allocated on the closest NUMA node
        • maximize the number of NUMA nodes that we use in the system as a whole

        If a VM’s memory and vCPUs can entirely fit within a single NUMA node then we should tell Xen to prefer to allocate memory from and run the vCPUs on a single NUMA node.
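A minimal sketch of that placement decision (hypothetical numbers and helper name; the real planner lives in xenopsd and reads these figures from Xen):

# Hypothetical per-node free resources; memory in MiB.
nodes = [
    {"id": 0, "free_mem": 40960, "pcpus": [0, 1, 2, 3, 4, 5]},
    {"id": 1, "free_mem": 8192,  "pcpus": [6, 7, 8, 9, 10, 11]},
]

def pick_node(vm_mem, vm_vcpus):
    # Prefer a single node that can hold the whole VM.
    for node in nodes:
        if vm_mem <= node["free_mem"] and vm_vcpus <= len(node["pcpus"]):
            return node["id"]   # soft-pin the vCPUs to this node's pCPUs
    return None                 # no single node fits

print(pick_node(vm_mem=16384, vm_vcpus=4))  # 0: only node 0 has enough memory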

        Xen vCPU soft-affinity

        The Xen scheduler supports 2 kinds of constraints:

        • hard pinning: a vCPU may only run on the specified set of pCPUs and nowhere else
        • soft pinning: a vCPU is preferably run on the specified set of pCPUs, but if they are all busy then it may run elsewhere

Hard pinning can be used to partition the system. But it can potentially leave part of the system idle while another part is bottlenecked by many vCPUs competing for the same limited set of pCPUs.

Xen does not migrate workloads between NUMA nodes on its own (the Linux kernel can), although it is possible to achieve a similar effect with explicit migration. However, migration introduces additional delays and is best avoided for entire VMs.

Therefore, soft pinning is preferred: running on a potentially suboptimal pCPU that uses remote memory could still be better than not running it at all until a pCPU is free to run it.

        Xen will also allocate memory for the VM according to the vCPU (soft) pinning: If the vCPUs are pinned to NUMA nodes A and B, Xen allocates memory from NUMA nodes A and B in a round-robin way, resulting in interleaving.
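For example (an illustrative sketch of the round-robin behaviour just described, not Xen's actual allocator):

from itertools import cycle

def interleave_pages(num_pages, pinned_nodes):
    # Assign each page to a node round-robin across the pinned nodes.
    node = cycle(pinned_nodes)
    return [next(node) for _ in range(num_pages)]

print(interleave_pages(6, ["A", "B"]))  # ['A', 'B', 'A', 'B', 'A', 'B']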

        Current default: No vCPU pinning

        By default, when no vCPU pinning is used, Xen interleaves memory from all NUMA nodes. This averages the memory performance, but individual tasks’ performance may be significantly higher or lower depending on which NUMA node the application may have “landed” on. As a result, restarting processes will speed them up or slow them down as address space randomization picks different memory regions inside a VM.

        This uses the memory bandwidth of all memory controllers and distributes the load across all nodes. @@ -659,8 +659,8 @@ with some storage arrays in which snapshots are “second class” objects which are automatically deleted when the original disk is deleted.

Disks are implemented in Xapi via “Storage Manager” (SM) plugins. The SM plugins conform to an API (the SMAPI) which has operations including

        • vdi_create: make a fresh disk, full of zeroes
        • vdi_snapshot: create a snapshot of a disk
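As a sketch of the shape of such a plugin (hypothetical class and method names; the real SMAPI is much larger and is invoked by xapi rather than called directly):

import uuid

class HypotheticalSMPlugin:
    def __init__(self):
        # vdi uuid -> {"size": bytes, "parent": parent vdi uuid or None}
        self.vdis = {}

    def vdi_create(self, size):
        # Make a fresh disk, full of zeroes.
        vdi = str(uuid.uuid4())
        self.vdis[vdi] = {"size": size, "parent": None}
        return vdi

    def vdi_snapshot(self, vdi):
        # Create a snapshot: a new leaf sharing blocks with the original.
        snap = str(uuid.uuid4())
        self.vdis[snap] = {"size": self.vdis[vdi]["size"], "parent": vdi}
        return snap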

        File-based vhd implementation

        The existing “EXT” and “NFS” file-based Xapi SM plugins store disk data in -trees of .vhd files as in the following diagram:

        Relationship between VDIs and vhd files -Relationship between VDIs and vhd files

        From the XenAPI point of view, we have one current VDI and a set of snapshots, +trees of .vhd files as in the following diagram:

        Relationship between VDIs and vhd files +Relationship between VDIs and vhd files

        From the XenAPI point of view, we have one current VDI and a set of snapshots, each taken at a different point in time. These VDIs correspond to leaf vhds in a tree stored on disk, where the non-leaf nodes contain all the shared blocks.

        The vhd files are always thinly-provisioned which means they only allocate new blocks on an as-needed basis. The snapshot leaf vhd files only contain vhd @@ -669,28 +669,28 @@ contains only the vhd metadata and therefore is very small (a few KiB) and will only grow when the VM writes blocks.

        File-based vhd implementations are a good choice if a “gold image” snapshot is going to be cloned lots of times.

        Block-based vhd implementation

        The existing “LVM”, “LVMoISCSI” and “LVMoHBA” block-based Xapi SM plugins store -disk data in trees of .vhd files contained within LVM logical volumes:

        Relationship between VDIs and LVs containing vhd data -Relationship between VDIs and LVs containing vhd data

        Non-snapshot VDIs are always stored full size (a.k.a. thickly-provisioned). +disk data in trees of .vhd files contained within LVM logical volumes:

        Relationship between VDIs and LVs containing vhd data +Relationship between VDIs and LVs containing vhd data

        Non-snapshot VDIs are always stored full size (a.k.a. thickly-provisioned). When parent nodes are created they are automatically shrunk to the minimum size needed to store the shared blocks. The LVs corresponding with snapshot VDIs only contain vhd metadata and by default consume 8MiB. Note: this is different to VDI.clones which are stored full size.

        Block-based vhd implementations are not a good choice if a “gold image” snapshot is going to be cloned lots of times, since each clone will be stored full size.

        Hypothetical LUN implementation

        A hypothetical Xapi SM plugin could use LUNs on an iSCSI storage array as VDIs, and the array’s custom control interface to implement the “snapshot” -operation:

        Relationship between VDIs and LUNs on a hypothetical storage target -Relationship between VDIs and LUNs on a hypothetical storage target

        From the XenAPI point of view, we have one current VDI and a set of snapshots, +operation:

        Relationship between VDIs and LUNs on a hypothetical storage target +Relationship between VDIs and LUNs on a hypothetical storage target

        From the XenAPI point of view, we have one current VDI and a set of snapshots, each taken at a different point in time. These VDIs correspond to LUNs on the same iSCSI target, and internally within the target these LUNs are comprised of blocks from a large shared copy-on-write pool with support for dedup.

        Reverting disk snapshots

        There is no current way to revert in-place a disk to a snapshot, but it is possible to create a writable disk by “cloning” a snapshot.
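For example, reusing the session from the earlier event script (the snapshot UUID is a placeholder):

snapshot_vdi = session.xenapi.VDI.get_by_uuid("<snapshot-uuid>")
# VDI.clone returns a new writable disk backed by the snapshot's blocks;
# the second argument is a dictionary of driver-specific parameters.
writable_vdi = session.xenapi.VDI.clone(snapshot_vdi, {})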

        VM snapshots

Let’s say we have a VM, “VM1”, that has 2 disks. Concentrating only -on the VM, VBDs and VDIs, we have the following structure:

        VM objects -VM objects

        When we take a snapshot, we first ask the storage backends to snapshot +on the VM, VBDs and VDIs, we have the following structure:

        VM objects +VM objects

        When we take a snapshot, we first ask the storage backends to snapshot all of the VDIs associated with the VM, producing new VDI objects. Then we copy all of the metadata, producing a new ‘snapshot’ VM object, complete with its own VBDs copied from the original, but now pointing at the snapshot VDIs. We also copy the VIFs and VGPUs -but for now we will ignore those.

        This process leads to a set of objects that look like this:

        VM and snapshot objects -VM and snapshot objects

        We have fields that help navigate the new objects: VM.snapshot_of, +but for now we will ignore those.

        This process leads to a set of objects that look like this:

        VM and snapshot objects +VM and snapshot objects

We have fields that help navigate the new objects: VM.snapshot_of and VDI.snapshot_of. These, as you would expect, point to the relevant other objects.
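For example, reusing the session from the earlier event script, taking a snapshot and navigating back via these fields might look like this (the VM name is a placeholder):

vm = session.xenapi.VM.get_by_name_label("VM1")[0]
snap = session.xenapi.VM.snapshot(vm, "VM1-snapshot")
assert session.xenapi.VM.get_snapshot_of(snap) == vm
for vbd in session.xenapi.VM.get_VBDs(snap):
    vdi = session.xenapi.VBD.get_VDI(vbd)           # snapshot VDI
    print(session.xenapi.VDI.get_snapshot_of(vdi))  # original VDI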

        Deleting VM snapshots

        When a snapshot is deleted Xapi calls the SM API vdi_delete. The Xapi SM plugins which use vhd format data do not reclaim space immediately; instead @@ -699,14 +699,14 @@ whether any parent nodes have only one child i.e. the “shared” blocks are only “shared” with one other node. In the following example the snapshot delete leaves such a parent node and the coalesce process copies blocks from the redundant -parent’s only child into the parent:

        We coalesce parent blocks into grand parent nodes -We coalesce parent blocks into grand parent nodes

        Note that if the vhd data is being stored in LVM, then the parent node will +parent’s only child into the parent:

        We coalesce parent blocks into grand parent nodes +We coalesce parent blocks into grand parent nodes

        Note that if the vhd data is being stored in LVM, then the parent node will have had to be expanded to full size to accommodate the writes. Unfortunately this means the act of reclaiming space actually consumes space itself, which means it is important to never completely run out of space in such an SR.

        Once the blocks have been copied, we can now cut one of the parents out of the -tree by relinking its children into their grandparent:

        Relink children into grand parent -Relink children into grand parent

        Finally the garbage collector can remove unused vhd files / LVM LVs:

        Clean up -Clean up

        Reverting VM snapshots

        The XenAPI call VM.revert overwrites the VM metadata with the snapshot VM +tree by relinking its children into their grandparent:

        Relink children into grand parent +Relink children into grand parent

        Finally the garbage collector can remove unused vhd files / LVM LVs:

        Clean up +Clean up
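The delete, coalesce, relink and GC steps above can be summarised in a short sketch (illustrative tree bookkeeping only; the real coalesce copies vhd blocks and runs in the SM backend):

def coalesce_pass(nodes):
    # Each node is {"name": str, "parent": node or None}.
    for parent in list(nodes):
        children = [n for n in nodes if n["parent"] is parent]
        if len(children) != 1:
            continue              # blocks shared with several children: keep
        child = children[0]
        # 1. copy the only child's blocks into the parent (block copy elided)
        # 2. relink the child's children into their grandparent
        for n in nodes:
            if n["parent"] is child:
                n["parent"] = parent
        # 3. the child vhd / LV is now unreferenced: the GC can remove it
        nodes.remove(child)

G = {"name": "G", "parent": None}
P = {"name": "P", "parent": G}
a = {"name": "a", "parent": P}; b = {"name": "b", "parent": P}
coalesce_pass([G, P, a, b])
print(a["parent"]["name"], b["parent"]["name"])  # G G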

        Reverting VM snapshots

        The XenAPI call VM.revert overwrites the VM metadata with the snapshot VM metadata, deletes the current VDIs and replaces them with clones of the snapshot VDIs. Note there is no “vdi_revert” in the SMAPI.
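In XenAPI terms the revert is a single call; reusing snap from the snapshot sketch above:

# VM.revert takes the snapshot reference; the VM to roll back is implied
# by the snapshot's snapshot_of field.
session.xenapi.VM.revert(snap)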

        Revert implementation details

        This is the process by which we revert a VM to a snapshot. The first thing to notice is that there is some logic that is called @@ -729,8 +729,8 @@ boosting graphics performance within virtual machines.

        The K1 has four GK104 GPUs and the K2 two GK107 GPUs. Each of these will be exposed through Xapi so a host with a single K1 card will have access to four independent PGPUs.

        Each of the GPUs can then be subdivided into vGPUs. For each type of PGPU, there are a few options of vGPU type which consume different amounts of the PGPU. For example, K1 and K2 cards can currently be configured in the following -ways:

        Possible VGX configurations -Possible VGX configurations

        Note, this diagram is not to scale, the PGPU resource required by each +ways:

        Possible VGX configurations +Possible VGX configurations

Note: this diagram is not to scale; the PGPU resource required by each vGPU type is as follows:

vGPU type | PGPU kind | vGPUs / PGPU
k100      | GK104     | 8
k140Q     | GK104     | 4
k200      | GK107     | 8
k240Q     | GK107     | 4
k260Q     | GK107     | 2
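Because each PGPU must stay homogeneous, remaining capacity is easy to compute; a sketch using the table above (hypothetical helper, not xapi's actual allocator):

VGPUS_PER_PGPU = {  # from the table above: type -> (PGPU kind, vGPUs / PGPU)
    "k100": ("GK104", 8), "k140Q": ("GK104", 4),
    "k200": ("GK107", 8), "k240Q": ("GK107", 4), "k260Q": ("GK107", 2),
}

def remaining_capacity(pgpu_kind, resident_types, new_type):
    kind, per_pgpu = VGPUS_PER_PGPU[new_type]
    if kind != pgpu_kind:
        return 0   # this vGPU type runs on a different physical GPU
    if any(t != new_type for t in resident_types):
        return 0   # homogeneous configurations only
    return per_pgpu - len(resident_types)

print(remaining_capacity("GK104", ["k140Q"], "k140Q"))  # 3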

        Currently each physical GPU (PGPU) only supports homogeneous vGPU configurations but different configurations are supported on different PGPUs across a single K1/K2 card. This means that, for example, a host with a K1 card @@ -747,16 +747,16 @@ graphics device to the guest. The vgpu binary is responsible for handling the VGX-capable GPU and, once it has been successfully passed through, the in-guest drivers can be installed in the same way as when it detects new hardware.

        The diagram below shows the relevant parts of the architecture for this -project.

        XenServer&rsquo;s vGPU architecture -XenServer&rsquo;s vGPU architecture

        Relevant code

        • In Xenopsd: Xenops_server_xen is where +project.

          XenServer&rsquo;s vGPU architecture +XenServer&rsquo;s vGPU architecture

          Relevant code

          • In Xenopsd: Xenops_server_xen is where Xenopsd gets the vGPU information from the values passed from Xapi;
          • In Xenopsd: Device.__start is where the vgpu process is started, if necessary, before Qemu.

          Xapi’s API and data model

          A lot of work has gone into the toolstack to handle the creation and management of VMs with vGPUs. We revised our data model, introducing a semantic link between VGPU and PGPU objects to help with utilisation tracking; we maintained the GPU_group concept as a pool-wide abstraction of PGPUs available for VMs; and we added VGPU_types which are configurations for -VGPU objects.

          Xapi&rsquo;s vGPU datamodel -Xapi&rsquo;s vGPU datamodel

          Aside: The VGPU type in Xapi’s data model predates this feature and was +VGPU objects.

          Xapi&rsquo;s vGPU datamodel +Xapi&rsquo;s vGPU datamodel

          Aside: The VGPU type in Xapi’s data model predates this feature and was synonymous with GPU-passthrough. A VGPU is simply a display device assigned to a VM which may be a vGPU (this feature) or a whole GPU (a VGPU of type passthrough).

          VGPU_types can be enabled/disabled on a per-PGPU basis allowing for @@ -806,8 +806,8 @@ param-name=enabled-vgpu-types and param-name=resident-vgpus respectively. Or, alternatively, you can use the following command to list all the parameters for the PGPU. You can get the types supported or enabled for a given PGPU:

          $ xe pgpu-list uuid=... params=all

        Xapi Storage Migration

The Xapi Storage Migration (XSM), also known as “Storage Motion”, allows

        • a running VM to be migrated within a pool, between different hosts -and different storage simultaneously;
        • a running VM to be migrated to another pool;
        • a disk attached to a running VM to be moved to another SR.

        The following diagram shows how XSM works at a high level:

        Xapi Storage Migration -Xapi Storage Migration

        The slowest part of a storage migration is migrating the storage, since virtual +and different storage simultaneously;

      • a running VM to be migrated to another pool;
      • a disk attached to a running VM to be moved to another SR.
The following diagram shows how XSM works at a high level:

        Xapi Storage Migration +Xapi Storage Migration

        The slowest part of a storage migration is migrating the storage, since virtual disks can be very large. Xapi starts by taking a snapshot and copying that to the destination as a background task. Before the datapath connecting the VM to the disk is re-established, xapi tells tapdisk to start mirroring all @@ -815,4 +815,4 @@ are written to both the old and the new disk. When the background snapshot copy is complete, xapi can migrate the VM memory across. Once the VM memory image has been received, the destination VM is -complete and the original can be safely destroyed.

        \ No newline at end of file +complete and the original can be safely destroyed.

        \ No newline at end of file diff --git a/new-docs/toolstack/features/snapshots/index.html b/new-docs/toolstack/features/snapshots/index.html index d85a8a144..3d4a3612c 100644 --- a/new-docs/toolstack/features/snapshots/index.html +++ b/new-docs/toolstack/features/snapshots/index.html @@ -1,5 +1,5 @@ Snapshots :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/toolstack/high-level/daemons/index.html b/new-docs/toolstack/high-level/daemons/index.html index eaefb8e75..5d9bbfe33 100644 --- a/new-docs/toolstack/high-level/daemons/index.html +++ b/new-docs/toolstack/high-level/daemons/index.html @@ -1,5 +1,5 @@ Daemons :: XAPI Toolstack Developer Documentation -

        Daemons

        The Toolstack consists of a set of co-operating daemons:

        xapi
        manages clusters of hosts, co-ordinating access to shared storage and networking.
        xenopsd
        a low-level “domain manager” which takes care of creating, suspending, +

        Daemons

        The Toolstack consists of a set of co-operating daemons:

        xapi
        manages clusters of hosts, co-ordinating access to shared storage and networking.
        xenopsd
        a low-level “domain manager” which takes care of creating, suspending, resuming, migrating, rebooting domains by interacting with Xen via libxc and libxl.
        xcp-rrdd
        a performance counter monitoring daemon which aggregates “datasources” defined via a plugin API and records history for each. There are various rrdd-plugin daemons:
        • xcp-rrdd-gpumon
        • xcp-rrdd-iostat
        • xcp-rrdd-squeezed
        • xcp-rrdd-xenpm
        • xcp-rrdd-dcmi
        • xcp-rrdd-netdev
        • xcp-rrdd-cpu
        xcp-networkd
        a host network manager which takes care of configuring interfaces, bridges @@ -8,9 +8,9 @@ if paths fail and need repair
        wsproxy
        handles access to VM consoles
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/toolstack/high-level/environment/index.html b/new-docs/toolstack/high-level/environment/index.html index 046cf364f..260bf45ad 100644 --- a/new-docs/toolstack/high-level/environment/index.html +++ b/new-docs/toolstack/high-level/environment/index.html @@ -1,13 +1,13 @@ Environment :: XAPI Toolstack Developer Documentation -

        Environment

        The Toolstack runs in an environment on a server (host) that has:

        • Physical hardware.
        • The Xen hypervisor.
• The control domain (domain 0): the privileged domain that the Toolstack runs in.
• Other, mostly unprivileged domains, usually for guests (VMs).

        The Toolstack relies on various bits of software inside the control domain, and directly communicates with most of these:

        \ No newline at end of file diff --git a/new-docs/toolstack/high-level/index.html b/new-docs/toolstack/high-level/index.html index 0e3f2b96d..51c01071a 100644 --- a/new-docs/toolstack/high-level/index.html +++ b/new-docs/toolstack/high-level/index.html @@ -1,10 +1,10 @@ High-level architecture :: XAPI Toolstack Developer Documentation -

        High-level architecture

        The XAPI Toolstack manages a cluster of hosts, network switches and storage on +

        High-level architecture

        The XAPI Toolstack manages a cluster of hosts, network switches and storage on behalf of clients such as XenCenter and Xen Orchestra.

        The most fundamental concept is of a Resource pool: the whole cluster managed as a single entity. The following diagram shows a cluster of hosts running -xapi, all sharing some storage:

        A Resource Pool -A Resource Pool

        At any time, at most one host is known as the pool coordinator (formerly +xapi, all sharing some storage:

        A Resource Pool +A Resource Pool

        At any time, at most one host is known as the pool coordinator (formerly known as “master”) and is responsible for coordination and locking resources within the pool. When a pool is first created a coordinator host is chosen. The coordinator role can be transferred

        • on user request in an orderly fashion (xe pool-designate-new-master)
        • on user request in an emergency (xe pool-emergency-transition-to-master)
        • automatically if HA is enabled on the cluster.

        All hosts expose an HTTP, XML-RPC and JSON-RPC interface running on port 80 and @@ -18,8 +18,8 @@ done by xapi and hence it is not possible to share this kind of storage between resource pools.
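As a sketch of what a call on the JSON-RPC interface mentioned above looks like on the wire, assuming Python's requests library and xapi's /jsonrpc endpoint (XML-RPC at / works similarly):

import requests

rpc = {
    "jsonrpc": "2.0",
    "method": "session.login_with_password",
    "params": ["username", "password"],
    "id": 1,
}
resp = requests.post("http://xshost/jsonrpc", json=rpc, timeout=10)
print(resp.json()["result"])  # an OpaqueRef session handle for later calls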

        The following diagram shows the software running on a single host. Note that all hosts run the same software (although not necessarily the same version, if -we are in the middle of a rolling update).

        A Host -A Host

        The XAPI Toolstack expects the host to be running Xen on x86. The Xen +we are in the middle of a rolling update).

        A Host +A Host

        The XAPI Toolstack expects the host to be running Xen on x86. The Xen hypervisor partitions the host into Domains, some of which can have privileged hardware access, and the rest are unprivileged guests. The XAPI Toolstack normally runs all of its components in the privileged initial domain, @@ -28,9 +28,9 @@ be isolated in their own domains.

        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/toolstack/high-level/index.print.html b/new-docs/toolstack/high-level/index.print.html index 7bfc2a42f..f23af4d5b 100644 --- a/new-docs/toolstack/high-level/index.print.html +++ b/new-docs/toolstack/high-level/index.print.html @@ -1,10 +1,10 @@ High-level architecture :: XAPI Toolstack Developer Documentation -

        High-level architecture

        The XAPI Toolstack manages a cluster of hosts, network switches and storage on +

        High-level architecture

        The XAPI Toolstack manages a cluster of hosts, network switches and storage on behalf of clients such as XenCenter and Xen Orchestra.

        The most fundamental concept is of a Resource pool: the whole cluster managed as a single entity. The following diagram shows a cluster of hosts running -xapi, all sharing some storage:

        A Resource Pool -A Resource Pool

        At any time, at most one host is known as the pool coordinator (formerly +xapi, all sharing some storage:

        A Resource Pool +A Resource Pool

        At any time, at most one host is known as the pool coordinator (formerly known as “master”) and is responsible for coordination and locking resources within the pool. When a pool is first created a coordinator host is chosen. The coordinator role can be transferred

        • on user request in an orderly fashion (xe pool-designate-new-master)
        • on user request in an emergency (xe pool-emergency-transition-to-master)
        • automatically if HA is enabled on the cluster.

        All hosts expose an HTTP, XML-RPC and JSON-RPC interface running on port 80 and @@ -18,8 +18,8 @@ done by xapi and hence it is not possible to share this kind of storage between resource pools.

        The following diagram shows the software running on a single host. Note that all hosts run the same software (although not necessarily the same version, if -we are in the middle of a rolling update).

        A Host -A Host

        The XAPI Toolstack expects the host to be running Xen on x86. The Xen +we are in the middle of a rolling update).

        A Host +A Host

        The XAPI Toolstack expects the host to be running Xen on x86. The Xen hypervisor partitions the host into Domains, some of which can have privileged hardware access, and the rest are unprivileged guests. The XAPI Toolstack normally runs all of its components in the privileged initial domain, @@ -36,4 +36,4 @@ if values exceed some pre-defined threshold

        mpathalert
        a daemon which monitors “storage paths” and sends “alerts” if paths fail and need repair
        wsproxy
        handles access to VM consoles

        Interfaces

Communication between the Toolstack daemons is built upon libraries from a component called -xapi-idl.

        • Abstracts communication between daemons over the message-switch using JSON/RPC.
        • Contains the definition of the interfaces exposed by the daemons (except xapi).
        \ No newline at end of file +xapi-idl.

        • Abstracts communication between daemons over the message-switch using JSON/RPC.
        • Contains the definition of the interfaces exposed by the daemons (except xapi).
        \ No newline at end of file diff --git a/new-docs/toolstack/high-level/interfaces/index.html b/new-docs/toolstack/high-level/interfaces/index.html index 05a11f69c..1dc68c832 100644 --- a/new-docs/toolstack/high-level/interfaces/index.html +++ b/new-docs/toolstack/high-level/interfaces/index.html @@ -1,12 +1,12 @@ Interfaces :: XAPI Toolstack Developer Documentation -

        Interfaces

        Communication between the Toolstack daemon is built upon libraries from a +

        Interfaces

Communication between the Toolstack daemons is built upon libraries from a component called xapi-idl.

        • Abstracts communication between daemons over the message-switch using JSON/RPC.
        • Contains the definition of the interfaces exposed by the daemons (except xapi).
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/toolstack/index.html b/new-docs/toolstack/index.html index ee734c72c..5a110a8e5 100644 --- a/new-docs/toolstack/index.html +++ b/new-docs/toolstack/index.html @@ -1,10 +1,10 @@ The XAPI Toolstack :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/toolstack/index.print.html b/new-docs/toolstack/index.print.html index 55fc3aa89..9bf58879a 100644 --- a/new-docs/toolstack/index.print.html +++ b/new-docs/toolstack/index.print.html @@ -1,10 +1,10 @@ The XAPI Toolstack :: XAPI Toolstack Developer Documentation -

        Subsections of The XAPI Toolstack

        Responsibilities

        The XAPI Toolstack forms the main control plane of a pool of XenServer hosts. It allow the administrator to:

        • Configure the hardware resources of XenServer hosts: storage, networking, graphics, memory.
        • Create, configure and destroy VMs and their virtual resources.
        • Control the lifecycle of VMs.
        • Monitor the status of hosts, VMs and related resources.

        To this, the Toolstack:

        • Exposes an API that can be accessed by external clients over HTTP(s).
        • Exposes a CLI.
        • Ensures that physical resources are configured when needed, and VMs receive the resources they require.
        • Implements various features to help the administrator manage their systems.
        • Monitors running VMs.
        • Records metrics about physical and virtual resources.

        High-level architecture

        The XAPI Toolstack manages a cluster of hosts, network switches and storage on +

        Subsections of The XAPI Toolstack

        Responsibilities

The XAPI Toolstack forms the main control plane of a pool of XenServer hosts. It allows the administrator to:

        • Configure the hardware resources of XenServer hosts: storage, networking, graphics, memory.
        • Create, configure and destroy VMs and their virtual resources.
        • Control the lifecycle of VMs.
        • Monitor the status of hosts, VMs and related resources.

To do this, the Toolstack:

        • Exposes an API that can be accessed by external clients over HTTP(s).
        • Exposes a CLI.
        • Ensures that physical resources are configured when needed, and VMs receive the resources they require.
        • Implements various features to help the administrator manage their systems.
        • Monitors running VMs.
        • Records metrics about physical and virtual resources.

        High-level architecture

        The XAPI Toolstack manages a cluster of hosts, network switches and storage on behalf of clients such as XenCenter and Xen Orchestra.

        The most fundamental concept is of a Resource pool: the whole cluster managed as a single entity. The following diagram shows a cluster of hosts running -xapi, all sharing some storage:

        A Resource Pool -A Resource Pool

        At any time, at most one host is known as the pool coordinator (formerly +xapi, all sharing some storage:

        A Resource Pool +A Resource Pool

        At any time, at most one host is known as the pool coordinator (formerly known as “master”) and is responsible for coordination and locking resources within the pool. When a pool is first created a coordinator host is chosen. The coordinator role can be transferred

        • on user request in an orderly fashion (xe pool-designate-new-master)
        • on user request in an emergency (xe pool-emergency-transition-to-master)
        • automatically if HA is enabled on the cluster.

        All hosts expose an HTTP, XML-RPC and JSON-RPC interface running on port 80 and @@ -18,8 +18,8 @@ done by xapi and hence it is not possible to share this kind of storage between resource pools.

        The following diagram shows the software running on a single host. Note that all hosts run the same software (although not necessarily the same version, if -we are in the middle of a rolling update).

        A Host -A Host

        The XAPI Toolstack expects the host to be running Xen on x86. The Xen +we are in the middle of a rolling update).

        A Host +A Host

        The XAPI Toolstack expects the host to be running Xen on x86. The Xen hypervisor partitions the host into Domains, some of which can have privileged hardware access, and the rest are unprivileged guests. The XAPI Toolstack normally runs all of its components in the privileged initial domain, @@ -38,8 +38,8 @@ component called xapi-idl.

        • Abstracts communication between daemons over the message-switch using JSON/RPC.
        • Contains the definition of the interfaces exposed by the daemons (except xapi).

        Subsections of Features

        Disaster Recovery

        The HA feature will restart VMs after hosts have failed, but what happens if a whole site (e.g. datacenter) is lost? A disaster recovery -configuration is shown in the following diagram:

        Disaster recovery maintaining a secondary site -Disaster recovery maintaining a secondary site

        We rely on the storage array’s built-in mirroring to replicate (synchronously +configuration is shown in the following diagram:

        Disaster recovery maintaining a secondary site +Disaster recovery maintaining a secondary site

        We rely on the storage array’s built-in mirroring to replicate (synchronously or asynchronously: the admin’s choice) between the primary and the secondary site. When DR is enabled the VM disk data and VM metadata are written to the storage server and mirrored. The secondary site contains the other side @@ -82,12 +82,12 @@ VMs etc).

Given a choice between polling states and receiving events when the state changes, we should in general opt for receiving events in the code in order to avoid adding bottlenecks in dom0 that will prevent the -scalability of XenServer to many VMs and virtual devices.

        Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them -Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them

        Xapi

        Sending events from the xenapi

        A xenapi user client, such as XenCenter, the xe-cli or a python script, +scalability of XenServer to many VMs and virtual devices.

        Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them +Connection of events between XAPI, xenopsd and xenstore, with main functions and data structures responsible for receiving and sending them

        Xapi

        Sending events from the xenapi

        A xenapi user client, such as XenCenter, the xe-cli or a python script, can register to receive events from XAPI for specific objects in the XAPI DB. XAPI will generate events for those registered clients whenever -the corresponding XAPI DB object changes.

        Sending events from the xenapi -Sending events from the xenapi

        This small python scripts shows how to register a simple event watch +the corresponding XAPI DB object changes.

        Sending events from the xenapi +Sending events from the xenapi

This small Python script shows how to register a simple event watch loop for XAPI:

        import XenAPI
         session = XenAPI.Session("http://xshost")
         session.login_with_password("username","password")
        @@ -127,11 +127,11 @@
         run xapi_xenops.update_vgpu() to query xenopsd about its state.
• Task id: something changed in this task, run xapi_xenops.update_task() to query xenopsd about its state. The function update_task() will update the progress of the task in -the xapi DB using the information of the task in xenopsd.
      • Receiving events from xenopsd -Receiving events from xenopsd

        All the xapi_xenops.update_X() functions above will call +the xapi DB using the information of the task in xenopsd.

        Receiving events from xenopsd +Receiving events from xenopsd

        All the xapi_xenops.update_X() functions above will call Xenopsd_client.X.stat() functions to obtain the current state of X from -xenopsd:

        Obtaining current state -Obtaining current state

        There are a couple of optimisations while processing the events in +xenopsd:

        Obtaining current state +Obtaining current state

        There are a couple of optimisations while processing the events in xapi_xenops.events_watch():

• if an event X=(vm_id,dev_id) (e.g. Vbd dev_id) has already been processed in a barrier_events, it’s not processed again. A typical value for X is e.g. “<vm_uuid>.xvda” for a VBD.
• if Events_from_xenopsd.are_supressed X, then this event @@ -187,16 +187,16 @@ the dom0 backend changed the state of a virtual device), it creates a signal for the corresponding object (VM_check_state, VBD_check_state etc) and sends it up to xapi. Xapi will then process this event in its -xapi_xenops.events_watch() function.

          Sending events to xapi -Sending events to xapi

          These signals may need to wait a long time to be processed if the +xapi_xenops.events_watch() function.

          Sending events to xapi +Sending events to xapi

These signals may need to wait a long time to be processed if the single-threaded xapi_xenops.events_watch() function is having difficulty (i.e. taking a long time) processing previous signals in the UPDATES queue from xenopsd.

          Receiving events from xenstore

Xenopsd watches a number of keys in xenstore, both in dom0 and in each guest. Xenstore is responsible for sending watch events to xenopsd whenever the watched keys change state. Xenopsd uses a xenstore client library to make it easier to create a callback function that is called whenever -xenstore sends these events.

          Receiving events from xenstore -Receiving events from xenstore

          Xenopsd also needs to complement sometimes these watch events with +xenstore sends these events.

          Receiving events from xenstore +Receiving events from xenstore

Xenopsd also sometimes needs to complement these watch events with polling of some values. An example is the @introduceDomain event in xenstore (handled in xenopsd/xc/xenstore_watch.ml), which indicates that a new VM has been created. This event unfortunately does not @@ -216,8 +216,8 @@ the following may happen:

          • during the night someone spills a cup of coffee over an FC switch; then
          • VMs running on the affected hosts will lose access to their storage; then
          • business-critical services will go down; then
          • monitoring software will send a text message to an off-duty admin; then
          • the admin will travel to the office and fix the problem by restarting the VMs elsewhere.

          With HA the following will happen:

          • during the night someone spills a cup of coffee over an FC switch; then
          • VMs running on the affected hosts will lose access to their storage; then
          • business-critical services will go down; then
          • the HA software will determine which hosts are affected and shut them down; then
          • the HA software will restart the VMs on unaffected hosts; then
          • services are restored; then on the next working day
          • the admin can arrange for the faulty switch to be replaced.

          HA is designed to handle an emergency and allow the admin time to fix failures properly.

          Example

          The following diagram shows an HA-enabled pool, before and after a network -link between two hosts fails.

          High-Availability in action -High-Availability in action

          When HA is enabled, all hosts in the pool

          • exchange periodic heartbeat messages over the network
          • send heartbeats to a shared storage device.
          • attempt to acquire a “master lock” on the shared storage.

          HA is designed to recover as much as possible of the pool after a single failure +link between two hosts fails.

          High-Availability in action +High-Availability in action

          When HA is enabled, all hosts in the pool

          • exchange periodic heartbeat messages over the network
          • send heartbeats to a shared storage device.
          • attempt to acquire a “master lock” on the shared storage.

          HA is designed to recover as much as possible of the pool after a single failure i.e. it removes single points of failure. When some subset of the pool suffers a failure then the remaining pool members

          • figure out whether they are in the largest fully-connected set (the “liveset”);
            • if they are not in the largest set then they “fence” themselves (i.e. @@ -592,13 +592,13 @@ distinguish between a temporary storage failure and a permanent HA disable.
            • the heartbeat SR can be created on expensive low-latency high-reliability storage and made as small as possible (to minimise infrastructure cost), safe in the knowledge that if HA enables successfully once, it won’t run -out of space and fail to enable in the future.

            The Xapi-to-Xapi communication looks as follows:

            Configuring HA around the Pool -Configuring HA around the Pool

            The Xapi Pool master calls Host.ha_join_liveset on all hosts in the +out of space and fail to enable in the future.

          The Xapi-to-Xapi communication looks as follows:

          Configuring HA around the Pool +Configuring HA around the Pool

          The Xapi Pool master calls Host.ha_join_liveset on all hosts in the pool simultaneously. Each host runs the ha_start_daemon script which starts Xhad. Each Xhad starts exchanging heartbeats over the network -and storage defined in the xhad.conf.

          Joining a liveset

          Starting up a host -Starting up a host

          The Xhad instances exchange heartbeats and decide which hosts are in +and storage defined in the xhad.conf.

          Joining a liveset

          Starting up a host +Starting up a host

          The Xhad instances exchange heartbeats and decide which hosts are in the “liveset” and which have been fenced.

          After joining the liveset, each host clears the “excluded” flag which would have been set if the host had been shutdown cleanly before – this is only @@ -609,8 +609,8 @@ enabled and there is a master already, this node will be expected to stand unopposed. Later when HA notices that the master host has been fenced, all remaining hosts will stand for election and one of them will -be chosen.

          Shutting down a host

          Shutting down a host -Shutting down a host

          When a host is to be shutdown cleanly, it can be safely “excluded” +be chosen.

          Shutting down a host

          Shutting down a host +Shutting down a host

          When a host is to be shutdown cleanly, it can be safely “excluded” from the pool such that a future failure of the storage heartbeat will not cause all pool hosts to self-fence (see survival rule 2 above). When a host is “excluded” all other hosts know that the host does not @@ -634,8 +634,8 @@ problem. Obviously this API should only be used if the admin is totally sure that HA has been disabled.

          Disabling HA

There are 2 methods of disabling HA: one for the “normal” case when the statefile is available; and the other for the “emergency” case when the statefile has failed and can’t be recovered.

Disabling HA cleanly

Disabling HA cleanly

HA can be shutdown cleanly when the statefile is working, i.e. when hosts are alive because of survival rule 1. First the master Xapi tells the local Xhad to mark the pool state as “invalid” using ha_set_pool_state. Every xhad instance will notice this state change the next time it performs …

…which sets ha_disable_failover_decisions in the local database. This prevents the node rebooting, gaining statefile access, acquiring the master lock and restarting VMs when other hosts have disabled their fencing (i.e. a “split brain”).

Disabling HA uncleanly

Disabling HA uncleanly

Once the master is sure that no host will suddenly start recovering VMs it is safe to call Host.ha_disarm_fencing which runs the script ha_disarm_fencing and then shuts down the Xhad with ha_stop_daemon.
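
The ordering matters here. A runnable toy (stand-in print statements, not the real RPC calls) of the disable sequence named in the two sections above:

let step s = print_endline s

let () =
  step "master: ha_set_pool_state invalid";             (* statefile marked invalid *)
  step "all hosts: Host.ha_disable_failover_decisions"; (* no more failover decisions *)
  step "all hosts: Host.ha_disarm_fencing";             (* fencing disarmed *)
  step "all hosts: ha_stop_daemon"                      (* Xhad shut down *)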

          Add a host to the pool

          We assume that adding a host to the pool is an operation the admin will perform manually, so it is acceptable to disable HA for the duration and to re-enable it afterwards. If a failure happens during this operation then the admin will take care of it by hand.

        NUMA

        NUMA in a nutshell

Systems that contain more than one CPU socket are typically built on a Non-Uniform Memory Architecture (NUMA). In a NUMA system each node has fast, lower latency access to local memory.

hwloc

In the diagram above we have 4 NUMA nodes:

• 2 of those are due to 2 separate physical packages (sockets)
• a further 2 are due to Sub-NUMA-Clustering (aka Nodes Per Socket for AMD) where the L3 cache is split

The L3 cache is shared among multiple cores, but cores 0-5 have lower latency access to one part of it than cores 6-11, and this is also reflected by splitting memory addresses into 4 31GiB ranges in total.

In the diagram the closer the memory is to the core, the lower the access latency:

• per-core caches: L1, L2
• per-package shared cache: L3 (local part), L3 (remote part)
• local NUMA node (to a group of cores, e.g. L#0 P#0), node 0
• remote NUMA node in same package (L#1 P#2), node 1
• remote NUMA nodes in other packages (L#2 P#1 and L#3 P#3), nodes 2 and 3

The NUMA distance matrix

Accessing a remote NUMA node in the other package has to go through a shared interconnect, which has lower bandwidth than the direct connections, and is also a bottleneck if both cores have to access remote memory: the bandwidth for a single core is effectively at most half.

        This is reflected in the NUMA distance/latency matrix. The units are arbitrary, and by convention access latency to the local NUMA node is given distance ‘10’.

        Relative latency matrix by logical indexes:

index | 0  | 2  | 1  | 3
0     | 10 | 21 | 11 | 21
2     | 21 | 10 | 21 | 11
1     | 11 | 21 | 10 | 21
3     | 21 | 11 | 21 | 10

        This follows the latencies described previously:

        • fast access to local NUMA node memory (by definition), node 0, cost 10
        • slightly slower access latency to the other NUMA node in same package, node 1, cost 11
        • twice as slow access latency to remote NUMA memory in the other physical package (socket): nodes 2 and 3, cost 21
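
As a worked example (sketch code, with the matrix re-indexed into natural node order 0-3), ranking nodes by their distance from node 0 reproduces the ordering above:

let d =
  [| [| 10; 11; 21; 21 |]
   ; [| 11; 10; 21; 21 |]
   ; [| 21; 21; 10; 11 |]
   ; [| 21; 21; 11; 10 |] |]

let closest_first ~from =
  List.sort (fun a b -> compare d.(from).(a) d.(from).(b)) [0; 1; 2; 3]

let () =
  closest_first ~from:0
  |> List.iter (fun n -> Printf.printf "node %d at distance %d\n" n d.(0).(n))
  (* node 0 (10), then node 1 (11), then nodes 2 and 3 (21) *)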

There is also I/O NUMA, where a cost is similarly associated with where a PCIe device is plugged in, but exploring that is future work (it requires exposing the NUMA topology to the Dom0 kernel to benefit from it), and for simplicity the diagram above does not show it.

        Advantages of NUMA

        NUMA does have advantages though: if each node accesses only its local memory, then each node can independently achieve maximum throughput.

        For best performance, we should:

        • minimize the amount of interconnect bandwidth we are using
        • run code that accesses memory allocated on the closest NUMA node
        • maximize the number of NUMA nodes that we use in the system as a whole

        If a VM’s memory and vCPUs can entirely fit within a single NUMA node then we should tell Xen to prefer to allocate memory from and run the vCPUs on a single NUMA node.
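
A minimal sketch (hypothetical types and sizes, not the real placement code) of that check: find a node with enough free memory and enough pCPUs for the whole VM.

type numa_node = { id : int; free_mib : int; pcpus : int }

let pick_single_node ~nodes ~vm_mib ~vm_vcpus =
  List.find_opt (fun n -> n.free_mib >= vm_mib && n.pcpus >= vm_vcpus) nodes

let () =
  let nodes =
    [ { id = 0; free_mib = 31744; pcpus = 6 }
    ; { id = 1; free_mib = 8192; pcpus = 6 } ]
  in
  match pick_single_node ~nodes ~vm_mib:16384 ~vm_vcpus:4 with
  | Some n -> Printf.printf "prefer NUMA node %d\n" n.id
  | None -> print_endline "VM does not fit on a single NUMA node"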

        Xen vCPU soft-affinity

        The Xen scheduler supports 2 kinds of constraints:

        • hard pinning: a vCPU may only run on the specified set of pCPUs and nowhere else
        • soft pinning: a vCPU is preferably run on the specified set of pCPUs, but if they are all busy then it may run elsewhere

Hard pinning can be used to partition the system. But it can potentially leave part of the system idle while another part is bottlenecked by many vCPUs competing for the same limited set of pCPUs.

Xen does not migrate workloads between NUMA nodes on its own (the Linux kernel can), although it is possible to achieve a similar effect with explicit migration. However, migration introduces additional delays and is best avoided for entire VMs.

Therefore, soft pinning is preferred: running on a potentially suboptimal pCPU that uses remote memory could still be better than not running it at all until a pCPU is free to run it.

        Xen will also allocate memory for the VM according to the vCPU (soft) pinning: If the vCPUs are pinned to NUMA nodes A and B, Xen allocates memory from NUMA nodes A and B in a round-robin way, resulting in interleaving.
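
A toy illustration (not Xen code) of that round-robin interleaving: with vCPUs soft-pinned to nodes 0 and 1, successive allocations alternate between the two nodes.

let interleave ~nodes ~pages =
  List.init pages (fun page -> (page, List.nth nodes (page mod List.length nodes)))

let () =
  interleave ~nodes:[0; 1] ~pages:6
  |> List.iter (fun (page, node) -> Printf.printf "page %d -> node %d\n" page node)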

        Current default: No vCPU pinning

        By default, when no vCPU pinning is used, Xen interleaves memory from all NUMA nodes. This averages the memory performance, but individual tasks’ performance may be significantly higher or lower depending on which NUMA node the application may have “landed” on. As a result, restarting processes will speed them up or slow them down as address space randomization picks different memory regions inside a VM.

This uses the memory bandwidth of all memory controllers and distributes the load across all nodes.

…with some storage arrays in which snapshots are “second class” objects which are automatically deleted when the original disk is deleted.

Disks are implemented in Xapi via “Storage Manager” (SM) plugins. The SM plugins conform to an API (the SMAPI) which has operations including

        • vdi_create: make a fresh disk, full of zeroes
        • vdi_snapshot: create a snapshot of a disk
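
As a sketch (hypothetical signatures; the real SMAPI is much larger), the two operations above could be modelled as:

module type SM_PLUGIN = sig
  type vdi

  (* make a fresh disk, full of zeroes *)
  val vdi_create : size_bytes:int64 -> vdi

  (* create a point-in-time snapshot of a disk *)
  val vdi_snapshot : vdi -> vdi
end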

        File-based vhd implementation

The existing “EXT” and “NFS” file-based Xapi SM plugins store disk data in trees of .vhd files as in the following diagram:

Relationship between VDIs and vhd files

From the XenAPI point of view, we have one current VDI and a set of snapshots, each taken at a different point in time. These VDIs correspond to leaf vhds in a tree stored on disk, where the non-leaf nodes contain all the shared blocks.

The vhd files are always thinly-provisioned which means they only allocate new blocks on an as-needed basis. The snapshot leaf vhd file contains only the vhd metadata and therefore is very small (a few KiB) and will only grow when the VM writes blocks.

        File-based vhd implementations are a good choice if a “gold image” snapshot is going to be cloned lots of times.

        Block-based vhd implementation

The existing “LVM”, “LVMoISCSI” and “LVMoHBA” block-based Xapi SM plugins store disk data in trees of .vhd files contained within LVM logical volumes:

Relationship between VDIs and LVs containing vhd data

Non-snapshot VDIs are always stored full size (a.k.a. thickly-provisioned). When parent nodes are created they are automatically shrunk to the minimum size needed to store the shared blocks. The LVs corresponding with snapshot VDIs only contain vhd metadata and by default consume 8MiB. Note: this is different to VDI.clones which are stored full size.

        Block-based vhd implementations are not a good choice if a “gold image” snapshot is going to be cloned lots of times, since each clone will be stored full size.

        Hypothetical LUN implementation

A hypothetical Xapi SM plugin could use LUNs on an iSCSI storage array as VDIs, and the array’s custom control interface to implement the “snapshot” operation:

Relationship between VDIs and LUNs on a hypothetical storage target

From the XenAPI point of view, we have one current VDI and a set of snapshots, each taken at a different point in time. These VDIs correspond to LUNs on the same iSCSI target, and internally within the target these LUNs are comprised of blocks from a large shared copy-on-write pool with support for dedup.

        Reverting disk snapshots

        There is no current way to revert in-place a disk to a snapshot, but it is possible to create a writable disk by “cloning” a snapshot.

        VM snapshots

Let’s say we have a VM, “VM1” that has 2 disks. Concentrating only on the VM, VBDs and VDIs, we have the following structure:

VM objects

When we take a snapshot, we first ask the storage backends to snapshot all of the VDIs associated with the VM, producing new VDI objects. Then we copy all of the metadata, producing a new ‘snapshot’ VM object, complete with its own VBDs copied from the original, but now pointing at the snapshot VDIs. We also copy the VIFs and VGPUs but for now we will ignore those.

This process leads to a set of objects that look like this:

VM and snapshot objects

We have fields that help navigate the new objects: VM.snapshot_of, and VDI.snapshot_of. These, like you would expect, point to the relevant other objects.

        Deleting VM snapshots

When a snapshot is deleted Xapi calls the SM API vdi_delete. The Xapi SM plugins which use vhd format data do not reclaim space immediately; instead a coalesce process later checks whether any parent nodes have only one child, i.e. the “shared” blocks are only “shared” with one other node. In the following example the snapshot delete leaves such a parent node and the coalesce process copies blocks from the redundant parent’s only child into the parent:

We coalesce parent blocks into grand parent nodes

Note that if the vhd data is being stored in LVM, then the parent node will have had to be expanded to full size to accommodate the writes. Unfortunately this means the act of reclaiming space actually consumes space itself, which means it is important to never completely run out of space in such an SR.

Once the blocks have been copied, we can now cut one of the parents out of the tree by relinking its children into their grandparent:

Relink children into grand parent

Finally the garbage collector can remove unused vhd files / LVM LVs:

Clean up
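
A simplified sketch (hypothetical types, data copy elided) of the coalesce-and-relink step just described:

type vhd = { name : string; mutable children : vhd list }

let coalesce node =
  match node.children with
  | [ only_child ] ->
      (* copy only_child's blocks into node (data copy elided), then relink
         only_child's children into their grandparent, node; the garbage
         collector can then remove only_child *)
      node.children <- only_child.children
  | _ -> () (* several children: the blocks really are shared *)

let () =
  let leaf = { name = "current-vdi"; children = [] } in
  let redundant = { name = "redundant-parent"; children = [ leaf ] } in
  let base = { name = "base"; children = [ redundant ] } in
  coalesce base;
  List.iter (fun c -> print_endline c.name) base.children (* prints current-vdi *)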

        Reverting VM snapshots

        The XenAPI call VM.revert overwrites the VM metadata with the snapshot VM metadata, deletes the current VDIs and replaces them with clones of the snapshot VDIs. Note there is no “vdi_revert” in the SMAPI.

        Revert implementation details

This is the process by which we revert a VM to a snapshot. The first thing to notice is that there is some logic that is called …

…boosting graphics performance within virtual machines.

        The K1 has four GK104 GPUs and the K2 two GK107 GPUs. Each of these will be exposed through Xapi so a host with a single K1 card will have access to four independent PGPUs.

Each of the GPUs can then be subdivided into vGPUs. For each type of PGPU, there are a few options of vGPU type which consume different amounts of the PGPU. For example, K1 and K2 cards can currently be configured in the following ways:

Possible VGX configurations

Note, this diagram is not to scale, the PGPU resource required by each vGPU type is as follows:

vGPU type | PGPU kind | vGPUs / PGPU
k100      | GK104     | 8
k140Q     | GK104     | 4
k200      | GK107     | 8
k240Q     | GK107     | 4
k260Q     | GK107     | 2
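
For example, combining this table with the hardware described above: a single K1 card exposes four GK104 PGPUs, so it could host up to 4 × 8 = 32 k100 vGPUs, or 4 × 4 = 16 k140Q vGPUs, when every PGPU is configured the same way.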

Currently each physical GPU (PGPU) only supports homogeneous vGPU configurations but different configurations are supported on different PGPUs across a single K1/K2 card. This means that, for example, a host with a K1 card …

…graphics device to the guest. The vgpu binary is responsible for handling the VGX-capable GPU and, once it has been successfully passed through, the in-guest drivers can be installed in the same way as when it detects new hardware.

        The diagram below shows the relevant parts of the architecture for this -project.

        XenServer&rsquo;s vGPU architecture -XenServer&rsquo;s vGPU architecture

        Relevant code

        • In Xenopsd: Xenops_server_xen is where +project.

          XenServer&rsquo;s vGPU architecture +XenServer&rsquo;s vGPU architecture

          Relevant code

          • In Xenopsd: Xenops_server_xen is where Xenopsd gets the vGPU information from the values passed from Xapi;
          • In Xenopsd: Device.__start is where the vgpu process is started, if necessary, before Qemu.

          Xapi’s API and data model

A lot of work has gone into the toolstack to handle the creation and management of VMs with vGPUs. We revised our data model, introducing a semantic link between VGPU and PGPU objects to help with utilisation tracking; we maintained the GPU_group concept as a pool-wide abstraction of PGPUs available for VMs; and we added VGPU_types which are configurations for VGPU objects.

Xapi’s vGPU datamodel

Aside: The VGPU type in Xapi’s data model predates this feature and was synonymous with GPU-passthrough. A VGPU is simply a display device assigned to a VM which may be a vGPU (this feature) or a whole GPU (a VGPU of type passthrough).

VGPU_types can be enabled/disabled on a per-PGPU basis allowing for …

…param-name=enabled-vgpu-types and param-name=resident-vgpus respectively. Or, alternatively, you can use the following command to list all the parameters for the PGPU. You can get the types supported or enabled for a given PGPU:

          $ xe pgpu-list uuid=... params=all

        Xapi Storage Migration

The Xapi Storage Migration (XSM) also known as “Storage Motion” allows:

• a running VM to be migrated within a pool, between different hosts and different storage simultaneously;
• a running VM to be migrated to another pool;
• a disk attached to a running VM to be moved to another SR.

The following diagram shows how XSM works at a high level:

Xapi Storage Migration

The slowest part of a storage migration is migrating the storage, since virtual disks can be very large. Xapi starts by taking a snapshot and copying that to the destination as a background task. Before the datapath connecting the VM to the disk is re-established, xapi tells tapdisk to start mirroring all writes, so that they are written to both the old and the new disk. When the background snapshot copy is complete, xapi can migrate the VM memory across. Once the VM memory image has been received, the destination VM is complete and the original can be safely destroyed.


        Responsibilities

The XAPI Toolstack forms the main control plane of a pool of XenServer hosts. It allows the administrator to:

        • Configure the hardware resources of XenServer hosts: storage, networking, graphics, memory.
        • Create, configure and destroy VMs and their virtual resources.
        • Control the lifecycle of VMs.
        • Monitor the status of hosts, VMs and related resources.

To do this, the Toolstack:

• Exposes an API that can be accessed by external clients over HTTP(S).
        • Exposes a CLI.
        • Ensures that physical resources are configured when needed, and VMs receive the resources they require.
        • Implements various features to help the administrator manage their systems.
        • Monitors running VMs.
        • Records metrics about physical and virtual resources.
Xapi-guard

The xapi-guard daemon is the component in the xapi toolstack that is responsible for handling persistence requests from VMs (domains). Currently these are UEFI vars and vTPM updates.

The code is in ocaml/xapi-guard. When the daemon handled only UEFI updates it was called varstored-guard. Some files and package names still use the previous name.

        Principles

1. Calls from domains must be limited in privilege to do certain API calls, and …

…This must be avoided and instead the updates with offending timestamps are renamed to a timestamp taken from the current time, ensuring a consistent ordering. The routine is also used to keep a minimal file tree: unrecognised files are deleted, temporary files created to ensure atomic writes are left untouched, and empty directories are deleted. This mechanism can be changed in the future to migrate to other formats.
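
A small sketch (hypothetical code, not the actual routine) of the timestamp repair described above: any update whose timestamp does not increase is renamed using the current clock.

let repair_ordering ~now updates =
  let fix (last, acc) (ts, name) =
    let ts = if ts <= last then now () else ts in
    (ts, (ts, name) :: acc)
  in
  let _, fixed = List.fold_left fix (0., []) updates in
  List.rev fixed

let () =
  let now () = 100. in (* stand-in clock *)
  repair_ordering ~now [ (1., "a"); (5., "b"); (3., "c") ]
  |> List.iter (fun (ts, name) -> Printf.printf "%.0f %s\n" ts name)
  (* "c" is renamed to timestamp 100, restoring a consistent ordering *)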

XE CLI architecture

Info

The links in this page point to the source files of xapi v1.132.0, not to the latest source code. Meanwhile, the CLI server code in xapi has been moved to a library separate from the main xapi binary, and has its own subdirectory …

…
let bond = Client.Bond.create rpc session_id network pifs mac mode properties in
let uuid = Client.Bond.get_uuid rpc session_id bond in
printer (Cli_printer.PList [ uuid])

        • The necessary parameters are looked up in params using List.assoc or similar.
• UUIDs are translated into references by get_by_uuid XenAPI calls (note that the Client module is the XenAPI client, and functions in there require the rpc function and session reference).
        • Then the main API call is made (Client.Bond.create in this case).
        • Further API calls may be made to output data for the client, and passed to the printer.

        This is the common case for CLI operations: they do API calls based on the parameters that were passed in.

        However, other commands are more complicated, for example vm_import/export and vm_migrate. These contain a lot more logic in the CLI commands, and also send commands to the client to instruct it to read or write files and/or do HTTP calls.

        Yet other commands do not actually do any XenAPI calls, but instead get “helpful” information from other places. Example: diagnostic_gc_stats, which displays statistics from xapi’s OCaml GC.

        Tutorials

        The following tutorials show how to extend the CLI (and XenAPI):


        Database

Subsections of Database

Metadata-on-LUN

In the present version of XenServer, metadata changes resulting in writes to the database are not persisted in non-volatile storage. Hence, in case of failure, up to five minutes’ worth of metadata changes could be lost. The Metadata-on-LUN feature addresses the issue by …

• …the Metadata-on-LUN feature using a healthy LUN to which all database writes can be successfully flushed.

• The fourth configuration shows xapi with the Metadata-on-LUN feature using an inaccessible LUN for which all database writes fail.

Impact of feature on xapi database-writing performance. (Green points …)

        Testing strategy

The section above shows how xapi performance is affected by this feature. The sections below describe the dev-testing which has already been undertaken, and …

…database:

        xe network-destroy uuid=<uuid>,

        where <uuid> is the UUID returned from step 2.

      • Forcefully power-cycle the master.

      • On fail-over, issue a CLI command on the new master to check that the row does not exist:

        xe network-list name-label=a,

        confirming that the returned string is empty.

Impact on existing regression tests

The Metadata-on-LUN feature should mean that there is no need to perform an ‘xe pool-sync-database’ operation in existing HA regression tests to ensure that database state persists on xapi failure.

How to add....

Subsections of How to add....

Adding a Class to the API

        This document describes how to add a new class to the data model that defines the Xen Server API. It complements two other documents that describe how to extend an existing class:

As a running example, we will use the addition of a class that is part of the design for the PVS Direct feature. PVS Direct introduces …

…you can declare that a user must have a particular role (e.g. ‘VM admin’)

      • if the xapi version is too old to know about your specific extension: the extension will still be callable but the client must have the ‘Pool admin’ role.
• Since a xapi which knows about your specific extension is stricter than an older xapi, it’s a good idea to develop against the new xapi and then test older xapi versions later.


        \ No newline at end of file diff --git a/new-docs/xapi/index.html b/new-docs/xapi/index.html index 944d1ca11..2c8104028 100644 --- a/new-docs/xapi/index.html +++ b/new-docs/xapi/index.html @@ -1,9 +1,9 @@ Xapi :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/xapi/index.print.html b/new-docs/xapi/index.print.html index 2695e5423..4e8b3778c 100644 --- a/new-docs/xapi/index.print.html +++ b/new-docs/xapi/index.print.html @@ -1,8 +1,8 @@ Xapi :: XAPI Toolstack Developer Documentation -

Xapi

Xapi is the xapi-project host and cluster manager.

Xapi is responsible for:

• providing a stable interface (the XenAPI)
• allowing one client to manage multiple hosts
• hosting the “xe” CLI
• authenticating users and applying role-based access control
• locking resources (in particular disks)
• allowing storage to be managed through plugins
• planning and coping with host failures (“High Availability”)
• storing VM and host configuration
• generating alerts
• managing software patching

Principles

1. The XenAPI interface must remain backwards compatible, allowing older clients to continue working
2. Xapi delegates all Xenstore/libxc/libxl access to Xenopsd, so Xapi could be run in an unprivileged helper domain
3. Xapi delegates the low-level storage manipulation to SM plugins.
4. Xapi delegates setting up host networking to xcp-networkd.
5. Xapi delegates monitoring performance counters to xcp-rrdd.

Overview

The following diagram shows the internals of Xapi:

Internals of xapi

The top of the diagram shows the XenAPI clients: XenCenter, XenOrchestra, OpenStack and CloudStack using XenAPI and HTTP GET/PUT over ports 80 and 443 to talk to xapi. These XenAPI (JSON-RPC or XML-RPC over HTTP POST) and HTTP GET/PUT are always authenticated using either PAM (by default using the local …

…at system boot time and model the per-VM overheads.

        Host overhead

The host overhead is not managed by xapi, instead it is sampled. After the host boots and before any VMs start, xapi asks Xen how much memory the host has in total, and how much memory is currently free. Xapi subtracts the free from the total and stores this as the host overhead.

VM overhead

The inputs to the model are

• VM.memory_static_max: the maximum amount of RAM the domain will be able to use
• VM.HVM_shadow_multiplier: allows the shadow memory to be increased
• VM.VCPUs_max: the maximum number of vCPUs the domain will be able to use

First the shadow memory is calculated, in MiB

Shadow memory in MiB

Second the VM overhead is calculated, in MiB

Memory overhead in MiB

Memory required to start a VM

If ballooning is disabled, the memory required to start a VM is the same as the VM overhead above.

        If ballooning is enabled then the memory calculation above is modified to use the VM.memory_dynamic_max rather than the VM.memory_static_max.
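
A sketch of that rule (the overhead function itself is defined only by the figures above, so it is left abstract here; the stand-in model below is hypothetical):

let memory_required_to_start ~ballooning ~static_max_mib ~dynamic_max_mib ~overhead_of =
  (* with ballooning enabled, the calculation uses dynamic_max in place of static_max *)
  overhead_of (if ballooning then dynamic_max_mib else static_max_mib)

let () =
  let overhead_of mib = (mib / 128) + 1 in (* stand-in for the model in the figures *)
  Printf.printf "%d MiB\n"
    (memory_required_to_start ~ballooning:true ~static_max_mib:4096
       ~dynamic_max_mib:2048 ~overhead_of)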

        Memory required to migrate a VM

        If ballooning is disabled, the memory required to receive a migrating VM is the same as the VM overhead above.

If ballooning is enabled, then the VM will first be ballooned down to VM.memory_dynamic_min …

XAPI's Storage Layers

Info

The links in this page point to the source files of xapi v1.127.0, and xcp-idl v1.62.0, not to the latest source code.

At the beginning of 2023, significant changes have been made in the layering. …

…
error "Caught exception while finally checking mirror state: %s" (Printexc.to_string e);
s.failed <- true
)

XAPI requests walk-throughs

Let’s detail the handling process of an XML request within XAPI.

        Subsections of XAPI requests walk-throughs

        From RPC migration request to xapi internals

        Overview

        In this document we will use the VM.pool_migrate request to illustrate the interaction between various components within the XAPI toolstack during migration. However this schema can be applied to other requests as well.

Not all parts of the Xapi toolstack are shown here as not all are involved in …

(Diagram: message_forwarding forwards the call to the host where it must be executed.)


Networkd

The xcp-networkd daemon (hereafter simply called “networkd”) is a component in the xapi toolstack that is responsible for configuring network interfaces and virtual switches (bridges) on a host.

The code is in ocaml/networkd.

Principles

1. Distro-agnostic. Networkd is meant to work on at least CentOS/RHEL as well as Debian/Ubuntu based distros. It therefore should not use any network configuration features specific to those distros.

2. Stateless. By default, networkd does not maintain any state. If you ask networkd anything about a network interface or bridge, or any other network sub-system property, it will always query the underlying system (e.g. for an IP address), rather than returning any cached state. However, if you want networkd to configure networking at host boot time, then you can ask it to remember the configuration you have set for any interface or bridge you choose.

3. Idempotent. It should be possible to call any networkd function multiple times without breaking things. For example, calling a function to set an IP address on an interface twice in a row should have the same outcome as calling it just once.

4. Do no harm. Networkd should only configure what you ask it to configure. This means that it can co-exist with other network managers.

Usage

Networkd is a daemon that is typically started at host-boot time. In the same way as the other daemons in the xapi toolstack, it is controlled by RPC requests. It typically receives requests from the xapi daemon, on behalf of which it configures host networking.

Networkd’s RPC API is fully described by the network_interface.ml file. The API has two main namespaces: Interface and Bridge, which are implemented in two modules in network_server.ml.

In line with other xapi daemons, all API functions take an argument of type debug_info (a string) as their first argument. The debug string appears in any log lines that are produced as a side effect of calling the function.

Network Interface API

The Interface API has functions to query and configure properties of Linux network devices, such as IP addresses, and bringing them up or down. Most Interface functions take a name string as a reference to a network interface as their second argument, which is expected to be the name of the Linux network device. There is also a special function, called Interface.make_config, that is able to configure a number of interfaces at once. It takes an argument called config of type (iface * interface_config_t) list, where iface is an interface name, and interface_config_t is a compound type containing the full configuration for an interface (as far as networkd is able to configure them), currently defined as follows:

type interface_config_t = {
	ipv4_conf: ipv4;
	ipv4_gateway: Unix.inet_addr option;
	ipv6_conf: ipv6;
…
	bridge_config: (bridge * bridge_config_t) list;
	gateway_interface: iface option;
	dns_interface: iface option;
}

        The gateway_interface and dns_interface in the config are global host-level options to define from which interfaces the default gateway and DNS configuration is taken. This is especially important when multiple interfaces are configured by DHCP.

When networkd starts up, it first reads network.conf to determine the network backend. It subsequently attempts to parse networkd.db, and tries to call Bridge.make_config and Interface.make_config on it, with special options to only apply the config for persistent bridges and interfaces, as well as bridges related to those (for example, if a VLAN bridge is configured, then also its parent bridge must be configured).

        Networkd also supports upgrades from older versions of XenServer that used a network configuration script called interface-configure. If networkd.db is not found on startup, then networkd attempts to call this tool (via the /etc/init.d/management-interface script) in order to set up networking at boot time. This is normally followed immediately by a call from xapi instructing networkd to take over.

        Finally, if no network config (old or new) is found on disk at all, networkd looks for a XenServer “firstboot” data file, which is written by XenServer’s host installer, and tries to apply it to set up the management interface.

        Monitoring

Besides the ability to configure bridges and network interfaces, networkd has facilities for monitoring interfaces and bonds. When networkd starts, a monitor thread is started, which does several things (see network_monitor_thread.ml; a sketch of the stats loop follows the list below):

        • Every 5 seconds, it gathers send/receive counters and link state of all network interfaces. It then writes these stats to a shared-memory file, to be picked up by other components such as xcp-rrdd and xapi (see documentation about “xenostats” elsewhere).
        • It monitors NIC bonds, and sends alerts through xapi in case of link state changes within a bond.
• It uses ip monitor address to watch for IP address changes, and if so, it calls xapi (Host.signal_networking_change) for it to update the IP addresses of the PIFs in its database that were configured by DHCP.
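
A minimal sketch (not the real network_monitor_thread.ml code; it uses the unix library and stand-in counter values) of the 5-second stats loop in the first bullet:

let rec monitor_loop ~gather ~publish =
  publish (gather ()); (* write the stats to the shared-memory file *)
  Unix.sleep 5;
  monitor_loop ~gather ~publish

let () =
  let gather () = [ ("eth0", (1234L, 5678L)) ] in (* stand-in (rx, tx) counters *)
  let publish stats =
    List.iter
      (fun (ifname, (rx, tx)) -> Printf.printf "%s rx=%Ld tx=%Ld\n" ifname rx tx)
      stats
  in
  monitor_loop ~gather ~publish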

        RRDD

        The xcp-rrdd daemon (hereafter simply called “rrdd”) is a component in the xapi toolstack that is responsible for collecting metrics, storing them as “Round-Robin Databases” (RRDs) and exposing these to clients.

        The code is in ocaml/xcp-rrdd.

        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xcp-rrdd/index.print.html b/new-docs/xcp-rrdd/index.print.html index c2f9a69e8..85baea20c 100644 --- a/new-docs/xcp-rrdd/index.print.html +++ b/new-docs/xcp-rrdd/index.print.html @@ -1,5 +1,5 @@ RRDD :: XAPI Toolstack Developer Documentation -

        RRDD

        The xcp-rrdd daemon (hereafter simply called “rrdd”) is a component in the xapi toolstack that is responsible for collecting metrics, storing them as “Round-Robin Databases” (RRDs) and exposing these to clients.

        The code is in ocaml/xcp-rrdd.

        Subsections of RRDD

        Design document
Revision: v1
Status: released (7.0)

        RRDD archival redesign

        Introduction

        Current problems with rrdd:

        • rrdd stores knowledge about whether it is running on a master or a slave

This determines the host to which rrdd will archive a VM’s rrd when the VM’s [...] open to suggestions here), will query the attached SRs, then query RRDD for the latest data source for these, and update the database.

        The utilisation of VDIs will not be updated in this way until scalability worries for RRDs are addressed.

Xapi will cache whether it is SR master for every attached SR and only attempt to update if it is the SR master.

        New APIs.

xcp-rrdd (a usage sketch follows this list):

        • Get the filesystem location where sr rrds are archived: val sr_rrds_path : uid:string -> string

        • Archive the sr rrds to the filesystem: val archive_sr_rrd : sr_uuid:string -> unit

        • Load the sr rrds from the filesystem: val push_sr_rrd : sr_uuid:string -> unit
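The sketch below shows how a caller might pair these functions around SR detach and attach; the Rrdd module name and the call sites are assumptions, and only the three signatures above come from this proposal.

(* Sketch: [Rrdd] is an assumed RPC binding for the calls above. *)
let on_sr_detach ~sr_uuid =
  (* Flush the SR's in-memory rrd to its filesystem archive. *)
  Rrdd.archive_sr_rrd ~sr_uuid

let on_sr_attach ~sr_uuid =
  (* Load any previously archived rrd for this SR back into rrdd. *)
  Rrdd.push_sr_rrd ~sr_uuid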

        \ No newline at end of file diff --git a/new-docs/xen-api/basics/index.html b/new-docs/xen-api/basics/index.html index 41468ef41..85648a3b7 100644 --- a/new-docs/xen-api/basics/index.html +++ b/new-docs/xen-api/basics/index.html @@ -1,5 +1,5 @@ XenAPI Basics :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/auth/index.html b/new-docs/xen-api/classes/auth/index.html index f00b436d8..7db5cee19 100644 --- a/new-docs/xen-api/classes/auth/index.html +++ b/new-docs/xen-api/classes/auth/index.html @@ -1,5 +1,5 @@ auth :: XAPI Toolstack Developer Documentation -

        Class: auth

        Management of remote authentication services

        Fields

        Messages

        string set get_group_membership @@ -10,9 +10,9 @@ (session ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/blob/index.html b/new-docs/xen-api/classes/blob/index.html index 5b2484fb0..a9ca17274 100644 --- a/new-docs/xen-api/classes/blob/index.html +++ b/new-docs/xen-api/classes/blob/index.html @@ -1,5 +1,5 @@ blob :: XAPI Toolstack Developer Documentation -

        Class: blob

        A placeholder for a binary blob

        Fields

        datetime last_updated [RO/constructor]
        string @@ -52,9 +52,9 @@ (session ref, blob ref, bool)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/bond/index.html b/new-docs/xen-api/classes/bond/index.html index a10b23da6..2a702cd23 100644 --- a/new-docs/xen-api/classes/bond/index.html +++ b/new-docs/xen-api/classes/bond/index.html @@ -1,5 +1,5 @@ Bond :: XAPI Toolstack Developer Documentation -

        Class: Bond

        A Network bond that combines physical network interfaces, also known as link aggregation

        Enums

        bond_mode

        Fields

        bool auto_update_mac [RO/runtime]
        int @@ -62,9 +62,9 @@ (session ref, Bond ref, string, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/certificate/index.html b/new-docs/xen-api/classes/certificate/index.html index cfb1dfc29..c69848535 100644 --- a/new-docs/xen-api/classes/certificate/index.html +++ b/new-docs/xen-api/classes/certificate/index.html @@ -1,5 +1,5 @@ Certificate :: XAPI Toolstack Developer Documentation -

        Class: Certificate

        An X509 certificate used for TLS connections

        Enums

        certificate_type

        Fields

        string fingerprint [RO/constructor]
        host ref @@ -40,9 +40,9 @@ (session ref, Certificate ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/cluster/index.html b/new-docs/xen-api/classes/cluster/index.html index 3bb53641d..b8bfe9277 100644 --- a/new-docs/xen-api/classes/cluster/index.html +++ b/new-docs/xen-api/classes/cluster/index.html @@ -1,5 +1,5 @@ Cluster :: XAPI Toolstack Developer Documentation -

        Class: Cluster

        Cluster-wide Cluster metadata

        Enums

        cluster_operation

        Fields

        enum cluster_operation set allowed_operations [RO/runtime]
        (string → string) map @@ -92,9 +92,9 @@ (session ref, Cluster ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/cluster_host/index.html b/new-docs/xen-api/classes/cluster_host/index.html index ce96e5bd3..bc43af31c 100644 --- a/new-docs/xen-api/classes/cluster_host/index.html +++ b/new-docs/xen-api/classes/cluster_host/index.html @@ -1,5 +1,5 @@ Cluster_host :: XAPI Toolstack Developer Documentation -

        Class: Cluster_host

        Cluster member metadata

        Enums

        cluster_host_operation

        Fields

        enum cluster_host_operation set allowed_operations [RO/runtime]
        Cluster ref @@ -66,9 +66,9 @@ (session ref, Cluster_host ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/console/index.html b/new-docs/xen-api/classes/console/index.html index 2b31e4fd3..d5ff83dc6 100644 --- a/new-docs/xen-api/classes/console/index.html +++ b/new-docs/xen-api/classes/console/index.html @@ -1,5 +1,5 @@ console :: XAPI Toolstack Developer Documentation -

        Class: console

        A console

        Enums

        console_protocol

        Fields

        string location [RO/runtime]
        (string → string) map @@ -43,9 +43,9 @@ (session ref, console ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/crashdump/index.html b/new-docs/xen-api/classes/crashdump/index.html index acd3e3dd1..a59aa0d32 100644 --- a/new-docs/xen-api/classes/crashdump/index.html +++ b/new-docs/xen-api/classes/crashdump/index.html @@ -1,5 +1,5 @@ crashdump :: XAPI Toolstack Developer Documentation -
        Deprecated

        Class: crashdump

        A VM crashdump

        Fields

        (string → string) map other_config [RW]
        string @@ -36,9 +36,9 @@ (session ref, crashdump ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/data_source/index.html b/new-docs/xen-api/classes/data_source/index.html index 48c8bc5cf..d31bbfe8a 100644 --- a/new-docs/xen-api/classes/data_source/index.html +++ b/new-docs/xen-api/classes/data_source/index.html @@ -1,5 +1,5 @@ data_source :: XAPI Toolstack Developer Documentation -

        Class: data_source

        Data sources for logging in RRDs

        Fields

        bool enabled [RO/runtime]
        float @@ -20,9 +20,9 @@
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/dr_task/index.html b/new-docs/xen-api/classes/dr_task/index.html index 228c6d5d9..cba5046ea 100644 --- a/new-docs/xen-api/classes/dr_task/index.html +++ b/new-docs/xen-api/classes/dr_task/index.html @@ -1,5 +1,5 @@ DR_task :: XAPI Toolstack Developer Documentation -

        Class: DR_task

        DR task

        Fields

        SR ref set introduced_SRs [RO/runtime]
        string @@ -24,9 +24,9 @@ (session ref, DR_task ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/event/index.html b/new-docs/xen-api/classes/event/index.html index 4b61f0bf1..3f6172a9e 100644 --- a/new-docs/xen-api/classes/event/index.html +++ b/new-docs/xen-api/classes/event/index.html @@ -1,5 +1,5 @@ event :: XAPI Toolstack Developer Documentation -

        Class: event

        Asynchronous event registration and handling

        Enums

        event_operation

        Fields

        string class [RO/constructor]
        int @@ -30,9 +30,9 @@ (session ref, string set)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/feature/index.html b/new-docs/xen-api/classes/feature/index.html index a2410db9d..b556d79a4 100644 --- a/new-docs/xen-api/classes/feature/index.html +++ b/new-docs/xen-api/classes/feature/index.html @@ -1,5 +1,5 @@ Feature :: XAPI Toolstack Developer Documentation -

        Class: Feature

        A new piece of functionality

        Fields

        bool enabled [RO/runtime]
        bool @@ -42,9 +42,9 @@ (session ref, Feature ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/gpu_group/index.html b/new-docs/xen-api/classes/gpu_group/index.html index bd962cbd8..4f0a97389 100644 --- a/new-docs/xen-api/classes/gpu_group/index.html +++ b/new-docs/xen-api/classes/gpu_group/index.html @@ -1,5 +1,5 @@ GPU_group :: XAPI Toolstack Developer Documentation -

        Class: GPU_group

        A group of compatible GPUs across the resource pool

        Enums

        allocation_algorithm

        Fields

        enum allocation_algorithm allocation_algorithm [RW]
        VGPU_type ref set @@ -72,9 +72,9 @@ (session ref, GPU_group ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/host/index.html b/new-docs/xen-api/classes/host/index.html index 28535d30f..750883609 100644 --- a/new-docs/xen-api/classes/host/index.html +++ b/new-docs/xen-api/classes/host/index.html @@ -1,5 +1,5 @@ host :: XAPI Toolstack Developer Documentation -

        Class: host

        A physical host

        Enums

        host_allowed_operations
        latest_synced_updates_applied_state
        update_guidances
        host_display
        host_sched_gran
        host_numa_affinity_policy

        Fields

        string address [RW]
        enum host_allowed_operations set @@ -488,9 +488,9 @@ (session ref, host ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/host_cpu/index.html b/new-docs/xen-api/classes/host_cpu/index.html index 56c227b47..6b9450840 100644 --- a/new-docs/xen-api/classes/host_cpu/index.html +++ b/new-docs/xen-api/classes/host_cpu/index.html @@ -1,5 +1,5 @@ host_cpu :: XAPI Toolstack Developer Documentation -
        Deprecated

        Class: host_cpu

        A physical CPU

        Fields

        int family [RO/runtime]
        string @@ -70,9 +70,9 @@ (session ref, host_cpu ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/host_crashdump/index.html b/new-docs/xen-api/classes/host_crashdump/index.html index 327a72c27..6ce3dc916 100644 --- a/new-docs/xen-api/classes/host_crashdump/index.html +++ b/new-docs/xen-api/classes/host_crashdump/index.html @@ -1,5 +1,5 @@ host_crashdump :: XAPI Toolstack Developer Documentation -

        Class: host_crashdump

        Represents a host crash dump

        Fields

        host ref host [RO/constructor]
        (string → string) map @@ -42,9 +42,9 @@ (session ref, host_crashdump ref, string, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/host_metrics/index.html b/new-docs/xen-api/classes/host_metrics/index.html index 093371db1..458d1f343 100644 --- a/new-docs/xen-api/classes/host_metrics/index.html +++ b/new-docs/xen-api/classes/host_metrics/index.html @@ -1,5 +1,5 @@ host_metrics :: XAPI Toolstack Developer Documentation -

        Class: host_metrics

        The metrics associated with a host

        Fields

        datetime last_updated [RO/runtime]
        bool @@ -42,9 +42,9 @@ (session ref, host_metrics ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/host_patch/index.html b/new-docs/xen-api/classes/host_patch/index.html index d69146f6d..417fcbcd7 100644 --- a/new-docs/xen-api/classes/host_patch/index.html +++ b/new-docs/xen-api/classes/host_patch/index.html @@ -1,5 +1,5 @@ host_patch :: XAPI Toolstack Developer Documentation -
        Deprecated

        Class: host_patch

        Represents a patch stored on a server

        Fields

        bool applied [RO/runtime]
        host ref @@ -64,9 +64,9 @@ (session ref, host_patch ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/index.html b/new-docs/xen-api/classes/index.html index 649ae6974..2dd3e284b 100644 --- a/new-docs/xen-api/classes/index.html +++ b/new-docs/xen-api/classes/index.html @@ -1,12 +1,12 @@ XenAPI Reference :: XAPI Toolstack Developer Documentation -

        XenAPI Classes

        Click on a class to view the associated fields and messages.

        Xen-api class diagram

        Classes, Fields and Messages

Classes have both fields and messages. Messages are either implicit or explicit, where an implicit message is one of:

        • a constructor (usually called "create");
        • a destructor (usually called "destroy");
        • "get_by_name_label";
        • "get_by_uuid";
        • "get_record";
        • "get_all"; and
        • "get_all_records".

Explicit messages include all the rest: more class-specific messages (e.g. "VM.start", "VM.clone").

Every field has at least one accessor, depending both on its type and whether it is read-only or read-write. Accessors for a field named "X" would be a proper subset of the following (a usage sketch follows the list):

        • set_X: change the value of field X (only if it is read-write);
        • get_X: retrieve the value of field X;
• add_X: add a value (for fields of type set);
• remove_X: remove a value (for fields of type set);
        • add_to_X: add a key/value pair (for fields of type map); and
        • remove_from_X: remove a key (for fields of type map).
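For example, with xapi's generated OCaml client, the accessors for VM.name_label (read-write) and VM.other_config (a map) could be driven as below; treat the exact labelled arguments as illustrative rather than authoritative.

(* Sketch using the generated Client module. *)
let rename_and_tag ~rpc ~session_id ~vm =
  (* Read-write field: get_X and set_X. *)
  let label = Client.VM.get_name_label ~rpc ~session_id ~self:vm in
  Client.VM.set_name_label ~rpc ~session_id ~self:vm ~value:(label ^ "-copy");
  (* Map-typed field: add_to_X and remove_from_X. *)
  Client.VM.add_to_other_config ~rpc ~session_id ~self:vm
    ~key:"owner" ~value:"alice";
  Client.VM.remove_from_other_config ~rpc ~session_id ~self:vm ~key:"owner"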
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/index.print.html b/new-docs/xen-api/classes/index.print.html index 0fe51b444..e199351b9 100644 --- a/new-docs/xen-api/classes/index.print.html +++ b/new-docs/xen-api/classes/index.print.html @@ -1,4 +1,4 @@ XenAPI Reference :: XAPI Toolstack Developer Documentation -

        XenAPI Classes

        Click on a class to view the associated fields and messages.

Xen-api class diagram

        Classes, Fields and Messages

Classes have both fields and messages. Messages are either implicit or explicit, where an implicit message is one of:

        • a constructor (usually called "create");
        • a destructor (usually called "destroy");
        • "get_by_name_label";
        • "get_by_uuid";
        • "get_record";
        • "get_all"; and
        • "get_all_records".

Explicit messages include all the rest: more class-specific messages (e.g. "VM.start", "VM.clone").

Every field has at least one accessor, depending both on its type and whether it is read-only or read-write. Accessors for a field named "X" would be a proper subset of:

        • set_X: change the value of field X (only if it is read-write);
        • get_X: retrieve the value of field X;
• add_X: add a value (for fields of type set);
• remove_X: remove a value (for fields of type set);
        • add_to_X: add a key/value pair (for fields of type map); and
        • remove_from_X: remove a key (for fields of type map).
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/lvhd/index.html b/new-docs/xen-api/classes/lvhd/index.html index ce55baee8..b6766c950 100644 --- a/new-docs/xen-api/classes/lvhd/index.html +++ b/new-docs/xen-api/classes/lvhd/index.html @@ -1,5 +1,5 @@ LVHD :: XAPI Toolstack Developer Documentation -

        Class: LVHD

        LVHD SR specific operations

        Fields

        string uuid [RO/runtime]

        Messages @@ -14,9 +14,9 @@ (session ref, LVHD ref)

        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/message/index.html b/new-docs/xen-api/classes/message/index.html index ba8f895e5..59d00008d 100644 --- a/new-docs/xen-api/classes/message/index.html +++ b/new-docs/xen-api/classes/message/index.html @@ -1,5 +1,5 @@ message :: XAPI Toolstack Developer Documentation -

        Class: message

A message for the attention of the administrator

        Enums

        cls

        Fields

        string body [RO/runtime]
        enum cls @@ -38,9 +38,9 @@ (session ref, datetime)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/network/index.html b/new-docs/xen-api/classes/network/index.html index 08d9504d7..fa3b829db 100644 --- a/new-docs/xen-api/classes/network/index.html +++ b/new-docs/xen-api/classes/network/index.html @@ -1,5 +1,5 @@ network :: XAPI Toolstack Developer Documentation -

        Class: network

        A virtual network

        Enums

        network_operations
        network_default_locking_mode
        network_purpose

        Fields

        enum network_operations set allowed_operations [RO/runtime]
        (VIF ref → string) map @@ -109,9 +109,9 @@ (session ref, network ref, string set)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/network_sriov/index.html b/new-docs/xen-api/classes/network_sriov/index.html index 2739539dc..d9d60279f 100644 --- a/new-docs/xen-api/classes/network_sriov/index.html +++ b/new-docs/xen-api/classes/network_sriov/index.html @@ -1,5 +1,5 @@ network_sriov :: XAPI Toolstack Developer Documentation -

        Class: network_sriov

A network-sriov, which connects a logical PIF and a physical PIF

        Enums

        sriov_configuration_mode

        Fields

        enum sriov_configuration_mode configuration_mode [RO/runtime]
        PIF ref @@ -38,9 +38,9 @@ (session ref, network_sriov ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/observer/index.html b/new-docs/xen-api/classes/observer/index.html index 3b4794c72..900f629a2 100644 --- a/new-docs/xen-api/classes/observer/index.html +++ b/new-docs/xen-api/classes/observer/index.html @@ -1,5 +1,5 @@ Observer :: XAPI Toolstack Developer Documentation -
        Prototype

        Class: Observer

Describes an observer, which controls observability activity in the Toolstack

        Fields

        Prototype
        (string → string) map attributes [RO/constructor]
        Prototype
        string set @@ -65,9 +65,9 @@ (session ref, Observer ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pbd/index.html b/new-docs/xen-api/classes/pbd/index.html index 1632fa74d..7cbe7ac47 100644 --- a/new-docs/xen-api/classes/pbd/index.html +++ b/new-docs/xen-api/classes/pbd/index.html @@ -1,5 +1,5 @@ PBD :: XAPI Toolstack Developer Documentation -

        Class: PBD

        The physical block devices through which hosts access SRs

        Fields

        bool currently_attached [RO/runtime]
        (string → string) map @@ -53,9 +53,9 @@ (session ref, PBD ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pci/index.html b/new-docs/xen-api/classes/pci/index.html index 88193388e..b367f4b01 100644 --- a/new-docs/xen-api/classes/pci/index.html +++ b/new-docs/xen-api/classes/pci/index.html @@ -1,5 +1,5 @@ PCI :: XAPI Toolstack Developer Documentation -

        Class: PCI

        A PCI device

        Fields

        string class_name [RO/constructor]
        PCI ref set @@ -62,9 +62,9 @@ (session ref, PCI ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pgpu/index.html b/new-docs/xen-api/classes/pgpu/index.html index e605c9735..cc68f6a3b 100644 --- a/new-docs/xen-api/classes/pgpu/index.html +++ b/new-docs/xen-api/classes/pgpu/index.html @@ -1,5 +1,5 @@ PGPU :: XAPI Toolstack Developer Documentation -

        Class: PGPU

        A physical GPU (pGPU)

        Enums

        pgpu_dom0_access

        Fields

        (string → string) map compatibility_metadata [RO/runtime]
        enum pgpu_dom0_access @@ -80,9 +80,9 @@ (session ref, PGPU ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pif/index.html b/new-docs/xen-api/classes/pif/index.html index 3ba1e356e..1f9859bfd 100644 --- a/new-docs/xen-api/classes/pif/index.html +++ b/new-docs/xen-api/classes/pif/index.html @@ -1,5 +1,5 @@ PIF :: XAPI Toolstack Developer Documentation -

        Class: PIF

        A physical network interface (note separate VLANs are represented as several PIFs)

        Enums

        pif_igmp_status
        ip_configuration_mode
        ipv6_configuration_mode
        primary_address_type

        Fields

        Bond ref set bond_master_of [RO/runtime]
        Bond ref @@ -186,9 +186,9 @@ (session ref, PIF ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pif_metrics/index.html b/new-docs/xen-api/classes/pif_metrics/index.html index 49383acad..d291b8ae1 100644 --- a/new-docs/xen-api/classes/pif_metrics/index.html +++ b/new-docs/xen-api/classes/pif_metrics/index.html @@ -1,5 +1,5 @@ PIF_metrics :: XAPI Toolstack Developer Documentation -

        Class: PIF_metrics

        The metrics associated with a physical network interface

        Fields

        bool carrier [RO/runtime]
        string @@ -70,9 +70,9 @@ (session ref, PIF_metrics ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pool/index.html b/new-docs/xen-api/classes/pool/index.html index b04908e23..8a8cecc2d 100644 --- a/new-docs/xen-api/classes/pool/index.html +++ b/new-docs/xen-api/classes/pool/index.html @@ -1,5 +1,5 @@ pool :: XAPI Toolstack Developer Documentation -

        Class: pool

        Pool-wide information

        Enums

        pool_allowed_operations
        telemetry_frequency
        update_sync_frequency

        Fields

        enum pool_allowed_operations set allowed_operations [RO/runtime]
        (string → blob ref) map @@ -435,9 +435,9 @@ (session ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pool_patch/index.html b/new-docs/xen-api/classes/pool_patch/index.html index ea1062286..8dae03aac 100644 --- a/new-docs/xen-api/classes/pool_patch/index.html +++ b/new-docs/xen-api/classes/pool_patch/index.html @@ -1,5 +1,5 @@ pool_patch :: XAPI Toolstack Developer Documentation -
        Deprecated

        Class: pool_patch

        Pool-wide patches

        Enums

        after_apply_guidance

        Fields

        enum after_apply_guidance set after_apply_guidance [RO/runtime]
        host_patch ref set @@ -74,9 +74,9 @@ (session ref, pool_patch ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pool_update/index.html b/new-docs/xen-api/classes/pool_update/index.html index c51afc39f..7a687c4a8 100644 --- a/new-docs/xen-api/classes/pool_update/index.html +++ b/new-docs/xen-api/classes/pool_update/index.html @@ -1,5 +1,5 @@ pool_update :: XAPI Toolstack Developer Documentation -

        Class: pool_update

        Pool-wide updates to the host software

        Enums

        update_after_apply_guidance
        livepatch_status

        Fields

        enum update_after_apply_guidance set after_apply_guidance [RO/constructor]
        bool @@ -76,9 +76,9 @@ (session ref, pool_update ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/probe_result/index.html b/new-docs/xen-api/classes/probe_result/index.html index 6299ef41d..d86a4ecee 100644 --- a/new-docs/xen-api/classes/probe_result/index.html +++ b/new-docs/xen-api/classes/probe_result/index.html @@ -1,5 +1,5 @@ probe_result :: XAPI Toolstack Developer Documentation -

        Class: probe_result

A set of properties that describe one result element of SR.probe. Result elements and properties can change dynamically based on changes to the SR.probe input-parameters or the target.

        Fields

        bool complete [RO/runtime]
        (string → string) map @@ -12,9 +12,9 @@
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pusb/index.html b/new-docs/xen-api/classes/pusb/index.html index 559e0e343..b40794ed8 100644 --- a/new-docs/xen-api/classes/pusb/index.html +++ b/new-docs/xen-api/classes/pusb/index.html @@ -1,5 +1,5 @@ PUSB :: XAPI Toolstack Developer Documentation -

        Class: PUSB

        A physical USB device

        Fields

        string description [RO/constructor]
        host ref @@ -78,9 +78,9 @@ (session ref, PUSB ref, bool)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pvs_cache_storage/index.html b/new-docs/xen-api/classes/pvs_cache_storage/index.html index 2c23ee74c..1cba23c46 100644 --- a/new-docs/xen-api/classes/pvs_cache_storage/index.html +++ b/new-docs/xen-api/classes/pvs_cache_storage/index.html @@ -1,5 +1,5 @@ PVS_cache_storage :: XAPI Toolstack Developer Documentation -

        Class: PVS_cache_storage

        Describes the storage that is available to a PVS site for caching purposes

        Fields

        host ref host [RO/constructor]
        PVS_site ref @@ -41,9 +41,9 @@ (session ref, PVS_cache_storage ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pvs_proxy/index.html b/new-docs/xen-api/classes/pvs_proxy/index.html index ebcf898fd..6c7fa2295 100644 --- a/new-docs/xen-api/classes/pvs_proxy/index.html +++ b/new-docs/xen-api/classes/pvs_proxy/index.html @@ -1,5 +1,5 @@ PVS_proxy :: XAPI Toolstack Developer Documentation -

        Class: PVS_proxy

A proxy that connects a VM/VIF with a PVS site

        Enums

        pvs_proxy_status

        Fields

        bool currently_attached [RO/runtime]
        PVS_site ref @@ -36,9 +36,9 @@ (session ref, PVS_proxy ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pvs_server/index.html b/new-docs/xen-api/classes/pvs_server/index.html index e761d8c9b..7916439c0 100644 --- a/new-docs/xen-api/classes/pvs_server/index.html +++ b/new-docs/xen-api/classes/pvs_server/index.html @@ -1,5 +1,5 @@ PVS_server :: XAPI Toolstack Developer Documentation -

        Class: PVS_server

An individual machine serving provisioning (block) data

        Fields

        string set addresses [RO/constructor]
        int @@ -36,9 +36,9 @@ (session ref, string set, int, int, PVS_site ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/pvs_site/index.html b/new-docs/xen-api/classes/pvs_site/index.html index 2ff6afbab..454432a87 100644 --- a/new-docs/xen-api/classes/pvs_site/index.html +++ b/new-docs/xen-api/classes/pvs_site/index.html @@ -1,5 +1,5 @@ PVS_site :: XAPI Toolstack Developer Documentation -

        Class: PVS_site

Machines serving blocks of data for provisioning VMs

        Fields

        PVS_cache_storage ref set cache_storage [RO/runtime]
        string @@ -52,9 +52,9 @@ (session ref, PVS_site ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/repository/index.html b/new-docs/xen-api/classes/repository/index.html index 50b4500c3..18be08d36 100644 --- a/new-docs/xen-api/classes/repository/index.html +++ b/new-docs/xen-api/classes/repository/index.html @@ -1,5 +1,5 @@ Repository :: XAPI Toolstack Developer Documentation -

        Class: Repository

        Repository for updates

        Fields

        string binary_url [RO/constructor]
        Prototype
        string @@ -60,9 +60,9 @@ (session ref, Repository ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/role/index.html b/new-docs/xen-api/classes/role/index.html index 05445293e..14c0104b4 100644 --- a/new-docs/xen-api/classes/role/index.html +++ b/new-docs/xen-api/classes/role/index.html @@ -1,5 +1,5 @@ role :: XAPI Toolstack Developer Documentation -

        Class: role

        A set of permissions associated with a subject

        Fields

        bool is_internal [RO/runtime]
        string @@ -42,9 +42,9 @@ (session ref, role ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/sdn_controller/index.html b/new-docs/xen-api/classes/sdn_controller/index.html index 86c3fe5ea..acdd51a67 100644 --- a/new-docs/xen-api/classes/sdn_controller/index.html +++ b/new-docs/xen-api/classes/sdn_controller/index.html @@ -1,5 +1,5 @@ SDN_controller :: XAPI Toolstack Developer Documentation -

        Class: SDN_controller

        Describes the SDN controller that is to connect with the pool

        Enums

        sdn_controller_protocol

        Fields

        string address [RO/constructor]
        int @@ -32,9 +32,9 @@ (session ref, enum sdn_controller_protocol, string, int)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/secret/index.html b/new-docs/xen-api/classes/secret/index.html index 90b4b16be..3d195c290 100644 --- a/new-docs/xen-api/classes/secret/index.html +++ b/new-docs/xen-api/classes/secret/index.html @@ -1,5 +1,5 @@ secret :: XAPI Toolstack Developer Documentation -

        Class: secret

        A secret

        Fields

        (string → string) map other_config [RW]
        string @@ -37,9 +37,9 @@ (session ref, secret ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/session/index.html b/new-docs/xen-api/classes/session/index.html index 95f15fb4a..28983ffbd 100644 --- a/new-docs/xen-api/classes/session/index.html +++ b/new-docs/xen-api/classes/session/index.html @@ -1,5 +1,5 @@ session :: XAPI Toolstack Developer Documentation -

        Class: session

        A session

        Fields

        string auth_user_name [RO/runtime]
        string @@ -94,9 +94,9 @@ (string, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/sm/index.html b/new-docs/xen-api/classes/sm/index.html index 002b0cfdf..2de223b4b 100644 --- a/new-docs/xen-api/classes/sm/index.html +++ b/new-docs/xen-api/classes/sm/index.html @@ -1,5 +1,5 @@ SM :: XAPI Toolstack Developer Documentation -

        Class: SM

        A storage manager plugin

        Fields

        Deprecated
        string set capabilities [RO/runtime]
        (string → string) map @@ -76,9 +76,9 @@ (session ref, SM ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/sr/index.html b/new-docs/xen-api/classes/sr/index.html index e69d41f15..353cf0cde 100644 --- a/new-docs/xen-api/classes/sr/index.html +++ b/new-docs/xen-api/classes/sr/index.html @@ -1,5 +1,5 @@ SR :: XAPI Toolstack Developer Documentation -

        Class: SR

        A storage repository

        Enums

        storage_operations

        Fields

        enum storage_operations set allowed_operations [RO/runtime]
        (string → blob ref) map @@ -162,9 +162,9 @@ (session ref, SR ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/sr_stat/index.html b/new-docs/xen-api/classes/sr_stat/index.html index 5bef08d62..7235a5b84 100644 --- a/new-docs/xen-api/classes/sr_stat/index.html +++ b/new-docs/xen-api/classes/sr_stat/index.html @@ -1,5 +1,5 @@ sr_stat :: XAPI Toolstack Developer Documentation -

        Class: sr_stat

        A set of high-level properties associated with an SR.

        Enums

        sr_health

        Fields

        bool clustered [RO/runtime]
        int @@ -18,9 +18,9 @@
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/subject/index.html b/new-docs/xen-api/classes/subject/index.html index 4cf8003c2..071cfcbfa 100644 --- a/new-docs/xen-api/classes/subject/index.html +++ b/new-docs/xen-api/classes/subject/index.html @@ -1,5 +1,5 @@ subject :: XAPI Toolstack Developer Documentation -

        Class: subject

A user or group that can log in to xapi

        Fields

        (string → string) map other_config [RO/constructor]
        role ref set @@ -39,9 +39,9 @@ (session ref, subject ref, role ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/task/index.html b/new-docs/xen-api/classes/task/index.html index 67a90e938..4afac8383 100644 --- a/new-docs/xen-api/classes/task/index.html +++ b/new-docs/xen-api/classes/task/index.html @@ -1,5 +1,5 @@ task :: XAPI Toolstack Developer Documentation -

        Class: task

        A long-running asynchronous task

        Enums

        task_allowed_operations
        task_status_type

        Fields

        enum task_allowed_operations set allowed_operations [RO/runtime]
        string @@ -102,9 +102,9 @@ (session ref, task ref, enum task_status_type)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/tunnel/index.html b/new-docs/xen-api/classes/tunnel/index.html index a6beb4a59..33ddcc65d 100644 --- a/new-docs/xen-api/classes/tunnel/index.html +++ b/new-docs/xen-api/classes/tunnel/index.html @@ -1,5 +1,5 @@ tunnel :: XAPI Toolstack Developer Documentation -

        Class: tunnel

        A tunnel for network traffic

        Enums

        tunnel_protocol

        Fields

        PIF ref access_PIF [RO/constructor]
        (string → string) map @@ -54,9 +54,9 @@ (session ref, tunnel ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/usb_group/index.html b/new-docs/xen-api/classes/usb_group/index.html index 9f4bc1bd1..1c47f0ea9 100644 --- a/new-docs/xen-api/classes/usb_group/index.html +++ b/new-docs/xen-api/classes/usb_group/index.html @@ -1,5 +1,5 @@ USB_group :: XAPI Toolstack Developer Documentation -

        Class: USB_group

        A group of compatible USBs across the resource pool

        Fields

        string name_description [RW]
        string @@ -52,9 +52,9 @@ (session ref, USB_group ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/user/index.html b/new-docs/xen-api/classes/user/index.html index f9808a73d..adeed59be 100644 --- a/new-docs/xen-api/classes/user/index.html +++ b/new-docs/xen-api/classes/user/index.html @@ -1,5 +1,5 @@ user :: XAPI Toolstack Developer Documentation -
        Deprecated

        Class: user

        A user of the system

        Fields

        string fullname [RW]
        (string → string) map @@ -37,9 +37,9 @@ (session ref, user ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vbd/index.html b/new-docs/xen-api/classes/vbd/index.html index 30f4ff60c..4d4c17fa8 100644 --- a/new-docs/xen-api/classes/vbd/index.html +++ b/new-docs/xen-api/classes/vbd/index.html @@ -1,5 +1,5 @@ VBD :: XAPI Toolstack Developer Documentation -

        Class: VBD

        A virtual block device

        Enums

        vbd_operations
        vbd_type
        vbd_mode

        Fields

        enum vbd_operations set allowed_operations [RO/runtime]
        bool @@ -141,9 +141,9 @@ (session ref, VBD ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vbd_metrics/index.html b/new-docs/xen-api/classes/vbd_metrics/index.html index 36adccd35..c76bdb51c 100644 --- a/new-docs/xen-api/classes/vbd_metrics/index.html +++ b/new-docs/xen-api/classes/vbd_metrics/index.html @@ -1,5 +1,5 @@ VBD_metrics :: XAPI Toolstack Developer Documentation -
        Removed

        Class: VBD_metrics

        The metrics associated with a virtual block device

        Fields

        Removed
        float io_read_kbs [RO/runtime]
        Removed
        float @@ -38,9 +38,9 @@ (session ref, VBD_metrics ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vdi/index.html b/new-docs/xen-api/classes/vdi/index.html index 0a43f539a..81cd403c7 100644 --- a/new-docs/xen-api/classes/vdi/index.html +++ b/new-docs/xen-api/classes/vdi/index.html @@ -1,5 +1,5 @@ VDI :: XAPI Toolstack Developer Documentation -

        Class: VDI

        A virtual disk image

        Enums

        vdi_operations
        vdi_type
        on_boot

        Fields

        bool allow_caching [RO/runtime]
        enum vdi_operations set @@ -215,9 +215,9 @@ (session ref, VDI ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vdi_nbd_server_info/index.html b/new-docs/xen-api/classes/vdi_nbd_server_info/index.html index 6594f63c2..504560fb2 100644 --- a/new-docs/xen-api/classes/vdi_nbd_server_info/index.html +++ b/new-docs/xen-api/classes/vdi_nbd_server_info/index.html @@ -1,5 +1,5 @@ vdi_nbd_server_info :: XAPI Toolstack Developer Documentation -

        Class: vdi_nbd_server_info

        Details for connecting to a VDI using the Network Block Device protocol

        Fields

        string address [RO/runtime]
        string @@ -14,9 +14,9 @@
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vgpu/index.html b/new-docs/xen-api/classes/vgpu/index.html index d119094e9..d1391417a 100644 --- a/new-docs/xen-api/classes/vgpu/index.html +++ b/new-docs/xen-api/classes/vgpu/index.html @@ -1,5 +1,5 @@ VGPU :: XAPI Toolstack Developer Documentation -

        Class: VGPU

        A virtual GPU (vGPU)

        Fields

        (string → string) map compatibility_metadata [RO/runtime]
        bool @@ -72,9 +72,9 @@ (session ref, VGPU ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vgpu_type/index.html b/new-docs/xen-api/classes/vgpu_type/index.html index f09267ab6..2a245780a 100644 --- a/new-docs/xen-api/classes/vgpu_type/index.html +++ b/new-docs/xen-api/classes/vgpu_type/index.html @@ -1,5 +1,5 @@ VGPU_type :: XAPI Toolstack Developer Documentation -

        Class: VGPU_type

        A type of virtual GPU

        Enums

        vgpu_type_implementation

        Fields

        VGPU_type ref set compatible_types_in_vm [RO/runtime]
        GPU_group ref set @@ -76,9 +76,9 @@ (session ref, VGPU_type ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vif/index.html b/new-docs/xen-api/classes/vif/index.html index 7d19caf8a..beb60090d 100644 --- a/new-docs/xen-api/classes/vif/index.html +++ b/new-docs/xen-api/classes/vif/index.html @@ -1,5 +1,5 @@ VIF :: XAPI Toolstack Developer Documentation -

        Class: VIF

        A virtual network interface

        Enums

        vif_operations
        vif_locking_mode
        vif_ipv4_configuration_mode
        vif_ipv6_configuration_mode

        Fields

        enum vif_operations set allowed_operations [RO/runtime]
        (string → enum vif_operations) map @@ -165,9 +165,9 @@ (session ref, VIF ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vif_metrics/index.html b/new-docs/xen-api/classes/vif_metrics/index.html index 4d465df19..f5757c9d8 100644 --- a/new-docs/xen-api/classes/vif_metrics/index.html +++ b/new-docs/xen-api/classes/vif_metrics/index.html @@ -1,5 +1,5 @@ VIF_metrics :: XAPI Toolstack Developer Documentation -
        Removed

        Class: VIF_metrics

        The metrics associated with a virtual network device

        Fields

        Removed
        float io_read_kbs [RO/runtime]
        Removed
        float @@ -38,9 +38,9 @@ (session ref, VIF_metrics ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vlan/index.html b/new-docs/xen-api/classes/vlan/index.html index 6af7e1387..fd9a486a7 100644 --- a/new-docs/xen-api/classes/vlan/index.html +++ b/new-docs/xen-api/classes/vlan/index.html @@ -1,5 +1,5 @@ VLAN :: XAPI Toolstack Developer Documentation -

        Class: VLAN

        A VLAN mux/demux

        Fields

        (string → string) map other_config [RW]
        int @@ -42,9 +42,9 @@ (session ref, VLAN ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vm/index.html b/new-docs/xen-api/classes/vm/index.html index 0db020122..b7c797642 100644 --- a/new-docs/xen-api/classes/vm/index.html +++ b/new-docs/xen-api/classes/vm/index.html @@ -1,5 +1,5 @@ VM :: XAPI Toolstack Developer Documentation -

        Class: VM

A virtual machine (or 'guest').

        Enums

        vm_power_state
        update_guidances
        on_softreboot_behavior
        on_normal_exit
        vm_operations
        on_crash_behaviour
        domain_type

        Fields

        enum on_crash_behaviour actions_after_crash [RO/constructor]
        enum on_normal_exit @@ -625,9 +625,9 @@ (session ref, VM ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vm_appliance/index.html b/new-docs/xen-api/classes/vm_appliance/index.html index 9f3bc6b2f..c1f1110ee 100644 --- a/new-docs/xen-api/classes/vm_appliance/index.html +++ b/new-docs/xen-api/classes/vm_appliance/index.html @@ -1,5 +1,5 @@ VM_appliance :: XAPI Toolstack Developer Documentation -

        Class: VM_appliance

        VM appliance

        Enums

        vm_appliance_operation

        Fields

        enum vm_appliance_operation set allowed_operations [RO/runtime]
        (string → enum vm_appliance_operation) map @@ -61,9 +61,9 @@ (session ref, VM_appliance ref, bool)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vm_guest_metrics/index.html b/new-docs/xen-api/classes/vm_guest_metrics/index.html index f00a08f41..69facb6ee 100644 --- a/new-docs/xen-api/classes/vm_guest_metrics/index.html +++ b/new-docs/xen-api/classes/vm_guest_metrics/index.html @@ -1,5 +1,5 @@ VM_guest_metrics :: XAPI Toolstack Developer Documentation -

        Class: VM_guest_metrics

        The metrics reported by the guest (as opposed to inferred from outside)

        Enums

        tristate_type

        Fields

        enum tristate_type can_use_hotplug_vbd [RO/runtime]
        enum tristate_type @@ -74,9 +74,9 @@ (session ref, VM_guest_metrics ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vm_metrics/index.html b/new-docs/xen-api/classes/vm_metrics/index.html index c57bcbc46..0b2263e0d 100644 --- a/new-docs/xen-api/classes/vm_metrics/index.html +++ b/new-docs/xen-api/classes/vm_metrics/index.html @@ -1,5 +1,5 @@ VM_metrics :: XAPI Toolstack Developer Documentation -

        Class: VM_metrics

        The metrics associated with a VM

        Enums

        domain_type

        Fields

        enum domain_type current_domain_type [RO/runtime]
        bool @@ -82,9 +82,9 @@ (session ref, VM_metrics ref, (string → string) map)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vmpp/index.html b/new-docs/xen-api/classes/vmpp/index.html index ab5b83844..bd31daf9a 100644 --- a/new-docs/xen-api/classes/vmpp/index.html +++ b/new-docs/xen-api/classes/vmpp/index.html @@ -1,5 +1,5 @@ VMPP :: XAPI Toolstack Developer Documentation -
        Removed

        Class: VMPP

        VM Protection Policy

        Enums

        vmpp_backup_type
        vmpp_backup_frequency
        vmpp_archive_frequency
        vmpp_archive_target_type

        Fields

        Removed
        (string → string) map alarm_config [RO/constructor]
        Removed
        enum vmpp_archive_frequency @@ -151,9 +151,9 @@ (session ref, VMPP ref, string)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vmss/index.html b/new-docs/xen-api/classes/vmss/index.html index 3aa7fce43..a2a287c02 100644 --- a/new-docs/xen-api/classes/vmss/index.html +++ b/new-docs/xen-api/classes/vmss/index.html @@ -1,5 +1,5 @@ VMSS :: XAPI Toolstack Developer Documentation -

        Class: VMSS

        VM Snapshot Schedule

        Enums

        vmss_frequency
        vmss_type

        Fields

        bool enabled [RW]
        enum vmss_frequency @@ -81,9 +81,9 @@ (session ref, VMSS ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vtpm/index.html b/new-docs/xen-api/classes/vtpm/index.html index 1afe77eff..bdf118ed8 100644 --- a/new-docs/xen-api/classes/vtpm/index.html +++ b/new-docs/xen-api/classes/vtpm/index.html @@ -1,5 +1,5 @@ VTPM :: XAPI Toolstack Developer Documentation -
        Prototype

        Class: VTPM

        A virtual TPM device

        Enums

        vtpm_operations
        persistence_backend

        Fields

        enum vtpm_operations set allowed_operations [RO/runtime]
        VM ref @@ -48,9 +48,9 @@ (session ref, VTPM ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/classes/vusb/index.html b/new-docs/xen-api/classes/vusb/index.html index a807ae8d0..ea96a820e 100644 --- a/new-docs/xen-api/classes/vusb/index.html +++ b/new-docs/xen-api/classes/vusb/index.html @@ -1,5 +1,5 @@ VUSB :: XAPI Toolstack Developer Documentation -

        Class: VUSB

        Describes the vusb device

        Enums

        vusb_operations

        Fields

        enum vusb_operations set allowed_operations [RO/runtime]
        (string → enum vusb_operations) map @@ -52,9 +52,9 @@ (session ref, VUSB ref)
        \ No newline at end of file + 
        \ No newline at end of file diff --git a/new-docs/xen-api/evolution/index.html b/new-docs/xen-api/evolution/index.html index 11c10f8d2..6dbf0cf0f 100644 --- a/new-docs/xen-api/evolution/index.html +++ b/new-docs/xen-api/evolution/index.html @@ -1,5 +1,5 @@ API evolution :: XAPI Toolstack Developer Documentation -
        \ No newline at end of file diff --git a/new-docs/xen-api/index.html b/new-docs/xen-api/index.html index 3f67d3dff..03239c761 100644 --- a/new-docs/xen-api/index.html +++ b/new-docs/xen-api/index.html @@ -1,10 +1,10 @@ XenAPI :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/xen-api/index.print.html b/new-docs/xen-api/index.print.html index d652aa55e..b15bbbcad 100644 --- a/new-docs/xen-api/index.print.html +++ b/new-docs/xen-api/index.print.html @@ -1,5 +1,5 @@ XenAPI :: XAPI Toolstack Developer Documentation -

        Subsections of XenAPI

        XenAPI Basics

        This document contains a description of the Xen Management API – an interface for remotely configuring and controlling virtualised guests running on a Xen-enabled host.

The XenAPI is presented here as a set of Remote Procedure Calls, with a wire format based upon XML-RPC. [...] other_config map of the newly-created VM. This field can be updated by calling its getter (other_config <- VM.get_other_config(session, new_vm_ref)) and then its setter (VM.set_other_config(session, new_vm_ref, other_config)) with the modified other_config map.

• At this stage the object referred to by new_vm_ref is still a template (just like the VM object referred to by t_ref, from which it was cloned). To make new_vm_ref into a VM object we need to call VM.provision(session, new_vm_ref). When this call returns, the new_vm_ref object will have had its is_a_template field set to false, indicating that new_vm_ref now refers to a regular VM ready for starting.

      • Note

The provision operation may take a few minutes, as it is during this call that the template’s disk images are created. In the case of the Debian template, the newly created disks are also at this stage populated with a Debian root filesystem.

        Taking the VM through a start/suspend/resume/stop cycle

Now that we have an object reference representing our newly-installed VM, it is trivial to take it through a few lifecycle operations, as the sketch after this list shows:

        • To start our VM we can just call VM.start(session, new_vm_ref)

        • After it’s running, we can suspend it by calling VM.suspend(session, new_vm_ref),

        • and then resume it by calling VM.resume(session, new_vm_ref).

• We can call VM.shutdown(session, new_vm_ref) to shut down the VM cleanly.
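Expressed over the wire, the cycle looks roughly like the following sketch in Python, using the standard xmlrpc.client module. It assumes an xmlrpc.client.ServerProxy named proxy and an authenticated session reference (a login sketch appears in the next subsection). Note that the concrete VM.start and VM.resume calls take two extra boolean parameters, start_paused and force, which the abbreviated forms above omit, and that VM.clean_shutdown is used here for the clean-shutdown step.

def check(result):
    # Every XenAPI response is a struct: 'Status', plus 'Value' on success
    # or 'ErrorDescription' on failure.
    if result["Status"] != "Success":
        raise RuntimeError(result.get("ErrorDescription"))
    return result["Value"]

check(proxy.VM.start(session, new_vm_ref, False, False))   # start_paused, force
check(proxy.VM.suspend(session, new_vm_ref))
check(proxy.VM.resume(session, new_vm_ref, False, False))  # start_paused, force
check(proxy.VM.clean_shutdown(session, new_vm_ref))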

        Logging out

        Once an application is finished interacting with a XenServer Host it is good practice to call Session.logout(session). This invalidates the session reference (so it cannot be used in subsequent API calls) and simultaneously deallocates server-side memory used to store the session object.

Although inactive sessions will eventually time out, the server has a hardcoded limit of 500 concurrent sessions for each username or originator. Once this limit has been reached, fresh logins will evict the session objects that have been used least recently, causing their associated session references to become invalid. For successful interoperability with other applications concurrently accessing the server, the best policy is:

        • Choose a string that identifies your application and its version.

        • Create a single session at start-of-day, using that identifying string for the originator parameter to Session.login_with_password.

• Use this session throughout the application (note that sessions can be used across multiple separate client-server network connections) and then explicitly log out when possible.

If a poorly written client leaks sessions or otherwise exceeds the limit, then as long as the client uses an appropriate originator argument, it will be easily identifiable from the XenServer logs and XenServer will destroy the longest-idle sessions of the rogue client only; this may cause problems for that client but not for other clients. If the misbehaving client did not specify an originator, it would be harder to identify and would cause the premature destruction of sessions of any clients that also did not specify an originator.
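A minimal sketch of this policy in Python, using the standard xmlrpc.client module; the URL, credentials and originator string below are illustrative assumptions:

import xmlrpc.client

HOST_URL = "https://xenserver.example.com"  # assumed management URL
ORIGINATOR = "example-app/1.0"              # identifies this application

# (TLS certificate handling elided for brevity.)
proxy = xmlrpc.client.ServerProxy(HOST_URL)

# One session at start-of-day; the originator is the fourth argument.
result = proxy.session.login_with_password("root", "password", "1.0", ORIGINATOR)
if result["Status"] != "Success":
    raise RuntimeError(result.get("ErrorDescription"))
session = result["Value"]

try:
    pass  # ... use `session` for every subsequent call, on any connection ...
finally:
    proxy.session.logout(session)  # free the server-side session object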

        Install and start example: summary

We have seen how the API can be used to install a VM from a XenServer template and perform a number of lifecycle operations on it. You will note that the number of calls we had to make in order to effect these operations was small, as the sketch following this list recaps:

        • One call to acquire a session: Session.login_with_password()

        • One call to query the VM (and template) objects present on the XenServer installation: VM.get_all_records(). Recall that we used the information returned from this call to select a suitable template to install from.

• Four calls to install a VM from our chosen template: VM.clone(), followed by the getter and setter of the other_config field to specify where to create the disk images of the template, and then VM.provision().

        • One call to start the resultant VM: VM.start() (and similarly other single calls to suspend, resume and shutdown accordingly)

• And then one call to log out: Session.logout()
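Putting those calls together gives a rough end-to-end sketch, reusing the check() helper from the lifecycle sketch. The choice of template t_ref and the exact other_config edit are deliberately left schematic, since the key to set is template-specific.

records = check(proxy.VM.get_all_records(session))
# ... choose a suitable template reference `t_ref` from `records` ...

new_vm_ref = check(proxy.VM.clone(session, t_ref, "my first VM"))

other_config = check(proxy.VM.get_other_config(session, new_vm_ref))
# ... edit `other_config` to say where the disk images should be created ...
check(proxy.VM.set_other_config(session, new_vm_ref, other_config))

check(proxy.VM.provision(session, new_vm_ref))            # template -> real VM
check(proxy.VM.start(session, new_vm_ref, False, False))  # start_paused, force
check(proxy.session.logout(session))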

        The take-home message here is that, although the API as a whole is complex and fully featured, common tasks (such as creating and performing lifecycle operations on VMs) are very straightforward to perform, requiring only a small number of simple API calls. Keep this in mind while you study the next section which may, on first reading, appear a little daunting!

        Object Model Overview

        This section gives a high-level overview of the object model of the API. A more detailed description of the parameters and methods of each class outlined here can be found in the XenServer API Reference document.

        We start by giving a brief outline of some of the core classes that make up the API. (Don’t worry if these definitions seem somewhat abstract in their initial presentation; the textual description in subsequent sections, and the code-sample walk through in the next Chapter will help make these concepts concrete.)

Class | Description
VM | A VM object represents a particular virtual machine instance on a XenServer Host or Resource Pool. Example methods include start, suspend, pool_migrate; example parameters include power_state, memory_static_max, and name_label. (In the previous section we saw how the VM class is used to represent both templates and regular VMs.)
Host | A host object represents a physical host in a XenServer pool. Example methods include reboot and shutdown. Example parameters include software_version, hostname, and [IP] address.
VDI | A VDI object represents a Virtual Disk Image. Virtual Disk Images can be attached to VMs, in which case a block device appears inside the VM through which the bits encapsulated by the Virtual Disk Image can be read and written. Example methods of the VDI class include “resize” and “clone”. Example fields include “virtual_size” and “sharable”. (When we called VM.provision on the VM template in our previous example, some VDI objects were automatically created to represent the newly created disks, and attached to the VM object.)
SR | An SR (Storage Repository) aggregates a collection of VDIs and encapsulates the properties of physical storage on which the VDIs’ bits reside. Example parameters include type (which determines the storage-specific driver a XenServer installation uses to read/write the SR’s VDIs) and physical_utilisation; example methods include scan (which invokes the storage-specific driver to acquire a list of the VDIs contained within the SR and the properties of these VDIs) and create (which initializes a block of physical storage so it is ready to store VDIs).
Network | A network object represents a layer-2 network that exists in the environment in which the XenServer Host instance lives. Since XenServer does not manage networks directly this is a lightweight class that serves merely to model physical and virtual network topology. VM and Host objects that are attached to a particular Network object (by virtue of VIF and PIF instances – see below) can send network packets to each other.

        At this point, readers who are finding this enumeration of classes rather terse may wish to skip to the code walk-throughs of the next chapter: there are plenty of useful applications that can be written using only a subset of the classes already described! For those who wish to continue this description of classes in the abstract, read on.

        On top of the classes listed above, there are 4 more that act as connectors, specifying relationships between VMs and Hosts, and Storage and Networks. The first 2 of these classes that we will consider, VBD and VIF, determine how VMs are attached to virtual disks and network objects respectively:

Class | Description
VBD | A VBD (Virtual Block Device) object represents an attachment between a VM and a VDI. When a VM is booted its VBD objects are queried to determine which disk images (VDIs) should be attached. Example methods of the VBD class include “plug” (which hot plugs a disk device into a running VM, making the specified VDI accessible therein) and “unplug” (which hot unplugs a disk device from a running guest); example fields include “device” (which determines the device name inside the guest under which the specified VDI will be made accessible).
VIF | A VIF (Virtual network InterFace) object represents an attachment between a VM and a Network object. When a VM is booted its VIF objects are queried to determine which network devices should be created. Example methods of the VIF class include “plug” (which hot plugs a network device into a running VM) and “unplug” (which hot unplugs a network device from a running guest).

        The second set of “connector classes” that we will consider determine how Hosts are attached to Networks and Storage.

Class | Description
PIF | A PIF (Physical InterFace) object represents an attachment between a Host and a Network object. If a host is connected to a Network (over a PIF) then packets from the specified host can be transmitted/received by the corresponding host. Example fields of the PIF class include “device” (which specifies the device name to which the PIF corresponds – e.g. eth0) and “MAC” (which specifies the MAC address of the underlying NIC that a PIF represents). Note that PIFs abstract both physical interfaces and VLANs (the latter distinguished by the existence of a positive integer in the “VLAN” field).
PBD | A PBD (Physical Block Device) object represents an attachment between a Host and an SR (Storage Repository) object. Fields include “currently-attached” (which specifies whether the chunk of storage represented by the specified SR object is currently available to the host) and “device_config” (which specifies storage-driver specific parameters that determine how the low-level storage devices are configured on the specified host – e.g. in the case of an SR rendered on an NFS filer, device_config may specify the host-name of the filer and the path on the filer in which the SR files live).

Graphical overview of API classes for managing VMs, Hosts, Storage and Networking

        The figure above presents a graphical overview of the API classes involved in managing VMs, Hosts, Storage and Networking. From this diagram, the symmetry between storage and network configuration, and also the symmetry between virtual machine and host configuration is plain to see.

        Working with VIFs and VBDs

        In this section we walk through a few more complex scenarios, describing informally how various tasks involving virtual storage and network devices can be accomplished using the API.

        Creating disks and attaching them to VMs

        Let’s start by considering how to make a new blank disk image and attach it to a running VM. We will assume that we already have ourselves a running VM, and we know its corresponding API object reference (e.g. we may have created this VM using the procedure described in the previous section, and had the server return its reference to us.) We will also assume that we have authenticated with the XenServer installation and have a corresponding session reference. Indeed in the rest of this chapter, for the sake of brevity, we will stop mentioning sessions altogether.

        Creating a new blank disk image

        The first step is to instantiate the disk image on physical storage. We do this by calling VDI.create(). The VDI.create call takes a number of parameters, including:

        • name_label and name_description: a human-readable name/description for the disk (e.g. for convenient display in the UI etc.). These fields can be left blank if desired.

        • SR: the object reference of the Storage Repository representing the physical storage in which the VDI’s bits will be placed.

        • read_only: setting this field to true indicates that the VDI can only be attached to VMs in a read-only fashion. (Attempting to attach a VDI with its read_only field set to true in a read/write fashion results in error.)

        Invoking the VDI.create call causes the XenServer installation to create a blank disk image on physical storage, create an associated VDI object (the datamodel instance that refers to the disk image on physical storage) and return a reference to this newly created VDI object.

        The way in which the disk image is represented on physical storage depends on the type of the SR in which the created VDI resides. For example, if the SR is of type “lvm” then the new disk image will be rendered as an LVM volume; if the SR is of type “nfs” then the new disk image will be a sparse VHD file created on an NFS filer. (You can query the SR type through the API using the SR.get_type() call.)

        Note

        Some SR types might round up the virtual-size value to make it divisible by a configured block size.
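A sketch of the call, under the same proxy/session/check() conventions as the earlier examples (sr_ref is an assumed SR reference). VDI.create takes a single record argument, and 64-bit integers such as virtual_size travel as strings over XML-RPC.

vdi_ref = check(proxy.VDI.create(session, {
    "name_label": "my blank disk",
    "name_description": "created through the API",
    "SR": sr_ref,
    "virtual_size": str(8 * 1024 ** 3),  # 8 GiB, encoded as a string
    "type": "user",
    "sharable": False,
    "read_only": False,
    "other_config": {},
    "xenstore_data": {},
    "sm_config": {},
    "tags": [],
}))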

        Attaching the disk image to a VM

        So far we have a running VM (that we assumed the existence of at the start of this example) and a fresh VDI that we just created. Right now, these are both independent objects that exist on the XenServer Host, but there is nothing linking them together. So our next step is to create such a link, associating the VDI with our VM.

        The attachment is formed by creating a new “connector” object called a VBD (Virtual Block Device). To create our VBD we invoke the VBD.create() call. The VBD.create() call takes a number of parameters including:

        • VM - the object reference of the VM to which the VDI is to be attached

        • VDI - the object reference of the VDI that is to be attached

        • mode - specifies whether the VDI is to be attached in a read-only or a read-write fashion

        • userdevice - specifies the block device inside the guest through which applications running inside the VM will be able to read/write the VDI’s bits.

        • type - specifies whether the VDI should be presented inside the VM as a regular disk or as a CD. (Note that this particular field has more meaning for Windows VMs than it does for Linux VMs, but we will not explore this level of detail in this chapter.)

        Invoking VBD.create makes a VBD object on the XenServer installation and returns its object reference. However, this call in itself does not have any side-effects on the running VM (that is, if you go and look inside the running VM you will see that the block device has not been created). The fact that the VBD object exists but that the block device in the guest is not active, is reflected by the fact that the VBD object’s currently_attached field is set to false.
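A sketch of the call (vm_ref is the assumed running VM; vdi_ref comes from the VDI.create sketch above; the remaining record fields are filled with unremarkable defaults):

vbd_ref = check(proxy.VBD.create(session, {
    "VM": vm_ref,
    "VDI": vdi_ref,
    "userdevice": "1",  # device slot inside the guest
    "mode": "RW",       # read-write attachment
    "type": "Disk",     # present as a regular disk rather than a CD
    "bootable": False,
    "empty": False,
    "unpluggable": True,
    "other_config": {},
    "qos_algorithm_type": "",
    "qos_algorithm_params": {},
}))
# No side effect inside the guest yet: currently_attached is still false.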

A VM object with 2 associated VDIs

        For expository purposes, the figure above presents a graphical example that shows the relationship between VMs, VBDs, VDIs and SRs. In this instance a VM object has 2 attached VDIs: there are 2 VBD objects that form the connections between the VM object and its VDIs; and the VDIs reside within the same SR.

        Hotplugging the VBD

        If we rebooted the VM at this stage then, after rebooting, the block device corresponding to the VBD would appear: on boot, XenServer queries all VBDs of a VM and actively attaches each of the corresponding VDIs.

        Rebooting the VM is all very well, but recall that we wanted to attach a newly created blank disk to a running VM. This can be achieved by invoking the plug method on the newly created VBD object. When the plug call returns successfully, the block device to which the VBD relates will have appeared inside the running VM – i.e. from the perspective of the running VM, the guest operating system is led to believe that a new disk device has just been hot plugged. Mirroring this fact in the managed world of the API, the currently_attached field of the VBD is set to true.

        Unsurprisingly, the VBD plug method has a dual called “unplug”. Invoking the unplug method on a VBD object causes the associated block device to be hot unplugged from a running VM, setting the currently_attached field of the VBD object to false accordingly.
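In code, continuing the sketch above:

check(proxy.VBD.plug(session, vbd_ref))    # block device appears in the guest
assert check(proxy.VBD.get_currently_attached(session, vbd_ref))

check(proxy.VBD.unplug(session, vbd_ref))  # block device disappears again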

        Creating and attaching Network Devices to VMs

        The API calls involved in configuring virtual network interfaces in VMs are similar in many respects to the calls involved in configuring virtual disk devices. For this reason we will not run through a full example of how one can create network interfaces using the API object-model; instead we will use this section just to outline briefly the symmetry between virtual networking device and virtual storage device configuration.

The networking analogue of the VBD class is the VIF class. Just as a VBD is the API representation of a block device inside a VM, a VIF (Virtual network InterFace) is the API representation of a network device inside a VM. Whereas VBDs associate VM objects with VDI objects, VIFs associate VM objects with Network objects. Just like VBDs, VIFs have a currently_attached field that indicates whether the network device (inside the guest) associated with the VIF is currently active. And as we saw with VBDs, at VM boot-time the VIFs of the VM are queried and a corresponding network device for each is created inside the booting VM. Similarly, VIFs also have plug and unplug methods for hot plugging/unplugging network devices in/out of running VMs.

        Host configuration for networking and storage

        We have seen that the VBD and VIF classes are used to manage configuration of block devices and network devices (respectively) inside VMs. To manage host configuration of storage and networking there are two analogous classes: PBD (Physical Block Device) and PIF (Physical [network] InterFace).

        Host storage configuration: PBDs

Let us start by considering the PBD class. A PBD.create() call takes a number of parameters including:

Parameter | Description
host | physical machine on which the PBD is available
SR | the Storage Repository that the PBD connects to
device_config | a string-to-string map that is provided to the host’s SR-backend-driver, containing the low-level parameters required to configure the physical storage device(s) on which the SR is to be realized. The specific contents of the device_config field depend on the type of the SR to which the PBD is connected. (Executing xe sm-list will show a list of possible SR types; the configuration field in this enumeration specifies the device_config parameters that each SR type expects.)

        For example, imagine we have an SR object s of type “nfs” (representing a directory on an NFS filer within which VDIs are stored as VHD files); and let’s say that we want a host, h, to be able to access s. In this case we invoke PBD.create() specifying host h, SR s, and a value for the device_config parameter that is the following map:

        ("server", "my_nfs_server.example.com"), ("serverpath", "/scratch/mysrs/sr1")

        This tells the XenServer Host that SR s is accessible on host h, and further that to access SR s, the host needs to mount the directory /scratch/mysrs/sr1 on the NFS server named my_nfs_server.example.com.

Like VBD objects, PBD objects also have a field called currently_attached. Storage repositories can be attached to and detached from a given host by invoking the PBD.plug and PBD.unplug methods respectively.
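A sketch of the whole exchange, with h and s standing for the assumed host and SR references from the example, and using the earlier proxy/session/check() conventions:

pbd_ref = check(proxy.PBD.create(session, {
    "host": h,
    "SR": s,
    "device_config": {
        "server": "my_nfs_server.example.com",
        "serverpath": "/scratch/mysrs/sr1",
    },
    "other_config": {},
}))
check(proxy.PBD.plug(session, pbd_ref))  # currently_attached becomes true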

        Host networking configuration: PIFs

        Host network configuration is specified by virtue of PIF objects. If a PIF object connects a network object, n, to a host object h, then the network corresponding to n is bridged onto a physical interface (or a physical interface plus a VLAN tag) specified by the fields of the PIF object.

For example, imagine a PIF object exists connecting host h to a network n, and that the device field of the PIF object is set to eth0. This means that all packets on network n are bridged to the NIC in the host corresponding to host network device eth0.

        XML-RPC notes

        Datetimes

The API deviates from the XML-RPC specification in its handling of datetimes. The API appends a “Z” to the end of datetime strings, which is meant to indicate that the time is expressed in UTC.
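For example, a parsing sketch in Python; the compact date-plus-“Z” layout shown is an assumption, so adjust the format string to match the values your server actually returns:

from datetime import datetime, timezone

stamp = "20240101T12:00:00Z"  # assumed example of a XenAPI datetime string
parsed = datetime.strptime(stamp, "%Y%m%dT%H:%M:%SZ").replace(tzinfo=timezone.utc)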

        API evolution

All APIs evolve as bugs are fixed, new features are added and features are removed – the XenAPI is no exception. This document lists policies describing how the XenAPI evolves over time.


        The goals of XenAPI evolution are:

        • to allow bugs to be fixed efficiently;
        • to allow new, innovative features to be added easily;
        • to keep old, unmodified clients working as much as possible; and
• where backwards-incompatible changes are to be made, to publish this information early to enable affected parties to give timely feedback.

        Background

In this document, the term XenAPI refers to the XMLRPC-derived wire protocol used by xapi. The XenAPI has objects which each have fields and @@ -736,4 +736,4 @@ [VM state diagram: running → halted (clean shutdown / hard shutdown); running → paused (pause); halted → destroyed (destroy)]

The figure above shows the states that a VM can be in and the API calls that can be used to move the VM between these states.


            XenCenter

            XenCenter uses some conventions on top of the XenAPI:

            Internationalization for SR names

            The SRs created at install time now have an other_config key indicating how their names may be internationalized.

            other_config["i18n-key"] may be one of

            • local-hotplug-cd

            • local-hotplug-disk

            • local-storage

            • xenserver-tools

            Additionally, other_config["i18n-original-value-<field name>"] gives the value of that field when the SR was created. If XenCenter sees a record where SR.name_label equals other_config["i18n-original-value-name_label"] (that is, the record has not changed since it was created during XenServer installation), then internationalization will be applied. In other words, XenCenter will disregard the current contents of that field, and instead use a value appropriate to the user’s own language.

If you change SR.name_label for your own purpose, then it is no longer the same as other_config["i18n-original-value-name_label"]. Therefore, XenCenter does not apply internationalization, and instead preserves your given name.
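A sketch of the client-side decision, under the earlier proxy/session/check() conventions; sr_ref is an assumed SR reference and localize is a hypothetical lookup from i18n-key to a translated name:

record = check(proxy.SR.get_record(session, sr_ref))
oc = record["other_config"]
if record["name_label"] == oc.get("i18n-original-value-name_label"):
    display_name = localize(oc["i18n-key"])  # unchanged since install: localize
else:
    display_name = record["name_label"]      # user-renamed: keep the given name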

            Hiding objects from XenCenter

Networks, PIFs, and VMs can be hidden from XenCenter by adding the key HideFromXenCenter=true to the other_config parameter for the object. This capability is intended for ISVs who know what they are doing, not for general use by everyday users. For example, you might want to hide certain VMs because they are cloned VMs that shouldn’t be used directly by general users in your environment.

            In XenCenter, hidden Networks, PIFs, and VMs can be made visible, using the View menu.
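For example, a sketch that hides a VM and later reveals it again, using the standard map-manipulation messages on other_config (vm_ref is assumed):

check(proxy.VM.add_to_other_config(session, vm_ref, "HideFromXenCenter", "true"))
# ... later, to reveal the VM again, remove the key:
check(proxy.VM.remove_from_other_config(session, vm_ref, "HideFromXenCenter"))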

            \ No newline at end of file diff --git a/new-docs/xen-api/overview/index.html b/new-docs/xen-api/overview/index.html index 9dfb88ea8..cee8908ef 100644 --- a/new-docs/xen-api/overview/index.html +++ b/new-docs/xen-api/overview/index.html @@ -1,5 +1,5 @@ Overview of the XenAPI :: XAPI Toolstack Developer Documentation -
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.250.0/index.html b/new-docs/xen-api/releases/1.250.0/index.html index d7b273922..b0ca3566b 100644 --- a/new-docs/xen-api/releases/1.250.0/index.html +++ b/new-docs/xen-api/releases/1.250.0/index.html @@ -1,10 +1,10 @@ XAPI 1.250.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.250.0

            Code name: "1.250.0".

            Changes

Change | Element | Description
Published field | tunnel.protocol | Add protocol field to tunnel
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.257.0/index.html b/new-docs/xen-api/releases/1.257.0/index.html index 2b3b6454e..5e406665d 100644 --- a/new-docs/xen-api/releases/1.257.0/index.html +++ b/new-docs/xen-api/releases/1.257.0/index.html @@ -1,10 +1,10 @@ XAPI 1.257.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.257.0

            Code name: "1.257.0".

            Changes

Change | Element | Description
Changed class | VM | possibility to create a VM in suspended mode with a suspend_VDI set
Changed field | VBD.currently_attached | Made StaticRO to allow plugged VIF and VBD creation for Suspended VM
Changed field | VBD.device | Become static to allow plugged VBD creation for Suspended VM
Changed field | VIF.currently_attached | Made StaticRO to allow plugged VIF and VBD creation for Suspended VM
Changed field | VM.last_booted_record | Become static to allow Suspended VM creation
Changed field | VM.power_state | Made StaticRO to allow Suspended VM creation
Changed field | VM.suspend_VDI | Become static to allow Suspended VM creation
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.271.0/index.html b/new-docs/xen-api/releases/1.271.0/index.html index c80fd86ad..d6eb9740b 100644 --- a/new-docs/xen-api/releases/1.271.0/index.html +++ b/new-docs/xen-api/releases/1.271.0/index.html @@ -1,10 +1,10 @@ XAPI 1.271.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.271.0

            Code name: "1.271.0".

            Changes

Change | Element | Description
Published message | host.get_sched_gran | Gets xen's sched-gran on a host
Published message | host.set_sched_gran | Sets xen's sched-gran on a host. See: https://xenbits.xen.org/docs/unstable/misc/xen-command-line.html#sched-gran-x86
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.290.0/index.html b/new-docs/xen-api/releases/1.290.0/index.html index 2afda2729..39a86eabb 100644 --- a/new-docs/xen-api/releases/1.290.0/index.html +++ b/new-docs/xen-api/releases/1.290.0/index.html @@ -1,10 +1,10 @@ XAPI 1.290.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.290.0

            Code name: "1.290.0".

            Changes

Change | Element | Description
Published field | pool.tls_verification_enabled | True iff TLS certificate verification is enabled
Published message | host.emergency_disable_tls_verification | Disable TLS verification for this host only
Published message | host.reset_server_certificate | Delete the current TLS server certificate and replace by a new, self-signed one. This should only be used with extreme care.
Published message | pool.enable_tls_verification | Enable TLS server certificate verification
Published message | pool.install_ca_certificate | Install TLS CA certificate
Published message | pool.uninstall_ca_certificate | Uninstall TLS CA certificate
Deprecated field | pool.wlb_verify_cert | Deprecated: to enable TLS verification use Pool.enable_tls_verification instead
Deprecated message | pool.certificate_install | Use Pool.install_ca_certificate instead
Deprecated message | pool.certificate_list | Use openssl to inspect certificate
Deprecated message | pool.certificate_uninstall | Use Pool.uninstall_ca_certificate instead
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.294.0/index.html b/new-docs/xen-api/releases/1.294.0/index.html index 9ea4f55f8..32878ad73 100644 --- a/new-docs/xen-api/releases/1.294.0/index.html +++ b/new-docs/xen-api/releases/1.294.0/index.html @@ -1,10 +1,10 @@ XAPI 1.294.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.294.0

            Code name: "1.294.0".

            Changes

Change | Element | Description
Published field | Certificate.name | The name of the certificate, only present on certificates of type 'ca'
Published field | Certificate.type | The type of the certificate, either 'ca', 'host' or 'host_internal'
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.297.0/index.html b/new-docs/xen-api/releases/1.297.0/index.html index 4d91469e4..a71b3e2e0 100644 --- a/new-docs/xen-api/releases/1.297.0/index.html +++ b/new-docs/xen-api/releases/1.297.0/index.html @@ -1,10 +1,10 @@ XAPI 1.297.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.297.0

            Code name: "1.297.0".

            Changes

Change | Element | Description
Extended message | host.evacuate | Enable migration network selection.
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.298.0/index.html b/new-docs/xen-api/releases/1.298.0/index.html index 817521b1e..28a4fe4b1 100644 --- a/new-docs/xen-api/releases/1.298.0/index.html +++ b/new-docs/xen-api/releases/1.298.0/index.html @@ -1,10 +1,10 @@ XAPI 1.298.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.298.0

            Code name: "1.298.0".

            Changes

Change | Element | Description
Published message | host.emergency_reenable_tls_verification | Reenable TLS verification for this host only
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.301.0/index.html b/new-docs/xen-api/releases/1.301.0/index.html index 6f02530d9..ba6f6bd12 100644 --- a/new-docs/xen-api/releases/1.301.0/index.html +++ b/new-docs/xen-api/releases/1.301.0/index.html @@ -1,10 +1,10 @@ XAPI 1.301.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.301.0

            Code name: "1.301.0".

            Changes

Change | Element | Description
Published class | Repository | Repository for updates
Published field | Repository.binary_url | Base URL of binary packages in this repository
Published field | Repository.hash | SHA256 checksum of latest updateinfo.xml.gz in this repository if its 'update' is true
Published field | Repository.source_url | Base URL of source packages in this repository
Published field | Repository.up_to_date | True if all hosts in the pool are up to date with this repository
Published field | Repository.update | True if updateinfo.xml in this repository needs to be parsed
Published field | Repository.uuid | Unique identifier/object reference
Published field | pool.repositories | The set of currently enabled repositories
Published message | Repository.forget | Remove the repository record from the database
Published message | Repository.introduce | Add the configuration for a new repository
Published message | host.apply_updates | Apply updates from the currently enabled repository on a host
Published message | pool.add_repository | Add a repository to the enabled set
Published message | pool.remove_repository | Remove a repository from the enabled set
Published message | pool.set_repositories | Set the enabled set of repositories
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.303.0/index.html b/new-docs/xen-api/releases/1.303.0/index.html index bae72fad1..7f3a0f572 100644 --- a/new-docs/xen-api/releases/1.303.0/index.html +++ b/new-docs/xen-api/releases/1.303.0/index.html @@ -1,10 +1,10 @@ XAPI 1.303.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.303.0

            Code name: "1.303.0".

            Changes

Change | Element | Description
Published field | VM.pending_guidances | The set of pending mandatory guidances after applying updates, which must be applied, as otherwise there may be e.g. VM failures
Published field | host.pending_guidances | The set of pending mandatory guidances after applying updates, which must be applied, as otherwise there may be e.g. VM failures
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.304.0/index.html b/new-docs/xen-api/releases/1.304.0/index.html index eefd53476..04a7350e8 100644 --- a/new-docs/xen-api/releases/1.304.0/index.html +++ b/new-docs/xen-api/releases/1.304.0/index.html @@ -1,10 +1,10 @@ XAPI 1.304.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.304.0

            Code name: "1.304.0".

            Changes

Change | Element | Description
Published message | pool.check_update_readiness | Check if the pool is ready to be updated. If not, report the reasons.
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.307.0/index.html b/new-docs/xen-api/releases/1.307.0/index.html index 672b27eca..2b27861ef 100644 --- a/new-docs/xen-api/releases/1.307.0/index.html +++ b/new-docs/xen-api/releases/1.307.0/index.html @@ -1,10 +1,10 @@ XAPI 1.307.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.307.0

            Code name: "1.307.0".

            Changes

Change | Element | Description
Published message | host.refresh_server_certificate | Replace the internal self-signed host certificate with a new one.
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.313.0/index.html b/new-docs/xen-api/releases/1.313.0/index.html index da1b04fa3..1c5c3d7a0 100644 --- a/new-docs/xen-api/releases/1.313.0/index.html +++ b/new-docs/xen-api/releases/1.313.0/index.html @@ -1,10 +1,10 @@ XAPI 1.313.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.313.0

            Code name: "1.313.0".

            Changes

Change | Element | Description
Published field | host.tls_verification_enabled | True if this host has TLS verification enabled
Extended field | message.cls | Added Certificate class
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.318.0/index.html b/new-docs/xen-api/releases/1.318.0/index.html index 5849ed4d6..13ef5dd74 100644 --- a/new-docs/xen-api/releases/1.318.0/index.html +++ b/new-docs/xen-api/releases/1.318.0/index.html @@ -1,10 +1,10 @@ XAPI 1.318.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.318.0

            Code name: "1.318.0".

            Changes

Change | Element | Description
Published field | pool.client_certificate_auth_enabled | True if authentication by TLS client certificates is enabled
Published field | pool.client_certificate_auth_name | The name (CN/SAN) that an incoming client certificate must have to allow authentication
Published message | pool.disable_client_certificate_auth | Disable client certificate authentication on the pool
Published message | pool.enable_client_certificate_auth | Enable client certificate authentication on the pool
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/1.329.0/index.html b/new-docs/xen-api/releases/1.329.0/index.html index f64ce0b6a..02dec6e00 100644 --- a/new-docs/xen-api/releases/1.329.0/index.html +++ b/new-docs/xen-api/releases/1.329.0/index.html @@ -1,10 +1,10 @@ XAPI 1.329.0 :: XAPI Toolstack Developer Documentation -

            XAPI 1.329.0

            Code name: "1.329.0".

            Changes

Change | Element | Description
Published message | pool.sync_updates | Sync with the enabled repository
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/21.2.0/index.html b/new-docs/xen-api/releases/21.2.0/index.html index b7b42fe3b..3beefede6 100644 --- a/new-docs/xen-api/releases/21.2.0/index.html +++ b/new-docs/xen-api/releases/21.2.0/index.html @@ -1,10 +1,10 @@ XAPI 21.2.0 :: XAPI Toolstack Developer Documentation -

            XAPI 21.2.0

            Code name: "21.2.0".

            Changes

Change | Element | Description
Published field | session.client_certificate | indicates whether this session was authenticated using a client certificate
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/21.3.0/index.html b/new-docs/xen-api/releases/21.3.0/index.html index df8489809..1d6ee771a 100644 --- a/new-docs/xen-api/releases/21.3.0/index.html +++ b/new-docs/xen-api/releases/21.3.0/index.html @@ -1,10 +1,10 @@ XAPI 21.3.0 :: XAPI Toolstack Developer Documentation -

            XAPI 21.3.0

            Code name: "21.3.0".

            Changes

Change | Element | Description
Published field | pool.repository_proxy_password | Password for the authentication of the proxy used in syncing with the enabled repositories
Published field | pool.repository_proxy_url | URL of the proxy used in syncing with the enabled repositories
Published field | pool.repository_proxy_username | Username for the authentication of the proxy used in syncing with the enabled repositories
Published message | pool.configure_repository_proxy | Configure proxy for RPM package repositories.
Published message | task.set_error_info | Set the task error info
Published message | task.set_result | Set the task result
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/21.4.0/index.html b/new-docs/xen-api/releases/21.4.0/index.html index 9c852162c..bb223e28d 100644 --- a/new-docs/xen-api/releases/21.4.0/index.html +++ b/new-docs/xen-api/releases/21.4.0/index.html @@ -1,10 +1,10 @@ XAPI 21.4.0 :: XAPI Toolstack Developer Documentation -

            XAPI 21.4.0

            Code name: "21.4.0".

            Changes

Change | Element | Description
Published message | pool.disable_repository_proxy | Disable the proxy for RPM package repositories.
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.12.0/index.html b/new-docs/xen-api/releases/22.12.0/index.html index ae5c5fe91..5fe46beb3 100644 --- a/new-docs/xen-api/releases/22.12.0/index.html +++ b/new-docs/xen-api/releases/22.12.0/index.html @@ -1,10 +1,10 @@ XAPI 22.12.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.12.0

            Code name: "22.12.0".

            Changes

Change | Element | Description
Prototyped field | Repository.gpgkey_path |
Prototyped message | Repository.set_gpgkey_path |
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.16.0/index.html b/new-docs/xen-api/releases/22.16.0/index.html index bab706fb6..36c3067fd 100644 --- a/new-docs/xen-api/releases/22.16.0/index.html +++ b/new-docs/xen-api/releases/22.16.0/index.html @@ -1,10 +1,10 @@ XAPI 22.16.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.16.0

            Code name: "22.16.0".

            Changes

            Change | Element | Description
            Published message | pool.set_uefi_certificates | Set the UEFI certificates for a pool and all its hosts. Deprecated: use set_custom_uefi_certificates instead
            Changed field | pool.uefi_certificates | Became StaticRO to be editable through new method
            Deprecated field | host.uefi_certificates | Use Pool.uefi_certificates instead
            Deprecated message | host.set_uefi_certificates | Use Pool.set_uefi_certificates instead
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.19.0/index.html b/new-docs/xen-api/releases/22.19.0/index.html index 71142891b..86a347728 100644 --- a/new-docs/xen-api/releases/22.19.0/index.html +++ b/new-docs/xen-api/releases/22.19.0/index.html @@ -1,10 +1,10 @@ XAPI 22.19.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.19.0

            Code name: "22.19.0".

            Changes

            Change | Element | Description
            Prototyped message | message.destroy_many
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.20.0/index.html b/new-docs/xen-api/releases/22.20.0/index.html index f955f551d..664fba806 100644 --- a/new-docs/xen-api/releases/22.20.0/index.html +++ b/new-docs/xen-api/releases/22.20.0/index.html @@ -1,10 +1,10 @@ XAPI 22.20.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.20.0

            Code name: "22.20.0".

            Changes

            Change | Element | Description
            Prototyped field | host.last_software_update
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.26.0/index.html b/new-docs/xen-api/releases/22.26.0/index.html index 712d213f8..1adf76000 100644 --- a/new-docs/xen-api/releases/22.26.0/index.html +++ b/new-docs/xen-api/releases/22.26.0/index.html @@ -1,10 +1,10 @@ XAPI 22.26.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.26.0

            Code name: "22.26.0".

            Changes

            Change | Element | Description
            Prototyped class | VTPM
            Prototyped field | VTPM.is_protected
            Prototyped field | VTPM.is_unique
            Prototyped field | VTPM.persistence_backend
            Prototyped message | VTPM.create
            Prototyped message | VTPM.destroy
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.27.0/index.html b/new-docs/xen-api/releases/22.27.0/index.html index 28deec748..09fb042ef 100644 --- a/new-docs/xen-api/releases/22.27.0/index.html +++ b/new-docs/xen-api/releases/22.27.0/index.html @@ -1,10 +1,10 @@ XAPI 22.27.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.27.0

            Code name: "22.27.0".

            Changes

            Change | Element | Description
            Prototyped field | host.https_only
            Prototyped message | host.set_https_only
            Prototyped message | pool.set_https_only
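
            A short sketch, assuming these prototyped calls follow the usual XenAPI getter/setter convention (a boolean on the pool or an individual host); host name and credentials are placeholders.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    pool = session.xenapi.pool.get_all()[0]
                    # Pool-wide toggle; host.set_https_only would target a single host.
                    session.xenapi.pool.set_https_only(pool, True)
                    for host in session.xenapi.host.get_all():
                        print(session.xenapi.host.get_name_label(host),
                              session.xenapi.host.get_https_only(host))
                finally:
                    session.xenapi.session.logout()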
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.33.0/index.html b/new-docs/xen-api/releases/22.33.0/index.html index 288a084db..12fac549e 100644 --- a/new-docs/xen-api/releases/22.33.0/index.html +++ b/new-docs/xen-api/releases/22.33.0/index.html @@ -1,10 +1,10 @@ XAPI 22.33.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.33.0

            Code name: "22.33.0".

            Changes

            Change | Element | Description
            Prototyped field | pool.migration_compression
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.37.0/index.html b/new-docs/xen-api/releases/22.37.0/index.html index 343c81bea..b459a49af 100644 --- a/new-docs/xen-api/releases/22.37.0/index.html +++ b/new-docs/xen-api/releases/22.37.0/index.html @@ -1,10 +1,10 @@ XAPI 22.37.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.37.0

            Code name: "22.37.0".

            Changes

            Change | Element | Description
            Prototyped field | pool.coordinator_bias
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/22.5.0/index.html b/new-docs/xen-api/releases/22.5.0/index.html index 83fe2af69..e5e552019 100644 --- a/new-docs/xen-api/releases/22.5.0/index.html +++ b/new-docs/xen-api/releases/22.5.0/index.html @@ -1,10 +1,10 @@ XAPI 22.5.0 :: XAPI Toolstack Developer Documentation -

            XAPI 22.5.0

            Code name: "22.5.0".

            Changes

            Change | Element | Description
            Published field | role.is_internal | Indicates whether the role is only to be assigned internally by xapi, or can be used by clients
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.1.0/index.html b/new-docs/xen-api/releases/23.1.0/index.html index 84ef00add..8d04f6b39 100644 --- a/new-docs/xen-api/releases/23.1.0/index.html +++ b/new-docs/xen-api/releases/23.1.0/index.html @@ -1,10 +1,10 @@ XAPI 23.1.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.1.0

            Code name: "23.1.0".

            Changes

            Change | Element | Description
            Prototyped field | VM.actions_after_softreboot
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.14.0/index.html b/new-docs/xen-api/releases/23.14.0/index.html index 4c4759f03..862c4fa36 100644 --- a/new-docs/xen-api/releases/23.14.0/index.html +++ b/new-docs/xen-api/releases/23.14.0/index.html @@ -1,10 +1,10 @@ XAPI 23.14.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.14.0

            Code name: "23.14.0".

            Changes

            Change | Element | Description
            Prototyped class | Observer
            Prototyped field | Observer.attributes
            Prototyped field | Observer.components
            Prototyped field | Observer.enabled
            Prototyped field | Observer.endpoints
            Prototyped field | Observer.hosts
            Prototyped field | Observer.uuid
            Prototyped message | Observer.set_attributes
            Prototyped message | Observer.set_components
            Prototyped message | Observer.set_enabled (see the sketch after this table)
            Prototyped message | Observer.set_endpoints
            Prototyped message | Observer.set_hosts
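
            Since the Observer class is only prototyped here, its surface may still change; under that caveat, a sketch that uses the setters listed above on existing Observer records. The "bugtool" endpoint value, host name, and credentials are assumptions for illustration.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    for obs in session.xenapi.Observer.get_all():
                        # Assumed endpoint value; point the observer somewhere and enable it.
                        session.xenapi.Observer.set_endpoints(obs, ["bugtool"])
                        session.xenapi.Observer.set_enabled(obs, True)
                finally:
                    session.xenapi.session.logout()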
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.18.0/index.html b/new-docs/xen-api/releases/23.18.0/index.html index 2d71583c2..159097c82 100644 --- a/new-docs/xen-api/releases/23.18.0/index.html +++ b/new-docs/xen-api/releases/23.18.0/index.html @@ -1,10 +1,10 @@ XAPI 23.18.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.18.0

            Code name: "23.18.0".

            Changes

            Change | Element | Description
            Prototyped field | host.latest_synced_updates_applied
            Prototyped field | pool.last_update_sync
            Prototyped field | pool.update_sync_day
            Prototyped field | pool.update_sync_enabled
            Prototyped field | pool.update_sync_frequency
            Prototyped message | host.apply_recommended_guidances
            Prototyped message | pool.configure_update_sync
            Prototyped message | pool.set_update_sync_enabled
            Removed field | Repository.up_to_date | The up_to_date field of repository was removed
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.25.0/index.html b/new-docs/xen-api/releases/23.25.0/index.html index bc7ce22e7..26ccd8e83 100644 --- a/new-docs/xen-api/releases/23.25.0/index.html +++ b/new-docs/xen-api/releases/23.25.0/index.html @@ -1,10 +1,10 @@ XAPI 23.25.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.25.0

            Code name: "23.25.0".

            Changes

            Change | Element | Description
            Removed message | host.apply_recommended_guidances
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.27.0/index.html b/new-docs/xen-api/releases/23.27.0/index.html index b213d463e..d7619b52f 100644 --- a/new-docs/xen-api/releases/23.27.0/index.html +++ b/new-docs/xen-api/releases/23.27.0/index.html @@ -1,10 +1,10 @@ XAPI 23.27.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.27.0

            Code name: "23.27.0".

            Changes

            Change | Element | Description
            Prototyped field | pool.ext_auth_max_threads
            Prototyped field | pool.local_auth_max_threads
            Prototyped message | pool.set_ext_auth_max_threads (see the sketch after this table)
            Prototyped message | pool.set_local_auth_max_threads
            Extended message | host.evacuate | Choose batch size of VM evacuation.
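
            A sketch of the new thread-pool setters, assuming they follow the standard XenAPI setter convention; note that int64 values travel as strings in the XML-RPC Python binding. Host and credentials are placeholders.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    pool = session.xenapi.pool.get_all()[0]
                    # int64 parameters are passed as strings over XML-RPC.
                    session.xenapi.pool.set_ext_auth_max_threads(pool, "16")
                    session.xenapi.pool.set_local_auth_max_threads(pool, "16")
                finally:
                    session.xenapi.session.logout()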
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.30.0/index.html b/new-docs/xen-api/releases/23.30.0/index.html index 8abc3df73..f48111490 100644 --- a/new-docs/xen-api/releases/23.30.0/index.html +++ b/new-docs/xen-api/releases/23.30.0/index.html @@ -1,10 +1,10 @@ XAPI 23.30.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.30.0

            Code name: "23.30.0".

            Changes

            Change | Element | Description
            Prototyped message | VM.restart_device_models
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/23.9.0/index.html b/new-docs/xen-api/releases/23.9.0/index.html index dcb0ab080..79ad1b832 100644 --- a/new-docs/xen-api/releases/23.9.0/index.html +++ b/new-docs/xen-api/releases/23.9.0/index.html @@ -1,10 +1,10 @@ XAPI 23.9.0 :: XAPI Toolstack Developer Documentation -

            XAPI 23.9.0

            Code name: "23.9.0".

            Changes

            Change | Element | Description
            Prototyped field | pool.telemetry_frequency
            Prototyped field | pool.telemetry_next_collection
            Prototyped field | pool.telemetry_uuid
            Prototyped message | pool.reset_telemetry_uuid
            Prototyped message | pool.set_telemetry_next_collection
            Changed field | pool.repository_proxy_password | Changed internal_only to false
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/24.0.0/index.html b/new-docs/xen-api/releases/24.0.0/index.html index d692c262c..9352265a1 100644 --- a/new-docs/xen-api/releases/24.0.0/index.html +++ b/new-docs/xen-api/releases/24.0.0/index.html @@ -1,10 +1,10 @@ XAPI 24.0.0 :: XAPI Toolstack Developer Documentation -

            XAPI 24.0.0

            Code name: "24.0.0".

            Changes

            Change | Element | Description
            Prototyped field | host.numa_affinity_policy
            Prototyped field | pool.custom_uefi_certificates
            Prototyped message | host.set_numa_affinity_policy
            Prototyped message | pool.set_custom_uefi_certificates
            Deprecated message | pool.set_uefi_certificates | use set_custom_uefi_certificates instead
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/24.10.0/index.html b/new-docs/xen-api/releases/24.10.0/index.html index c44e9d402..58843bdbc 100644 --- a/new-docs/xen-api/releases/24.10.0/index.html +++ b/new-docs/xen-api/releases/24.10.0/index.html @@ -1,10 +1,10 @@ XAPI 24.10.0 :: XAPI Toolstack Developer Documentation -

            XAPI 24.10.0

            Code name: "24.10.0".

            Changes

            Change | Element | Description
            Prototyped field | VM.pending_guidances_full
            Prototyped field | VM.pending_guidances_recommended
            Prototyped field | host.last_update_hash
            Prototyped field | host.pending_guidances_full
            Prototyped field | host.pending_guidances_recommended
            Prototyped message | host.emergency_clear_mandatory_guidance
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/24.14.0/index.html b/new-docs/xen-api/releases/24.14.0/index.html index cd977f134..5077fea3c 100644 --- a/new-docs/xen-api/releases/24.14.0/index.html +++ b/new-docs/xen-api/releases/24.14.0/index.html @@ -1,10 +1,10 @@ XAPI 24.14.0 :: XAPI Toolstack Developer Documentation -

            XAPI 24.14.0

            Code name: "24.14.0".

            Changes

            Change | Element | Description
            Prototyped message | PCI.disable_dom0_access
            Prototyped message | PCI.enable_dom0_access
            Prototyped message | PCI.get_dom0_access_status (see the sketch after this table)
            Changed field | VM.has_vendor_device | New default and not consulting Pool.policy_no_vendor_device
            Deprecated field | PGPU.dom0_access | Use PCI.get_dom0_access_status instead.
            Deprecated field | pool.policy_no_vendor_device | No longer considered by VM.create
            Deprecated message | PGPU.disable_dom0_access | Use PCI.disable_dom0_access instead.
            Deprecated message | PGPU.enable_dom0_access | Use PCI.enable_dom0_access instead.
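
            A sketch of migrating from the deprecated PGPU calls to the PCI-level ones listed above; the return value is assumed to be an enum string, and host/credentials are placeholders.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    for pci in session.xenapi.PCI.get_all():
                        # Replaces reading the deprecated PGPU.dom0_access field.
                        status = session.xenapi.PCI.get_dom0_access_status(pci)
                        print(session.xenapi.PCI.get_pci_id(pci), status)
                finally:
                    session.xenapi.session.logout()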
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/24.16.0/index.html b/new-docs/xen-api/releases/24.16.0/index.html index 95d5f1a8c..788633df1 100644 --- a/new-docs/xen-api/releases/24.16.0/index.html +++ b/new-docs/xen-api/releases/24.16.0/index.html @@ -1,10 +1,10 @@ XAPI 24.16.0 :: XAPI Toolstack Developer Documentation -

            XAPI 24.16.0

            Code name: "24.16.0".

            Changes

            Change | Element | Description
            Extended class | sr_stat | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.clustered | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.free_space | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.health | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.name_description | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.name_label | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.total_space | Enum extended with 'unreachable' and 'unavailable' values
            Extended field | sr_stat.uuid | Enum extended with 'unreachable' and 'unavailable' values
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/24.3.0/index.html b/new-docs/xen-api/releases/24.3.0/index.html index a06ab000c..baa75a9f9 100644 --- a/new-docs/xen-api/releases/24.3.0/index.html +++ b/new-docs/xen-api/releases/24.3.0/index.html @@ -1,10 +1,10 @@ XAPI 24.3.0 :: XAPI Toolstack Developer Documentation -

            XAPI 24.3.0

            Code name: "24.3.0".

            Changes

            Change | Element | Description
            Prototyped field | Cluster.is_quorate
            Prototyped field | Cluster.live_hosts
            Prototyped field | Cluster.quorum
            Prototyped field | Cluster_host.last_update_live
            Prototyped field | Cluster_host.live
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/boston/index.html b/new-docs/xen-api/releases/boston/index.html index 25ab78eb5..ddb64678a 100644 --- a/new-docs/xen-api/releases/boston/index.html +++ b/new-docs/xen-api/releases/boston/index.html @@ -1,10 +1,10 @@ XenServer 6.0 :: XAPI Toolstack Developer Documentation -

            XenServer 6.0

            Code name: "boston".

            Changes

            Change | Element | Description
            Published class | DR_task | DR task
            Published class | GPU_group | A group of compatible GPUs across the resource pool
            Published class | PCI | A PCI device
            Published class | PGPU | A physical GPU (pGPU)
            Published class | VGPU | A virtual GPU (vGPU)
            Published class | VM_appliance | VM appliance
            Published field | Bond.mode | The algorithm used to distribute traffic among the bonded NICs
            Published field | Bond.primary_slave | The PIF from which the IP configuration and MAC were copied to the bond, and which will receive all configuration/VLANs/VIFs on the bond if the bond is destroyed
            Published field | GPU_group.GPU_types | List of GPU types (vendor+device ID) that can be in this group
            Published field | GPU_group.PGPUs | List of pGPUs in the group
            Published field | GPU_group.VGPUs | List of vGPUs using the group
            Published field | GPU_group.name_description | a notes field containing a human-readable description
            Published field | GPU_group.name_label | a human-readable name
            Published field | GPU_group.other_config | Additional configuration
            Published field | GPU_group.uuid | Unique identifier/object reference
            Published field | PCI.class_name | PCI class name
            Published field | PCI.dependencies | List of dependent PCI devices
            Published field | PCI.device_name | Device name
            Published field | PCI.host | Physical machine that owns the PCI device
            Published field | PCI.other_config | Additional configuration
            Published field | PCI.pci_id | PCI ID of the physical device
            Published field | PCI.uuid | Unique identifier/object reference
            Published field | PCI.vendor_name | Vendor name
            Published field | PGPU.GPU_group | GPU group the pGPU is contained in
            Published field | PGPU.PCI | Link to underlying PCI device
            Published field | PGPU.host | Host that owns the GPU
            Published field | PGPU.other_config | Additional configuration
            Published field | PGPU.uuid | Unique identifier/object reference
            Published field | SR.introduced_by | The disaster recovery task which introduced this SR
            Published field | VDI.metadata_latest | Whether this VDI contains the latest known accessible metadata for the pool
            Published field | VDI.metadata_of_pool | The pool whose metadata is contained in this VDI
            Published field | VGPU.GPU_group | GPU group used by the vGPU
            Published field | VGPU.VM | VM that owns the vGPU
            Published field | VGPU.currently_attached | Reflects whether the virtual device is currently connected to a physical device
            Published field | VGPU.device | Order in which the devices are plugged into the VM
            Published field | VGPU.other_config | Additional configuration
            Published field | VGPU.uuid | Unique identifier/object reference
            Published field | VM.VGPUs | Virtual GPUs
            Published field | VM.attached_PCIs | Currently passed-through PCI devices
            Published field | VM.order | The point in the startup or shutdown sequence at which this VM will be started
            Published field | VM.shutdown_delay | The delay to wait before proceeding to the next order in the shutdown sequence (seconds)
            Published field | VM.start_delay | The delay to wait before proceeding to the next order in the startup sequence (seconds)
            Published field | VM.suspend_SR | The SR on which a suspend image is stored
            Published field | VM.version | The number of times this VM has been recovered
            Published field | event.snapshot | The record of the database object that was added, changed or deleted
            Published field | host.PCIs | List of PCI devices in the host
            Published field | host.PGPUs | List of physical GPUs in the host
            Published field | host.chipset_info | Information about chipset features
            Published field | pool.metadata_VDIs | The set of currently known metadata VDIs for this pool
            Published message | Bond.set_mode | Change the bond mode
            Published message | DR_task.create | Create a disaster recovery task which will query the supplied list of devices
            Published message | DR_task.destroy | Destroy the disaster recovery task, detaching and forgetting any SRs introduced which are no longer required
            Published message | GPU_group.create
            Published message | GPU_group.destroy
            Published message | SR.assert_supports_database_replication | Returns successfully if the given SR supports database replication. Otherwise returns an error to explain why not.
            Published message | SR.disable_database_replication
            Published message | SR.enable_database_replication
            Published message | VDI.open_database | Load the metadata found on the supplied VDI and return a session reference which can be used in API calls to query its contents.
            Published message | VDI.read_database_pool_uuid | Check the VDI cache for the pool UUID of the database on this VDI.
            Published message | VGPU.create
            Published message | VGPU.destroy
            Published message | VIF.unplug_force | Forcibly unplug the specified VIF
            Published message | VM.assert_can_be_recovered | Assert whether all SRs required to recover this VM are available.
            Published message | VM.recover | Recover the VM
            Published message | VM.set_appliance | Assign this VM to an appliance.
            Published message | VM.set_order | Set this VM's boot order
            Published message | VM.set_shutdown_delay | Set this VM's shutdown delay in seconds
            Published message | VM.set_start_delay | Set this VM's start delay in seconds
            Published message | VM.set_suspend_VDI | Set this VM's suspend VDI, which must be identical to its current one
            Published message | VM_appliance.assert_can_be_recovered | Assert whether all SRs required to recover this VM appliance are available.
            Published message | VM_appliance.clean_shutdown | Perform a clean shutdown of all the VMs in the appliance
            Published message | VM_appliance.hard_shutdown | Perform a hard shutdown of all the VMs in the appliance
            Published message | VM_appliance.recover | Recover the VM appliance
            Published message | VM_appliance.shutdown | For each VM in the appliance, try to shut it down cleanly. If this fails, perform a hard shutdown of the VM.
            Published message | VM_appliance.start | Start all VMs in the appliance
            Published message | event.from | Blocking call which returns a new token and a (possibly empty) batch of events. The returned token can be used in subsequent calls to this function. (see the sketch after this table)
            Deprecated field | VM.PCI_bus | Field was never used
            Deprecated field | VM.ha_always_run
            Deprecated field | event.obj_uuid
            Deprecated field | event.timestamp
            Deprecated message | VM.set_ha_always_run
            Deprecated message | event.next
            Deprecated message | event.register
            Deprecated message | event.unregister
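
            The event.from call above replaces the deprecated event.register/event.next pair; a minimal polling loop might look like the following. Host and credentials are placeholders, and since `from` is a Python keyword the method is fetched dynamically.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    token = ""  # an empty token starts from the current position
                    while True:
                        # `from` is a Python keyword, so look the method up dynamically.
                        batch = getattr(session.xenapi.event, "from")(["vm"], token, 30.0)
                        token = batch["token"]  # reuse in the next call
                        for ev in batch["events"]:
                            print(ev["class"], ev["operation"], ev["ref"])
                finally:
                    session.xenapi.session.logout()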
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/clearwater-felton/index.html b/new-docs/xen-api/releases/clearwater-felton/index.html index 86cf25389..b4db16e37 100644 --- a/new-docs/xen-api/releases/clearwater-felton/index.html +++ b/new-docs/xen-api/releases/clearwater-felton/index.html @@ -1,10 +1,10 @@ XenServer 6.2 SP1 Hotfix 4 :: XAPI Toolstack Developer Documentation -

            XenServer 6.2 SP1 Hotfix 4

            Code name: "clearwater-felton".

            Changes

            Change | Element | Description
            Extended message | VDI.copy | The copy can now be performed into a pre-created VDI. It is now possible to request copying only changed blocks from a base VDI
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/clearwater-whetstone/index.html b/new-docs/xen-api/releases/clearwater-whetstone/index.html index b30707cb5..5748abc31 100644 --- a/new-docs/xen-api/releases/clearwater-whetstone/index.html +++ b/new-docs/xen-api/releases/clearwater-whetstone/index.html @@ -1,10 +1,10 @@ XenServer 6.2 SP1 Hotfix 11 :: XAPI Toolstack Developer Documentation -

            XenServer 6.2 SP1 Hotfix 11

            Code name: "clearwater-whetstone".

            Changes

            Change | Element | Description
            Published field | PCI.subsystem_device_name | Subsystem device name
            Published field | PCI.subsystem_vendor_name | Subsystem vendor name
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/clearwater/index.html b/new-docs/xen-api/releases/clearwater/index.html index eff0d99a3..73559b957 100644 --- a/new-docs/xen-api/releases/clearwater/index.html +++ b/new-docs/xen-api/releases/clearwater/index.html @@ -1,10 +1,10 @@ XenServer 6.2 :: XAPI Toolstack Developer Documentation -

            XenServer 6.2

            Code name: "clearwater".

            Changes

            Change | Element | Description
            Published field | SM.features | capabilities of the SM plugin, with capability version numbers
            Published field | VM.generation_id | Generation ID of the VM
            Published field | session.originator | a key string provided by an API user to distinguish itself from other users sharing the same login name
            Published message | VM.shutdown | Attempts a clean shutdown of the VM first; if that fails, performs a hard shutdown.
            Published message | host.declare_dead | Declare that a host is dead. This is a dangerous operation, and should only be called if the administrator is absolutely sure the host is definitely dead
            Published message | pool.apply_edition | Apply an edition to all hosts in the pool
            Published message | pool.get_license_state | This call returns the license state for the pool (see the sketch after this table)
            Deprecated field | SM.capabilities | Use SM.features instead
            Deprecated field | VM.protection_policy | The VMPR feature was removed
            Removed class | VMPP | The VMPR feature was removed
            Removed field | VM.is_snapshot_from_vmpp | The VMPR feature was removed
            Removed field | VMPP.VMs | The VMPR feature was removed
            Removed field | VMPP.alarm_config | The VMPR feature was removed
            Removed field | VMPP.archive_frequency | The VMPR feature was removed
            Removed field | VMPP.archive_last_run_time | The VMPR feature was removed
            Removed field | VMPP.archive_schedule | The VMPR feature was removed
            Removed field | VMPP.archive_target_config | The VMPR feature was removed
            Removed field | VMPP.archive_target_type | The VMPR feature was removed
            Removed field | VMPP.backup_frequency | The VMPR feature was removed
            Removed field | VMPP.backup_last_run_time | The VMPR feature was removed
            Removed field | VMPP.backup_retention_value | The VMPR feature was removed
            Removed field | VMPP.backup_schedule | The VMPR feature was removed
            Removed field | VMPP.backup_type | The VMPR feature was removed
            Removed field | VMPP.is_alarm_enabled | The VMPR feature was removed
            Removed field | VMPP.is_archive_running | The VMPR feature was removed
            Removed field | VMPP.is_backup_running | The VMPR feature was removed
            Removed field | VMPP.is_policy_enabled | The VMPR feature was removed
            Removed field | VMPP.recent_alerts | The VMPR feature was removed
            Removed field | VMPP.uuid | The VMPR feature was removed
            Removed message | VM.set_protection_policy | The VMPR feature was removed
            Removed message | VMPP.add_to_alarm_config | The VMPR feature was removed
            Removed message | VMPP.add_to_archive_schedule | The VMPR feature was removed
            Removed message | VMPP.add_to_archive_target_config | The VMPR feature was removed
            Removed message | VMPP.add_to_backup_schedule | The VMPR feature was removed
            Removed message | VMPP.archive_now | The VMPR feature was removed
            Removed message | VMPP.get_alerts | The VMPR feature was removed
            Removed message | VMPP.protect_now | The VMPR feature was removed
            Removed message | VMPP.remove_from_alarm_config | The VMPR feature was removed
            Removed message | VMPP.remove_from_archive_schedule | The VMPR feature was removed
            Removed message | VMPP.remove_from_archive_target_config | The VMPR feature was removed
            Removed message | VMPP.remove_from_backup_schedule | The VMPR feature was removed
            Removed message | VMPP.set_alarm_config | The VMPR feature was removed
            Removed message | VMPP.set_archive_frequency | The VMPR feature was removed
            Removed message | VMPP.set_archive_last_run_time | The VMPR feature was removed
            Removed message | VMPP.set_archive_schedule | The VMPR feature was removed
            Removed message | VMPP.set_archive_target_config | The VMPR feature was removed
            Removed message | VMPP.set_archive_target_type | The VMPR feature was removed
            Removed message | VMPP.set_backup_frequency | The VMPR feature was removed
            Removed message | VMPP.set_backup_last_run_time | The VMPR feature was removed
            Removed message | VMPP.set_backup_retention_value | The VMPR feature was removed
            Removed message | VMPP.set_backup_schedule | The VMPR feature was removed
            Removed message | VMPP.set_is_alarm_enabled | The VMPR feature was removed
            Removed message | host.license_apply | Free licenses no longer handled by xapi
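
            For the two licensing calls above, a short sketch; host, credentials, and the edition name are placeholders, and get_license_state is assumed to return a string-to-string map.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    pool = session.xenapi.pool.get_all()[0]
                    # "edition-name" is a placeholder; valid editions are product-specific.
                    session.xenapi.pool.apply_edition(pool, "edition-name")
                    for key, value in session.xenapi.pool.get_license_state(pool).items():
                        print(key, "=", value)
                finally:
                    session.xenapi.session.logout()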
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/cowley/index.html b/new-docs/xen-api/releases/cowley/index.html index bfcec422e..ddcb353e3 100644 --- a/new-docs/xen-api/releases/cowley/index.html +++ b/new-docs/xen-api/releases/cowley/index.html @@ -1,10 +1,10 @@ XenServer 5.6 FP1 :: XAPI Toolstack Developer Documentation -

            XenServer 5.6 FP1

            Code name: "cowley".

            Changes

            Change | Element | Description
            Published class | VMPP | VM Protection Policy
            Published class | tunnel | A tunnel for network traffic
            Published field | PIF.tunnel_access_PIF_of | Indicates to which tunnel this PIF gives access
            Published field | PIF.tunnel_transport_PIF_of | Indicates to which tunnel this PIF provides transport
            Published field | SR.local_cache_enabled | True if this SR is assigned to be the local cache for its host
            Published field | VDI.allow_caching | true if this VDI is to be cached in the local cache SR
            Published field | VDI.on_boot | The behaviour of this VDI on a VM boot
            Published field | VM.is_snapshot_from_vmpp | true if this snapshot was created by the protection policy
            Published field | VM.protection_policy | Ref pointing to a protection policy for this VM
            Published field | VMPP.VMs | all VMs attached to this protection policy
            Published field | VMPP.alarm_config | configuration for the alarm
            Published field | VMPP.archive_frequency | frequency of the archive schedule
            Published field | VMPP.archive_last_run_time | time of the last archive
            Published field | VMPP.archive_schedule | schedule of the archive containing 'hour', 'min', 'days'. Date/time-related information is in Local Timezone
            Published field | VMPP.archive_target_config | configuration for the archive, including its 'location', 'username', 'password'
            Published field | VMPP.archive_target_type | type of the archive target config
            Published field | VMPP.backup_frequency | frequency of the backup schedule
            Published field | VMPP.backup_last_run_time | time of the last backup
            Published field | VMPP.backup_retention_value | maximum number of backups that should be stored at any time
            Published field | VMPP.backup_schedule | schedule of the backup containing 'hour', 'min', 'days'. Date/time-related information is in Local Timezone
            Published field | VMPP.backup_type | type of the backup sub-policy
            Published field | VMPP.is_alarm_enabled | true if alarm is enabled for this policy
            Published field | VMPP.is_archive_running | true if this protection policy's archive is running
            Published field | VMPP.is_backup_running | true if this protection policy's backup is running
            Published field | VMPP.is_policy_enabled | enable or disable this policy
            Published field | VMPP.recent_alerts | recent alerts
            Published field | VMPP.uuid | Unique identifier/object reference
            Published field | host.local_cache_sr | The SR that is used as a local cache
            Published field | tunnel.access_PIF | The interface through which the tunnel is accessed
            Published field | tunnel.other_config | Additional configuration
            Published field | tunnel.status | Status information about the tunnel
            Published field | tunnel.transport_PIF | The interface used by the tunnel
            Published field | tunnel.uuid | Unique identifier/object reference
            Published message | VDI.set_allow_caching | Set the value of the allow_caching parameter. This value can only be changed when the VDI is not attached to a running VM. The caching behaviour is only affected by this flag for VHD-based VDIs that have one parent and no child VHDs. Moreover, caching only takes place when the host running the VM containing this VDI has a nominated SR for local caching.
            Published message | VDI.set_on_boot | Set the value of the on_boot parameter. This value can only be changed when the VDI is not attached to a running VM.
            Published message | VM.set_protection_policy | Set the value of the protection_policy field
            Published message | VMPP.add_to_alarm_config
            Published message | VMPP.add_to_archive_schedule
            Published message | VMPP.add_to_archive_target_config
            Published message | VMPP.add_to_backup_schedule
            Published message | VMPP.archive_now | This call archives the snapshot provided as a parameter
            Published message | VMPP.get_alerts | This call fetches a history of alerts for a given protection policy
            Published message | VMPP.protect_now | This call executes the protection policy immediately
            Published message | VMPP.remove_from_alarm_config
            Published message | VMPP.remove_from_archive_schedule
            Published message | VMPP.remove_from_archive_target_config
            Published message | VMPP.remove_from_backup_schedule
            Published message | VMPP.set_alarm_config
            Published message | VMPP.set_archive_frequency | Set the value of the archive_frequency field
            Published message | VMPP.set_archive_last_run_time
            Published message | VMPP.set_archive_schedule
            Published message | VMPP.set_archive_target_config
            Published message | VMPP.set_archive_target_type | Set the value of the archive_target_config_type field
            Published message | VMPP.set_backup_frequency | Set the value of the backup_frequency field
            Published message | VMPP.set_backup_last_run_time
            Published message | VMPP.set_backup_retention_value
            Published message | VMPP.set_backup_schedule
            Published message | VMPP.set_is_alarm_enabled | Set the value of the is_alarm_enabled field
            Published message | host.disable_local_storage_caching | Disable the use of a local SR for caching purposes
            Published message | host.enable_local_storage_caching | Enable the use of a local SR for caching purposes (see the sketch after this table)
            Published message | host.get_server_localtime | This call queries the host's clock for the current time in the host's local timezone
            Published message | host.set_power_on_mode | Set the power-on-mode, host, user and password
            Published message | pool.disable_local_storage_caching | This call disables pool-wide local storage caching
            Published message | pool.enable_local_storage_caching | This call attempts to enable pool-wide local storage caching
            Published message | pool.test_archive_target | This call tests if a location is valid
            Published message | tunnel.create | Create a tunnel
            Published message | tunnel.destroy | Destroy a tunnel
            Extended message | VDI.copy | The copy can now be performed between any two SRs.
            Extended message | VM.copy | The copy can now be performed between any two SRs.
            Extended message | pool.set_vswitch_controller | Allows the value to be set to the empty string (no controller is used).
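
            A sketch tying the local-caching pieces above together; the enable_local_storage_caching parameter list is assumed, SR/VDI selection is simplified to the first record, and host/credentials are placeholders.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    host = session.xenapi.host.get_all()[0]
                    sr = session.xenapi.SR.get_all()[0]  # simplistic SR choice
                    # Assumed signature: enable_local_storage_caching(host, sr).
                    session.xenapi.host.enable_local_storage_caching(host, sr)
                    vdi = session.xenapi.VDI.get_all()[0]  # simplistic VDI choice
                    session.xenapi.VDI.set_allow_caching(vdi, True)
                    session.xenapi.VDI.set_on_boot(vdi, "persist")  # or "reset"
                finally:
                    session.xenapi.session.logout()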
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/cream/index.html b/new-docs/xen-api/releases/cream/index.html index 629f43c64..702a8abae 100644 --- a/new-docs/xen-api/releases/cream/index.html +++ b/new-docs/xen-api/releases/cream/index.html @@ -1,10 +1,10 @@ XenServer 6.5 SP1 :: XAPI Toolstack Developer Documentation -

            XenServer 6.5 SP1

            Code name: "cream".

            Changes

            Change | Element | Description
            Published field | PGPU.dom0_access | The accessibility of this device from dom0
            Published field | PGPU.is_system_display_device | Is this device the system display device
            Published field | VM.hardware_platform_version | The host virtual hardware platform version the VM can run on
            Published field | host.display | indicates whether the host is configured to output its console to a physical display device
            Published field | host.virtual_hardware_platform_versions | The set of versions of the virtual hardware platform that the host can offer to its guests
            Published message | PGPU.disable_dom0_access
            Published message | PGPU.enable_dom0_access
            Published message | VM.call_plugin | Call an API plugin on this vm
            Published message | host.disable_display | Disable console output to the physical display device next time this host boots
            Published message | host.enable_display | Enable console output to the physical display device next time this host boots
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/creedence/index.html b/new-docs/xen-api/releases/creedence/index.html index b7afaa03d..a5cf318ac 100644 --- a/new-docs/xen-api/releases/creedence/index.html +++ b/new-docs/xen-api/releases/creedence/index.html @@ -1,10 +1,10 @@ XenServer 6.5 :: XAPI Toolstack Developer Documentation -

            XenServer 6.5

            Code name: "creedence".

            Changes

            Change | Element | Description
            Published field | PIF.properties | Additional configuration properties for the interface.
            Published field | network.assigned_ips | The IP addresses assigned to VIFs on networks that have active xapi-managed DHCP
            Published message | PIF.set_property | Set the value of a property of the PIF
            Published message | VM.get_SRs_required_for_recovery | List all the SRs that are required for the VM to be recovered
            Published message | VM_appliance.get_SRs_required_for_recovery | Get the list of SRs required by the VM appliance to recover.
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/dundee/index.html b/new-docs/xen-api/releases/dundee/index.html index 9e106de0d..d9688c0c1 100644 --- a/new-docs/xen-api/releases/dundee/index.html +++ b/new-docs/xen-api/releases/dundee/index.html @@ -1,10 +1,10 @@ XenServer 7.0 :: XAPI Toolstack Developer Documentation -

            XenServer 7.0

            Code name: "dundee".

            Changes

            Change | Element | Description
            Published class | LVHD | LVHD SR specific operations
            Published field | PIF.capabilities | Additional capabilities on the interface.
            Published field | SM.required_cluster_stack | The storage plugin requires that one of these cluster stacks is configured and running.
            Published field | SR.clustered | True if the SR is using aggregated local storage
            Published field | SR.is_tools_sr | True if this is the SR that contains the Tools ISO VDIs
            Published field | VDI.is_tools_iso | Whether this VDI is a Tools ISO
            Published field | VGPU.scheduled_to_be_resident_on | The PGPU on which this VGPU is scheduled to run
            Published field | VGPU_type.experimental | Indicates whether VGPUs of this type should be considered experimental
            Published field | VGPU_type.identifier | Key used to identify VGPU types and avoid creating duplicates - this field is used internally and not intended for interpretation by API clients
            Published field | VGPU_type.implementation | The internal implementation of this VGPU type
            Published field | VIF.ipv4_addresses | IPv4 addresses in CIDR format
            Published field | VIF.ipv4_configuration_mode | Determines whether IPv4 addresses are configured on the VIF
            Published field | VIF.ipv4_gateway | IPv4 gateway (the empty string means that no gateway is set)
            Published field | VIF.ipv6_addresses | IPv6 addresses in CIDR format
            Published field | VIF.ipv6_configuration_mode | Determines whether IPv6 addresses are configured on the VIF
            Published field | VIF.ipv6_gateway | IPv6 gateway (the empty string means that no gateway is set)
            Published field | VM.has_vendor_device | When an HVM guest starts, this controls the presence of the emulated C000 PCI device which triggers Windows Update to fetch or update PV drivers.
            Published field | VM_guest_metrics.PV_drivers_detected | At least one of the guest's devices has successfully connected to the backend.
            Published field | VM_guest_metrics.can_use_hotplug_vbd | To be used where relevant and available instead of checking PV driver version.
            Published field | VM_guest_metrics.can_use_hotplug_vif | To be used where relevant and available instead of checking PV driver version.
            Published field | host.ssl_legacy | Allow SSLv3 protocol and ciphersuites as used by older server versions. This controls both incoming and outgoing connections. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. API login sessions will remain valid.
            Published field | pool.cpu_info | Details about the physical CPUs on the pool
            Published field | pool.guest_agent_config | Pool-wide guest agent configuration information
            Published field | pool.ha_cluster_stack | The HA cluster stack that is currently in use. Only valid when HA is enabled.
            Published field | pool.health_check_config | Configuration for the automatic health check feature
            Published field | pool.policy_no_vendor_device | This field was consulted when VM.create did not specify a value for 'has_vendor_device'; VM.create now uses a simple default and no longer consults this value.
            Published field | task.backtrace | Function call trace for debugging.
            Published message | LVHD.enable_thin_provisioning | Upgrades an LVHD SR to enable thin-provisioning. Future VDIs created in this SR will be thinly-provisioned, although existing VDIs will be left alone. Note that the SR must be attached to the SR master for upgrade to work.
            Published message | SR.forget_data_source_archives | Forget the recorded statistics related to the specified data source
            Published message | SR.get_data_sources
            Published message | SR.query_data_source | Query the latest value of the specified data source
            Published message | SR.record_data_source | Start recording the specified data source
            Published message | VIF.configure_ipv4 | Configure IPv4 settings for this virtual interface (see the sketch after this table)
            Published message | VIF.configure_ipv6 | Configure IPv6 settings for this virtual interface
            Published message | VM.import | Import an XVA from a URI
            Published message | VM.set_has_vendor_device | Controls whether, when the VM starts in HVM mode, its virtual hardware will include the emulated PCI device for which drivers may be available through Windows Update. Usually this should never be changed on a VM on which Windows has been installed: changing it on such a VM is likely to lead to a crash on next start.
            Published message | host.set_ssl_legacy | Enable/disable SSLv3 for interoperability with older server versions. When this is set to a different value, the host immediately restarts its SSL/TLS listening service; typically this takes less than a second but existing connections to it will be broken. API login sessions will remain valid.
            Published message | pool.add_to_guest_agent_config | Add a key-value pair to the pool-wide guest agent configuration
            Published message | pool.disable_ssl_legacy | Sets ssl_legacy false on each host, pool-master last. See Host.ssl_legacy and Host.set_ssl_legacy.
            Published message | pool.has_extension | Return true if the extension is available on the pool
            Published message | pool.remove_from_guest_agent_config | Remove a key-value pair from the pool-wide guest agent configuration
            Published message | session.create_from_db_file
            Deprecated field | VM_guest_metrics.PV_drivers_up_to_date | Deprecated in favour of PV_drivers_detected, and redefined in terms of it
            Deprecated message | pool.enable_ssl_legacy | Legacy SSL will soon cease to be supported
            Removed message | host.reset_cpu_features | Manual CPU feature setting was removed
            Removed message | host.set_cpu_features | Manual CPU feature setting was removed
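
            For the VIF addressing calls above, a hedged sketch; the (mode, address, gateway) parameter order and the "Static" mode name are assumptions, and host/credentials are placeholders.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    vif = session.xenapi.VIF.get_all()[0]  # simplistic VIF choice
                    # Assumed order: (vif, mode, address-in-CIDR, gateway).
                    session.xenapi.VIF.configure_ipv4(vif, "Static", "192.0.2.10/24", "192.0.2.1")
                    print(session.xenapi.VIF.get_ipv4_addresses(vif))
                finally:
                    session.xenapi.session.logout()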
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/ely/index.html b/new-docs/xen-api/releases/ely/index.html index 808794f5c..8ceaa5b0b 100644 --- a/new-docs/xen-api/releases/ely/index.html +++ b/new-docs/xen-api/releases/ely/index.html @@ -1,10 +1,10 @@ XenServer 7.1 :: XAPI Toolstack Developer Documentation -

            XenServer 7.1

            Code name: "ely".

            Changes

            Change | Element | Description
            Published class | PVS_cache_storage | Describes the storage that is available to a PVS site for caching purposes
            Published class | PVS_proxy | a proxy connects a VM/VIF with a PVS site
            Published class | PVS_server | individual machine serving provisioning (block) data
            Published class | PVS_site | machines serving blocks of data for provisioning VMs
            Published class | pool_update | Pool-wide updates to the host software
            Published field | PVS_cache_storage.SR | SR providing storage for the PVS cache
            Published field | PVS_cache_storage.VDI | The VDI used for caching
            Published field | PVS_cache_storage.host | The host on which this object defines PVS cache storage
            Published field | PVS_cache_storage.site | The PVS_site for which this object defines the storage
            Published field | PVS_cache_storage.size | The size of the cache VDI (in bytes)
            Published field | PVS_cache_storage.uuid | Unique identifier/object reference
            Published field | PVS_proxy.VIF | VIF of the VM using the proxy
            Published field | PVS_proxy.currently_attached | true = VM is currently proxied
            Published field | PVS_proxy.site | PVS site this proxy is part of
            Published field | PVS_proxy.status | The run-time status of the proxy
            Published field | PVS_proxy.uuid | Unique identifier/object reference
            Published field | PVS_server.addresses | IPv4 addresses of this server
            Published field | PVS_server.first_port | First UDP port accepted by this server
            Published field | PVS_server.last_port | Last UDP port accepted by this server
            Published field | PVS_server.site | PVS site this server is part of
            Published field | PVS_server.uuid | Unique identifier/object reference
            Published field | PVS_site.PVS_uuid | Unique identifier of the PVS site, as configured in PVS
            Published field | PVS_site.cache_storage | The SR used by PVS proxy for the cache
            Published field | PVS_site.name_description | a notes field containing a human-readable description
            Published field | PVS_site.name_label | a human-readable name
            Published field | PVS_site.proxies | The set of proxies associated with the site
            Published field | PVS_site.servers | The set of PVS servers in the site
            Published field | PVS_site.uuid | Unique identifier/object reference
            Published field | VM.reference_label | Textual reference to the template used to create a VM. This can be used by clients in need of an immutable reference to the template since the latter's uuid and name_label may change, for example, after a package installation or upgrade.
            Published field | VM.requires_reboot | Indicates whether a VM requires a reboot in order to update its configuration, e.g. its memory allocation.
            Published field | VM_metrics.hvm | hardware virtual machine
            Published field | VM_metrics.nested_virt | VM supports nested virtualisation
            Published field | VM_metrics.nomigrate | VM is immobile and can't migrate between hosts
            Published field | host.control_domain | The control domain (domain 0)
            Published field | host.updates | Set of updates
            Published field | host.updates_requiring_reboot | List of updates which require reboot
            Published field | pool.live_patching_disabled | The pool-wide flag to show if the live patching feature is disabled or not.
            Published field | pool_patch.pool_update | A reference to the associated pool_update object
            Published field | pool_update.after_apply_guidance | What the client should do after this update has been applied.
            Published field | pool_update.hosts | The hosts that have applied this update.
            Published field | pool_update.installation_size | Size of the update in bytes
            Published field | pool_update.key | GPG key of the update
            Published field | pool_update.version | Update version number
            Published message | PVS_proxy.create | Configure a VM/VIF to use a PVS proxy
            Published message | PVS_proxy.destroy | remove (or switch off) a PVS proxy for this VM
            Published message | PVS_server.forget | forget a PVS server
            Published message | PVS_server.introduce | introduce new PVS server
            Published message | PVS_site.forget | Remove a site's meta data
            Published message | PVS_site.introduce | Introduce new PVS site
            Published message | PVS_site.set_PVS_uuid | Update the PVS UUID of the PVS site
            Published message | VIF.move | Move the specified VIF to the specified network, even while the VM is running
            Published message | VM.set_memory | Set the memory allocation of this VM. Sets all of memory_static_max, memory_dynamic_min, and memory_dynamic_max to the given value, and leaves memory_static_min untouched. (see the sketch after this table)
            Published message | host.call_extension | Call an API extension on this host
            Published message | host.has_extension | Return true if the extension is available on the host
            Published message | pool_update.apply | Apply the selected update to a host
            Published message | pool_update.destroy | Removes the database entry. Only works on unapplied update.
            Published message | pool_update.introduce | Introduce update VDI
            Published message | pool_update.pool_apply | Apply the selected update to all hosts in the pool
            Published message | pool_update.pool_clean | Removes the update's files from all hosts in the pool, but does not revert the update
            Published message | pool_update.precheck | Execute the precheck stage of the selected update on a host
            Changed message | VM.set_VCPUs_number_live | Unless the feature is explicitly enabled for every host in the pool, this fails with Api_errors.license_restriction.
            Deprecated class | host_patch
            Deprecated class | pool_patch
            Deprecated field | VDI.parent | The field was never used.
            Deprecated field | host.patches
            Deprecated message | host.refresh_pack_info | Use Pool_update.resync_host instead
            Deprecated message | pool_patch.apply
            Deprecated message | pool_patch.clean
            Deprecated message | pool_patch.clean_on_host
            Deprecated message | pool_patch.destroy
            Deprecated message | pool_patch.pool_apply
            Deprecated message | pool_patch.pool_clean
            Deprecated message | pool_patch.precheck
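
            The VM.set_memory call above collapses three of the four memory bounds to one value; a short sketch, with placeholder host, credentials, and VM name, noting that int64 values travel as strings in the XML-RPC binding.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    vm = session.xenapi.VM.get_by_name_label("example-vm")[0]  # placeholder name
                    # 4 GiB; sets memory_static_max and both dynamic bounds, per the table above.
                    session.xenapi.VM.set_memory(vm, str(4 * 1024 ** 3))
                finally:
                    session.xenapi.session.logout()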
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/falcon/index.html b/new-docs/xen-api/releases/falcon/index.html index 1eabe1998..4d8ff502d 100644 --- a/new-docs/xen-api/releases/falcon/index.html +++ b/new-docs/xen-api/releases/falcon/index.html @@ -1,10 +1,10 @@ XenServer 7.2 :: XAPI Toolstack Developer Documentation -

            XenServer 7.2

            Code name: "falcon".

            Changes

            Change | Element | Description
            Published class | Feature | A new piece of functionality
            Published class | SDN_controller | Describes the SDN controller that is to connect with the pool
            Published class | VMSS | VM Snapshot Schedule
            Published field | Feature.enabled | Indicates whether the feature is enabled
            Published field | Feature.experimental | Indicates whether the feature is experimental (as opposed to stable and fully supported)
            Published field | Feature.host | The host where this feature is available
            Published field | Feature.uuid | Unique identifier/object reference
            Published field | Feature.version | The version of this feature
            Published field | SDN_controller.address | IP address of the controller
            Published field | SDN_controller.port | TCP port of the controller
            Published field | SDN_controller.protocol | Protocol to connect with SDN controller
            Published field | SDN_controller.uuid | Unique identifier/object reference
            Published field | VM.is_default_template | Identifies default templates
            Published field | VM.is_vmss_snapshot | true if this snapshot was created by the snapshot schedule
            Published field | VM.snapshot_schedule | Ref pointing to a snapshot schedule for this VM
            Published field | host.features | List of features available on this host
            Published field | network.managed | true if the bridge is managed by xapi
            Published message | SDN_controller.forget | Remove the OVS manager of the pool and destroy the db record.
            Published message | SDN_controller.introduce | Introduce an SDN controller to the pool. (see the sketch after this table)
            Published message | VM.set_snapshot_schedule | Set the value of the snapshot schedule field
            Published message | VMSS.add_to_schedule
            Published message | VMSS.remove_from_schedule
            Published message | VMSS.set_frequency | Set the value of the frequency field
            Published message | VMSS.set_last_run_time
            Published message | VMSS.set_retained_snapshots
            Published message | VMSS.set_schedule
            Published message | VMSS.set_type
            Published message | VMSS.snapshot_now | This call executes the snapshot schedule immediately
            Published message | task.set_status | Set the task status
            Changed field | network.bridge | Added to the constructor (network.create)
            Deprecated field | pool.vswitch_controller | Deprecated: set the IP address of the vswitch controller in SDN_controller instead.
            Deprecated message | pool.set_vswitch_controller | Deprecated: use 'SDN_controller.introduce' and 'SDN_controller.forget' instead.
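
            A sketch of the replacement for pool.set_vswitch_controller; the (protocol, address, port) parameter order and the "ssl" protocol name are assumptions, and host/credentials are placeholders.

                import XenAPI

                session = XenAPI.Session("https://xapi-host.example")  # placeholder host
                session.xenapi.login_with_password("root", "password", "1.0", "docs-example")
                try:
                    # Assumed order: (protocol, address, port); port is int64, sent as a string.
                    sdn = session.xenapi.SDN_controller.introduce("ssl", "192.0.2.20", "6632")
                    print(session.xenapi.SDN_controller.get_uuid(sdn))
                finally:
                    session.xenapi.session.logout()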
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/george/index.html b/new-docs/xen-api/releases/george/index.html index 0b20fce26..05e39d3d5 100644 --- a/new-docs/xen-api/releases/george/index.html +++ b/new-docs/xen-api/releases/george/index.html @@ -1,10 +1,10 @@ XenServer 5.5 :: XAPI Toolstack Developer Documentation -

            XenServer 5.5

            Code name: "george".

            Changes

Change | Element | Description
Published class | auth | Management of remote authentication services
Published class | subject | A user or group that can log in to xapi
Published field | VIF.MAC_autogenerated | true if the MAC was autogenerated; false indicates it was set manually
Published field | host.external_auth_configuration | configuration specific to external authentication service
Published field | host.external_auth_service_name | name of external authentication service configured; empty if none configured.
Published field | host.external_auth_type | type of external authentication service configured; empty if none configured.
Published field | pool.wlb_enabled | true if workload balancing is enabled on the pool, false otherwise
Published field | pool.wlb_url | URL for the configured workload balancing host
Published field | pool.wlb_username | Username for accessing the workload balancing host
Published field | pool.wlb_verify_cert | true if communication with the WLB server should enforce TLS certificate verification.
Published field | session.auth_user_sid | the subject identifier of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined.
Published field | session.is_local_superuser | true iff this session was created using local superuser credentials
Published field | session.subject | references the subject instance that created the session. If a session instance has is_local_superuser set, then the value of this field is undefined.
Published field | session.validation_time | time when session was last validated
Published field | subject.other_config | additional configuration
Published field | subject.subject_identifier | the subject identifier, unique in the external directory service
Published message | VDI.set_sharable | Sets the VDI's sharable field
Published message | VM.retrieve_wlb_recommendations | Returns mapping of hosts to ratings, indicating the suitability of starting the VM at that location according to wlb. Rating is replaced with an error if the VM cannot boot there.
Published message | auth.get_group_membership | This call queries the external directory service to obtain the transitively-closed set of groups that the subject_identifier is a member of.
Published message | auth.get_subject_identifier | This call queries the external directory service to obtain the subject_identifier as a string from the human-readable subject_name
Published message | auth.get_subject_information_from_identifier | This call queries the external directory service to obtain the user information (e.g. username, organization etc) from the specified subject_identifier
Published message | host.disable_external_auth | This call disables external authentication on the local host
Published message | host.enable_external_auth | This call enables external authentication on a host
Published message | host.get_server_certificate | Get the installed server public TLS certificate.
Published message | host.retrieve_wlb_evacuate_recommendations | Retrieves recommended host migrations to perform when evacuating the host from the wlb server. If a VM cannot be migrated from the host the reason is listed instead of a recommendation.
Published message | pool.certificate_install | Install a TLS CA certificate
Published message | pool.certificate_list | List the installed TLS CA certificates
Published message | pool.certificate_sync | Copy the TLS CA certificates and CRLs of the master to all slaves.
Published message | pool.certificate_uninstall | Uninstall a TLS CA certificate
Published message | pool.crl_install | Install a TLS CA-issued Certificate Revocation List, pool-wide.
Published message | pool.crl_list | List the names of all installed TLS CA-issued Certificate Revocation Lists.
Published message | pool.crl_uninstall | Remove a pool-wide TLS CA-issued Certificate Revocation List.
Published message | pool.deconfigure_wlb | Permanently deconfigures workload balancing monitoring on this pool
Published message | pool.detect_nonhomogeneous_external_auth | This call asynchronously detects if the external authentication configuration in any slave is different from that in the master and raises appropriate alerts
Published message | pool.disable_external_auth | This call disables external authentication on all the hosts of the pool
Published message | pool.enable_external_auth | This call enables external authentication on all the hosts of the pool
Published message | pool.initialize_wlb | Initializes workload balancing monitoring on this pool with the specified wlb server
Published message | pool.retrieve_wlb_configuration | Retrieves the pool optimization criteria from the workload balancing server
Published message | pool.retrieve_wlb_recommendations | Retrieves VM migrate recommendations for the pool from the workload balancing server
Published message | pool.send_test_post | Send the given body to the given host and port, using HTTPS, and print the response. This is used for debugging the SSL layer.
Published message | pool.send_wlb_configuration | Sets the pool optimization criteria for the workload balancing server
Published message | session.get_all_subject_identifiers | Return a list of all the user subject-identifiers of all existing sessions
Published message | session.logout_subject_identifier | Log out all sessions associated to a user subject-identifier, except the session associated with the context calling this function
Deprecated class | user | Deprecated in favor of subject
Removed field | VM_guest_metrics.memory | Disabled in favour of the RRDs, to improve scalability
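A hedged sketch of how the external-authentication calls above fit together, using the standard Python XenAPI binding; the AD domain, credentials, and user name are hypothetical:

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")  # hypothetical address
session.xenapi.login_with_password("root", "secret")
pool = session.xenapi.pool.get_all()[0]
# Join every host in the pool to a hypothetical AD domain in one call:
session.xenapi.pool.enable_external_auth(
    pool, {"user": "Administrator", "pass": "adminpass"}, "example.com", "AD")
# Resolve a human-readable name to a subject_identifier (a SID for AD) and
# grant that user access by creating a subject record:
sid = session.xenapi.auth.get_subject_identifier("EXAMPLE\\alice")
session.xenapi.subject.create({"subject_identifier": sid, "other_config": {}})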
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/index.html b/new-docs/xen-api/releases/index.html index 739a939cf..3c107e3f8 100644 --- a/new-docs/xen-api/releases/index.html +++ b/new-docs/xen-api/releases/index.html @@ -1,10 +1,10 @@ XenAPI Releases :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/xen-api/releases/index.print.html b/new-docs/xen-api/releases/index.print.html index a77f0a18e..1207a0727 100644 --- a/new-docs/xen-api/releases/index.print.html +++ b/new-docs/xen-api/releases/index.print.html @@ -1,2 +1,2 @@ XenAPI Releases :: XAPI Toolstack Developer Documentation -

            XenAPI Releases

            Subsections of XenAPI Releases

            XAPI 24.16.0

            XAPI 24.14.0

            XAPI 24.10.0

            XAPI 24.3.0

            XAPI 24.0.0

            XAPI 23.30.0

            XAPI 23.27.0

            XAPI 23.25.0

            XAPI 23.18.0

            XAPI 23.14.0

            XAPI 23.9.0

            XAPI 23.1.0

            XAPI 22.37.0

            XAPI 22.33.0

            XAPI 22.27.0

            XAPI 22.26.0

            XAPI 22.20.0

            XAPI 22.19.0

            XAPI 22.16.0

            XAPI 22.12.0

            XAPI 22.5.0

            XAPI 21.4.0

            XAPI 21.3.0

            XAPI 21.2.0

            XAPI 1.329.0

            XAPI 1.318.0

            XAPI 1.313.0

            XAPI 1.307.0

            XAPI 1.304.0

            XAPI 1.303.0

            XAPI 1.301.0

            XAPI 1.298.0

            XAPI 1.297.0

            XAPI 1.294.0

            XAPI 1.290.0

            XAPI 1.271.0

            XAPI 1.257.0

            XAPI 1.250.0

            XenServer 8 Preview

            Citrix Hypervisor 8.2 Hotfix 2

            Citrix Hypervisor 8.2

            Citrix Hypervisor 8.1

            Citrix Hypervisor 8.0

            XenServer 7.6

            XenServer 7.5

            XenServer 7.4

            XenServer 7.3

            XenServer 7.2

            XenServer 7.1

            XenServer 7.0

            XenServer 6.5 SP1 Hotfix 31

            XenServer 6.5 SP1

            XenServer 6.5

            XenServer 6.2 SP1 Hotfix 11

            XenServer 6.2 SP1 Hotfix 4

            XenServer 6.2 SP1

            XenServer 6.2 SP1 Tech-Preview

            XenServer 6.2

            XenServer 6.1

            XenServer 6.0

            XenServer 5.6 FP1

            XenServer 5.6

            XenServer 5.5

            XenServer 5.0 Update 1

            XenServer 5.0

            XenServer 4.1.1

            XenServer 4.1

            XenServer 4.0

            \ No newline at end of file +

            XenAPI Releases

            Subsections of XenAPI Releases

            XAPI 24.16.0

            XAPI 24.14.0

            XAPI 24.10.0

            XAPI 24.3.0

            XAPI 24.0.0

            XAPI 23.30.0

            XAPI 23.27.0

            XAPI 23.25.0

            XAPI 23.18.0

            XAPI 23.14.0

            XAPI 23.9.0

            XAPI 23.1.0

            XAPI 22.37.0

            XAPI 22.33.0

            XAPI 22.27.0

            XAPI 22.26.0

            XAPI 22.20.0

            XAPI 22.19.0

            XAPI 22.16.0

            XAPI 22.12.0

            XAPI 22.5.0

            XAPI 21.4.0

            XAPI 21.3.0

            XAPI 21.2.0

            XAPI 1.329.0

            XAPI 1.318.0

            XAPI 1.313.0

            XAPI 1.307.0

            XAPI 1.304.0

            XAPI 1.303.0

            XAPI 1.301.0

            XAPI 1.298.0

            XAPI 1.297.0

            XAPI 1.294.0

            XAPI 1.290.0

            XAPI 1.271.0

            XAPI 1.257.0

            XAPI 1.250.0

            XenServer 8 Preview

            Citrix Hypervisor 8.2 Hotfix 2

            Citrix Hypervisor 8.2

            Citrix Hypervisor 8.1

            Citrix Hypervisor 8.0

            XenServer 7.6

            XenServer 7.5

            XenServer 7.4

            XenServer 7.3

            XenServer 7.2

            XenServer 7.1

            XenServer 7.0

            XenServer 6.5 SP1 Hotfix 31

            XenServer 6.5 SP1

            XenServer 6.5

            XenServer 6.2 SP1 Hotfix 11

            XenServer 6.2 SP1 Hotfix 4

            XenServer 6.2 SP1

            XenServer 6.2 SP1 Tech-Preview

            XenServer 6.2

            XenServer 6.1

            XenServer 6.0

            XenServer 5.6 FP1

            XenServer 5.6

            XenServer 5.5

            XenServer 5.0 Update 1

            XenServer 5.0

            XenServer 4.1.1

            XenServer 4.1

            XenServer 4.0

            \ No newline at end of file diff --git a/new-docs/xen-api/releases/indigo/index.html b/new-docs/xen-api/releases/indigo/index.html index d26e41575..cf2398f85 100644 --- a/new-docs/xen-api/releases/indigo/index.html +++ b/new-docs/xen-api/releases/indigo/index.html @@ -1,10 +1,10 @@ XenServer 6.5 SP1 Hotfix 31 :: XAPI Toolstack Developer Documentation -

            XenServer 6.5 SP1 Hotfix 31

            Code name: "indigo".

            Changes

Change | Element | Description
Published message | host.license_add | Functionality for parsing license files re-added
Published message | host.license_remove | Remove any license file from the specified host, and switch that host to the unlicensed edition
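A short sketch of the re-added licensing calls; the file name is hypothetical, and it is an assumption here that the call takes the license file contents base64-encoded, as the classic license-file flow did:

import base64
import XenAPI

session = XenAPI.Session("https://host.example.com")  # hypothetical address
session.xenapi.login_with_password("root", "secret")
host = session.xenapi.host.get_all()[0]
with open("xenserver.lic", "rb") as f:                # hypothetical license file
    session.xenapi.host.license_add(host, base64.b64encode(f.read()).decode())
# ...and later, to drop back to the unlicensed edition:
session.xenapi.host.license_remove(host)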
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/inverness/index.html b/new-docs/xen-api/releases/inverness/index.html index 2b9dce190..67434faa9 100644 --- a/new-docs/xen-api/releases/inverness/index.html +++ b/new-docs/xen-api/releases/inverness/index.html @@ -1,10 +1,10 @@ XenServer 7.3 :: XAPI Toolstack Developer Documentation -

            XenServer 7.3

            Code name: "inverness".

            Changes

Change | Element | Description
Published class | PUSB | A physical USB device
Published class | USB_group | A group of compatible USBs across the resource pool
Published class | VUSB | Describes the VUSB device
Published class | vdi_nbd_server_info | Details for connecting to a VDI using the Network Block Device protocol
Published field | PGPU.compatibility_metadata | PGPU metadata to determine whether a VGPU can migrate between two PGPUs
Published field | PIF.igmp_snooping_status | The IGMP snooping status of the corresponding network bridge
Published field | PUSB.USB_group | USB group the PUSB is contained in
Published field | PUSB.description | USB device description
Published field | PUSB.host | Physical machine that owns the USB device
Published field | PUSB.other_config | additional configuration
Published field | PUSB.passthrough_enabled | enabled for passthrough
Published field | PUSB.path | port path of USB device
Published field | PUSB.product_desc | product description of the USB device
Published field | PUSB.product_id | product id of the USB device
Published field | PUSB.serial | serial of the USB device
Published field | PUSB.uuid | Unique identifier/object reference
Published field | PUSB.vendor_desc | vendor description of the USB device
Published field | PUSB.vendor_id | vendor id of the USB device
Published field | PUSB.version | USB device version
Published field | USB_group.PUSBs | List of PUSBs in the group
Published field | USB_group.VUSBs | List of VUSBs using the group
Published field | USB_group.name_description | a notes field containing a human-readable description
Published field | USB_group.name_label | a human-readable name
Published field | USB_group.other_config | Additional configuration
Published field | USB_group.uuid | Unique identifier/object reference
Published field | VDI.cbt_enabled | True if changed blocks are tracked for this VDI
Published field | VGPU.compatibility_metadata | VGPU metadata to determine whether a VGPU can migrate between two PGPUs
Published field | VUSB.USB_group | USB group used by the VUSB
Published field | VUSB.VM | VM that owns the VUSB
Published field | VUSB.other_config | Additional configuration
Published field | VUSB.uuid | Unique identifier/object reference
Published field | host.PUSBs | List of physical USBs in the host
Published field | network.purpose | Set of purposes for which the server will use this network
Published field | pool.igmp_snooping_enabled | true if IGMP snooping is enabled in the pool, false otherwise.
Published field | pool_update.enforce_homogeneity | Flag - if true, all hosts in a pool must apply this update
Published field | pool_update.other_config | additional configuration
Published field | vdi_nbd_server_info.address | An address on which the server can be reached; this can be IPv4, IPv6, or a DNS name.
Published field | vdi_nbd_server_info.cert | The TLS certificate of the server
Published field | vdi_nbd_server_info.exportname | The exportname to request over NBD. This holds details including an authentication token, so it must be protected appropriately. Clients should regard the exportname as an opaque string or token.
Published field | vdi_nbd_server_info.port | The TCP port
Published field | vdi_nbd_server_info.subject | For convenience, this redundant field holds a DNS (hostname) subject of the certificate. This can be a wildcard, but only for a certificate that has a wildcard subject and no concrete hostname subjects.
Published message | PUSB.scan |
Published message | PUSB.set_passthrough_enabled |
Published message | USB_group.create |
Published message | USB_group.destroy |
Published message | VDI.data_destroy | Delete the data of the snapshot VDI, but keep its changed block tracking metadata. When successful, this call changes the type of the VDI to cbt_metadata. This operation is idempotent: calling it on a VDI of type cbt_metadata results in a no-op, and no error will be thrown.
Published message | VDI.disable_cbt | Disable changed block tracking for the VDI. This call is only allowed on VDIs that support enabling CBT. It is an idempotent operation - disabling CBT for a VDI for which CBT is not enabled results in a no-op, and no error will be thrown.
Published message | VDI.enable_cbt | Enable changed block tracking for the VDI. This call is idempotent - enabling CBT for a VDI for which CBT is already enabled results in a no-op, and no error will be thrown.
Published message | VDI.get_nbd_info | Get details specifying how to access this VDI via a Network Block Device server. For each of a set of NBD server addresses on which the VDI is available, the return value set contains a vdi_nbd_server_info object that contains an exportname to request once the NBD connection is established, and connection details for the address. An empty list is returned if there is no network that has a PIF on a host with access to the relevant SR, or if no such network has been assigned an NBD-related purpose in its purpose field. To access the given VDI, any of the vdi_nbd_server_info objects can be used to make a connection to a server, and then the VDI will be available by requesting the exportname.
Published message | VDI.list_changed_blocks | Compare two VDIs in 64k block increments and report which blocks differ. This operation is not allowed when vdi_to is attached to a VM.
Published message | VM.set_bios_strings | Set custom BIOS strings to this VM. The VM will be given a default set of BIOS strings, only some of which can be overridden by the supplied values. Allowed keys are: 'bios-vendor', 'bios-version', 'system-manufacturer', 'system-product-name', 'system-version', 'system-serial-number', 'enclosure-asset-tag', 'baseboard-manufacturer', 'baseboard-product-name', 'baseboard-version', 'baseboard-serial-number', 'baseboard-asset-tag', 'baseboard-location-in-chassis'
Published message | VUSB.create | Create a new VUSB record in the database only
Published message | VUSB.destroy | Removes a VUSB record from the database
Published message | VUSB.unplug | Unplug the VUSB device from the VM.
Published message | network.add_purpose | Give a network a new purpose (if not present already)
Published message | network.remove_purpose | Remove a purpose from a network (if present)
Published message | pool.management_reconfigure | Reconfigure the management network interface for all Hosts in the Pool
Published message | pool.set_igmp_snooping_enabled | Enable or disable IGMP Snooping on the pool.
Changed message | host.get_server_certificate | Now available to all RBAC roles.
Deprecated class | crashdump |
Deprecated message | VM.get_boot_record | Use the current VM record/fields instead
Removed message | VDI.resize_online | Online VDI resize is not supported by any of the storage backends.
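Taken together, the CBT and NBD calls above support incremental backup. A hedged sketch of the round trip follows; the VDI name is hypothetical, and actually reading the changed blocks over NBD needs an NBD client, which is out of scope here:

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")  # hypothetical address
session.xenapi.login_with_password("root", "secret")
vdi = session.xenapi.VDI.get_by_name_label("data-disk")[0]   # hypothetical VDI
session.xenapi.VDI.enable_cbt(vdi)                           # idempotent
base = session.xenapi.VDI.snapshot(vdi, {})                  # full-backup point
# ... guest writes happen, then take the next snapshot ...
delta = session.xenapi.VDI.snapshot(vdi, {})
bitmap = session.xenapi.VDI.list_changed_blocks(base, delta) # base64 bitmap, 64k granularity
servers = session.xenapi.VDI.get_nbd_info(delta)             # address/port/cert + exportname
# Once the changed blocks are copied, keep only the CBT metadata of the old
# snapshot so the next delta can still be computed against it:
session.xenapi.VDI.data_destroy(base)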
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/jura/index.html b/new-docs/xen-api/releases/jura/index.html index 775d3241e..7cf0e4ccb 100644 --- a/new-docs/xen-api/releases/jura/index.html +++ b/new-docs/xen-api/releases/jura/index.html @@ -1,10 +1,10 @@ XenServer 7.4 :: XAPI Toolstack Developer Documentation -

            XenServer 7.4

            Code name: "jura".

            Changes

Change | Element | Description
Prototyped field | VM.domain_type | Internal-only field; not yet in the public API
Prototyped field | VM_metrics.current_domain_type | Not yet implemented (for future use)
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/kolkata/index.html b/new-docs/xen-api/releases/kolkata/index.html index 57e01c621..0da12487a 100644 --- a/new-docs/xen-api/releases/kolkata/index.html +++ b/new-docs/xen-api/releases/kolkata/index.html @@ -1,10 +1,10 @@ XenServer 7.5 :: XAPI Toolstack Developer Documentation -

            XenServer 7.5

            Code name: "kolkata".

            Changes

Change | Element | Description
Prototyped class | Cluster |
Prototyped class | Cluster_host |
Prototyped class | probe_result |
Prototyped class | sr_stat |
Prototyped field | Cluster.cluster_config |
Prototyped field | Cluster.cluster_hosts |
Prototyped field | Cluster.cluster_stack |
Prototyped field | Cluster.cluster_stack_version |
Prototyped field | Cluster.cluster_token |
Prototyped field | Cluster.other_config |
Prototyped field | Cluster.pool_auto_join |
Prototyped field | Cluster.token_timeout | the unit is milliseconds
Prototyped field | Cluster.token_timeout_coefficient | the unit is milliseconds
Prototyped field | Cluster.uuid |
Prototyped field | Cluster_host.PIF |
Prototyped field | Cluster_host.cluster |
Prototyped field | Cluster_host.enabled |
Prototyped field | Cluster_host.host |
Prototyped field | Cluster_host.joined |
Prototyped field | Cluster_host.other_config |
Prototyped field | Cluster_host.uuid |
Prototyped field | probe_result.complete |
Prototyped field | probe_result.configuration |
Prototyped field | probe_result.extra_info |
Prototyped field | probe_result.sr |
Prototyped field | sr_stat.clustered |
Prototyped field | sr_stat.free_space |
Prototyped field | sr_stat.health |
Prototyped field | sr_stat.name_description |
Prototyped field | sr_stat.name_label |
Prototyped field | sr_stat.total_space |
Prototyped field | sr_stat.uuid |
Prototyped message | Cluster.create |
Prototyped message | Cluster.destroy |
Prototyped message | Cluster.get_network |
Prototyped message | Cluster.pool_create |
Prototyped message | Cluster.pool_destroy |
Prototyped message | Cluster.pool_force_destroy |
Prototyped message | Cluster.pool_resync |
Prototyped message | Cluster_host.create |
Prototyped message | Cluster_host.destroy |
Prototyped message | Cluster_host.disable |
Prototyped message | Cluster_host.enable |
Prototyped message | Cluster_host.force_destroy |
Prototyped message | SR.probe_ext |
Published class | network_sriov | network-sriov, which connects a logical PIF and a physical PIF
Published field | PCI.driver_name | Driver name
Published field | PIF.PCI | Link to underlying PCI device
Published field | PIF.sriov_logical_PIF_of | Indicates which network_sriov this interface is logical of
Published field | PIF.sriov_physical_PIF_of | Indicates which network_sriov this interface is physical of
Published field | VM.domain_type | The field is now valid
Published field | VM_metrics.current_domain_type | This field now contains valid data
Published field | host.iscsi_iqn | The initiator IQN for the host
Published field | host.multipathing | Specifies whether multipathing is enabled
Published field | network_sriov.configuration_mode | The mode used to configure the network SR-IOV
Published field | network_sriov.logical_PIF | The logical PIF to connect to the SR-IOV network after enabling SR-IOV on the physical PIF
Published field | network_sriov.physical_PIF | The PIF that has SR-IOV enabled
Published field | network_sriov.requires_reboot | Indicates whether the host needs to be rebooted before SR-IOV is enabled on the physical PIF
Published message | VM.set_domain_type | Set the VM.domain_type field of the given VM, which will take effect when it is next started
Published message | host.set_iscsi_iqn | Sets the initiator IQN for the host
Published message | host.set_multipathing | Specifies whether multipathing is enabled
Published message | network_sriov.create | Enable SR-IOV on the specified PIF. It will create a network-sriov based on the specified PIF and automatically create a logical PIF to connect the specified network.
Published message | network_sriov.destroy | Disable SR-IOV on the specified PIF. It will destroy the network-sriov and the logical PIF accordingly.
Published message | network_sriov.get_remaining_capacity | Get the number of free SR-IOV VFs on the associated PIF
Deprecated field | VM.HVM_boot_policy | Replaced by VM.domain_type
Deprecated message | VM.set_HVM_boot_policy | Replaced by VM.set_domain_type
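A hedged sketch of enabling SR-IOV with the calls above; the PIF selection and network record are hypothetical, and network.create is the pre-existing constructor (int64 values such as the MTU travel as strings in the classic binding):

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")   # hypothetical address
session.xenapi.login_with_password("root", "secret")
pif = session.xenapi.PIF.get_all()[0]                         # assume: SR-IOV-capable physical NIC
net = session.xenapi.network.create({"name_label": "sriov-net",
                                     "name_description": "", "MTU": "1500",
                                     "other_config": {}})
sriov = session.xenapi.network_sriov.create(pif, net)         # also creates the logical PIF
if session.xenapi.network_sriov.get_requires_reboot(sriov):
    print("reboot the host before the VFs become usable")
print("free VFs:", session.xenapi.network_sriov.get_remaining_capacity(sriov))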
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/lima/index.html b/new-docs/xen-api/releases/lima/index.html index a13b24ee2..5a8570a31 100644 --- a/new-docs/xen-api/releases/lima/index.html +++ b/new-docs/xen-api/releases/lima/index.html @@ -1,10 +1,10 @@ XenServer 7.6 :: XAPI Toolstack Developer Documentation -

            XenServer 7.6

            Code name: "lima".

            Changes

Change | Element | Description
Published class | Cluster | Cluster-wide Cluster metadata
Published class | Cluster_host | Cluster member metadata
Published class | probe_result | A set of properties that describe one result element of SR.probe. Result elements and properties can change dynamically based on changes to the SR.probe input-parameters or the target.
Published class | sr_stat | A set of high-level properties associated with an SR.
Published field | Cluster.cluster_config | Contains read-only settings for the cluster, such as timeouts and other options. It can only be set at cluster create time
Published field | Cluster.cluster_hosts | A list of the cluster_host objects associated with the Cluster
Published field | Cluster.cluster_stack | Simply the string 'corosync'. No other cluster stacks are currently supported
Published field | Cluster.cluster_stack_version | Version of cluster stack, not writable via the API. Defaulting to 2 for backwards compatibility when upgrading from a cluster without this field, which means it is necessarily running version 2 of corosync, the only cluster stack supported so far.
Published field | Cluster.cluster_token | The secret key used by xapi-clusterd when it talks to itself on other hosts
Published field | Cluster.other_config | Additional configuration
Published field | Cluster.pending_forget | Internal field used by Host.destroy to store the IP of cluster members marked as permanently dead but not yet removed
Published field | Cluster.pool_auto_join | True if automatically joining new pool members to the cluster. This will be `true` in the first release
Published field | Cluster.uuid | Unique identifier/object reference
Published field | Cluster_host.PIF | Reference to the PIF object
Published field | Cluster_host.cluster | Reference to the Cluster object
Published field | Cluster_host.enabled | Whether the cluster host believes that clustering should be enabled on this host. This field can be altered by calling the enable/disable message on a cluster host. Only enabled members run the underlying cluster stack. Disabled members are still considered a member of the cluster (see joined), and can be re-enabled by the user.
Published field | Cluster_host.host | Reference to the Host object
Published field | Cluster_host.joined | Whether the cluster host has joined the cluster. Contrary to enabled, a host that is not joined is not considered a member of the cluster, and hence enable and disable operations cannot be performed on this host.
Published field | Cluster_host.other_config | Additional configuration
Published field | Cluster_host.uuid | Unique identifier/object reference
Published field | probe_result.complete | True if this configuration is complete and can be used to call SR.create. False if it requires further iterative calls to SR.probe, to potentially narrow down on a configuration that can be used.
Published field | probe_result.configuration | Plugin-specific configuration which describes where and how to locate the storage repository. This may include the physical block device name, a remote NFS server and path or an RBD storage pool.
Published field | probe_result.extra_info | Additional plugin-specific information about this configuration, that might be of use for an API user. This can for example include the LUN or the WWPN.
Published field | probe_result.sr | Existing SR found for this configuration
Published field | sr_stat.clustered | Indicates whether the SR uses clustered local storage.
Published field | sr_stat.free_space | Free space on the backing storage (in bytes)
Published field | sr_stat.health | The health status of the SR.
Published field | sr_stat.name_description | Longer, human-readable description of the SR. Descriptions are generally only displayed by clients when the user is examining SRs in detail.
Published field | sr_stat.name_label | Short, human-readable label for the SR.
Published field | sr_stat.total_space | Total physical size of the backing storage (in bytes)
Published field | sr_stat.uuid | Uuid that uniquely identifies this SR, if one is available.
Published message | Cluster.create | Creates a Cluster object and one Cluster_host object as its first member
Published message | Cluster.destroy | Destroys a Cluster object and the one remaining Cluster_host member
Published message | Cluster.get_network | Returns the network used by the cluster for inter-host communication, i.e. the network shared by all cluster host PIFs
Published message | Cluster.pool_create | Attempt to create a Cluster from the entire pool
Published message | Cluster.pool_destroy | Attempt to destroy the Cluster_host objects for all hosts in the pool and then destroy the Cluster.
Published message | Cluster.pool_force_destroy | Attempt to force destroy the Cluster_host objects, and then destroy the Cluster.
Published message | Cluster.pool_resync | Resynchronise the cluster_host objects across the pool. Creates them where they need creating and then plugs them
Published message | Cluster_host.create | Add a new host to an existing cluster.
Published message | Cluster_host.destroy | Remove the host from an existing cluster. This operation is allowed even if a cluster host is not enabled.
Published message | Cluster_host.disable | Disable cluster membership for an enabled cluster host.
Published message | Cluster_host.enable | Enable cluster membership for a disabled cluster host.
Published message | Cluster_host.force_destroy | Remove a host from an existing cluster forcefully.
Published message | SR.probe_ext | Perform a backend-specific scan, using the given device_config. If the device_config is complete, then this will return a list of the SRs present of this type on the device, if any. If the device_config is partial, then a backend-specific scan will be performed, returning results that will guide the user in improving the device_config.
Changed field | Cluster.token_timeout | the unit is now seconds
Changed field | Cluster.token_timeout_coefficient | the unit is now seconds
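A minimal sketch of pool-wide cluster creation with these calls; the network name and timeout values are hypothetical, and note that per the change above the token timeouts are given in seconds in this release:

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")       # hypothetical address
session.xenapi.login_with_password("root", "secret")
net = session.xenapi.network.get_by_name_label("cluster-net")[0]  # hypothetical network
# One call creates the Cluster plus a Cluster_host per pool member:
cluster = session.xenapi.Cluster.pool_create(net, "corosync", 20.0, 1.0)
assert session.xenapi.Cluster.get_network(cluster) == net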
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/miami/index.html b/new-docs/xen-api/releases/miami/index.html index 03dc0c174..4dd22b31c 100644 --- a/new-docs/xen-api/releases/miami/index.html +++ b/new-docs/xen-api/releases/miami/index.html @@ -1,10 +1,10 @@ XenServer 4.1 :: XAPI Toolstack Developer Documentation -

            XenServer 4.1

            Code name: "miami".

            Changes

Change | Element | Description
Published class | Bond | A Network bond that combines physical network interfaces, also known as link aggregation
Published class | VLAN | A VLAN mux/demux
Published class | pool_patch | Pool-wide patches
Published field | Bond.master | The bonded interface
Published field | Bond.other_config | additional configuration
Published field | Bond.slaves | The interfaces which are part of this bond
Published field | PBD.other_config | additional configuration
Published field | PIF.DNS | Comma separated list of the IP addresses of the DNS servers to use
Published field | PIF.IP | IP address
Published field | PIF.VLAN_master_of | Indicates which VLAN this interface receives untagged traffic from
Published field | PIF.VLAN_slave_of | Indicates which VLANs this interface transmits tagged traffic to
Published field | PIF.bond_master_of | Indicates this PIF represents the results of a bond
Published field | PIF.bond_slave_of | Indicates which bond this interface is part of
Published field | PIF.currently_attached | true if this interface is online
Published field | PIF.gateway | IP gateway
Published field | PIF.ip_configuration_mode | Sets if and how this interface gets an IP address
Published field | PIF.management | Indicates whether the control software is listening for connections on this interface
Published field | PIF.netmask | IP netmask
Published field | PIF.other_config | Additional configuration
Published field | PIF.physical | true if this represents a physical network interface
Published field | SM.capabilities | capabilities of the SM plugin
Published field | SM.other_config | additional configuration
Published field | SR.sm_config | SM dependent data
Published field | VBD.unpluggable | true if this VBD will support hot-unplug
Published field | VDI.location | location information
Published field | VDI.sm_config | SM dependent data
Published field | VDI.xenstore_data | data to be inserted into the xenstore tree (/local/domain/0/backend/vbd/<domid>/<device-id>/sm-data) after the VDI is attached. This is generally set by the SM backends on vdi_attach.
Published field | VLAN.other_config | additional configuration
Published field | VLAN.tag | VLAN tag in use
Published field | VLAN.tagged_PIF | interface on which traffic is tagged
Published field | VLAN.untagged_PIF | interface on which traffic is untagged
Published field | VM.HVM_shadow_multiplier | multiplier applied to the amount of shadow that will be made available to the guest
Published field | VM.last_booted_record | Marshalled value containing VM record at time of last boot, updated dynamically to reflect the runtime state of the domain
Published field | VM.xenstore_data | data to be inserted into the xenstore tree (/local/domain/<domid>/vm-data) after the VM is created.
Published field | crashdump.other_config | additional configuration
Published field | host_crashdump.other_config | additional configuration
Published field | host_patch.other_config | additional configuration
Published field | host_patch.pool_patch | The patch applied
Published field | pool_patch.after_apply_guidance | What the client should do after this patch has been applied.
Published field | pool_patch.host_patches | The hosts this patch is applied to.
Published field | pool_patch.other_config | additional configuration
Published field | pool_patch.pool_applied | This patch should be applied across the entire pool
Published field | pool_patch.size | Size of the patch
Published field | pool_patch.version | Patch version number
Published field | session.other_config | additional configuration
Published field | task.other_config | additional configuration
Published message | Bond.create | Create an interface bond
Published message | Bond.destroy | Destroy an interface bond
Published message | PBD.set_device_config | Sets the PBD's device_config field
Published message | PIF.forget | Destroy the PIF object matching a particular network interface
Published message | PIF.introduce | Create a PIF object matching a particular network interface
Published message | PIF.plug | Attempt to bring up a physical interface
Published message | PIF.reconfigure_ip | Reconfigure the IP address settings for this interface
Published message | PIF.scan | Scan for physical interfaces on a host and create PIF objects to represent them
Published message | PIF.unplug | Attempt to bring down a physical interface
Published message | SR.probe | Perform a backend-specific scan, using the given device_config. If the device_config is complete, then this will return a list of the SRs present of this type on the device, if any. If the device_config is partial, then a backend-specific scan will be performed, returning results that will guide the user in improving the device_config.
Published message | SR.set_physical_size | Sets the SR's physical_size field
Published message | VDI.introduce | Create a new VDI record in the database only
Published message | VLAN.create | Create a VLAN mux/demuxer
Published message | VLAN.destroy | Destroy a VLAN mux/demuxer
Published message | VM.maximise_memory | Returns the maximum amount of guest memory which will fit, together with overheads, in the supplied amount of physical memory. If 'exact' is true then an exact calculation is performed using the VM's current settings. If 'exact' is false then a more conservative approximation is used
Published message | host.assert_can_evacuate | Check this host can be evacuated.
Published message | host.evacuate | Migrate all VMs off of this host, where possible.
Published message | host.get_system_status_capabilities |
Published message | host.local_management_reconfigure | Reconfigure the management network interface. Should only be used if Host.management_reconfigure is impossible because the network configuration is broken.
Published message | host.management_disable | Disable the management network interface
Published message | host.management_reconfigure | Reconfigure the management network interface
Published message | host.set_hostname_live | Sets the host name to the specified string. Both the API and lower-level system hostname are changed immediately.
Published message | host.syslog_reconfigure | Re-configure syslog logging
Published message | pool.designate_new_master | Perform an orderly handover of the role of master to the referenced host.
Published message | pool.disable_ha | Turn off High Availability mode
Published message | pool.enable_ha | Turn on High Availability mode
Published message | pool_patch.apply | Apply the selected patch to a host and return its output
Published message | pool_patch.clean | Removes the patch's files from the server
Published message | pool_patch.destroy | Removes the patch's files from all hosts in the pool, and removes the database entries. Only works on unapplied patches.
Published message | pool_patch.pool_apply | Apply the selected patch to all hosts in the pool and return a map of host_ref -> patch output
Published message | pool_patch.precheck | Execute the precheck stage of the selected patch on a host and return its output
Published message | session.local_logout | Log out of local session.
Published message | session.slave_local_login_with_password | Authenticate locally against a slave in emergency mode. Note the resulting sessions are only good for use on this host.
Deprecated message | PIF.create_VLAN | Replaced by VLAN.create
Deprecated message | PIF.destroy | Replaced by VLAN.destroy and Bond.destroy
Deprecated message | SR.make | Use SR.create instead
Deprecated message | host_patch.apply |
Deprecated message | host_patch.destroy |
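A hedged sketch of the VLAN and IP-reconfiguration calls above; the interface choice, tag, and addresses are hypothetical, and the VLAN tag (an int64) is passed as a string by the classic Python binding:

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")   # hypothetical address
session.xenapi.login_with_password("root", "secret")
phys = [p for p in session.xenapi.PIF.get_all()
        if session.xenapi.PIF.get_physical(p)][0]             # pick a physical PIF
net = session.xenapi.network.create({"name_label": "vlan42",
                                     "name_description": "", "MTU": "1500",
                                     "other_config": {}})
vlan = session.xenapi.VLAN.create(phys, "42", net)            # replaces PIF.create_VLAN
vpif = session.xenapi.VLAN.get_untagged_PIF(vlan)
session.xenapi.PIF.reconfigure_ip(vpif, "Static",
                                  "10.0.42.10", "255.255.255.0", "10.0.42.1", "")
session.xenapi.PIF.plug(vpif)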
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/midnight-ride/index.html b/new-docs/xen-api/releases/midnight-ride/index.html index c7bb333d3..25d82912d 100644 --- a/new-docs/xen-api/releases/midnight-ride/index.html +++ b/new-docs/xen-api/releases/midnight-ride/index.html @@ -1,10 +1,10 @@ XenServer 5.6 :: XAPI Toolstack Developer Documentation -

            XenServer 5.6

            Code name: "midnight-ride".

            Changes

Change | Element | Description
Published class | role | A set of permissions associated with a subject
Published class | secret | A secret
Published field | VM.bios_strings | BIOS strings
Published field | VM.children | List pointing to all the children of this VM
Published field | VM.parent | Ref pointing to the parent of this VM
Published field | VM.snapshot_info | Human-readable information concerning this snapshot
Published field | VM.snapshot_metadata | Encoded information about the VM's metadata this is a snapshot of
Published field | host.bios_strings | BIOS strings
Published field | host.cpu_info | Details about the physical CPUs on this host
Published field | host.edition | Product edition
Published field | host.license_server | Contact information of the license server
Published field | host.power_on_config | The power on config
Published field | host.power_on_mode | The power on mode
Published field | network.MTU | MTU in octets
Published field | pool.redo_log_enabled | true if a redo-log is to be used other than when HA is enabled, false otherwise
Published field | pool.redo_log_vdi | indicates the VDI to use for the redo-log other than when HA is enabled
Published field | pool.restrictions | Pool-wide restrictions currently in effect
Published field | pool.vswitch_controller | the IP address of the vswitch controller.
Published field | role.name_description | what this role is for
Published field | role.name_label | a short user-friendly name for the role
Published field | role.subroles | a list of pointers to other roles or permissions
Published field | session.auth_user_name | the subject name of the user that was externally authenticated. If a session instance has is_local_superuser set, then the value of this field is undefined.
Published field | session.parent | references the parent session that created this session
Published field | session.rbac_permissions | list with all RBAC permissions for this session
Published field | session.tasks | list of tasks created using the current session
Published field | subject.roles | the roles associated with this subject
Published message | VM.checkpoint | Checkpoints the specified VM, making a new VM. Checkpoint automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write) and saves the memory image as well.
Published message | VM.compute_memory_overhead | Computes the virtualization memory overhead of a VM.
Published message | VM.copy_bios_strings | Copy the BIOS strings from the given host to this VM
Published message | VM.get_cooperative | Return true if the VM is currently 'co-operative', i.e. is expected to reach a balloon target and actually has done
Published message | VM.revert | Reverts the specified VM to a previous state.
Published message | VM.set_HVM_shadow_multiplier | Set the shadow memory multiplier on a halted VM
Published message | VM.set_VCPUs_at_startup | Set the number of startup VCPUs for a halted VM
Published message | VM.set_VCPUs_max | Set the maximum number of VCPUs for a halted VM
Published message | VM.set_memory_dynamic_max | Set the value of the memory_dynamic_max field
Published message | VM.set_memory_dynamic_min | Set the value of the memory_dynamic_min field
Published message | VM.set_memory_dynamic_range | Set the minimum and maximum amounts of physical memory the VM is allowed to use.
Published message | VM.set_memory_limits | Set the memory limits of this VM.
Published message | VM.set_memory_static_min | Set the value of the memory_static_min field
Published message | VM.set_memory_static_range | Set the static (i.e. boot-time) range of virtual memory that the VM is allowed to use.
Published message | host.apply_edition | Change to another edition, or reactivate the current edition after a license has expired. This may be subject to the successful checkout of an appropriate license.
Published message | host.compute_memory_overhead | Computes the virtualization memory overhead of a host.
Published message | host.get_uncooperative_resident_VMs | Return a set of VMs which are not co-operating with the host's memory control system
Published message | host.refresh_pack_info | Refresh the list of installed Supplemental Packs.
Published message | host.reset_cpu_features | Remove the feature mask, such that after a reboot all features of the CPU are enabled.
Published message | host.set_cpu_features | Set the CPU features to be used after a reboot, if the given features string is valid.
Published message | pool.disable_redo_log | Disable the redo log if in use, unless HA is enabled.
Published message | pool.enable_redo_log | Enable the redo log on the given SR and start using it, unless HA is enabled.
Published message | pool.set_vswitch_controller | Set the IP address of the vswitch controller.
Published message | role.get_by_permission | This call returns a list of roles given a permission
Published message | role.get_by_permission_name_label | This call returns a list of roles given a permission name
Published message | role.get_permissions | This call returns a list of permissions given a role
Published message | role.get_permissions_name_label | This call returns a list of permission names given a role
Published message | subject.add_to_roles | This call adds a new role to a subject
Published message | subject.get_permissions_name_label | This call returns a list of permission names given a subject
Published message | subject.remove_from_roles | This call removes a role from a subject
Deprecated class | host_cpu | Deprecated in favour of the Host.cpu_info field
Deprecated field | VM.memory_target |
Deprecated field | host_metrics.memory_free | Will be disabled in favour of RRD
Deprecated message | VM.set_memory_target_live |
Deprecated message | VM.wait_memory_target_live |
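A brief sketch of the role plumbing introduced here; the user name is hypothetical, while 'read-only' is one of the built-in role labels:

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")       # hypothetical address
session.xenapi.login_with_password("root", "secret")
sid = session.xenapi.auth.get_subject_identifier("EXAMPLE\\bob")  # hypothetical user
subj = session.xenapi.subject.create({"subject_identifier": sid, "other_config": {}})
ro = session.xenapi.role.get_by_name_label("read-only")[0]        # built-in role
session.xenapi.subject.add_to_roles(subj, ro)
print(session.xenapi.subject.get_permissions_name_label(subj))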
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/naples/index.html b/new-docs/xen-api/releases/naples/index.html index a21f8aab4..68795fabe 100644 --- a/new-docs/xen-api/releases/naples/index.html +++ b/new-docs/xen-api/releases/naples/index.html @@ -1,10 +1,10 @@ Citrix Hypervisor 8.0 :: XAPI Toolstack Developer Documentation -

            Citrix Hypervisor 8.0

            Code name: "naples".

            Changes

Change | Element | Description
Published field | VM.NVRAM | initial value for guest NVRAM (containing UEFI variables, etc.). Cannot be changed while the VM is running
Published message | VM.add_to_NVRAM |
Published message | VM.remove_from_NVRAM |
Published message | VM.set_NVRAM |
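A hedged sketch of seeding guest NVRAM before first boot. The VM name is hypothetical, the key shown ('EFI-variables-backend') is an assumption about a supported key, and per the field description the map can only be changed while the VM is halted:

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")  # hypothetical address
session.xenapi.login_with_password("root", "secret")
vm = session.xenapi.VM.get_by_name_label("uefi-guest")[0]    # hypothetical halted VM
session.xenapi.VM.set_NVRAM(vm, {"EFI-variables-backend": "xapidb"})  # assumed key/value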
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/nile-preview/index.html b/new-docs/xen-api/releases/nile-preview/index.html index c2e1655eb..5ac1b6ba6 100644 --- a/new-docs/xen-api/releases/nile-preview/index.html +++ b/new-docs/xen-api/releases/nile-preview/index.html @@ -1,10 +1,10 @@ XenServer 8 Preview :: XAPI Toolstack Developer Documentation -

            XenServer 8 Preview

            Code name: "nile-preview".

            No changes.

            \ No newline at end of file diff --git a/new-docs/xen-api/releases/orlando-update-1/index.html b/new-docs/xen-api/releases/orlando-update-1/index.html index 2e21aa549..c265be6d7 100644 --- a/new-docs/xen-api/releases/orlando-update-1/index.html +++ b/new-docs/xen-api/releases/orlando-update-1/index.html @@ -1,10 +1,10 @@ XenServer 5.0 Update 1 :: XAPI Toolstack Developer Documentation -

            XenServer 5.0 Update 1

            Code name: "orlando-update-1".

            Changes

Change | Element | Description
Published message | pool.ha_prevent_restarts_for | When this call returns the VM restart logic will not run for the requested number of seconds. If the argument is zero then the restart thread is immediately unblocked
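For example, a client performing brief maintenance that could look like a host failure might pause the restart planner first; a minimal sketch (the seconds argument is an int64, passed as a string by the classic Python binding):

import XenAPI

session = XenAPI.Session("https://pool-master.example.com")  # hypothetical address
session.xenapi.login_with_password("root", "secret")
session.xenapi.pool.ha_prevent_restarts_for("60")            # hold off restarts for 60 s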
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/orlando/index.html b/new-docs/xen-api/releases/orlando/index.html index 307b50583..f7c3079a1 100644 --- a/new-docs/xen-api/releases/orlando/index.html +++ b/new-docs/xen-api/releases/orlando/index.html @@ -1,10 +1,10 @@ XenServer 5.0 :: XAPI Toolstack Developer Documentation -

            XenServer 5.0

            Code name: "orlando".

            Changes

            ChangeElementDescription
            Published classblobA placeholder for a binary blob
            Published classdata_sourceData sources for logging in RRDs
            Published classmessageAn message for the attention of the administrator
            Published fieldPIF.disallow_unplugPrevent this PIF from being unplugged; set this to notify the management tool-stack that the PIF has a special use and should not be unplugged under any circumstances (e.g. because you're running storage traffic over it)
            Published fieldPIF_metrics.other_configadditional configuration
            Published fieldSM.driver_filenamefilename of the storage driver
            Published fieldSR.blobsBinary blobs associated with this SR
            Published fieldSR.tagsuser-specified tags for categorization purposes
            Published fieldVBD_metrics.other_configadditional configuration
            Published fieldVDI.is_a_snapshottrue if this is a snapshot.
            Published fieldVDI.snapshot_ofRef pointing to the VDI this snapshot is of.
            Published fieldVDI.snapshot_timeDate/time when this snapshot was created.
            Published fieldVDI.snapshotsList pointing to all the VDIs snapshots.
            Published fieldVDI.tagsuser-specified tags for categorization purposes
            Published fieldVIF_metrics.other_configadditional configuration
            Published fieldVM.blobsBinary blobs associated with this VM
            Published fieldVM.blocked_operationsList of operations which have been explicitly blocked and an error code
            Published fieldVM.ha_always_runif true then the system will attempt to keep the VM running as much as possible.
            Published fieldVM.ha_restart_priorityhas possible values: "best-effort" meaning "try to restart this VM if possible but don't consider the Pool to be overcommitted if this is not possible"; "restart" meaning "this VM should be restarted"; "" meaning "do not try to restart this VM"
            Published fieldVM.is_a_snapshottrue if this is a snapshot. Snapshotted VMs can never be started, they are used only for cloning other VMs
            Published fieldVM.snapshot_ofRef pointing to the VM this snapshot is of.
            Published fieldVM.snapshot_timeDate/time when this snapshot was created.
            Published fieldVM.snapshotsList pointing to all the VM snapshots.
            Published fieldVM.tagsuser-specified tags for categorization purposes
            Published fieldVM.transportable_snapshot_idTransportable ID of the snapshot VM
            Published fieldVM_guest_metrics.liveTrue if the guest is sending heartbeat messages via the guest agent
            Published fieldVM_guest_metrics.other_configadditional configuration
            Published fieldVM_metrics.other_configadditional configuration
            Published fieldhost.blobsBinary blobs associated with this host
            Published fieldhost.ha_network_peersThe set of hosts visible via the network from this host
            Published fieldhost.ha_statefilesThe set of statefiles accessible from this host
            Published fieldhost.tagsuser-specified tags for categorization purposes
            Published fieldhost_cpu.other_configadditional configuration
            Published fieldhost_metrics.other_configadditional configuration
            Published fieldmessage.clsThe class of the object this message is associated with
            Published fieldnetwork.blobsBinary blobs associated with this network
            Published fieldnetwork.tagsuser-specified tags for categorization purposes
            Published fieldpool.blobsBinary blobs associated with this pool
            Published fieldpool.gui_configgui-specific configuration for pool
            Published fieldpool.ha_allow_overcommitIf set to false then operations which would cause the Pool to become overcommitted will be blocked.
            Published fieldpool.ha_configurationThe current HA configuration
            Published fieldpool.ha_enabledtrue if HA is enabled on the pool, false otherwise
            Published fieldpool.ha_host_failures_to_tolerateNumber of host failures to tolerate before the Pool is declared to be overcommitted
            Published fieldpool.ha_overcommittedTrue if the Pool is considered to be overcommitted i.e. if there exist insufficient physical resources to tolerate the configured number of host failures
            Published fieldpool.ha_plan_exists_forNumber of future host failures we have managed to find a plan for. Once this reaches zero any future host failures will cause the failure of protected VMs.
            Published fieldpool.ha_statefilesHA statefile VDIs in use
            Published fieldpool.tagsuser-specified tags for categorization purposes
            Published fieldtask.subtask_ofRef pointing to the task this is a substask of.
            Published fieldtask.subtasksList pointing to all the substasks.
            Published fielduser.other_configadditional configuration
            Published messagePIF.db_forgetDestroy a PIF database record.
            Published messagePIF.db_introduceCreate a new PIF record in the database only
            Published messagePIF.set_disallow_unplugSet whether unplugging the PIF is allowed
            Published messageSR.assert_can_host_ha_statefileReturns successfully if the given SR can host an HA statefile. Otherwise returns an error to explain why not
            Published messageSR.create_new_blobCreate a placeholder for a named binary blob of data that is associated with this SR
            Published messageVM.assert_agileReturns an error if the VM is not considered agile e.g. because it is tied to a resource local to a host
            Published messageVM.create_new_blobCreate a placeholder for a named binary blob of data that is associated with this VM
            Published messageVM.forget_data_source_archivesForget the recorded statistics related to the specified data source
            Published messageVM.get_data_sources
            Published messageVM.query_data_sourceQuery the latest value of the specified data source
            Published messageVM.record_data_sourceStart recording the specified data source
            Published messageVM.set_ha_always_runSet the value of the ha_always_run
            Published messageVM.set_ha_restart_prioritySet the value of the ha_restart_priority field
            Published messageVM.set_memory_static_maxSet the value of the memory_static_max field
            Published messageVM.snapshotSnapshots the specified VM, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write).
            Published messageVM.snapshot_with_quiesceSnapshots the specified VM with quiesce, making a new VM. Snapshot automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write).
            Published messageVM.wait_memory_target_liveWait for a running VM to reach its current memory target
            Published messageblob.createCreate a placeholder for a binary blob
            Published messageblob.destroy
            Published messagehost.backup_rrdsThis causes the RRDs to be backed up to the master
            Published messagehost.call_pluginCall an API plugin on this host
            Published messagehost.compute_free_memoryComputes the amount of free memory on the host.
            Published messagehost.create_new_blobCreate a placeholder for a named binary blob of data that is associated with this host
            Published messagehost.emergency_ha_disableThis call disables HA on the local host. This should only be used with extreme care.
            Published messagehost.forget_data_source_archivesForget the recorded statistics related to the specified data source
            Published messagehost.get_data_sources
            Published messagehost.get_servertimeThis call queries the host's clock for the current time
            Published messagehost.get_vms_which_prevent_evacuationReturn a set of VMs which prevent the host being evacuated, with per-VM error codes
            Published messagehost.power_onAttempt to power-on the host (if the capability exists).
            Published messagehost.query_data_sourceQuery the latest value of the specified data source
            Published messagehost.record_data_sourceStart recording the specified data source
            Published messagehost.shutdown_agentShuts the agent down after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called.
            Published messagehost.sync_dataThis causes the synchronisation of the non-database data (messages, RRDs and so on) stored on the master to be synchronised with the host
            Published messagemessage.create
            Published messagemessage.destroy
            Published messagemessage.get
            Published messagemessage.get_all
            Published messagemessage.get_all_records
            Published messagemessage.get_all_records_where
            Published messagemessage.get_by_uuid
            Published messagemessage.get_record
            Published messagemessage.get_since
            Published messagenetwork.create_new_blobCreate a placeholder for a named binary blob of data that is associated with this pool
            Published messagepool.create_new_blobCreate a placeholder for a named binary blob of data that is associated with this pool
            Published messagepool.ha_compute_hypothetical_max_host_failures_to_tolerateReturns the maximum number of host failures we could tolerate before we would be unable to restart the provided VMs
            Published messagepool.ha_compute_max_host_failures_to_tolerateReturns the maximum number of host failures we could tolerate before we would be unable to restart configured VMs
            Published messagepool.ha_compute_vm_failover_planReturn a VM failover plan assuming a given subset of hosts fail
            Published messagepool.ha_failover_plan_existsReturns true if a VM failover plan exists for up to 'n' host failures
            Published messagepool.set_ha_host_failures_to_tolerateSet the maximum number of host failures to consider in the HA VM restart planner
            Removed fieldVM_guest_metrics.disksNo data
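            Many of the calls in the table above are typically driven through the Python XenAPI binding that ships with the toolstack. Below is a minimal sketch of calling host.call_plugin, assuming a reachable pool coordinator; the address, credentials, and the plugin/function names ("echo"/"main") are illustrative placeholders.

```python
import XenAPI

# Hypothetical coordinator address and credentials.
session = XenAPI.Session("https://pool-coordinator.example.com")
session.xenapi.login_with_password("root", "secret", "1.0", "docs-example")
try:
    host = session.xenapi.host.get_all()[0]
    # Runs the function "main" of the API plugin "echo" installed on the
    # host and returns its string result; both names are illustrative.
    result = session.xenapi.host.call_plugin(host, "echo", "main", {"arg1": "value"})
    print(result)
finally:
    session.xenapi.session.logout()
```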
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/quebec/index.html b/new-docs/xen-api/releases/quebec/index.html index 1a2704bcb..014b51d2c 100644 --- a/new-docs/xen-api/releases/quebec/index.html +++ b/new-docs/xen-api/releases/quebec/index.html @@ -1,10 +1,10 @@ Citrix Hypervisor 8.1 :: XAPI Toolstack Developer Documentation -

            Citrix Hypervisor 8.1

            Code name: "quebec".

            Changes

            Change | Element | Description
            Published fieldBond.auto_update_mactrue if the MAC was taken from the primary slave when the bond was created, and false if the client specified the MAC
            Published fieldVGPU.PCIDevice passed through to the VM, either as a full device or as an SR-IOV virtual function
            Published fieldVGPU.extra_argsExtra arguments for the vGPU, passed to demu
            Published fieldVGPU_type.compatible_types_in_vmList of VGPU types which are compatible in one VM
            Published fieldhost.uefi_certificatesThe UEFI certificates allowing Secure Boot
            Published fieldpool.uefi_certificatesThe UEFI certificates allowing Secure Boot
            Published messagehost.set_uefi_certificatesSets the UEFI certificates on a host
            Changed messageVM.assert_can_boot_hereDoes additional compatibility checks when the VM power state is not halted (e.g. CPUID checks). Use this before calling VM.resume or VM.pool_migrate (see the sketch after this table).
            Removed messageVM.snapshot_with_quiesceVSS support has been removed
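            The strengthened VM.assert_can_boot_here is intended as a pre-flight check before VM.resume or VM.pool_migrate. A minimal sketch, assuming an authenticated XenAPI session object `session` (as in the other examples) and a suspended VM reference `vm`, both illustrative:

```python
import XenAPI  # needed for the XenAPI.Failure exception type

host = session.xenapi.host.get_all()[0]
try:
    # Raises a XenAPI.Failure (e.g. VM_INCOMPATIBLE_WITH_THIS_HOST) if the
    # suspended VM's CPUID state cannot be satisfied by this host.
    session.xenapi.VM.assert_can_boot_here(vm, host)
    session.xenapi.VM.resume_on(vm, host, False, False)  # start_paused, force
except XenAPI.Failure as err:
    print("Cannot resume on this host:", err.details)
```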
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/rio/index.html b/new-docs/xen-api/releases/rio/index.html index dd11d42ac..7aff98724 100644 --- a/new-docs/xen-api/releases/rio/index.html +++ b/new-docs/xen-api/releases/rio/index.html @@ -1,10 +1,10 @@ XenServer 4.0 :: XAPI Toolstack Developer Documentation -

            XenServer 4.0

            Code name: "rio".

            Changes

            Change | Element | Description
            Published classPBDThe physical block devices through which hosts access SRs
            Published classPIFA physical network interface (note separate VLANs are represented as several PIFs)
            Published classPIF_metricsThe metrics associated with a physical network interface
            Published classSMA storage manager plugin
            Published classSRA storage repository
            Published classVBDA virtual block device
            Published classVBD_metricsThe metrics associated with a virtual block device
            Published classVDIA virtual disk image
            Published classVIFA virtual network interface
            Published classVIF_metricsThe metrics associated with a virtual network device
            Published classVMA virtual machine (or 'guest').
            Published classVM_guest_metricsThe metrics reported by the guest (as opposed to inferred from outside)
            Published classVM_metricsThe metrics associated with a VM
            Published classconsoleA console
            Published classcrashdumpA VM crashdump
            Published classeventAsynchronous event registration and handling
            Published classhostA physical host
            Published classhost_cpuA physical CPU
            Published classhost_crashdumpRepresents a host crash dump
            Published classhost_metricsThe metrics associated with a host
            Published classhost_patchRepresents a patch stored on a server
            Published classnetworkA virtual network
            Published classpoolPool-wide information
            Published classsessionA session
            Published classtaskA long-running asynchronous task
            Published classuserA user of the system
            Published fieldBond.uuidUnique identifier/object reference
            Published fieldCluster.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldCluster.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldCluster_host.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldCluster_host.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldDR_task.introduced_SRsAll SRs introduced by this appliance
            Published fieldDR_task.uuidUnique identifier/object reference
            Published fieldFeature.name_descriptiona notes field containing human-readable description
            Published fieldFeature.name_labela human-readable name
            Published fieldLVHD.uuidUnique identifier/object reference
            Published fieldObserver.name_descriptiona notes field containing human-readable description
            Published fieldObserver.name_labela human-readable name
            Published fieldPBD.SRthe storage repository that the pbd realises
            Published fieldPBD.currently_attachedis the SR currently attached on this host?
            Published fieldPBD.device_configa config string to string map that is provided to the host's SR-backend-driver
            Published fieldPBD.hostphysical machine on which the pbd is available
            Published fieldPBD.uuidUnique identifier/object reference
            Published fieldPIF.MACethernet MAC address of physical interface
            Published fieldPIF.MTUMTU in octets
            Published fieldPIF.VLANVLAN tag for all traffic passing through this interface
            Published fieldPIF.devicemachine-readable name of the interface (e.g. eth0)
            Published fieldPIF.hostphysical machine to which this pif is connected
            Published fieldPIF.metricsmetrics associated with this PIF
            Published fieldPIF.networkvirtual network to which this pif is connected
            Published fieldPIF.uuidUnique identifier/object reference
            Published fieldPIF_metrics.carrierReport if the PIF got a carrier or not
            Published fieldPIF_metrics.device_idReport device ID
            Published fieldPIF_metrics.device_nameReport device name
            Published fieldPIF_metrics.duplexFull duplex capability of the link (if available)
            Published fieldPIF_metrics.io_read_kbsRead bandwidth (KiB/s)
            Published fieldPIF_metrics.io_write_kbsWrite bandwidth (KiB/s)
            Published fieldPIF_metrics.last_updatedTime at which this information was last updated
            Published fieldPIF_metrics.pci_bus_pathPCI bus path of the pif (if available)
            Published fieldPIF_metrics.speedSpeed of the link in Mbit/s (if available)
            Published fieldPIF_metrics.uuidUnique identifier/object reference
            Published fieldPIF_metrics.vendor_idReport vendor ID
            Published fieldPIF_metrics.vendor_nameReport vendor name
            Published fieldRepository.name_descriptiona notes field containing human-readable description
            Published fieldRepository.name_labela human-readable name
            Published fieldSM.configurationnames and descriptions of device config keys
            Published fieldSM.copyrightEntity which owns the copyright of this plugin
            Published fieldSM.name_descriptiona notes field containing human-readable description
            Published fieldSM.name_labela human-readable name
            Published fieldSM.required_api_versionMinimum SM API version required on the server
            Published fieldSM.typeSR.type
            Published fieldSM.uuidUnique identifier/object reference
            Published fieldSM.vendorVendor who created this plugin
            Published fieldSM.versionVersion of the plugin
            Published fieldSR.PBDsdescribes how particular hosts can see this storage repository
            Published fieldSR.VDIsall virtual disks known to this storage repository
            Published fieldSR.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldSR.content_typethe type of the SR's content, if required (e.g. ISOs)
            Published fieldSR.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldSR.name_descriptiona notes field containing human-readable description
            Published fieldSR.name_labela human-readable name
            Published fieldSR.other_configadditional configuration
            Published fieldSR.physical_sizetotal physical size of the repository (in bytes)
            Published fieldSR.physical_utilisationphysical space currently utilised on this storage repository (in bytes). Note that for sparse disk formats, physical_utilisation may be less than virtual_allocation
            Published fieldSR.sharedtrue if this SR is (capable of being) shared between multiple hosts
            Published fieldSR.typetype of the storage repository
            Published fieldSR.uuidUnique identifier/object reference
            Published fieldSR.virtual_allocationsum of virtual_sizes of all VDIs in this storage repository (in bytes)
            Published fieldVBD.VDIthe virtual disk
            Published fieldVBD.VMthe virtual machine
            Published fieldVBD.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVBD.bootabletrue if this VBD is bootable
            Published fieldVBD.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVBD.currently_attachedis the device currently attached (erased on reboot)
            Published fieldVBD.devicedevice seen by the guest e.g. hda1
            Published fieldVBD.emptyif true this represents an empty drive
            Published fieldVBD.metricsmetrics associated with this VBD
            Published fieldVBD.modethe mode the VBD should be mounted with
            Published fieldVBD.other_configadditional configuration
            Published fieldVBD.qos_algorithm_paramsparameters for chosen QoS algorithm
            Published fieldVBD.qos_algorithm_typeQoS algorithm to use
            Published fieldVBD.qos_supported_algorithmssupported QoS algorithms for this VBD
            Published fieldVBD.runtime_propertiesDevice runtime properties
            Published fieldVBD.status_codeerror/success code associated with last attach-operation (erased on reboot)
            Published fieldVBD.status_detailerror/success information associated with last attach-operation status (erased on reboot)
            Published fieldVBD.storage_locktrue if a storage level lock was acquired
            Published fieldVBD.typehow the VBD will appear to the guest (e.g. disk or CD)
            Published fieldVBD.userdeviceuser-friendly device name, e.g. 0, 1, 2, etc.
            Published fieldVBD.uuidUnique identifier/object reference
            Published fieldVBD_metrics.io_read_kbsRead bandwidth (KiB/s)
            Published fieldVBD_metrics.io_write_kbsWrite bandwidth (KiB/s)
            Published fieldVBD_metrics.last_updatedTime at which this information was last updated
            Published fieldVBD_metrics.uuidUnique identifier/object reference
            Published fieldVDI.SRstorage repository in which the VDI resides
            Published fieldVDI.VBDslist of vbds that refer to this disk
            Published fieldVDI.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVDI.crash_dumpslist of crash dumps that refer to this disk
            Published fieldVDI.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVDI.managed
            Published fieldVDI.missingtrue if SR scan operation reported this VDI as not present on disk
            Published fieldVDI.name_descriptiona notes field containing human-readable description
            Published fieldVDI.name_labela human-readable name
            Published fieldVDI.other_configadditional configuration
            Published fieldVDI.parentThis field is always null. Deprecated
            Published fieldVDI.physical_utilisationamount of physical space that the disk image is currently taking up on the storage repository (in bytes)
            Published fieldVDI.read_onlytrue if this disk may ONLY be mounted read-only
            Published fieldVDI.sharabletrue if this disk may be shared
            Published fieldVDI.storage_locktrue if this disk is locked at the storage level
            Published fieldVDI.typetype of the VDI
            Published fieldVDI.uuidUnique identifier/object reference
            Published fieldVDI.virtual_sizesize of disk as presented to the guest (in bytes). Note that, depending on storage backend type, requested size may not be respected exactly
            Published fieldVIF.MACethernet MAC address of virtual interface, as exposed to guest
            Published fieldVIF.MTUMTU in octets
            Published fieldVIF.VMvirtual machine to which this vif is connected
            Published fieldVIF.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVIF.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVIF.currently_attachedis the device currently attached (erased on reboot)
            Published fieldVIF.deviceorder in which VIF backends are created by xapi
            Published fieldVIF.metricsmetrics associated with this VIF
            Published fieldVIF.networkvirtual network to which this vif is connected
            Published fieldVIF.other_configadditional configuration
            Published fieldVIF.qos_algorithm_paramsparameters for chosen QoS algorithm
            Published fieldVIF.qos_algorithm_typeQoS algorithm to use
            Published fieldVIF.qos_supported_algorithmssupported QoS algorithms for this VIF
            Published fieldVIF.runtime_propertiesDevice runtime properties
            Published fieldVIF.status_codeerror/success code associated with last attach-operation (erased on reboot)
            Published fieldVIF.status_detailerror/success information associated with last attach-operation status (erased on reboot)
            Published fieldVIF.uuidUnique identifier/object reference
            Published fieldVIF_metrics.io_read_kbsRead bandwidth (KiB/s)
            Published fieldVIF_metrics.io_write_kbsWrite bandwidth (KiB/s)
            Published fieldVIF_metrics.last_updatedTime at which this information was last updated
            Published fieldVIF_metrics.uuidUnique identifier/object reference
            Published fieldVLAN.uuidUnique identifier/object reference
            Published fieldVM.HVM_boot_paramsHVM boot params
            Published fieldVM.HVM_boot_policyHVM boot policy
            Published fieldVM.PCI_busPCI bus path for pass-through devices
            Published fieldVM.PV_argskernel command-line arguments
            Published fieldVM.PV_bootloadername of or path to bootloader
            Published fieldVM.PV_bootloader_argsmiscellaneous arguments for the bootloader
            Published fieldVM.PV_kernelpath to the kernel
            Published fieldVM.PV_legacy_argsto make Zurich guests boot
            Published fieldVM.PV_ramdiskpath to the initrd
            Published fieldVM.VBDsvirtual block devices
            Published fieldVM.VCPUs_at_startupBoot number of VCPUs
            Published fieldVM.VCPUs_maxMax number of VCPUs
            Published fieldVM.VCPUs_paramsconfiguration parameters for the selected VCPU policy
            Published fieldVM.VIFsvirtual network interfaces
            Published fieldVM.VTPMsvirtual TPMs
            Published fieldVM.VUSBsvirtual usb devices
            Published fieldVM.actions_after_crashaction to take if the guest crashes
            Published fieldVM.actions_after_rebootaction to take after the guest has rebooted itself
            Published fieldVM.actions_after_shutdownaction to take after the guest has shutdown itself
            Published fieldVM.affinityA host which the VM has some affinity for (or NULL). This is used as a hint to the start call when it decides where to run the VM. Resource constraints may cause the VM to be started elsewhere.
            Published fieldVM.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVM.appliancethe appliance to which this VM belongs
            Published fieldVM.consolesvirtual console devices
            Published fieldVM.crash_dumpscrash dumps associated with this VM
            Published fieldVM.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVM.domarchDomain architecture (if available, null string otherwise)
            Published fieldVM.domiddomain ID (if available, -1 otherwise)
            Published fieldVM.guest_metricsmetrics associated with the running guest
            Published fieldVM.is_a_templatetrue if this is a template. Template VMs can never be started; they are used only for cloning other VMs
            Published fieldVM.is_control_domaintrue if this is a control domain (domain 0 or a driver domain)
            Published fieldVM.last_boot_CPU_flagsdescribes the CPU flags on which the VM was last booted
            Published fieldVM.memory_dynamic_maxDynamic maximum (bytes)
            Published fieldVM.memory_dynamic_minDynamic minimum (bytes)
            Published fieldVM.memory_overheadVirtualization memory overhead (bytes).
            Published fieldVM.memory_static_maxStatically-set (i.e. absolute) maximum (bytes). The value of this field at VM start time acts as a hard limit of the amount of memory a guest can use. New values only take effect on reboot.
            Published fieldVM.memory_static_minStatically-set (i.e. absolute) minimum (bytes). The value of this field indicates the least amount of memory this VM can boot with without crashing.
            Published fieldVM.memory_targetDynamically-set memory target (bytes). The value of this field indicates the current target for memory available to this VM.
            Published fieldVM.metricsmetrics associated with this VM
            Published fieldVM.name_descriptiona notes field containing human-readable description
            Published fieldVM.name_labela human-readable name
            Published fieldVM.other_configadditional configuration
            Published fieldVM.platformplatform-specific configuration
            Published fieldVM.power_stateCurrent power state of the machine
            Published fieldVM.recommendationsAn XML specification of recommended values and ranges for properties of this VM
            Published fieldVM.resident_onthe host the VM is currently resident on
            Published fieldVM.scheduled_to_be_resident_onthe host on which the VM is due to be started/resumed/migrated. This acts as a memory reservation indicator
            Published fieldVM.suspend_VDIThe VDI that a suspend image is stored on. (Only has meaning if VM is currently suspended)
            Published fieldVM.user_versionCreators of VMs and templates may store version information here.
            Published fieldVM.uuidUnique identifier/object reference
            Published fieldVMPP.name_descriptiona notes field containing human-readable description
            Published fieldVMPP.name_labela human-readable name
            Published fieldVMSS.VMsall VMs attached to this snapshot schedule
            Published fieldVMSS.enabledenable or disable this snapshot schedule
            Published fieldVMSS.frequencyfrequency of taking snapshot from snapshot schedule
            Published fieldVMSS.last_run_timetime of the last snapshot
            Published fieldVMSS.name_descriptiona notes field containing human-readable description
            Published fieldVMSS.name_labela human-readable name
            Published fieldVMSS.retained_snapshotsmaximum number of snapshots that should be stored at any time
            Published fieldVMSS.scheduleschedule of the snapshot containing 'hour', 'min', 'days'. Date/time-related information is in Local Timezone
            Published fieldVMSS.typetype of the snapshot schedule
            Published fieldVMSS.uuidUnique identifier/object reference
            Published fieldVM_appliance.VMsall VMs in this appliance
            Published fieldVM_appliance.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVM_appliance.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVM_appliance.name_descriptiona notes field containing human-readable description
            Published fieldVM_appliance.name_labela human-readable name
            Published fieldVM_appliance.uuidUnique identifier/object reference
            Published fieldVM_guest_metrics.PV_drivers_up_to_datetrue if the PV drivers appear to be up to date
            Published fieldVM_guest_metrics.PV_drivers_versionversion of the PV drivers
            Published fieldVM_guest_metrics.disksDisk configuration/free space
            Published fieldVM_guest_metrics.last_updatedTime at which this information was last updated
            Published fieldVM_guest_metrics.memoryfree/used/total
            Published fieldVM_guest_metrics.networksnetwork configuration
            Published fieldVM_guest_metrics.os_versionversion of the OS
            Published fieldVM_guest_metrics.otheranything else
            Published fieldVM_guest_metrics.uuidUnique identifier/object reference
            Published fieldVM_metrics.VCPUs_CPUVCPU to PCPU map
            Published fieldVM_metrics.VCPUs_flagsCPU flags (blocked,online,running)
            Published fieldVM_metrics.VCPUs_numberCurrent number of VCPUs
            Published fieldVM_metrics.VCPUs_paramsThe live equivalent to VM.VCPUs_params
            Published fieldVM_metrics.VCPUs_utilisationUtilisation for all of the guest's current VCPUs
            Published fieldVM_metrics.install_timeTime at which the VM was installed
            Published fieldVM_metrics.last_updatedTime at which this information was last updated
            Published fieldVM_metrics.memory_actualGuest's actual memory (bytes)
            Published fieldVM_metrics.start_timeTime at which this VM was last booted
            Published fieldVM_metrics.stateThe state of the guest, e.g. blocked, dying, etc.
            Published fieldVM_metrics.uuidUnique identifier/object reference
            Published fieldVTPM.VMThe virtual machine the TPM is attached to
            Published fieldVTPM.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVTPM.backendThe domain where the backend is located (unused)
            Published fieldVTPM.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVTPM.uuidUnique identifier/object reference
            Published fieldVUSB.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldVUSB.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldVUSB.currently_attachedis the device currently attached
            Published fieldblob.last_updatedTime at which the data in the blob was last updated
            Published fieldblob.mime_typeThe mime type associated with this object. Defaults to 'application/octet-stream' if the empty string is supplied
            Published fieldblob.name_descriptiona notes field containing human-readable description
            Published fieldblob.name_labela human-readable name
            Published fieldblob.sizeSize of the binary data, in bytes
            Published fieldblob.uuidUnique identifier/object reference
            Published fieldconsole.VMVM to which this console is attached
            Published fieldconsole.locationURI for the console service
            Published fieldconsole.other_configadditional configuration
            Published fieldconsole.protocolthe protocol used by this console
            Published fieldconsole.uuidUnique identifier/object reference
            Published fieldcrashdump.VDIthe virtual disk
            Published fieldcrashdump.VMthe virtual machine
            Published fieldcrashdump.uuidUnique identifier/object reference
            Published fielddata_source.enabledtrue if the data source is being logged
            Published fielddata_source.maxthe maximum value of the data source
            Published fielddata_source.minthe minimum value of the data source
            Published fielddata_source.name_descriptiona notes field containing human-readable description
            Published fielddata_source.name_labela human-readable name
            Published fielddata_source.standardtrue if the data source is enabled by default. Non-default data sources cannot be disabled
            Published fielddata_source.unitsthe units of the value
            Published fielddata_source.valuecurrent value of the data source
            Published fieldevent.classThe name of the class of the object that changed
            Published fieldevent.idAn ID, monotonically increasing, and local to the current session
            Published fieldevent.obj_uuidThe uuid of the object that changed
            Published fieldevent.operationThe operation that was performed
            Published fieldevent.refA reference to the object that changed
            Published fieldevent.timestampThe time at which the event occurred
            Published fieldhost.API_version_majormajor version number
            Published fieldhost.API_version_minorminor version number
            Published fieldhost.API_version_vendoridentification of vendor
            Published fieldhost.API_version_vendor_implementationdetails of vendor implementation
            Published fieldhost.PBDsphysical blockdevices
            Published fieldhost.PIFsphysical network interfaces
            Published fieldhost.addressThe address by which this host can be contacted from any other host in the pool
            Published fieldhost.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldhost.capabilitiesXen capabilities
            Published fieldhost.cpu_configurationThe CPU configuration on this host. May contain keys such as "nr_nodes", "sockets_per_node", "cores_per_socket", or "threads_per_core"
            Published fieldhost.crash_dump_srThe SR in which VDIs for crash dumps are created
            Published fieldhost.crashdumpsSet of host crash dumps
            Published fieldhost.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldhost.enabledTrue if the host is currently enabled
            Published fieldhost.host_CPUsThe physical CPUs on this host
            Published fieldhost.hostnameThe hostname of this host
            Published fieldhost.license_paramsState of the current license
            Published fieldhost.logginglogging configuration
            Published fieldhost.memory_overheadVirtualization memory overhead (bytes).
            Published fieldhost.metricsmetrics associated with this host
            Published fieldhost.name_descriptiona notes field containing human-readable description
            Published fieldhost.name_labela human-readable name
            Published fieldhost.other_configadditional configuration
            Published fieldhost.patchesSet of host patches
            Published fieldhost.resident_VMslist of VMs currently resident on host
            Published fieldhost.sched_policyScheduler policy currently in force on this host
            Published fieldhost.software_versionversion strings
            Published fieldhost.supported_bootloadersa list of the bootloaders installed on the machine
            Published fieldhost.suspend_image_srThe SR in which VDIs for suspend images are created
            Published fieldhost.uuidUnique identifier/object reference
            Published fieldhost_cpu.familythe family (number) of the physical CPU
            Published fieldhost_cpu.featuresthe physical CPU feature bitmap
            Published fieldhost_cpu.flagsthe flags of the physical CPU (a decoded version of the features field)
            Published fieldhost_cpu.hostthe host the CPU is in
            Published fieldhost_cpu.modelthe model number of the physical CPU
            Published fieldhost_cpu.modelnamethe model name of the physical CPU
            Published fieldhost_cpu.numberthe number of the physical CPU within the host
            Published fieldhost_cpu.speedthe speed of the physical CPU
            Published fieldhost_cpu.steppingthe stepping of the physical CPU
            Published fieldhost_cpu.utilisationthe current CPU utilisation
            Published fieldhost_cpu.uuidUnique identifier/object reference
            Published fieldhost_cpu.vendorthe vendor of the physical CPU
            Published fieldhost_crashdump.hostHost the crashdump relates to
            Published fieldhost_crashdump.sizeSize of the crashdump
            Published fieldhost_crashdump.timestampTime the crash happened
            Published fieldhost_crashdump.uuidUnique identifier/object reference
            Published fieldhost_metrics.last_updatedTime at which this information was last updated
            Published fieldhost_metrics.livePool master thinks this host is live
            Published fieldhost_metrics.memory_freeFree host memory (bytes)
            Published fieldhost_metrics.memory_totalTotal host memory (bytes)
            Published fieldhost_metrics.uuidUnique identifier/object reference
            Published fieldhost_patch.appliedTrue if the patch has been applied
            Published fieldhost_patch.hostHost the patch relates to
            Published fieldhost_patch.name_descriptiona notes field containing human-readable description
            Published fieldhost_patch.name_labela human-readable name
            Published fieldhost_patch.sizeSize of the patch
            Published fieldhost_patch.timestamp_appliedTime the patch was applied
            Published fieldhost_patch.uuidUnique identifier/object reference
            Published fieldhost_patch.versionPatch version number
            Published fieldmessage.bodyThe body of the message
            Published fieldmessage.nameThe name of the message
            Published fieldmessage.obj_uuidThe uuid of the object this message is associated with
            Published fieldmessage.priorityThe message priority, 0 being low priority
            Published fieldmessage.timestampThe time at which the message was created
            Published fieldmessage.uuidUnique identifier/object reference
            Published fieldnetwork.PIFslist of connected pifs
            Published fieldnetwork.VIFslist of connected vifs
            Published fieldnetwork.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldnetwork.bridgename of the bridge corresponding to this network on the local host
            Published fieldnetwork.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldnetwork.name_descriptiona notes field containing human-readable description
            Published fieldnetwork.name_labela human-readable name
            Published fieldnetwork.other_configadditional configuration
            Published fieldnetwork.uuidUnique identifier/object reference
            Published fieldnetwork_sriov.uuidUnique identifier/object reference
            Published fieldpool.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldpool.coordinator_biastrue if bias against the pool master when scheduling VMs is enabled, false otherwise
            Published fieldpool.crash_dump_SRThe SR in which VDIs for crash dumps are created
            Published fieldpool.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldpool.default_SRDefault SR for VDIs
            Published fieldpool.masterThe host that is pool master
            Published fieldpool.name_descriptionDescription
            Published fieldpool.name_labelShort name
            Published fieldpool.other_configadditional configuration
            Published fieldpool.suspend_image_SRThe SR in which VDIs for suspend images are created
            Published fieldpool.uuidUnique identifier/object reference
            Published fieldpool_patch.name_descriptiona notes field containing human-readable description
            Published fieldpool_patch.name_labela human-readable name
            Published fieldpool_patch.uuidUnique identifier/object reference
            Published fieldpool_update.name_descriptiona notes field containing human-readable description
            Published fieldpool_update.name_labela human-readable name
            Published fieldpool_update.uuidUnique identifier/object reference
            Published fieldpool_update.vdiVDI the update was uploaded to
            Published fieldrole.uuidUnique identifier/object reference
            Published fieldsecret.other_configother_config
            Published fieldsecret.uuidUnique identifier/object reference
            Published fieldsecret.valuethe secret
            Published fieldsession.last_activeTimestamp for last time session was active
            Published fieldsession.poolTrue if this session relates to an intra-pool login, false otherwise
            Published fieldsession.this_hostCurrently connected host
            Published fieldsession.this_userCurrently connected user
            Published fieldsession.uuidUnique identifier/object reference
            Published fieldsubject.uuidUnique identifier/object reference
            Published fieldtask.allowed_operationslist of the operations allowed in this state. This list is advisory only and the server state may have changed by the time this field is read by a client.
            Published fieldtask.createdTime task was created
            Published fieldtask.current_operationslinks each of the running tasks using this object (by reference) to a current_operation enum which describes the nature of the task.
            Published fieldtask.error_infoif the task has failed, this field contains the set of associated error strings. Undefined otherwise.
            Published fieldtask.finishedTime task finished (i.e. succeeded or failed). If task-status is pending, then the value of this field has no meaning
            Published fieldtask.name_descriptiona notes field containing human-readable description
            Published fieldtask.name_labela human-readable name
            Published fieldtask.progressThis field contains the estimated fraction of the task which is complete. This field should not be used to determine whether the task is complete - for this the status field of the task should be used.
            Published fieldtask.resident_onthe host on which the task is running
            Published fieldtask.resultif the task has completed successfully, this field contains the result value (either Void or an object reference). Undefined otherwise.
            Published fieldtask.statuscurrent status of the task
            Published fieldtask.typeif the task has completed successfully, this field contains the type of the encoded result (i.e. name of the class whose reference is in the result field). Undefined otherwise.
            Published fieldtask.uuidUnique identifier/object reference
            Published fielduser.fullnamefull name
            Published fielduser.short_nameshort name (e.g. userid)
            Published fielduser.uuidUnique identifier/object reference
            Published messagePBD.plugActivate the specified PBD, causing the referenced SR to be attached and scanned
            Published messagePBD.unplugDeactivate the specified PBD, causing the referenced SR to be detached and no longer scanned
            Published messagePIF.create_VLANCreate a VLAN interface from an existing physical interface
            Published messagePIF.destroyDestroy the PIF object (provided it is a VLAN interface)
            Published messageSR.createCreate a new Storage Repository and introduce it into the managed system, creating both SR record and PBD record to attach it to current host (with specified device_config parameters)
            Published messageSR.destroyDestroy the specified SR, removing the SR record from the database and removing the SR from disk. (In order to effect this operation the appropriate device_config is read from the specified SR's PBD on the current host)
            Published messageSR.forgetRemove the specified SR record from the database, without attempting to remove the SR from disk
            Published messageSR.get_supported_typesReturn a set of all the SR types supported by the system
            Published messageSR.introduceIntroduce a new Storage Repository into the managed system
            Published messageSR.makeCreate a new Storage Repository on disk
            Published messageSR.scanRefreshes the list of VDIs associated with an SR
            Published messageSR.set_name_descriptionSet the name description of the SR
            Published messageSR.set_name_labelSet the name label of the SR
            Published messageSR.set_sharedSets the shared flag on the SR
            Published messageVBD.assert_attachableThrows an error if this VBD could not be attached to this VM if the VM were running. Intended for debugging.
            Published messageVBD.ejectRemove the media from the device and leave it empty
            Published messageVBD.insertInsert new media into the device
            Published messageVBD.plugHotplug the specified VBD, dynamically attaching it to the running VM
            Published messageVBD.set_modeSets the mode of the VBD. The power_state of the VM must be halted.
            Published messageVBD.unplugHot-unplug the specified VBD, dynamically detaching it from the running VM
            Published messageVBD.unplug_forceForcibly unplug the specified VBD
            Published messageVDI.cloneTake an exact copy of the VDI and return a reference to the new disk. If any driver_params are specified then these are passed through to the storage-specific substrate driver that implements the clone operation. NB the clone lives in the same Storage Repository as its parent.
            Published messageVDI.copyCopies a VDI to an SR. There must be a host that can see both the source and destination SRs simultaneously
            Published messageVDI.forgetRemoves a VDI record from the database
            Published messageVDI.resizeResize the VDI.
            Published messageVDI.resize_onlineResize the VDI which may or may not be attached to running guests.
            Published messageVDI.set_name_descriptionSet the name description of the VDI. This can only happen when its SR is currently attached.
            Published messageVDI.set_name_labelSet the name label of the VDI. This can only happen when its SR is currently attached.
            Published messageVDI.set_read_onlySets the VDI's read_only field
            Published messageVDI.snapshotTake a read-only snapshot of the VDI, returning a reference to the snapshot. If any driver_params are specified then these are passed through to the storage-specific substrate driver that takes the snapshot. NB the snapshot lives in the same Storage Repository as its parent.
            Published messageVIF.plugHotplug the specified VIF, dynamically attaching it to the running VM
            Published messageVIF.unplugHot-unplug the specified VIF, dynamically detaching it from the running VM
            Published messageVM.add_to_VCPUs_params_liveAdd the given key-value pair to VM.VCPUs_params, and apply that value on the running VM
            Published messageVM.assert_can_boot_hereReturns an error if the VM could not boot on this host for some reason
            Published messageVM.assert_operation_validCheck to see whether this operation is acceptable in the current state of the system, raising an error if the operation is invalid for some reason
            Published messageVM.clean_rebootAttempt to cleanly reboot the specified VM (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state.
            Published messageVM.clean_shutdownAttempt to cleanly shutdown the specified VM. (Note: this may not be supported---e.g. if a guest agent is not installed). This can only be called when the specified VM is in the Running state.
            Published messageVM.cloneClones the specified VM, making a new VM. Clone automatically exploits the capabilities of the underlying storage repository in which the VM's disk images are stored (e.g. Copy on Write). This function can only be called when the VM is in the Halted State.
            Published messageVM.copyCopies a VM to an SR. There must be a host that can see both the source and destination SRs simultaneously
            Published messageVM.get_allowed_VBD_devicesReturns a list of the allowed values that a VBD device field can take
            Published messageVM.get_allowed_VIF_devicesReturns a list of the allowed values that a VIF device field can take
            Published messageVM.get_boot_recordReturns a record describing the VM's dynamic state, initialised when the VM boots and updated to reflect runtime configuration changes e.g. CPU hotplug
            Published messageVM.get_possible_hostsReturn the list of hosts on which this VM may run.
            Published messageVM.hard_rebootStop executing the specified VM without attempting a clean shutdown and immediately restart the VM.
            Published messageVM.hard_shutdownStop executing the specified VM without attempting a clean shutdown.
            Published messageVM.pausePause the specified VM. This can only be called when the specified VM is in the Running state.
            Published messageVM.pool_migrateMigrate a VM to another Host.
            Published messageVM.power_state_resetReset the power-state of the VM to halted in the database only. (Used to recover from slave failures in pooling scenarios by resetting the power-states of VMs running on dead slaves to halted.) This is a potentially dangerous operation; use with care.
            Published messageVM.provisionInspects the disk configuration contained within the VM's other_config, creates VDIs and VBDs and then executes any applicable post-install script.
            Published messageVM.resumeAwaken the specified VM and resume it. This can only be called when the specified VM is in the Suspended state.
            Published messageVM.resume_onAwaken the specified VM and resume it on a particular Host. This can only be called when the specified VM is in the Suspended state.
            Published messageVM.send_sysrqSend the given key as a sysrq to this VM. The key is specified as a single character (a String of length 1). This can only be called when the specified VM is in the Running state.
            Published messageVM.send_triggerSend the named trigger to this VM. This can only be called when the specified VM is in the Running state.
            Published messageVM.set_HVM_boot_policySet the VM.HVM_boot_policy field of the given VM, which will take effect when it is next started
            Published messageVM.set_VCPUs_number_liveSet the number of VCPUs for a running VM
            Published messageVM.set_actions_after_crashSets the actions_after_crash parameter
            Published messageVM.set_memory_target_liveSet the memory target for a running VM
            Published messageVM.set_shadow_multiplier_liveSet the shadow memory multiplier on a running VM
            Published messageVM.startStart the specified VM. This function can only be called when the VM is in the Halted State.
            Published messageVM.start_onStart the specified VM on a particular host. This function can only be called when the VM is in the Halted State.
            Published messageVM.suspendSuspend the specified VM to disk. This can only be called when the specified VM is in the Running state.
            Published messageVM.unpauseResume the specified VM. This can only be called when the specified VM is in the Paused state.
            Published messageVM.update_allowed_operationsRecomputes the list of acceptable operations
            Published messagecrashdump.destroyDestroy the specified crashdump
            Published messageevent.get_current_idReturn the ID of the next event to be generated by the system
            Published messageevent.nextBlocking call which returns a (possibly empty) batch of events. This method is only recommended for legacy use. New development should use event.from which supersedes this method.
            Published messageevent.registerRegisters this session with the event system for a set of given classes. This method is only recommended for legacy use in conjunction with event.next.
            Published messageevent.unregisterRemoves this session's registration with the event system for a set of given classes. This method is only recommended for legacy use in conjunction with event.next.
            Published messagehost.bugreport_uploadRun xen-bugtool --yestoall and upload the output to support
            Published messagehost.destroyDestroy specified host record in database
            Published messagehost.disablePuts the host into a state in which no new VMs can be started. Currently active VMs on the host continue to execute.
            Published messagehost.dmesgGet the host xen dmesg.
            Published messagehost.dmesg_clearGet the host xen dmesg, and clear the buffer.
            Published messagehost.enablePuts the host into a state in which new VMs can be started.
            Published messagehost.get_logGet the host's log file
            Published messagehost.license_applyApply a new license to a host
            Published messagehost.list_methodsList all supported methods
            Published messagehost.rebootReboot the host. (This function can only be called if there are no currently running VMs on the host and it is disabled.)
            Published messagehost.restart_agentRestarts the agent after a 10 second pause. WARNING: this is a dangerous operation. Any operations in progress will be aborted, and unrecoverable data loss may occur. The caller is responsible for ensuring that there are no operations in progress when this method is called.
            Published messagehost.send_debug_keysInject the given string as debugging keys into Xen
            Published messagehost.shutdownShutdown the host. (This function can only be called if there are no currently running VMs on the host and it is disabled.)
            Published messagehost_crashdump.destroyDestroy specified host crash dump, removing it from the disk.
            Published messagehost_crashdump.uploadUpload the specified host crash dump to a specified URL
            Published messagehost_patch.applyApply the selected patch and return its output
            Published messagehost_patch.destroyDestroy the specified host patch, removing it from the disk. This does NOT reverse the patch
            Published messagepool.create_VLANCreate PIFs, mapping a network to the same physical interface/VLAN on each host. This call is deprecated: use Pool.create_VLAN_from_PIF instead.
            Published messagepool.create_VLAN_from_PIFCreate a pool-wide VLAN by taking the PIF.
            Published messagepool.ejectInstruct a pool master to eject a host from the pool
            Published messagepool.emergency_reset_masterInstruct a slave already in a pool that the master has changed
            Published messagepool.emergency_transition_to_masterInstruct host that's currently a slave to transition to being master
            Published messagepool.joinInstruct host to join a new pool
            Published messagepool.join_forceInstruct host to join a new pool
            Published messagepool.recover_slavesInstruct a pool master, M, to try and contact its slaves and, if slaves are in emergency mode, reset their master address to M.
            Published messagepool.sync_databaseForcibly synchronise the database now
            Published messagesession.change_passwordChange the account password; if your session is authenticated with root privileges then the old_pwd is validated and the new_pwd is set regardless
            Published messagesession.login_with_passwordAttempt to authenticate the user, returning a session reference if successful
            Published messagesession.logoutLog out of a session
            Published messagetask.cancelRequest that a task be cancelled. Note that a task may fail to be cancelled and may complete or fail normally; even when a task does cancel, cancellation might take an arbitrary amount of time.
            Published messagetask.createCreate a new task object which must be manually destroyed.
            Published messagetask.destroyDestroy the task object
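            Most of the session and VM lifecycle calls above remain the everyday surface of the API. Below is a minimal sketch of a login/shutdown/start round-trip using the Python XenAPI binding; the address, credentials, and VM name are illustrative placeholders.

```python
import XenAPI

session = XenAPI.Session("https://host.example.com")
session.xenapi.login_with_password("root", "secret", "1.0", "docs-example")
try:
    vm = session.xenapi.VM.get_by_name_label("my-vm")[0]
    if session.xenapi.VM.get_power_state(vm) == "Running":
        try:
            session.xenapi.VM.clean_shutdown(vm)   # requires a guest agent
        except XenAPI.Failure:
            session.xenapi.VM.hard_shutdown(vm)    # forceful fallback
    session.xenapi.VM.start(vm, False, False)      # start_paused, force
finally:
    session.xenapi.session.logout()
```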
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/stockholm/index.html b/new-docs/xen-api/releases/stockholm/index.html index e823d7ff1..c804af487 100644 --- a/new-docs/xen-api/releases/stockholm/index.html +++ b/new-docs/xen-api/releases/stockholm/index.html @@ -1,10 +1,10 @@ Citrix Hypervisor 8.2 :: XAPI Toolstack Developer Documentation -

            Citrix Hypervisor 8.2

            Code name: "stockholm".

            Changes

            Change | Element | Description
            Published classCertificateAn X509 certificate used for TLS connections
            Published fieldCertificate.fingerprintThe certificate's SHA256 fingerprint / hash
            Published fieldCertificate.hostThe host where the certificate is installed
            Published fieldCertificate.not_afterDate before which the certificate is valid
            Published fieldCertificate.not_beforeDate after which the certificate is valid
            Published fieldCertificate.uuidUnique identifier/object reference
            Published fieldPUSB.speedUSB device speed
            Published fieldhost.certificatesList of certificates installed in the host
            Published fieldhost.editionsList of all available product editions
            Published messagehost.emergency_reset_server_certificateDelete the current TLS server certificate and replace by a new, self-signed one. This should only be used with extreme care.
            Published messagehost.install_server_certificateInstall the TLS server certificate.
            Published messagetask.set_progressSet the task progress
            Changed messagehost.set_power_on_modeRemoved iLO script
            Changed messagehost.set_ssl_legacyLegacy SSL no longer supported
            Deprecated fieldhost.ssl_legacyLegacy SSL no longer supported
            Deprecated messagepool.disable_ssl_legacyLegacy SSL no longer supported
            Removed messagepool.enable_ssl_legacyLegacy SSL no longer supported
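            The new Certificate class and host.install_server_certificate can be exercised as follows. A minimal sketch, assuming an authenticated session `session` against a Citrix Hypervisor 8.2 (or later) pool; the PEM file names are placeholders.

```python
# Enumerate the TLS server certificates known to the pool.
for ref, rec in session.xenapi.Certificate.get_all_records().items():
    print(rec["host"], rec["fingerprint"], rec["not_after"])

# Install a new TLS server certificate on one host; the empty final
# argument means "no intermediate certificate chain".
host = session.xenapi.host.get_all()[0]
with open("server.pem") as cert, open("server.key") as key:
    session.xenapi.host.install_server_certificate(host, cert.read(), key.read(), "")
```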
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/stockholm_psr/index.html b/new-docs/xen-api/releases/stockholm_psr/index.html index 07a7f573a..45bffd6a1 100644 --- a/new-docs/xen-api/releases/stockholm_psr/index.html +++ b/new-docs/xen-api/releases/stockholm_psr/index.html @@ -1,10 +1,10 @@ Citrix Hypervisor 8.2 Hotfix 2 :: XAPI Toolstack Developer Documentation -

            Citrix Hypervisor 8.2 Hotfix 2

            Code name: "stockholm_psr".

            Changes

            Change | Element | Description
            Published fieldpool.is_psr_pendingTrue if either a PSR is running or we are waiting for a PSR to be re-run
            Published messagepool.rotate_secret
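            A minimal sketch of driving a pool secret rotation and waiting for it to complete, assuming an authenticated session `session` as in the other examples:

```python
import time

pool = session.xenapi.pool.get_all()[0]
session.xenapi.pool.rotate_secret()
# is_psr_pending stays true while a rotation is running or needs re-running.
while session.xenapi.pool.get_is_psr_pending(pool):
    time.sleep(5)
print("pool secret rotation complete")
```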
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/symc/index.html b/new-docs/xen-api/releases/symc/index.html index ab727c978..51328e3d8 100644 --- a/new-docs/xen-api/releases/symc/index.html +++ b/new-docs/xen-api/releases/symc/index.html @@ -1,10 +1,10 @@ XenServer 4.1.1 :: XAPI Toolstack Developer Documentation -

            XenServer 4.1.1

            Code name: "symc".

            Changes

            Change | Element | Description
            Published messageSR.updateRefresh the fields on the SR object
            Published messageVDI.updateAsk the storage backend to refresh the fields in the VDI object
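            A minimal sketch of asking the storage backend to refresh SR and VDI fields, assuming an authenticated session `session` and an SR reference `sr`, both illustrative:

```python
session.xenapi.SR.update(sr)  # refresh the fields on the SR object
for vdi in session.xenapi.SR.get_VDIs(sr):
    session.xenapi.VDI.update(vdi)
    print(session.xenapi.VDI.get_physical_utilisation(vdi))
```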
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/tampa/index.html b/new-docs/xen-api/releases/tampa/index.html index a9f7c104e..c43e182c4 100644 --- a/new-docs/xen-api/releases/tampa/index.html +++ b/new-docs/xen-api/releases/tampa/index.html @@ -1,10 +1,10 @@ XenServer 6.1 :: XAPI Toolstack Developer Documentation -

            XenServer 6.1

            Code name: "tampa".

            Changes

            Change | Element | Description
            Published fieldBond.links_upNumber of links up in this bond
            Published fieldBond.propertiesAdditional configuration properties specific to the bond mode.
            Published fieldPIF.IPv6IPv6 address
            Published fieldPIF.ipv6_configuration_modeSets if and how this interface gets an IPv6 address
            Published fieldPIF.ipv6_gatewayIPv6 gateway
            Published fieldPIF.primary_address_typeWhich protocol should define the primary address of this interface
            Published fieldVIF.ipv4_allowedA list of IPv4 addresses which can be used to filter traffic passing through this VIF
            Published fieldVIF.ipv6_allowedA list of IPv6 addresses which can be used to filter traffic passing through this VIF
            Published fieldVIF.locking_modecurrent locking mode of the VIF
            Published fieldblob.publicTrue if the blob is publicly accessible
            Published fieldhost.guest_VCPUs_paramsVCPUs params to apply to all resident guests
            Published fieldnetwork.default_locking_modeThe network will use this value to determine the behaviour of all VIFs where locking_mode = default
            Published messageBond.set_propertySet the value of a property of the bond
            Published messagePIF.reconfigure_ipv6Reconfigure the IPv6 address settings for this interface
            Published messagePIF.set_primary_address_typeChange the primary address type used by this PIF
            Published messageVDI.pool_migrateMigrate a VDI, which may be attached to a running guest, to a different SR. The destination SR must be visible to the guest.
            Published messageVIF.add_ipv4_allowedAssociates an IPv4 address with this VIF
            Published messageVIF.add_ipv6_allowedAssociates an IPv6 address with this VIF
            Published messageVIF.remove_ipv4_allowedRemoves an IPv4 address from this VIF
            Published messageVIF.remove_ipv6_allowedRemoves an IPv6 address from this VIF
            Published messageVIF.set_ipv4_allowedSet the IPv4 addresses to which traffic on this VIF can be restricted
            Published messageVIF.set_ipv6_allowedSet the IPv6 addresses to which traffic on this VIF can be restricted
            Published messageVIF.set_locking_modeSet the locking mode for this VIF
            Published messageVM.assert_can_migrateAssert whether a VM can be migrated to the specified destination.
            Published messageVM.import_convertImport using a conversion service.
            Published messageVM.migrate_sendMigrate the VM to another host. This can only be called when the specified VM is in the Running state.
            Published messageVM.query_servicesQuery the system services advertised by this VM and register them. This can only be applied to a system domain.
            Published messageevent.injectInjects an artificial event on the given object and returns the corresponding ID in the form of a token, which can be used as a point of reference for database events. For example, to check whether an object has reached the right state before attempting an operation, one can inject an artificial event on the object and wait until the token returned by consecutive event.from calls is lexicographically greater than the one returned by event.inject.
            Published messagehost.get_management_interfaceReturns the management interface for the specified host
            Published messagehost.migrate_receivePrepare to receive a VM, returning a token which can be passed to VM.migrate.
            Published messagenetwork.set_default_locking_modeSet the default locking mode for VIFs attached to this network
            Published messagepool_patch.clean_on_hostRemoves the patch's files from the specified host
            Published messagepool_patch.pool_cleanRemoves the patch's files from all hosts in the pool, but does not remove the database entries
            Deprecated messageVM.get_cooperative
            Deprecated messagehost.get_uncooperative_resident_VMs
            Removed classVBD_metricsDisabled in favour of RRD
            Removed classVIF_metricsDisabled in favour of RRDs
            Removed fieldPIF_metrics.io_read_kbsDisabled and replaced by RRDs
            Removed fieldPIF_metrics.io_write_kbsDisabled and replaced by RRDs
            Removed fieldVBD.metricsDisabled in favour of RRDs
            Removed fieldVBD_metrics.io_read_kbsDisabled and replaced by RRDs
            Removed fieldVBD_metrics.io_write_kbsDisabled and replaced by RRDs
            Removed fieldVBD_metrics.last_updatedDisabled in favour of RRD
            Removed fieldVBD_metrics.other_configDisabled in favour of RRD
            Removed fieldVIF.metricsDisabled in favour of RRDs
            Removed fieldVIF_metrics.io_read_kbsDisabled and replaced by RRDs
            Removed fieldVIF_metrics.io_write_kbsDisabled and replaced by RRDs
            Removed fieldVM_metrics.VCPUs_utilisationDisabled in favour of RRDs
            Removed fieldhost_metrics.memory_freeDisabled in favour of RRD
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/vgpu-productisation/index.html b/new-docs/xen-api/releases/vgpu-productisation/index.html index 9924ea5d2..d77697390 100644 --- a/new-docs/xen-api/releases/vgpu-productisation/index.html +++ b/new-docs/xen-api/releases/vgpu-productisation/index.html @@ -1,10 +1,10 @@ XenServer 6.2 SP1 :: XAPI Toolstack Developer Documentation -

            XenServer 6.2 SP1

            Code name: "vgpu-productisation".

            Changes

Change | Element | Description
Published field | GPU_group.enabled_VGPU_types | vGPU types supported on at least one of the pGPUs in this group
Published field | GPU_group.supported_VGPU_types | vGPU types supported on at least one of the pGPUs in this group
Published field | PGPU.supported_VGPU_max_capacities | A map relating each VGPU type supported on this GPU to the maximum number of VGPUs of that type which can run simultaneously on this GPU
Published field | PIF.managed | Indicates whether the interface is managed by xapi. If it is not, then xapi will not configure the interface, the commands PIF.plug/unplug/reconfigure_ip(v6) cannot be used, nor can the interface be bonded or have VLANs based on top through xapi.
Published field | VGPU_type.enabled_on_GPU_groups | List of GPU groups in which at least one PGPU has this VGPU type enabled
Published field | VGPU_type.max_resolution_x | Maximum resolution (width) supported by the VGPU type
Published field | VGPU_type.max_resolution_y | Maximum resolution (height) supported by the VGPU type
Published field | VGPU_type.supported_on_GPU_groups | List of GPU groups in which at least one PGPU supports this VGPU type
            \ No newline at end of file diff --git a/new-docs/xen-api/releases/vgpu-tech-preview/index.html b/new-docs/xen-api/releases/vgpu-tech-preview/index.html index c0ebdadb6..e895bb099 100644 --- a/new-docs/xen-api/releases/vgpu-tech-preview/index.html +++ b/new-docs/xen-api/releases/vgpu-tech-preview/index.html @@ -1,10 +1,10 @@ XenServer 6.2 SP1 Tech-Preview :: XAPI Toolstack Developer Documentation -

            XenServer 6.2 SP1 Tech-Preview

            Code name: "vgpu-tech-preview".

            Changes

Change | Element | Description
Published class | VGPU_type | A type of virtual GPU
Published field | GPU_group.allocation_algorithm | Current allocation of vGPUs to pGPUs for this group
Published field | PGPU.enabled_VGPU_types | List of VGPU types which have been enabled for this PGPU
Published field | PGPU.resident_VGPUs | List of VGPUs running on this PGPU
Published field | PGPU.supported_VGPU_types | List of VGPU types supported by the underlying hardware
Published field | VGPU.resident_on | The PGPU on which this VGPU is running
Published field | VGPU.type | Preset type for this VGPU
Published field | VGPU_type.VGPUs | List of VGPUs of this type
Published field | VGPU_type.enabled_on_PGPUs | List of PGPUs that have this VGPU type enabled
Published field | VGPU_type.framebuffer_size | Framebuffer size of the VGPU type, in bytes
Published field | VGPU_type.max_heads | Maximum number of displays supported by the VGPU type
Published field | VGPU_type.model_name | Model name associated with the VGPU type
Published field | VGPU_type.supported_on_PGPUs | List of PGPUs that support this VGPU type
Published field | VGPU_type.uuid | Unique identifier/object reference
Published field | VGPU_type.vendor_name | Name of VGPU vendor
Published message | GPU_group.get_remaining_capacity |
Published message | PGPU.add_enabled_VGPU_types |
Published message | PGPU.get_remaining_capacity |
Published message | PGPU.remove_enabled_VGPU_types |
Published message | PGPU.set_GPU_group |
Published message | PGPU.set_enabled_VGPU_types |
            \ No newline at end of file diff --git a/new-docs/xen-api/topics/consoles/index.html b/new-docs/xen-api/topics/consoles/index.html index 3350de3dd..294c772f4 100644 --- a/new-docs/xen-api/topics/consoles/index.html +++ b/new-docs/xen-api/topics/consoles/index.html @@ -1,5 +1,5 @@ VM consoles :: XAPI Toolstack Developer Documentation -

            VM consoles

            Most XenAPI graphical interfaces will want to gain access to the VM consoles, in order to render them to the user as if they were physical machines. There are several types of consoles available, depending on the type of guest or if the physical host console is being accessed:

            Types of consoles

Operating System | Text | Graphical | Optimized graphical
Windows | No | VNC, using an API call | RDP, directly from guest
Linux | Yes, through VNC and an API call | No | VNC, directly from guest
Physical Host | Yes, through VNC and an API call | No | No

Hardware-assisted VMs, such as Windows, directly provide a graphical console over VNC. There is no text-based console, and guest networking is not necessary to use the graphical console. Once guest networking has been established, it is more efficient to set up Remote Desktop Access and use an RDP client to connect directly (this must be done outside of the XenAPI).

Paravirtual VMs, such as Linux guests, provide a native text console directly. XenServer provides a utility (called vncterm) to convert this text-based console into a graphical VNC representation. Guest networking is not necessary for this console to function. As with Windows above, Linux distributions often configure VNC within the guest, and clients can connect to it directly over a guest network interface.

            The physical host console is only available as a vt100 console, which is exposed through the XenAPI as a VNC console by using vncterm in the control domain.

            RFB (Remote Framebuffer) is the protocol which underlies VNC, specified in The RFB Protocol. Third-party developers are expected to provide their own VNC viewers, and many freely available implementations can be adapted for this purpose. RFB 3.3 is the minimum version which viewers must support.

            Retrieving VNC consoles using the API

            VNC consoles are retrieved using a special URL passed through to the host agent. The sequence of API calls is as follows:

            1. Client to Master/443: XML-RPC: Session.login_with_password().

            2. Master/443 to Client: Returns a session reference to be used with subsequent calls.

            3. Client to Master/443: XML-RPC: VM.get_by_name_label().

            4. Master/443 to Client: Returns a reference to a particular VM (or the “control domain” if you want to retrieve the physical host console).

            5. Client to Master/443: XML-RPC: VM.get_consoles().

            6. Master/443 to Client: Returns a list of console objects associated with the VM.

7. Client to Master/443: XML-RPC: console.get_location().

8. Master/443 to Client: Returns a URI describing where the requested console is located. The URIs are of the form: https://192.168.0.1/console?ref=OpaqueRef:c038533a-af99-a0ff-9095-c1159f2dc6a0.

            9. Client to 192.168.0.1: HTTP CONNECT “/console?ref=(…)”

The final HTTP CONNECT is slightly non-standard, since the HTTP/1.1 RFC specifies that it should take only a host and a port rather than a URL. Once the HTTP CONNECT is complete, the connection can be used directly as if it were a connection to a VNC server, without any further HTTP protocol action.
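
For concreteness, here is a minimal sketch of this sequence using the Python XenAPI bindings and the standard library. The host address, credentials and VM name are placeholders; certificate verification is disabled because hosts frequently present self-signed certificates, and passing the session reference as a session_id cookie is one way of authorizing the CONNECT.

import socket
import ssl
from urllib.parse import urlparse

import XenAPI  # pip install XenAPI

# ignore_ssl accepts the host's self-signed certificate.
session = XenAPI.Session("https://192.168.0.1", ignore_ssl=True)
session.xenapi.login_with_password("root", "password", "1.0", "console-example")

vm = session.xenapi.VM.get_by_name_label("my-vm")[0]      # steps 3-4
console = session.xenapi.VM.get_consoles(vm)[0]           # steps 5-6
location = session.xenapi.console.get_location(console)   # steps 7-8

# Step 9: HTTP CONNECT to the console URI; afterwards the same TCP
# connection carries the raw RFB (VNC) stream.
url = urlparse(location)
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE                           # self-signed cert
sock = ctx.wrap_socket(socket.create_connection((url.hostname, 443)),
                       server_hostname=url.hostname)
sock.sendall(("CONNECT %s?%s HTTP/1.0\r\n"
              "Cookie: session_id=%s\r\n\r\n"
              % (url.path, url.query, session._session)).encode())

# Skip the HTTP response headers; what follows is RFB, starting with the
# server's version banner, e.g. b"RFB 003.003\n".
reply = b""
while b"\r\n\r\n" not in reply:
    reply += sock.recv(1)
print(sock.recv(12))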

            This scheme requires direct access from the client to the control domain’s IP, and will not work correctly if there are Network Address Translation (NAT) devices blocking such connectivity. You can use the CLI to retrieve the console URI from the client and perform a connectivity check.

            Retrieve the VM UUID by running:

            $ VM=$(xe vm-list params=uuid --minimal name-label=<name>)

            Retrieve the console information:

            $ xe console-list vm-uuid=$VM
             uuid ( RO)             : 8013b937-ff7e-60d1-ecd8-e52d66c5879e
                       vm-uuid ( RO): 2d7c558a-8f03-b1d0-e813-cbe7adfa534c
                 vm-name-label ( RO): 6
[…]
                      location ( RO): https://10.80.228.30/console?uuid=8013b937-ff7e-60d1-ecd8-e52d66c5879e

            Use command-line utilities like ping to test connectivity to the IP address provided in the location field.
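
Since ICMP and TCP can be filtered differently, a TCP-level probe of the console port can be more conclusive than ping. A small standard-library sketch, using the address from the example output above:

import socket

def console_reachable(host: str, port: int = 443, timeout: float = 5.0) -> bool:
    """Return True if a TCP connection to the console endpoint succeeds."""
    try:
        with socket.create_connection((host, port), timeout=timeout):
            return True
    except OSError:
        return False

print(console_reachable("10.80.228.30"))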

            Disabling VNC forwarding for Linux VM

When creating and destroying Linux VMs, the host agent automatically manages the vncterm processes which convert the text console into VNC. Advanced users who wish to access the text console directly can disable VNC forwarding for that VM. The text console can then only be accessed directly from the control domain, and graphical interfaces such as XenCenter will not be able to render a console for that VM.

            Before starting the guest, set the following parameter on the VM record:

            $ xe vm-param-set uuid=$VM other-config:disable_pv_vnc=1

            Start the VM.

            Use the CLI to retrieve the underlying domain ID of the VM with:

            $ DOMID=$(xe vm-list params=dom-id uuid=$VM --minimal)

            On the host console, connect to the text console directly by:

            $ /usr/lib/xen/bin/xenconsole $DOMID

This is an advanced procedure, and we do not recommend using the text console directly for heavy I/O operations. Instead, connect to the guest over SSH or some other network-based connection mechanism.

            \ No newline at end of file diff --git a/new-docs/xen-api/topics/guest-agents/index.html b/new-docs/xen-api/topics/guest-agents/index.html index 7eee7a480..72eebc799 100644 --- a/new-docs/xen-api/topics/guest-agents/index.html +++ b/new-docs/xen-api/topics/guest-agents/index.html @@ -1,13 +1,13 @@ Guest agents :: XAPI Toolstack Developer Documentation -
            \ No newline at end of file diff --git a/new-docs/xen-api/topics/importexport/index.html b/new-docs/xen-api/topics/importexport/index.html index eafcc0f0d..8824cdd43 100644 --- a/new-docs/xen-api/topics/importexport/index.html +++ b/new-docs/xen-api/topics/importexport/index.html @@ -1,5 +1,5 @@ VM import/export :: XAPI Toolstack Developer Documentation -


            VM import/export

VMs can be exported to a file and later imported to any Xapi host. The export protocol is a simple HTTP(S) GET, which should be sent to the Pool master. Authorization is either via a pre-created session_id or by HTTP basic authentication (particularly useful on the command-line). […]
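
As an illustration, such an export using HTTP basic authentication might look like the following standard-library sketch; the master address, credentials, VM uuid and the /export?uuid=... form of the URL are assumptions to adapt to your setup.

import shutil
import ssl
import urllib.request

master = "https://pool-master"                     # placeholder address
vm_uuid = "2d7c558a-8f03-b1d0-e813-cbe7adfa534c"   # placeholder VM uuid

passwords = urllib.request.HTTPPasswordMgrWithDefaultRealm()
passwords.add_password(None, master, "root", "password")
opener = urllib.request.build_opener(
    urllib.request.HTTPBasicAuthHandler(passwords),
    # Hosts commonly present self-signed certificates.
    urllib.request.HTTPSHandler(context=ssl._create_unverified_context()),
)

# Stream the XVA archive to disk rather than buffering it in memory.
with opener.open("%s/export?uuid=%s" % (master, vm_uuid)) as response, \
        open("vm-export.xva", "wb") as out:
    shutil.copyfileobj(response, out)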

            \ No newline at end of file diff --git a/new-docs/xen-api/topics/index.html b/new-docs/xen-api/topics/index.html index 716daa29e..bd5e2bd72 100644 --- a/new-docs/xen-api/topics/index.html +++ b/new-docs/xen-api/topics/index.html @@ -1,10 +1,10 @@ Topics :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/xen-api/topics/index.print.html b/new-docs/xen-api/topics/index.print.html index 83c6d2407..f9a1ccdab 100644 --- a/new-docs/xen-api/topics/index.print.html +++ b/new-docs/xen-api/topics/index.print.html @@ -1,5 +1,5 @@ Topics :: XAPI Toolstack Developer Documentation -


            Subsections of Topics

            API for configuring the udhcp server in Dom0

            This API allows you to configure the DHCP service running on the Host Internal Management Network (HIMN). The API configures a udhcp daemon residing in Dom0 and alters the service configuration for any VM using the network.

It should be noted that, for this reason, callers who modify the […]

running -- clean shutdown / hard shutdown --> halted
running -- pause --> paused
halted -- destroy --> destroyed

The figure above shows the states that a VM can be in and the API calls that can be used to move the VM between these states.


                XenCenter

                XenCenter uses some conventions on top of the XenAPI:

                Internationalization for SR names

                The SRs created at install time now have an other_config key indicating how their names may be internationalized.

                other_config["i18n-key"] may be one of

                • local-hotplug-cd

                • local-hotplug-disk

                • local-storage

                • xenserver-tools

                Additionally, other_config["i18n-original-value-<field name>"] gives the value of that field when the SR was created. If XenCenter sees a record where SR.name_label equals other_config["i18n-original-value-name_label"] (that is, the record has not changed since it was created during XenServer installation), then internationalization will be applied. In other words, XenCenter will disregard the current contents of that field, and instead use a value appropriate to the user’s own language.

If you change SR.name_label for your own purposes, it will no longer match other_config["i18n-original-value-name_label"]. XenCenter therefore does not apply internationalization, and instead preserves your given name.
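
A sketch of the check XenCenter performs, as described above; `session` is assumed to be an authenticated XenAPI session, and the translation table is a stand-in for XenCenter's real localized strings.

TRANSLATIONS = {"local-storage": "Lokaler Speicher"}  # illustrative only

def display_name(session, sr):
    name = session.xenapi.SR.get_name_label(sr)
    other_config = session.xenapi.SR.get_other_config(sr)
    # Localize only if the record is unchanged since installation.
    if (other_config.get("i18n-original-value-name_label") == name
            and other_config.get("i18n-key") in TRANSLATIONS):
        return TRANSLATIONS[other_config["i18n-key"]]
    return name  # the user renamed the SR, so preserve the given name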

                Hiding objects from XenCenter

                Networks, PIFs, and VMs can be hidden from XenCenter by adding the key HideFromXenCenter=true to the other_config parameter for the object. This capability is intended for ISVs who know what they are doing, not general use by everyday users. For example, you might want to hide certain VMs because they are cloned VMs that shouldn’t be used directly by general users in your environment.
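
For example, hiding a VM could look like this with the Python XenAPI bindings; `session` is assumed to be an authenticated session and the VM name is a placeholder.

vm = session.xenapi.VM.get_by_name_label("internal-clone")[0]
# add_to_other_config fails if the key already exists, so clear it first.
session.xenapi.VM.remove_from_other_config(vm, "HideFromXenCenter")
session.xenapi.VM.add_to_other_config(vm, "HideFromXenCenter", "true")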

In XenCenter, hidden Networks, PIFs, and VMs can be made visible using the View menu.

                \ No newline at end of file diff --git a/new-docs/xen-api/topics/memory/index.html b/new-docs/xen-api/topics/memory/index.html index dacb8a2bc..216beb408 100644 --- a/new-docs/xen-api/topics/memory/index.html +++ b/new-docs/xen-api/topics/memory/index.html @@ -1,5 +1,5 @@ Memory :: XAPI Toolstack Developer Documentation -

                Memory

                Memory is used for many things:

                \ No newline at end of file diff --git a/new-docs/xen-api/topics/metrics/index.html b/new-docs/xen-api/topics/metrics/index.html index efd383091..ca0c56478 100644 --- a/new-docs/xen-api/topics/metrics/index.html +++ b/new-docs/xen-api/topics/metrics/index.html @@ -1,5 +1,5 @@ Metrics :: XAPI Toolstack Developer Documentation -
                \ No newline at end of file diff --git a/new-docs/xen-api/topics/snapshots/index.html b/new-docs/xen-api/topics/snapshots/index.html index 7667c0d78..b06e811ea 100644 --- a/new-docs/xen-api/topics/snapshots/index.html +++ b/new-docs/xen-api/topics/snapshots/index.html @@ -1,5 +1,5 @@ Snapshots :: XAPI Toolstack Developer Documentation -


                Snapshots

                Snapshots represent the state of a VM, or a disk (VDI) at a point in time. They can be used for:

                • backups (hourly, daily, weekly etc)
                • experiments (take snapshot, try something, revert back again)
                • golden images (install OS, get it just right, clone it 1000s of times)

                Read more about Snapshots: the High-Level Feature.

                Taking a VDI snapshot

                To take a snapshot of a single disk (VDI):

                snapshot_vdi <- VDI.snapshot(session_id, vdi, driver_params)

where vdi is the reference to the disk to be snapshotted, and driver_params is a list of string pairs providing optional backend implementation-specific hints. The snapshot operation should be quick (i.e. it should never be implemented as […] is up to you to check you are importing them to the right place.
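
Via the Python XenAPI bindings, the same call might look like this; `session` is assumed to be an authenticated session and the uuid is a placeholder.

vdi = session.xenapi.VDI.get_by_uuid("8013b937-ff7e-60d1-ecd8-e52d66c5879e")
# driver_params is the (here empty) map of backend-specific hints.
snapshot_vdi = session.xenapi.VDI.snapshot(vdi, {})
print(session.xenapi.VDI.get_uuid(snapshot_vdi))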

                Now the VDI $RESTORE should have the same contents as $DELTA.

                \ No newline at end of file diff --git a/new-docs/xen-api/topics/udhcp/index.html b/new-docs/xen-api/topics/udhcp/index.html index e1dcbf464..fa3bb85ec 100644 --- a/new-docs/xen-api/topics/udhcp/index.html +++ b/new-docs/xen-api/topics/udhcp/index.html @@ -1,5 +1,5 @@ API for configuring the udhcp server in Dom0 :: XAPI Toolstack Developer Documentation -
                \ No newline at end of file diff --git a/new-docs/xen-api/topics/vm-lifecycle/index.html b/new-docs/xen-api/topics/vm-lifecycle/index.html index 62335494a..1ca4facad 100644 --- a/new-docs/xen-api/topics/vm-lifecycle/index.html +++ b/new-docs/xen-api/topics/vm-lifecycle/index.html @@ -1,5 +1,5 @@ VM Lifecycle :: XAPI Toolstack Developer Documentation -


                VM Lifecycle

graph
    halted -- start(paused) --> paused
    halted -- start(not paused) --> running
    running -- suspend --> suspended
[…] API calls that can be used to move the VM between these states.
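
As an illustration, driving a VM around part of this state diagram with the Python XenAPI bindings might look as follows; `session` is assumed to be an authenticated session and the VM name is a placeholder.

vm = session.xenapi.VM.get_by_name_label("my-vm")[0]

session.xenapi.VM.start(vm, False, False)    # halted -> running (not paused)
session.xenapi.VM.pause(vm)                  # running -> paused
session.xenapi.VM.unpause(vm)                # paused -> running
session.xenapi.VM.suspend(vm)                # running -> suspended
session.xenapi.VM.resume(vm, False, False)   # suspended -> running
session.xenapi.VM.clean_shutdown(vm)         # running -> halted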

                  \ No newline at end of file diff --git a/new-docs/xen-api/topics/xencenter/index.html b/new-docs/xen-api/topics/xencenter/index.html index 8b303eb0c..06e6d0bdd 100644 --- a/new-docs/xen-api/topics/xencenter/index.html +++ b/new-docs/xen-api/topics/xencenter/index.html @@ -1,11 +1,11 @@ XenCenter :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xen-api/usage/index.html b/new-docs/xen-api/usage/index.html index 30f63c4f0..d9769d4db 100644 --- a/new-docs/xen-api/usage/index.html +++ b/new-docs/xen-api/usage/index.html @@ -1,5 +1,5 @@ Using the API :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xen-api/wire-protocol/index.html b/new-docs/xen-api/wire-protocol/index.html index 87ded8947..967481487 100644 --- a/new-docs/xen-api/wire-protocol/index.html +++ b/new-docs/xen-api/wire-protocol/index.html @@ -1,5 +1,5 @@ Wire Protocol :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/architecture/index.html b/new-docs/xenopsd/architecture/index.html index dcb15261a..a4a8b9794 100644 --- a/new-docs/xenopsd/architecture/index.html +++ b/new-docs/xenopsd/architecture/index.html @@ -1,8 +1,8 @@ Architecture :: XAPI Toolstack Developer Documentation -

Architecture

Xenopsd instances run on a host and manage VMs on behalf of clients. This picture shows 3 different Xenopsd instances: 2 named “xenopsd-xc” and 1 named “xenopsd-xenlight”.

Where xenopsd fits on a host

Each instance is responsible for managing a disjoint set of VMs. Clients should never ask more than one Xenopsd to manage the same VM. Managing a VM means:

                  • handling start/shutdown/suspend/resume/migrate/reboot
                  • allowing devices (disks, nics, PCI cards, vCPUs etc) to be manipulated
                  • providing updates to clients when things change (reboots, console becomes available, guest agent says something etc).

                  For a full list of features, consult the features list.

                  Each Xenopsd instance has a unique name on the host. A typical name is

                  • org.xen.xcp.xenops.classic
                  • org.xen.xcp.xenops.xenlight

A higher-level tool, such as xapi […]

                • message encoding: by default we use JSON but XML is also available
                • RPCs over Unix domain sockets and persistent queues.
• This library allows the communication details to be changed without having to change all the Xapi clients and servers. A minimal sketch of such an RPC follows below.
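
Purely for illustration, a JSON RPC over a Unix domain socket has roughly this shape. The socket path, framing and method name below are hypothetical, not xenopsd's actual interface; the real RPC definitions live in xcp-idl/xen/xenops_interface.ml.

import json
import socket

request = json.dumps({"method": "VM.stat", "params": ["vm-uuid"], "id": 1})
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock:
    sock.connect("/var/run/xenopsd.sock")   # hypothetical socket path
    sock.sendall(request.encode())
    print(json.loads(sock.recv(65536).decode()))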

Xenopsd has a number of “backends” which perform the low-level VM operations such as (on Xen) “create domain”, “hotplug disk” and “destroy domain”. These backends contain all the hypervisor-specific code including:

• connecting to Xenstore
• opening the libxc /proc/xen/privcmd interface
• initialising libxl contexts

The following diagram shows the internal structure of Xenopsd:

Inside xenopsd

At the top of the diagram two client RPCs have been sent: one to start a VM and the other to fetch the latest events. The RPCs are all defined in xcp-idl/xen/xenops_interface.ml. The RPCs are received by the Xenops_server module and decomposed into […] a simple directory hierarchy.

                  \ No newline at end of file diff --git a/new-docs/xenopsd/architecture/index.print.html b/new-docs/xenopsd/architecture/index.print.html index 061317a03..198bd2c5a 100644 --- a/new-docs/xenopsd/architecture/index.print.html +++ b/new-docs/xenopsd/architecture/index.print.html @@ -1,8 +1,8 @@ Architecture :: XAPI Toolstack Developer Documentation -

Architecture

Xenopsd instances run on a host and manage VMs on behalf of clients. This picture shows 3 different Xenopsd instances: 2 named “xenopsd-xc” and 1 named “xenopsd-xenlight”.

Where xenopsd fits on a host

Each instance is responsible for managing a disjoint set of VMs. Clients should never ask more than one Xenopsd to manage the same VM. Managing a VM means:

                  • handling start/shutdown/suspend/resume/migrate/reboot
                  • allowing devices (disks, nics, PCI cards, vCPUs etc) to be manipulated
                  • providing updates to clients when things change (reboots, console becomes available, guest agent says something etc).

                  For a full list of features, consult the features list.

                  Each Xenopsd instance has a unique name on the host. A typical name is

                  • org.xen.xcp.xenops.classic
                  • org.xen.xcp.xenops.xenlight

A higher-level tool, such as xapi […]

                • message encoding: by default we use JSON but XML is also available
                • RPCs over Unix domain sockets and persistent queues.
                • This library allows the communication details to be changed without having to change all the Xapi clients and servers.

Xenopsd has a number of “backends” which perform the low-level VM operations such as (on Xen) “create domain”, “hotplug disk” and “destroy domain”. These backends contain all the hypervisor-specific code including:

• connecting to Xenstore
• opening the libxc /proc/xen/privcmd interface
• initialising libxl contexts

The following diagram shows the internal structure of Xenopsd:

Inside xenopsd

At the top of the diagram two client RPCs have been sent: one to start a VM and the other to fetch the latest events. The RPCs are all defined in xcp-idl/xen/xenops_interface.ml. The RPCs are received by the Xenops_server module and decomposed into […] configuration of the VM in order for suspend/resume and migrate to work. It is also useful to be able to tell a client, “on next reboot this value will be x but currently it is x-1”.

VM and VmExtra metadata is stored by Xenopsd in the domain 0 filesystem, in a simple directory hierarchy.

\ No newline at end of file

                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/Events/index.html b/new-docs/xenopsd/design/Events/index.html index 6abdc72ef..b8543adc3 100644 --- a/new-docs/xenopsd/design/Events/index.html +++ b/new-docs/xenopsd/design/Events/index.html @@ -1,10 +1,10 @@ Events :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/Tasks/index.html b/new-docs/xenopsd/design/Tasks/index.html index 1e633cf1d..0ec63f3e4 100644 --- a/new-docs/xenopsd/design/Tasks/index.html +++ b/new-docs/xenopsd/design/Tasks/index.html @@ -1,5 +1,5 @@ Tasks :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/hooks/index.html b/new-docs/xenopsd/design/hooks/index.html index 329556a1f..3580e6a41 100644 --- a/new-docs/xenopsd/design/hooks/index.html +++ b/new-docs/xenopsd/design/hooks/index.html @@ -1,5 +1,5 @@ Hooks :: XAPI Toolstack Developer Documentation -


                  Hooks

                  There are a number of hook points at which xenopsd may execute certain scripts. These scripts are found in hook-specific directories of the form /etc/xapi.d/<hookname>/. All executable scripts in these directories are run with the following arguments:

                  <script.sh> -reason <reason> -vmuuid <uuid of VM>
                   

                  The scripts are executed in filename-order. By convention, the filenames are usually of the form 10resetvdis.
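
A sketch of what such an executable hook script could look like, saved for example as /etc/xapi.d/vm-pre-shutdown/10logshutdown; the filename and behaviour are illustrative, and only the argument convention comes from the text above.

#!/usr/bin/env python3
# Invoked by xenopsd as: <script> -reason <reason> -vmuuid <uuid of VM>
import sys
import syslog

def flag_value(flag):
    """Return the argument following the given flag, or None."""
    args = sys.argv
    return args[args.index(flag) + 1] if flag in args else None

syslog.syslog("vm-pre-shutdown hook: vm=%s reason=%s"
              % (flag_value("-vmuuid"), flag_value("-reason")))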

                  The hook points are:

                  vm-pre-shutdown
                   vm-pre-migrate
                   vm-post-migrate (Dundee only)
[…]
                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/index.html b/new-docs/xenopsd/design/index.html index 909462d8b..931dafec8 100644 --- a/new-docs/xenopsd/design/index.html +++ b/new-docs/xenopsd/design/index.html @@ -1,10 +1,10 @@ Design :: XAPI Toolstack Developer Documentation - \ No newline at end of file diff --git a/new-docs/xenopsd/design/index.print.html b/new-docs/xenopsd/design/index.print.html index 653993138..946f75092 100644 --- a/new-docs/xenopsd/design/index.print.html +++ b/new-docs/xenopsd/design/index.print.html @@ -1,5 +1,5 @@ Design :: XAPI Toolstack Developer Documentation -


                  Design

                  Subsections of Design

                  Hooks

                  There are a number of hook points at which xenopsd may execute certain scripts. These scripts are found in hook-specific directories of the form /etc/xapi.d/<hookname>/. All executable scripts in these directories are run with the following arguments:

                  <script.sh> -reason <reason> -vmuuid <uuid of VM>
                   

                  The scripts are executed in filename-order. By convention, the filenames are usually of the form 10resetvdis.

                  The hook points are:

                  vm-pre-shutdown
                   vm-pre-migrate
                   vm-post-migrate (Dundee only)
[…]
                   they have processed the result. What if a client like xapi is restarted while
                   a Task is running?

                  We assume that, if xapi is talking to a xenopsd, then xapi completely owns it. Therefore xapi should destroy any completed tasks that it doesn’t recognise.

If a user wishes to manage VMs with xenopsd in parallel with xapi, the user should run a separate xenopsd.

                  \ No newline at end of file +should run a separate xenopsd.

                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/pvs-proxy-ovs/index.html b/new-docs/xenopsd/design/pvs-proxy-ovs/index.html index 95a8f5505..a02d5d1ee 100644 --- a/new-docs/xenopsd/design/pvs-proxy-ovs/index.html +++ b/new-docs/xenopsd/design/pvs-proxy-ovs/index.html @@ -1,5 +1,5 @@ PVS Proxy OVS Rules :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/suspend-image-considerations/index.html b/new-docs/xenopsd/design/suspend-image-considerations/index.html index 15fb0833f..a14587027 100644 --- a/new-docs/xenopsd/design/suspend-image-considerations/index.html +++ b/new-docs/xenopsd/design/suspend-image-considerations/index.html @@ -1,5 +1,5 @@ Requirements for suspend image framing :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/design/suspend-image-framing-format/index.html b/new-docs/xenopsd/design/suspend-image-framing-format/index.html index db178eb00..d4817f6be 100644 --- a/new-docs/xenopsd/design/suspend-image-framing-format/index.html +++ b/new-docs/xenopsd/design/suspend-image-framing-format/index.html @@ -1,5 +1,5 @@ Suspend image framing format :: XAPI Toolstack Developer Documentation -


                  Suspend image framing format

                  Example suspend image layout:

                  +----------------------------+
                   | 1. Suspend image signature |
                   +============================+
                   | 2.0 Xenops header          |
[…]
                   libxl.

                  \ No newline at end of file diff --git a/new-docs/xenopsd/features/index.html b/new-docs/xenopsd/features/index.html index bf07cdbe9..f21e75b8e 100644 --- a/new-docs/xenopsd/features/index.html +++ b/new-docs/xenopsd/features/index.html @@ -1,5 +1,5 @@ Features :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/index.html b/new-docs/xenopsd/index.html index 111104d81..4e5dc3015 100644 --- a/new-docs/xenopsd/index.html +++ b/new-docs/xenopsd/index.html @@ -1,5 +1,5 @@ Xenopsd :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/index.print.html b/new-docs/xenopsd/index.print.html index 2afbc5b84..baf35d042 100644 --- a/new-docs/xenopsd/index.print.html +++ b/new-docs/xenopsd/index.print.html @@ -1,5 +1,5 @@ Xenopsd :: XAPI Toolstack Developer Documentation -


                  Xenopsd

                  Xenopsd is the VM manager of the XAPI Toolstack. Xenopsd is responsible for:

                  • Starting, stopping, rebooting, suspending, resuming, migrating VMs.
                  • (Hot-)plugging and unplugging devices such as VBDs, VIFs, vGPUs and PCI devices.
                  • Setting up VM consoles.
                  • Running bootloaders.
                  • Setting QoS parameters.
                  • Configuring SMBIOS tables.
                  • Handling crashes.
                  • etc.

                  Check out the full features list.

                  The code is in ocaml/xenopsd.

                  Principles

1. Do no harm: Xenopsd should never touch domains/VMs which it hasn’t been asked to manage. This means that it can co-exist with other VM managers such as ‘xl’ and ‘libvirt’.
2. Be independent: Xenopsd should be able to work in isolation. In particular […] this state.
                  3. Be debuggable: Xenopsd will expose diagnostic APIs and tools to allow its internal state to be inspected and modified.

                  Subsections of Xenopsd

                  Architecture

Xenopsd instances run on a host and manage VMs on behalf of clients. This picture shows 3 different Xenopsd instances: 2 named “xenopsd-xc” and 1 named “xenopsd-xenlight”.

Where xenopsd fits on a host

Each instance is responsible for managing a disjoint set of VMs. Clients should never ask more than one Xenopsd to manage the same VM. Managing a VM means:

                  • handling start/shutdown/suspend/resume/migrate/reboot
                  • allowing devices (disks, nics, PCI cards, vCPUs etc) to be manipulated
                  • providing updates to clients when things change (reboots, console becomes available, guest agent says something etc).

                  For a full list of features, consult the features list.

                  Each Xenopsd instance has a unique name on the host. A typical name is

                  • org.xen.xcp.xenops.classic
                  • org.xen.xcp.xenops.xenlight

A higher-level tool, such as xapi […]

                • message encoding: by default we use JSON but XML is also available
                • RPCs over Unix domain sockets and persistent queues.
                • This library allows the communication details to be changed without having to change all the Xapi clients and servers.

Xenopsd has a number of “backends” which perform the low-level VM operations such as (on Xen) “create domain”, “hotplug disk” and “destroy domain”. These backends contain all the hypervisor-specific code including:

• connecting to Xenstore
• opening the libxc /proc/xen/privcmd interface
• initialising libxl contexts

The following diagram shows the internal structure of Xenopsd:

Inside xenopsd

At the top of the diagram two client RPCs have been sent: one to start a VM and the other to fetch the latest events. The RPCs are all defined in xcp-idl/xen/xenops_interface.ml. The RPCs are received by the Xenops_server module and decomposed into […]

Opt.iter (fun stubdom_domid ->
    Domain.unpause ~xc stubdom_domid
) (get_stubdom ~xs di.Xenctrl.domid)

                  \ No newline at end of file diff --git a/new-docs/xenopsd/walkthroughs/VM.migrate/index.html b/new-docs/xenopsd/walkthroughs/VM.migrate/index.html index 79e293246..5885c0541 100644 --- a/new-docs/xenopsd/walkthroughs/VM.migrate/index.html +++ b/new-docs/xenopsd/walkthroughs/VM.migrate/index.html @@ -1,5 +1,5 @@ Walkthrough: Migrating a VM :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/walkthroughs/VM.start/index.html b/new-docs/xenopsd/walkthroughs/VM.start/index.html index f2034904b..e590b4829 100644 --- a/new-docs/xenopsd/walkthroughs/VM.start/index.html +++ b/new-docs/xenopsd/walkthroughs/VM.start/index.html @@ -1,5 +1,5 @@ Walkthrough: Starting a VM :: XAPI Toolstack Developer Documentation -
                  \ No newline at end of file diff --git a/new-docs/xenopsd/walkthroughs/index.html b/new-docs/xenopsd/walkthroughs/index.html index 1bd278d8b..91cefd53b 100644 --- a/new-docs/xenopsd/walkthroughs/index.html +++ b/new-docs/xenopsd/walkthroughs/index.html @@ -1,11 +1,11 @@ Operation Walk-Throughs :: XAPI Toolstack Developer Documentation -


                  Operation Walk-Throughs

                  Let’s trace through interesting operations to see how the whole system works.

                  • Starting a VM
                  • Migrating a VM
                  • Shutting down a VM and waiting for it to happen
                  • A VM wants to reboot itself
                  • A disk is hotplugged
                  • A disk refuses to hotunplug
                  • A VM is suspended
                  \ No newline at end of file diff --git a/new-docs/xenopsd/walkthroughs/index.print.html b/new-docs/xenopsd/walkthroughs/index.print.html index f626a1951..2d92ad308 100644 --- a/new-docs/xenopsd/walkthroughs/index.print.html +++ b/new-docs/xenopsd/walkthroughs/index.print.html @@ -1,5 +1,5 @@ Operation Walk-Throughs :: XAPI Toolstack Developer Documentation -


                  Operation Walk-Throughs

                  Let’s trace through interesting operations to see how the whole system works.

                  • Starting a VM
                  • Migrating a VM
                  • Shutting down a VM and waiting for it to happen
                  • A VM wants to reboot itself
                  • A disk is hotplugged
                  • A disk refuses to hotunplug
                  • A VM is suspended

                  Subsections of Operation Walk-Throughs

                  Live Migration Sequence Diagram

sequenceDiagram
    autonumber
    participant tx as sender
    participant rx0 as receiver thread 0
[…]

Opt.iter (fun stubdom_domid ->
    Domain.unpause ~xc stubdom_domid
) (get_stubdom ~xs di.Xenctrl.domid)
\ No newline at end of file
                  \ No newline at end of file diff --git a/new-docs/xenopsd/walkthroughs/live-migration/index.html b/new-docs/xenopsd/walkthroughs/live-migration/index.html index 906f41dd6..71e19bbba 100644 --- a/new-docs/xenopsd/walkthroughs/live-migration/index.html +++ b/new-docs/xenopsd/walkthroughs/live-migration/index.html @@ -1,5 +1,5 @@ Live Migration Sequence Diagram :: XAPI Toolstack Developer Documentation -


                  Live Migration Sequence Diagram

sequenceDiagram
    autonumber
    participant tx as sender
    participant rx0 as receiver thread 0
[…]
    deactivate tx
                  \ No newline at end of file