Merge pull request xapi-project#5131 from psafont/private/paus/tpm-resume

CA-379350: Use up-to-date vTPM UUIDs when creating device models
robhoes authored Jul 28, 2023
2 parents 47f2319 + 2eda7e4 commit 72f1487
Showing 7 changed files with 884 additions and 973 deletions.
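
Per the commit title, the device model for a VM should be built with the vTPM UUIDs currently recorded in the xapi database rather than values captured earlier (for example at suspend time). A minimal sketch of such a lookup, assuming xapi's generated Db accessors; the helper name is hypothetical and not part of this commit:

    let vtpm_uuids_of_vm ~__context ~self =
      (* Read the VM's vTPM references from the database and map each to its
         UUID, so the caller always sees the current set rather than a cached
         copy. *)
      Db.VM.get_VTPMs ~__context ~self
      |> List.map (fun vtpm -> Db.VTPM.get_uuid ~__context ~self:vtpm)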
30 changes: 11 additions & 19 deletions ocaml/xapi/xapi_vm_snapshot.ml
@@ -104,23 +104,15 @@ let checkpoint ~__context ~vm ~new_name =
in
(* Check if SR has snapshot feature *)
let sr_has_snapshot_feature sr =
if
not
Smint.(
has_capability Vdi_snapshot
(Xapi_sr_operations.features_of_sr ~__context sr)
)
then
false
else
true
Smint.has_capability Vdi_snapshot
(Xapi_sr_operations.features_of_sr ~__context sr)
in
List.iter
(fun sr ->
if not (sr_has_snapshot_feature sr) then
raise
(Api_errors.Server_error
(Api_errors.sr_operation_not_supported, [Ref.string_of vm])
Api_errors.(
Server_error (sr_operation_not_supported, [Ref.string_of vm])
)
)
sr_records ;
@@ -211,8 +203,8 @@ let safe_destroy_vusb ~__context ~rpc ~session_id vusb =
if Db.is_valid_ref __context vusb then
Client.VUSB.destroy ~rpc ~session_id ~self:vusb

(* Copy the VBDs and VIFs from a source VM to a dest VM and then delete the old disks. *)
(* This operation destroys the data of the dest VM. *)
(* Copy the VBDs and VIFs from a source VM to a dest VM and then delete the old
disks. This operation destroys the data of the dest VM. *)
let update_vifs_vbds_vgpus_and_vusbs ~__context ~snapshot ~vm =
let snap_VBDs = Db.VM.get_VBDs ~__context ~self:snapshot in
let snap_VBDs_disk, snap_VBDs_CD =
@@ -240,7 +232,7 @@ let update_vifs_vbds_vgpus_and_vusbs ~__context ~snapshot ~vm =
List.map (fun vbd -> Db.VBD.get_VDI ~__context ~self:vbd) vm_VBDs_disk
in
(* Filter out VM disks for which the snapshot does not have a corresponding
* disk - these disks will be left unattached after the revert is complete. *)
disk - these disks will be left unattached after the revert is complete. *)
let vm_disks_with_snapshot =
List.filter (fun vdi -> List.mem vdi snap_disks_snapshot_of) vm_disks
in
@@ -271,10 +263,10 @@ let update_vifs_vbds_vgpus_and_vusbs ~__context ~snapshot ~vm =
List.iter2
(fun snap_disk (_, cloned_disk, _) ->
(* For each snapshot disk which was just cloned:
* 1) Find the value of snapshot_of
* 2) Find all snapshots with the same snapshot_of
* 3) Update each of these snapshots so that their snapshot_of points
* to the new cloned disk. *)
1) Find the value of snapshot_of
2) Find all snapshots with the same snapshot_of
3) Update each of these snapshots so that their snapshot_of points
to the new cloned disk. *)
let open Db_filter_types in
let snapshot_of = Db.VDI.get_snapshot_of ~__context ~self:snap_disk in
let all_snaps_in_tree =
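
The hunk above walks each freshly cloned snapshot disk and re-parents existing snapshots onto it; a standalone sketch of the three steps listed in its comment, using xapi's generated Db accessors (the helper itself is hypothetical, not from this commit):

    (* Point every snapshot that shares [snap_disk]'s parent at the freshly
       cloned disk. *)
    let reparent_snapshots ~__context ~snap_disk ~cloned_disk =
      (* 1) the original disk this snapshot was taken from *)
      let snapshot_of = Db.VDI.get_snapshot_of ~__context ~self:snap_disk in
      (* 2) all snapshots taken from that same disk *)
      Db.VDI.get_all ~__context
      |> List.filter (fun vdi ->
             Db.VDI.get_snapshot_of ~__context ~self:vdi = snapshot_of
         )
      (* 3) repoint them at the cloned disk *)
      |> List.iter (fun vdi ->
             Db.VDI.set_snapshot_of ~__context ~self:vdi ~value:cloned_disk
         )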
137 changes: 63 additions & 74 deletions ocaml/xapi/xapi_xenops.ml
@@ -1122,10 +1122,10 @@ module MD = struct
let open Vm in
let scheduler_params =
(* vcpu <-> pcpu affinity settings are stored here.
Format is either:
1,2,3 :: all vCPUs receive this mask
1,2,3; 4,5,6 :: vCPU n receives mask n. Unlisted vCPUs
receive first mask *)
Format is either:
1,2,3 :: all vCPUs receive this mask
1,2,3; 4,5,6 :: vCPU n receives mask n. Unlisted vCPUs
receive first mask *)
let affinity =
try
List.map
@@ -1811,21 +1811,19 @@ module Events_from_xenopsd = struct
let module Client = (val make_client queue_name : XENOPS) in
Client.UPDATES.remove_barrier dbg id ;
let t =
with_lock active_m (fun () ->
if not (Hashtbl.mem active id) then (
warn "Events_from_xenopsd.wakeup: unknown id %d" id ;
None
) else
let t = Hashtbl.find active id in
Hashtbl.remove active id ; Some t
)
with_lock active_m @@ fun () ->
if not (Hashtbl.mem active id) then (
warn "Events_from_xenopsd.wakeup: unknown id %d" id ;
None
) else
let t = Hashtbl.find active id in
Hashtbl.remove active id ; Some t
in
Option.iter
(fun t ->
with_lock t.m (fun () ->
t.finished <- true ;
Condition.signal t.c
)
with_lock t.m @@ fun () ->
t.finished <- true ;
Condition.signal t.c
)
t
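
The hunk above, like the resume rewrite further down, replaces a parenthesised closure argument with OCaml's low-precedence @@ operator; a small self-contained illustration of the equivalence (the names below are examples, not from this codebase):

    (* `f @@ fun () -> body` parses as `f (fun () -> body)`: @@ has low
       precedence, so the anonymous function extends to the end of the
       expression and one level of parentheses disappears. *)
    let with_lock m f =
      Mutex.lock m ;
      Fun.protect ~finally:(fun () -> Mutex.unlock m) f

    let incr_under_lock m counter =
      with_lock m @@ fun () ->
      incr counter ;
      !counter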

@@ -1892,12 +1890,7 @@ let update_vm ~__context id =
a <> b
in
(* Helpers to create and update guest metrics when needed *)
let lookup state key =
if List.mem_assoc key state.Vm.guest_agent then
Some (List.assoc key state.Vm.guest_agent)
else
None
in
let lookup state key = List.assoc_opt key state.Vm.guest_agent in
let list state dir =
let dir =
if dir.[0] = '/' then
@@ -3923,58 +3916,54 @@ let resume ~__context ~self ~start_paused ~force:_ =
let dbg = Context.string_of_task_and_tracing __context in
let queue_name = queue_of_vm ~__context ~self in
let vm_id = id_of_vm ~__context ~self in
transform_xenops_exn ~__context ~vm:self queue_name (fun () ->
maybe_refresh_vm ~__context ~self ;
let vdi = Db.VM.get_suspend_VDI ~__context ~self in
if vdi = Ref.null then (
info "VM suspend VDI not found; Performing VM hard_shutdown" ;
Xapi_vm_lifecycle.force_state_reset ~__context ~self ~value:`Halted ;
raise
Api_errors.(
Server_error (vm_has_no_suspend_vdi, ["VM"; Ref.string_of self])
)
) ;
let d = disk_of_vdi ~__context ~self:vdi |> Option.get in
let module Client = (val make_client queue_name : XENOPS) in
(* NB we don't set resident_on because we don't want to
modify the VM.power_state, {VBD,VIF}.currently_attached in the
failures cases. This means we must remove the metadata from
xenopsd on failure. *)
( try
Events_from_xenopsd.with_suppressed queue_name dbg vm_id (fun () ->
debug "Sending VM %s configuration to xenopsd" (Ref.string_of self) ;
let id = Xenopsd_metadata.push ~__context ~self in
Xapi_network.with_networks_attached_for_vm ~__context ~vm:self
(fun () ->
info "xenops: VM.resume %s from %s" id
(d |> rpc_of disk |> Jsonrpc.to_string) ;
Client.VM.resume dbg id d
|> sync_with_task __context ~cancellable:false queue_name ;
if not start_paused then (
info "xenops: VM.unpause %s" id ;
Client.VM.unpause dbg id
|> sync_with_task __context ~cancellable:false queue_name
)
)
)
with e ->
error "Caught exception resuming VM: %s" (string_of_exn e) ;
let id = id_of_vm ~__context ~self in
Xenopsd_metadata.delete ~__context id ;
raise e
) ;
set_resident_on ~__context ~self ;
Db.VM.set_suspend_VDI ~__context ~self ~value:Ref.null ;
(* Clearing vGPU metadata should happen as late as possible
* to make sure we only do it on a successful resume
*)
Xapi_gpumon.clear_vgpu_metadata ~__context ~vm:self ;
Helpers.call_api_functions ~__context (fun rpc session_id ->
XenAPI.VDI.destroy ~rpc ~session_id ~self:vdi
) ;
check_power_state_is ~__context ~self
~expected:(if start_paused then `Paused else `Running)
)
transform_xenops_exn ~__context ~vm:self queue_name @@ fun () ->
maybe_refresh_vm ~__context ~self ;
let vdi = Db.VM.get_suspend_VDI ~__context ~self in
if vdi = Ref.null then (
info "VM suspend VDI not found; Performing VM hard_shutdown" ;
Xapi_vm_lifecycle.force_state_reset ~__context ~self ~value:`Halted ;
let err_content = ["VM"; Ref.string_of self] in
raise Api_errors.(Server_error (vm_has_no_suspend_vdi, err_content))
) ;
let d = disk_of_vdi ~__context ~self:vdi |> Option.get in
let module Client = (val make_client queue_name : XENOPS) in
(* NB we don't set resident_on because we don't want to
modify the VM.power_state, {VBD,VIF}.currently_attached in the
failures cases. This means we must remove the metadata from
xenopsd on failure. *)
( try
Events_from_xenopsd.with_suppressed queue_name dbg vm_id @@ fun () ->
debug "Sending VM %s configuration to xenopsd" (Ref.string_of self) ;
let id = Xenopsd_metadata.push ~__context ~self in

Xapi_network.with_networks_attached_for_vm ~__context ~vm:self
@@ fun () ->
info "%s: VM.resume %s from %s" __FUNCTION__ id
(d |> rpc_of disk |> Jsonrpc.to_string) ;
Client.VM.resume dbg id d
|> sync_with_task __context ~cancellable:false queue_name ;
if not start_paused then (
info "%s: VM.unpause %s" __FUNCTION__ id ;
Client.VM.unpause dbg id
|> sync_with_task __context ~cancellable:false queue_name
)
with e ->
error "Caught exception resuming VM: %s" (string_of_exn e) ;
let id = id_of_vm ~__context ~self in
Xenopsd_metadata.delete ~__context id ;
raise e
) ;
set_resident_on ~__context ~self ;
Db.VM.set_suspend_VDI ~__context ~self ~value:Ref.null ;
(* Clearing vGPU metadata should happen as late as possible
* to make sure we only do it on a successful resume
*)
Xapi_gpumon.clear_vgpu_metadata ~__context ~vm:self ;
Helpers.call_api_functions ~__context (fun rpc session_id ->
XenAPI.VDI.destroy ~rpc ~session_id ~self:vdi
) ;
check_power_state_is ~__context ~self
~expected:(if start_paused then `Paused else `Running)

let s3suspend ~__context ~self =
let queue_name = queue_of_vm ~__context ~self in
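
The resume path above pushes the VM metadata to xenopsd first and, as its NB comment explains, deletes that metadata again if the resume fails, so nothing stale is left registered. A condensed sketch of that shape (all names here are illustrative, not xapi's actual API):

    (* Register state with a backend, attempt the operation, and undo the
       registration before re-raising if it fails. *)
    let with_pushed_metadata ~push ~delete ~f =
      let id = push () in
      try f id
      with e ->
        delete id ;
        raise e

In the real code this sequence runs inside Events_from_xenopsd.with_suppressed, which keeps xenopsd event processing for the VM quiet until the push and resume have finished.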
