Revert VM migration shutdown order change + change post_detach to post_deactivate #6260

Merged: 2 commits, Jan 29, 2025
Changes from all commits
ocaml/xapi/storage_migrate.ml (2 changes: 1 addition & 1 deletion)
@@ -1320,7 +1320,7 @@ let pre_deactivate_hook ~dbg:_ ~dp:_ ~sr ~vdi =
         s.failed <- true
     )
 
-let post_detach_hook ~sr ~vdi ~dp:_ =
+let post_deactivate_hook ~sr ~vdi ~dp:_ =
   let open State.Send_state in
   let id = State.mirror_id_of (sr, vdi) in
   State.find_active_local_mirror id
ocaml/xapi/storage_smapiv1_wrapper.ml (2 changes: 1 addition & 1 deletion)
@@ -422,10 +422,10 @@ functor
       | Vdi_automaton.Deactivate ->
           Storage_migrate.pre_deactivate_hook ~dbg ~dp ~sr ~vdi ;
           Impl.VDI.deactivate context ~dbg ~dp ~sr ~vdi ~vm ;
+          Storage_migrate.post_deactivate_hook ~sr ~vdi ~dp ;

Member commented on the added post_deactivate_hook call:

    This is SMAPIv1 only. I guess it still works for the v1 to v3 storage
    migration, but anything migrating from SMAPIv3 would need another plan.

           vdi_t
       | Vdi_automaton.Detach ->
           Impl.VDI.detach context ~dbg ~dp ~sr ~vdi ~vm ;
-          Storage_migrate.post_detach_hook ~sr ~vdi ~dp ;
           vdi_t
     in
     Sr.add_or_replace vdi new_vdi_t sr_t ;
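
Taken together, the two xapi changes move the migration mirror cleanup one
step earlier in the VDI teardown: the automaton deactivates a VDI before it
detaches it, so the renamed hook now fires on deactivate rather than on
detach. A minimal sketch of the resulting ordering, with hypothetical stubs
standing in for the real SMAPIv1 wrapper calls (only the hook names come
from the diff above):

    (* Hypothetical stand-ins for the real storage calls. *)
    let pre_deactivate_hook ~sr ~vdi ~dp = ignore (sr, vdi, dp)
    let post_deactivate_hook ~sr ~vdi ~dp = ignore (sr, vdi, dp)
    let deactivate ~sr ~vdi ~dp = ignore (sr, vdi, dp)
    let detach ~sr ~vdi ~dp = ignore (sr, vdi, dp)

    (* Teardown order implied by the wrapper: Deactivate, then Detach. *)
    let teardown ~sr ~vdi ~dp =
      pre_deactivate_hook ~sr ~vdi ~dp ;
      deactivate ~sr ~vdi ~dp ;
      post_deactivate_hook ~sr ~vdi ~dp ; (* mirror cleanup now runs here *)
      detach ~sr ~vdi ~dp                 (* no migration hook after detach *)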
ocaml/xenopsd/lib/xenops_server.ml (42 changes: 14 additions & 28 deletions)
@@ -2732,23 +2732,6 @@ and perform_exn ?result (op : operation) (t : Xenops_task.task_handle) : unit =
         ) ;
         debug "VM.migrate: Synchronisation point 1"
       in
-      let pause_src_vm () =
-        debug
-          "VM.migrate: pause src vm before allowing destination to proceed" ;
-        (* cleanup tmp src VM *)
-        let atomics =
-          [
-            VM_hook_script_stable
-              ( id
-              , Xenops_hooks.VM_pre_destroy
-              , Xenops_hooks.reason__suspend
-              , new_src_id
-              )
-          ]
-          @ atomics_of_operation (VM_shutdown (new_src_id, None))
-        in
-        perform_atomics atomics t
-      in
       let final_handshake () =
         Handshake.send vm_fd Handshake.Success ;
         debug "VM.migrate: Synchronisation point 3" ;
@@ -2789,10 +2772,7 @@
          the main VM migration sequence. *)
       match VGPU_DB.ids id with
       | [] ->
-          first_handshake () ;
-          save () ;
-          pause_src_vm () ;
-          final_handshake ()
+          first_handshake () ; save () ; final_handshake ()
       | (_vm_id, dev_id) :: _ ->
           let url =
             make_url "/migrate/vgpu/"
@@ -2809,12 +2789,20 @@
             first_handshake () ;
             save ~vgpu_fd:(FD vgpu_fd) ()
         ) ;
-        pause_src_vm () ;
         final_handshake ()
     ) ;
-    let cleanup_src_vm () =
-      let atomics =
-        [
+    (* cleanup tmp src VM *)
+    let atomics =
+      [
+        VM_hook_script_stable
+          ( id
+          , Xenops_hooks.VM_pre_destroy
+          , Xenops_hooks.reason__suspend
+          , new_src_id
+          )
+      ]
+      @ atomics_of_operation (VM_shutdown (new_src_id, None))
+      @ [
           VM_hook_script_stable
             ( id
             , Xenops_hooks.VM_post_destroy
@@ -2823,10 +2811,8 @@
             )
         ; VM_remove new_src_id
         ]
     in
-      perform_atomics atomics t
-    in
-    cleanup_src_vm ()
+    perform_atomics atomics t
 | VM_receive_memory
     {
       vmr_id= id
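
Net effect of the xenops_server.ml changes: the temporary source VM is no
longer paused and shut down between the memory save and the final handshake
(the deleted pause_src_vm step); it is shut down and removed only after the
destination has confirmed success, as a single perform_atomics batch that
runs the pre_destroy hook, VM_shutdown, the post_destroy hook, and
VM_remove in order. A condensed sketch of the resulting flow, with
hypothetical print-statement stubs standing in for the real xenopsd steps:

    (* Hypothetical stubs for the real migration steps. *)
    let first_handshake () = print_endline "sync point 1: dest ready"
    let save () = print_endline "memory image streamed to destination"
    let final_handshake () = print_endline "sync point 3: dest confirms"

    (* pre_destroy hook -> VM_shutdown -> post_destroy hook -> VM_remove,
       all performed as one atomics batch after the final handshake. *)
    let cleanup_tmp_src_vm () = print_endline "tmp src VM shut down, removed"

    let () =
      first_handshake () ;
      save () ;
      (* no pause_src_vm here any more: the source is not shut down
         before the destination confirms *)
      final_handshake () ;
      cleanup_tmp_src_vm ()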