Skip to content

Commit 15c8ba7

Browse files
committed
[shimV2] refactor scsi device controller to V2 template
In this commit, we are just ensuring that all the files and format for this device/scsi package conforms with the other controllers for V2 shims. This includes: - `states` encapsulated in states file - `types` encapsulated in types file - package documentation in `doc.go` - Similar comment and error message style. Note that in this commit, we have not made changes to the unit test files and hence we can be sure that the refactor has not broken any functional behavior. Signed-off-by: Harsh Rawat <harshrawat@microsoft.com>
1 parent 5a0252a commit 15c8ba7

20 files changed

Lines changed: 994 additions & 498 deletions

internal/controller/device/scsi/controller.go

Lines changed: 125 additions & 93 deletions
Original file line numberDiff line numberDiff line change
@@ -9,32 +9,16 @@ import (
99

1010
"github.com/Microsoft/hcsshim/internal/controller/device/scsi/disk"
1111
"github.com/Microsoft/hcsshim/internal/controller/device/scsi/mount"
12+
"github.com/Microsoft/hcsshim/internal/log"
13+
"github.com/Microsoft/hcsshim/internal/logfields"
1214

1315
"github.com/google/uuid"
16+
"github.com/sirupsen/logrus"
1417
)
1518

16-
type VMSCSIOps interface {
17-
disk.VMSCSIAdder
18-
disk.VMSCSIRemover
19-
}
20-
21-
type LinuxGuestSCSIOps interface {
22-
mount.LinuxGuestSCSIMounter
23-
mount.LinuxGuestSCSIUnmounter
24-
disk.LinuxGuestSCSIEjector
25-
}
26-
27-
type WindowsGuestSCSIOps interface {
28-
mount.WindowsGuestSCSIMounter
29-
mount.WindowsGuestSCSIUnmounter
30-
}
31-
32-
// numLUNsPerController is the maximum number of LUNs per controller, fixed by Hyper-V.
33-
const numLUNsPerController = 64
34-
35-
// The controller manages all SCSI attached devices and guest mounted
36-
// directories.
37-
//
19+
// Controller manages the full SCSI disk lifecycle — slot allocation, VM
20+
// attachment, guest mounting, and teardown — across one or more controllers
21+
// on a Hyper-V VM. All operations are serialized by a single mutex.
3822
// It is required that all callers:
3923
//
4024
// 1. Obtain a reservation using Reserve().
@@ -49,35 +33,43 @@ const numLUNsPerController = 64
4933
// If UnmapFromGuest() fails, the caller must call UnmapFromGuest() again until
5034
// it succeeds to release the reservation and all resources.
5135
type Controller struct {
52-
vm VMSCSIOps
53-
lGuest LinuxGuestSCSIOps
54-
wGuest WindowsGuestSCSIOps
55-
36+
// mu serializes all public operations on the Controller.
5637
mu sync.Mutex
5738

58-
// Every call to Reserve gets a unique reservation ID which holds pointers
59-
// to its controllerSlot for the disk and its partition for the mount.
39+
// vm is the host-side interface for adding and removing SCSI disks.
40+
// Immutable after construction.
41+
vm VMSCSIOps
42+
// linuxGuest is the guest-side interface for LCOW SCSI operations.
43+
// Immutable after construction.
44+
linuxGuest LinuxGuestSCSIOps
45+
// windowsGuest is the guest-side interface for WCOW SCSI operations.
46+
// Immutable after construction.
47+
windowsGuest WindowsGuestSCSIOps
48+
49+
// reservations maps a reservation ID to its disk slot and partition.
50+
// Guarded by mu.
6051
reservations map[uuid.UUID]*reservation
6152

62-
// For fast lookup we keep a hostPath to controllerSlot mapping for all
63-
// allocated disks.
53+
// disksByPath maps a host disk path to its controllerSlots index for
54+
// fast deduplication of disk attachments. Guarded by mu.
6455
disksByPath map[string]int
6556

66-
// Tracks all allocated and unallocated available slots on the SCSI
67-
// controllers.
57+
// controllerSlots tracks all disk slots across all SCSI controllers.
58+
// A nil entry means the slot is free for allocation.
6859
//
69-
// NumControllers == len(controllerSlots) / numLUNsPerController
70-
// ControllerID == index / numLUNsPerController
71-
// LunID == index % numLUNsPerController
60+
// Index layout:
61+
// ControllerID = index / numLUNsPerController
62+
// LUN = index % numLUNsPerController
7263
controllerSlots []*disk.Disk
7364
}
7465

75-
func New(numControllers int, vm VMSCSIOps, lGuest LinuxGuestSCSIOps, wGuest WindowsGuestSCSIOps) *Controller {
66+
// New creates a new [Controller] for the given number of SCSI controllers and
67+
// host/guest operation interfaces.
68+
func New(numControllers int, vm VMSCSIOps, linuxGuest LinuxGuestSCSIOps, windowsGuest WindowsGuestSCSIOps) *Controller {
7669
return &Controller{
7770
vm: vm,
78-
lGuest: lGuest,
79-
wGuest: wGuest,
80-
mu: sync.Mutex{},
71+
linuxGuest: linuxGuest,
72+
windowsGuest: windowsGuest,
8173
reservations: make(map[uuid.UUID]*reservation),
8274
disksByPath: make(map[string]int),
8375
controllerSlots: make([]*disk.Disk, numControllers*numLUNsPerController),
@@ -101,47 +93,56 @@ func (c *Controller) ReserveForRootfs(ctx context.Context, controller, lun uint)
10193
if c.controllerSlots[slot] != nil {
10294
return fmt.Errorf("slot for controller %d and lun %d is already reserved", controller, lun)
10395
}
104-
c.controllerSlots[slot] = disk.NewReserved(controller, lun, disk.DiskConfig{})
96+
c.controllerSlots[slot] = disk.NewReserved(controller, lun, disk.Config{})
10597
return nil
10698
}
10799

108-
// Reserves a referenced counted mapping entry for a SCSI attachment based on
100+
// Reserve reserves a reference-counted mapping entry for a SCSI attachment based on
109101
// the SCSI disk path, and partition number.
110102
//
111103
// If an error is returned from this function, it is guaranteed that no
112104
// reservation mapping was made and no UnmapFromGuest() call is necessary to
113105
// clean up.
114-
func (c *Controller) Reserve(ctx context.Context, diskConfig disk.DiskConfig, mountConfig mount.MountConfig) (uuid.UUID, error) {
106+
func (c *Controller) Reserve(ctx context.Context, diskConfig disk.Config, mountConfig mount.Config) (uuid.UUID, error) {
115107
c.mu.Lock()
116108
defer c.mu.Unlock()
117109

118-
// Generate a new reservation id.
110+
ctx, _ = log.WithContext(ctx, logrus.WithFields(logrus.Fields{
111+
logfields.HostPath: diskConfig.HostPath,
112+
logfields.Partition: mountConfig.Partition,
113+
}))
114+
115+
log.G(ctx).Debug("reserving SCSI slot")
116+
117+
// Generate a unique reservation ID.
119118
id := uuid.New()
120119
if _, ok := c.reservations[id]; ok {
121-
return uuid.Nil, fmt.Errorf("reservation ID collision")
120+
return uuid.Nil, fmt.Errorf("reservation ID already exists: %s", id)
122121
}
122+
123+
// Create the reservation entry.
123124
r := &reservation{
124125
controllerSlot: -1,
125126
partition: mountConfig.Partition,
126127
}
127128

128-
// Determine if this hostPath already had a disk known.
129+
// Check whether this disk path already has an allocated slot.
129130
if slot, ok := c.disksByPath[diskConfig.HostPath]; ok {
130-
r.controllerSlot = slot // Update our reservation where the disk is.
131-
d := c.controllerSlots[slot]
131+
r.controllerSlot = slot // Update our reservation where the disk is.
132+
existingDisk := c.controllerSlots[slot]
132133

133-
// Verify the caller config is the same.
134-
if !d.Config().Equals(diskConfig) {
134+
// Verify the caller is requesting the same disk configuration.
135+
if !existingDisk.Config().Equals(diskConfig) {
135136
return uuid.Nil, fmt.Errorf("cannot reserve ref on disk with different config")
136137
}
137138

138-
// We at least have a disk, now determine if we have a mount for this
139+
// We at least have a disk, now determine if we have a mount for this
139140
// partition.
140-
if _, err := d.ReservePartition(ctx, mountConfig); err != nil {
141+
if _, err := existingDisk.ReservePartition(ctx, mountConfig); err != nil {
141142
return uuid.Nil, fmt.Errorf("reserve partition %d: %w", mountConfig.Partition, err)
142143
}
143144
} else {
144-
// No hostPath was found. Find a slot for the disk.
145+
// No existing slot for this path — find a free one.
145146
nextSlot := -1
146147
for i, d := range c.controllerSlots {
147148
if d == nil {
@@ -150,72 +151,103 @@ func (c *Controller) Reserve(ctx context.Context, diskConfig disk.DiskConfig, mo
150151
}
151152
}
152153
if nextSlot == -1 {
153-
return uuid.Nil, fmt.Errorf("no available slots")
154+
return uuid.Nil, fmt.Errorf("no available SCSI slots")
154155
}
155156

156157
// Create the Disk and Partition Mount in the reserved states.
157158
controller := uint(nextSlot / numLUNsPerController)
158159
lun := uint(nextSlot % numLUNsPerController)
159-
d := disk.NewReserved(controller, lun, diskConfig)
160-
if _, err := d.ReservePartition(ctx, mountConfig); err != nil {
160+
newDisk := disk.NewReserved(controller, lun, diskConfig)
161+
if _, err := newDisk.ReservePartition(ctx, mountConfig); err != nil {
161162
return uuid.Nil, fmt.Errorf("reserve partition %d: %w", mountConfig.Partition, err)
162163
}
163-
c.controllerSlots[controller*numLUNsPerController+lun] = d
164+
c.controllerSlots[controller*numLUNsPerController+lun] = newDisk
164165
c.disksByPath[diskConfig.HostPath] = nextSlot
165166
r.controllerSlot = nextSlot
166167
}
167168

168169
// Ensure our reservation is saved for all future operations.
169170
c.reservations[id] = r
171+
log.G(ctx).WithField("reservation", id).Debug("SCSI slot reserved")
170172
return id, nil
171173
}
172174

173-
func (c *Controller) MapToGuest(ctx context.Context, reservation uuid.UUID) (string, error) {
175+
// MapToGuest attaches the reserved disk to the VM and mounts its partition
176+
// inside the guest, returning the guest path. It is idempotent for a
177+
// reservation that is already fully mapped.
178+
func (c *Controller) MapToGuest(ctx context.Context, id uuid.UUID) (string, error) {
174179
c.mu.Lock()
175180
defer c.mu.Unlock()
176181

177-
if r, ok := c.reservations[reservation]; ok {
178-
d := c.controllerSlots[r.controllerSlot]
179-
if err := d.AttachToVM(ctx, c.vm); err != nil {
180-
return "", fmt.Errorf("attach disk to vm: %w", err)
181-
}
182-
guestPath, err := d.MountPartitionToGuest(ctx, r.partition, c.lGuest, c.wGuest)
183-
if err != nil {
184-
return "", fmt.Errorf("mount partition %d to guest: %w", r.partition, err)
185-
}
186-
return guestPath, nil
182+
r, ok := c.reservations[id]
183+
if !ok {
184+
return "", fmt.Errorf("reservation %s not found", id)
185+
}
186+
187+
existingDisk := c.controllerSlots[r.controllerSlot]
188+
189+
log.G(ctx).WithFields(logrus.Fields{
190+
logfields.HostPath: existingDisk.HostPath(),
191+
logfields.Partition: r.partition,
192+
}).Debug("mapping SCSI disk to guest")
193+
194+
// Attach the disk to the VM's SCSI bus (idempotent if already attached).
195+
if err := existingDisk.AttachToVM(ctx, c.vm); err != nil {
196+
return "", fmt.Errorf("attach disk to VM: %w", err)
187197
}
188-
return "", fmt.Errorf("reservation %s not found", reservation)
198+
199+
// Mount the partition inside the guest.
200+
guestPath, err := existingDisk.MountPartitionToGuest(ctx, r.partition, c.linuxGuest, c.windowsGuest)
201+
if err != nil {
202+
return "", fmt.Errorf("mount partition %d to guest: %w", r.partition, err)
203+
}
204+
205+
log.G(ctx).WithField(logfields.UVMPath, guestPath).Debug("SCSI disk mapped to guest")
206+
return guestPath, nil
189207
}
190208

191-
func (c *Controller) UnmapFromGuest(ctx context.Context, reservation uuid.UUID) error {
209+
// UnmapFromGuest unmounts the partition from the guest and, when all
210+
// reservations for a disk are released, detaches the disk from the VM and
211+
// frees the SCSI slot. A failed call is retryable with the same reservation ID.
212+
func (c *Controller) UnmapFromGuest(ctx context.Context, id uuid.UUID) error {
192213
c.mu.Lock()
193214
defer c.mu.Unlock()
194215

195-
if r, ok := c.reservations[reservation]; ok {
196-
d := c.controllerSlots[r.controllerSlot]
197-
// Ref counted unmount.
198-
if err := d.UnmountPartitionFromGuest(ctx, r.partition, c.lGuest, c.wGuest); err != nil {
199-
return fmt.Errorf("unmount partition %d from guest: %w", r.partition, err)
200-
}
201-
if err := d.DetachFromVM(ctx, c.vm, c.lGuest); err != nil {
202-
return fmt.Errorf("detach disk from vm: %w", err)
203-
}
204-
if d.State() == disk.DiskStateDetached {
205-
// If we have no more mounts on this disk, remove the disk from the
206-
// known disks and free the slot.
207-
delete(c.disksByPath, d.HostPath())
208-
c.controllerSlots[r.controllerSlot] = nil
209-
}
210-
delete(c.reservations, reservation)
211-
return nil
216+
ctx, _ = log.WithContext(ctx, logrus.WithField("reservation", id.String()))
217+
218+
r, ok := c.reservations[id]
219+
if !ok {
220+
return fmt.Errorf("reservation %s not found", id)
212221
}
213-
return fmt.Errorf("reservation %s not found", reservation)
214-
}
215222

216-
type reservation struct {
217-
// This is the index into controllerSlots that holds this disk.
218-
controllerSlot int
219-
// This is the index into the disk mounts for this partition.
220-
partition uint64
223+
existingDisk := c.controllerSlots[r.controllerSlot]
224+
225+
log.G(ctx).WithFields(logrus.Fields{
226+
logfields.HostPath: existingDisk.HostPath(),
227+
logfields.Partition: r.partition,
228+
}).Debug("unmapping SCSI disk from guest")
229+
230+
// Unmount the partition from the guest (ref-counted; only issues the
231+
// guest call when this is the last reservation on the partition).
232+
if err := existingDisk.UnmountPartitionFromGuest(ctx, r.partition, c.linuxGuest, c.windowsGuest); err != nil {
233+
return fmt.Errorf("unmount partition %d from guest: %w", r.partition, err)
234+
}
235+
236+
// Detach the disk from the VM when no partitions remain active.
237+
if err := existingDisk.DetachFromVM(ctx, c.vm, c.linuxGuest); err != nil {
238+
return fmt.Errorf("detach disk from VM: %w", err)
239+
}
240+
241+
// If the disk is now fully detached, free its slot for reuse.
242+
if existingDisk.State() == disk.StateDetached {
243+
delete(c.disksByPath, existingDisk.HostPath())
244+
c.controllerSlots[r.controllerSlot] = nil
245+
log.G(ctx).Debug("SCSI slot freed")
246+
}
247+
248+
// Remove the reservation last so it remains available for retries if
249+
// any earlier step above fails.
250+
delete(c.reservations, id)
251+
log.G(ctx).Debug("SCSI disk unmapped from guest")
252+
return nil
221253
}

0 commit comments

Comments
 (0)