Test Fixtures

This section documents the test fixtures provided by sts-libs. These fixtures help set up test environments for various storage technologies.

Common Fixtures

sts.fixtures.common_fixtures

Common test fixtures.

This module provides fixtures that can be used across different test suites:

- Virtual block devices (loop, scsi_debug)
- System resources
- Common utilities

Fixture Dependencies:

1. loop_devices
   - Independent fixture
   - Creates temporary loop devices
   - Handles cleanup automatically

2. scsi_debug_devices
   - Independent fixture
   - Creates SCSI debug devices
   - Handles cleanup automatically

Common Usage:

  1. Basic device testing:

    def test_single_device(loop_devices):
        device = loop_devices[0]
        # Test with single device
    

  2. Multi-device testing:

    @pytest.mark.parametrize('loop_devices', [2], indirect=True)
    def test_multiple_devices(loop_devices):
        dev1, dev2 = loop_devices
        # Test with multiple devices
    

  3. SCSI debug testing:

    @pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)
    def test_scsi_devices(scsi_debug_devices):
        # num_tgts=2 with add_host=2 yields four devices (see source below)
        dev1, dev2, dev3, dev4 = scsi_debug_devices
        # Test with SCSI debug devices
    

Error Handling:

- Device creation failures skip the test
- Cleanup runs even if test fails
- Resource limits are checked

debugfs_module_reader(managed_module)

Fixture to prepare and provide access to a module's debugfs directory.

Relies on the 'managed_module' fixture to ensure the module is loaded. Ensures debugfs is mounted and the module's debugfs directory exists.
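
Example

A minimal usage sketch (assumes 'managed_module' is configured for a module that exposes a debugfs directory):

    def test_debugfs_dir(debugfs_module_reader):
        # Directory object wrapping /sys/kernel/debug/<module_name>
        assert debugfs_module_reader.exists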

Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture
def debugfs_module_reader(managed_module: ModuleInfo) -> Generator[Directory, None, None]:
    """Fixture to prepare and provide access to a module's debugfs directory.

    Relies on the 'managed_module' fixture to ensure the module is loaded.
    Ensures debugfs is mounted and the module's debugfs directory exists.
    """
    module_name = managed_module.name
    if not module_name:
        pytest.skip(f'Managed module fixture yielded ModuleInfo with name=None for {managed_module}')

    logging.info(f'Setting up debugfs reader for already loaded module: {module_name}')

    # Module loading is handled by the managed_module fixture dependency.
    # We can assume managed_module.loaded is True here, otherwise the test would have skipped.

    # Ensure debugfs is mounted
    debugfs_path = Path('/sys/kernel/debug')
    if not debugfs_path.is_mount():
        logging.info(f'Debugfs not mounted at {debugfs_path}, attempting mount.')
        try:
            # Check if debugfs filesystem type is already mounted somewhere else
            mount_output = run('mount')
            if f'debugfs on {debugfs_path}' not in mount_output.stdout and ' type debugfs' in mount_output.stdout:
                existing_mount = [line for line in mount_output.stdout.splitlines() if ' type debugfs' in line]
                logging.warning(f'Debugfs already mounted elsewhere: {existing_mount}. Proceeding anyway.')
            # Only attempt mount if not already mounted at the target path
            elif f'debugfs on {debugfs_path}' not in mount_output.stdout:
                run(f'mount -t debugfs none {debugfs_path}')

            if not debugfs_path.is_mount():
                pytest.skip(f'Failed to mount debugfs at {debugfs_path} after attempt')
        except OSError as e:
            pytest.skip(f'OS error mounting debugfs at {debugfs_path}: {e}')

    # Check module debugfs directory
    module_debugfs_dir = debugfs_path / module_name
    if not module_debugfs_dir.exists():
        pytest.skip(f'Debugfs directory {module_debugfs_dir} not found for module {module_name}')
    if not module_debugfs_dir.is_dir():
        pytest.skip(f'Path {module_debugfs_dir} exists but is not a directory')

    logging.info(f'Providing Directory object for {module_debugfs_dir}')
    yield Directory(module_debugfs_dir)

    # No specific cleanup needed here; module unloading is handled by managed_module teardown.
    logging.info(f'Finished using debugfs reader for module: {module_name}')

ensure_minimum_devices()

Fixture that ensures minimum number of devices without block size filtering.
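
Example

A minimal sketch; the fixture is typically requested for its setup/teardown side effects:

    @pytest.mark.usefixtures('ensure_minimum_devices')
    def test_needs_devices():
        # the minimum number of devices is guaranteed to exist here
        ...

The same pattern applies to ensure_minimum_devices_with_same_block_sizes below.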

Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture
def ensure_minimum_devices() -> Generator:
    """Fixture that ensures minimum number of devices without block size filtering."""
    yield from _ensure_minimum_devices_base(filter_by_block_size=False)

ensure_minimum_devices_with_same_block_sizes()

Fixture that ensures minimum number of devices with same block sizes.

Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture
def ensure_minimum_devices_with_same_block_sizes() -> Generator:
    """Fixture that ensures minimum number of devices with same block sizes."""
    yield from _ensure_minimum_devices_base(filter_by_block_size=True)

loop_devices(request)

Create loop devices for testing.

Creates virtual block devices using the loop driver:

- Each device is 1GB in size
- Devices are sparse (only allocate used space)
- Devices are automatically cleaned up
- Supports multiple devices per test

Configuration:

- count: Number of devices to create (default: 1)
- size_mb: Size of each device in MB (default: 1024)

Set via parametrize:

    @pytest.mark.parametrize('loop_devices', [2], indirect=True)

Or with custom size:

    @pytest.mark.parametrize('loop_devices', [{'count': 1, 'size_mb': 4096}], indirect=True)

Error Handling:

- Skips test if device creation fails
- Cleans up any created devices on failure
- Validates device paths before yielding

Parameters:

- request (FixtureRequest, required): Pytest fixture request with 'count' parameter

Yields:

- list[str]: List of loop device paths (e.g. ['/dev/loop0', '/dev/loop1'])

Example

Single device:

    def test_device(loop_devices):
        assert len(loop_devices) == 1
        assert loop_devices[0].startswith('/dev/loop')

Multiple devices:

    @pytest.mark.parametrize('loop_devices', [2], indirect=True)
    def test_devices(loop_devices):
        assert len(loop_devices) == 2
        assert all(d.startswith('/dev/loop') for d in loop_devices)
Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture
def loop_devices(request: pytest.FixtureRequest) -> Generator[list[str], None, None]:
    """Create loop devices for testing.

    Creates virtual block devices using the loop driver:
    - Each device is 1GB in size
    - Devices are sparse (only allocate used space)
    - Devices are automatically cleaned up
    - Supports multiple devices per test

    Configuration:
    - count: Number of devices to create (default: 1)
    - size_mb: Size of each device in MB (default: 1024)
      Set via parametrize: @pytest.mark.parametrize('loop_devices', [2], indirect=True)
      Or with custom size: @pytest.mark.parametrize('loop_devices', [{'count': 1, 'size_mb': 4096}], indirect=True)

    Error Handling:
    - Skips test if device creation fails
    - Cleans up any created devices on failure
    - Validates device paths before yielding

    Args:
        request: Pytest fixture request with 'count' parameter

    Yields:
        List of loop device paths (e.g. ['/dev/loop0', '/dev/loop1'])

    Example:
        # Single device
        ```python
        def test_device(loop_devices):
            assert len(loop_devices) == 1
            assert loop_devices[0].startswith('/dev/loop')
        ```
        # Multiple devices
        ```python
        @pytest.mark.parametrize('loop_devices', [2], indirect=True)
        def test_devices(loop_devices):
            assert len(loop_devices) == 2
            assert all(d.startswith('/dev/loop') for d in loop_devices)
        ```
    """
    # Handle different parameter formats
    param = getattr(request, 'param', 1)
    if isinstance(param, dict):
        count = param.get('count', 1)
        size_mb = param.get('size_mb', 1024)
    else:
        count = param  # Backward compatibility for just count
        size_mb = 1024  # Default size
    devices = []

    # Create devices one by one
    for _ in range(count):
        device = LoopDevice.create(size_mb=size_mb)
        if not device:
            # Clean up any created devices
            for dev in devices:
                dev.remove()
            pytest.skip(f'Failed to create loop device {len(devices) + 1} of {count}')
        devices.append(device)

    # Yield device paths
    yield [str(dev.device_path) for dev in devices]

    # Clean up
    for device in devices:
        device.remove()

prepare_1minutetip_disk()

This fixture is used to prepare a spare disk for testing on specific 1minutetip flavor (ci.m1.small.ephemeral).

It will wipe /dev/vdb and return a single-item list of BlockDevice objects.
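
Example

A minimal sketch using the returned single-item list:

    def test_spare_disk(prepare_1minutetip_disk):
        # single-item list of BlockDevice objects wrapping /dev/vdb
        assert len(prepare_1minutetip_disk) == 1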

Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture(scope='class')
def prepare_1minutetip_disk() -> list[BlockDevice]:
    """This fixture is used to prepare a spare disk for testing on specific 1minutetip flavor (ci.m1.small.ephemeral).

    It will wipe /dev/vdb and return a single-item list of BlockDevice objects.
    """
    flag = Path('/var/tmp/STS_PREPARE_1MINUTETIP_DISK_FLAG')
    disk_path = '/dev/vdb'
    try:
        disk = BlockDevice(disk_path)
    except DeviceNotFoundError:
        pytest.fail(f'Disk {disk_path} not found')

    # Wipe disk if flag file does not exist, this is to avoid wiping the disk multiple times.
    # We need to remove partition table that is always there after provisioning.
    if not flag.exists():
        assert disk.wipe_device()
        flag.touch()
    else:
        logging.info(f'Disk {disk_path} already wiped')

    # Return a list of one device to be used with setup_vg fixture
    return [disk]

scsi_debug_devices(request)

Create SCSI debug devices for testing.

Creates virtual SCSI devices using the scsi_debug module:

- Each device is 1GB in size
- Devices share a single scsi_debug instance
- Devices are automatically cleaned up
- Supports multiple devices per test

Configuration:

- count: Number of targets and hosts to create (default: 1)
  Note: the fixture yields count * count devices (num_tgts=count, add_host=count)

Set via parametrize:

    @pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)

Error Handling:

- Skips test if module loading fails
- Skips test if device creation fails
- Cleans up module and devices on failure
- Validates device count before yielding

Parameters:

- request (FixtureRequest, required): Pytest fixture request with 'count' parameter

Yields:

- list[str]: List of SCSI device paths (e.g. ['/dev/sda', '/dev/sdb'])

Example

    # Single device
    def test_device(scsi_debug_devices):
        assert len(scsi_debug_devices) == 1
        assert scsi_debug_devices[0].startswith('/dev/sd')

    # Multiple devices: count=2 yields 2 * 2 = 4 devices
    @pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)
    def test_devices(scsi_debug_devices):
        assert len(scsi_debug_devices) == 4
        assert all(d.startswith('/dev/sd') for d in scsi_debug_devices)
Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture(scope='class')
def scsi_debug_devices(request: pytest.FixtureRequest) -> Generator[list[str], None, None]:
    """Create SCSI debug devices for testing.

    Creates virtual SCSI devices using the scsi_debug module:
    - Each device is 1GB in size
    - Devices share a single scsi_debug instance
    - Devices are automatically cleaned up
    - Supports multiple devices per test

    Configuration:
    - count: Number of targets and hosts to create (default: 1)
      Note: the fixture yields count * count devices (num_tgts=count, add_host=count)
      Set via parametrize: @pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)

    Error Handling:
    - Skips test if module loading fails
    - Skips test if device creation fails
    - Cleans up module and devices on failure
    - Validates device count before yielding

    Args:
        request: Pytest fixture request with 'count' parameter

    Yields:
        List of SCSI device paths (e.g. ['/dev/sda', '/dev/sdb'])

    Example:
        ```python
        # Single device
        def test_device(scsi_debug_devices):
            assert len(scsi_debug_devices) == 1
            assert scsi_debug_devices[0].startswith('/dev/sd')


        # Multiple devices
        @pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)
        def test_devices(scsi_debug_devices):
            assert len(scsi_debug_devices) == 4
            assert all(d.startswith('/dev/sd') for d in scsi_debug_devices)
        ```
    """
    count = getattr(request, 'param', 1)  # Default to 1 device if not specified
    total = count**2  # expected_devices = num_tgts * add_host
    logging.info(f'Creating {total} scsi_debug devices')

    # Create SCSI debug device with specified number of targets
    device = ScsiDebugDevice.create(
        size=1024 * 1024 * 1024,  # 1GB
        options=f'num_tgts={count} add_host={count}',
    )
    if not device:
        pytest.skip('Failed to create SCSI debug device')

    # Get all SCSI debug devices
    devices = ScsiDebugDevice.get_devices()
    if not devices or len(devices) < total:
        device.remove()
        pytest.skip(f'Expected {total} SCSI debug devices, got {len(devices or [])}')

    # Yield device paths
    yield [f'/dev/{dev}' for dev in devices[:total]]

    # Clean up
    device.remove()

timed_operation()

Fixture providing timed operation context manager.

Example

    def test_example(timed_operation):
        with timed_operation('My operation'):
            do_something()
Source code in sts_libs/src/sts/fixtures/common_fixtures.py
@pytest.fixture
def timed_operation() -> Callable[[str], AbstractContextManager[None]]:
    """Fixture providing timed operation context manager.

    Example:
        ```python
        def test_example(timed_operation):
            with timed_operation('My operation'):
                do_something()
        ```
    """

    @contextmanager
    def _timed_operation(description: str) -> Generator[None, None, None]:
        start = datetime.now(tz=timezone.utc)
        logging.info(f'Starting: {description}')
        try:
            yield
        finally:
            duration = datetime.now(tz=timezone.utc) - start
            logging.info(f'Completed: {description} (took {duration.total_seconds():.1f}s)')

    return _timed_operation

iSCSI Fixtures

sts.fixtures.iscsi_fixtures

iSCSI test fixtures.

This module provides fixtures for testing iSCSI:

- Package installation
- Service management
- Device configuration
- Parameter verification
- Session management

Fixture Dependencies:

1. _iscsi_test (base fixture)
   - Installs iSCSI utilities
   - Manages sessions
2. iscsi_localhost_test (depends on _iscsi_test)
   - Sets up target environment
3. iscsi_target (depends on iscsi_localhost_test)
   - Creates target and initiator
   - Manages connections

IscsiTestConfig dataclass

Configuration for iSCSI test environment.

Attributes:

- base_iqn (str): Base IQN for test
- target_iqn (str): Target IQN
- initiator_iqn (str): Initiator IQN
- size (str): Size of LUNs
- n_luns (int): Number of LUNs
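
Example

A construction sketch using the defaults for size and n_luns:

    config = IscsiTestConfig(
        base_iqn='iqn.2024-01.sts.example',
        target_iqn='iqn.2024-01.sts.example:target',
        initiator_iqn='iqn.2024-01.sts.example:initiator',
    )
    assert config.size == '1G' and config.n_luns == 1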

Source code in sts_libs/src/sts/fixtures/iscsi_fixtures.py
@dataclass
class IscsiTestConfig:
    """Configuration for iSCSI test environment.

    Attributes:
        base_iqn: Base IQN for test
        target_iqn: Target IQN
        initiator_iqn: Initiator IQN
        size: Size of LUNs
        n_luns: Number of LUNs
    """

    base_iqn: str
    target_iqn: str
    initiator_iqn: str
    size: str = '1G'
    n_luns: int = 1

generate_test_iqns(test_name)

Generate IQNs for test environment.

Parameters:

- test_name (str, required): Name of the test

Returns:

- tuple[str, str, str]: Tuple of (base_iqn, target_iqn, initiator_iqn)
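
Example

A worked example; the parametrize suffix is stripped and underscores become dashes:

    base, target, initiator = generate_test_iqns('test_login[param0]')
    # base      == 'iqn.2024-01.sts.test-login'
    # target    == 'iqn.2024-01.sts.test-login:target'
    # initiator == 'iqn.2024-01.sts.test-login:initiator'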

Source code in sts_libs/src/sts/fixtures/iscsi_fixtures.py
def generate_test_iqns(test_name: str) -> tuple[str, str, str]:
    """Generate IQNs for test environment.

    Args:
        test_name: Name of the test

    Returns:
        Tuple of (base_iqn, target_iqn, initiator_iqn)
    """
    test_name = test_name.split('[')[0]  # Remove parametrize part
    # Replace underscores with dashes for IQN compatibility
    test_name = test_name.replace('_', '-')
    base_iqn = f'iqn.2024-01.sts.{test_name}'
    target_iqn = f'{base_iqn}:target'
    initiator_iqn = f'{base_iqn}:initiator'
    return base_iqn, target_iqn, initiator_iqn

get_test_device()

Get test device path.

Returns:

- Callable[[], Path]: Function to get device path

Example

    def test_device(get_test_device):
        device = get_test_device()
        assert device.exists()
Source code in sts_libs/src/sts/fixtures/iscsi_fixtures.py
@pytest.fixture
def get_test_device() -> Callable[[], Path]:
    """Get test device path.

    Returns:
        Function to get device path

    Example:
        ```python
        def test_device(get_test_device):
            device = get_test_device()
            assert device.exists()
        ```
    """

    def _get_test_device() -> Path:
        """Get test device path.

        Returns:
            Device path

        Raises:
            AssertionError: If no device found
        """
        mp_service = MultipathService()
        if mp_service.is_running():
            devices = MultipathDevice.get_all()
            if devices and devices[0].path:
                return Path(str(devices[0].path))

        devices = ScsiDevice.get_by_vendor('LIO-ORG')
        # Break down complex assertion
        assert devices, 'No LIO devices found'
        assert devices[0].path, 'Device path not available'
        return Path(str(devices[0].path))

    return _get_test_device

iscsi_localhost_test(request, _iscsi_test)

Set up iSCSI target environment.

This fixture:

- Installs target utilities
- Creates target configuration
- Cleans up environment

Parameters:

- request (FixtureRequest, required): Fixture request
- _iscsi_test (None, required): Parent fixture providing base setup

Yields:

- str: Target IQN

Example

    def test_target(iscsi_localhost_test):
        target_iqn = iscsi_localhost_test
        assert Iscsi(target_iqn).exists()
Source code in sts_libs/src/sts/fixtures/iscsi_fixtures.py
@pytest.fixture(scope='class')
def iscsi_localhost_test(request: pytest.FixtureRequest, _iscsi_test: None) -> Generator[str, None, None]:
    """Set up iSCSI target environment.

    This fixture:
    - Installs target utilities
    - Creates target configuration
    - Cleans up environment

    Args:
        request: Fixture request
        _iscsi_test: Parent fixture providing base setup

    Yields:
        Target IQN

    Example:
        ```python
        def test_target(iscsi_localhost_test):
            target_iqn = iscsi_localhost_test
            assert Iscsi(target_iqn).exists()
        ```
    """
    assert ensure_installed('targetcli')

    # Generate IQNs
    _, target_iqn, _ = generate_test_iqns(request.node.name)

    # Clean up target config
    target = Iscsi(target_wwn=target_iqn)
    target.delete_target()

    yield target_iqn

    # Clean up target config
    target.delete_target()

iscsi_target(request, iscsi_localhost_test)

Create iSCSI target and connect initiator.

This fixture:

- Creates target with specified size and number of LUNs
- Sets up initiator
- Logs in to target
- Yields connected node
- Cleans up on exit

Parameters:

- request (FixtureRequest, required): Fixture request with parameters:
  - size: Size of each LUN (default: '1G')
  - n_luns: Number of LUNs (default: 1)
- iscsi_localhost_test (required): Parent fixture providing target IQN

Example

    @pytest.mark.parametrize('iscsi_target', [{'size': '2G', 'n_luns': 2}], indirect=True)
    def test_something(iscsi_target):
        assert iscsi_target.exists()
Source code in sts_libs/src/sts/fixtures/iscsi_fixtures.py
@pytest.fixture
def iscsi_target(request: pytest.FixtureRequest, iscsi_localhost_test: None) -> Generator[IscsiNode, None, None]:  # noqa: ARG001
    """Create iSCSI target and connect initiator.

    This fixture:
    - Creates target with specified size and number of LUNs
    - Sets up initiator
    - Logs in to target
    - Yields connected node
    - Cleans up on exit

    Args:
        request: Fixture request with parameters:
            - size: Size of each LUN (default: '1G')
            - n_luns: Number of LUNs (default: 1)
        iscsi_localhost_test: Parent fixture providing target IQN

    Example:
        ```python
        @pytest.mark.parametrize('iscsi_target', [{'size': '2G', 'n_luns': 2}], indirect=True)
        def test_something(iscsi_target):
            assert iscsi_target.exists()
        ```
    """
    # Generate IQNs
    _, target_iqn, initiator_iqn = generate_test_iqns(request.node.name)

    # Get parameters
    params = request.param if hasattr(request, 'param') else {}
    config = IscsiTestConfig(
        base_iqn=target_iqn.rsplit(':', 1)[0],
        target_iqn=target_iqn,
        initiator_iqn=initiator_iqn,
        size=params.get('size', '1G'),
        n_luns=params.get('n_luns', 1),
    )

    # Set initiator name
    assert set_initiatorname(config.initiator_iqn), 'Failed to set initiator name'

    # Create target
    assert create_basic_iscsi_target(
        target_wwn=config.target_iqn,
        initiator_wwn=config.initiator_iqn,
        size=config.size,
    ), 'Failed to create target'

    # Create additional LUNs if needed
    if config.n_luns > 1:
        test_name = request.node.name.split('[')[0]
        for i in range(1, config.n_luns):
            backstore_name = f'{test_name}_lun{i}'
            backstore = BackstoreFileio(name=backstore_name)
            backstore.create_backstore(size=config.size, file_or_dev=f'{backstore_name}_file')
            IscsiLUN(target_wwn=config.target_iqn).create_lun(storage_object=backstore.path)

    # Set up initiator and login
    node = IscsiNode.setup_and_login(
        portal='127.0.0.1:3260',
        initiator_iqn=config.initiator_iqn,
        target_iqn=config.target_iqn,
    )

    with manage_iscsi_session(node):
        yield node

manage_iscsi_session(node)

Context manager for iSCSI session management.

Parameters:

- node (IscsiNode, required): IscsiNode instance to manage

Yields:

- None
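
Example

A minimal sketch; logout runs even if the block raises:

    node = IscsiNode.setup_and_login(
        portal='127.0.0.1:3260',
        initiator_iqn='iqn.2024-01.sts.example:initiator',
        target_iqn='iqn.2024-01.sts.example:target',
    )
    with manage_iscsi_session(node):
        ...  # node.logout() runs on exit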

Source code in sts_libs/src/sts/fixtures/iscsi_fixtures.py
@contextmanager
def manage_iscsi_session(node: IscsiNode) -> Generator[None, None, None]:
    """Context manager for iSCSI session management.

    Args:
        node: IscsiNode instance to manage

    Yields:
        None
    """
    try:
        yield
    finally:
        node.logout()

LVM Fixtures

sts.fixtures.lvm_fixtures

LVM test fixtures.

This module provides fixtures for testing LVM (Logical Volume Management):

- Package installation and cleanup
- Service management
- Device configuration
- VDO (Virtual Data Optimizer) support

Fixture Dependencies:

1. _lvm_test (base fixture)
   - Installs LVM packages
   - Manages volume cleanup
   - Logs system information

2. _vdo_test (depends on _lvm_test)
   - Installs VDO packages
   - Manages kernel module
   - Provides data reduction features

Common Usage:

1. Basic LVM testing:

    @pytest.mark.usefixtures('_lvm_test')
    def test_lvm():
        # LVM utilities are installed
        # Volumes are cleaned up after test
        ...

2. VDO-enabled testing:

    @pytest.mark.usefixtures('_vdo_test')
    def test_vdo():
        # VDO module is loaded
        # Data reduction is available
        ...

Error Handling:

- Package installation failures fail the test
- Module loading failures fail the test
- Volume cleanup runs even if test fails
- Service issues are logged

basic_thin_pool(setup_loopdev_vg)

Create a basic thin pool for testing.

Creates a 3GB thin pool that can accommodate thin volumes with filesystem support.

Parameters:

- setup_loopdev_vg (str, required): Volume group name from setup_loopdev_vg fixture

Yields:

- dict[str, Any]: Information about the created thin pool
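
Example

A minimal sketch using the keys yielded by the fixture:

    def test_thin_pool(basic_thin_pool):
        assert basic_thin_pool['pool_name'] == 'thinpool'
        assert basic_thin_pool['pool_path'].endswith('/thinpool')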

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def basic_thin_pool(setup_loopdev_vg: str) -> Generator[dict[str, Any], None, None]:
    """Create a basic thin pool for testing.

    Creates a 3GB thin pool that can accommodate thin volumes with filesystem support.

    Args:
        setup_loopdev_vg: Volume group name from setup_loopdev_vg fixture

    Yields:
        dict: Information about the created thin pool
    """
    vg_name = setup_loopdev_vg
    pool_name = 'thinpool'

    # Create thin pool (3GB to accommodate 10x300MB thin volumes with filesystem support)
    pool_lv = lvm.LogicalVolume(name=pool_name, vg=vg_name)
    assert pool_lv.create(type='thin-pool', size='3G')

    pool_info = {
        'vg_name': vg_name,
        'pool_name': pool_name,
        'pool_path': f'/dev/{vg_name}/{pool_name}',
    }

    yield pool_info

    # Cleanup
    pool_lv.remove()

cache_metadata_backup(cache_metadata_swap)

Create cache metadata backup files for testing.

Creates cache metadata dump and prepares repair files for testing.

Parameters:

- cache_metadata_swap (dict[str, Any], required): Cache metadata swap setup from cache_metadata_swap fixture

Yields:

- dict[str, Any]: Extended information with backup file paths
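
Example

A minimal sketch; the yielded dict extends cache_metadata_swap with the backup paths:

    def test_cache_backup(cache_metadata_backup):
        # Path objects created by the fixture
        assert cache_metadata_backup['cache_dump_path'].exists()
        assert cache_metadata_backup['cache_repair_path'].exists()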

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def cache_metadata_backup(cache_metadata_swap: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
    """Create cache metadata backup files for testing.

    Creates cache metadata dump and prepares repair files for testing.

    Args:
        cache_metadata_swap: Cache metadata swap setup from cache_metadata_swap fixture

    Yields:
        dict: Extended information with backup file paths
    """
    cache_info = cache_metadata_swap.copy()
    cache_metadata_dev = cache_info['cache_metadata_dev']
    cache_dump_path = Path('/var/tmp/cache_dump')
    cache_repair_path = Path('/var/tmp/cache_repair')

    # Create cache metadata dump (to match testing expectations)
    dump_result = dmpd.cache_dump(cache_metadata_dev, output=str(cache_dump_path))
    assert dump_result.succeeded

    # Create empty repair file with proper allocation (5MB should be enough)
    assert fallocate(cache_repair_path, length='5M')

    cache_info.update(
        {
            'cache_dump_path': cache_dump_path,
            'cache_repair_path': cache_repair_path,
        }
    )

    yield cache_info

    # Cleanup files
    run(f'rm -f {cache_dump_path} {cache_repair_path}')

cache_metadata_swap(cache_split, swap_volume)

Perform cache metadata swap operation.

Swaps cache metadata to the swap volume for DMPD testing.

Parameters:

- cache_split (dict[str, Any], required): Cache split setup from cache_split fixture
- swap_volume (dict[str, Any], required): Swap volume setup from swap_volume fixture

Returns:

- dict[str, Any]: Combined information with cache metadata device details
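
Example

A minimal sketch; cache_check comes from device-mapper-persistent-data (see install_dmpd below):

    def test_cache_check(cache_metadata_swap):
        dev = cache_metadata_swap['cache_metadata_dev']
        # verify the swapped-out cache metadata is readable
        assert run(f'cache_check {dev}').succeeded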

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def cache_metadata_swap(cache_split: dict[str, Any], swap_volume: dict[str, Any]) -> dict[str, Any]:
    """Perform cache metadata swap operation.

    Swaps cache metadata to the swap volume for DMPD testing.

    Args:
        cache_split: Cache split setup from cache_split fixture
        swap_volume: Swap volume setup from swap_volume fixture

    Returns:
        dict: Combined information with cache metadata device details
    """
    cache_info = cache_split.copy()
    swap_info = swap_volume

    # Ensure both fixtures reference the same VG
    assert cache_info['vg_name'] == swap_info['vg_name'], 'Cache and swap must be in same VG'

    vg_name = cache_info['vg_name']
    cache_pool_name = cache_info['cache_pool_name']
    swap_name = swap_info['swap_name']

    # Deactivate volumes before metadata swap
    cache_pool_lv = lvm.LogicalVolume(name=cache_pool_name, vg=vg_name)
    cache_pool_lv.deactivate()  # Ignore errors
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    swap_lv.deactivate()
    run('udevadm settle')

    # Swap cache metadata to swap volume (matching setup logic)
    convert_result = run(f'lvconvert -y --cachepool {vg_name}/{cache_pool_name} --poolmetadata {vg_name}/{swap_name}')
    assert convert_result.succeeded

    # Activate swap volume (now containing cache metadata)
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    swap_lv.activate()
    run('udevadm settle')

    # Use swap LV as cache metadata device
    cache_metadata_dev = f'/dev/{vg_name}/{swap_name}'

    # Combine information from both fixtures
    combined_info = cache_info.copy()
    combined_info.update(swap_info)
    combined_info.update(
        {
            'cache_metadata_dev': cache_metadata_dev,
            'cache_metadata_swapped': True,
        }
    )

    return combined_info

cache_pool(cache_volumes)

Create cache pool by merging cache data and metadata volumes.

Parameters:

- cache_volumes (dict[str, Any], required): Cache volumes setup from cache_volumes fixture

Returns:

- dict[str, Any]: Extended cache information with pool details

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def cache_pool(cache_volumes: dict[str, Any]) -> dict[str, Any]:
    """Create cache pool by merging cache data and metadata volumes.

    Args:
        cache_volumes: Cache volumes setup from cache_volumes fixture

    Returns:
        dict: Extended cache information with pool details
    """
    cache_info = cache_volumes.copy()
    vg_name = cache_info['vg_name']
    cache_data_name = cache_info['cache_data_name']
    cache_meta_name = cache_info['cache_meta_name']

    # Use lvm convert to create cache pool (matching setup logic)
    convert_result = run(
        f'lvconvert -y --type cache-pool --cachemode writeback '
        f'--poolmetadata {vg_name}/{cache_meta_name} {vg_name}/{cache_data_name}'
    )
    assert convert_result.succeeded

    cache_info.update(
        {
            'cache_pool_created': True,
            'cache_pool_name': cache_data_name,  # Pool takes the name of data LV
            'cache_pool_path': f'/dev/{vg_name}/{cache_data_name}',
        }
    )

    return cache_info

cache_split(cache_volume)

Split cache volume to separate cache pool and origin.

Parameters:

- cache_volume (dict[str, Any], required): Cache volume setup from cache_volume fixture

Returns:

- dict[str, Any]: Extended cache information with split cache details

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def cache_split(cache_volume: dict[str, Any]) -> dict[str, Any]:
    """Split cache volume to separate cache pool and origin.

    Args:
        cache_volume: Cache volume setup from cache_volume fixture

    Returns:
        dict: Extended cache information with split cache details
    """
    cache_info = cache_volume.copy()
    vg_name = cache_info['vg_name']
    cached_lv_name = cache_info['cached_lv_name']

    # Split cache (matching setup logic)
    split_result = run(f'lvconvert -y --splitcache {vg_name}/{cached_lv_name}')
    assert split_result.succeeded

    cache_info.update(
        {
            'cache_split': True,
        }
    )

    return cache_info

cache_volume(cache_pool)

Create cached volume by adding origin to cache pool.

Parameters:

- cache_pool (dict[str, Any], required): Cache pool setup from cache_pool fixture

Returns:

- dict[str, Any]: Extended cache information with cached volume details

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def cache_volume(cache_pool: dict[str, Any]) -> dict[str, Any]:
    """Create cached volume by adding origin to cache pool.

    Args:
        cache_pool: Cache pool setup from cache_pool fixture

    Returns:
        dict: Extended cache information with cached volume details
    """
    cache_info = cache_pool.copy()
    vg_name = cache_info['vg_name']
    cache_origin_name = cache_info['cache_origin_name']
    cache_pool_name = cache_info['cache_pool_name']

    # Convert origin LV to cached LV
    convert_result = run(
        f'lvconvert -y --type cache --cachepool {vg_name}/{cache_pool_name} {vg_name}/{cache_origin_name}'
    )
    assert convert_result.succeeded

    # Create ext4 filesystem on cached volume (matching setup logic)
    run(f'mkfs.ext4 /dev/{vg_name}/{cache_origin_name}')

    cache_info.update(
        {
            'cache_volume_created': True,
            'cached_lv_name': cache_origin_name,  # Origin LV becomes the cached LV
            'cached_lv_path': f'/dev/{vg_name}/{cache_origin_name}',
        }
    )

    return cache_info

cache_volumes(setup_loopdev_vg)

Create cache volumes for testing.

Creates cache metadata, origin, and data logical volumes that can be used for creating cache pools and cached volumes.

Parameters:

- setup_loopdev_vg (str, required): Volume group name from setup_loopdev_vg fixture

Yields:

- dict[str, Any]: Information about the created cache volumes
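
Example

A minimal sketch using the yielded names and paths:

    def test_cache_volumes(cache_volumes):
        vg = cache_volumes['vg_name']
        assert cache_volumes['cache_data_path'] == f'/dev/{vg}/cache_data'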

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def cache_volumes(setup_loopdev_vg: str) -> Generator[dict[str, Any], None, None]:
    """Create cache volumes for testing.

    Creates cache metadata, origin, and data logical volumes that can be used
    for creating cache pools and cached volumes.

    Args:
        setup_loopdev_vg: Volume group name from setup_loopdev_vg fixture

    Yields:
        dict: Information about the created cache volumes
    """
    vg_name = setup_loopdev_vg
    cache_meta_name = 'cache_meta'
    cache_origin_name = 'cache_origin'
    cache_data_name = 'cache_data'

    # Create cache metadata LV (12MB as per original setup)
    cache_meta_lv = lvm.LogicalVolume(name=cache_meta_name, vg=vg_name)
    assert cache_meta_lv.create(size='12M')

    # Create cache origin LV (300MB as per original setup)
    cache_origin_lv = lvm.LogicalVolume(name=cache_origin_name, vg=vg_name)
    assert cache_origin_lv.create(size='300M')

    # Create cache data LV (100MB as per original setup)
    cache_data_lv = lvm.LogicalVolume(name=cache_data_name, vg=vg_name)
    assert cache_data_lv.create(size='100M')

    cache_info = {
        'vg_name': vg_name,
        'cache_meta_name': cache_meta_name,
        'cache_origin_name': cache_origin_name,
        'cache_data_name': cache_data_name,
        'cache_meta_path': f'/dev/{vg_name}/{cache_meta_name}',
        'cache_origin_path': f'/dev/{vg_name}/{cache_origin_name}',
        'cache_data_path': f'/dev/{vg_name}/{cache_data_name}',
        'cache_meta_lv': cache_meta_lv,
        'cache_origin_lv': cache_origin_lv,
        'cache_data_lv': cache_data_lv,
    }

    yield cache_info

    # Cleanup
    cache_data_lv.remove()
    cache_origin_lv.remove()
    cache_meta_lv.remove()

install_dmpd(_lvm_test)

Install required packages for device-mapper-persistent-data tools.

This fixture installs the device-mapper-persistent-data package which provides cache metadata tools like cache_check, cache_dump, cache_repair, etc.

Example
@pytest.mark.usefixtures('install_dmpd_packages')
def test_cache_tools():
    # DMPD tools are now available
    pass
Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture(scope='class')
def install_dmpd(_lvm_test: None) -> None:
    """Install required packages for device-mapper-persistent-data tools.

    This fixture installs the device-mapper-persistent-data package which provides
    cache metadata tools like cache_check, cache_dump, cache_repair, etc.

    Example:
        ```python
        @pytest.mark.usefixtures('install_dmpd')
        def test_cache_tools():
            # DMPD tools are now available
            pass
        ```
    """
    system = SystemManager()
    package = 'device-mapper-persistent-data'

    assert system.package_manager.install(package), f'Failed to install {package}'

load_vdo_module(_lvm_test)

Load the appropriate VDO kernel module based on kernel version.

This fixture installs the VDO package and loads the correct VDO kernel module depending on the system's kernel version: - For kernel 6.9+: uses dm-vdo module (built into kernel) - For kernel 6.8 and earlier: uses kvdo module (from kmod-kvdo package)

The fixture handles kernel version detection and falls back to dm-vdo if version parsing fails.

Parameters:

- _lvm_test (None, required): LVM test fixture dependency (ensures LVM setup is complete)

Returns:

- str: Name of the loaded VDO module ('dm-vdo' or 'kvdo')

Raises:

- AssertionError: If VDO package installation or module loading fails
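
Example

A minimal sketch; the returned module name depends on the kernel version:

    def test_vdo_module(load_vdo_module):
        assert load_vdo_module in ('dm-vdo', 'kvdo')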

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture(scope='class')
def load_vdo_module(_lvm_test: None) -> str:
    """Load the appropriate VDO kernel module based on kernel version.

    This fixture installs the VDO package and loads the correct VDO kernel module
    depending on the system's kernel version:
    - For kernel 6.9+: uses dm-vdo module (built into kernel)
    - For kernel 6.8 and earlier: uses kvdo module (from kmod-kvdo package)

    The fixture handles kernel version detection and falls back to dm-vdo if
    version parsing fails.

    Args:
        _lvm_test: LVM test fixture dependency (ensures LVM setup is complete)

    Returns:
        str: Name of the loaded VDO module ('dm-vdo' or 'kvdo')

    Raises:
        AssertionError: If VDO package installation or module loading fails
    """
    module = 'dm-vdo'
    system = SystemManager()
    assert system.package_manager.install(VDO_PACKAGE_NAME)
    log_package_versions(VDO_PACKAGE_NAME)
    try:
        k_version = system.info.kernel
        if k_version:
            k_version = k_version.split('.')
            # dm-vdo is available from kernel 6.9, for older version it's available
            # from kmod-kvdo package
            if int(k_version[0]) < 6 or (int(k_version[0]) == 6 and int(k_version[1]) <= 8):
                logging.info('Using kmod-kvdo')
                assert system.package_manager.install('kmod-kvdo')
                log_package_versions('kmod-kvdo')
                module = 'kvdo'
    except (ValueError, IndexError):
        # if we can't get kernel version, just try to load dm-vdo
        logging.warning('Unable to parse kernel version; defaulting to dm-vdo')

    kmod = ModuleManager()
    assert kmod.load(name=module)

    return module

lv_quarter_of_vg(_lvm_test, setup_vg)

Create a logical volume using 25% of a volume group.

Creates:

- Logical volume using 25% of VG space (name from LV_NAME env var, default 'stscow25vglv1')

Yields:

- str: Device path
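
Example

A minimal sketch using the yielded device path:

    def test_lv_path(lv_quarter_of_vg):
        assert lv_quarter_of_vg.startswith('/dev/')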

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def lv_quarter_of_vg(_lvm_test: None, setup_vg: str) -> Generator[str, None, None]:
    """Create a logical volume using 25% of a volume group.

    Creates:
    - Logical volume using 25% of VG space (LV_NAME env var, default 'stscow25vglv1')

    Yields:
        str: device path
    """
    lv_name = getenv('LV_NAME', 'stscow25vglv1')
    vg_name = setup_vg
    # Create LV
    lv = lvm.LogicalVolume(name=lv_name, vg=vg_name)
    assert lv.create(extents='25%vg')

    yield f'/dev/{vg_name}/{lv_name}'

    # Cleanup
    lv = lvm.LogicalVolume(name=lv_name, vg=vg_name)
    assert lv.remove()

metadata_backup(metadata_swap)

Create metadata backup files for testing.

Creates metadata backup using thin_dump and prepares repair file for testing.

Parameters:

- metadata_swap (dict[str, Any], required): Metadata swap setup from metadata_swap fixture

Yields:

- dict[str, Any]: Extended information with backup file paths
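
Example

A minimal sketch; the backup is produced with thin_dump and a working copy is restored via thin_restore:

    def test_metadata_files(metadata_backup):
        assert metadata_backup['metadata_backup_path'].exists()
        assert metadata_backup['metadata_working_path'].exists()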

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def metadata_backup(metadata_swap: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
    """Create metadata backup files for testing.

    Creates metadata backup using thin_dump and prepares repair file for testing.

    Args:
        metadata_swap: Metadata swap setup from metadata_swap fixture

    Yields:
        dict: Extended information with backup file paths
    """
    vol_info = metadata_swap.copy()
    metadata_dev = vol_info['metadata_dev']
    metadata_backup_path = Path('/var/tmp/metadata')
    metadata_repair_path = Path('/var/tmp/metadata_repair')

    # Create metadata backup using thin_dump (matching backup_metadata from main.fmf)
    backup_cmd = f'thin_dump --format xml --repair {metadata_dev} --output {metadata_backup_path}'
    backup_result = run(backup_cmd)
    assert backup_result.succeeded

    # Create proper metadata files for testing
    # 1. Create empty repair file with proper allocation (5MB should be enough)
    assert fallocate(metadata_repair_path, length='5M')

    # 2. Create a working metadata file that thin_repair can actually repair
    metadata_working_path = Path('/var/tmp/metadata_working')
    assert fallocate(metadata_working_path, length='5M')

    # 3. Populate the working metadata file with valid data from backup
    restore_working_cmd = f'thin_restore -i {metadata_backup_path} -o {metadata_working_path}'
    restore_working_result = run(restore_working_cmd)
    assert restore_working_result.succeeded, f'Failed to create working metadata: {restore_working_result.stderr}'

    # Update vol_info to include all metadata files
    vol_info.update(
        {
            'metadata_backup_path': metadata_backup_path,
            'metadata_repair_path': metadata_repair_path,
            'metadata_working_path': metadata_working_path,
        }
    )

    yield vol_info

    # Cleanup files
    run(f'rm -f {metadata_backup_path} {metadata_repair_path} {metadata_working_path}')

metadata_snapshot(thin_volumes_with_lifecycle)

Create metadata snapshot for thin pool.

Creates a metadata snapshot while the thin pool is active and handles the suspend/message/resume sequence for snapshot creation.

Parameters:

- thin_volumes_with_lifecycle (dict[str, Any], required): Thin volumes setup from thin_volumes_with_lifecycle fixture

Yields:

- dict[str, Any]: Pool information with snapshot status
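
Example

A minimal sketch using the snapshot status set by the fixture:

    def test_snapshot(metadata_snapshot):
        assert metadata_snapshot['has_snapshot']
        assert metadata_snapshot['pool_device'].startswith('/dev/mapper/')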

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def metadata_snapshot(thin_volumes_with_lifecycle: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
    """Create metadata snapshot for thin pool.

    Creates a metadata snapshot while the thin pool is active and handles
    the suspend/message/resume sequence for snapshot creation.

    Args:
        thin_volumes_with_lifecycle: Thin volumes setup from thin_volumes_with_lifecycle fixture

    Yields:
        dict: Pool information with snapshot status
    """
    pool_info = thin_volumes_with_lifecycle.copy()
    vg_name = pool_info['vg_name']
    pool_name = pool_info['pool_name']

    udevadm_settle()

    # Create metadata snapshot while pool is still active
    pool_device = f'/dev/mapper/{vg_name}-{pool_name}-tpool'

    # Suspend -> message -> resume sequence (matching metadata_snapshot from setup.py)
    suspend_result = run(f'dmsetup suspend {pool_device}')
    assert suspend_result.succeeded

    message_result = run(f'dmsetup message {pool_device} 0 reserve_metadata_snap')
    assert message_result.succeeded

    resume_result = run(f'dmsetup resume {pool_device}')
    assert resume_result.succeeded

    # Now deactivate thin volumes (matching deactivate_thinvols from setup)
    for i in range(int(pool_info['thin_count'])):
        thin_name = f'{pool_info["thin_base_name"]}{i}'
        thin_lv = lvm.LogicalVolume(name=thin_name, vg=vg_name)
        thin_lv.deactivate()

    udevadm_settle()

    pool_info.update(
        {
            'pool_device': pool_device,
            'has_snapshot': True,
        }
    )

    yield pool_info

    # Release metadata snapshot
    run(f'dmsetup message {pool_device} 0 release_metadata_snap')

metadata_swap(metadata_snapshot, swap_volume)

Perform metadata swap operation between thin pool and swap volume.

Deactivates the thin pool and swap volume, then uses lvconvert to swap the metadata from the thin pool to the swap volume.

Parameters:

- metadata_snapshot (dict[str, Any], required): Metadata snapshot setup from metadata_snapshot fixture
- swap_volume (dict[str, Any], required): Swap volume setup from swap_volume fixture

Returns:

- dict[str, Any]: Combined information with metadata device details

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def metadata_swap(metadata_snapshot: dict[str, Any], swap_volume: dict[str, Any]) -> dict[str, Any]:
    """Perform metadata swap operation between thin pool and swap volume.

    Deactivates the thin pool and swap volume, then uses lvconvert to swap
    the metadata from the thin pool to the swap volume.

    Args:
        metadata_snapshot: Metadata snapshot setup from metadata_snapshot fixture
        swap_volume: Swap volume setup from swap_volume fixture

    Returns:
        dict: Combined information with metadata device details
    """
    pool_info = metadata_snapshot.copy()
    swap_info = swap_volume

    # Ensure both fixtures reference the same VG
    assert pool_info['vg_name'] == swap_info['vg_name'], 'Pool and swap must be in same VG'

    vg_name = pool_info['vg_name']
    pool_name = pool_info['pool_name']
    swap_name = swap_info['swap_name']

    # Deactivate pool and swap (matching swap_metadata logic from setup.py)
    pool_lv = lvm.LogicalVolume(name=pool_name, vg=vg_name)
    pool_lv.deactivate()
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    swap_lv.deactivate()

    logging.info(run('lvs').stdout)
    udevadm_settle()

    # Swap metadata using lv_convert --poolmetadata (exact logic from setup.py)
    # This converts the swap LV to hold the thin pool's metadata
    convert_cmd = f'lvconvert -y --thinpool {vg_name}/{pool_name} --poolmetadata {vg_name}/{swap_name}'
    convert_result = run(convert_cmd)
    assert convert_result.succeeded

    # Activate swap volume (now containing metadata)
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    swap_lv.activate()

    # Use swap LV as metadata device (it now contains the metadata)
    metadata_dev = f'/dev/{vg_name}/{swap_name}'

    # Combine information from both fixtures
    combined_info = pool_info.copy()
    combined_info.update(swap_info)
    combined_info.update(
        {
            'metadata_dev': metadata_dev,
        }
    )

    return combined_info

mount_lv(lv_quarter_of_vg)

Mount a logical volume on a test directory.

Parameters:

- lv_quarter_of_vg (str, required): Fixture providing LV info

Yields:

- Directory: Directory representation of mount point
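
Example

A minimal sketch; the yielded Directory wraps the XFS mount point:

    def test_mounted_lv(mount_lv):
        assert mount_lv.exists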

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def mount_lv(lv_quarter_of_vg: str) -> Generator[Directory, None, None]:
    """Mount a logical volume on a test directory.

    Args:
        lv_quarter_of_vg: Fixture providing LV info

    Yields:
        Directory: Directory representation of mount point
    """
    dev_path = lv_quarter_of_vg
    mount_point = getenv('STS_LV_MOUNT_POINT', '/mnt/lvcowmntdir')

    # Create filesystem on the LV
    assert mkfs(device=dev_path, fs_type='xfs')

    # Create mount point directory using Directory class
    mnt_dir = Directory(Path(mount_point), create=True)
    assert mnt_dir.exists, f'Failed to create mount point directory {mount_point}'

    # Mount the LV
    assert mount(device=dev_path, mountpoint=mount_point)

    yield mnt_dir

    # Cleanup
    assert umount(mountpoint=mount_point)
    mnt_dir.remove_dir()

mount_thin_lv(thin_lv_quarter_of_vg)

Mount a thin logical volume on a test directory.

Parameters:

- thin_lv_quarter_of_vg (str, required): Fixture providing thin LV info

Yields:

- Directory: Directory representation of mount point

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def mount_thin_lv(thin_lv_quarter_of_vg: str) -> Generator[Directory, None, None]:
    """Mount a thin logical volume on a test directory.

    Args:
        thin_lv_quarter_of_vg: Fixture providing thin LV info

    Yields:
        Directory: Directory representation of mount point
    """
    dev_path = thin_lv_quarter_of_vg
    mount_point = getenv('STS_THIN_LV_MOUNT_POINT', '/mnt/thinlvmntdir')

    # Create filesystem on the thin LV
    assert mkfs(device=dev_path, fs_type='xfs')

    # Create mount point directory using Directory class
    mnt_dir = Directory(Path(mount_point), create=True)
    assert mnt_dir.exists, f'Failed to create mount point directory {mount_point}'

    # Mount the LV
    assert mount(device=dev_path, mountpoint=mount_point)

    yield mnt_dir

    # Cleanup
    assert umount(mountpoint=mount_point)
    mnt_dir.remove_dir()

prepare_multiple_cow_mntpoints(_lvm_test, setup_vg, request)

Create multiple COW logical volumes with mounted filesystems for testing.

This fixture creates multiple logical volumes within a volume group, formats them with filesystems, and mounts them to separate mount points. It's designed for testing Copy-on-Write (COW) snapshots with multiple source volumes.

Supports parameter customization via pytest.param or environment variables.

Yields:

- list[Directory]: List of Directory objects representing the mount points
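
Example

A minimal sketch; the parametrize key shown is passed through to the underlying helper and is an assumption here:

    @pytest.mark.parametrize('prepare_multiple_cow_mntpoints', [{'fs_type': 'xfs'}], indirect=True)
    def test_cow_mounts(prepare_multiple_cow_mntpoints):
        for mnt_dir in prepare_multiple_cow_mntpoints:
            assert mnt_dir.exists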

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def prepare_multiple_cow_mntpoints(
    _lvm_test: None, setup_vg: str, request: pytest.FixtureRequest
) -> Generator[list[Directory], None, None]:
    """Create multiple COW logical volumes with mounted filesystems for testing.

    This fixture creates multiple logical volumes within a volume group, formats them
    with filesystems, and mounts them to separate mount points. It's designed for
    testing Copy-on-Write (COW) snapshots with multiple source volumes.

    Supports parameter customization via pytest.param or environment variables.

    Yields:
        list[Directory]: List of Directory objects representing the mount points
    """
    # Get parameters from request if provided, otherwise use environment variables
    params = getattr(request, 'param', {})

    yield from _create_multiple_lv_mntpoints(
        vg_name=setup_vg,
        lv_type='cow',
        **params,
    )

prepare_multiple_cow_mntpoints_ext4(_lvm_test, setup_vg)

Create multiple COW logical volumes with ext4 filesystems for testing.

This is a convenience wrapper that configures COW logical volumes to use ext4 filesystem by default.

Yields:

- list[Directory]: List of Directory objects representing the mount points

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def prepare_multiple_cow_mntpoints_ext4(_lvm_test: None, setup_vg: str) -> Generator[list[Directory], None, None]:
    """Create multiple COW logical volumes with ext4 filesystems for testing.

    This is a convenience wrapper that configures COW logical volumes
    to use ext4 filesystem by default.

    Yields:
        list[Directory]: List of Directory objects representing the mount points
    """
    yield from _create_multiple_lv_mntpoints(vg_name=setup_vg, lv_type='cow', fs_type='ext4')

prepare_multiple_thin_mntpoints(_lvm_test, setup_vg, request)

Create multiple thin logical volumes with mounted filesystems for testing.

This fixture creates multiple thin logical volumes within a volume group, each with its own thin pool, formats them with filesystems, and mounts them to separate mount points. It's designed for testing thin provisioning scenarios with multiple volumes.

Supports parameter customization via pytest.param or environment variables.

Yields:

- list[Directory]: List of Directory objects representing the mount points

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def prepare_multiple_thin_mntpoints(
    _lvm_test: None, setup_vg: str, request: pytest.FixtureRequest
) -> Generator[list[Directory], None, None]:
    """Create multiple thin logical volumes with mounted filesystems for testing.

    This fixture creates multiple thin logical volumes within a volume group, each with
    its own thin pool, formats them with filesystems, and mounts them to separate mount
    points. It's designed for testing thin provisioning scenarios with multiple volumes.

    Supports parameter customization via pytest.param or environment variables.

    Yields:
        list[Directory]: List of Directory objects representing the mount points
    """
    # Get parameters from request if provided, otherwise use environment variables
    params = getattr(request, 'param', {})

    yield from _create_multiple_lv_mntpoints(
        vg_name=setup_vg,
        lv_type='thin',
        **params,
    )

prepare_multiple_thin_mntpoints_ext4(_lvm_test, setup_vg)

Create multiple thin logical volumes with ext4 filesystems for testing.

This is a convenience wrapper that configures thin logical volumes to use ext4 filesystem by default.

Yields:

    list[Directory]: List of Directory objects representing the mount points

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def prepare_multiple_thin_mntpoints_ext4(_lvm_test: None, setup_vg: str) -> Generator[list[Directory], None, None]:
    """Create multiple thin logical volumes with ext4 filesystems for testing.

    This is a convenience wrapper that configures thin logical volumes
    to use ext4 filesystem by default.

    Yields:
        list[Directory]: List of Directory objects representing the mount points
    """
    yield from _create_multiple_lv_mntpoints(vg_name=setup_vg, lv_type='thin', fs_type='ext4')

restored_thin_pool(metadata_backup)

Restore thin pool to a usable state after metadata operations.

WARNING: Use this fixture ONLY for tests that specifically need an active thin pool (like thin_trim). Most DMPD tools are designed to work with "broken" metadata and should use setup_thin_metadata_for_dmpd instead, which preserves the intentionally inconsistent metadata state.

This fixture uses thin_restore to repair the metadata and make the pool activatable.

Parameters:

    metadata_backup (dict[str, Any], required): Metadata backup setup from metadata_backup fixture

Yields:

    dict[str, Any]: Pool information with restored pool that can be activated
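A usage sketch along the lines of the thin_trim case mentioned above; the dictionary keys come straight from the fixture body below, and the `lvm` import path is assumed:

```python
from sts import lvm  # import path assumed; matches the `lvm.` prefix used in the fixture


def test_thin_trim(restored_thin_pool):
    vg_name = restored_thin_pool['vg_name']
    pool_name = restored_thin_pool['pool_name']
    assert restored_thin_pool['pool_can_activate']

    # The repaired pool can now be activated for tools that need a live pool
    pool_lv = lvm.LogicalVolume(name=pool_name, vg=vg_name)
    assert pool_lv.activate()
```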

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def restored_thin_pool(metadata_backup: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
    """Restore thin pool to a usable state after metadata operations.

    WARNING: Use this fixture ONLY for tests that specifically need an active thin pool
    (like thin_trim). Most DMPD tools are designed to work with "broken" metadata and
    should use setup_thin_metadata_for_dmpd instead, which preserves the intentionally
    inconsistent metadata state.

    This fixture uses thin_restore to repair the metadata and make the pool activatable.

    Args:
        metadata_backup: Metadata backup setup from metadata_backup fixture

    Yields:
        dict: Pool information with restored pool that can be activated
    """
    vol_info = metadata_backup.copy()
    vg_name = vol_info['vg_name']
    pool_name = vol_info['pool_name']
    swap_name = vol_info['swap_name']
    metadata_backup_path = vol_info['metadata_backup_path']
    metadata_dev = vol_info['metadata_dev']

    # Step 1: Use thin_restore to repair the metadata in the swap device
    logging.info(f'Restoring metadata to repair inconsistencies in {metadata_dev}')
    restore_cmd = f'thin_restore -i {metadata_backup_path} -o {metadata_dev}'
    restore_result = run(restore_cmd)
    assert restore_result.succeeded, f'Failed to restore metadata: {restore_result.stderr}'

    # Step 2: Deactivate both volumes before swapping metadata back
    pool_lv = lvm.LogicalVolume(name=pool_name, vg=vg_name)
    pool_lv.deactivate()  # Pool might already be deactivated
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    swap_lv.deactivate()
    udevadm_settle()

    # Step 3: "Swap back metadata" - restore the fixed metadata to the pool
    # This matches the "Swapping back metadata" step from python-stqe cleanup
    swap_back_cmd = f'lvconvert -y --thinpool {vg_name}/{pool_name} --poolmetadata {vg_name}/{swap_name}'
    swap_back_result = run(swap_back_cmd)
    assert swap_back_result.succeeded, f'Failed to swap metadata back to pool: {swap_back_result.stderr}'

    # Step 4: Reactivate the swap volume and verify device accessibility
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    activate_swap_result = swap_lv.activate()
    assert activate_swap_result, 'Failed to reactivate swap volume'
    udevadm_settle()

    # Verify the metadata device exists and update the path if needed
    metadata_dev_path = f'/dev/{vg_name}/{swap_name}'
    check_dev = run(f'ls -la {metadata_dev_path}')
    if not check_dev.succeeded:
        # Try alternative device path
        metadata_dev_path = f'/dev/mapper/{vg_name}-{swap_name}'
        check_dev_alt = run(f'ls -la {metadata_dev_path}')
        assert check_dev_alt.succeeded, f'Swap device not accessible at {metadata_dev_path}'

    # Update the metadata_dev path to the verified working path
    vol_info['metadata_dev'] = metadata_dev_path

    # Now the pool should have the fixed metadata and be activatable
    vol_info.update(
        {
            'pool_can_activate': True,
            'metadata_restored': True,
            'metadata_swapped_back': True,
        }
    )

    yield vol_info

    # Leave pool in deactivated state for cleanup
    pool_lv = lvm.LogicalVolume(name=pool_name, vg=vg_name)
    pool_lv.deactivate()  # Ignore errors

setup_cache_metadata_for_dmpd(install_dmpd, cache_metadata_backup)

Set up cache metadata configuration for DMPD tool testing.

This fixture creates the necessary cache metadata setup that DMPD cache tools can operate on. Unlike thin metadata which intentionally creates "broken" state, cache metadata swap creates a working metadata device that cache tools can analyze.

Parameters:

    install_dmpd (None, required): DMPD package installation fixture
    cache_metadata_backup (dict[str, Any], required): Cache metadata backup setup from cache_metadata_backup fixture

Returns:

    dict[str, Any]: Extended cache information for DMPD testing
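A consumption sketch, not from the source: `cache_check` is a standard DMPD tool, `run` is the same command helper used throughout these fixtures, and the `metadata_dev` key is an assumption carried over from the thin metadata fixtures:

```python
def test_cache_check(setup_cache_metadata_for_dmpd):
    # 'metadata_dev' is an assumed key pointing at the swapped-out cache metadata
    metadata_dev = setup_cache_metadata_for_dmpd['metadata_dev']
    result = run(f'cache_check {metadata_dev}')
    assert result.succeeded, f'cache_check failed: {result.stderr}'
```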

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def setup_cache_metadata_for_dmpd(install_dmpd: None, cache_metadata_backup: dict[str, Any]) -> dict[str, Any]:
    """Set up cache metadata configuration for DMPD tool testing.

    This fixture creates the necessary cache metadata setup that DMPD cache tools
    can operate on. Unlike thin metadata which intentionally creates "broken" state,
    cache metadata swap creates a working metadata device that cache tools can analyze.

    Args:
        install_dmpd: DMPD package installation fixture
        cache_metadata_backup: Cache metadata backup setup from cache_metadata_backup fixture

    Returns:
        dict: Extended cache information for DMPD testing
    """
    # DMPD packages are installed via install_dmpd fixture
    _ = install_dmpd

    # Use cache_metadata_backup which provides working cache metadata for DMPD testing
    return cache_metadata_backup.copy()

setup_loopdev_vg(_lvm_test, loop_devices)

Set up a volume group using loop devices.

This fixture creates a volume group using the provided loop devices. The volume group name can be customized using the STS_VG_NAME environment variable, otherwise defaults to 'stsvg0'.

Parameters:

    loop_devices (list[str], required): List of loop device paths to use as physical volumes.

Yields:

    str: The name of the created volume group.

Examples:

Basic usage with custom loop device configuration:

```python
@pytest.mark.parametrize('loop_devices', [{'count': 1, 'size_mb': 4096}], indirect=True)
@pytest.mark.usefixtures('setup_loopdev_vg')
def test_large_vg_operations(setup_loopdev_vg):
    vg_name = setup_loopdev_vg
    # Create logical volumes in the 4GB VG
    lv = lvm.LogicalVolume(name='testlv', vg=vg_name, size='1G')
    assert lv.create()
```

Using with multiple loop devices:

```python
@pytest.mark.parametrize('loop_devices', [{'count': 2, 'size_mb': 2048}], indirect=True)
@pytest.mark.usefixtures('setup_loopdev_vg')
def test_multi_pv_vg(setup_loopdev_vg):
    vg_name = setup_loopdev_vg
    vg = lvm.VolumeGroup(name=vg_name)
    assert vg.exists()
```
Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def setup_loopdev_vg(_lvm_test: None, loop_devices: list[str]) -> Generator[str, None, None]:
    """Set up a volume group using loop devices.

    This fixture creates a volume group using the provided loop devices.
    The volume group name can be customized using the STS_VG_NAME environment
    variable, otherwise defaults to 'stsvg0'.

    Args:
        loop_devices: List of loop device paths to use as physical volumes.

    Yields:
        str: The name of the created volume group.

    Examples:
        Basic usage with custom loop device configuration:

        ```python
        @pytest.mark.parametrize('loop_devices', [{'count': 1, 'size_mb': 4096}], indirect=True)
        @pytest.mark.usefixtures('setup_loopdev_vg')
        def test_large_vg_operations(setup_loopdev_vg):
            vg_name = setup_loopdev_vg
            # Create logical volumes in the 4GB VG
            lv = lvm.LogicalVolume(name='testlv', vg=vg_name, size='1G')
            assert lv.create()
        ```

        Using with multiple loop devices:

        ```python
        @pytest.mark.parametrize('loop_devices', [{'count': 2, 'size_mb': 2048}], indirect=True)
        @pytest.mark.usefixtures('setup_loopdev_vg')
        def test_multi_pv_vg(setup_loopdev_vg):
            vg_name = setup_loopdev_vg
            vg = lvm.VolumeGroup(name=vg_name)
            assert vg.exists()
        ```
    """
    vg_name = getenv('STS_VG_NAME', 'stsvg0')
    pvs = []

    try:
        for device in loop_devices:
            pv = lvm.PhysicalVolume(name=device, path=device)
            assert pv.create(), f'Failed to create PV on device {device}'
            pvs.append(pv)

        vg = lvm.VolumeGroup(name=vg_name, pvs=[pv.path for pv in pvs])
        assert vg.create(), f'Failed to create VG {vg_name}'

        yield vg_name

    finally:
        vg = lvm.VolumeGroup(name=vg_name)
        if not vg.remove():
            logging.warning(f'Failed to remove VG {vg_name}')

        for pv in pvs:
            if not pv.remove():
                logging.warning(f'Failed to remove PV {pv.path}')

setup_thin_metadata_for_dmpd(install_dmpd, metadata_backup)

Set up thin metadata configuration for DMPD tool testing with snapshot support.

This fixture creates the intended "broken" metadata state that DMPD tools are designed to detect, analyze, and repair. The metadata swap operation intentionally leaves the thin pool in an inconsistent state (transaction_id mismatch) to test that DMPD tools can properly handle corrupted/problematic metadata scenarios.

Parameters:

    install_dmpd (None, required): DMPD package installation fixture
    metadata_backup (dict[str, Any], required): Metadata backup setup from metadata_backup fixture

Returns:

    dict[str, Any]: Extended volume information with intentionally inconsistent metadata for testing
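A sketch using `thin_check` (a standard DMPD tool) against the intentionally inconsistent metadata; the `metadata_dev` key matches the one handled by restored_thin_pool above, and `run`/`logging` are the same helpers used throughout these fixtures:

```python
def test_thin_check_on_broken_metadata(setup_thin_metadata_for_dmpd):
    metadata_dev = setup_thin_metadata_for_dmpd['metadata_dev']
    # thin_check is expected to notice the intentional transaction_id mismatch
    result = run(f'thin_check {metadata_dev}')
    logging.info(f'thin_check output: {result.stdout}')
```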

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def setup_thin_metadata_for_dmpd(install_dmpd: None, metadata_backup: dict[str, Any]) -> dict[str, Any]:
    """Set up thin metadata configuration for DMPD tool testing with snapshot support.

    This fixture creates the intended "broken" metadata state that DMPD tools are designed
    to detect, analyze, and repair. The metadata swap operation intentionally leaves the
    thin pool in an inconsistent state (transaction_id mismatch) to test that DMPD tools
    can properly handle corrupted/problematic metadata scenarios.

    Args:
        install_dmpd: DMPD package installation fixture
        metadata_backup: Metadata backup setup from metadata_backup fixture

    Returns:
        dict: Extended volume information with intentionally inconsistent metadata for testing
    """
    # DMPD packages are installed via install_dmpd fixture
    _ = install_dmpd

    # Use metadata_backup which preserves the "broken" metadata state for DMPD testing
    return metadata_backup.copy()

setup_thin_pool_with_vols(thin_volumes_with_lifecycle, swap_volume)

Set up thin pool with thin volumes for DMPD testing.

This is a backward-compatible fixture that combines the modular fixtures to recreate the original functionality. Uses the new modular approach internally.

Parameters:

    thin_volumes_with_lifecycle (dict[str, str], required): Thin volumes setup from thin_volumes_with_lifecycle fixture
    swap_volume (dict[str, str], required): Swap volume setup from swap_volume fixture

Returns:

    dict[str, str]: Information about created volumes (compatible with original format)
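A minimal consumer sketch; the keys shown are exactly the ones combined in the fixture body below:

```python
def test_pool_with_vols(setup_thin_pool_with_vols):
    info = setup_thin_pool_with_vols
    # Pool details come from thin_volumes_with_lifecycle, swap details from swap_volume
    assert info['vg_name']
    assert info['swap_path'].endswith(info['swap_name'])
```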

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def setup_thin_pool_with_vols(
    thin_volumes_with_lifecycle: dict[str, str], swap_volume: dict[str, str]
) -> dict[str, str]:
    """Set up thin pool with thin volumes for DMPD testing.

    This is a backward-compatible fixture that combines the modular fixtures
    to recreate the original functionality. Uses the new modular approach internally.

    Args:
        thin_volumes_with_lifecycle: Thin volumes setup from thin_volumes_with_lifecycle fixture
        swap_volume: Swap volume setup from swap_volume fixture

    Returns:
        dict: Information about created volumes (compatible with original format)
    """
    pool_info = thin_volumes_with_lifecycle.copy()
    swap_info = swap_volume

    # Ensure both fixtures reference the same VG
    assert pool_info['vg_name'] == swap_info['vg_name'], 'Pool and swap must be in same VG'

    # Combine information from both fixtures to match original format
    volume_info = pool_info.copy()
    volume_info.update(
        {
            'swap_name': swap_info['swap_name'],
            'swap_path': swap_info['swap_path'],
        }
    )

    return volume_info

setup_vg(_lvm_test, ensure_minimum_devices_with_same_block_sizes)

Set up an LVM Volume Group (VG) with Physical Volumes (PVs) for testing.

This fixture creates a Volume Group using the provided block devices. It handles the creation of Physical Volumes from the block devices and ensures proper cleanup after tests, even if they fail.

Parameters:

    ensure_minimum_devices_with_same_block_sizes (list[BlockDevice], required): List of BlockDevice objects with matching block sizes to be used for creating Physical Volumes.

Yields:

    str: Name of the created Volume Group.

Raises:

    AssertionError: If PV creation fails for any device.

Example:

```python
def test_volume_group(setup_vg):
    vg_name = setup_vg
    # Use vg_name in your test...
```

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def setup_vg(
    _lvm_test: None, ensure_minimum_devices_with_same_block_sizes: list[BlockDevice]
) -> Generator[str, None, None]:
    """Set up an LVM Volume Group (VG) with Physical Volumes (PVs) for testing.

    This fixture creates a Volume Group using the provided block devices. It handles the creation
    of Physical Volumes from the block devices and ensures proper cleanup after tests, even if
    they fail.

    Args:
        ensure_minimum_devices_with_same_block_sizes: List of BlockDevice objects with matching
            block sizes to be used for creating Physical Volumes.

    Yields:
        str: Name of the created Volume Group.

    Raises:
        AssertionError: If PV creation fails for any device.

    Example:
        def test_volume_group(setup_vg):
            vg_name = setup_vg
            # Use vg_name in your test...
    """
    vg_name = getenv('STS_VG_NAME', 'stsvg0')
    pvs = []

    try:
        # Create PVs
        for device in ensure_minimum_devices_with_same_block_sizes:
            device_name = str(device.path).replace('/dev/', '')
            device_path = str(device.path)

            pv = lvm.PhysicalVolume(name=device_name, path=device_path)
            assert pv.create(), f'Failed to create PV on device {device_path}'
            pvs.append(pv)

        # Create VG
        vg = lvm.VolumeGroup(name=vg_name, pvs=[pv.path for pv in pvs])
        assert vg.create(), f'Failed to create VG {vg_name}'

        yield vg_name

    finally:
        # Cleanup in reverse order
        vg = lvm.VolumeGroup(name=vg_name)
        if not vg.remove():
            logging.warning(f'Failed to remove VG {vg_name}')

        for pv in pvs:
            if not pv.remove():
                logging.warning(f'Failed to remove PV {pv.path}')

swap_volume(setup_loopdev_vg)

Create a swap volume for metadata operations.

Creates a 75MB swap logical volume that can be used for metadata swapping.

Parameters:

    setup_loopdev_vg (str, required): Volume group name from setup_loopdev_vg fixture

Yields:

    dict: Information about the created swap volume
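A minimal sketch of the yielded dictionary, matching the keys set in the fixture body below:

```python
def test_swap_volume(swap_volume):
    assert swap_volume['swap_name'] == 'swapvol'
    assert swap_volume['swap_path'] == f"/dev/{swap_volume['vg_name']}/{swap_volume['swap_name']}"
```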

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def swap_volume(setup_loopdev_vg: str) -> Generator[dict[str, Any], None, None]:
    """Create a swap volume for metadata operations.

    Creates a 75MB swap logical volume that can be used for metadata swapping.

    Args:
        setup_loopdev_vg: Volume group name from setup_loopdev_vg fixture

    Yields:
        dict: Information about the created swap volume
    """
    vg_name = setup_loopdev_vg
    swap_name = 'swapvol'

    # Create swap LV (75MB as per original setup)
    swap_lv = lvm.LogicalVolume(name=swap_name, vg=vg_name)
    assert swap_lv.create(size='75M')

    swap_info = {
        'vg_name': vg_name,
        'swap_name': swap_name,
        'swap_path': f'/dev/{vg_name}/{swap_name}',
        'swap_lv': swap_lv,
    }

    yield swap_info

    # Cleanup
    swap_lv.remove()

thin_lv_quarter_of_vg(_lvm_test, setup_vg)

Create a thin logical volume using a thin pool that uses 25% of a volume group.

Creates:
- Thin pool using 25% of the provided volume group space
- Thin logical volume with 512MB virtual size

Yields:

    str: Device path to the thin logical volume
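A usage sketch; `mkfs` is the same helper used by the lifecycle fixture below:

```python
def test_thin_lv_fs(thin_lv_quarter_of_vg):
    dev_path = thin_lv_quarter_of_vg
    # Format the 512MB virtual-size thin LV and verify it is usable
    assert mkfs(device=dev_path, fs_type='xfs')
```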

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def thin_lv_quarter_of_vg(_lvm_test: None, setup_vg: str) -> Generator[str, None, None]:
    """Create a thin logical volume using a thin pool that uses 25% of a volume group.

    Creates:
    - Thin pool using 25% of the provided volume group space
    - Thin logical volume with 512MB virtual size

    Yields:
        str: Device path to the thin logical volume

    """
    lv_name = getenv('LV_NAME', 'ststhin25vglv1')
    vg_name = setup_vg
    pool_name = getenv('THIN_POOL_NAME', 'stspool1_25vg')

    # Create thin pool
    lv = lvm.LogicalVolume(name=lv_name, vg=vg_name)
    assert lv.create(
        type='thin',
        thinpool=pool_name,
        extents='25%VG',
        virtualsize='512M',
    )

    yield f'/dev/{vg_name}/{lv_name}'

    # Cleanup
    lv = lvm.LogicalVolume(name=lv_name, vg=vg_name)
    assert lv.remove()

    pool_lv = lvm.LogicalVolume(name=pool_name, vg=vg_name)
    assert pool_lv.remove()

thin_volumes_with_lifecycle(basic_thin_pool)

Create thin volumes and perform filesystem lifecycle operations.

Creates 10 thin volumes of 300MB each and performs filesystem operations (create, mount, unmount, deactivate) to generate metadata activity.

Parameters:

    basic_thin_pool (dict[str, Any], required): Basic thin pool information from basic_thin_pool fixture

Yields:

    dict: Extended pool information with thin volume details
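A minimal consumer sketch; the keys are set in the fixture body below, and `activate()` is assumed to mirror the `deactivate()` call the fixture performs:

```python
def test_lifecycle_metadata(thin_volumes_with_lifecycle):
    info = thin_volumes_with_lifecycle
    assert info['thin_count'] == 10
    # The fixture leaves the thin LVs deactivated; reactivate one to inspect it
    assert info['thin_lvs'][0].activate()
```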

Source code in sts_libs/src/sts/fixtures/lvm_fixtures.py
@pytest.fixture
def thin_volumes_with_lifecycle(basic_thin_pool: dict[str, Any]) -> Generator[dict[str, Any], None, None]:
    """Create thin volumes and perform filesystem lifecycle operations.

    Creates 10 thin volumes of 300MB each and performs filesystem operations
    (create, mount, unmount, deactivate) to generate metadata activity.

    Args:
        basic_thin_pool: Basic thin pool information from basic_thin_pool fixture

    Yields:
        dict: Extended pool information with thin volume details
    """
    pool_info = basic_thin_pool.copy()
    vg_name = pool_info['vg_name']
    pool_name = pool_info['pool_name']
    thin_base_name = 'thinvol'

    # Create 10 thin volumes of 300MB each (minimum size for filesystem support)
    thin_lvs = []
    for i in range(10):
        thin_name = f'{thin_base_name}{i}'
        thin_lv = lvm.LogicalVolume(name=thin_name, vg=vg_name)
        assert thin_lv.create(type='thin', thinpool=pool_name, virtualsize='300M')
        thin_lvs.append(thin_lv)

        # Create filesystem and mount/unmount to generate metadata activity
        # This matches the mount_lv/umount_lv logic from setup.py
        thin_path = f'/dev/{vg_name}/{thin_name}'
        mount_point = f'/mnt/{thin_name}'

        mnt_dir = Directory(Path(mount_point), create=True)
        assert mkfs(device=thin_path, fs_type='xfs')
        assert mount(device=thin_path, mountpoint=mount_point)
        assert umount(mountpoint=mount_point)
        mnt_dir.remove_dir()

        # Deactivate thin LV with verification
        assert thin_lv.deactivate()

    pool_info.update(
        {
            'thin_count': 10,
            'thin_base_name': thin_base_name,
            'thin_lvs': thin_lvs,
        }
    )

    yield pool_info

    # Cleanup thin volumes
    for thin_lv in thin_lvs:
        thin_lv.remove()

RDMA Fixtures

sts.fixtures.rdma_fixtures

RDMA test fixtures.

This module provides fixtures for testing RDMA (Remote Direct Memory Access):
- Device discovery and validation
- Device configuration and management
- Port and interface handling
- SR-IOV configuration

Fixture Dependencies:
1. _exists_rdma (base fixture)
   - Validates RDMA device presence
   - Skips tests if no devices found

2. rdma_device (independent fixture)
   - Creates device factory function
   - Validates device existence
   - Provides device management

Common Usage:

1. Basic device validation:

    @pytest.mark.usefixtures('_exists_rdma')
    def test_rdma():
        # Test runs only if RDMA device exists

2. Specific device testing:

    def test_device(rdma_device):
        device = rdma_device('mlx5_0')
        # Test specific RDMA device

3. Port configuration:

    def test_ports(rdma_device):
        device = rdma_device('mlx5_0')
        ports = device.get_ports()
        # Test port configuration

4. SR-IOV setup:

    def test_sriov(rdma_device):
        device = rdma_device('mlx5_0')
        sriov = device.get_sriov()
        # Test SR-IOV configuration

Error Handling:
- Missing devices skip tests
- Invalid device IDs raise assertion errors
- Device access issues are logged
- Configuration failures are reported

rdma_device()

Create RDMA device factory.

This fixture provides a factory function for RDMA devices:
- Creates device instances on demand
- Validates device existence
- Provides device management interface
- Supports multiple device types

Device Management:
- Port configuration
- Interface binding
- SR-IOV setup
- Power management

Returns:

    Callable[[str], RdmaDevice]: Factory function that takes HCA ID and returns RdmaDevice

Example:

```python
def test_rdma(rdma_device):
    # Create device instance
    device = rdma_device('mlx5_0')

    # Access device information
    assert device.exists

    # Configure ports
    ports = device.get_ports()
    for port in ports:
        print(f'Port {port.name}: {port.state}')

    # Set up SR-IOV if supported
    if device.is_sriov_capable:
        sriov = device.get_sriov()
        sriov.set_numvfs('4')
```
Source code in sts_libs/src/sts/fixtures/rdma_fixtures.py
@pytest.fixture(scope='class')
def rdma_device() -> Callable[[str], RdmaDeviceType]:
    """Create RDMA device factory.

    This fixture provides a factory function for RDMA devices:
    - Creates device instances on demand
    - Validates device existence
    - Provides device management interface
    - Supports multiple device types

    Device Management:
    - Port configuration
    - Interface binding
    - SR-IOV setup
    - Power management

    Returns:
        Factory function that takes HCA ID and returns RdmaDevice

    Example:
        ```python
        def test_rdma(rdma_device):
            # Create device instance
            device = rdma_device('mlx5_0')
        ...
            # Access device information
            assert device.exists
        ...
            # Configure ports
            ports = device.get_ports()
            for port in ports:
                print(f'Port {port.name}: {port.state}')
        ...
            # Set up SR-IOV if supported
            if device.is_sriov_capable:
                sriov = device.get_sriov()
                sriov.set_numvfs('4')
        ```
    """

    def _device_factory(hca_id: str) -> RdmaDeviceType:
        """Create RDMA device.

        Creates and validates RDMA device instance:
        - Checks device existence
        - Initializes device paths
        - Sets up device attributes
        - Validates configuration

        Args:
            hca_id: HCA ID (e.g. 'mlx5_0', 'mlx4_1')

        Returns:
            RDMA device instance

        Raises:
            AssertionError: If device not found or invalid

        Example:
            ```python
            device = _device_factory('mlx5_0')
            assert device.exists
            ```
        """
        assert exists_device(hca_id), f'No RDMA device found: {hca_id}'
        return RdmaDevice(ibdev=hca_id)

    return _device_factory

Stratis Fixtures

sts.fixtures.stratis_fixtures

Stratis test fixtures.

This module provides fixtures for testing Stratis storage:
- Pool creation and management
- Filesystem operations
- Encryption configuration
- Error injection and recovery

Fixture Dependencies:
1. _stratis_test (base fixture)
   - Installs Stratis packages
   - Manages pool cleanup
   - Logs system information

2. setup_stratis_key (independent fixture)
   - Creates encryption key
   - Manages key registration
   - Handles key cleanup

3. stratis_test_pool (depends on loop_devices)
   - Creates test pool
   - Manages devices
   - Handles cleanup

4. stratis_encrypted_pool (depends on loop_devices, setup_stratis_key)
   - Creates encrypted pool
   - Manages key and devices
   - Handles secure cleanup

5. stratis_failing_pool (depends on scsi_debug_devices)
   - Creates pool with failing device
   - Injects failures
   - Tests error handling

Common Usage:

1. Basic pool testing:

    @pytest.mark.usefixtures('_stratis_test')
    def test_stratis():
        # Create and test pools
        # Pools are cleaned up after test

2. Encrypted pool testing:

    def test_encryption(stratis_encrypted_pool):
        assert stratis_encrypted_pool.is_encrypted
        # Test encrypted operations

3. Error handling testing:

    def test_failures(stratis_failing_pool):
        assert not stratis_failing_pool.stop()
        # Test failure handling

Error Handling:
- Package installation failures fail test
- Pool creation failures skip test
- Device failures are handled gracefully
- Resources are cleaned up on failure

setup_stratis_key()

Set up Stratis encryption key.

Creates and manages encryption key:
- Creates temporary key file
- Registers key with Stratis
- Handles key cleanup
- Supports custom key configuration

Configuration (via environment):
- STRATIS_KEY_DESC: Key description (default: 'sts-stratis-test-key')
- STRATIS_KEY_PATH: Key file path (default: '/tmp/sts-stratis-test-key')
- STRATIS_KEY: Key content (default: 'Stra123tisKey45')

Key Management:
1. Creates key file with specified content
2. Registers key with Stratis daemon
3. Yields key description for use
4. Unregisters key and removes file

Example:

```python
def test_encryption(setup_stratis_key):
    # Create encrypted pool
    pool = StratisPool()
    pool.create(key_desc=setup_stratis_key)
    assert pool.is_encrypted
```
Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def setup_stratis_key() -> Generator[str, None, None]:
    """Set up Stratis encryption key.

    Creates and manages encryption key:
    - Creates temporary key file
    - Registers key with Stratis
    - Handles key cleanup
    - Supports custom key configuration

    Configuration (via environment):
    - STRATIS_KEY_DESC: Key description (default: 'sts-stratis-test-key')
    - STRATIS_KEY_PATH: Key file path (default: '/tmp/sts-stratis-test-key')
    - STRATIS_KEY: Key content (default: 'Stra123tisKey45')

    Key Management:
    1. Creates key file with specified content
    2. Registers key with Stratis daemon
    3. Yields key description for use
    4. Unregisters key and removes file

    Example:
        ```python
        def test_encryption(setup_stratis_key):
            # Create encrypted pool
            pool = StratisPool()
            pool.create(key_desc=setup_stratis_key)
            assert pool.is_encrypted
        ```
    """
    stratis_key = Key()
    keydesc = getenv('STRATIS_KEY_DESC', 'sts-stratis-test-key')
    keypath = getenv('STRATIS_KEY_PATH', '/tmp/sts-stratis-test-key')
    key = getenv('STRATIS_KEY', 'Stra123tisKey45')

    # Create key file
    keyp = Path(keypath)
    keyp.write_text(key)
    assert keyp.is_file()

    # Register key with Stratis
    assert stratis_key.set(keydesc=keydesc, keyfile_path=keypath).succeeded

    yield keydesc

    # Clean up
    assert stratis_key.unset(keydesc).succeeded
    keyp.unlink()
    assert not keyp.is_file()

stratis_clevis_test()

Set up Tang server for Stratis Clevis encryption testing.

This fixture configures the Tang server environment:
- Ensures Tang server packages are installed
- Starts Tang service
- Gets server information for encryption
- Handles cleanup

Package Installation:
- tang: Tang server package
- curl: For HTTP requests
- jose: For JWK operations
- jq: For JSON processing

Service Management:
1. Installs required packages
2. Starts Tang service
3. Gets server thumbprint
4. Cleans up after tests

Returns:

    dict[str, str]: Dictionary containing:
    - thumbprint: Server thumbprint for verification
    - url: Tang server URL

Example:

```python
def test_tang(stratis_clevis_test):
    # Create encrypted pool with Tang
    config = PoolCreateConfig(
        clevis='tang', tang_url=stratis_clevis_test['url'], thumbprint=stratis_clevis_test['thumbprint']
    )
```
Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_clevis_test() -> Generator[dict[str, str], None, None]:
    """Set up Tang server for Stratis Clevis encryption testing.

    This fixture configures the Tang server environment:
    - Ensures Tang server packages are installed
    - Starts Tang service
    - Gets server information for encryption
    - Handles cleanup

    Package Installation:
    - tang: Tang server package
    - curl: For HTTP requests
    - jose: For JWK operations
    - jq: For JSON processing

    Service Management:
    1. Installs required packages
    2. Starts Tang service
    3. Gets server thumbprint
    4. Cleans up after tests

    Returns:
        Dictionary containing:
        - thumbprint: Server thumbprint for verification
        - url: Tang server URL

    Example:
        ```python
        def test_tang(stratis_clevis_test):
            # Create encrypted pool with Tang
            config = PoolCreateConfig(
                clevis='tang', tang_url=stratis_clevis_test['url'], thumbprint=stratis_clevis_test['thumbprint']
            )
        ```
    """
    system = SystemManager()
    system_info = SystemInfo()
    tang_service = 'tangd.socket'

    # Install required packages
    required_packages = ['tang', 'curl', 'jose', 'jq', 'coreutils']
    assert ensure_installed(*required_packages), 'Failed to install required packages'

    # Start Tang service if not running
    if not system.is_service_running(tang_service):
        assert system.service_start(tang_service), f'Failed to start {tang_service}'

    # Get server thumbprint
    cmd = (
        f'curl -s {system_info.hostname}/adv | '
        f'jq -r .payload | '
        f'base64 -d | '
        f'jose jwk use -i- -r -u verify -o- | '
        f'jose jwk thp -i-'
    )
    result = run(cmd=cmd)
    assert result.succeeded, 'Failed to get Tang server thumbprint'
    assert result.stdout.strip(), 'Empty thumbprint received'

    # Prepare server information
    clevis_info = {'thumbprint': result.stdout.strip(), 'url': f'http://{system_info.hostname}'}

    yield clevis_info

    # Clean up
    if system.is_service_running(tang_service):
        assert system.service_stop(tang_service), f'Failed to stop {tang_service}'

stratis_encrypted_pool(loop_devices, setup_stratis_key)

Create encrypted test pool with loop devices.

Creates and manages encrypted pool:
- Uses loop devices as storage
- Creates encrypted pool
- Manages encryption key
- Handles secure cleanup

Parameters:

    loop_devices (list[str], required): Loop device fixture (requires 2 devices)
    setup_stratis_key (str, required): Stratis key fixture

Pool Configuration:
- Name: 'sts-stratis-test-pool'
- Devices: Provided loop devices
- Encrypted with provided key
- Default settings

Example:

```python
@pytest.mark.parametrize('loop_devices', [2], indirect=True)
def test_pool(stratis_encrypted_pool):
    # Test encrypted operations
    assert stratis_encrypted_pool.is_encrypted
    fs = stratis_encrypted_pool.create_filesystem('test')
    assert fs.exists
```
Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_encrypted_pool(loop_devices: list[str], setup_stratis_key: str) -> Generator[StratisPool, None, None]:
    """Create encrypted test pool with loop devices.

    Creates and manages encrypted pool:
    - Uses loop devices as storage
    - Creates encrypted pool
    - Manages encryption key
    - Handles secure cleanup

    Args:
        loop_devices: Loop device fixture (requires 2 devices)
        setup_stratis_key: Stratis key fixture

    Pool Configuration:
    - Name: 'sts-stratis-test-pool'
    - Devices: Provided loop devices
    - Encrypted with provided key
    - Default settings

    Example:
        ```python
        @pytest.mark.parametrize('loop_devices', [2], indirect=True)
        def test_pool(stratis_encrypted_pool):
            # Test encrypted operations
            assert stratis_encrypted_pool.is_encrypted
            fs = stratis_encrypted_pool.create_filesystem('test')
            assert fs.exists
        ```
    """
    pool = StratisPool()
    pool.name = 'sts-stratis-test-pool'
    pool.blockdevs = loop_devices

    # Create encrypted pool
    config = PoolCreateConfig(key_desc=setup_stratis_key)
    if not pool.create(config):
        pytest.skip('Failed to create encrypted test pool')

    yield pool

    # Clean up
    pool.destroy()

stratis_extend_lvm(_lvm_test, loop_devices)

Create a logical volume sized at 70% of a volume group, used to test stratis extend-data.

The first two devices from loop_devices are used for creating the Stratis pool; this fixture uses the third and fourth loop devices as PVs.
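A usage sketch, assuming loop_devices is parametrized to provide the four devices the fixture asserts on; the `.name` attribute is assumed from the LogicalVolume constructor:

```python
import pytest


@pytest.mark.parametrize('loop_devices', [4], indirect=True)
def test_extend_data(stratis_extend_lvm):
    lv = stratis_extend_lvm
    # lv is sized at 70%VG; its device node is the extend-data candidate
    logging.info(f'Extend-data candidate LV: {lv.name}')
```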

Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_extend_lvm(
    _lvm_test: None,
    loop_devices: list[str],
) -> Generator[LogicalVolume, None, None]:
    """Create a logical volume 70%vg size which we will use to test stratis extend-data.

    First two devices from loop_devices are used for creating stratis pool.
    This fixture will use third and fourth loop device as PV.
    """
    vg_name = getenv('STRATIS_VG_NAME', 'sts-stratis-volume-group')
    lv_name = getenv('STRATIS_LV_NAME', 'sts-stratis-logical-volume')
    pvs = []
    assert len(loop_devices) >= 4, 'Not enough loop devices available'
    devices = loop_devices[2:4]
    try:
        # Create PVs
        for device in devices:
            pv = PhysicalVolume(name=device, path=device)
            assert pv.create(), f'Failed to create PV on device {device}'
            pvs.append(pv)

        # Create VG
        vg = VolumeGroup(name=vg_name, pvs=devices)
        assert vg.create(), f'Failed to create VG {vg_name}'
        lv = LogicalVolume(name=lv_name, vg=vg_name)
        assert lv.create(extents='70%vg')
        yield lv

    finally:
        # Cleanup in reverse order
        vg = VolumeGroup(name=vg_name)
        if not vg.remove():
            logging.warning(f'Failed to remove VG {vg_name}')

        for pv in pvs:
            if not pv.remove():
                logging.warning(f'Failed to remove PV {pv.path}')

stratis_failing_pool(scsi_debug_devices)

Create test pool with failing devices.

Creates pool for failure testing:
- Uses SCSI debug devices
- Injects device failures
- Tests error handling
- Manages cleanup

Parameters:

    scsi_debug_devices (list[str], required): SCSI debug device fixture

Failure Injection:
- Every operation fails
- Noisy error reporting
- Tests error handling
- Recovery procedures

Example:

```python
@pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)
def test_pool(stratis_failing_pool):
    # Test failure handling
    assert not stratis_failing_pool.stop()
    assert 'error' in stratis_failing_pool.status
```
Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_failing_pool(scsi_debug_devices: list[str]) -> Generator[StratisPool, None, None]:
    """Create test pool with failing devices.

    Creates pool for failure testing:
    - Uses SCSI debug devices
    - Injects device failures
    - Tests error handling
    - Manages cleanup

    Args:
        scsi_debug_devices: SCSI debug device fixture

    Failure Injection:
    - Every operation fails
    - Noisy error reporting
    - Tests error handling
    - Recovery procedures

    Example:
        ```python
        @pytest.mark.parametrize('scsi_debug_devices', [2], indirect=True)
        def test_pool(stratis_failing_pool):
            # Test failure handling
            assert not stratis_failing_pool.stop()
            assert 'error' in stratis_failing_pool.status
        ```
    """
    # Get first device for injection
    device = ScsiDebugDevice(scsi_debug_devices[0])

    # Inject failures (every operation fails with noisy error)
    device.inject_failure(every_nth=1, opts=1)

    # Create pool
    pool = StratisPool()
    pool.name = 'sts-stratis-test-pool'
    pool.blockdevs = [scsi_debug_devices[0]]  # Only use first device

    if not pool.create():
        pytest.skip('Failed to create test pool')

    yield pool

    # Clean up
    pool.destroy()

stratis_key_desc_pool(loop_devices, setup_stratis_key)

Create a pool with keyring encryption.
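A minimal usage sketch, mirroring the encrypted pool example above:

```python
import pytest


@pytest.mark.parametrize('loop_devices', [2], indirect=True)
def test_keyring_pool(stratis_key_desc_pool):
    assert stratis_key_desc_pool.is_encrypted
```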

Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_key_desc_pool(loop_devices: list[str], setup_stratis_key: str) -> Generator[StratisPool, None, None]:
    """Create a pool with keyring encryption."""
    pool = StratisPool()
    pool.name = 'sts-stratis-test-pool'
    pool.blockdevs = loop_devices[:2]  # Use first two devices initially
    config = PoolCreateConfig(key_desc=setup_stratis_key)
    assert pool.create(config)
    yield pool
    pool.destroy()

stratis_no_enc_pool(loop_devices)

Create a pool without encryption.
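A minimal usage sketch, mirroring the stratis_test_pool example below:

```python
import pytest


@pytest.mark.parametrize('loop_devices', [2], indirect=True)
def test_plain_pool(stratis_no_enc_pool):
    fs = stratis_no_enc_pool.create_filesystem('test')
    assert fs.exists
```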

Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_no_enc_pool(loop_devices: list[str]) -> Generator[StratisPool, None, None]:
    """Create a pool without encryption."""
    pool = StratisPool()
    pool.name = 'sts-stratis-test-pool'
    pool.blockdevs = loop_devices[:2]  # Use first two devices initially
    assert pool.create()
    yield pool
    pool.destroy()

stratis_tang_pool(loop_devices, stratis_clevis_test)

Create a pool with Tang encryption.
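A minimal usage sketch; the Tang details are supplied by the stratis_clevis_test fixture dependency:

```python
import pytest


@pytest.mark.parametrize('loop_devices', [2], indirect=True)
def test_tang_pool(stratis_tang_pool):
    assert stratis_tang_pool.is_encrypted
```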

Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_tang_pool(
    loop_devices: list[str], stratis_clevis_test: dict[str, str]
) -> Generator[StratisPool, None, None]:
    """Create a pool with Tang encryption."""
    pool = StratisPool()
    pool.name = 'sts-stratis-test-pool'
    pool.blockdevs = loop_devices[:2]  # Use first two devices initially
    config = PoolCreateConfig(
        clevis='tang', tang_url=stratis_clevis_test['url'], thumbprint=stratis_clevis_test['thumbprint']
    )
    assert pool.create(config)
    yield pool
    pool.destroy()

stratis_test_pool(loop_devices)

Create test pool with loop devices.

Creates and manages test pool:
- Uses loop devices as storage
- Creates standard pool
- Handles cleanup
- Supports testing operations

Parameters:

    loop_devices (list[str], required): Loop device fixture (requires 2 devices)

Pool Configuration:
- Name: 'sts-stratis-test-pool'
- Devices: Provided loop devices
- Standard (non-encrypted) pool
- Default settings

Example:

```python
@pytest.mark.parametrize('loop_devices', [2], indirect=True)
def test_pool(stratis_test_pool):
    # Test pool operations
    fs = stratis_test_pool.create_filesystem('test')
    assert fs.exists
```
Source code in sts_libs/src/sts/fixtures/stratis_fixtures.py
@pytest.fixture
def stratis_test_pool(loop_devices: list[str]) -> Generator[StratisPool, None, None]:
    """Create test pool with loop devices.

    Creates and manages test pool:
    - Uses loop devices as storage
    - Creates standard pool
    - Handles cleanup
    - Supports testing operations

    Args:
        loop_devices: Loop device fixture (requires 2 devices)

    Pool Configuration:
    - Name: 'sts-stratis-test-pool'
    - Devices: Provided loop devices
    - Standard (non-encrypted) pool
    - Default settings

    Example:
        ```python
        @pytest.mark.parametrize('loop_devices', [2], indirect=True)
        def test_pool(stratis_test_pool):
            # Test pool operations
            fs = stratis_test_pool.create_filesystem('test')
            assert fs.exists
        ```
    """
    pool = StratisPool()
    pool.name = 'sts-stratis-test-pool'
    pool.blockdevs = loop_devices

    # Create pool
    if not pool.create():
        pytest.skip('Failed to create test pool')

    yield pool

    # Clean up
    pool.destroy()

Target Fixtures

sts.fixtures.target_fixtures

Target test fixtures.

This module provides fixtures for testing storage targets:
- Target creation and configuration
- Backstore management (block, fileio, ramdisk)
- ACL and authentication setup
- LUN management

Fixture Dependencies:
1. _target_test (base fixture)
   - Installs target utilities
   - Manages target cleanup
   - Logs system information

2. backstore_*_setup (depends on _target_test)
   - block: Creates block backstore with loop device
   - fileio: Creates fileio backstore
   - ramdisk: Creates ramdisk backstore

3. iscsi_target_setup (depends on _target_test)
   - Creates iSCSI target
   - Configures ACLs and LUNs
   - Manages cleanup

4. configure_auth (depends on _target_test)
   - Sets up CHAP authentication
   - Configures mutual CHAP
   - Manages credentials

5. loopback_devices
   - Creates loopback devices for testing
   - Supports custom block size and device count
   - Manages device cleanup automatically
   - Yields list of BlockDevice instances

Common Usage:

1. Basic target testing:

    @pytest.mark.usefixtures('_target_test')
    def test_target():
        # Create and test targets
        # Targets are cleaned up after test

2. Backstore testing:

    @pytest.mark.parametrize('backstore_block_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
    def test_backstore(backstore_block_setup):
        # Test backstore operations

3. iSCSI target testing:

    @pytest.mark.parametrize('iscsi_target_setup', [{'t_iqn': 'iqn.test', 'n_luns': 2}], indirect=True)
    def test_iscsi(iscsi_target_setup):
        # Test iSCSI target operations

4. Authentication testing:

    @pytest.mark.parametrize('configure_auth', [{'t_iqn': 'iqn.test', 'chap_username': 'user', 'chap_password': 'pass'}], indirect=True)
    def test_auth(configure_auth):
        # Test authentication

5. Loopback device testing:

    # Using test parametrization
    @pytest.mark.parametrize('block_size', [512, 1024])
    def test_loopback(loopback_devices):
        # Uses block_size from parametrize, device_count=2 (default)

    # Using fixture parametrization
    @pytest.mark.parametrize('loopback_devices', [{'device_count': 4, 'block_size': 1024}], indirect=True)
    def test_loopback_custom(loopback_devices):
        # Uses custom device_count and block_size
        # Devices are automatically cleaned up

Error Handling:
- Package installation failures fail test
- Target creation failures are handled
- Resource cleanup runs on failure
- Authentication errors are logged

backstore_block_setup(_target_test, request)

Create block backstore with loop device.

Creates block backstore using loop device:
- Creates temporary loop device
- Sets up block backstore
- Manages cleanup
- Supports custom size

Parameters:

    request (FixtureRequest, required): Fixture request with parameters:
        - name: Backstore name
        - size: Loop device size in bytes (the fixture converts it to MB for the loop device)

Example:

```python
@pytest.mark.parametrize('backstore_block_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
def test_backstore(backstore_block_setup):
    assert backstore_block_setup.exists
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@pytest.fixture
def backstore_block_setup(_target_test: None, request: pytest.FixtureRequest) -> Generator[BackstoreBlock, None, None]:
    """Create block backstore with loop device.

    Creates block backstore using loop device:
    - Creates temporary loop device
    - Sets up block backstore
    - Manages cleanup
    - Supports custom size

    Args:
        request: Fixture request with parameters:
            - name: Backstore name
            - size: Loop device size in bytes (converted to MB for the loop device)

    Example:
        ```python
        @pytest.mark.parametrize('backstore_block_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
        def test_backstore(backstore_block_setup):
            assert backstore_block_setup.exists
        ```
    """
    loop_dev = None
    backstore = None
    try:
        # Create loop device
        loop_dev = LoopDevice.create(
            name=request.param['name'],
            size_mb=request.param['size'] // (1024 * 1024),
        )
        if not loop_dev:
            pytest.skip('Failed to create loop device')

        # Create backstore
        backstore = BackstoreBlock(name=request.param['name'])
        backstore.create_backstore(dev=str(loop_dev.path))
        yield backstore

    except Exception:
        logging.exception('Failed to set up block backstore')
        raise

    finally:
        # Clean up
        if backstore:
            backstore.delete_backstore()
        if loop_dev:
            loop_dev.remove()

backstore_fileio_setup(_target_test, request)

Create fileio backstore.

Creates fileio backstore:
- Creates backing file
- Sets up fileio backstore
- Manages cleanup
- Supports custom size

Parameters:

    request (FixtureRequest, required): Fixture request with parameters:
        - name: Backstore name
        - size: File size in bytes
        - file_or_dev: File path

Example:

```python
@pytest.mark.parametrize('backstore_fileio_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
def test_backstore(backstore_fileio_setup):
    assert backstore_fileio_setup.exists
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@pytest.fixture
def backstore_fileio_setup(
    _target_test: None, request: pytest.FixtureRequest
) -> Generator[BackstoreFileio, None, None]:
    """Create fileio backstore.

    Creates fileio backstore:
    - Creates backing file
    - Sets up fileio backstore
    - Manages cleanup
    - Supports custom size

    Args:
        request: Fixture request with parameters:
            - name: Backstore name
            - size: File size in bytes
            - file_or_dev: File path

    Example:
        ```python
        @pytest.mark.parametrize('backstore_fileio_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
        def test_backstore(backstore_fileio_setup):
            assert backstore_fileio_setup.exists
        ```
    """
    backstore = None
    try:
        backstore = BackstoreFileio(name=request.param['name'])
        backstore.create_backstore(
            size=str(request.param['size']),
            file_or_dev=request.param.get('file_or_dev') or f'{request.param["name"]}_file',
        )
        yield backstore

    except Exception:
        logging.exception('Failed to set up fileio backstore')
        raise

    finally:
        if backstore:
            backstore.delete_backstore()

backstore_ramdisk_setup(_target_test, request)

Create ramdisk backstore.

Creates ramdisk backstore:
- Allocates memory
- Sets up ramdisk backstore
- Manages cleanup
- Supports custom size

Parameters:

    request (FixtureRequest, required): Fixture request with parameters:
        - name: Backstore name
        - size: Size in bytes

Example:

```python
@pytest.mark.parametrize('backstore_ramdisk_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
def test_backstore(backstore_ramdisk_setup):
    assert backstore_ramdisk_setup.exists
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@pytest.fixture
def backstore_ramdisk_setup(
    _target_test: None, request: pytest.FixtureRequest
) -> Generator[BackstoreRamdisk, None, None]:
    """Create ramdisk backstore.

    Creates ramdisk backstore:
    - Allocates memory
    - Sets up ramdisk backstore
    - Manages cleanup
    - Supports custom size

    Args:
        request: Fixture request with parameters:
            - name: Backstore name
            - size: Size in bytes

    Example:
        ```python
        @pytest.mark.parametrize('backstore_ramdisk_setup', [{'name': 'test', 'size': 1024 * 1024}], indirect=True)
        def test_backstore(backstore_ramdisk_setup):
            assert backstore_ramdisk_setup.exists
        ```
    """
    backstore = None
    try:
        backstore = BackstoreRamdisk(name=request.param['name'])
        backstore.create_backstore(size=str(request.param['size']))
        yield backstore

    except Exception:
        logging.exception('Failed to set up ramdisk backstore')
        raise

    finally:
        if backstore:
            backstore.delete_backstore()

configure_auth(request)

Configure CHAP authentication.

Sets up CHAP authentication:
- Creates target with auth
- Configures CHAP credentials
- Supports mutual CHAP
- Manages cleanup

Parameters:

    request (FixtureRequest, required): Fixture request with parameters:
        - t_iqn: Target IQN
        - i_iqn: Initiator IQN
        - chap_username: CHAP username
        - chap_password: CHAP password
        - chap_target_username: Mutual CHAP username (optional)
        - chap_target_password: Mutual CHAP password (optional)
        - tpg_or_acl: Configure TPG or ACL auth

Example:

```python
@pytest.mark.parametrize(
    'configure_auth', [{'t_iqn': 'iqn.test', 'chap_username': 'user', 'chap_password': 'pass'}], indirect=True
)
def test_auth(configure_auth):
    assert configure_auth.exists
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@pytest.fixture
def configure_auth(request: pytest.FixtureRequest) -> Generator[Iscsi, None, None]:
    """Configure CHAP authentication.

    Sets up CHAP authentication:
    - Creates target with auth
    - Configures CHAP credentials
    - Supports mutual CHAP
    - Manages cleanup

    Args:
        request: Fixture request with parameters:
            - t_iqn: Target IQN
            - i_iqn: Initiator IQN
            - chap_username: CHAP username
            - chap_password: CHAP password
            - chap_target_username: Mutual CHAP username (optional)
            - chap_target_password: Mutual CHAP password (optional)
            - tpg_or_acl: Configure TPG or ACL auth

    Example:
        ```python
        @pytest.mark.parametrize(
            'configure_auth', [{'t_iqn': 'iqn.test', 'chap_username': 'user', 'chap_password': 'pass'}], indirect=True
        )
        def test_auth(configure_auth):
            assert configure_auth.exists
        ```
    """
    target_wwn = request.param['t_iqn']
    target = Iscsi(target_wwn=target_wwn)

    try:
        # Create target
        target.create_target()

        # Add backstore
        backstore = BackstoreFileio(name='auth_test')
        backstore.create_backstore(size='1M', file_or_dev='auth_test_file')
        luns = IscsiLUN(target_wwn=target_wwn)
        luns.create_lun(storage_object=backstore.path)

        # Configure auth
        if request.param['tpg_or_acl'] == 'acl':
            acl = ACL(target_wwn=target_wwn, initiator_wwn=request.param['i_iqn'])
            acl.create_acl()
            acl.set_auth(
                userid=request.param['chap_username'],
                password=request.param['chap_password'],
                mutual_userid=request.param.get('chap_target_username', ''),
                mutual_password=request.param.get('chap_target_password', ''),
            )

        yield target

    finally:
        target.delete_target()
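
Note that the fixture reads `request.param['tpg_or_acl']` without a default, so callers likely need to pass it explicitly even though the docstring example above omits it. Below is a hedged sketch of a mutual-CHAP run with ACL-level auth; all IQNs and credentials are placeholders:

```python
import pytest


@pytest.mark.parametrize(
    'configure_auth',
    [
        {
            't_iqn': 'iqn.test',  # placeholder target IQN
            'i_iqn': 'iqn.test.initiator',  # placeholder initiator IQN
            'chap_username': 'user',
            'chap_password': 'pass',
            'chap_target_username': 'mutual_user',  # enables mutual CHAP
            'chap_target_password': 'mutual_pass',
            'tpg_or_acl': 'acl',  # read unconditionally by the fixture
        }
    ],
    indirect=True,
)
def test_mutual_chap(configure_auth):
    assert configure_auth.exists
```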

iscsi_target_setup(_target_test, request)

Create iSCSI target with ACLs and LUNs.

Creates complete iSCSI target:

- Creates target with IQN
- Sets up ACLs
- Creates LUNs
- Manages cleanup

Parameters:

- `request` (`FixtureRequest`, required): Fixture request with parameters:
    - `t_iqn`: Target IQN (optional)
    - `i_iqn`: Initiator IQN (optional)
    - `n_luns`: Number of LUNs (optional)
    - `back_size`: Backstore size in bytes (optional)
Example

```python
@pytest.mark.parametrize('iscsi_target_setup', [{'t_iqn': 'iqn.test', 'n_luns': 2}], indirect=True)
def test_target(iscsi_target_setup):
    assert iscsi_target_setup.exists
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@pytest.fixture(scope='class')
def iscsi_target_setup(_target_test: None, request: pytest.FixtureRequest) -> Generator[Iscsi, None, None]:
    """Create iSCSI target with ACLs and LUNs.

    Creates complete iSCSI target:
    - Creates target with IQN
    - Sets up ACLs
    - Creates LUNs
    - Manages cleanup

    Args:
        request: Fixture request with parameters:
            - t_iqn: Target IQN (optional)
            - i_iqn: Initiator IQN (optional)
            - n_luns: Number of LUNs (optional)
            - back_size: Backstore size in bytes (optional)

    Example:
        ```python
        @pytest.mark.parametrize('iscsi_target_setup', [{'t_iqn': 'iqn.test', 'n_luns': 2}], indirect=True)
        def test_target(iscsi_target_setup):
            assert iscsi_target_setup.exists
        ```
    """
    params = request.param
    with target_setup(
        t_iqn=params.get('t_iqn'),
        i_iqn=params.get('i_iqn'),
        n_luns=params.get('n_luns', 0),
        back_size=params.get('back_size'),
    ) as target:
        yield target
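
Since `iscsi_target_setup` forwards its parameters to `target_setup` (documented below), LUNs are only created when both `n_luns` and `back_size` are given. A sketch with all parameters set; the values are illustrative:

```python
import pytest


@pytest.mark.parametrize(
    'iscsi_target_setup',
    [
        {
            't_iqn': 'iqn.test',  # illustrative IQN
            'i_iqn': 'iqn.test.initiator',  # creates an ACL for this initiator
            'n_luns': 2,
            'back_size': 1024 * 1024,  # 1 MiB backing file per LUN
        }
    ],
    indirect=True,
)
def test_target_with_acl_and_luns(iscsi_target_setup):
    assert iscsi_target_setup.exists
```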

loopback_devices(request)

Create loopback devices for testing.

This fixture creates loopback devices with the specified block size from the parametrized test, yields them for testing, and ensures proper cleanup afterward.

Parameters (from test parametrization or fixture params):

- `block_size`: Block size for devices (default: 512)
- `device_count`: Number of devices to create (default: 2)

Yields:

- `list[BlockDevice]`: List of BlockDevice instances representing the created loopback devices

Example

```python
# Using test parametrization (the parametrized name must also appear in the
# test signature for pytest to collect it)
@pytest.mark.parametrize('block_size', [512, 1024])
def test_loopback(loopback_devices, block_size):
    # Uses block_size from parametrize, device_count=2 (default)
    ...


# Using fixture parametrization
@pytest.mark.parametrize('loopback_devices', [{'device_count': 4, 'block_size': 1024}], indirect=True)
def test_loopback_custom(loopback_devices):
    # Uses custom device_count and block_size
    ...
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@pytest.fixture
def loopback_devices(request: pytest.FixtureRequest) -> Generator[list[BlockDevice], None, None]:
    """Create loopback devices for testing.

    This fixture creates loopback devices with the specified block size
    from the parametrized test, yields them for testing, and ensures
    proper cleanup afterward.

    Parameters (from test parametrization or fixture params):
        block_size: Block size for devices (default: 512)
        device_count: Number of devices to create (default: 2)

    Yields:
        List of BlockDevice instances representing the created loopback devices

    Example:
        ```python
        # Using test parametrization
        @pytest.mark.parametrize('block_size', [512, 1024])
        def test_loopback(loopback_devices):
            # Uses block_size from parametrize, device_count=2 (default)

        # Using fixture parametrization
        @pytest.mark.parametrize('loopback_devices', [{'device_count': 4, 'block_size': 1024}], indirect=True)
        def test_loopback(loopback_devices):
            # Uses custom device_count and block_size
        ```
    """
    # Get parameters with defaults
    if hasattr(request, 'param') and isinstance(request.param, dict):
        # Fixture is parametrized with dict
        device_count = request.param.get('device_count', 2)
        block_size = request.param.get('block_size', 512)
    else:
        # Get from test parametrization or use defaults
        device_count = getattr(request.node.callspec, 'params', {}).get('device_count', 2)
        block_size = getattr(request.node.callspec, 'params', {}).get('block_size', 512)

    # Create devices
    devices = create_loopback_devices(device_count, block_size=block_size)

    try:
        yield devices
    finally:
        # Clean up
        cleanup_loopback_devices(devices)
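
A sketch of consuming the yielded list; only the device count is asserted here, since `BlockDevice`'s attributes are not documented in this section:

```python
import pytest


@pytest.mark.parametrize(
    'loopback_devices',
    [{'device_count': 3, 'block_size': 4096}],
    indirect=True,
)
def test_three_loop_devices(loopback_devices):
    # The fixture yields list[BlockDevice]; assert only the count here,
    # as BlockDevice attributes are not shown in this document.
    assert len(loopback_devices) == 3
```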

target_setup(*, t_iqn=None, i_iqn=None, n_luns=0, back_size=None)

Set up iSCSI target.

Creates and manages iSCSI target:

- Creates target with IQN
- Sets up ACLs if needed
- Creates LUNs if needed
- Manages cleanup

Parameters:

- `t_iqn` (`str | None`, default `None`): Target IQN
- `i_iqn` (`str | None`, default `None`): Initiator IQN
- `n_luns` (`int`, default `0`): Number of LUNs
- `back_size` (`int | None`, default `None`): Backstore size in bytes

Yields:

- `Iscsi`: iSCSI target instance

Example

```python
with target_setup(t_iqn='iqn.test', n_luns=2) as target:
    # Use target
    assert target.exists
```
Source code in sts_libs/src/sts/fixtures/target_fixtures.py
@contextmanager
def target_setup(
    *,
    t_iqn: str | None = None,
    i_iqn: str | None = None,
    n_luns: int = 0,
    back_size: int | None = None,
) -> Generator[Iscsi, None, None]:
    """Set up iSCSI target.

    Creates and manages iSCSI target:
    - Creates target with IQN
    - Sets up ACLs if needed
    - Creates LUNs if needed
    - Manages cleanup

    Args:
        t_iqn: Target IQN
        i_iqn: Initiator IQN
        n_luns: Number of LUNs
        back_size: Backstore size in bytes

    Yields:
        iSCSI target instance

    Example:
        ```python
        with target_setup(t_iqn='iqn.test', n_luns=2) as target:
            # Use target
            assert target.exists
        ```
    """
    target_wwn = t_iqn or DEFAULT_TARGET_IQN
    target = Iscsi(target_wwn=target_wwn)

    try:
        # Create target
        target.create_target()

        # Add ACL if needed
        if i_iqn:
            acl = ACL(target_wwn=target_wwn, initiator_wwn=i_iqn)
            acl.create_acl()

        # Add LUNs if needed
        if back_size and n_luns > 0:
            luns = IscsiLUN(target_wwn)
            for n in range(n_luns):
                name = f'backstore{n}'
                backstore = BackstoreFileio(name=name)
                backstore.create_backstore(size=str(back_size), file_or_dev=f'{name}_file')
                luns.create_lun(storage_object=backstore.path)

        yield target

    finally:
        target.delete_target()
        # Clean up backstore files
        for n in range(n_luns):
            Path(f'backstore{n}_file').unlink(missing_ok=True)
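
Because `target_setup` is a plain context manager rather than a fixture, it can also back custom fixtures, as `iscsi_target_setup` above does. One caveat visible in the source: LUNs are created only when `back_size` is set, so `n_luns` alone (as in the example above) yields a target without LUNs. A hypothetical fixture built on it; the fixture name and values are illustrative:

```python
import pytest


# Hypothetical fixture; mirrors how iscsi_target_setup wraps target_setup.
@pytest.fixture(scope='class')
def small_target():
    with target_setup(t_iqn='iqn.test.small', n_luns=1, back_size=1024 * 1024) as target:
        yield target
```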