Page MenuHomePhorge

D266.1768249823.diff
No OneTemporary

Size
26 KB
Referenced Files
None
Subscribers
None

D266.1768249823.diff

diff --git a/lilybuild/lilybuild/podman_helper.py b/lilybuild/lilybuild/podman_helper.py
new file mode 100755
--- /dev/null
+++ b/lilybuild/lilybuild/podman_helper.py
@@ -0,0 +1,405 @@
+#!/usr/bin/env python3
+
+import subprocess
+import sys
+import os
+import json
+import random
+import traceback
+import string
+import time
+import hashlib
+
# ANSI escape sequences used to tag log lines by severity; col_reset
# restores the terminal's default attributes after each message.
col_info = '\x1b[1;34m[INFO]'
col_success = '\x1b[1;32m[SUCC]'
col_warn = '\x1b[1;33m<WARN>'
col_error = '\x1b[1;31m!ERROR!'
col_reset = '\x1b[0m'
+
def pinfo(*args, **kwargs):
    """Print *args* tagged as INFO, then flush stdout immediately."""
    parts = (col_info,) + args + (col_reset,)
    print(*parts, **kwargs)
    sys.stdout.flush()
+
def perror(*args, **kwargs):
    """Print *args* tagged as ERROR, then flush stdout immediately."""
    parts = (col_error,) + args + (col_reset,)
    print(*parts, **kwargs)
    sys.stdout.flush()
+
def pwarn(*args, **kwargs):
    """Print *args* tagged as WARN, then flush stdout immediately."""
    parts = (col_warn,) + args + (col_reset,)
    print(*parts, **kwargs)
    sys.stdout.flush()
+
def psuccess(*args, **kwargs):
    """Print *args* tagged as SUCC, then flush stdout immediately."""
    parts = (col_success,) + args + (col_reset,)
    print(*parts, **kwargs)
    sys.stdout.flush()
+
def gen_random_id():
    """Return a 10-character random identifier of lowercase letters and digits.

    Uses the OS entropy source via random.SystemRandom.
    """
    alphabet = string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(10))
+
def image_to_podman_args(image):
    """Translate a ci.json image spec into trailing `podman run` arguments.

    Returns ['--entrypoint', <json>, '--', <name>] when an entrypoint is
    present, otherwise just ['--', <name>].
    """
    args = []
    if 'entrypoint' in image:
        # ci.json requires that the entrypoint is an array of strings,
        # so serialize it as JSON for podman's --entrypoint flag.
        args.extend(['--entrypoint', json.dumps(image['entrypoint'])])
    args.extend(['--', image['name']])
    return args
+
class PodmanHelper:
    """Orchestrate one CI job inside podman containers.

    Responsibilities visible in this class:
      * create named work/script volumes and clean them up afterwards,
      * start an SSH "volume helper" container and rsync data in/out of the
        volumes through it,
      * optionally create a network and start job-defined service containers,
      * run the job container, stream its logs, and collect its exit code.

    All class attributes below act as defaults; __init__ lets callers
    override any of them per instance via keyword arguments.
    """

    cache_storage_root_dir = '/cache'
    cache_max_bytes = 10 * 1024 * 1024
    # Mount points inside the helper/job containers.
    work_vol_mount_dir = '/build'
    script_vol_mount_dir = '/script'
    script_name = script_vol_mount_dir + '/run.sh'
    env_file_basename = 'env'
    metadata_file_basename = 'metadata.json'
    cur_cache_basename = 'cur'
    volume_helper_image = os.environ.get('LILYBUILD_VOLUME_HELPER_IMAGE', 'r.lily-is.land/infra/lilybuild/volume-helper:servant')
    # Keypair used to SSH into the volume-helper container.
    key_file_pub = '/secrets/lilybuild-volume-helper-key.pub'
    key_file_sub = '/secrets/lilybuild-volume-helper-key'
    ssh_port = '2222'
    ssh_command = f'ssh -p {ssh_port} -i {key_file_sub} -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null'
    # Name of the container this worker itself runs in (used to discover
    # the pod/network to attach the helper container to).
    worker_container_name = os.environ.get('HOSTNAME', '')
    # Polling parameters for the helper's SSH service and job services.
    ssh_max_wait = 10
    ssh_wait_interval_sec = 1
    service_max_wait_sec = 60 * 5
    service_wait_interval_sec = 10
    container_run_timeout_sec = 60 * 60 * 2 # 2 hours by default

    def __init__(self, **kwargs):
        """Initialize bookkeeping state; kwargs override any attribute."""
        self.volumes_to_remove = []
        self.helper_container_id = None
        self.service_network_id = None
        self.service_containers = []
        self.metadata = {
            'repo_id': None,
            'caches': [],
            'cache_last_invalidated_sec': 0,
            'protected': False,
        }
        # Allow tests / callers to override any default (e.g. timeouts,
        # image names, env_filename) without subclassing.
        self.__dict__.update(kwargs)

    # This calls podman which is hard to duplicate so we mock this function
    # in tests instead
    def verbose_run(self, *args, **kwargs):
        """Log the command line, then delegate to subprocess.run."""
        print('run:', args, kwargs)
        sys.stdout.flush()
        return subprocess.run(*args, **kwargs)

    def create_volume(self, t):
        """Create a labeled podman volume of kind *t*; return its name.

        The volume is recorded in self.volumes_to_remove for later cleanup.
        """
        res = self.verbose_run([
            'podman', 'volume', 'create',
            '--label', 'lilybuild=' + t,
        ], check=True, capture_output=True, encoding='utf-8')
        volname = res.stdout.strip()
        self.volumes_to_remove.append(volname)
        return volname

    def clean_volumes(self):
        """Force-remove every volume created via create_volume (best effort)."""
        self.verbose_run([
            'podman', 'volume', 'rm', '-f', '--',
        ] + self.volumes_to_remove, capture_output=True)

    def clean_helper_container(self):
        """Force-remove the volume-helper container (best effort)."""
        self.verbose_run([
            'podman', 'container', 'rm', '-f', '--', self.helper_container_id,
        ], capture_output=True)

    def start_helper_service(self, work_volname, script_volname):
        """Start the SSH volume-helper container and wait until it answers.

        Attaches the helper to this worker's own pod/first network, mounts
        both volumes, and injects the public key so rsync-over-ssh works.
        Returns (container_name, network_alias).
        Raises RuntimeError if the SSH port never responds.
        """
        res = self.verbose_run([
            'podman', 'container', 'inspect', '--', self.worker_container_name,
        ], check=True, capture_output=True, encoding='utf-8')
        container_stat = json.loads(res.stdout)[0]
        pod = container_stat.get('Pod')
        networks = list(container_stat.get('NetworkSettings').get('Networks').keys())
        alias = gen_random_id()
        container_name = 'lilybuild-helper-' + alias

        with open(self.key_file_pub) as f:
            pub_key = f.readline().strip()

        res = self.verbose_run([
            'podman', 'run', '--rm', '-d', '--name', container_name,
            f'--mount=type=volume,source={work_volname},destination={self.work_vol_mount_dir}',
            f'--mount=type=volume,source={script_volname},destination={self.script_vol_mount_dir}',
            f'--pod={pod}',
            f'--net={networks[0]}',
            f'--network-alias={alias}',
            '--image-volume=ignore',
            '--label', 'lilybuild=helper',
            '-e', 'PUID=0',
            '-e', 'PGID=0',
            '-e', f'PUBLIC_KEY={pub_key}',
            '-e', 'USER_NAME=helper',
            '-e', 'SUDO_ACCESS=true',
            '--',
            self.volume_helper_image,
        ], check=True, capture_output=True, encoding='utf-8')
        self.helper_container_id = container_name
        self.helper_container_alias = alias
        pinfo('Waiting for ssh service to be up...')
        service_up = False
        for i in range(self.ssh_max_wait):
            # Probe the SSH port with netcat; an SSH banner means it's ready.
            chk = self.verbose_run(['nc', alias, self.ssh_port], input=b'', capture_output=True)
            if chk.returncode == 0 and chk.stdout is not None and chk.stdout.startswith(b'SSH'):
                service_up = True
                break
            else:
                time.sleep(self.ssh_wait_interval_sec)
        if not service_up:
            raise RuntimeError('Service is still not up!')
        psuccess('Service is up.')
        return (container_name, alias)

    def import_volume(self, local_dir, vol_mount_dir):
        """Rsync *local_dir* into the volume mounted at *vol_mount_dir*."""
        # I'll just use the shell instead of pipe2+fork+exec+wait, much easier
        self.verbose_run([
            'rsync', '-a', '--delete',
            '--rsh', self.ssh_command,
            f'{local_dir}/',
            f'helper@{self.helper_container_alias}:{vol_mount_dir}',
        ], check=True)

    def export_volume(self, local_dir, vol_mount_dir):
        """Rsync the volume at *vol_mount_dir* back out into *local_dir*."""
        self.verbose_run([
            'rsync', '-a', '--delete',
            '--rsh', self.ssh_command,
            f'helper@{self.helper_container_alias}:{vol_mount_dir}/',
            local_dir,
        ], check=True)

    def create_service_network(self):
        """Create the network shared by job-defined service containers."""
        res = self.verbose_run([
            'podman', 'network', 'create', '--label', 'lilybuild=service-network'
        ], capture_output=True, check=True, encoding='utf-8')
        self.service_network_id = res.stdout.strip()
        return self.service_network_id

    def maybe_clean_service_network(self):
        """Remove the service network if one was created (best effort)."""
        if self.service_network_id is None:
            return
        res = self.verbose_run([
            'podman', 'network', 'rm', '-f', '--', self.service_network_id
        ], capture_output=True, encoding='utf-8')
        if res.returncode != 0:
            perror('Cannot remove service network.')

    def start_and_record_service_container(self, service):
        """Start one job-defined service container and record its id.

        *service* is a dict with 'name', 'aliases', and optional 'command'
        and 'entrypoint' (each either a string or a list of strings).
        """
        image = service['name']
        ep_args = []
        if service['entrypoint']:
            if isinstance(service['entrypoint'], str):
                entrypoint = service['entrypoint']
            else:
                # List form is passed to podman as a JSON array.
                entrypoint = json.dumps(service['entrypoint'])
            ep_args += [f'--entrypoint={entrypoint}']
        cmd_args = []
        if service['command']:
            if isinstance(service['command'], str):
                cmd_args += [service['command']]
            else:
                cmd_args += service['command']
        # NOTE(review): self.env_filename is assigned in main(); callers
        # constructing PodmanHelper directly must supply it via kwargs.
        res = self.verbose_run([
            'podman', 'run', '-d', '--label', 'lilybuild=job-service',
            f'--env-file={self.env_filename}',
            f'--network={self.service_network_id}',
        ] + [
            f'--network-alias={alias}' for alias in service['aliases']
        ] + ep_args + [
            '--',
            image,
        ] + cmd_args, check=True, capture_output=True, encoding='utf-8')
        service_id = res.stdout.strip()
        self.service_containers.append(service_id)

    def ensure_service_containers_up(self):
        """Poll until every recorded service container reports 'running'.

        Raises TimeoutError after service_max_wait_sec.
        """
        waiting_container_ids = self.service_containers[:]
        steady_deadline = time.monotonic() + self.service_max_wait_sec
        pinfo('Waiting for service containers...')
        while waiting_container_ids:
            # Iterate over a copy so we can remove entries while looping.
            for cid in waiting_container_ids[:]:
                res = self.verbose_run([
                    'podman', 'container', 'inspect', '--', cid
                ], check=True, capture_output=True, encoding='utf-8')
                ins = json.loads(res.stdout)[0]
                if ins.get('State', {}).get('Status') == 'running':
                    psuccess(f'Container {cid} is up')
                    waiting_container_ids.remove(cid)
            if waiting_container_ids:
                if time.monotonic() > steady_deadline:
                    perror('Containers are not yet up after deadline.')
                    raise TimeoutError('Service containers startup timeout')
                pinfo('Some containers are not yet up. Waiting...')
                time.sleep(self.service_wait_interval_sec)
        psuccess('All service containers are up.')

    def maybe_prune_service_containers(self):
        """Stop and remove all recorded service containers (best effort)."""
        container_ids = self.service_containers
        if not container_ids:
            return
        stop_proc = self.verbose_run(['podman', 'container', 'stop', '--'] + container_ids)
        if stop_proc.returncode != 0:
            pwarn('Cannot stop container.')
        # -v removes anonymous volumes associated with the container
        rm_proc = self.verbose_run(['podman', 'container', 'rm', '-f', '-v', '--'] + container_ids)

    def run_in_container(self, image, work_volname, script_volname):
        """Run the job script in a container of *image*; return its exit code.

        Starts the container detached, follows its logs until it exits or
        the overall timeout elapses, then always stops and removes it.
        Returns the container's exit code, or 1 on timeout/start failure.
        """
        timeout = self.container_run_timeout_sec
        steady_deadline = time.monotonic() + timeout
        network_args = []
        if self.service_network_id:
            network_args += [f'--network={self.service_network_id}']

        start_process = self.verbose_run([
            'podman', 'run', '-d',
            f'--mount=type=volume,source={work_volname},destination={self.work_vol_mount_dir}',
            f'--mount=type=volume,source={script_volname},destination={self.script_vol_mount_dir}',
            f'--env-file={self.env_filename}',
        ] + network_args + image_to_podman_args(image) + [
            self.script_name,
        ], capture_output=True, encoding='utf-8')
        if start_process.returncode != 0:
            perror('Cannot run container. Error message:')
            print(start_process.stderr)
            return start_process.returncode
        container_id = start_process.stdout.strip()

        steady_now = time.monotonic()
        log_args = []
        retcode = None
        try:
            while steady_deadline > steady_now:
                log_process = self.verbose_run([
                    'podman', 'logs', '--follow'
                ] + log_args + ['--', container_id], timeout=steady_deadline - steady_now)
                # Exited from `podman logs`: why? Is the container still running?
                inspect_running = self.verbose_run([
                    'podman', 'container', 'inspect',
                    '--format', '{{.State.Status}}', '--', container_id,
                ], capture_output=True, encoding='utf-8', check=True)
                if inspect_running.stdout.strip() == 'exited':
                    inspect_retcode = self.verbose_run([
                        'podman', 'container', 'inspect',
                        '--format', '{{.State.ExitCode}}', '--', container_id,
                    ], capture_output=True, encoding='utf-8', check=True)
                    retcode = int(inspect_retcode.stdout.strip())
                    break
                else:
                    pwarn('`podman logs` unexpectedly quits when the container is still running, resuming logs...')
                    # Re-show the last few lines for context when resuming.
                    log_args = ['--tail', '10']
                steady_now = time.monotonic()
            if retcode is None:
                perror('Command timed out.')
                retcode = 1
        except subprocess.TimeoutExpired as e:
            perror('Command timed out.')
            retcode = 1
        except subprocess.CalledProcessError as e:
            # NOTE(review): retcode may remain None here, making the caller
            # treat the job as failed (None != 0) — confirm that's intended.
            perror('Cannot inspect container:', e)
        except:
            # NOTE(review): sys.exception() requires Python 3.12+, and the
            # bare except also catches KeyboardInterrupt — confirm the
            # target runtime and consider `except Exception as e`.
            perror('Another exception happened:', sys.exception())
        finally:
            pinfo('Cleaning up container...')
            stop_proc = self.verbose_run(['podman', 'container', 'stop', '--', container_id])
            if stop_proc.returncode != 0:
                pwarn('Cannot stop container.')
            # -v removes anonymous volumes associated with the container
            rm_proc = self.verbose_run(['podman', 'container', 'rm', '-f', '-v', '--', container_id])
            pinfo('Cleaned.')
        return retcode

    def main(self, argv):
        """Entry point: run one job described by the CLI arguments.

        argv layout: [prog, image_json, work_dir, script_dir, result_dir,
        services_json?]. Returns the job's exit code.
        """
        image = json.loads(argv[1])
        self.work_dir = argv[2]
        self.script_dir = argv[3]
        self.result_dir = argv[4]

        self.env_filename = os.path.join(self.script_dir, self.env_file_basename)

        services = []
        if len(argv) >= 6:
            services = json.loads(argv[5])

        metadata_filename = os.path.join(self.script_dir, self.metadata_file_basename)
        if os.path.exists(metadata_filename):
            pinfo('Parsing metadata...')
            with open(metadata_filename) as f:
                self.metadata = json.loads(f.read())
            psuccess('Parsed.')

        pinfo('Creating volumes...')
        work_vol = self.create_volume('work')
        script_vol = self.create_volume('script')
        psuccess('Created.')

        pinfo('Starting helper service...')
        self.start_helper_service(work_vol, script_vol)
        psuccess('Started...')

        if services:
            pinfo('Creating service network...')
            self.create_service_network()
            psuccess('Created.')

            pinfo('Starting job-defined services...')
            for service in services:
                self.start_and_record_service_container(service)

            pinfo('Waiting for job-defined services...')
            self.ensure_service_containers_up()

        pinfo('Importing volumes...')
        self.import_volume(self.work_dir, self.work_vol_mount_dir)
        self.import_volume(self.script_dir, self.script_vol_mount_dir)
        psuccess('Imported.')

        pinfo('Running container...')
        retcode = self.run_in_container(image, work_vol, script_vol)

        pinfo(f'Returned {retcode}.')

        if retcode != 0:
            perror('Job failed.')
        else:
            psuccess('Job succeeded.')

        # We should collect the result regardless whether it succeeded
        pinfo('Collecting build changes...')
        self.export_volume(self.result_dir, self.work_vol_mount_dir)
        psuccess('Collected.')

        return retcode

    def cleanup_all(self):
        """Tear down everything this helper created, in dependency order."""
        pinfo('Cleaning service containers...')
        self.maybe_prune_service_containers()
        psuccess('Cleaned.')

        pinfo('Cleaning service network...')
        self.maybe_clean_service_network()
        psuccess('Cleaned.')

        if self.helper_container_id:
            pinfo('Cleaning helper container')
            self.clean_helper_container()
            psuccess('Cleaned.')

        pinfo('Cleaning volumes...')
        self.clean_volumes()
        psuccess('Cleaned.')
+
if __name__ == '__main__':
    retcode = 1
    # Pre-bind ph so the finally block cannot hit a NameError (and mask the
    # real exception) when PodmanHelper() itself fails to construct.
    ph = None

    try:
        ph = PodmanHelper()
        retcode = ph.main(sys.argv)
    except Exception as e:
        perror('Error!', e)
        print(traceback.format_exc())
        raise
    finally:
        if ph is not None:
            ph.cleanup_all()
        sys.exit(retcode)
diff --git a/lilybuild/lilybuild/tests/podman_helper_test_worker.py b/lilybuild/lilybuild/tests/podman_helper_test_worker.py
new file mode 100644
--- /dev/null
+++ b/lilybuild/lilybuild/tests/podman_helper_test_worker.py
@@ -0,0 +1,273 @@
+
+import unittest
+from unittest.mock import Mock
+import tempfile
+import time
+import os
+import json
+import subprocess
+from dataclasses import dataclass
+from contextlib import contextmanager
+from lilybuild.podman_helper import PodmanHelper, image_to_podman_args
+
+@dataclass
+class MockedCompletedProcess:
+ stdout: str | bytes | None = None
+ stderr: str | bytes | None = None
+ returncode: int = 0
+
def mocked(ph, mock=None):
    """Swap *ph*'s verbose_run for *mock* (or a fresh Mock); return *ph*."""
    ph.verbose_run = mock if mock else Mock()
    return ph
+
class PodmanHelperTest(unittest.TestCase):
    """Unit tests for PodmanHelper with `verbose_run` fully mocked.

    Fix applied in review: two assertions on the entrypoint arguments used
    `assertTrue(x, msg)` — which always passes because the second argument
    is the failure *message* — and were changed to `assertIn` so they
    actually verify the generated podman arguments.
    """

    def test_create_and_clean_volume(self):
        """Volumes are created, recorded, and removed together."""
        ph = mocked(
            PodmanHelper(),
            Mock(side_effect=[
                MockedCompletedProcess(stdout='volume1\n'),
                MockedCompletedProcess(stdout='volume2\n'),
                MockedCompletedProcess(stdout='volume1\nvolume2\n'),
            ])
        )
        res = ph.create_volume('work')
        self.assertEqual(res, 'volume1')
        res = ph.create_volume('script')
        self.assertEqual(res, 'volume2')
        self.assertEqual(ph.volumes_to_remove, ['volume1', 'volume2'])
        ph.clean_volumes()
        self.assertEqual(
            ph.verbose_run.call_args.args[0][-3:],
            ['--', 'volume1', 'volume2'],
        )

    def test_helper_service(self):
        """Helper container is started, polled via nc, and cleaned up."""
        key_content = 'ssh-ed25519 somethingsomething a@example.com'
        with tempfile.NamedTemporaryFile(mode='w+', encoding='utf-8') as key_pub:
            print(key_content, file=key_pub)
            key_pub.flush()
            ph = mocked(
                PodmanHelper(
                    worker_container_name='workerhostname',
                    key_file_pub=key_pub.name,
                    ssh_wait_interval_sec=0.001,
                    volume_helper_image='lilybuild-volume-helper',
                ),
                Mock(side_effect=[
                    # podman inspect
                    MockedCompletedProcess(stdout=json.dumps([{
                        'Pod': 'pod0',
                        'NetworkSettings': {
                            'Networks': {
                                'network0': {
                                },
                            }
                        }
                    }])),
                    # podman run
                    MockedCompletedProcess(stdout='container0\n'),
                    # nc (first probe fails)
                    MockedCompletedProcess(returncode=1),
                    # nc (second probe sees the SSH banner)
                    MockedCompletedProcess(stdout=b'SSH 1.1.1\n'),
                    # podman container rm
                    MockedCompletedProcess(),
                ])
            )

            (cont_name, alias) = ph.start_helper_service('volume1', 'volume2')
            self.assertEqual(
                ph.verbose_run.call_args_list[0].args[0][-3:],
                ['inspect', '--', 'workerhostname'],
            )
            self.assertEqual(
                ph.verbose_run.call_args_list[1].args[0][-2:],
                ['--', 'lilybuild-volume-helper'],
            )
            self.assertTrue(
                'run' in ph.verbose_run.call_args_list[1].args[0]
            )
            self.assertTrue(ph.helper_container_id is not None)
            ph.clean_helper_container()
            self.assertEqual(
                ph.verbose_run.call_args.args[0][-2:],
                ['--', cont_name],
            )

    def test_create_prune_service_containers(self):
        """Service containers get env/network/alias/entrypoint args and are pruned."""
        env = '/path/to/script/env'
        ph = mocked(
            PodmanHelper(env_filename=env),
            Mock(side_effect=
                # network create
                [MockedCompletedProcess(stdout='network0')]
                # container run
                + [MockedCompletedProcess(stdout=f'container{i}') for i in range(4)]
                # stop & rm
                + [MockedCompletedProcess(), MockedCompletedProcess()]
            ),
        )
        ph.create_service_network()
        self.assertEqual(ph.service_network_id, 'network0')

        # no entrypoint, no command
        ph.start_and_record_service_container({
            'name': 'service:latest',
            'aliases': ['foo', 'bar'],
            'command': None,
            'entrypoint': None,
        })
        self.assertTrue(f'--env-file={env}' in ph.verbose_run.call_args.args[0])
        self.assertTrue('--network=network0' in ph.verbose_run.call_args.args[0])
        self.assertTrue('--network-alias=foo' in ph.verbose_run.call_args.args[0])
        self.assertTrue('--network-alias=bar' in ph.verbose_run.call_args.args[0])
        self.assertEqual(
            ph.verbose_run.call_args.args[0][-2:],
            ['--', 'service:latest'],
        )

        # no entrypoint, with command
        ph.start_and_record_service_container({
            'name': 'service:latest',
            'aliases': ['foo', 'bar'],
            'command': ['abc', 'def'],
            'entrypoint': None,
        })
        self.assertEqual(
            ph.verbose_run.call_args.args[0][-4:],
            ['--', 'service:latest', 'abc', 'def'],
        )

        # entrypoint str
        ph.start_and_record_service_container({
            'name': 'service:latest',
            'aliases': ['foo', 'bar'],
            'command': ['abc', 'def'],
            'entrypoint': '/bin/sh',
        })
        # Fixed: was assertTrue(x, msg), which never fails.
        self.assertIn('--entrypoint=/bin/sh', ph.verbose_run.call_args.args[0])

        # entrypoint list
        ph.start_and_record_service_container({
            'name': 'service:latest',
            'aliases': ['foo', 'bar'],
            'command': ['abc', 'def'],
            'entrypoint': ['/bin/sh', '-c'],
        })
        # Fixed: was assertTrue(x, msg), which never fails.
        self.assertIn('--entrypoint=["/bin/sh", "-c"]', ph.verbose_run.call_args.args[0])

        self.assertEqual(ph.service_containers, ['container0', 'container1', 'container2', 'container3'])

        ph.maybe_prune_service_containers()
        self.assertEqual(
            ph.verbose_run.call_args_list[-2].args[0][-5:],
            ['--', 'container0', 'container1', 'container2', 'container3'],
        )
        self.assertEqual(
            ph.verbose_run.call_args_list[-1].args[0][-5:],
            ['--', 'container0', 'container1', 'container2', 'container3'],
        )

    def test_ensure_service_containers_up(self):
        """Polling succeeds once every container reports 'running'."""
        count = 0
        def inspect_func(run_args, **kwargs):
            nonlocal count
            count += 1
            cont_id = run_args[-1]
            res = [{
                'State': {
                    'Status': 'running',
                }
            }]
            if cont_id == 'container0' and count < 5:
                res[0]['State']['Status'] = 'starting'
            return MockedCompletedProcess(stdout=json.dumps(res))

        ph = mocked(
            PodmanHelper(service_wait_interval_sec=0.001, service_max_wait_sec=1),
            Mock(side_effect=inspect_func),
        )
        ph.service_containers += ['container0', 'container1']
        ph.ensure_service_containers_up()

    def test_ensure_service_containers_up_failed(self):
        """Polling raises TimeoutError when containers never start."""
        def inspect_func(run_args, **kwargs):
            res = [{
                'State': {
                    'Status': 'starting',
                }
            }]
            return MockedCompletedProcess(stdout=json.dumps(res))

        ph = mocked(
            PodmanHelper(service_wait_interval_sec=0.5, service_max_wait_sec=1),
            Mock(side_effect=inspect_func),
        )
        ph.service_containers += ['container0', 'container1']
        with self.assertRaises(TimeoutError):
            ph.ensure_service_containers_up()

    def test_image_to_podman_args(self):
        """Image specs translate to the expected podman trailing arguments."""
        self.assertEqual(
            image_to_podman_args({'name': 'aaa'}),
            ['--', 'aaa'],
        )
        self.assertEqual(
            image_to_podman_args({'name': 'aaa', 'entrypoint': ['mew', 'abc']}),
            ['--entrypoint', json.dumps(['mew', 'abc']), '--', 'aaa'],
        )

    def test_run_in_container(self):
        """run_in_container returns the container's exit code."""
        def make_handle(rc):
            def handle(run_args, **kwargs):
                if run_args[1] == 'run':
                    return MockedCompletedProcess(stdout='container0\n')
                if run_args[1] == 'logs':
                    return MockedCompletedProcess()
                if run_args[2] == 'inspect' and '{{.State.Status}}' in run_args:
                    return MockedCompletedProcess(stdout='exited\n')
                if run_args[2] == 'inspect' and '{{.State.ExitCode}}' in run_args:
                    return MockedCompletedProcess(stdout=f'{rc}\n')
                if run_args[2] == 'stop' or run_args[2] == 'rm':
                    return MockedCompletedProcess()
                raise RuntimeError(f'Unexpected command called: {run_args}')
            return handle
        ph = mocked(
            PodmanHelper(container_run_timeout_sec=10, env_filename='/env'),
            Mock(side_effect=make_handle(0)),
        )
        rc = ph.run_in_container({'name': 'foo:latest'}, 'work_vol', 'script_vol')
        self.assertEqual(rc, 0)

        ph = mocked(
            PodmanHelper(container_run_timeout_sec=10, env_filename='/env'),
            Mock(side_effect=make_handle(1)),
        )
        rc = ph.run_in_container({'name': 'foo:latest'}, 'work_vol', 'script_vol')
        self.assertEqual(rc, 1)

    def test_run_in_container_timeout(self):
        """A log-follow timeout yields rc 1 and the container is cleaned up."""
        def handle(run_args, **kwargs):
            if run_args[1] == 'run':
                return MockedCompletedProcess(stdout='container0\n')
            if run_args[1] == 'logs':
                raise subprocess.TimeoutExpired(run_args, 10)
            if run_args[2] == 'stop' or run_args[2] == 'rm':
                return MockedCompletedProcess()
            raise RuntimeError(f'Unexpected command called: {run_args}')
        ph = mocked(
            PodmanHelper(container_run_timeout_sec=10, env_filename='/env'),
            Mock(side_effect=handle),
        )
        rc = ph.run_in_container({'name': 'foo:latest'}, 'work_vol', 'script_vol')
        self.assertEqual(rc, 1)
        # check the container is cleaned up
        self.assertTrue('stop' in ph.verbose_run.call_args_list[-2].args[0])
        self.assertTrue('rm' in ph.verbose_run.call_args_list[-1].args[0])

    def test_cleanup_all(self):
        """cleanup_all runs without error on a freshly mocked helper."""
        ph = mocked(PodmanHelper())
        ph.cleanup_all()
+
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
diff --git a/lilybuild/podman-helper b/lilybuild/podman-helper
old mode 100755
new mode 120000
--- /dev/null
+++ b/lilybuild/podman-helper
@@ -0,0 +1 @@
+lilybuild/podman_helper.py
\ No newline at end of file

File Metadata

Mime Type
text/plain
Expires
Mon, Jan 12, 12:30 PM (19 h, 47 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
947665
Default Alt Text
D266.1768249823.diff (26 KB)

Event Timeline