Page Menu — Home — Phorge

D262.1768008484.diff
No One — Temporary

Size
11 KB
Referenced Files
None
Subscribers
None

D262.1768008484.diff

diff --git a/lilybuild/lilybuild/ci_steps.py b/lilybuild/lilybuild/ci_steps.py
--- a/lilybuild/lilybuild/ci_steps.py
+++ b/lilybuild/lilybuild/ci_steps.py
@@ -5,7 +5,7 @@
from twisted.internet import defer
from .ci_syntax import ci_file
from .ci_syntax import rules as ci_rules
-from .helpers import rsync_rules_from_artifacts, get_job_script, normalize_image
+from .helpers import rsync_rules_from_artifacts, get_job_script, normalize_image, normalize_services
from .phorge import SendCoverageToPhorge
import re
import sys
@@ -254,6 +254,7 @@
self.src_relative,
self.script_dir,
self.result_relative,
+ normalize_services(job.services),
],
workdir=self.work_root_dir,
doStepIf=on_success,
diff --git a/lilybuild/lilybuild/ci_syntax/ci_file.py b/lilybuild/lilybuild/ci_syntax/ci_file.py
--- a/lilybuild/lilybuild/ci_syntax/ci_file.py
+++ b/lilybuild/lilybuild/ci_syntax/ci_file.py
@@ -101,6 +101,7 @@
self.artifacts = job_struct.get('artifacts') or {}
self.rules = job_struct.get('rules') or []
self.dependencies = job_struct.get('dependencies')
+ self.services = job_struct.get('services') or []
def get_predefined_ci_variables(self):
vs = {
diff --git a/lilybuild/lilybuild/helpers.py b/lilybuild/lilybuild/helpers.py
--- a/lilybuild/lilybuild/helpers.py
+++ b/lilybuild/lilybuild/helpers.py
@@ -1,6 +1,7 @@
import json
import shlex
+import re
def normalize_path_for_rsync(path):
n = path
@@ -74,3 +75,30 @@
return json.dumps({'name': image})
else:
return json.dumps(image)
+
+def get_service_aliases_from_name(name):
+ # https://docs.gitlab.com/ci/services/#accessing-the-services
+ pos = name.find(':')
+ if pos != -1:
+ name = name[:pos]
+ primary = name.replace('/', '__')
+ secondary = name.replace('/', '-')
+ if primary == secondary:
+ return [primary]
+ else:
+ return [primary, secondary]
+
+SERVICE_ALIAS_SEPARATOR = re.compile(r'[ ,]+')
+def normalize_services(services):
+ res = []
+ for s in services:
+ so = s if isinstance(s, dict) else {'name': s}
+ normalized_service = {
+ 'name': so['name'],
+ 'aliases': SERVICE_ALIAS_SEPARATOR.split(so.get('alias')) if so.get('alias') else get_service_aliases_from_name(so['name']),
+ 'entrypoint': so.get('entrypoint'),
+ 'command': so.get('command'),
+ }
+ res.append(normalized_service)
+
+ return json.dumps(res)
diff --git a/lilybuild/lilybuild/tests/helpers_test.py b/lilybuild/lilybuild/tests/helpers_test.py
--- a/lilybuild/lilybuild/tests/helpers_test.py
+++ b/lilybuild/lilybuild/tests/helpers_test.py
@@ -10,6 +10,8 @@
get_job_script,
DEFAULT_SCRIPT_HEADER,
normalize_image,
+ get_service_aliases_from_name,
+ normalize_services,
)
from lilybuild.tests.resources import get_res
@@ -139,5 +141,55 @@
res = normalize_image(orig)
self.assertEqual(json.loads(res), orig)
+class GetServiceAliasesFromNameTest(unittest.TestCase):
+ def test_simple(self):
+ self.assertEqual(
+ get_service_aliases_from_name('mewmew:abcdefg'),
+ ['mewmew'])
+ self.assertEqual(
+ get_service_aliases_from_name('mewmew/a:abcdefg'),
+ ['mewmew__a', 'mewmew-a'])
+ self.assertEqual(
+ get_service_aliases_from_name('mew-mew/a:abc-defg'),
+ ['mew-mew__a', 'mew-mew-a'])
+ self.assertEqual(
+ get_service_aliases_from_name('a.example/mew-mew/a:abc-defg'),
+ ['a.example__mew-mew__a', 'a.example-mew-mew-a'])
+
+class NormalizeServicesTest(unittest.TestCase):
+ def test_simple(self):
+ self.assertEqual(
+ json.loads(normalize_services([
+ 'mysql:latest',
+ 'mysql:latest',
+ ])),
+ [{ 'name': 'mysql:latest', 'aliases': ['mysql'], 'entrypoint': None, 'command': None },
+ { 'name': 'mysql:latest', 'aliases': ['mysql'], 'entrypoint': None, 'command': None }],
+ )
+
+ def test_own_alias(self):
+ self.assertEqual(
+ json.loads(normalize_services([
+ {'name': 'mysql:latest', 'alias': 'a, b c'},
+ 'mysql:latest',
+ ])),
+ [{ 'name': 'mysql:latest', 'aliases': ['a', 'b', 'c'], 'entrypoint': None, 'command': None },
+ { 'name': 'mysql:latest', 'aliases': ['mysql'], 'entrypoint': None, 'command': None }],
+ )
+
+ def test_entrypoint_command(self):
+ self.assertEqual(
+ json.loads(normalize_services([
+ {'name': 'mysql:latest', 'entrypoint': 'a', 'command': 'b c'},
+ ])),
+ [{ 'name': 'mysql:latest', 'aliases': ['mysql'], 'entrypoint': 'a', 'command': 'b c' }]
+ )
+ self.assertEqual(
+ json.loads(normalize_services([
+ {'name': 'mysql:latest', 'entrypoint': ['a', 'b'], 'command': ['b c', 'c d']},
+ ])),
+ [{ 'name': 'mysql:latest', 'aliases': ['mysql'], 'entrypoint': ['a', 'b'], 'command': ['b c', 'c d'] }]
+ )
+
if __name__ == '__main__':
unittest.main()
diff --git a/lilybuild/podman-helper b/lilybuild/podman-helper
--- a/lilybuild/podman-helper
+++ b/lilybuild/podman-helper
@@ -22,6 +22,10 @@
helper_container_id = None
ssh_max_wait = 10
ssh_wait_interval_sec = 1
+service_network_id = None
+service_containers = []
+service_max_wait_sec = 60 * 5
+service_wait_interval_sec = 10
col_info = '\x1b[1;34m'
col_success = '\x1b[1;32m'
@@ -135,17 +139,88 @@
# ci.json requires that the entrypoint is an array of strings
ep = json.dumps(image['entrypoint'])
args += ['--entrypoint', ep]
- args += [name]
+ args += ['--', name]
return args
-def run_in_container(image, work_volname, script_volname):
+def create_service_network():
+ res = verbose_run([
+ 'podman', 'network', 'create', '--label', 'lilybuild=service-network'
+ ], capture_output=True, check=True, encoding='utf-8')
+ return res.stdout.strip()
+
+def clean_service_network(network_id):
+ res = verbose_run([
+ 'podman', 'network', 'rm', '-f', '--', network_id
+ ], capture_output=True, encoding='utf-8')
+ if res.returncode != 0:
+ perror('Cannot remove service network.')
+
+def start_service_container(service, network_id):
+ image = service['name']
+ ep_args = []
+ if service['entrypoint']:
+ if isinstance(service['entrypoint'], str):
+ entrypoint = service['entrypoint']
+ else:
+ entrypoint = json.dumps(service['entrypoint'])
+ ep_args += [f'--entrypoint={entrypoint}']
+ cmd_args = []
+ if service['command']:
+ if isinstance(service['command'], str):
+ cmd_args += [service['command']]
+ else:
+ cmd_args += service['command']
+ res = verbose_run([
+ 'podman', 'run', '-d', '--label', 'lilybuild=job-service',
+ f'--network={network_id}',
+ ] + [
+ f'--network-alias={alias}' for alias in service['aliases']
+ ] + ep_args + [
+ '--',
+ image,
+ ] + cmd_args, check=True, capture_output=True, encoding='utf-8')
+ return res.stdout.strip()
+
+def ensure_service_containers_up(container_ids):
+ waiting_container_ids = container_ids[:]
+ steady_deadline = time.monotonic() + service_max_wait_sec
+ pinfo('Waiting for service containers...')
+ while waiting_container_ids:
+ for cid in waiting_container_ids[:]:
+ res = verbose_run([
+ 'podman', 'container', 'inspect', '--', cid
+ ], check=True, capture_output=True, encoding='utf-8')
+ ins = json.loads(res.stdout)[0]
+ if ins.get('State', {}).get('Status') == 'running':
+ psuccess(f'Container {cid} is up')
+ waiting_container_ids.remove(cid)
+ if waiting_container_ids:
+ if time.monotonic() > steady_deadline:
+ perror('Containers are not yet up after deadline.')
+ raise TimeoutError('Service containers startup timeout')
+ pinfo('Some containers are not yet up. Waiting...')
+ time.sleep(service_wait_interval_sec)
+ psuccess('All service containers are up.')
+
+def prune_service_containers(container_ids):
+ stop_proc = verbose_run(['podman', 'container', 'stop', '--'] + container_ids)
+ if stop_proc.returncode != 0:
+ perror('Cannot stop container.')
+ # -v removes anonymous volumes associated with the container
+ rm_proc = verbose_run(['podman', 'container', 'rm', '-f', '-v', '--'] + container_ids)
+
+def run_in_container(image, work_volname, script_volname, network_id):
timeout = 60 * 60 * 2 # 2 hours by default
steady_deadline = time.monotonic() + timeout
+ network_args = []
+ if network_id:
+ network_args += [f'--network={network_id}']
+
start_process = verbose_run([
'podman', 'run', '-d',
f'--mount=type=volume,source={work_volname},destination={work_vol_mount_dir}',
f'--mount=type=volume,source={script_volname},destination={script_vol_mount_dir}',
- ] + image_to_podman_args(image) + [
+ ] + network_args + image_to_podman_args(image) + [
script_name,
], capture_output=True, encoding='utf-8')
if start_process.returncode != 0:
@@ -201,6 +276,9 @@
work_dir = sys.argv[2]
script_dir = sys.argv[3]
result_dir = sys.argv[4]
+ services = []
+ if len(sys.argv) >= 6:
+ services = json.loads(sys.argv[5])
pinfo('Creating volumes...')
work_vol = create_volume('work')
@@ -212,13 +290,27 @@
(helper_container_id, alias) = start_helper_service(work_vol, script_vol)
psuccess('Started...')
+ if services:
+ pinfo('Creating service network...')
+ global service_network_id
+ service_network_id = create_service_network()
+ psuccess('Created.')
+
+ pinfo('Starting job-defined services...')
+ global service_containers
+ for service in services:
+ service_containers.append(start_service_container(service, service_network_id))
+
+ pinfo('Waiting for job-defined services...')
+ ensure_service_containers_up(service_containers)
+
pinfo('Importing volumes...')
import_volume(alias, work_dir, work_vol_mount_dir)
import_volume(alias, script_dir, script_vol_mount_dir)
psuccess('Imported.')
pinfo('Running container...')
- retcode = run_in_container(image, work_vol, script_vol)
+ retcode = run_in_container(image, work_vol, script_vol, service_network_id)
pinfo(f'Returned {retcode}.')
@@ -234,15 +326,16 @@
return retcode
-retcode = 1
+def cleanup_all():
+ if service_containers:
+ pinfo('Cleaning service containers...')
+ prune_service_containers(service_containers)
+ psuccess('Cleaned.')
+ if service_network_id:
+ pinfo('Cleaning service network...')
+ clean_service_network(service_network_id)
+ psuccess('Cleaned.')
-try:
- retcode = main()
-except Exception as e:
- perror('Error!', e)
- print(traceback.format_exc())
- raise
-finally:
if helper_container_id:
pinfo('Cleaning helper container')
clean_helper_container()
@@ -252,4 +345,14 @@
clean_volumes()
psuccess('Cleaned.')
+retcode = 1
+
+try:
+ retcode = main()
+except Exception as e:
+ perror('Error!', e)
+ print(traceback.format_exc())
+ raise
+finally:
+ cleanup_all()
sys.exit(retcode)

File Metadata

Mime Type
text/plain
Expires
Fri, Jan 9, 5:28 PM (2 h, 9 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
937107
Default Alt Text
D262.1768008484.diff (11 KB)

Event Timeline