- #!/bin/bash
+ #!/usr/bin/env bash

# A basic test to ensure Docker images are built correctly.
# Builds a wrapper around the compute, starts all services, and runs a simple SQL query.
#
set -eux -o pipefail

+ cd "$(dirname "${0}")"
export COMPOSE_FILE='docker-compose.yml'
export COMPOSE_PROFILES=test-extensions
- cd "$(dirname "${0}")"
+ export PARALLEL_COMPUTES=${PARALLEL_COMPUTES:-1}
+ READY_MESSAGE="All computes are started"
+ COMPUTES=()
+ for i in $(seq 1 "${PARALLEL_COMPUTES}"); do
+   COMPUTES+=("compute${i}")
+ done
+ CURRENT_TMPDIR=$(mktemp -d)
+ trap 'rm -rf ${CURRENT_TMPDIR} docker-compose-parallel.yml' EXIT
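# Illustrative usage, not part of this change (the script file name below is an assumption, it is not given in this diff):
# run only PG 16 with four parallel computes. PARALLEL_COMPUTES defaults to 1, which keeps the previous single-compute behaviour.
#   PARALLEL_COMPUTES=4 TEST_VERSION_ONLY=16 ./docker_compose_test.sh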
+ if [[ ${PARALLEL_COMPUTES} -gt 1 ]]; then
+   export COMPOSE_FILE=docker-compose-parallel.yml
+   cp docker-compose.yml docker-compose-parallel.yml
+   # Replace the environment variable PARALLEL_COMPUTES with the actual value
+   yq eval -i ".services.compute_is_ready.environment |= map(select(. | test(\"^PARALLEL_COMPUTES=\") | not)) + [\"PARALLEL_COMPUTES=${PARALLEL_COMPUTES}\"]" ${COMPOSE_FILE}
+   for i in $(seq 2 "${PARALLEL_COMPUTES}"); do
+     # Duplicate compute1 as compute${i} for parallel execution
+     yq eval -i ".services.compute${i} = .services.compute1" ${COMPOSE_FILE}
+     # We don't need these sections, so delete them
+     yq eval -i "(del .services.compute${i}.build) | (del .services.compute${i}.ports) | (del .services.compute${i}.networks)" ${COMPOSE_FILE}
+     # Let compute1 be the only dependency
+     yq eval -i ".services.compute${i}.depends_on = [\"compute1\"]" ${COMPOSE_FILE}
+     # Set RUN_PARALLEL=true for the generated computes. They will generate their own tenant_id and timeline_id to avoid using the same IDs as other computes.
+     yq eval -i ".services.compute${i}.environment += [\"RUN_PARALLEL=true\"]" ${COMPOSE_FILE}
+     # Remove TENANT_ID and TIMELINE_ID from the environment variables of the generated computes;
+     # they will create new TENANT_ID and TIMELINE_ID anyway.
+     yq eval -i ".services.compute${i}.environment |= map(select(. | (test(\"^TENANT_ID=\") or test(\"^TIMELINE_ID=\")) | not))" ${COMPOSE_FILE}
+   done
+ fi
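# Illustrative check, not part of the change: when PARALLEL_COMPUTES > 1, the generated compose file can be
# inspected with the same yq tool, e.g. to confirm that compute2 exists and carries RUN_PARALLEL=true:
#   yq eval '.services | keys' docker-compose-parallel.yml
#   yq eval '.services.compute2.environment' docker-compose-parallel.yml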
PSQL_OPTION="-h localhost -U cloud_admin -p 55433 -d postgres"

function cleanup() {
@@ -27,11 +54,11 @@ function cleanup() {

for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
  pg_version=${pg_version/v/}
-   echo "clean up containers if exists"
+   echo "clean up containers if they exist"
  cleanup
  PG_TEST_VERSION=$((pg_version < 16 ? 16 : pg_version))
-   PG_VERSION=${pg_version} PG_TEST_VERSION=${PG_TEST_VERSION} docker compose up --quiet-pull --build -d
-
+   PG_VERSION=${pg_version} PG_TEST_VERSION=${PG_TEST_VERSION} docker compose build compute1
+   PG_VERSION=${pg_version} PG_TEST_VERSION=${PG_TEST_VERSION} docker compose up --quiet-pull -d
  echo "wait until the compute is ready. timeout after 60s."
  cnt=0
  while sleep 3; do
@@ -41,45 +68,50 @@ for pg_version in ${TEST_VERSION_ONLY-14 15 16 17}; do
      echo "timeout before the compute is ready."
      exit 1
    fi
-     if docker compose logs "compute_is_ready" | grep -q "accepting connections"; then
+     if docker compose logs compute_is_ready | grep -q "${READY_MESSAGE}"; then
      echo "OK. The compute is ready to connect."
      echo "execute simple queries."
-       docker compose exec compute /bin/bash -c "psql ${PSQL_OPTION} -c 'SELECT 1'"
+       for compute in "${COMPUTES[@]}"; do
+         docker compose exec "${compute}" /bin/bash -c "psql ${PSQL_OPTION} -c 'SELECT 1'"
+       done
      break
    fi
  done
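# An equivalent one-shot wait, shown for illustration only (coreutils timeout instead of the counter above):
#   timeout 60 bash -c 'until docker compose logs compute_is_ready | grep -q "All computes are started"; do sleep 3; done'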

  if [[ ${pg_version} -ge 16 ]]; then
-     # This is required for the pg_hint_plan test, to prevent flaky log message causing the test to fail
-     # It cannot be moved to Dockerfile now because the database directory is created after the start of the container
-     echo Adding dummy config
-     docker compose exec compute touch /var/db/postgres/compute/compute_ctl_temp_override.conf
-     # Prepare for the PostGIS test
-     docker compose exec compute mkdir -p /tmp/pgis_reg/pgis_reg_tmp
-     TMPDIR=$(mktemp -d)
-     docker compose cp neon-test-extensions:/ext-src/postgis-src/raster/test "${TMPDIR}"
-     docker compose cp neon-test-extensions:/ext-src/postgis-src/regress/00-regress-install "${TMPDIR}"
-     docker compose exec compute mkdir -p /ext-src/postgis-src/raster /ext-src/postgis-src/regress /ext-src/postgis-src/regress/00-regress-install
-     docker compose cp "${TMPDIR}/test" compute:/ext-src/postgis-src/raster/test
-     docker compose cp "${TMPDIR}/00-regress-install" compute:/ext-src/postgis-src/regress
-     rm -rf "${TMPDIR}"
-     # The following block copies the files for the pg_hintplan test to the compute node for the extension test in an isolated docker-compose environment
-     TMPDIR=$(mktemp -d)
-     docker compose cp neon-test-extensions:/ext-src/pg_hint_plan-src/data "${TMPDIR}/data"
-     docker compose cp "${TMPDIR}/data" compute:/ext-src/pg_hint_plan-src/
-     rm -rf "${TMPDIR}"
-     # The following block does the same for the contrib/file_fdw test
-     TMPDIR=$(mktemp -d)
-     docker compose cp neon-test-extensions:/postgres/contrib/file_fdw/data "${TMPDIR}/data"
-     docker compose cp "${TMPDIR}/data" compute:/postgres/contrib/file_fdw/data
-     rm -rf "${TMPDIR}"
+     mkdir "${CURRENT_TMPDIR}"/{pg_hint_plan-src,file_fdw,postgis-src}
+     docker compose cp neon-test-extensions:/ext-src/postgis-src/raster/test "${CURRENT_TMPDIR}/postgis-src/test"
+     docker compose cp neon-test-extensions:/ext-src/postgis-src/regress/00-regress-install "${CURRENT_TMPDIR}/postgis-src/00-regress-install"
+     docker compose cp neon-test-extensions:/ext-src/pg_hint_plan-src/data "${CURRENT_TMPDIR}/pg_hint_plan-src/data"
+     docker compose cp neon-test-extensions:/postgres/contrib/file_fdw/data "${CURRENT_TMPDIR}/file_fdw/data"
+
+     for compute in "${COMPUTES[@]}"; do
+       # This is required for the pg_hint_plan test, to prevent a flaky log message from causing the test to fail.
+       # It cannot be moved to the Dockerfile for now because the database directory is created only after the container starts.
+       echo Adding dummy config on "${compute}"
+       docker compose exec "${compute}" touch /var/db/postgres/compute/compute_ctl_temp_override.conf
+       # Prepare for the PostGIS test
+       docker compose exec "${compute}" mkdir -p /tmp/pgis_reg/pgis_reg_tmp /ext-src/postgis-src/raster /ext-src/postgis-src/regress /ext-src/postgis-src/regress/00-regress-install
+       docker compose cp "${CURRENT_TMPDIR}/postgis-src/test" "${compute}":/ext-src/postgis-src/raster/test
+       docker compose cp "${CURRENT_TMPDIR}/postgis-src/00-regress-install" "${compute}":/ext-src/postgis-src/regress
+       # The following block copies the files for the pg_hint_plan test to the compute node for the extension test in an isolated docker-compose environment
+       docker compose cp "${CURRENT_TMPDIR}/pg_hint_plan-src/data" "${compute}":/ext-src/pg_hint_plan-src/
+       # The following block does the same for the contrib/file_fdw test
+       docker compose cp "${CURRENT_TMPDIR}/file_fdw/data" "${compute}":/postgres/contrib/file_fdw/data
+     done
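# Optional sanity check, purely illustrative and not part of the change: confirm the copied test data is present on every compute.
#   for compute in "${COMPUTES[@]}"; do
#     docker compose exec "${compute}" ls /ext-src/pg_hint_plan-src/data /postgres/contrib/file_fdw/data
#   done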
    # Apply patches
    docker compose exec -T neon-test-extensions bash -c "(cd /postgres && patch -p1)" < "../compute/patches/contrib_pg${pg_version}.patch"
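# Illustrative only: the same patch can be previewed without applying it via GNU patch's --dry-run flag:
#   docker compose exec -T neon-test-extensions bash -c "(cd /postgres && patch -p1 --dry-run)" < "../compute/patches/contrib_pg${pg_version}.patch"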
    # We are running tests now
    rm -f testout.txt testout_contrib.txt
+     # We want to run the longest tests first to better utilize parallelization and reduce overall test time.
+     # Tests listed in the RUN_FIRST variable will be run before others.
+     # If parallelization is not used, this environment variable will be ignored.
+
    docker compose exec -e USE_PGXS=1 -e SKIP=timescaledb-src,rdkit-src,pg_jsonschema-src,kq_imcx-src,wal2json_2_5-src,rag_jina_reranker_v1_tiny_en-src,rag_bge_small_en_v15-src \
+       -e RUN_FIRST=hll-src,postgis-src,pgtap-src -e PARALLEL_COMPUTES="${PARALLEL_COMPUTES}" \
      neon-test-extensions /run-tests.sh /ext-src | tee testout.txt && EXT_SUCCESS=1 || EXT_SUCCESS=0
    docker compose exec -e SKIP=start-scripts,postgres_fdw,ltree_plpython,jsonb_plpython,jsonb_plperl,hstore_plpython,hstore_plperl,dblink,bool_plperl \
+       -e PARALLEL_COMPUTES="${PARALLEL_COMPUTES}" \
      neon-test-extensions /run-tests.sh /postgres/contrib | tee testout_contrib.txt && CONTRIB_SUCCESS=1 || CONTRIB_SUCCESS=0
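# Hypothetical debugging aid, not part of the change: re-run a single suite with the same runner,
# without the SKIP list and without teeing the output to a file.
#   docker compose exec -e PARALLEL_COMPUTES="${PARALLEL_COMPUTES}" neon-test-extensions /run-tests.sh /postgres/contrib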
    if [[ ${EXT_SUCCESS} -eq 0 || ${CONTRIB_SUCCESS} -eq 0 ]]; then
      CONTRIB_FAILED=