Skip to content
This repository was archived by the owner on Oct 8, 2025. It is now read-only.

Commit 01a563e

Browse files
author
Jason Schmidt
authored
chore: additional bug fixes and usability fixes to automation branch (#174)
* chore: update cert-manager chart and crds * chore: update logagent (filebeat) chart version * chore: update nginx IC to latest chart version * chore: update prometheus chart to latest version * chore: update logstore (Elasticsearch) to latest chart version * chore: update observability to new yaml and new chart * chore: update example config with new values * fix: remediation of deployment bugs * fix: removed JWT-only logic from BoS * fix: remove logic for sirius_host from deprecated jwt deploys * fix: remove deprecated ingress-repo-only project * fix: adjust min kubectl version deployed
1 parent beeb2f4 commit 01a563e

File tree

5 files changed

+16
-261
lines changed

5 files changed

+16
-261
lines changed

bin/setup_venv.sh

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -258,7 +258,7 @@ fi
258258
# downloading the kubectl if it did not exist; this could result in versions not being updated if the
259259
# MARA project was run in the same environment w/o a refresh.
260260
#
261-
# The two fixes here are to hardcode (For now) to a known good version (1.23.6) and force the script to
261+
# The two fixes here are to hardcode (For now) to a known good version (1.24.3) and force the script to
262262
# always download this version.
263263
#
264264
# TODO: Figure out a way to not hardcode the kubectl version
@@ -275,7 +275,7 @@ if [ ! -x "${VIRTUAL_ENV}/bin/kubectl" ]; then
275275
else
276276
echo "kubectl is already installed, but will overwrite to ensure correct version"
277277
echo "Downloading kubectl into virtual environment"
278-
KUBECTL_VERSION="v1.23.6"
278+
KUBECTL_VERSION="v1.24.3"
279279
${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl"
280280
KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")"
281281
echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd}
@@ -316,4 +316,4 @@ if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then
316316
tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}"
317317
[ $? -eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI"
318318
rm "${DOCTL_TARBALL_DEST}"
319-
fi
319+
fi

pulumi/python/kubernetes/applications/sirius/__main__.py

Lines changed: 13 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -37,12 +37,6 @@ def pulumi_ingress_project_name():
3737
return pulumi_config.get_pulumi_project_name(ingress_project_path)
3838

3939

40-
def pulumi_repo_ingress_project_name():
41-
script_dir = os.path.dirname(os.path.abspath(__file__))
42-
ingress_project_path = os.path.join(script_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
43-
return pulumi_config.get_pulumi_project_name(ingress_project_path)
44-
45-
4640
def sirius_manifests_location():
4741
script_dir = os.path.dirname(os.path.abspath(__file__))
4842
sirius_manifests_path = os.path.join(script_dir, 'src', 'kubernetes-manifests', '*.yaml')
@@ -102,58 +96,15 @@ def add_namespace(obj):
10296

10397
k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig)
10498

105-
# TODO: Streamline the logic for FQDN/IP into something a bit more sane and scalable #82
10699
#
107-
# Currently, if we are doing an AWS deployment we use the AWS IC deployment, which uses the ELB hostname
108-
# as part of the certificate (self-signed).
100+
# We use the hostname to set the value for our FQDN, which drives the cert
101+
# process as well.
109102
#
110-
# If we are using a kubeconfig file (ie, not type AWS) we expect we are going to get an IP address and not
111-
# a hostname in return. So we use the hostname variable to create the certificate we need, and then we use
112-
# the IP address in output to the user to tell them to setup DNS or a hostfile.
113-
#
114-
115-
# We use the kubernetes namespace for this
116-
config = pulumi.Config('kubernetes')
117-
infra_type = config.require('infra_type')
118-
119-
if infra_type == 'AWS':
120-
# Logic to extract the FQDN of the load balancer for Ingress
121-
ingress_project_name = pulumi_ingress_project_name()
122-
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
123-
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
124-
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
125-
sirius_host = lb_ingress_hostname
126-
elif infra_type == 'kubeconfig':
127-
# Logic to extract the FQDN of the load balancer for Ingress
128-
ingress_project_name = pulumi_repo_ingress_project_name()
129-
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
130-
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
131-
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
132-
# Set back to kubernetes
133-
config = pulumi.Config('kubernetes')
134-
lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
135-
sirius_host = lb_ingress_hostname
136-
elif infra_type == 'DO':
137-
# Logic to extract the FQDN of the load balancer for Ingress
138-
ingress_project_name = pulumi_repo_ingress_project_name()
139-
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
140-
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
141-
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
142-
# Set back to kubernetes
143-
config = pulumi.Config('kubernetes')
144-
lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
145-
sirius_host = lb_ingress_hostname
146-
elif infra_type == 'LKE':
147-
# Logic to extract the FQDN of the load balancer for Ingress
148-
ingress_project_name = pulumi_repo_ingress_project_name()
149-
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
150-
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
151-
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
152-
# Set back to kubernetes
153-
config = pulumi.Config('kubernetes')
154-
lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
155-
sirius_host = lb_ingress_hostname
156-
103+
ingress_project_name = pulumi_ingress_project_name()
104+
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
105+
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
106+
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
107+
sirius_host = lb_ingress_hostname
157108

158109
# Create the namespace for Bank of Sirius
159110
ns = k8s.core.v1.Namespace(resource_name='bos',
@@ -421,12 +372,12 @@ def add_namespace(obj):
421372
elif infra_type == 'kubeconfig':
422373
pulumi.export('hostname', lb_ingress_hostname)
423374
pulumi.export('ipaddress', lb_ingress_ip)
424-
#pulumi.export('application_url', f'https://{lb_ingress_hostname}')
375+
# pulumi.export('application_url', f'https://{lb_ingress_hostname}')
425376
application_url = sirius_host.apply(lambda host: f'https://{host}')
426377
elif infra_type == 'DO':
427378
pulumi.export('hostname', lb_ingress_hostname)
428379
pulumi.export('ipaddress', lb_ingress_ip)
429-
#pulumi.export('application_url', f'https://{lb_ingress_hostname}')
380+
# pulumi.export('application_url', f'https://{lb_ingress_hostname}')
430381
application_url = sirius_host.apply(lambda host: f'https://{host}')
431382

432383
#
@@ -457,11 +408,11 @@ def add_namespace(obj):
457408
namespace=ns,
458409

459410
# Values from Chart's parameters specified hierarchically,
460-
values = {
411+
values={
461412
"serviceMonitor": {
462413
"enabled": True,
463414
"namespace": "prometheus"
464-
},
415+
},
465416
"config": {
466417
"datasource": {
467418
"host": "accounts-db",
@@ -504,11 +455,11 @@ def add_namespace(obj):
504455
namespace=ns,
505456

506457
# Values from Chart's parameters specified hierarchically,
507-
values = {
458+
values={
508459
"serviceMonitor": {
509460
"enabled": True,
510461
"namespace": "prometheus"
511-
},
462+
},
512463
"config": {
513464
"datasource": {
514465
"host": "ledger-db",

pulumi/python/kubernetes/nginx/ingress-controller-repo-only/Pulumi.yaml

Lines changed: 0 additions & 7 deletions
This file was deleted.

pulumi/python/kubernetes/nginx/ingress-controller-repo-only/__main__.py

Lines changed: 0 additions & 189 deletions
This file was deleted.

pulumi/python/kubernetes/nginx/ingress-controller-repo-only/manifests/.gitkeep

Whitespace-only changes.

0 commit comments

Comments
 (0)