Skip to content
This repository was archived by the owner on Oct 8, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions bin/setup_venv.sh
Original file line number Diff line number Diff line change
Expand Up @@ -258,7 +258,7 @@ fi
# downloading the kubectl if it did not exist; this could result in versions not being updated if the
# MARA project was run in the same environment w/o a refresh.
#
# The two fixes here are to hardcode (For now) to a known good version (1.23.6) and force the script to
# The two fixes here are to hardcode (For now) to a known good version (1.24.3) and force the script to
# always download this version.
#
# TODO: Figure out a way to not hardcode the kubectl version
Expand All @@ -275,7 +275,7 @@ if [ ! -x "${VIRTUAL_ENV}/bin/kubectl" ]; then
else
echo "kubectl is already installed, but will overwrite to ensure correct version"
echo "Downloading kubectl into virtual environment"
KUBECTL_VERSION="v1.23.6"
KUBECTL_VERSION="v1.24.3"
${download_cmd} "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl" >"${VIRTUAL_ENV}/bin/kubectl"
KUBECTL_CHECKSUM="$(${download_cmd} "https://dl.k8s.io/${KUBECTL_VERSION}/bin/${OS}/${ARCH}/kubectl.sha256")"
echo "${KUBECTL_CHECKSUM} ${VIRTUAL_ENV}/bin/kubectl" | ${sha256sum_cmd}
Expand Down Expand Up @@ -316,4 +316,4 @@ if [ ! -x "${VIRTUAL_ENV}/bin/doctl" ]; then
tar --extract --gunzip --directory "${VIRTUAL_ENV}/bin" --file "${DOCTL_TARBALL_DEST}"
[ $? -eq 0 ] && echo "Digital Ocean CLI installed successfully" || echo "Failed to install Digital Ocean CLI"
rm "${DOCTL_TARBALL_DEST}"
fi
fi
26 changes: 13 additions & 13 deletions config/pulumi/Pulumi.stackname.yaml.example
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ config:
# Chart name for the helm chart for kic
kic-helm:chart_name: nginx-ingress
# Chart version for the helm chart for kic
kic-helm:chart_version: 0.13.2
kic-helm:chart_version: 0.14.0
# Name of the repo to pull the kic chart from
kic-helm:helm_repo_name: nginx-stable
# URL of the chart repo to pull kic from
Expand All @@ -151,12 +151,12 @@ config:
# https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image/
#
# The following are all valid image names:
# kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.2
# kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.2.2-ot
# kic:image_name: docker.io/nginx/nginx-ingress:2.2.2
# kic:image_name: nginx/nginx-ingress:2.2.2
# kic:image_name: nginx/nginx-ingress:2.2.2-alpine
kic:image_name: nginx/nginx-ingress:2.2.2
# kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.3.0
# kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.3.0-ot
# kic:image_name: docker.io/nginx/nginx-ingress:2.3.0
# kic:image_name: nginx/nginx-ingress:2.3.0
# kic:image_name: nginx/nginx-ingress:2.3.0-alpine
kic:image_name: nginx/nginx-ingress:2.3.0


############################################################################
Expand Down Expand Up @@ -230,7 +230,7 @@ config:
# Logagent Configuration
logagent:chart_name: filebeat
# Chart name for the helm chart for the logagent
logagent:chart_version: 7.16.3
logagent:chart_version: 7.17.3
# Chart version for the helm chart for the logagent
logagent:helm_repo_name: elastic
# Name of the repo to pull the logagent from
Expand All @@ -246,7 +246,7 @@ config:
# Logstore Configuration
logstore:chart_name: elasticsearch
# Chart name for the helm chart for the logstore
logstore:chart_version: 17.6.2
logstore:chart_version: 19.1.4
# Chart version for the helm chart for the logstore
logstore:helm_repo_name: bitnami
# Name of the repo to pull the logstore from
Expand Down Expand Up @@ -277,7 +277,7 @@ config:
# Cert Manager Configuration
certmgr:chart_name: cert-manager
# Chart name for the helm chart for certmanager
certmgr:chart_version: v1.6.1
certmgr:chart_version: v1.9.1
# Chart version for the helm chart for certmanager
certmgr:certmgr_helm_repo_name: jetstack
# Name of the repo to pull the certmanager chart from
Expand All @@ -293,15 +293,15 @@ config:
# Prometheus Configuration
prometheus:chart_name: kube-prometheus-stack
# Chart name for the helm chart for prometheus
prometheus:chart_version: 30.0.1
prometheus:chart_version: 39.2.1
# Chart version for the helm chart for prometheus
prometheus:helm_repo_name: prometheus-community
# Name of the repo to pull the prometheus chart from
prometheus:helm_repo_url: https://prometheus-community.github.io/helm-charts
# URL of the chart repo
prometheus:statsd_chart_name: prometheus-statsd-exporter
# Name of the statsd chart (uses the same repo as the prom chart)
prometheus.statsd_chart_version: 0.4.2
prometheus:statsd_chart_version: 0.5.0
# Version of the statsd chart (uses the same repo as the prom chart)
prometheus:helm_timeout: 300
# Timeout value for helm operations in seconds
Expand Down Expand Up @@ -338,7 +338,7 @@ config:
# Linode Kubernetes Engine
############################################################################
# This is the Kubernetes version to install using Linode K8s.
linode:k8s_version: 1.22
linode:k8s_version: 1.23
# This is the default instance type used by Linode Kubernetes
linode:instance_type: g6-standard-8
# The desired node count of the Linode K8s cluster.
Expand Down
75 changes: 13 additions & 62 deletions pulumi/python/kubernetes/applications/sirius/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,12 +37,6 @@ def pulumi_ingress_project_name():
return pulumi_config.get_pulumi_project_name(ingress_project_path)


def pulumi_repo_ingress_project_name():
    """Return the Pulumi project name for the repo-only ingress controller.

    Resolves the 'ingress-controller-repo-only' project directory relative to
    this file, then asks the shared pulumi_config helper for its project name.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    repo_only_ic_path = os.path.join(
        base_dir, '..', '..', 'nginx', 'ingress-controller-repo-only')
    return pulumi_config.get_pulumi_project_name(repo_only_ic_path)


def sirius_manifests_location():
script_dir = os.path.dirname(os.path.abspath(__file__))
sirius_manifests_path = os.path.join(script_dir, 'src', 'kubernetes-manifests', '*.yaml')
Expand Down Expand Up @@ -102,58 +96,15 @@ def add_namespace(obj):

k8s_provider = k8s.Provider(resource_name=f'ingress-controller', kubeconfig=kubeconfig)

# TODO: Streamline the logic for FQDN/IP into something a bit more sane and scalable #82
#
# Currently, if we are doing an AWS deployment we use the AWS IC deployment, which uses the ELB hostname
# as part of the certificate (self-signed).
# We use the hostanme to set the value for our FQDN, which drives the cert
# process as well.
#
# If we are using a kubeconfig file (ie, not type AWS) we expect we are going to get an IP address and not
# a hostname in return. So we use the hostname variable to create the certificate we need, and then we use
# the IP address in output to the user to tell them to setup DNS or a hostfile.
#

# We use the kubernetes namespace for this
config = pulumi.Config('kubernetes')
infra_type = config.require('infra_type')

if infra_type == 'AWS':
# Logic to extract the FQDN of the load balancer for Ingress
ingress_project_name = pulumi_ingress_project_name()
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
sirius_host = lb_ingress_hostname
elif infra_type == 'kubeconfig':
# Logic to extract the FQDN of the load balancer for Ingress
ingress_project_name = pulumi_repo_ingress_project_name()
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
# Set back to kubernetes
config = pulumi.Config('kubernetes')
lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
sirius_host = lb_ingress_hostname
elif infra_type == 'DO':
# Logic to extract the FQDN of the load balancer for Ingress
ingress_project_name = pulumi_repo_ingress_project_name()
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
# Set back to kubernetes
config = pulumi.Config('kubernetes')
lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
sirius_host = lb_ingress_hostname
elif infra_type == 'LKE':
# Logic to extract the FQDN of the load balancer for Ingress
ingress_project_name = pulumi_repo_ingress_project_name()
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
# Set back to kubernetes
config = pulumi.Config('kubernetes')
lb_ingress_ip = ingress_stack_ref.get_output('lb_ingress_ip')
sirius_host = lb_ingress_hostname

ingress_project_name = pulumi_ingress_project_name()
ingress_stack_ref_id = f"{pulumi_user}/{ingress_project_name}/{stack_name}"
ingress_stack_ref = pulumi.StackReference(ingress_stack_ref_id)
lb_ingress_hostname = ingress_stack_ref.get_output('lb_ingress_hostname')
sirius_host = lb_ingress_hostname

# Create the namespace for Bank of Sirius
ns = k8s.core.v1.Namespace(resource_name='bos',
Expand Down Expand Up @@ -421,12 +372,12 @@ def add_namespace(obj):
elif infra_type == 'kubeconfig':
pulumi.export('hostname', lb_ingress_hostname)
pulumi.export('ipaddress', lb_ingress_ip)
#pulumi.export('application_url', f'https://{lb_ingress_hostname}')
# pulumi.export('application_url', f'https://{lb_ingress_hostname}')
application_url = sirius_host.apply(lambda host: f'https://{host}')
elif infra_type == 'DO':
pulumi.export('hostname', lb_ingress_hostname)
pulumi.export('ipaddress', lb_ingress_ip)
#pulumi.export('application_url', f'https://{lb_ingress_hostname}')
# pulumi.export('application_url', f'https://{lb_ingress_hostname}')
application_url = sirius_host.apply(lambda host: f'https://{host}')

#
Expand Down Expand Up @@ -457,11 +408,11 @@ def add_namespace(obj):
namespace=ns,

# Values from Chart's parameters specified hierarchically,
values = {
values={
"serviceMonitor": {
"enabled": True,
"namespace": "prometheus"
},
},
"config": {
"datasource": {
"host": "accounts-db",
Expand Down Expand Up @@ -504,11 +455,11 @@ def add_namespace(obj):
namespace=ns,

# Values from Chart's parameters specified hierarchically,
values = {
values={
"serviceMonitor": {
"enabled": True,
"namespace": "prometheus"
},
},
"config": {
"datasource": {
"host": "ledger-db",
Expand Down
26 changes: 6 additions & 20 deletions pulumi/python/kubernetes/certmgr/__main__.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,12 +8,6 @@
from kic_util import pulumi_config


def crd_deployment_manifest():
    """Return the absolute path to the bundled cert-manager CRD manifest.

    The manifest lives in the 'manifests' directory next to this module, so
    the path is built relative to this file's own location.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'manifests', 'cert-manager.crds.yaml')


def project_name_from_project_dir(dirname: str):
script_dir = os.path.dirname(os.path.abspath(__file__))
project_path = os.path.join(script_dir, '..', '..', '..', 'python', 'infrastructure', dirname)
Expand All @@ -40,24 +34,13 @@ def add_namespace(obj):
metadata={'name': 'cert-manager'},
opts=pulumi.ResourceOptions(provider=k8s_provider))

# Config Manifests
crd_deployment = crd_deployment_manifest()

crd_dep = ConfigFile(
'crd-dep',
file=crd_deployment,
transformations=[add_namespace], # Need to review w/ operator
opts=pulumi.ResourceOptions(depends_on=[ns])
)


config = pulumi.Config('certmgr')
chart_name = config.get('chart_name')
if not chart_name:
chart_name = 'cert-manager'
chart_version = config.get('chart_version')
if not chart_version:
chart_version = 'v1.7.0'
chart_version = 'v1.9.1'
helm_repo_name = config.get('certmgr_helm_repo_name')
if not helm_repo_name:
helm_repo_name = 'jetstack'
Expand All @@ -81,6 +64,9 @@ def add_namespace(obj):
),
version=chart_version,
namespace=ns.metadata.name,
values={
"installCRDs": "True"
},
# Configure the timeout value.
timeout=helm_timeout,
# By default Release resource will wait till all created resources
Expand All @@ -96,7 +82,7 @@ def add_namespace(obj):
# Force update if required
force_update=True)

certmgr_release = Release("certmgr", args=certmgr_release_args, opts=pulumi.ResourceOptions(depends_on=crd_dep))
certmgr_release = Release("certmgr", args=certmgr_release_args, opts=pulumi.ResourceOptions(depends_on=ns))

status = certmgr_release.status
pulumi.export("certmgr_status", status)
pulumi.export("certmgr_status", status)
Loading