k8s upgrade to 1.26.0

Upgrades the Kubernetes client to v1.26.0.
Removes installation of containerd during genesis.sh to prevent a containerd downgrade.
Updates the Bitnami kubectl image to an image with curl installed, for the readiness check.

Change-Id: I3afd5a7e7211bae3f52263167a62a012da0619a0
This commit is contained in:
SPEARS, DUSTIN (ds443n) 2022-12-29 16:43:47 -05:00
parent 8ce937a9f7
commit 27a8b0d798
34 changed files with 118 additions and 117 deletions

View File

@ -17,7 +17,7 @@ release_uuid: null
images:
tags:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
kubernetes_keystone_webhook: docker.io/k8scloudprovider/k8s-keystone-auth:latest
scripted_test: docker.io/openstackhelm/heat:newton
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1

View File

@ -58,9 +58,9 @@ const:
images:
tags:
dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
anchor: bitnami/kubectl:1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
key_rotate: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
key_rotate: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
pull_policy: "IfNotPresent"
local_registry:
active: false

View File

@ -16,8 +16,8 @@ release_group: null
images:
tags:
anchor: bitnami/kubectl:1.24.4
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
pull_policy: "IfNotPresent"
labels:

View File

@ -64,7 +64,7 @@ conf:
images:
tags:
anchor: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
haproxy: haproxy:1.8.25
test: python:3.6
pull_policy: "IfNotPresent"

View File

@ -68,7 +68,7 @@ pod:
images:
tags:
proxy: k8s.gcr.io/kube-proxy-amd64:v1.24.4
proxy: k8s.gcr.io/kube-proxy-amd64:v1.26.0
pull_policy: "IfNotPresent"
proxy:

View File

@ -85,8 +85,8 @@ secrets:
images:
tags:
anchor: bitnami/kubectl:1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
pull_policy: "IfNotPresent"
network:

View File

@ -42,10 +42,10 @@ Here is a complete sample document:
images:
armada: quay.io/airshipit/armada:latest
kubernetes:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
etcd: quay.io/coreos/etcd:v3.5.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: ""

View File

@ -16,13 +16,13 @@ Sample Document to run containers in Docker runtime
data:
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubelet
mode: 0555
images:
haproxy: haproxy:1.8.3
helm:
helm: lachlanevenson/k8s-helm:v3.9.4
helm: lachlanevenson/k8s-helm:v3.10.2
monitoring_image: busybox:1.28.3
packages:
repositories:
@ -115,13 +115,13 @@ Sample Document to run containers in Containerd runtime
data:
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubelet
mode: 0555
images:
haproxy: haproxy:1.8.3
helm:
helm: lachlanevenson/k8s-helm:v3.9.4
helm: lachlanevenson/k8s-helm:v3.10.2
monitoring_image: busybox:1.28.3
packages:
additional:

View File

@ -46,10 +46,10 @@ data:
images:
armada: quay.io/airshipit/armada:master-ubuntu_bionic
kubernetes:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
etcd: quay.io/coreos/etcd:v3.5.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"

View File

@ -13,11 +13,11 @@ data:
enable: true
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubelet
mode: 0555
- path: /usr/local/bin/kubectl
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubectl
mode: 0555
- path: /etc/systemd/system/kube-cgroup.service

View File

@ -159,7 +159,7 @@ data:
values:
images:
tags:
proxy: k8s.gcr.io/kube-proxy-amd64:v1.24.4
proxy: k8s.gcr.io/kube-proxy-amd64:v1.26.0
network:
kubernetes_netloc: 127.0.0.1:6553
source:
@ -626,7 +626,7 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
haproxy: haproxy:1.8.3
test: python:3.6
@ -734,8 +734,8 @@ data:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: bitnami/kubectl:1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
network:
kubernetes_service_ip: 10.96.0.1
pod_cidr: 10.97.0.0/16
@ -800,8 +800,8 @@ data:
values:
images:
tags:
anchor: bitnami/kubectl:1.24.4
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
secrets:
service_account:
private_key: placeholder
@ -874,8 +874,8 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
source:
type: local

View File

@ -35,10 +35,10 @@ data:
images:
armada: quay.io/airshipit/armada:master-ubuntu_bionic
kubernetes:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
etcd: quay.io/coreos/etcd:v3.5.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"

View File

@ -13,11 +13,11 @@ data:
enable: true
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubelet
mode: 0555
- path: /usr/local/bin/kubectl
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubectl
mode: 0555
- path: /etc/systemd/system/kube-cgroup.service

View File

@ -200,7 +200,7 @@ data:
values:
images:
tags:
proxy: k8s.gcr.io/kube-proxy-amd64:v1.24.4
proxy: k8s.gcr.io/kube-proxy-amd64:v1.26.0
network:
kubernetes_netloc: 127.0.0.1:6553
source:
@ -642,7 +642,7 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
haproxy: haproxy:1.8.3
test: python:3.6
@ -735,8 +735,8 @@ data:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: bitnami/kubectl:1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
secrets:
service_account:
public_key: placeholder
@ -812,8 +812,8 @@ data:
values:
images:
tags:
anchor: bitnami/kubectl:1.24.4
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
secrets:
service_account:
private_key: placeholder
@ -885,8 +885,8 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
source:
type: local

View File

@ -46,10 +46,10 @@ data:
images:
armada: quay.io/airshipit/armada:master-ubuntu_bionic
kubernetes:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
etcd: quay.io/coreos/etcd:v3.5.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"

View File

@ -13,11 +13,11 @@ data:
enable: true
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubelet
mode: 0555
- path: /usr/local/bin/kubectl
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubectl
mode: 0555
- path: /etc/systemd/system/kube-cgroup.service
@ -94,7 +94,6 @@ data:
- jq
- chrony
required:
runtime: containerd
socat: socat
genesis:
additional:
@ -103,7 +102,6 @@ data:
- jq
- chrony
required:
runtime: containerd
socat: socat
join:
additional:
@ -112,7 +110,6 @@ data:
- jq
- chrony
required:
runtime: containerd
socat: socat
validation:
pod_logs:

View File

@ -159,7 +159,7 @@ data:
values:
images:
tags:
proxy: k8s.gcr.io/kube-proxy-amd64:v1.24.4
proxy: k8s.gcr.io/kube-proxy-amd64:v1.26.0
network:
kubernetes_netloc: 127.0.0.1:6553
source:
@ -523,7 +523,7 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
haproxy: haproxy:1.8.3
test: python:3.6
@ -632,8 +632,8 @@ data:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: bitnami/kubectl:1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
network:
kubernetes_service_ip: 10.96.0.1
pod_cidr: 10.97.0.0/16
@ -698,8 +698,8 @@ data:
values:
images:
tags:
anchor: bitnami/kubectl:1.24.4
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
secrets:
service_account:
private_key: placeholder
@ -772,8 +772,8 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
source:
type: local

View File

@ -46,10 +46,10 @@ data:
images:
armada: quay.io/airshipit/armada:master-ubuntu_bionic
kubernetes:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
etcd: quay.io/coreos/etcd:v3.5.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"

View File

@ -13,11 +13,11 @@ data:
enable: true
files:
- path: /opt/kubernetes/bin/kubelet
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubelet
mode: 0555
- path: /usr/local/bin/kubectl
tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
tar_path: kubernetes/node/bin/kubectl
mode: 0555
- path: /etc/systemd/system/kube-cgroup.service

View File

@ -159,7 +159,7 @@ data:
values:
images:
tags:
proxy: k8s.gcr.io/kube-proxy-amd64:v1.24.4
proxy: k8s.gcr.io/kube-proxy-amd64:v1.26.0
network:
kubernetes_netloc: 127.0.0.1:6553
source:
@ -529,7 +529,7 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
haproxy: haproxy:1.8.3
test: python:3.6
@ -638,8 +638,8 @@ data:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: bitnami/kubectl:1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
network:
kubernetes_service_ip: 10.96.0.1
pod_cidr: 10.97.0.0/16
@ -704,8 +704,8 @@ data:
values:
images:
tags:
anchor: bitnami/kubectl:1.24.4
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
secrets:
service_account:
private_key: placeholder
@ -778,8 +778,8 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
source:
type: local

View File

@ -191,7 +191,7 @@ def _fetch_tar_url(url):
# by a timeout.
for attempt in itertools.count():
try:
response = requests.get(url)
response = requests.get(url, timeout=5)
response.raise_for_status()
break
except requests.exceptions.RequestException:

View File

@ -30,7 +30,7 @@ def get_documents(design_ref, ctx=None):
def _get_from_basic_web(design_ref):
return requests.get(design_ref)
return requests.get(design_ref, timeout=5)
def _get_from_deckhand(design_ref, ctx=None):

View File

@ -167,16 +167,17 @@ def _detect_gpg_version():
def _generate_key():
# Ignore bandit false positive:
# B603:subprocess_without_shell_equals_true
# This method takes no input and generates random output.
result = subprocess.run( # nosec
['/usr/bin/openssl', 'rand', '-hex', '48'],
check=True,
env={
'RANDFILE': '/tmp/rnd',
},
stdout=subprocess.PIPE,
)
with tempfile.TemporaryDirectory() as tmp:
# Ignore bandit false positive:
# B603:subprocess_without_shell_equals_true
# This method takes no input and generates random output.
result = subprocess.run( # nosec
['/usr/bin/openssl', 'rand', '-hex', '48'],
check=True,
env={
'RANDFILE': tmp,
},
stdout=subprocess.PIPE,
)
return result.stdout.decode().strip()

View File

@ -31,9 +31,11 @@ class TarBundler:
if tar_info.size > 0:
# Ignore bandit false positive: B303:blacklist
# This is a basic checksum for debugging not a secure hash.
checksum = hashlib.new('md5', usedforsecurity=False)
checksum.update(data_bytes)
LOG.debug( # nosec
'Adding file path=%s size=%s md5=%s', path, tar_info.size,
hashlib.md5(data_bytes).hexdigest())
checksum.hexdigest())
else:
LOG.warning('Zero length file added to path=%s', path)

View File

@ -7,7 +7,7 @@ jsonschema==3.2.0
keystoneauth1==5.1.1
keystonemiddleware==10.2.0
setuptools==67.0.0
kubernetes==24.2.0
kubernetes==26.1.0
oslo.context==5.0.0
oslo.policy==4.0.0
PasteDeploy==3.0.1

View File

@ -45,7 +45,7 @@ jsonschema==3.2.0
keystoneauth1==5.1.1
keystonemiddleware==10.2.0
kombu==5.1.0
kubernetes==24.2.0
kubernetes==26.1.0
Mako==1.2.4
MarkupSafe==2.1.2
mccabe==0.6.1

View File

@ -104,13 +104,13 @@ VALID_DOCS = [
'armada': 'quay.io/airshipit/armada:master-ubuntu_bionic',
'kubernetes': {
'apiserver':
'k8s.gcr.io/kube-apiserver-amd64:v1.24.4',
'k8s.gcr.io/kube-apiserver-amd64:v1.26.0',
'controller-manager':
'k8s.gcr.io/kube-controller-manager-amd64:v1.24.4',
'k8s.gcr.io/kube-controller-manager-amd64:v1.26.0',
'etcd':
'quay.io/coreos/etcd:v3.5.4',
'scheduler':
'k8s.gcr.io/kube-scheduler-amd64:v1.24.4'
'k8s.gcr.io/kube-scheduler-amd64:v1.26.0'
}
},
'ip':
@ -146,7 +146,7 @@ VALID_DOCS = [
'tar_path':
'kubernetes/node/bin/kubelet',
'tar_url':
'https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz'
'https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz'
},
{
'content':

View File

@ -32,10 +32,10 @@ data:
images:
armada: quay.io/airshipit/armada:master-ubuntu_bionic
kubernetes:
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
controller-manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
etcd: quay.io/coreos/etcd:v3.5.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
files:
- path: /var/lib/anchor/calico-etcd-bootstrap
content: "# placeholder for triggering calico etcd bootstrapping"

View File

@ -14,7 +14,7 @@ data:
# attempt to actually run Kubernetes, only to construct the genesis and
# join scripts.
# - path: /opt/kubernetes/bin/kubelet
# tar_url: https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
# tar_url: https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz
# tar_path: kubernetes/node/bin/kubelet
# mode: 0555
- path: /etc/logrotate.d/json-logrotate

View File

@ -160,7 +160,7 @@ data:
values:
images:
tags:
proxy: k8s.gcr.io/kube-proxy-amd64:v1.24.4
proxy: k8s.gcr.io/kube-proxy-amd64:v1.26.0
network:
kubernetes_netloc: 127.0.0.1:6553
source:
@ -535,7 +535,7 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
haproxy: haproxy:1.8.3
test: python:3.6
@ -634,8 +634,8 @@ data:
endpoints: https://127.0.0.1:2378
images:
tags:
anchor: bitnami/kubectl:1.24.4
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
apiserver: k8s.gcr.io/kube-apiserver-amd64:v1.26.0
secrets:
service_account:
public_key: placeholder
@ -711,8 +711,8 @@ data:
values:
images:
tags:
anchor: bitnami/kubectl:1.24.4
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
controller_manager: k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
secrets:
service_account:
private_key: placeholder
@ -784,8 +784,8 @@ data:
images:
tags:
anchor: bitnami/kubectl:1.24.4
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.24.4
anchor: quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
scheduler: k8s.gcr.io/kube-scheduler-amd64:v1.26.0
source:
type: local

View File

@ -8,9 +8,9 @@ IMAGE_DEP_CHECK=quay.io/stackanetes/kubernetes-entrypoint:v0.3.1
IMAGE_ETCD=quay.io/coreos/etcd:v3.5.4
IMAGE_HAPROXY=haproxy:1.8.3
IMAGE_HELM=lachlanevenson/k8s-helm:v3.9.4
IMAGE_APISERVER=k8s.gcr.io/kube-apiserver-amd64:v1.24.4
IMAGE_CONTROLLER_MANAGER=k8s.gcr.io/kube-controller-manager-amd64:v1.24.4
IMAGE_SCHEDULER=k8s.gcr.io/kube-scheduler-amd64:v1.24.4
IMAGE_PROXY=k8s.gcr.io/kube-proxy-amd64:v1.24.4
IMAGE_ANCHOR=bitnami/kubectl:1.24.4
KUBELET_URL=https://dl.k8s.io/v1.24.4/kubernetes-node-linux-amd64.tar.gz
IMAGE_APISERVER=k8s.gcr.io/kube-apiserver-amd64:v1.26.0
IMAGE_CONTROLLER_MANAGER=k8s.gcr.io/kube-controller-manager-amd64:v1.26.0
IMAGE_SCHEDULER=k8s.gcr.io/kube-scheduler-amd64:v1.26.0
IMAGE_PROXY=k8s.gcr.io/kube-proxy-amd64:v1.26.0
IMAGE_ANCHOR=quay.io/airshipit/porthole-compute-utility:master-ubuntu_focal
KUBELET_URL=https://dl.k8s.io/v1.26.0/kubernetes-node-linux-amd64.tar.gz

View File

@ -1,10 +1,10 @@
# source_name, tag, cache_name
coredns/coredns,1.9.4,coredns
bitnami/kubectl,1.24.4,kubectl
k8s.gcr.io/kube-apiserver-amd64,v1.24.4,apiserver
k8s.gcr.io/kube-controller-manager-amd64,v1.24.4,controller-manager
k8s.gcr.io/kube-scheduler-amd64,v1.24.4,scheduler
k8s.gcr.io/kube-proxy-amd64,v1.24.4,proxy
quay.io/airshipit/porthole-compute-utility,master-ubuntu_focal,kubectl
k8s.gcr.io/kube-apiserver-amd64,v1.26.0,apiserver
k8s.gcr.io/kube-controller-manager-amd64,v1.26.0,controller-manager
k8s.gcr.io/kube-scheduler-amd64,v1.26.0,scheduler
k8s.gcr.io/kube-proxy-amd64,v1.26.0,proxy
lachlanevenson/k8s-helm,v3.9.4,helm
quay.io/airshipit/armada,master,armada
quay.io/calico/cni,v3.4.0,calico-cni

View File

@ -23,6 +23,7 @@ sudo apt-get install -q -y --no-install-recommends --allow-downgrades \
apt-transport-https \
build-essential \
ca-certificates \
apt-utils \
curl \
fio \
genisoimage \

View File

@ -25,7 +25,7 @@ commands =
deps =
-r{toxinidir}/test-requirements.txt
commands =
bandit -r promenade
bandit --skip B324 -r promenade
[testenv:docs]
pass_env = {[pkgenv]pass_env}
@ -64,7 +64,7 @@ deps =
commands =
yapf -rd {toxinidir}/promenade {toxinidir}/tests {toxinidir}/tools/image_tags.py
flake8 {toxinidir}/promenade
bandit -r promenade
bandit --skip B324 -r promenade
[flake8]
# [H106] Don't put vim configuration in source files.