"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "magnum/drivers/common/templates/kubernetes/fragments/enable-auto-healing.sh" between
magnum-8.0.0.tar.gz and magnum-8.1.0.tar.gz

About: OpenStack Magnum makes container orchestration engines such as Docker and Kubernetes available as first class resources in OpenStack.
The "Stein" series (latest release).

enable-auto-healing.sh  (magnum-8.0.0):enable-auto-healing.sh  (magnum-8.1.0)
#!/bin/sh #!/bin/sh
step="enable-auto-healing" step="enable-node-problem-detector"
printf "Starting to run ${step}\n" printf "Starting to run ${step}\n"
. /etc/sysconfig/heat-params . /etc/sysconfig/heat-params
_gcr_prefix=${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/} _gcr_prefix=${CONTAINER_INFRA_PREFIX:-k8s.gcr.io/}
# Generate Node Problem Detector manifest file # Generate Node Problem Detector manifest file
NPD_DEPLOY=/srv/magnum/kubernetes/manifests/npd.yaml NPD_DEPLOY=/srv/magnum/kubernetes/manifests/npd.yaml
[ -f ${NPD_DEPLOY} ] || { [ -f ${NPD_DEPLOY} ] || {
skipping to change at line 71 skipping to change at line 71
version: ${NODE_PROBLEM_DETECTOR_TAG} version: ${NODE_PROBLEM_DETECTOR_TAG}
kubernetes.io/cluster-service: "true" kubernetes.io/cluster-service: "true"
spec: spec:
containers: containers:
- name: node-problem-detector - name: node-problem-detector
image: ${_gcr_prefix}node-problem-detector:${NODE_PROBLEM_DETECTOR_TAG} image: ${_gcr_prefix}node-problem-detector:${NODE_PROBLEM_DETECTOR_TAG}
command: command:
- "/bin/sh" - "/bin/sh"
- "-c" - "-c"
# Pass both config to support both journald and syslog. # Pass both config to support both journald and syslog.
- "exec /node-problem-detector --logtostderr --system-log-monitors=/config/kernel-monitor.json,/config/kernel-monitor-filelog.json,/config/docker-monitor.json,/config/docker-monitor-filelog.json >>/var/log/node-problem-detector.log 2>&1" - "exec /node-problem-detector --logtostderr --system-log-monitors=/config/kernel-monitor.json,/config/kernel-monitor-filelog.json,/config/docker-monitor.json,/config/docker-monitor-filelog.json 2>&1 | tee /var/log/node-problem-detector.log"
securityContext: securityContext:
privileged: true privileged: true
resources: resources:
limits: limits:
cpu: "200m" cpu: "200m"
memory: "100Mi" memory: "100Mi"
requests: requests:
cpu: "20m" cpu: "20m"
memory: "20Mi" memory: "20Mi"
env: env:
skipping to change at line 118 skipping to change at line 118
echo "Waiting for Kubernetes API..." echo "Waiting for Kubernetes API..."
until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ] until [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do do
sleep 5 sleep 5
done done
kubectl apply -f ${NPD_DEPLOY} kubectl apply -f ${NPD_DEPLOY}
printf "Finished running ${step}\n" printf "Finished running ${step}\n"
# Deploy the Draino node-drainer when auto-healing is enabled.
# Reads variables sourced earlier from /etc/sysconfig/heat-params:
#   CONTAINER_INFRA_PREFIX - optional registry prefix override
#   AUTO_HEALING_ENABLED   - "true"/"false" toggle for this step
#   DRAINO_TAG             - image tag for the draino container
_docker_draino_prefix=${CONTAINER_INFRA_PREFIX:-docker.io/planetlabs/}
step="enable-auto-healing"
printf "Starting to run %s\n" "${step}"

# Case-insensitive comparison: the flag may arrive as True/TRUE/true.
if [ "$(echo "$AUTO_HEALING_ENABLED" | tr '[:upper:]' '[:lower:]')" = "true" ]; then
    # Generate the Draino manifest only if it does not already exist,
    # so re-running this fragment does not clobber an existing file.
    DRAINO_DEPLOY=/srv/magnum/kubernetes/manifests/draino.yaml
    [ -f "${DRAINO_DEPLOY}" ] || {
        echo "Writing File: ${DRAINO_DEPLOY}"
        mkdir -p "$(dirname "${DRAINO_DEPLOY}")"
        # Unquoted EOF delimiter: ${_docker_draino_prefix} and ${DRAINO_TAG}
        # are expanded into the manifest at write time.
        cat << EOF > "${DRAINO_DEPLOY}"
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels: {component: draino}
  name: draino
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels: {component: draino}
  name: draino
rules:
- apiGroups: ['']
  resources: [events]
  verbs: [create, patch, update]
- apiGroups: ['']
  resources: [nodes]
  verbs: [get, watch, list, update]
- apiGroups: ['']
  resources: [nodes/status]
  verbs: [patch]
- apiGroups: ['']
  resources: [pods]
  verbs: [get, watch, list]
- apiGroups: ['']
  resources: [pods/eviction]
  verbs: [create]
- apiGroups: [extensions]
  resources: [daemonsets]
  verbs: [get, watch, list]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels: {component: draino}
  name: draino
roleRef: {apiGroup: rbac.authorization.k8s.io, kind: ClusterRole, name: draino}
subjects:
- {kind: ServiceAccount, name: draino, namespace: kube-system}
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels: {component: draino}
  name: draino
  namespace: kube-system
spec:
  # Draino does not currently support locking/master election, so you should
  # only run one draino at a time. Draino won't start draining nodes immediately
  # so it's usually safe for multiple drainos to exist for a brief period of
  # time.
  replicas: 1
  selector:
    matchLabels: {component: draino}
  template:
    metadata:
      labels: {component: draino}
      name: draino
      namespace: kube-system
    spec:
      nodeSelector:
        node-role.kubernetes.io/master: ""
      hostNetwork: true
      tolerations:
        - effect: NoSchedule
          operator: Exists
        - key: CriticalAddonsOnly
          operator: Exists
        - effect: NoExecute
          operator: Exists
        - key: node.cloudprovider.kubernetes.io/uninitialized
          value: "true"
          effect: NoSchedule
        - key: node-role.kubernetes.io/master
          effect: NoSchedule
      containers:
      # You'll want to change these labels and conditions to suit your deployment.
      - command: [/draino, --node-label=draino-enabled=true, --evict-daemonset-pods, --evict-emptydir-pods, NotReady]
        image: ${_docker_draino_prefix}draino:${DRAINO_TAG}
        livenessProbe:
          httpGet: {path: /healthz, port: 10002}
          initialDelaySeconds: 30
        name: draino
      serviceAccountName: draino
EOF
    }
    kubectl apply -f "${DRAINO_DEPLOY}"
fi
printf "Finished running %s\n" "${step}"
 End of changes. 3 change blocks. 
2 lines changed or deleted 2 lines changed or added

Home  |  About  |  Features  |  All  |  Newest  |  Dox  |  Diffs  |  RSS Feeds  |  Screenshots  |  Comments  |  Imprint  |  Privacy  |  HTTP(S)