chore: initialize project

Some checks failed:
Deploy monie-landing (kaniko) / build-and-deploy (push) — failing after 9m46s

.gitea/workflows/deploy-dev.yml (new file, 185 lines)
@@ -0,0 +1,185 @@
|
||||
name: Deploy monie-landing to dev (kaniko)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ develop ]
|
||||
|
||||
jobs:
|
||||
build-and-deploy:
|
||||
runs-on: [self-hosted, linux, k8s]
|
||||
|
||||
env:
|
||||
CI_NS: ci
|
||||
APP_NS: dev
|
||||
|
||||
# Kaniko job runs inside cluster pods and can reach registry via node IP.
|
||||
PUSH_REGISTRY: 192.168.1.250:32000
|
||||
# Runtime pull should use the endpoint configured in MicroK8s containerd.
|
||||
DEPLOY_REGISTRY: localhost:32000
|
||||
IMAGE: monie-landing
|
||||
|
||||
DEPLOYMENT: monie-landing
|
||||
CONTAINER: monie-landing
|
||||
|
||||
# repo без кредов (креды берём из secret внутри Kaniko Job)
|
||||
REPO_HOST: git.denjs.ru
|
||||
REPO_PATH: monie/monie-landing.git
|
||||
|
||||
steps:
|
||||
- name: Debug
|
||||
run: |
|
||||
set -eu
|
||||
echo "sha=${{ github.sha }}"
|
||||
echo "ref=${{ github.ref_name }}"
|
||||
echo "repo=git://${REPO_HOST}/${REPO_PATH}"
|
||||
microk8s kubectl version --client=true
|
||||
|
||||
- name: Build & push with Kaniko (K8s Job)
|
||||
env:
|
||||
SHA: ${{ github.sha }}
|
||||
REF: ${{ github.ref_name }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
JOB="kaniko-${SHA}"
|
||||
DEST="${PUSH_REGISTRY}/${IMAGE}:${SHA}"
|
||||
|
||||
echo "JOB=${JOB}"
|
||||
echo "DEST=${DEST}"
|
||||
echo "REF=${REF}"
|
||||
echo "REPO=git://${REPO_HOST}/${REPO_PATH}"
|
||||
|
||||
microk8s kubectl -n "${CI_NS}" delete job "${JOB}" --ignore-not-found=true
|
||||
|
||||
cat <<EOF_JOB | microk8s kubectl -n "${CI_NS}" apply -f -
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
name: ${JOB}
|
||||
labels:
|
||||
app: kaniko
|
||||
spec:
|
||||
backoffLimit: 0
|
||||
template:
|
||||
spec:
|
||||
restartPolicy: Never
|
||||
containers:
|
||||
- name: kaniko
|
||||
image: gcr.io/kaniko-project/executor:latest
|
||||
env:
|
||||
- name: GIT_USERNAME
|
||||
value: denis
|
||||
- name: GIT_PASSWORD
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
name: gitea-git-token
|
||||
key: token
|
||||
args:
|
||||
- --context=git://${REPO_HOST}/${REPO_PATH}#refs/heads/${REF}
|
||||
- --dockerfile=Dockerfile
|
||||
- --destination=${DEST}
|
||||
- --verbosity=debug
|
||||
- --cache=true
|
||||
- --cache-repo=${PUSH_REGISTRY}/${IMAGE}-cache
|
||||
- --insecure-registry=${PUSH_REGISTRY}
|
||||
- --skip-tls-verify-registry=${PUSH_REGISTRY}
|
||||
ttlSecondsAfterFinished: 3600
|
||||
EOF_JOB
|
||||
|
||||
DEADLINE_SECONDS=1800
|
||||
START_TS="$(date +%s)"
|
||||
OK=1
|
||||
|
||||
while true; do
|
||||
SUCCEEDED="$(microk8s kubectl -n "${CI_NS}" get job "${JOB}" -o jsonpath='{.status.succeeded}' 2>/dev/null || true)"
|
||||
FAILED="$(microk8s kubectl -n "${CI_NS}" get job "${JOB}" -o jsonpath='{.status.failed}' 2>/dev/null || true)"
|
||||
|
||||
SUCCEEDED="${SUCCEEDED:-0}"
|
||||
FAILED="${FAILED:-0}"
|
||||
|
||||
if [ "${SUCCEEDED}" -ge 1 ]; then
|
||||
OK=0
|
||||
break
|
||||
fi
|
||||
|
||||
if [ "${FAILED}" -ge 1 ]; then
|
||||
OK=1
|
||||
break
|
||||
fi
|
||||
|
||||
NOW_TS="$(date +%s)"
|
||||
if [ $((NOW_TS - START_TS)) -ge "${DEADLINE_SECONDS}" ]; then
|
||||
OK=2
|
||||
break
|
||||
fi
|
||||
|
||||
sleep 5
|
||||
done
|
||||
|
||||
echo "[ci] job status:"
|
||||
microk8s kubectl -n "${CI_NS}" get job "${JOB}" -o wide || true
|
||||
|
||||
echo "[ci] job logs (tail):"
|
||||
microk8s kubectl -n "${CI_NS}" logs "job/${JOB}" --tail=300 || true
|
||||
|
||||
if [ "${OK}" -ne 0 ]; then
|
||||
echo "[ci] job did not reach Complete; describing job/pods for debug"
|
||||
microk8s kubectl -n "${CI_NS}" describe job "${JOB}" || true
|
||||
microk8s kubectl -n "${CI_NS}" get pods -l job-name="${JOB}" -o wide || true
|
||||
microk8s kubectl -n "${CI_NS}" describe pod -l job-name="${JOB}" || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
- name: Deploy to dev
|
||||
env:
|
||||
SHA: ${{ github.sha }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TARGET_IMAGE="${DEPLOY_REGISTRY}/${IMAGE}:${SHA}"
|
||||
|
||||
microk8s kubectl -n "${APP_NS}" set image "deployment/${DEPLOYMENT}" \
|
||||
"${CONTAINER}=${TARGET_IMAGE}"
|
||||
|
||||
set +e
|
||||
microk8s kubectl -n "${APP_NS}" rollout status "deployment/${DEPLOYMENT}" --timeout=15m
|
||||
ROLLOUT_RC=$?
|
||||
set -e
|
||||
|
||||
if [ "${ROLLOUT_RC}" -ne 0 ]; then
|
||||
echo "[deploy] rollout did not complete in time; collecting diagnostics"
|
||||
|
||||
SELECTOR="$(microk8s kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
|
||||
-o jsonpath='{range $k,$v := .spec.selector.matchLabels}{$k}={$v},{end}' 2>/dev/null || true)"
|
||||
SELECTOR="${SELECTOR%,}"
|
||||
|
||||
microk8s kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" -o wide || true
|
||||
microk8s kubectl -n "${APP_NS}" describe deployment "${DEPLOYMENT}" || true
|
||||
|
||||
if [ -n "${SELECTOR}" ]; then
|
||||
microk8s kubectl -n "${APP_NS}" get rs -l "${SELECTOR}" -o wide || true
|
||||
microk8s kubectl -n "${APP_NS}" get pods -l "${SELECTOR}" -o wide || true
|
||||
microk8s kubectl -n "${APP_NS}" describe pods -l "${SELECTOR}" || true
|
||||
fi
|
||||
|
||||
microk8s kubectl -n "${APP_NS}" get events --sort-by=.lastTimestamp | tail -n 100 || true
|
||||
|
||||
DESIRED="$(microk8s kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
|
||||
-o jsonpath='{.spec.replicas}' 2>/dev/null || true)"
|
||||
UPDATED="$(microk8s kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
|
||||
-o jsonpath='{.status.updatedReplicas}' 2>/dev/null || true)"
|
||||
AVAILABLE="$(microk8s kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
|
||||
-o jsonpath='{.status.availableReplicas}' 2>/dev/null || true)"
|
||||
|
||||
DESIRED="${DESIRED:-0}"
|
||||
UPDATED="${UPDATED:-0}"
|
||||
AVAILABLE="${AVAILABLE:-0}"
|
||||
|
||||
echo "[deploy] desired=${DESIRED} updated=${UPDATED} available=${AVAILABLE}"
|
||||
|
||||
if [ "${UPDATED}" -ge "${DESIRED}" ] && [ "${AVAILABLE}" -ge "${DESIRED}" ]; then
|
||||
echo "[deploy] New replica is healthy; old replica termination is delayed. Continuing."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
exit "${ROLLOUT_RC}"
|
||||
fi
|
||||
.gitea/workflows/deploy-prod.yml (new file, 174 lines)
@@ -0,0 +1,174 @@
# .gitea/workflows/deploy-prod.yml
#
# Builds the image with an in-cluster Kaniko Job and rolls it out to the
# "prod" namespace on every push to the main branch.
name: Deploy monie-landing (kaniko)

on:
  push:
    branches: [ main ]

jobs:
  build-and-deploy:
    runs-on: [self-hosted, linux, k8s]

    env:
      CI_NS: ci        # namespace where the Kaniko build Job runs
      APP_NS: prod     # namespace of the application deployment

      # Kaniko job runs inside cluster pods and can reach registry via node IP.
      PUSH_REGISTRY: 192.168.1.250:32000
      # Runtime pull should use the endpoint configured in MicroK8s containerd.
      DEPLOY_REGISTRY: localhost:32000
      IMAGE: monie-landing

      DEPLOYMENT: monie-landing
      CONTAINER: monie-landing

      # Repo URL without credentials (credentials are injected from a secret
      # inside the Kaniko Job).
      REPO_HOST: git.denjs.ru
      REPO_PATH: monie/monie-landing.git

    steps:
      - name: Build & push image with Kaniko (K8s Job)
        env:
          SHA: ${{ github.sha }}
          REF: ${{ github.ref_name }}
        run: |
          set -euo pipefail

          JOB="kaniko-${SHA}"
          DEST="${PUSH_REGISTRY}/${IMAGE}:${SHA}"

          # Make the Job name reusable across re-runs of the same commit.
          kubectl -n "${CI_NS}" delete job "${JOB}" --ignore-not-found=true

          # Unquoted heredoc delimiter: ${JOB}/${DEST}/${REPO_*}/${REF} are
          # expanded by the shell before kubectl sees the manifest.
          cat <<EOF_JOB | kubectl -n "${CI_NS}" apply -f -
          apiVersion: batch/v1
          kind: Job
          metadata:
            name: ${JOB}
            labels:
              app: kaniko
          spec:
            backoffLimit: 0
            activeDeadlineSeconds: 1800
            ttlSecondsAfterFinished: 3600
            template:
              spec:
                restartPolicy: Never
                containers:
                  - name: kaniko
                    image: gcr.io/kaniko-project/executor:latest
                    imagePullPolicy: IfNotPresent
                    env:
                      - name: GIT_USERNAME
                        value: denis
                      - name: GIT_PASSWORD
                        valueFrom:
                          secretKeyRef:
                            name: gitea-git-token
                            key: token
                    args:
                      - --dockerfile=Dockerfile
                      - --context=git://${REPO_HOST}/${REPO_PATH}#refs/heads/${REF}
                      - --destination=${DEST}
                      - --verbosity=debug
                      - --cache=true
                      - --cache-repo=${PUSH_REGISTRY}/${IMAGE}-cache
                      - --insecure-registry=${PUSH_REGISTRY}
                      - --skip-tls-verify-registry=${PUSH_REGISTRY}
          EOF_JOB

          # Wait for a terminal Job state; do not hang the full 30 minutes when
          # the Job has already reported Failed.
          DEADLINE_SECONDS=1800
          START_TS="$(date +%s)"
          OK=1   # 0 = succeeded, 1 = failed, 2 = timed out

          while true; do
            SUCCEEDED="$(kubectl -n "${CI_NS}" get job "${JOB}" -o jsonpath='{.status.succeeded}' 2>/dev/null || true)"
            FAILED="$(kubectl -n "${CI_NS}" get job "${JOB}" -o jsonpath='{.status.failed}' 2>/dev/null || true)"

            SUCCEEDED="${SUCCEEDED:-0}"
            FAILED="${FAILED:-0}"

            if [ "${SUCCEEDED}" -ge 1 ]; then
              OK=0
              break
            fi

            if [ "${FAILED}" -ge 1 ]; then
              OK=1
              break
            fi

            NOW_TS="$(date +%s)"
            if [ $((NOW_TS - START_TS)) -ge "${DEADLINE_SECONDS}" ]; then
              OK=2
              break
            fi

            sleep 5
          done

          echo "[ci] job status:"
          kubectl -n "${CI_NS}" get job "${JOB}" -o wide || true

          echo "[ci] job logs (tail):"
          kubectl -n "${CI_NS}" logs "job/${JOB}" --tail=300 || true

          if [ "${OK}" -ne 0 ]; then
            echo "[ci] job did not reach Complete; describing job/pods for debug"
            kubectl -n "${CI_NS}" describe job "${JOB}" || true
            kubectl -n "${CI_NS}" get pods -l job-name="${JOB}" -o wide || true
            kubectl -n "${CI_NS}" describe pod -l job-name="${JOB}" || true
            exit 1
          fi

      - name: Deploy to prod
        env:
          SHA: ${{ github.sha }}
        run: |
          set -euo pipefail
          TARGET_IMAGE="${DEPLOY_REGISTRY}/${IMAGE}:${SHA}"

          kubectl -n "${APP_NS}" set image "deployment/${DEPLOYMENT}" \
            "${CONTAINER}=${TARGET_IMAGE}"

          # Capture rollout status without aborting under set -e.
          set +e
          kubectl -n "${APP_NS}" rollout status "deployment/${DEPLOYMENT}" --timeout=15m
          ROLLOUT_RC=$?
          set -e

          if [ "${ROLLOUT_RC}" -ne 0 ]; then
            echo "[deploy] rollout did not complete in time; collecting diagnostics"

            # NOTE(review): kubectl's jsonpath may not support the
            # 'range $k,$v :=' map-iteration form — confirm; a failure here is
            # tolerated via '|| true' plus the '-n' guard below.
            SELECTOR="$(kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
              -o jsonpath='{range $k,$v := .spec.selector.matchLabels}{$k}={$v},{end}' 2>/dev/null || true)"
            SELECTOR="${SELECTOR%,}"

            kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" -o wide || true
            kubectl -n "${APP_NS}" describe deployment "${DEPLOYMENT}" || true

            if [ -n "${SELECTOR}" ]; then
              kubectl -n "${APP_NS}" get rs -l "${SELECTOR}" -o wide || true
              kubectl -n "${APP_NS}" get pods -l "${SELECTOR}" -o wide || true
              kubectl -n "${APP_NS}" describe pods -l "${SELECTOR}" || true
            fi

            kubectl -n "${APP_NS}" get events --sort-by=.lastTimestamp | tail -n 100 || true

            DESIRED="$(kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
              -o jsonpath='{.spec.replicas}' 2>/dev/null || true)"
            UPDATED="$(kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
              -o jsonpath='{.status.updatedReplicas}' 2>/dev/null || true)"
            AVAILABLE="$(kubectl -n "${APP_NS}" get deployment "${DEPLOYMENT}" \
              -o jsonpath='{.status.availableReplicas}' 2>/dev/null || true)"

            DESIRED="${DESIRED:-0}"
            UPDATED="${UPDATED:-0}"
            AVAILABLE="${AVAILABLE:-0}"

            echo "[deploy] desired=${DESIRED} updated=${UPDATED} available=${AVAILABLE}"

            # New replicas may already be healthy while old ones are merely slow
            # to terminate; treat that as success.
            if [ "${UPDATED}" -ge "${DESIRED}" ] && [ "${AVAILABLE}" -ge "${DESIRED}" ]; then
              echo "[deploy] New replica is healthy; old replica termination is delayed. Continuing."
              exit 0
            fi

            exit "${ROLLOUT_RC}"
          fi
Reference in New Issue
Block a user