# helm-chart-lions-app/values.yaml
# (file-listing header from the paste source — "Files", 311 lines, 9.3 KiB, YAML —
# kept here as a comment so the document parses as valid YAML)
# ============================================================
# helm-chart-lions-app — Defaults
# ============================================================
# These values are sensible defaults for a Quarkus app on the
# Lions cluster. Each app overrides what is specific to it.
# ------------------------------------------------------------
# Image
# ------------------------------------------------------------
image:
  registry: registry.lions.dev
  repository: lionsdev
  # name: <auto> — default = .Release.Name (= app name)
  # name: override-name  # if the image name != release name
  tag: latest
  pullPolicy: IfNotPresent
  pullSecrets:
    - lionsregistry-secret
# ------------------------------------------------------------
# Replicas
# ------------------------------------------------------------
replicaCount: 1
# ------------------------------------------------------------
# Deployment strategy
# ------------------------------------------------------------
strategy:
  type: RollingUpdate
  rollingUpdate:
    maxSurge: 1
    maxUnavailable: 0
# ------------------------------------------------------------
# Resources — conservative defaults for a shared node
# ------------------------------------------------------------
resources:
  requests:
    cpu: 200m
    memory: 256Mi
  limits:
    cpu: "1"
    memory: 512Mi
# ------------------------------------------------------------
# Pod & container security (OWASP Kubernetes top 10)
# ------------------------------------------------------------
podSecurityContext:
  runAsNonRoot: true
  runAsUser: 1001
  runAsGroup: 1001
  fsGroup: 1001
  seccompProfile:
    type: RuntimeDefault
containerSecurityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  runAsNonRoot: true
  runAsUser: 1001
  runAsGroup: 1001
  capabilities:
    drop: ["ALL"]
# ------------------------------------------------------------
# Service
# ------------------------------------------------------------
service:
  type: ClusterIP
  port: 80
  targetPort: 8080
  protocol: TCP
  annotations: {}
# ------------------------------------------------------------
# Container
# ------------------------------------------------------------
container:
  port: 8080
  # extraArgs: []  # args passed to the entrypoint
  # extraEnv: []   # additional env entries (objects {name, value|valueFrom})
# ------------------------------------------------------------
# Probes (Quarkus SmallRye Health defaults)
# ------------------------------------------------------------
probes:
  liveness:
    enabled: true
    httpGet:
      path: /q/health/live
      port: 8080
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
    failureThreshold: 3
  readiness:
    enabled: true
    httpGet:
      path: /q/health/ready
      port: 8080
    initialDelaySeconds: 5
    periodSeconds: 10
    timeoutSeconds: 5
    failureThreshold: 3
  startup:
    enabled: false
    httpGet:
      path: /q/health/started
      port: 8080
    initialDelaySeconds: 10
    periodSeconds: 10
    failureThreshold: 30
# ------------------------------------------------------------
# Environment variables
# ------------------------------------------------------------
# env: list of plain-text env vars (non-sensitive values only)
env: []
# - name: QUARKUS_PROFILE
#   value: prod
# - name: JAVA_OPTS
#   value: "-Xms256m -Xmx512m"
# ------------------------------------------------------------
# ConfigMap (non-sensitive, shared env vars)
# ------------------------------------------------------------
configMap:
  enabled: true
  # envFrom: true → all keys are injected as env vars
  envFrom: true
  data: {}
    # QUARKUS_PROFILE: prod
    # APP_ENV: production
    # KAFKA_BOOTSTRAP_SERVERS: kafka-service.kafka.svc.cluster.local:9092
# ------------------------------------------------------------
# extraEnvFrom — additional envFrom entries for existing Secrets/ConfigMaps
# ------------------------------------------------------------
# Useful to reference K8s Secrets created outside Helm (e.g. migration
# from a legacy, pre-Vault deployment).
extraEnvFrom: []
# - secretRef:
#     name: my-existing-db-secret
# - configMapRef:
#     name: my-existing-config
# ------------------------------------------------------------
# ExternalSecret (Vault → K8s Secret via ESO)
# ------------------------------------------------------------
externalSecret:
  enabled: false
  # Reference to the ClusterSecretStore (cluster-side Vault)
  secretStoreRef:
    kind: ClusterSecretStore
    name: vault-backend
  refreshInterval: 1h
  target:
    creationPolicy: Owner
    deletionPolicy: Retain
  # Mapping: K8s Secret key → Vault path + property
  data: []
  # - secretKey: QUARKUS_DATASOURCE_USERNAME
  #   remoteRef:
  #     key: lions/applications/unionflow-server/db
  #     property: username
  # - secretKey: QUARKUS_DATASOURCE_PASSWORD
  #   remoteRef:
  #     key: lions/applications/unionflow-server/db
  #     property: password
# ------------------------------------------------------------
# Ingress
# ------------------------------------------------------------
ingress:
  enabled: true
  className: nginx
  clusterIssuer: letsencrypt-prod
  host: lions.dev
  # Two routing modes:
  #   1. simple: path "/", pathType Prefix, no rewrite
  #   2. prefix: path "/mypath(/|$)(.*)", pathType ImplementationSpecific, rewrite-target /$2
  path: /
  pathType: Prefix
  pathPrefix:
    enabled: false
    # strip: "/myapp"  # prefix to strip (e.g. /unionflow)
  tls:
    enabled: true
    # secretName: <auto> = "<release-name>-tls"
  rateLimit:
    enabled: true
    rpm: 1000  # nginx.ingress.kubernetes.io/limit-rpm
    connections: 100  # max simultaneous connections per IP
  cors:
    enabled: false
    origins: "*"
    methods: "GET, POST, PUT, DELETE, OPTIONS, PATCH"
    headers: "DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Authorization"
  # Additional nginx annotations
  annotations:
    nginx.ingress.kubernetes.io/proxy-body-size: "50m"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "300"
    nginx.ingress.kubernetes.io/proxy-buffer-size: "16k"
    nginx.ingress.kubernetes.io/proxy-buffers-number: "4"
    nginx.ingress.kubernetes.io/proxy-buffering: "on"
    nginx.ingress.kubernetes.io/enable-compression: "true"
    nginx.ingress.kubernetes.io/compression-types: "text/plain,text/css,application/json,application/javascript,text/xml,application/xml"
# ------------------------------------------------------------
# NetworkPolicy (zero-trust, default-deny + allow list)
# ------------------------------------------------------------
networkPolicy:
  enabled: true
  # Allowed ingress sources (always from ingress-nginx)
  allowIngressFrom:
    - namespaceSelector:
        kubernetes.io/metadata.name: ingress-nginx
    - namespaceSelector:
        kubernetes.io/metadata.name: monitoring  # Prometheus scrape
  # Egress: DNS + K8s API always allowed
  allowEgressDNS: true
  allowEgressKubeAPI: true
  # App-specific egress (to be overridden per app)
  allowEgressTo: []
  # - namespaceSelector:
  #     kubernetes.io/metadata.name: postgresql
  #   ports:
  #     - port: 5432
  #       protocol: TCP
  # - namespaceSelector:
  #     kubernetes.io/metadata.name: keycloak
  #   ports:
  #     - port: 8080
  #       protocol: TCP
# ------------------------------------------------------------
# PodDisruptionBudget
# ------------------------------------------------------------
pdb:
  enabled: false
  minAvailable: 1
# ------------------------------------------------------------
# ServiceAccount
# ------------------------------------------------------------
serviceAccount:
  create: true
  annotations: {}
  name: ""
# ------------------------------------------------------------
# ServiceMonitor (Prometheus Operator, if deployed)
# ------------------------------------------------------------
serviceMonitor:
  enabled: false
  path: /q/metrics
  interval: 30s
  scrapeTimeout: 10s
  labels: {}
# ------------------------------------------------------------
# Horizontal Pod Autoscaler
# ------------------------------------------------------------
hpa:
  enabled: false
  minReplicas: 1
  maxReplicas: 3
  targetCPUUtilizationPercentage: 70
  targetMemoryUtilizationPercentage: 80
# ------------------------------------------------------------
# Volumes — required with readOnlyRootFilesystem=true
# ------------------------------------------------------------
volumes:
  tmp:
    enabled: true
    sizeLimit: 100Mi
  logs:
    enabled: true
    sizeLimit: 500Mi
    mountPath: /app/logs
  # extra: user-defined volumes (PVC, configMap mounts, etc.)
  # NOTE(review): nesting of extra/volumeMounts under "volumes" is inferred
  # from the flattened original — confirm against the chart templates.
  extra: []
  volumeMounts: []
# ------------------------------------------------------------
# Scheduling
# ------------------------------------------------------------
nodeSelector:
  kubernetes.io/os: linux
tolerations:
  # Allow scheduling on the control-plane node (single-node cluster)
  - key: node-role.kubernetes.io/control-plane
    operator: Exists
    effect: NoSchedule
affinity: {}
# ------------------------------------------------------------
# Additional pod annotations/labels
# ------------------------------------------------------------
podAnnotations: {}
podLabels: {}
# Annotations/labels applied to the chart itself
commonAnnotations: {}
commonLabels: {}