Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[IBCDPE-1004] Move back to celery executor #28

Merged
merged 4 commits into from
Aug 21, 2024
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
68 changes: 34 additions & 34 deletions modules/apache-airflow/templates/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -241,7 +241,7 @@ rbac:

# Airflow executor
# One of: LocalExecutor, LocalKubernetesExecutor, CeleryExecutor, KubernetesExecutor, CeleryKubernetesExecutor
executor: "KubernetesExecutor"
executor: "CeleryExecutor"

# If this is true and using LocalExecutor/KubernetesExecutor/CeleryKubernetesExecutor, the scheduler's
# service account will have access to communicate with the api-server and launch pods.
Expand Down Expand Up @@ -469,7 +469,7 @@ kerberos:
# Airflow Worker Config
workers:
# Number of airflow celery workers in StatefulSet
replicas: 1
replicas: 2
# Max number of old replicasets to retain
revisionHistoryLimit: ~

Expand Down Expand Up @@ -641,16 +641,16 @@ workers:
nodeSelector: {}
runtimeClassName: ~
priorityClassName: ~
affinity: {}
affinity:
# default worker affinity is:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - podAffinityTerm:
# labelSelector:
# matchLabels:
# component: worker
# topologyKey: kubernetes.io/hostname
# weight: 100
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
component: worker
topologyKey: kubernetes.io/hostname
weight: 100
tolerations: []
topologySpreadConstraints: []
# hostAliases to use in worker pods.
Expand Down Expand Up @@ -723,7 +723,7 @@ scheduler:
command: ~
# Airflow 2.0 allows users to run multiple schedulers,
# However this feature is only recommended for MySQL 8+ and Postgres
replicas: 1
replicas: 2
# Max number of old replicasets to retain
revisionHistoryLimit: ~

Expand Down Expand Up @@ -809,16 +809,16 @@ scheduler:

# Select certain nodes for airflow scheduler pods.
nodeSelector: {}
affinity: {}
affinity:
# default scheduler affinity is:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - podAffinityTerm:
# labelSelector:
# matchLabels:
# component: scheduler
# topologyKey: kubernetes.io/hostname
# weight: 100
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
component: scheduler
topologyKey: kubernetes.io/hostname
weight: 100
tolerations: []
topologySpreadConstraints: []

Expand Down Expand Up @@ -1250,7 +1250,7 @@ webserver:
triggerer:
enabled: true
# Number of airflow triggerers in the deployment
replicas: 1
replicas: 2
# Max number of old replicasets to retain
revisionHistoryLimit: ~

Expand Down Expand Up @@ -1351,16 +1351,16 @@ triggerer:

# Select certain nodes for airflow triggerer pods.
nodeSelector: {}
affinity: {}
affinity:
# default triggerer affinity is:
# podAntiAffinity:
# preferredDuringSchedulingIgnoredDuringExecution:
# - podAffinityTerm:
# labelSelector:
# matchLabels:
# component: triggerer
# topologyKey: kubernetes.io/hostname
# weight: 100
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
component: triggerer
topologyKey: kubernetes.io/hostname
weight: 100
tolerations: []
topologySpreadConstraints: []

Expand Down Expand Up @@ -1661,7 +1661,7 @@ flower:

# StatsD settings
statsd:
enabled: true
enabled: false
# Max number of old replicasets to retain
revisionHistoryLimit: ~

Expand Down Expand Up @@ -2010,9 +2010,9 @@ limits: []

# This runs as a CronJob to cleanup old pods.
cleanup:
enabled: true
enabled: false
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This is not needed when we are running the celery executor, since the worker pods are controlled in a different way than when starting pods for tasks through the kubernetes executor.

# Run every 15 minutes (templated).
schedule: "*/15 * * * *"
schedule: "*/60 * * * *"
# To select a random-ish, deterministic starting minute between 3 and 12 inclusive for each release:
# '{{- add 3 (regexFind ".$" (adler32sum .Release.Name)) -}}-59/15 * * * *'
# To select the last digit of unix epoch time as the starting minute on each deploy:
Expand Down