-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy pathraycluster-template-autoscaler.yaml
More file actions
195 lines (194 loc) · 7.97 KB
/
raycluster-template-autoscaler.yaml
File metadata and controls
195 lines (194 loc) · 7.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
---
apiVersion: ray.io/v1
kind: RayCluster
metadata:
  name: raycluster-autoscaler
  labels:
    controller-tools.k8s.io: "1.0"
spec:
  # The version of Ray you are using. Make sure all Ray containers are running this version of Ray.
  rayVersion: '2.9.0'
  # If `enableInTreeAutoscaling` is true, the Autoscaler sidecar will be added to the Ray head pod.
  # Ray Autoscaler integration is Beta with KubeRay >= 0.3.0 and Ray >= 2.0.0.
  enableInTreeAutoscaling: true
  # `autoscalerOptions` is an OPTIONAL field specifying configuration overrides for the Ray Autoscaler.
  # The example configuration shown below represents the DEFAULT values.
  # (You may delete autoscalerOptions if the defaults are suitable.)
  autoscalerOptions:
    # `upscalingMode` is "Default" or "Aggressive."
    # Conservative: Upscaling is rate-limited; the number of pending worker pods is at most the size of the Ray cluster.
    # Default: Upscaling is not rate-limited.
    # Aggressive: An alias for Default; upscaling is not rate-limited.
    upscalingMode: Default
    # `idleTimeoutSeconds` is the number of seconds to wait before scaling down a worker pod which is not using Ray resources.
    idleTimeoutSeconds: 60
    # `image` optionally overrides the Autoscaler's container image. The Autoscaler uses the same image as the Ray container by default.
    ## image: "my-repo/my-custom-autoscaler-image:tag"
    # `imagePullPolicy` optionally overrides the Autoscaler container's default image pull policy (IfNotPresent).
    imagePullPolicy: IfNotPresent
    # Optionally specify the Autoscaler container's securityContext.
    securityContext: {}
    env: []
    envFrom: []
    # resources specifies optional resource request and limit overrides for the Autoscaler container.
    # The default Autoscaler resource limits and requests should be sufficient for production use-cases.
    # However, for large Ray clusters, we recommend monitoring container resource usage to determine if overriding the defaults is required.
    resources:
      limits:
        cpu: "500m"
        memory: "512Mi"
      requests:
        cpu: "500m"
        memory: "512Mi"
  # Ray head pod template
  headGroupSpec:
    # The `rayStartParams` are used to configure the `ray start` command.
    # See https://github.com/ray-project/kuberay/blob/master/docs/guidance/rayStartParams.md for the default settings of `rayStartParams` in KubeRay.
    # See https://docs.ray.io/en/latest/cluster/cli.html#ray-start for all available options in `rayStartParams`.
    rayStartParams:
      dashboard-host: '0.0.0.0'
    # pod template
    template:
      spec:
        # nodeSelector:
        #   node.kubernetes.io/instance-type: "ml.m5.2xlarge"
        securityContext:
          runAsUser: 0
          runAsGroup: 0
          fsGroup: 0
        containers:
          - name: ray-head
            image: ${REGISTRY}aws-ray-custom:latest  ## IMAGE: Here you may choose which image your head pod will run
            env:  ## ENV: Here is where you can send stuff to the head pod
              - name: RAY_GRAFANA_IFRAME_HOST  ## PROMETHEUS AND GRAFANA
                value: http://localhost:3000
              - name: RAY_GRAFANA_HOST
                value: http://prometheus-grafana.prometheus-system.svc:80
              - name: RAY_PROMETHEUS_HOST
                value: http://prometheus-kube-prometheus-prometheus.prometheus-system.svc:9090
              # - name: AWS_ACCESS_KEY_ID  ## after running ./deploy/kubectl-secrets/kubectl-secret-keys.sh
              #   valueFrom:  ## if you need your code to access other private S3 buckets
              #     secretKeyRef:
              #       name: aws-creds
              #       key: AWS_ACCESS_KEY_ID
              # - name: AWS_SECRET_ACCESS_KEY
              #   valueFrom:
              #     secretKeyRef:
              #       name: aws-creds
              #       key: AWS_SECRET_ACCESS_KEY
              # - name: AWS_SESSION_TOKEN
              #   valueFrom:
              #     secretKeyRef:
              #       name: aws-creds
              #       key: AWS_SESSION_TOKEN
            lifecycle:
              preStop:
                exec:
                  command: ["/bin/sh", "-c", "ray stop"]
            resources:
              limits:  ## LIMITS: Set resource limits for your head pod
                cpu: 1
                memory: 8Gi
              requests:  ## REQUESTS: Set resource requests for your head pod
                cpu: 1
                memory: 8Gi
            ports:
              - containerPort: 6379
                name: gcs-server
              - containerPort: 8265  # Ray dashboard
                name: dashboard
              - containerPort: 10001
                name: client
              - containerPort: 8000
                name: serve
            volumeMounts:  ## VOLUMEMOUNTS: Mount your S3 CSI EKS Add-On to head pod
              - name: s3-storage
                mountPath: /s3
              - name: fsx-storage
                mountPath: /fsx
              - name: ray-logs
                mountPath: /tmp/ray
        volumes:
          - name: ray-logs
            emptyDir: {}
          - name: s3-storage
            persistentVolumeClaim:
              claimName: s3-claim
          - name: fsx-storage
            persistentVolumeClaim:
              claimName: fsx-claim
  workerGroupSpecs:
    # the pod replicas in this group typed worker
    - replicas: 4  ## REPLICAS: How many worker pods you want
      minReplicas: 1
      maxReplicas: 10
      # logical group name, for this called small-group, also can be functional
      groupName: gpu-group
      rayStartParams:
        num-gpus: "1"
      # pod template
      template:
        spec:
          # nodeSelector:
          #   node.kubernetes.io/instance-type: "ml.g5.8xlarge"
          securityContext:
            runAsUser: 0
            runAsGroup: 0
            fsGroup: 0
          containers:
            - name: ray-worker
              image: ${REGISTRY}aws-ray-custom:latest  ## IMAGE: Here you may choose which image your head node will run
              lifecycle:
                preStop:
                  exec:
                    command: ["/bin/sh", "-c", "ray stop"]
              resources:
                limits:  ## LIMITS: Set resource limits for your worker pods
                  cpu: 4
                  memory: 24Gi
                  nvidia.com/gpu: 1
                requests:  ## REQUESTS: Set resource requests for your worker pods
                  cpu: 4
                  memory: 24Gi
                  nvidia.com/gpu: 1
              volumeMounts:  ## VOLUMEMOUNTS: Mount your S3 CSI EKS Add-On to worker pods
                - name: s3-storage
                  mountPath: /s3
                - name: ray-logs
                  mountPath: /tmp/ray
                - name: fsx-storage
                  mountPath: /fsx
              # Please add the following taints to the GPU node.
              # tolerations:  ## TOLERATIONS: These would be the taints set on your node groups
              #   - key: "ray.io/node-type"  ## in this case, these taints are set on my worker node group
              #     operator: "Equal"  ## if no taints, leave blank or delete "tolerations"
              #     value: "worker"
              #     effect: "NoSchedule"
          volumes:
            - name: s3-storage
              persistentVolumeClaim:
                claimName: s3-claim
            - name: fsx-storage
              persistentVolumeClaim:
                claimName: fsx-claim
            - name: ray-logs
              emptyDir: {}
---
# UNCOMMENT BELOW IF YOU HAVE DEPLOYED PROMETHEUS
# (indentation inside the comments is significant — uncommenting yields a valid PodMonitor)
# apiVersion: monitoring.coreos.com/v1
# kind: PodMonitor
# metadata:
#   name: ray-workers-monitor
#   namespace: prometheus-system
#   labels:
#     release: prometheus
#     ray.io/cluster: rayml  # $RAY_CLUSTER_NAME: "kubectl get rayclusters.ray.io"
# spec:
#   jobLabel: ray-workers
#   namespaceSelector:
#     matchNames:
#       - default
#   selector:
#     matchLabels:
#       ray.io/node-type: worker
#   podMetricsEndpoints:
#     - port: metrics