-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy path config.yaml
More file actions
170 lines (162 loc) · 5.45 KB
/
config.yaml
File metadata and controls
170 lines (162 loc) · 5.45 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
# Source repository that CI jobs check out and build.
repo:
  url: https://github.com/InfiniTensor/InfiniOps.git
  branch: master
# GitHub integration: commit statuses are reported under this context prefix.
github:
  status_context_prefix: "ci/infiniops"
# Uncomment and replace the URLs below with actual host IPs to dispatch jobs to remote
# machines via `agent.py run`. Required on the trigger machine when each platform's
# agent runs on a separate host. See the README for multi-machine deployment details.
# agents:
#   nvidia:
#     url: http://nvidia-host:8080
#   iluvatar:
#     url: http://iluvatar-host:8080
#   metax:
#     url: http://metax-host:8080
#   moore:
#     url: http://moore-host:8080
#   cambricon:
#     url: http://cambricon-host:8080
# Per-platform CI definitions. Each platform declares:
#   image      - Dockerfile context and build arguments for the CI image
#   docker_args / volumes / env - run-time container options
#   setup      - command run inside the container before any job stage
#   jobs       - named jobs, each with resource limits and ordered stages
# NOTE(review): indentation was reconstructed from a flattened copy; the
# placement of docker_args/volumes/env at platform level (rather than under
# `image:`) is inferred from their run-time semantics — confirm against the
# consumer's schema.
platforms:
  nvidia:
    image:
      dockerfile: .ci/images/nvidia/
      build_args:
        BASE_IMAGE: nvcr.io/nvidia/pytorch:24.10-py3
    setup: pip install .[dev] --no-build-isolation
    jobs:
      gpu:
        resources:
          ngpus: 1  # Scheduler auto-picks this many free GPUs
          memory: 32GB
          shm_size: 16g  # Prevent PyTorch default 64MB shared memory limit
          timeout: 3600
        # env:  # Uncomment to inject extra env vars into the container.
        #   MY_VAR: value
        stages:
          - name: test
            run: pytest tests/ -n 8 -v --tb=short --junitxml=/workspace/results/test-results.xml
  iluvatar:
    image:
      dockerfile: .ci/images/iluvatar/
      build_args:
        BASE_IMAGE: corex:qs_pj20250825
        APT_MIRROR: http://archive.ubuntu.com/ubuntu
        PIP_INDEX_URL: https://pypi.org/simple
    docker_args:
      - "--privileged"
      - "--cap-add=ALL"
      - "--pid=host"
      - "--ipc=host"
    volumes:
      - /dev:/dev
      - /lib/firmware:/lib/firmware
      - /usr/src:/usr/src
      - /lib/modules:/lib/modules
    setup: pip install .[dev] --no-build-isolation
    jobs:
      gpu:
        resources:
          gpu_ids: "0"  # GPU visibility via CUDA_VISIBLE_DEVICES
          gpu_style: "none"  # CoreX: passthrough via --privileged + /dev mount
          memory: 32GB
          shm_size: 16g
          timeout: 3600
        stages:
          - name: test
            run: pytest tests/ -n 8 -v --tb=short --junitxml=/workspace/results/test-results.xml
  metax:
    image:
      dockerfile: .ci/images/metax/
      build_args:
        BASE_IMAGE: cr.metax-tech.com/public-library/maca-pytorch:3.2.1.4-torch2.4-py310-ubuntu22.04-amd64
        APT_MIRROR: http://archive.ubuntu.com/ubuntu
        PIP_INDEX_URL: https://pypi.org/simple
    docker_args:
      - "--privileged"
      - "--ulimit=memlock=-1"
      - "--ulimit=stack=67108864"
    setup: pip install .[dev] --no-build-isolation
    jobs:
      gpu:
        resources:
          gpu_ids: "0"
          gpu_style: "none"  # MetaX: passthrough via --privileged, no CUDA_VISIBLE_DEVICES
          memory: 32GB
          shm_size: 16g
          timeout: 3600
        stages:
          - name: test
            run: pytest tests/ -n 4 -v --tb=short --junitxml=/workspace/results/test-results.xml
  moore:
    image:
      dockerfile: .ci/images/moore/
      build_args:
        BASE_IMAGE: sh-harbor.mthreads.com/mcctest/vllm_musa:20251112_hygon
        APT_MIRROR: http://archive.ubuntu.com/ubuntu
        PIP_INDEX_URL: https://pypi.org/simple
    docker_args:
      - "--privileged"
    setup: pip install .[dev] --no-build-isolation
    jobs:
      gpu:
        resources:
          gpu_ids: "0"
          gpu_style: "none"  # Moore: passthrough via --privileged, MTHREADS_VISIBLE_DEVICES set by base image
          memory: 32GB
          shm_size: 16g
          timeout: 3600
        stages:
          - name: test
            run: pytest tests/test_add.py tests/test_gemm.py tests/test_swiglu.py -n 4 -v --tb=short --junitxml=/workspace/results/test-results.xml
  cambricon:
    image:
      dockerfile: .ci/images/cambricon/
      build_args:
        BASE_IMAGE: cambricon/pytorch:v1.25.3-torch2.1-anolisos8.8-py310
        PIP_INDEX_URL: https://pypi.org/simple
    docker_args:
      - "--privileged"
    setup: pip install .[dev] --no-build-isolation
    jobs:
      gpu:
        resources:
          gpu_ids: "0"
          gpu_style: mlu  # Cambricon: passthrough via --privileged, MLU_VISIBLE_DEVICES for device control
          memory: 32GB
          shm_size: 16g
          timeout: 3600
        stages:
          - name: test
            run: pytest tests/test_gemm.py -n 4 -v --tb=short --junitxml=/workspace/results/test-results.xml
  ascend:
    image:
      dockerfile: .ci/images/ascend/
      build_args:
        BASE_IMAGE: quay.io/ascend/vllm-ascend:v0.18.0rc1-openeuler
        PIP_INDEX_URL: https://pypi.org/simple
    docker_args:
      - "--runtime=runc"
      - "--privileged"
      - "--device=/dev/davinci0"
      - "--device=/dev/davinci_manager"
      - "--device=/dev/devmm_svm"
      - "--device=/dev/hisi_hdc"
    volumes:
      - /usr/local/Ascend/driver:/usr/local/Ascend/driver:ro
      - /usr/local/dcmi:/usr/local/dcmi:ro
      - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi:ro
    # NOTE(review): env appeared before `setup` in the source, so it is placed
    # at platform level here (the nvidia example shows a job-level `env:` —
    # both may be valid; confirm which level the consumer reads).
    env:
      ASCEND_HOME_PATH: /usr/local/Ascend/ascend-toolkit/latest
    setup: pip install .[dev] --no-build-isolation
    jobs:
      npu:
        resources:
          gpu_ids: "0"
          gpu_style: npu
          memory: 32GB
          shm_size: 16g
          timeout: 3600
        stages:
          - name: test
            run: pytest tests/ -n 1 -k npu -v --tb=short --junitxml=/workspace/results/test-results.xml