|
38 | 38 | from posebench.analysis.complex_alignment import align_complex_to_protein_only |
39 | 39 | from posebench.data.components.protein_apo_to_holo_alignment import read_molecule |
40 | 40 | from posebench.models.inference_relaxation import relax_single_filepair |
41 | | -from posebench.models.minimize_energy import minimize_energy |
| 41 | +# from posebench.models.minimize_energy import minimize_energy |
42 | 42 | from posebench.utils.data_utils import ( |
43 | 43 | extract_sequences_from_protein_structure_file, |
44 | 44 | renumber_biopython_structure_residues, |
@@ -145,32 +145,26 @@ def insert_hpc_headers( |
145 | 145 | :return: Batch headers string for SLURM job scheduling. |
146 | 146 | """ |
147 | 147 | return f"""######################### Batch Headers ######################### |
148 | | -#SBATCH --partition {gpu_partition} # use reserved partition `chengji-lab-gpu` |
149 | | -#SBATCH --account {gpu_account} # NOTE: this must be specified to use the reserved partition above |
150 | | -#SBATCH --nodes=1 # NOTE: this needs to match Lightning's `Trainer(num_nodes=...)` |
151 | | -#SBATCH --gres gpu:{f'{gpu_type}:' if gpu_type else ''}1 # request {gpu_type} GPU resource(s) |
152 | | -#SBATCH --ntasks-per-node=1 # NOTE: this needs to be `1` on SLURM clusters when using Lightning's `ddp_spawn` strategy`; otherwise, set to match Lightning's quantity of `Trainer(devices=...)` |
153 | | -#SBATCH --mem={cpu_memory_in_gb}G # NOTE: use `--mem=0` to request all memory "available" on the assigned node |
154 | | -#SBATCH -t {time_limit} # time limit for the job (up to two days: `2-00:00:00`) |
155 | | -#SBATCH -J posebench_{method}_ensembling # job name |
156 | | -#SBATCH --output=R-%x.%j.out # output log file |
157 | | -#SBATCH --error=R-%x.%j.err # error log file |
158 | | -
|
159 | | -module purge |
160 | | -module load cuda/11.8.0_gcc_9.5.0 |
161 | | -
|
162 | | -# determine location of the project directory |
163 | | -use_private_project_dir=false # NOTE: customize as needed |
164 | | -if [ "$use_private_project_dir" = true ]; then |
165 | | - project_dir="/home/$USER/data/Repositories/Lab_Repositories/PoseBench" |
166 | | -else |
167 | | - project_dir="/cluster/pixstor/chengji-lab/$USER/Repositories/Lab_Repositories/PoseBench" |
168 | | -fi |
169 | | -
|
170 | | -# shellcheck source=/dev/null |
171 | | -source /home/$USER/mambaforge/etc/profile.d/conda.sh |
172 | | -
|
173 | | -cd "$project_dir" || exit""" |
| 148 | +#SBATCH --qos=shared                                                    # use specified quality of service (QOS) for job
| 149 | +#SBATCH --image=registry.nersc.gov/m5008/acmwhb/posebench:0.0.1 # use specified container image |
| 150 | +#SBATCH --account=m5008 # use specified account for billing (e.g., `m5008` for AI4Science projects) |
| 151 | +#SBATCH --nodes=1 # NOTE: this needs to match Lightning's `Trainer(num_nodes=...)` |
| 152 | +#SBATCH --ntasks-per-node=1                                             # NOTE: this needs to be `1` on SLURM clusters when using Lightning's `ddp_spawn` strategy; otherwise, set to match Lightning's quantity of `Trainer(devices=...)`
| 153 | +#SBATCH --time=00-05:00:00 # time limit for the job (up to 2 days: `02-00:00:00`) |
| 154 | +#SBATCH --job-name=inference_analysis_sweep # job name |
| 155 | +#SBATCH --output=scripts/perlmutter/regular/logs/inference_analysis_sweep%j.out # output log file |
| 156 | +#SBATCH --error=scripts/perlmutter/regular/logs/inference_analysis_sweep%j.err # error log file |
| 157 | +
|
| 158 | +# Wait for 5-10 seconds randomly to avoid race condition |
| 159 | +sleep $((RANDOM % 6 + 5)) |
| 160 | +
|
| 161 | +# Determine location of the project's directory |
| 162 | +# PROJECT_ID="m5008" |
| 163 | +# PROJECT_DIR="/global/cfs/cdirs/$PROJECT_ID/$USER/Repositories/posebench" # long term storage community drive |
| 164 | +PROJECT_DIR="/pscratch/sd/a/$USER/Repositories/posebench" # high-performance storage scratch drive with an 8-week purge policy |
| 165 | +cd "$PROJECT_DIR" || exit |
| 166 | +
|
| 167 | +""" |
174 | 168 |
|
175 | 169 |
|
176 | 170 | def create_diffdock_bash_script( |
|
0 commit comments