|
| 1 | +// Copyright (c) 2026, the Dart project authors. Please see the AUTHORS file |
| 2 | +// for details. All rights reserved. Use of this source code is governed by a |
| 3 | +// BSD-style license that can be found in the LICENSE file. |
| 4 | + |
| 5 | +import 'dart:io'; |
| 6 | + |
| 7 | +import 'package:crypto/crypto.dart'; |
| 8 | +import 'package:pana/pana.dart'; |
| 9 | +import 'package:path/path.dart' as p; |
| 10 | +import 'package:pub_dev/database/schema.dart'; |
| 11 | +import 'package:typed_sql/typed_sql.dart'; |
| 12 | + |
// Name of the throwaway postgres container Atlas uses as its "dev database"
// for computing schema diffs; removed again in main()'s `finally` block.
const _devDbContainerName = 'atlas-dev-postgres';
// Name of the short-lived container that runs the Atlas CLI itself.
const _atlasMigratorContainerName = 'atlas-migrator';
| 15 | + |
/// Creates a new database migration by diffing the current desired schema
/// (from the generated `typed_sql` schema) against existing migrations using Atlas.
///
/// Usage: dart tool/create_migration.dart migration_name
Future<void> main(List<String> args) async {
  final migrationName = args.isEmpty ? 'new_migration' : args.first;

  // The app directory is the parent of the directory containing this script.
  final appDir = Directory(
    Platform.script.resolve('..').toFilePath(),
  ).absolute.path;
  final migrationsDir = p.join(appDir, 'migrations');
  await Directory(migrationsDir).create(recursive: true);

  // pid-suffixed temp files so concurrent invocations don't clobber each other.
  final tempSql = File(p.join(appDir, 'tmp_schema.$pid.sql'));
  final tempConfig = File(p.join(appDir, 'tmp_config.$pid.hcl'));

  try {
    print('Starting dev postgres instance: $_devDbContainerName');
    await runConstrained([
      'docker',
      'run',
      '--rm',
      '-d',
      '--name',
      _devDbContainerName,
      '-e',
      'POSTGRES_PASSWORD=pass',
      '-p',
      '5432:5432',
      'postgres:17-alpine',
    ], throwOnError: true);
    // NOTE(review): assumes postgres accepts connections within 3 seconds;
    // polling `pg_isready` would be more reliable — confirm before relying on
    // this in CI.
    await Future<void>.delayed(Duration(seconds: 3));

    // Atlas project config: desired schema mounted at /schema.sql, migrations
    // at /migrations, and the throwaway postgres above as the "dev database".
    await tempConfig.writeAsString('''
env "local" {
  migration {
    dir = "file://migrations"
  }
  src = "file://schema.sql"
  dev = "postgres://postgres:pass@localhost:5432/postgres?sslmode=disable"
}
''');

    print('Creating ${tempSql.path} with the current desired schema...');
    await tempSql.writeAsString(
      createPrimarySchemaTables(SqlDialect.postgres()),
    );

    // Clear existing atlas.sum so `migrate hash` regenerates it from scratch.
    final atlasSumFile = File(p.join(migrationsDir, 'atlas.sum'));
    if (atlasSumFile.existsSync()) {
      atlasSumFile.deleteSync();
    }

    // Run the Atlas container as the invoking user so files created inside
    // the mounted migrations directory are not owned by root.
    final uid = await _currentId('-u');
    final gid = await _currentId('-g');
    final user = '$uid:$gid';

    print('Creating atlas hash on existing migrations...');
    await _runAtlas(
      migrationsDir: migrationsDir,
      schemaPath: tempSql.path,
      configPath: tempConfig.path,
      user: user,
      command: ['migrate', 'hash'],
    );

    print('Creating migration: $migrationName...');
    await _runAtlas(
      migrationsDir: migrationsDir,
      schemaPath: tempSql.path,
      configPath: tempConfig.path,
      user: user,
      command: ['migrate', 'diff', migrationName],
    );

    print('Migration created successfully.');

    // No need for atlas.sum (we use our own sha256sum.txt).
    if (atlasSumFile.existsSync()) {
      atlasSumFile.deleteSync();
    }

    // Find all .sql files in the migrations dir, newest path first.
    final sqlFiles = Directory(migrationsDir)
        .listSync()
        .whereType<File>()
        .where((f) => f.path.endsWith('.sql'))
        .toList();
    sqlFiles.sort((a, b) => b.path.compareTo(a.path));

    if (sqlFiles.isEmpty) {
      // Do NOT call exit() here: it terminates the VM immediately and would
      // skip the `finally` cleanup, leaving the postgres container running
      // and the temp files behind. Set exitCode and return instead.
      stderr.writeln('Error: no SQL files found in migrations directory.');
      exitCode = 1;
      return;
    }

    // Atlas creates timestamped files; find the one that does NOT match
    // our sequential format (6 digits + underscore).
    final sequentialPattern = RegExp(r'^\d{6}_');
    final latestFile = sqlFiles
        .where((f) => !sequentialPattern.hasMatch(f.uri.pathSegments.last))
        .firstOrNull;

    if (latestFile != null) {
      print(
        'Timestamp detected. Renaming ${latestFile.path} to sequential format...',
      );

      // Next sequence number: one past the highest existing sequential
      // prefix. (Robust to gaps in the numbering, unlike counting files;
      // identical to the file count when numbering is contiguous.)
      var highest = 0;
      for (final f in sqlFiles) {
        final name = f.uri.pathSegments.last;
        if (sequentialPattern.hasMatch(name)) {
          final value = int.parse(name.substring(0, 6));
          if (value > highest) highest = value;
        }
      }
      final nextVal = '${highest + 1}'.padLeft(6, '0');
      final newFileName = '${nextVal}_$migrationName.sql';
      final newFile = File(p.join(migrationsDir, newFileName));

      await latestFile.rename(newFile.path);

      // Strip the `"public".` schema prefix emitted by Atlas.
      final rawContent = await newFile.readAsString();
      await newFile.writeAsString(rawContent.replaceAll('"public".', ''));

      // Format the generated SQL file in place.
      await runConstrained([
        'docker',
        'run',
        '--rm',
        '-u',
        user,
        '-v',
        '$migrationsDir:/work',
        '-w',
        '/work',
        'backplane/sql-formatter',
        '--config',
        '{"language": "postgresql", "uppercase": true, "indent": " "}',
        '--fix',
        newFileName,
      ], throwOnError: true);
    } else {
      print('File already follows sequential format, skipping rename.');
    }

    // Update sha256sum.txt.
    await _updateSha256sum(migrationsDir);
  } finally {
    // Best-effort cleanup: temp files and the dev postgres container.
    if (tempSql.existsSync()) tempSql.deleteSync();
    if (tempConfig.existsSync()) tempConfig.deleteSync();
    await Process.run('docker', ['rm', '-f', _devDbContainerName]);
  }
}

/// Returns the current user or group id (`id -u` / `id -g`) as a trimmed string.
Future<String> _currentId(String flag) async {
  final result = await runConstrained(['id', flag], throwOnError: true);
  return result.stdout.toString().trim();
}

/// Runs the Atlas CLI in a short-lived docker container with the migrations
/// directory, desired schema, and config mounted in, executing
/// `atlas <command...> --config file:///atlas.hcl --env local`.
Future<void> _runAtlas({
  required String migrationsDir,
  required String schemaPath,
  required String configPath,
  required String user,
  required List<String> command,
}) async {
  await runConstrained([
    'docker',
    'run',
    '--rm',
    '--name',
    _atlasMigratorContainerName,
    '--network',
    'host',
    '-u',
    user,
    '-v',
    '$migrationsDir:/migrations',
    '-v',
    '$schemaPath:/schema.sql',
    '-v',
    '$configPath:/atlas.hcl',
    'arigaio/atlas:latest-community',
    ...command,
    '--config',
    'file:///atlas.hcl',
    '--env',
    'local',
  ], throwOnError: true);
}
| 204 | + |
/// Writes `sha256sum.txt` into [migrationsDir], containing one
/// `<sha256-hex> <filename>` line per `.sql` file, sorted by file name.
///
/// Writes an empty file when the directory contains no `.sql` files.
Future<void> _updateSha256sum(String migrationsDir) async {
  // Collect the .sql files in deterministic (ascending path) order.
  final files = Directory(migrationsDir)
      .listSync()
      .whereType<File>()
      .where((f) => f.path.endsWith('.sql'))
      .toList()
    ..sort((a, b) => a.path.compareTo(b.path));

  final lines = <String>[];
  for (final file in files) {
    final digest = sha256.convert(await file.readAsBytes());
    lines.add('$digest ${file.uri.pathSegments.last}');
  }

  // Each entry is newline-terminated; an empty list yields an empty file.
  final content = lines.map((line) => '$line\n').join();
  await File(p.join(migrationsDir, 'sha256sum.txt')).writeAsString(content);
}
0 commit comments