import console from 'console';
import { connectionSource, performQuery } from './utils';
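
// NOTE: `connectionSource` is assumed to be a TypeORM-style DataSource and
// `performQuery` a helper that runs raw SQL and logs the message it is given;
// their exact shapes live in ./utils.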
async function dropSchemasSequentially() {
  try {
    // Open the database connection before issuing any queries.
    await connectionSource.initialize();

    // Fetch all user-created schemas, skipping PostgreSQL's own
    // system schemas (pg_* and information_schema).
    const schemas = await performQuery(
      `
        SELECT n.nspname AS "schema_name"
        FROM pg_catalog.pg_namespace n
        WHERE n.nspname !~ '^pg_' AND n.nspname <> 'information_schema'
      `,
      'Fetching schemas...',
    );
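
    // `performQuery` is assumed to return the result rows directly, i.e. an
    // array of objects shaped like { schema_name: string }.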

    // Iterate over the schemas and drop them one at a time. This avoids
    // dropping all of them at once, which would cause an "out of shared
    // memory" error.
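    // (Each object dropped by CASCADE takes a lock, and PostgreSQL's lock
    // table is sized by max_locks_per_transaction; one large drop can exhaust
    // it, while dropping schemas one by one releases locks between statements.)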
    for (const schema of schemas) {
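      // IF EXISTS makes each drop idempotent, CASCADE removes every object
      // inside the schema, and the double quotes preserve mixed-case or
      // otherwise unusual schema names.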
      await performQuery(
        `
          DROP SCHEMA IF EXISTS "${schema.schema_name}" CASCADE;
        `,
        `Dropping schema ${schema.schema_name}...`,
      );
    }

    console.log('All schemas dropped successfully.');
  } catch (err) {
    console.error('Error during schema dropping:', err);
    process.exitCode = 1;
  } finally {
    // Close the connection so the Node process can exit promptly
    // (assumes connectionSource exposes TypeORM's DataSource.destroy()).
    await connectionSource.destroy();
  }
}
dropSchemasSequentially();
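
// Usage sketch, assuming ts-node is installed and a hypothetical file path:
//   npx ts-node scripts/drop-schemas.ts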