Mirror of https://github.com/TryGhost/Ghost.git, synced 2024-12-18 16:01:40 +03:00
Commit 4ff467794f

refs: https://github.com/TryGhost/DevOps/issues/11

This is a pretty huge commit, but the relevant points are:

* Each importer no longer needs to be passed a set of data, it just gets the data it needs
* Each importer specifies its dependencies, so that the order of import can be determined at runtime using a topological sort
* The main data generator function can just tell each importer to import the data it has

This makes working on the data generator much easier. Some other benefits are:

* Batched importing, massively speeding up the whole process
* `--tables` to set the exact tables you want to import, and specify the quantity of each
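The dependency ordering mentioned above can be pictured with a short sketch. This is an illustration only, not the code from the commit; it assumes each importer exposes a `name` and a `dependencies` array (both hypothetical field names) and applies Kahn's algorithm to produce a safe import order:

// Sketch only: order importers so that every importer runs after its
// dependencies. Assumes every dependency name refers to another importer
// in the list.
function sortImporters(importers) {
    const byName = new Map(importers.map(i => [i.name, i]));
    const inDegree = new Map(importers.map(i => [i.name, 0]));
    const dependents = new Map(importers.map(i => [i.name, []]));

    // Build edges from each dependency to the importers that need it
    for (const importer of importers) {
        for (const dep of importer.dependencies) {
            dependents.get(dep).push(importer.name);
            inDegree.set(importer.name, inDegree.get(importer.name) + 1);
        }
    }

    // Start with importers that depend on nothing
    const queue = importers.filter(i => inDegree.get(i.name) === 0).map(i => i.name);
    const order = [];

    while (queue.length > 0) {
        const current = queue.shift();
        order.push(byName.get(current));
        for (const dependent of dependents.get(current)) {
            inDegree.set(dependent, inDegree.get(dependent) - 1);
            if (inDegree.get(dependent) === 0) {
                queue.push(dependent);
            }
        }
    }

    if (order.length !== importers.length) {
        throw new Error('Circular dependency between importers');
    }
    return order;
}

For example, given importers for `users` and `posts` where `posts` declares `dependencies: ['users']`, the sort returns the `users` importer first.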
39 lines
1000 B
JavaScript
const {faker} = require('@faker-js/faker');

class JsonImporter {
    constructor(knex, transaction) {
        this.knex = knex;
        this.transaction = transaction;
    }

    /**
     * @typedef {Object} JsonImportOptions
     * @property {string} name Name of the table to import
     * @property {Array<Object>} data Models without ids to be imported
     * @property {Array<string>} [rows] Set of rows to be returned
     */

    /**
     * Import a dataset to the database
     * @param {JsonImportOptions} options
     * @returns {Promise}
     */
    async import({
        name,
        data,
        rows = []
    }) {
        // Give every model a random ObjectId-style id if it doesn't have one
        for (const obj of data) {
            if (!('id' in obj)) {
                obj.id = faker.database.mongodbObjectId();
            }
        }

        // Make sure the id column is always part of the returned rows
        if (!rows.includes('id')) {
            rows.unshift('id');
        }

        // Insert in batches of 500 rows, inside the shared transaction
        await this.knex.batchInsert(name, data, 500).transacting(this.transaction);
    }
}

module.exports = JsonImporter;
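A minimal usage sketch for the class above (the connection settings, require path, table name, and data are illustrative assumptions, not taken from the repo):

const knex = require('knex')({client: 'mysql2', connection: {/* ... */}});
const JsonImporter = require('./json-importer'); // hypothetical path

async function importTags() {
    // Run the whole import inside one transaction, so a failed batch
    // rolls everything back
    await knex.transaction(async (transaction) => {
        const importer = new JsonImporter(knex, transaction);
        await importer.import({
            name: 'tags',
            data: [
                {name: 'Fiction', slug: 'fiction'},
                {name: 'News', slug: 'news'}
            ]
        });
    });
}

Each object in `data` gets a generated `id` before insertion, so callers only need to supply the model fields themselves.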