
// # Base Model
// This is the model from which all other Ghost models extend. The model is based on Bookshelf.Model, and provides
// several basic behaviours such as UUIDs, as well as a set of Data methods for accessing information from the database.
//
// The models are internal to Ghost; only the API and some internal functions such as migration and import/export
// access the models directly. All other parts of Ghost, including the blog frontend, admin UI, and apps, are only
// allowed to access data via the API.
var _ = require('lodash'),
bookshelf = require('bookshelf'),
moment = require('moment'),
Promise = require('bluebird'),
ObjectId = require('bson-objectid'),
config = require('../../config'),
db = require('../../data/db'),
common = require('../../lib/common'),
security = require('../../lib/security'),
filters = require('../../filters'),
schema = require('../../data/schema'),
urlService = require('../../services/url'),
validation = require('../../data/validation'),
plugins = require('../plugins'),
ghostBookshelf,
proto;
// ### ghostBookshelf
// Initializes a new Bookshelf instance called ghostBookshelf, for reference elsewhere in Ghost.
ghostBookshelf = bookshelf(db.knex);
// Load the Bookshelf registry plugin, which helps us avoid circular dependencies
ghostBookshelf.plugin('registry');
// Load the Ghost access rules plugin, which handles passing permissions/context through the model layer
ghostBookshelf.plugin(plugins.accessRules);
// Load the Ghost filter plugin, which handles applying a 'filter' to findPage requests
ghostBookshelf.plugin(plugins.filter);
// Load the Ghost include count plugin, which allows for the inclusion of cross-table counts
ghostBookshelf.plugin(plugins.includeCount);
// Load the Ghost pagination plugin, which gives us the `fetchPage` method on Models
ghostBookshelf.plugin(plugins.pagination);
// Load the update collision plugin, which detects conflicting concurrent updates
ghostBookshelf.plugin(plugins.collision);
// Manages nested updates (relationships)
ghostBookshelf.plugin('bookshelf-relations', {
allowedOptions: ['context'],
unsetRelations: true,
hooks: {
belongsToMany: {
after: function (existing, targets, options) {
// reorder tags
return Promise.each(targets.models, function (target, index) {
return existing.updatePivot({
sort_order: index
}, _.extend({}, options, {query: {where: {tag_id: target.id}}}));
});
},
beforeRelationCreation: function onCreatingRelation(model, data) {
data.id = ObjectId.generate();
}
}
}
});
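// Example (hypothetical sketch; `Post`, `postId` and the tag names are assumed for illustration):
// with bookshelf-relations enabled, a relation payload passed to save() is persisted for us,
// and the belongsToMany hook above re-writes `sort_order` on the pivot rows to match the array order.
//
//   Post.forge({id: postId}).save({tags: [{name: 'first'}, {name: 'second'}]}, {context: {internal: true}})
//       .then(function (post) {
//           // the pivot rows now carry sort_order 0 and 1
//       });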
// Cache an instance of the base model prototype
proto = ghostBookshelf.Model.prototype;
// ## ghostBookshelf.Model
// The Base Model which other Ghost objects will inherit from,
// including some convenience functions as static properties on the model.
ghostBookshelf.Model = ghostBookshelf.Model.extend({
// Bookshelf `hasTimestamps` - handles created_at and updated_at properties
hasTimestamps: true,
// Ghost option handling - get permitted attributes from server/data/schema.js, where the DB schema is defined
permittedAttributes: function permittedAttributes() {
return _.keys(schema.tables[this.tableName]);
},
// Bookshelf `defaults` - default values setup on every model creation
defaults: function defaults() {
return {};
},
// When loading an instance, subclasses can specify default columns to fetch
defaultColumnsToFetch: function defaultColumnsToFetch() {
return [];
},
// Bookshelf `initialize` - declare a constructor-like method for model creation
initialize: function initialize() {
var self = this,
options = arguments[1] || {};
// make the `include` option available for toJSON()
if (options.include) {
this.include = _.clone(options.include);
}
[
'fetching',
'fetching:collection',
'fetched',
'creating',
'created',
'updating',
'updated',
'destroying',
'destroyed',
'saved'
].forEach(function (eventName) {
var functionName = 'on' + eventName[0].toUpperCase() + eventName.slice(1);
if (functionName.indexOf(':') !== -1) {
functionName = functionName.slice(0, functionName.indexOf(':'))
+ functionName[functionName.indexOf(':') + 1].toUpperCase()
+ functionName.slice(functionName.indexOf(':') + 2);
functionName = functionName.replace(':', '');
}
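// e.g. 'created'             -> 'onCreated'
//      'fetching:collection' -> 'onFetchingCollection'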
if (!self[functionName]) {
return;
}
self.on(eventName, function eventTriggered() {
return this[functionName].apply(this, arguments);
});
});
this.on('saving', function onSaving() {
var self = this,
args = arguments;
return Promise.resolve(self.onSaving.apply(self, args))
.then(function validated() {
return Promise.resolve(self.onValidate.apply(self, args));
});
});
// NOTE: Please keep this call here. If we don't initialize the parent, bookshelf-relations won't work.
proto.initialize.call(this);
},
onValidate: function onValidate() {
return validation.validateSchema(this.tableName, this.toJSON());
},
/**
* http://knexjs.org/#Builder-forUpdate
* https://dev.mysql.com/doc/refman/5.7/en/innodb-locking-reads.html
*
* Lock target collection/model for further update operations.
* This avoids collisions and possible content override cases.
*/
onFetching: function onFetching(model, columns, options) {
if (options.forUpdate && options.transacting) {
options.query.forUpdate();
}
},
onFetchingCollection: function onFetchingCollection(model, columns, options) {
if (options.forUpdate && options.transacting) {
options.query.forUpdate();
}
},
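/**
* Example (hypothetical usage; `Post`, `postId` and the transaction wrapper are assumed for illustration):
* fetching inside a transaction with `forUpdate: true` issues a `SELECT ... FOR UPDATE`,
* so the fetched row stays locked until the transaction commits and a concurrent request
* cannot override the update in between.
*
*   ghostBookshelf.transaction(function (transacting) {
*       return Post.findOne({id: postId}, {transacting: transacting, forUpdate: true})
*           .then(function (post) {
*               return post.save({status: 'published'}, {transacting: transacting});
*           });
*   });
*/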
/**
* Adding resources implies setting these properties on the server side
* - set `created_by` based on the context
* - set `updated_by` based on the context
* - the bookshelf `timestamps` plugin sets `created_at` and `updated_at`
* - if the plugin is disabled (e.g. on import) we have a fallback condition
*
* Exceptions: internal context or importing
*/
onCreating: function onCreating(newObj, attr, options) {
// id = 0 is still a valid value for external usage
if (_.isUndefined(newObj.id) || _.isNull(newObj.id)) {
newObj.setId();
}
if (schema.tables[this.tableName].hasOwnProperty('created_by')) {
if (!options.importing || (options.importing && !this.get('created_by'))) {
this.set('created_by', this.contextUser(options));
}
}
this.set('updated_by', this.contextUser(options));
if (!newObj.get('created_at')) {
newObj.set('created_at', new Date());
}
if (!newObj.get('updated_at')) {
newObj.set('updated_at', new Date());
}
},
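// Example (hypothetical usage; `SomeModel` and `importedData` are assumed for illustration):
//   SomeModel.add(importedData, {importing: true, context: {internal: true}})
// keeps the `created_by`/`created_at` values supplied by the import file instead of overwriting them.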
onSaving: function onSaving(newObj) {
// Remove any properties which don't belong on the model
this.attributes = this.pick(this.permittedAttributes());
// Store the previous attributes so we can tell what was updated later
this._updatedAttributes = newObj.previousAttributes();
},
/**
* Changing resources implies setting these properties on the server side
* - set `updated_by` based on the context
* - ensure `created_at` never changes
* - ensure `created_by` never changes
* - the bookshelf `timestamps` plugin sets `updated_at` automatically
*
* Exceptions:
* - importing data
* - internal context
* - if no context
*/
onUpdating: function onUpdating(newObj, attr, options) {
this.set('updated_by', this.contextUser(options));
if (options && options.context && !options.internal && !options.importing) {
if (newObj.hasDateChanged('created_at', {beforeWrite: true})) {
newObj.set('created_at', this.previous('created_at'));
}
if (newObj.hasChanged('created_by')) {
newObj.set('created_by', this.previous('created_by'));
}
}
},
/**
* Before we insert dates into the database, we have to normalize them
* so the date format is the same in every supported database.
*/
fixDatesWhenSave: function fixDates(attrs) {
var self = this;
_.each(attrs, function each(value, key) {
if (value !== null
&& schema.tables[self.tableName].hasOwnProperty(key)
&& schema.tables[self.tableName][key].type === 'dateTime') {
attrs[key] = moment(value).format('YYYY-MM-DD HH:mm:ss');
}
});
return attrs;
},
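// e.g. (assuming a `published_at` dateTime column and a server running in UTC)
//   fixDatesWhenSave({published_at: '2017-04-19T13:53:23.000Z'})
//   // -> {published_at: '2017-04-19 13:53:23'}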
/**
* all supported databases (sqlite, mysql) return different values
*
* sqlite:
* - knex returns a UTC String
* mysql:
* - knex wraps the UTC value into a local JS Date
*/
fixDatesWhenFetch: function fixDates(attrs) {
var self = this, dateMoment;
_.each(attrs, function each(value, key) {
if (value !== null
&& schema.tables[self.tableName].hasOwnProperty(key)
&& schema.tables[self.tableName][key].type === 'dateTime') {
dateMoment = moment(value);
// CASE: You are somehow able to store e.g. 0000-00-00 00:00:00
// Protect the code base and return the current date time.
if (dateMoment.isValid()) {
attrs[key] = dateMoment.startOf('seconds').toDate();
} else {
attrs[key] = moment().startOf('seconds').toDate();
}
}
});
return attrs;
},
// Convert integers to real booleans
fixBools: function fixBools(attrs) {
var self = this;
_.each(attrs, function each(value, key) {
if (schema.tables[self.tableName].hasOwnProperty(key)
&& schema.tables[self.tableName][key].type === 'bool') {
attrs[key] = value ? true : false;
}
});
return attrs;
},
// Get the user from the options object
contextUser: function contextUser(options) {
options = options || {};
options.context = options.context || {};
if (options.context.user || ghostBookshelf.Model.isExternalUser(options.context.user)) {
return options.context.user;
} else if (options.context.internal) {
return ghostBookshelf.Model.internalUser;
} else if (this.get('id')) {
return this.get('id');
} else if (options.context.external) {
return ghostBookshelf.Model.externalUser;
} else {
throw new common.errors.NotFoundError({
message: common.i18n.t('errors.models.base.index.missingContext'),
level: 'critical'
});
}
},
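// e.g. (illustrative values)
//   contextUser({context: {user: '5951f5fca366002ebd5dbef7'}})  -> '5951f5fca366002ebd5dbef7'
//   contextUser({context: {internal: true}})                    -> ghostBookshelf.Model.internalUser
//   contextUser({context: {external: true}})                    -> ghostBookshelf.Model.externalUser
//   contextUser({}) on a model without an id                    -> throws NotFoundError (missing context)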
// format dates before writing to the DB (bools need no conversion on save)
format: function format(attrs) {
return this.fixDatesWhenSave(attrs);
},
// format dates and bools when fetching from the DB
parse: function parse(attrs) {
return this.fixBools(this.fixDatesWhenFetch(attrs));
},
toJSON: function toJSON(options) {
var attrs = _.extend({}, this.attributes),
self = this;
options = options || {};
options = _.pick(options, ['shallow', 'baseKey', 'include', 'context']);
if (options && options.shallow) {
return attrs;
}
if (options && options.include) {
this.include = _.union(this.include, options.include);
}
_.each(this.relations, function each(relation, key) {
if (key.substring(0, 7) !== '_pivot_') {
// if include is set, expand to full object
var fullKey = _.isEmpty(options.baseKey) ? key : options.baseKey + '.' + key;
if (_.includes(self.include, fullKey)) {
attrs[key] = relation.toJSON(_.extend({}, options, {baseKey: fullKey, include: self.include}));
}
}
});
// @TODO upgrade bookshelf & knex and use serialize & toJSON to do this in a neater way (see #6103)
return proto.finalize.call(this, attrs);
},
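// Example (hypothetical usage; `Post` and `postId` are assumed for illustration): passing
// `include` both on fetch and on toJSON() expands the named relations into the serialised output.
//
//   Post.findOne({id: postId}, {include: ['tags']})
//       .then(function (post) {
//           var json = post.toJSON({include: ['tags']});
//           // json.tags is an array of serialised tag objects
//       });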
// Get attributes that have been updated (values before a .save() call)
updatedAttributes: function updatedAttributes() {
return this._updatedAttributes || {};
},
// Get a specific updated attribute value
updated: function updated(attr) {
return this.updatedAttributes()[attr];
},
/**
* There is a difference between `updated` and `previous`:
* Depending on the hook (before or after writing into the db), both fields have a different meaning.
* e.g. onSaving -> before db write (has to use previous)
* onUpdated -> after db write (has to use updated)
*
* hasDateChanged('attr', {beforeWrite: true})
*/
hasDateChanged: function (attr, options) {
options = options || {};
return moment(this.get(attr)).diff(moment(options.beforeWrite ? this.previous(attr) : this.updated(attr))) !== 0;
},
/**
     * we auto-generate an ObjectId for each resource
     * no auto increment
*/
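    // Illustrative only: ObjectId.generate() returns a 24-character hex string,
    // e.g. a value like '5951f5fca366002ebd5dbef7' (made-up example).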
setId: function setId() {
this.set('id', ObjectId.generate());
}
}, {
// ## Data Utility Functions
/**
     * please use these static definitions when comparing IDs
     * we keep the type Number, because too many checks still rely on the value being a Number,
     * e.g. `context.user ? true : false` (if context.user is 0 as a Number, this condition is false)
*/
internalUser: 1,
externalUser: 0,
isInternalUser: function isInternalUser(id) {
return id === ghostBookshelf.Model.internalUser || id === ghostBookshelf.Model.internalUser.toString();
},
isExternalUser: function isExternalUser(id) {
return id === ghostBookshelf.Model.externalUser || id === ghostBookshelf.Model.externalUser.toString();
},
/**
* Returns an array of keys permitted in every method's `options` hash.
* Can be overridden and added to by a model's `permittedOptions` method.
*
     * importing: is used when importing a JSON file or when migrating the database
     *
     * @return {Array} Keys allowed in the `options` hash of every model's method.
*/
permittedOptions: function permittedOptions() {
// terms to whitelist for all methods.
return ['context', 'include', 'transacting', 'importing', 'forUpdate'];
},
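    // Illustrative sketch only (not part of the base model): a child model can extend this
    // whitelist by overriding `permittedOptions`, e.g. assuming a hypothetical `status` option
    // for findPage:
    //
    //     permittedOptions: function permittedOptions(methodName) {
    //         var options = ghostBookshelf.Model.permittedOptions(),
    //             extraOptions = {findPage: ['status', 'page', 'limit']};
    //         return extraOptions[methodName] ? options.concat(extraOptions[methodName]) : options;
    //     },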
/**
* Filters potentially unsafe model attributes, so you can pass them to Bookshelf / Knex.
* This filter should be called before each insert/update operation.
*
* @param {Object} data Has keys representing the model's attributes/fields in the database.
* @return {Object} The filtered results of the passed in data, containing only what's allowed in the schema.
*/
filterData: function filterData(data) {
var permittedAttributes = this.prototype.permittedAttributes(),
filteredData = _.pick(data, permittedAttributes),
sanitizedData = this.sanitizeData(filteredData);
return sanitizedData;
},
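    // Illustrative only: assuming `title` is a schema column and `bogus_field` is not,
    // filterData({title: 'Hello', bogus_field: 'x'}) returns {title: 'Hello'}
    // (with any date strings converted to JS Dates by sanitizeData).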
/**
* `sanitizeData` ensures that client data is in the correct format for further operations.
*
* Dates:
* - client dates are sent as ISO 8601 format (moment(..).format())
* - server dates are in JS Date format
* >> when bookshelf fetches data from the database, all dates are in JS Dates
* >> see `parse`
* - Bookshelf updates the model with the new client data via the `set` function
* - Bookshelf uses a simple `isEqual` function from lodash to detect real changes
     * - an `isEqual` check of `.previous(attr)` against `.get(attr)` therefore returns false, because a JS Date and an ISO string never match
     * - internally we use our own `hasDateChanged` whenever we have to compare previous/updated dates
     * - but Bookshelf's change detection is not under our control for this case
*
* @IMPORTANT
     * Before the new client data gets inserted again, the dates are transformed back into
     * proper strings, see `format`.
*/
sanitizeData: function sanitizeData(data) {
var tableName = _.result(this.prototype, 'tableName'), dateMoment;
_.each(data, function (value, key) {
if (value !== null
&& schema.tables[tableName].hasOwnProperty(key)
&& schema.tables[tableName][key].type === 'dateTime'
&& typeof value === 'string'
) {
dateMoment = moment(value);
// CASE: client sends `0000-00-00 00:00:00`
if (!dateMoment.isValid()) {
throw new common.errors.ValidationError({
message: common.i18n.t('errors.models.base.invalidDate', {key: key})
});
}
data[key] = dateMoment.toDate();
}
});
return data;
},
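    // Illustrative only: for a dateTime column such as `published_at`, a client value like
    // '2017-04-19T16:53:23.000Z' is converted into a JS Date here; an unparsable value such as
    // '0000-00-00 00:00:00' is rejected with a ValidationError.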
/**
* Filters potentially unsafe `options` in a model method's arguments, so you can pass them to Bookshelf / Knex.
* @param {Object} options Represents options to filter in order to be passed to the Bookshelf query.
* @param {String} methodName The name of the method to check valid options for.
* @return {Object} The filtered results of `options`.
*/
filterOptions: function filterOptions(options, methodName) {
var permittedOptions = this.permittedOptions(methodName, options),
filteredOptions = _.pick(options, permittedOptions);
return filteredOptions;
},
// ## Model Data Functions
/**
* ### Find All
* Fetches all the data for a particular model
* @param {Object} options (optional)
* @return {Promise(ghostBookshelf.Collection)} Collection of all Models
*/
findAll: function findAll(options) {
options = this.filterOptions(options, 'findAll');
options.withRelated = _.union(options.withRelated, options.include);
var itemCollection = this.forge(null, {context: options.context});
// transforms fictive keywords like 'all' (status:all) into correct allowed values
if (this.processOptions) {
this.processOptions(options);
}
itemCollection.applyDefaultAndCustomFilters(options);
return itemCollection.fetchAll(options).then(function then(result) {
if (options.include) {
_.each(result.models, function each(item) {
item.include = options.include;
});
}
return result;
});
},
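    // Usage sketch (illustrative, assuming a child model such as Post):
    //
    //     models.Post.findAll({context: {internal: true}, include: ['tags']})
    //         .then(function (posts) { /* Bookshelf collection */ });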
/**
* ### Find Page
* Find results by page - returns an object containing the
* information about the request (page, limit), along with the
* info needed for pagination (pages, total).
*
* **response:**
*
     *     {
     *         posts: [
     *             {...}, ...
     *         ],
     *         meta: {
     *             pagination: {
     *                 page: __,
     *                 limit: __,
     *                 pages: __,
     *                 total: __
     *             }
     *         }
     *     }
*
* @param {Object} options
*/
findPage: function findPage(options) {
options = options || {};
var self = this,
itemCollection = this.forge(null, {context: options.context}),
tableName = _.result(this.prototype, 'tableName'),
requestedColumns = options.columns;
// Set this to true or pass ?debug=true as an API option to get output
itemCollection.debug = options.debug && config.get('env') !== 'production';
// Filter options so that only permitted ones remain
options = this.filterOptions(options, 'findPage');
// This applies default properties like 'staticPages' and 'status'
// And then converts them to 'where' options... this behaviour is effectively deprecated in favour
        // of using filter - it's only being kept here so that we can transition cleanly.
this.processOptions(options);
// Add Filter behaviour
itemCollection.applyDefaultAndCustomFilters(options);
// Handle related objects
// TODO: this should just be done for all methods @ the API level
options.withRelated = _.union(options.withRelated, options.include);
// Ensure only valid fields/columns are added to query
// and append default columns to fetch
if (options.columns) {
options.columns = _.intersection(options.columns, this.prototype.permittedAttributes());
options.columns = _.union(options.columns, this.prototype.defaultColumnsToFetch());
}
if (options.order) {
options.order = self.parseOrderOption(options.order, options.include);
} else if (self.orderDefaultRaw) {
options.orderRaw = self.orderDefaultRaw();
} else {
options.order = self.orderDefaultOptions();
}
return itemCollection.fetchPage(options).then(function formatResponse(response) {
var data = {},
models = [];
options.columns = requestedColumns;
models = response.collection.toJSON(options);
// re-add any computed properties that were stripped out before the call to fetchPage
            // pick only the requested columns before returning JSON
data[tableName] = _.map(models, function transform(model) {
return options.columns ? _.pick(model, options.columns) : model;
});
data.meta = {pagination: response.pagination};
return data;
});
},
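    // Usage sketch (illustrative, assuming a child model such as Post which whitelists these options):
    //
    //     models.Post.findPage({page: 2, limit: 5, filter: 'status:published'})
    //         .then(function (result) {
    //             // result.posts           -> array of post JSON objects
    //             // result.meta.pagination -> {page, limit, pages, total, ...}
    //         });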
/**
* ### Find One
* Naive find one where data determines what to match on
* @param {Object} data
* @param {Object} options (optional)
* @return {Promise(ghostBookshelf.Model)} Single Model
*/
findOne: function findOne(data, options) {
data = this.filterData(data);
options = this.filterOptions(options, 'findOne');
// We pass include to forge so that toJSON has access
return this.forge(data, {include: options.include}).fetch(options);
},
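    // Usage sketch (illustrative): models.Post.findOne({slug: 'welcome'}, {include: ['tags']})
    //     .then(function (post) { /* single model, or null if nothing matches */ });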
/**
* ### Edit
* Naive edit
*
* We always forward the `method` option to Bookshelf, see http://bookshelfjs.org/#Model-instance-save.
* Based on the `method` option Bookshelf and Ghost can determine if a query is an insert or an update.
*
* @param {Object} data
* @param {Object} options (optional)
* @return {Promise(ghostBookshelf.Model)} Edited Model
*/
edit: function edit(data, options) {
var id = options.id,
model = this.forge({id: id});
data = this.filterData(data);
options = this.filterOptions(options, 'edit');
        // We allow you to disable timestamps when running a migration, so that the post's `updated_at` value stays the same
if (options.importing) {
model.hasTimestamps = false;
}
return model.fetch(options).then(function then(object) {
if (object) {
return object.save(data, _.merge({method: 'update'}, options));
}
});
},
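    // Usage sketch (illustrative; `postId` and `userId` are placeholders):
    //
    //     models.Post.edit({title: 'New title'}, {id: postId, context: {user: userId}})
    //         .then(function (editedPost) { /* undefined if no post with that id exists */ });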
/**
* ### Add
* Naive add
* @param {Object} data
* @param {Object} options (optional)
* @return {Promise(ghostBookshelf.Model)} Newly Added Model
*/
add: function add(data, options) {
data = this.filterData(data);
options = this.filterOptions(options, 'add');
var model = this.forge(data);
        // We allow you to disable timestamps when importing posts so that the new post's `updated_at` value is the same
        // as in the imported JSON blob. For more details see https://github.com/TryGhost/Ghost/issues/1696
if (options.importing) {
model.hasTimestamps = false;
}
// Bookshelf determines whether an operation is an update or an insert based on the id
        // Ghost auto-generates ObjectIds, so we need to tell Bookshelf here that we are inserting data
options.method = 'insert';
return model.save(null, options);
},
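    // Usage sketch (illustrative; `userId` is a placeholder):
    //
    //     models.Post.add({title: 'My draft'}, {context: {user: userId}})
    //         .then(function (createdPost) { /* newly inserted model with a generated ObjectId */ });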
/**
* ### Destroy
* Naive destroy
* @param {Object} options (optional)
* @return {Promise(ghostBookshelf.Model)} Empty Model
*/
destroy: function destroy(options) {
var id = options.id;
options = this.filterOptions(options, 'destroy');
// Fetch the object before destroying it, so that the changed data is available to events
return this.forge({id: id}).fetch(options).then(function then(obj) {
return obj.destroy(options);
});
},
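    // Usage sketch (illustrative; `postId` is a placeholder):
    //
    //     models.Post.destroy({id: postId}).then(function () { /* model removed */ });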
/**
* ### Generate Slug
* Create a string to act as the permalink for an object.
* @param {ghostBookshelf.Model} Model Model type to generate a slug for
* @param {String} base The string for which to generate a slug, usually a title or name
* @param {Object} options Options to pass to findOne
* @return {Promise(String)} Resolves to a unique slug string
*/
generateSlug: function generateSlug(Model, base, options) {
var slug,
slugTryCount = 1,
baseName = Model.prototype.tableName.replace(/s$/, ''),
                // Look for a matching slug; if one exists, append an incrementing number
checkIfSlugExists, longSlug;
checkIfSlugExists = function checkIfSlugExists(slugToFind) {
var args = {slug: slugToFind};
// status is needed for posts
if (options && options.status) {
args.status = options.status;
}
return Model.findOne(args, options).then(function then(found) {
var trimSpace;
if (!found) {
return slugToFind;
}
slugTryCount += 1;
// If we shortened, go back to the full version and try again
if (slugTryCount === 2 && longSlug) {
slugToFind = longSlug;
longSlug = null;
slugTryCount = 1;
return checkIfSlugExists(slugToFind);
}
// If this is the first time through, add the hyphen
if (slugTryCount === 2) {
slugToFind += '-';
} else {
// Otherwise, trim the number off the end
trimSpace = -(String(slugTryCount - 1).length);
slugToFind = slugToFind.slice(0, trimSpace);
}
slugToFind += slugTryCount;
return checkIfSlugExists(slugToFind);
});
};
        // the slug may never be longer than the allowed limit of 191 chars, but should also
        // leave room for the counter. We trim an over-long slug to 185 chars so we're always on the
        // safe side, also when checking for existing slugs.
slug = security.string.safe(base, options);
if (slug.length > 185) {
// CASE: don't cut the slug on import
if (!_.has(options, 'importing') || !options.importing) {
slug = slug.slice(0, 185);
}
}
// If it's a user, let's try to cut it down (unless this is a human request)
if (baseName === 'user' && options && options.shortSlug && slugTryCount === 1 && slug !== 'ghost-owner') {
longSlug = slug;
slug = (slug.indexOf('-') > -1) ? slug.substr(0, slug.indexOf('-')) : slug;
}
if (!_.has(options, 'importing') || !options.importing) {
// This checks if the first character of a tag name is a #. If it is, this
// is an internal tag, and as such we should add 'hash' to the beginning of the slug
if (baseName === 'tag' && /^#/.test(base)) {
slug = 'hash-' + slug;
}
}
// Check the filtered slug doesn't match any of the reserved keywords
return filters.doFilter('slug.reservedSlugs', config.get('slugs').reserved).then(function then(slugList) {
// Some keywords cannot be changed
slugList = _.union(slugList, urlService.utils.getProtectedSlugs());
return _.includes(slugList, slug) ? slug + '-' + baseName : slug;
}).then(function then(slug) {
// if slug is empty after trimming use the model name
if (!slug) {
slug = baseName;
}
// Test for duplicate slugs.
return checkIfSlugExists(slug);
});
},
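    // Illustrative only: generateSlug(models.Post, 'My First Post', {}) resolves to
    // 'my-first-post', or to 'my-first-post-2' if that slug is already taken.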
parseOrderOption: function (order, include) {
var permittedAttributes, result, rules;
permittedAttributes = this.prototype.permittedAttributes();
if (include && include.indexOf('count.posts') > -1) {
permittedAttributes.push('count.posts');
}
result = {};
rules = order.split(',');
_.each(rules, function (rule) {
var match, field, direction;
match = /^([a-z0-9_\.]+)\s+(asc|desc)$/i.exec(rule.trim());
// invalid order syntax
if (!match) {
return;
}
field = match[1].toLowerCase();
direction = match[2].toUpperCase();
if (permittedAttributes.indexOf(field) === -1) {
return;
}
result[field] = direction;
});
return result;
},
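    // Illustrative only: assuming both fields are permitted attributes,
    // parseOrderOption('title asc,created_at desc') returns {title: 'ASC', created_at: 'DESC'};
    // rules with invalid syntax or non-permitted fields are silently dropped.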
/**
* All models which have a visibility property, can use this static helper function.
* Filter models by visibility.
*
* @param {Array|Object} items
* @param {Array} visibility
* @param {Boolean} [explicit]
* @param {Function} [fn]
* @returns {Array|Object} filtered items
*/
filterByVisibility: function filterByVisibility(items, visibility, explicit, fn) {
var memo = _.isArray(items) ? [] : {};
if (_.includes(visibility, 'all')) {
return fn ? _.map(items, fn) : items;
}
// We don't want to change the structure of what is returned
return _.reduce(items, function (items, item, key) {
if (!item.visibility && !explicit || _.includes(visibility, item.visibility)) {
var newItem = fn ? fn(item) : item;
if (_.isArray(items)) {
memo.push(newItem);
} else {
memo[key] = newItem;
}
}
return memo;
}, memo);
},
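    // Illustrative only: filterByVisibility(tags, ['public']) keeps items whose visibility is
    // 'public' (and, unless `explicit` is true, items without a visibility value);
    // passing a visibility array that contains 'all' returns every item.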
/**
* Returns an Array of visibility values.
     * e.g. public,all => ['public', 'all']
* @param visibility
* @returns {*}
*/
parseVisibilityString: function parseVisibilityString(visibility) {
if (!visibility) {
return ['public'];
}
return _.map(visibility.split(','), _.trim);
}
});
// Export ghostBookshelf for use elsewhere
module.exports = ghostBookshelf;