// Ghost/ghost/admin/app/components/posts/debug.js

import Component from '@glimmer/component';
import moment from 'moment-timezone';
import {action} from '@ember/object';
import {didCancel, task, timeout} from 'ember-concurrency';
import {formatNumber} from 'ghost-admin/helpers/format-number';
import {ghPluralize} from 'ghost-admin/helpers/gh-pluralize';
import {inject as service} from '@ember/service';
import {tracked} from '@glimmer/tracking';
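// Debug screen for a post's email: loads batch and recipient-failure data,
// shows the state of the email analytics fetching loops, and live-reloads
// the email record and analytics status while the screen is open.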
export default class Debug extends Component {
@service ajax;
@service ghostPaths;
@service settings;
@service membersUtils;
@service utils;
@service feature;
@service store;
@tracked emailBatches = null;
@tracked recipientFailures = null;
@tracked loading = true;
@tracked analyticsStatus = null;
@tracked latestEmail = null;
get post() {
return this.args.post;
}
get email() {
return this.latestEmail ?? this.post.email;
}
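// Reloads the email record from the store (bypassing the cache) so the counts
// on screen stay fresh while polling; errors are swallowed so polling keeps running.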
async updateEmail() {
try {
this.latestEmail = await this.store.findRecord('email', this.post.email.id, {reload: true});
} catch (e) {
// Skip
}
}
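// Error summary shown when the email failed to send, including how many
// batches failed once the batch data has loaded.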
get emailError() {
// get failed batches count
let failedBatches = this.emailBatchesData?.filter((batch) => {
return batch.statusClass === 'failed';
}).length || 0;
// get total batch count
let totalBatches = this.emailBatchesData?.length || 0;
let details = (this.loading || !totalBatches) ? '' : `${failedBatches} of ${ghPluralize(totalBatches, 'batch')} failed to send, check below for more details.`;
return {
message: this.post.email?.error || 'Failed to send email.',
details
};
}
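// Normalized, display-ready view of the email record: status label,
// formatted timestamps, counts and tracking/feedback flags.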
get emailSettings() {
return {
statusClass: this.email?.status,
status: this.getStatusLabel(this.email?.status),
recipientFilter: this.email?.recipientFilter,
createdAt: this.email?.createdAtUTC ? moment(this.email.createdAtUTC).format('DD MMM, YYYY, HH:mm:ss') : '',
submittedAt: this.email?.submittedAtUTC ? moment(this.email.submittedAtUTC).format('DD MMM, YYYY, HH:mm:ss') : '',
emailsSent: this.email?.emailCount,
emailsDelivered: this.email?.deliveredCount,
emailsOpened: this.email?.openedCount,
emailsFailed: this.email?.failedCount,
trackOpens: this.email?.trackOpens,
trackClicks: this.email?.trackClicks,
feedbackEnabled: this.email?.feedbackEnabled
};
}
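// Formatted counts for the tab headers (temporary/permanent failures, errored batches).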
get tabTotals() {
return {
temporaryFailures: formatNumber(this.temporaryFailureData?.length || 0),
permanentFailures: formatNumber(this.permanentFailureData?.length || 0),
erroredBatches: formatNumber(this.emailBatchesData?.filter((batch) => {
return batch.statusClass === 'failed';
}).length || 0)
};
}
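// Maps the raw batch objects from the API into display rows with formatted
// dates, status labels and recipient counts.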
get emailBatchesData() {
return this.emailBatches?.map((batch) => {
return {
id: batch.id,
status: this.getStatusLabel(batch.status),
statusClass: batch.status,
createdAt: batch.created_at ? moment(batch.created_at).format('DD MMM, YYYY, HH:mm:ss') : '',
segment: batch.member_segment || '',
providerId: batch.provider_id || null,
errorMessage: batch.error_message || '',
errorStatusCode: batch.error_status_code || '',
recipientCount: batch.count?.recipients || 0
};
});
}
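// Recipient failures with severity 'temporary', mapped to display rows.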
get temporaryFailureData() {
return this.recipientFailures?.filter((failure) => {
return failure.severity === 'temporary';
}).map((failure) => {
return {
id: failure.id,
code: failure.code,
failedAt: failure.failed_at ? moment(failure.failed_at).format('DD MMM, YYYY, HH:mm:ss') : '',
processedAt: failure.email_recipient.processed_at ? moment(failure.email_recipient.processed_at).format('DD MMM, YYYY, HH:mm:ss') : '',
batchId: failure.email_recipient.batch_id,
enhancedCode: failure.enhanced_code,
message: failure.message,
recipient: {
name: failure.email_recipient.member_name || '',
email: failure.email_recipient.member_email || '',
initials: this.getInitials(failure.email_recipient?.member_name || failure.email_recipient?.member_email)
},
member: {
record: failure.member,
id: failure.member?.id,
name: failure.member?.name || '',
email: failure.member?.email || '',
initials: this.getInitials(failure.member?.name)
}
};
});
}
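// Same as above but for severity 'permanent'; these rows have no
// failed/processed timestamps or batch id.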
get permanentFailureData() {
return this.recipientFailures?.filter((failure) => {
return failure.severity === 'permanent';
}).map((failure) => {
return {
id: failure.id,
code: failure.code,
enhancedCode: failure.enhanced_code,
message: failure.message,
recipient: {
name: failure.email_recipient.member_name || '',
email: failure.email_recipient.member_email || '',
initials: this.getInitials(failure.email_recipient?.member_name || failure.email_recipient?.member_email)
},
member: {
record: failure.member,
id: failure.member?.id,
name: failure.member?.name || '',
email: failure.member?.email || '',
initials: this.getInitials(failure.member?.name)
}
};
});
}
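// Builds up to two initials from a member name for the avatar, falling back to 'U'.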
getInitials(name) {
if (!name) {
return 'U';
}
let names = name.split(' ');
let initials = names.length > 1 ? [names[0][0], names[names.length - 1][0]] : [names[0][0]];
return initials.join('').toUpperCase();
}
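// Human-readable label for known email/batch statuses; unknown statuses pass through unchanged.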
getStatusLabel(status) {
if (status === 'submitted') {
return 'Submitted';
} else if (status === 'submitting') {
return 'Submitting';
} else if (status === 'pending') {
return 'Pending';
} else if (status === 'failed') {
return 'Failed';
}
return status;
}
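// Kicks off the initial data fetches and starts the polling tasks
// when the post has an email.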
@action
loadData() {
if (this.post.email) {
this.fetchEmailBatches();
this.fetchRecipientFailures();
this.pollAnalyticsStatus.perform();
this.pollEmail.perform();
}
}
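// Wrapper that reuses the in-flight task run, if any, so repeated calls
// don't fire duplicate requests.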
async fetchEmailBatches() {
try {
if (this._fetchEmailBatches.isRunning) {
return this._fetchEmailBatches.last;
}
return this._fetchEmailBatches.perform();
} catch (e) {
if (!didCancel(e)) {
// re-throw the non-cancelation error
throw e;
}
}
}
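// Loads all batches for this email (emails/:id/batches) with recipient counts,
// ordered by status and creation date.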
@task
*_fetchEmailBatches() {
const data = {
include: 'count.recipients',
limit: 'all',
order: 'status asc, created_at desc'
};
let statsUrl = this.ghostPaths.url.api(`emails/${this.post.email.id}/batches`);
let result = yield this.ajax.request(statsUrl, {data});
this.emailBatches = result.batches;
this.loading = false;
}
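// Same dedupe pattern as fetchEmailBatches, for the recipient-failure list.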
async fetchRecipientFailures() {
try {
if (this._fetchRecipientFailures.isRunning) {
return this._fetchRecipientFailures.last;
}
return this._fetchRecipientFailures.perform();
} catch (e) {
if (!didCancel(e)) {
// re-throw the non-cancelation error
throw e;
}
}
}
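// Polls the analytics fetching-loop status every 5 seconds; ember-concurrency
// cancels the task when the component is destroyed.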
@task
*pollAnalyticsStatus() {
while (true) {
yield this.fetchAnalyticsStatus();
yield timeout(5 * 1000);
}
}
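// Refreshes the email record every 10 seconds so the counts update live.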
@task
*pollEmail() {
while (true) {
yield timeout(10 * 1000);
yield this.updateEmail();
}
}
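// Dedupe wrapper for the analytics-status task; errors are ignored.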
async fetchAnalyticsStatus() {
try {
if (this._fetchAnalyticsStatus.isRunning) {
return this._fetchAnalyticsStatus.last;
}
return this._fetchAnalyticsStatus.perform();
} catch (e) {
// Skip
}
}
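// Loads temporary and permanent recipient failures (emails/:id/recipient-failures)
// with the member and email_recipient records included.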
@task
*_fetchRecipientFailures() {
const data = {
include: 'member,email_recipient',
limit: 'all'
};
let statsUrl = this.ghostPaths.url.api(`/emails/${this.post.email.id}/recipient-failures`);
let result = yield this.ajax.request(statsUrl, {data});
this.recipientFailures = result.failures;
}
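// Loads the analytics fetching-loop status (emails/:id/analytics) and formats
// its timestamps in place for display, substituting 'N/A' when missing.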
@task
*_fetchAnalyticsStatus() {
let statsUrl = this.ghostPaths.url.api(`/emails/${this.post.email.id}/analytics`);
let result = yield this.ajax.request(statsUrl);
this.analyticsStatus = result;
// Parse dates
for (const type of Object.keys(result)) {
if (!result[type]) {
result[type] = {};
}
let object = result[type];
for (const key of ['lastStarted', 'lastBegin', 'lastEventTimestamp']) {
if (object[key]) {
object[key] = moment(object[key]).format('DD MMM, YYYY, HH:mm:ss.SSS');
} else {
object[key] = 'N/A';
}
}
if (object.schedule) {
object = object.schedule;
for (const key of ['begin', 'end']) {
if (object[key]) {
object[key] = moment(object[key]).format('DD MMM, YYYY, HH:mm:ss.SSS');
} else {
object[key] = 'N/A';
}
}
}
}
}
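// Schedules a (re-)fetch of analytics events for this email via PUT,
// then refreshes the loop status shown on screen.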
@action
scheduleAnalytics() {
try {
if (this._scheduleAnalytics.isRunning) {
return this._scheduleAnalytics.last;
}
return this._scheduleAnalytics.perform();
} catch (e) {
if (!didCancel(e)) {
// re-throw the non-cancelation error
throw e;
}
}
}
@task
*_scheduleAnalytics() {
let statsUrl = this.ghostPaths.url.api(`/emails/${this.post.email.id}/analytics`);
yield this.ajax.put(statsUrl, {});
yield this.fetchAnalyticsStatus();
}
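// Cancels any scheduled analytics fetch (DELETE on the global emails/analytics
// endpoint), then refreshes the loop status.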
@action
cancelScheduleAnalytics() {
try {
if (this._cancelScheduleAnalytics.isRunning) {
return this._cancelScheduleAnalytics.last;
}
return this._cancelScheduleAnalytics.perform();
} catch (e) {
if (!didCancel(e)) {
// re-throw the non-cancelation error
throw e;
}
}
}
@task
*_cancelScheduleAnalytics() {
let statsUrl = this.ghostPaths.url.api(`/emails/analytics`);
yield this.ajax.delete(statsUrl, {});
yield this.fetchAnalyticsStatus();
}
}