Fixed: Drag and Drop Causes Flashing and Disappearing Cards (#6065)

The issue was that `inView` was being set to false because the intersection
observer's root was the scrollWrapper; it is now the viewport instead.
Fixes: #6001 
[Screencast from 2024-06-27
22-50-59.webm](https://github.com/twentyhq/twenty/assets/59247136/408d5fb5-91ab-4d3a-881d-bbcaf4817a79)
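
For context, a minimal sketch of the kind of change (not the actual diff), assuming the board cards gate their content on `useInView` from `react-intersection-observer`; the component and prop names below are illustrative:

```tsx
import { useInView } from 'react-intersection-observer';

// Hypothetical stand-in for the real card content.
const CardContent = () => <div>…</div>;

export const BoardCard = ({
  scrollWrapper,
}: {
  scrollWrapper: HTMLElement | null;
}) => {
  // Before: the observer was scoped to the scroll wrapper, so a card dragged
  // outside the wrapper's bounds reported inView === false and unmounted,
  // causing the flashing/disappearing cards.
  // const { ref, inView } = useInView({ root: scrollWrapper });

  // After: with no explicit root, the observer falls back to the viewport,
  // so a dragged card stays "in view" (and mounted) while visible on screen.
  const { ref, inView } = useInView();

  return <div ref={ref}>{inView ? <CardContent /> : null}</div>;
};
```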

---------

Co-authored-by: Lucas Bordeau <bordeau.lucas@gmail.com>
Author: Raunak Singh Jolly
Date: 2024-08-12 22:47:32 +05:30 (committed by GitHub)
Parent: b6202fe98c
Commit: ecdbe26b09

@@ -39,7 +39,7 @@ export const triggerUpdateRecordOptimisticEffect = ({
     fields: {
       [objectMetadataItem.namePlural]: (
         rootQueryCachedResponse,
-        { DELETE, readField, storeFieldName, toReference },
+        { readField, storeFieldName, toReference },
       ) => {
         const shouldSkip = !isObjectRecordConnectionWithRefs(
           objectMetadataItem.nameSingular,
@@ -64,7 +64,6 @@ export const triggerUpdateRecordOptimisticEffect = ({
         const rootQueryFilter = rootQueryVariables?.filter;
         const rootQueryOrderBy = rootQueryVariables?.orderBy;
-        const rootQueryLimit = rootQueryVariables?.first;
         const shouldTryToMatchFilter = isDefined(rootQueryFilter);
@@ -123,56 +122,6 @@ export const triggerUpdateRecordOptimisticEffect = ({
           });
         }
-        const shouldLimitNextRootQueryEdges = isDefined(rootQueryLimit);
-        // TODO: not sure we should trigger a DELETE here, as it will trigger a network request.
-        // Is it the responsibility of this optimistic effect function to delete a root query that will trigger a network request?
-        // Shouldn't we let the network response overwrite the cache and keep this util purely about cache updates?
-        //
-        // Should we even apply the limit at all, since with pagination we cannot really do optimistic
-        // rendering and should wait for the network response to update the cache?
-        //
-        // Maybe we could apply a merge function instead and exclude limit from the caching field arguments?
-        // Also, a problem that is not yet present may arise if we start
-        // to use limit arguments, as for now we rely on the hard-coded limit of 60 in pg_graphql.
-        // It is as if we had a { limit: 60 } argument in every query, but we don't,
-        // so because of this Apollo correctly merges the results of fetchMore for now.
-        // It wouldn't otherwise, as Thomas found with mail threads:
-        // he applied a limit of 2 and Apollo created one root query in the cache for each.
-        // In Thomas' case we should implement this, because he uses a hack to overwrite the first request with the result of the other.
-        // See: https://www.apollographql.com/docs/react/pagination/cursor-based/#relay-style-cursor-pagination
-        // See: https://www.apollographql.com/docs/react/pagination/core-api/#merging-queries
-        if (shouldLimitNextRootQueryEdges) {
-          // If the previous edges length was exactly at the required limit,
-          // but after the update the next edges length is under the limit,
-          // we cannot know for sure whether re-fetching the query
-          // would return more edges, so we cannot optimistically deduce
-          // the query's result.
-          // In this case, invalidate the cache entry so it can be re-fetched.
-          const rootQueryCurrentCachedRecordEdgesLengthIsAtLimit =
-            rootQueryCurrentEdges.length === rootQueryLimit;
-          // If the next edges length is under the limit, we can wait for the network response and merge the result;
-          // the merge function could then implement this mechanism to limit the number of edges in the cache.
-          const rootQueryNextCachedRecordEdgesLengthIsUnderLimit =
-            rootQueryNextEdges.length < rootQueryLimit;
-          const shouldDeleteRootQuerySoItCanBeRefetched =
-            rootQueryCurrentCachedRecordEdgesLengthIsAtLimit &&
-            rootQueryNextCachedRecordEdgesLengthIsUnderLimit;
-          if (shouldDeleteRootQuerySoItCanBeRefetched) {
-            return DELETE;
-          }
-          const rootQueryNextCachedRecordEdgesLengthIsAboveRootQueryLimit =
-            rootQueryNextEdges.length > rootQueryLimit;
-          if (rootQueryNextCachedRecordEdgesLengthIsAboveRootQueryLimit) {
-            rootQueryNextEdges.splice(rootQueryLimit);
-          }
-        }
         return {
           ...rootQueryConnection,
           edges: rootQueryNextEdges,
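
The removed TODO suggests an alternative to returning Apollo's `DELETE` sentinel: register a field `merge` function and keep the page-size argument out of the cache key, so pages accumulate in a single cached connection. A minimal sketch of that idea, assuming a hypothetical plural field `people` (real field policies would be generated per object metadata item):

```ts
import { InMemoryCache } from '@apollo/client';

// Sketch only: one cached connection per filter/orderBy combination, with
// pagination args (first/after) excluded from the cache key via keyArgs.
const cache = new InMemoryCache({
  typePolicies: {
    Query: {
      fields: {
        people: {
          // `first` and `after` are deliberately absent here, so fetchMore
          // pages land in the same cache entry instead of one entry per limit.
          keyArgs: ['filter', 'orderBy'],
          merge(existing: any, incoming: any) {
            // Concatenate edges; the incoming connection's other fields
            // (e.g. pageInfo) win.
            return {
              ...incoming,
              edges: [...(existing?.edges ?? []), ...incoming.edges],
            };
          },
        },
      },
    },
  },
});
```

With a policy along these lines, the limit handling removed in this commit would not belong in the optimistic effect at all: the cache, not the modifier, would decide how paginated edges are reconciled.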