Merge remote-tracking branch 'origin/trunk' into topic/projects

Chris Penner 2023-02-24 12:36:23 -06:00
commit d6f1c159b8
31 changed files with 1639 additions and 1403 deletions

View File

@ -104,7 +104,7 @@ jobs:
working-directory: ${{ github.workspace }}
run: |
mkdir stack && cd stack
curl -L https://github.com/commercialhaskell/stack/releases/download/v2.7.5/stack-2.9.1-linux-x86_64.tar.gz | tar -xz
curl -L https://github.com/commercialhaskell/stack/releases/download/v2.9.1/stack-2.9.1-linux-x86_64.tar.gz | tar -xz
echo "$PWD/stack-"* >> $GITHUB_PATH
- name: build
@ -176,7 +176,7 @@ jobs:
working-directory: ${{ github.workspace }}
run: |
mkdir stack && cd stack
curl -L https://github.com/commercialhaskell/stack/releases/download/v2.7.5/stack-2.9.1-osx-x86_64.tar.gz | tar -xz
curl -L https://github.com/commercialhaskell/stack/releases/download/v2.9.1/stack-2.9.1-osx-x86_64.tar.gz | tar -xz
echo "$PWD/stack-"* >> $GITHUB_PATH
- name: remove ~/.stack/setup-exe-cache on macOS
@ -252,7 +252,7 @@ jobs:
working-directory: ${{ github.workspace }}
run: |
mkdir stack && cd stack
curl -L https://github.com/commercialhaskell/stack/releases/download/v2.7.5/stack-2.9.1-windows-x86_64.tar.gz | tar -xz
curl -L https://github.com/commercialhaskell/stack/releases/download/v2.9.1/stack-2.9.1-windows-x86_64.tar.gz | tar -xz
echo "$PWD/stack-"* >> $GITHUB_PATH
- name: build

View File

@ -5,7 +5,7 @@ import qualified Data.List.NonEmpty as NEL
import qualified Data.List.NonEmpty as NonEmpty
import qualified Data.Text as Text
import Unison.Prelude
import Unison.Sqlite (FromField (..), FromRow (..), SQLData (..), ToField (..), ToRow (..), field)
import Unison.Sqlite
type ReversedSegments = NonEmpty Text
@ -32,11 +32,20 @@ instance (ToRow ref) => ToRow (NamedRef ref) where
toRow (NamedRef {reversedSegments = segments, ref}) =
[toField reversedName] <> toRow ref
where
reversedName = Text.intercalate "." . toList $ segments
reversedName =
segments
& toList
& Text.intercalate "."
& (<> ".") -- Add trailing dot, see notes on scoped_term_name_lookup schema
instance (FromRow ref) => FromRow (NamedRef ref) where
fromRow = do
reversedSegments <- NonEmpty.fromList . Text.splitOn "." <$> field
reversedSegments <-
field <&> \f ->
f
& Text.init -- Drop trailing dot, see notes on scoped_term_name_lookup schema
& Text.splitOn "."
& NonEmpty.fromList
ref <- fromRow
pure (NamedRef {reversedSegments, ref})
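
The encode/decode pair above hinges on every stored reversed_name carrying a trailing dot. A minimal standalone sketch of that round trip (hypothetical helper names, using only base and text; the NamedRef wrapper and the ref columns are left out):

{-# LANGUAGE OverloadedStrings #-}

import Data.List.NonEmpty (NonEmpty (..))
import qualified Data.List.NonEmpty as NonEmpty
import Data.Text (Text)
import qualified Data.Text as Text

-- "map" :| ["List", "base"]  ==>  "map.List.base."
encodeReversedName :: NonEmpty Text -> Text
encodeReversedName segments = Text.intercalate "." (NonEmpty.toList segments) <> "."

-- "map.List.base."  ==>  "map" :| ["List", "base"]  (drop the trailing dot, then split)
decodeReversedName :: Text -> NonEmpty Text
decodeReversedName = NonEmpty.fromList . Text.splitOn "." . Text.init

decodeReversedName inverts encodeReversedName for any non-empty list of segments, which is exactly what the ToRow/FromRow instances above rely on.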
@ -44,3 +53,18 @@ toRowWithNamespace :: (ToRow ref) => NamedRef ref -> [SQLData]
toRowWithNamespace nr = toRow nr <> [SQLText namespace]
where
namespace = Text.intercalate "." . reverse . NEL.tail . reversedSegments $ nr
-- | The new 'scoped' name lookup format is different from the old version.
--
-- Specifically, the scoped format adds a 'lastNameSegment' column, and adds a trailing '.' to the db format
-- of both the namespace and reversed_name.
--
-- Converts a NamedRef to SQLData of the form:
-- [reversedName, namespace, lastNameSegment] <> ref fields...
namedRefToScopedRow :: (ToRow ref) => NamedRef ref -> [SQLData]
namedRefToScopedRow (NamedRef {reversedSegments = revSegments, ref}) =
toRow $ (SQLText reversedName, SQLText namespace, SQLText lastNameSegment) :. ref
where
reversedName = (Text.intercalate "." . toList $ revSegments) <> "."
namespace = (Text.intercalate "." . reverse . NEL.tail $ revSegments) <> "."
lastNameSegment = NEL.head revSegments
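
namedRefToScopedRow adds two text columns beyond reversed_name. A hypothetical helper (not part of the commit) showing just those two for the name base.List.map, whose reversed segments are "map" :| ["List", "base"]:

{-# LANGUAGE OverloadedStrings #-}

import Data.List.NonEmpty (NonEmpty (..))
import qualified Data.List.NonEmpty as NEL
import Data.Text (Text)
import qualified Data.Text as Text

scopedExtraColumns :: NonEmpty Text -> (Text, Text)
scopedExtraColumns revSegments =
  ( Text.intercalate "." (reverse (NEL.tail revSegments)) <> ".", -- namespace, e.g. "base.List."
    NEL.head revSegments -- last_name_segment, e.g. "map"
  )

The real function then appends the toRow encoding of the ref, so the full row is [reversed_name, namespace, last_name_segment] followed by the ref fields, as the comment above describes.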

View File

@ -67,9 +67,10 @@ module U.Codebase.Sqlite.Operations
termsMentioningType,
-- ** name lookup index
updateNameIndex,
rootNamesByPath,
NamesByPath (..),
checkBranchHashNameLookupExists,
buildNameLookupForBranchHash,
-- * reflog
getReflog,
@ -1072,19 +1073,37 @@ derivedDependencies cid = do
cids <- traverse s2cReferenceId sids
pure $ Set.fromList cids
-- | Given lists of names to add and remove, update the index accordingly.
updateNameIndex ::
-- | Apply a set of name updates to an existing index.
buildNameLookupForBranchHash ::
-- The existing name lookup index to copy before applying the diff.
-- If Nothing, run the diff against an empty index.
-- If Just, the name lookup must exist or an error will be thrown.
Maybe BranchHash ->
BranchHash ->
-- | (add terms, remove terms)
([S.NamedRef (C.Referent, Maybe C.ConstructorType)], [S.NamedRef C.Referent]) ->
-- | (add types, remove types)
([S.NamedRef C.Reference], [S.NamedRef C.Reference]) ->
Transaction ()
updateNameIndex (newTermNames, removedTermNames) (newTypeNames, removedTypeNames) = do
Q.ensureNameLookupTables
Q.removeTermNames ((fmap c2sTextReferent <$> removedTermNames))
Q.removeTypeNames ((fmap c2sTextReference <$> removedTypeNames))
Q.insertTermNames (fmap (c2sTextReferent *** fmap c2sConstructorType) <$> newTermNames)
Q.insertTypeNames (fmap c2sTextReference <$> newTypeNames)
buildNameLookupForBranchHash mayExistingBranchIndex newBranchHash (newTermNames, removedTermNames) (newTypeNames, removedTypeNames) = do
newBranchHashId <- Q.saveBranchHash newBranchHash
Q.trackNewBranchHashNameLookup newBranchHashId
case mayExistingBranchIndex of
Nothing -> pure ()
Just existingBranchIndex -> do
unlessM (checkBranchHashNameLookupExists existingBranchIndex) $ error "buildNameLookupForBranchHash: existingBranchIndex was provided, but no index was found for that branch hash."
existingBranchHashId <- Q.saveBranchHash existingBranchIndex
Q.copyScopedNameLookup existingBranchHashId newBranchHashId
Q.removeScopedTermNames newBranchHashId ((fmap c2sTextReferent <$> removedTermNames))
Q.removeScopedTypeNames newBranchHashId ((fmap c2sTextReference <$> removedTypeNames))
Q.insertScopedTermNames newBranchHashId (fmap (c2sTextReferent *** fmap c2sConstructorType) <$> newTermNames)
Q.insertScopedTypeNames newBranchHashId (fmap c2sTextReference <$> newTypeNames)
-- | Check whether we've already got an index for a given branch hash.
checkBranchHashNameLookupExists :: BranchHash -> Transaction Bool
checkBranchHashNameLookupExists bh = do
bhId <- Q.saveBranchHash bh
Q.checkBranchHashNameLookupExists bhId
data NamesByPath = NamesByPath
{ termNamesInPath :: [S.NamedRef (C.Referent, Maybe C.ConstructorType)],
@ -1092,13 +1111,16 @@ data NamesByPath = NamesByPath
}
-- | Get all the term and type names for the root namespace from the lookup table.
-- Requires that an index for this branch hash already exists, which is currently
-- only true on Share.
rootNamesByPath ::
-- | A relative namespace string, e.g. Just "base.List"
Maybe Text ->
Transaction NamesByPath
rootNamesByPath path = do
termNamesInPath <- Q.rootTermNamesByPath path
typeNamesInPath <- Q.rootTypeNamesByPath path
bhId <- Q.expectNamespaceRootBranchHashId
termNamesInPath <- Q.termNamesWithinNamespace bhId path
typeNamesInPath <- Q.typeNamesWithinNamespace bhId path
pure $
NamesByPath
{ termNamesInPath = convertTerms <$> termNamesInPath,

View File

@ -62,6 +62,7 @@ module U.Codebase.Sqlite.Queries
loadNamespaceRoot,
setNamespaceRoot,
expectNamespaceRoot,
expectNamespaceRootBranchHashId,
-- * namespace_statistics table
saveNamespaceStats,
@ -158,15 +159,16 @@ module U.Codebase.Sqlite.Queries
causalHashIdByBase32Prefix,
-- * Name Lookup
ensureNameLookupTables,
copyScopedNameLookup,
dropNameLookupTables,
insertTermNames,
insertTypeNames,
removeTermNames,
removeTypeNames,
rootTermNamesByPath,
rootTypeNamesByPath,
getNamespaceDefinitionCount,
insertScopedTermNames,
insertScopedTypeNames,
removeScopedTermNames,
removeScopedTypeNames,
termNamesWithinNamespace,
typeNamesWithinNamespace,
checkBranchHashNameLookupExists,
trackNewBranchHashNameLookup,
-- * Reflog
appendReflog,
@ -329,7 +331,7 @@ import qualified Unison.Util.Lens as Lens
-- * main squeeze
currentSchemaVersion :: SchemaVersion
currentSchemaVersion = 8
currentSchemaVersion = 9
createSchema :: Transaction ()
createSchema = do
@ -1135,6 +1137,11 @@ loadCausalParentsByHash hash =
|]
(Only hash)
expectNamespaceRootBranchHashId :: Transaction BranchHashId
expectNamespaceRootBranchHashId = do
chId <- expectNamespaceRoot
expectCausalValueHashId chId
expectNamespaceRoot :: Transaction CausalHashId
expectNamespaceRoot =
queryOneCol_ loadNamespaceRootSql
@ -1637,81 +1644,98 @@ dropNameLookupTables = do
DROP TABLE IF EXISTS type_name_lookup
|]
-- | Ensure the name lookup tables exist.
ensureNameLookupTables :: Transaction ()
ensureNameLookupTables = do
execute_
[here|
CREATE TABLE IF NOT EXISTS term_name_lookup (
-- The name of the term: E.g. map.List.base
reversed_name TEXT NOT NULL,
-- The namespace containing this term, not reversed: E.g. base.List
namespace TEXT NOT NULL,
referent_builtin TEXT NULL,
referent_component_hash TEXT NULL,
referent_component_index INTEGER NULL,
referent_constructor_index INTEGER NULL,
referent_constructor_type INTEGER NULL,
PRIMARY KEY (reversed_name, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index)
)
|]
execute_
[here|
CREATE INDEX IF NOT EXISTS term_names_by_namespace ON term_name_lookup(namespace)
|]
-- Don't need this index at the moment, but will likely be useful later.
-- execute_
-- [here|
-- CREATE INDEX IF NOT EXISTS term_name_by_referent_lookup ON term_name_lookup(referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index)
-- |]
execute_
[here|
CREATE TABLE IF NOT EXISTS type_name_lookup (
-- The name of the term: E.g. List.base
reversed_name TEXT NOT NULL,
-- The namespace containing this term, not reversed: E.g. base.List
namespace TEXT NOT NULL,
reference_builtin TEXT NULL,
reference_component_hash INTEGER NULL,
reference_component_index INTEGER NULL,
PRIMARY KEY (reversed_name, reference_builtin, reference_component_hash, reference_component_index)
);
|]
execute_
[here|
CREATE INDEX IF NOT EXISTS type_names_by_namespace ON type_name_lookup(namespace)
|]
-- | Copies existing name lookup rows but replaces their branch hash id.
-- This is a low-level operation used as part of deriving a new name lookup index
-- from an existing one as efficiently as possible.
copyScopedNameLookup :: BranchHashId -> BranchHashId -> Transaction ()
copyScopedNameLookup fromBHId toBHId = do
execute termsCopySql (toBHId, fromBHId)
execute typesCopySql (toBHId, fromBHId)
where
termsCopySql =
[here|
INSERT INTO scoped_term_name_lookup(root_branch_hash_id, reversed_name, last_name_segment, namespace, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, referent_constructor_type)
SELECT ?, reversed_name, last_name_segment, namespace, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, referent_constructor_type
FROM scoped_term_name_lookup
WHERE root_branch_hash_id = ?
|]
typesCopySql =
[here|
INSERT INTO scoped_type_name_lookup(root_branch_hash_id, reversed_name, last_name_segment, namespace, reference_builtin, reference_component_hash, reference_component_index)
SELECT ?, reversed_name, last_name_segment, namespace, reference_builtin, reference_component_hash, reference_component_index
FROM scoped_type_name_lookup
WHERE root_branch_hash_id = ?
|]
-- Don't need this index at the moment, but will likely be useful later.
-- execute_
-- [here|
-- CREATE INDEX IF NOT EXISTS type_name_by_reference_lookup ON type_name_lookup(reference_builtin, reference_object_id, reference_component_index);
-- |]
-- | Inserts a new record into the name_lookups table
trackNewBranchHashNameLookup :: BranchHashId -> Transaction ()
trackNewBranchHashNameLookup bhId = do
execute sql (Only bhId)
where
sql =
[here|
INSERT INTO name_lookups (root_branch_hash_id)
VALUES (?)
|]
-- | Check if we've already got an index for the desired root branch hash.
checkBranchHashNameLookupExists :: BranchHashId -> Transaction Bool
checkBranchHashNameLookupExists hashId = do
queryOneCol sql (Only hashId)
where
sql =
[here|
SELECT EXISTS (
SELECT 1
FROM name_lookups
WHERE root_branch_hash_id = ?
LIMIT 1
)
|]
-- | Insert the given set of term names into the name lookup table
insertTermNames :: [NamedRef (Referent.TextReferent, Maybe NamedRef.ConstructorType)] -> Transaction ()
insertTermNames names = do
executeMany sql (NamedRef.toRowWithNamespace . fmap refToRow <$> names)
insertScopedTermNames :: BranchHashId -> [NamedRef (Referent.TextReferent, Maybe NamedRef.ConstructorType)] -> Transaction ()
insertScopedTermNames bhId names = do
executeMany sql (namedRefToRow <$> names)
where
namedRefToRow :: NamedRef (S.Referent.TextReferent, Maybe NamedRef.ConstructorType) -> (Only BranchHashId :. [SQLData])
namedRefToRow namedRef =
namedRef
& fmap refToRow
& NamedRef.namedRefToScopedRow
& \nr -> (Only bhId :. nr)
refToRow :: (Referent.TextReferent, Maybe NamedRef.ConstructorType) -> (Referent.TextReferent :. Only (Maybe NamedRef.ConstructorType))
refToRow (ref, ct) = ref :. Only ct
sql =
[here|
INSERT INTO term_name_lookup (reversed_name, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, referent_constructor_type, namespace)
INSERT INTO scoped_term_name_lookup (root_branch_hash_id, reversed_name, namespace, last_name_segment, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, referent_constructor_type)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
ON CONFLICT DO NOTHING
|]
-- | Insert the given set of type names into the name lookup table
insertScopedTypeNames :: BranchHashId -> [NamedRef (Reference.TextReference)] -> Transaction ()
insertScopedTypeNames bhId names =
executeMany sql ((Only bhId :.) . NamedRef.namedRefToScopedRow <$> names)
where
sql =
[here|
INSERT INTO scoped_type_name_lookup (root_branch_hash_id, reversed_name, namespace, last_name_segment, reference_builtin, reference_component_hash, reference_component_index)
VALUES (?, ?, ?, ?, ?, ?, ?)
ON CONFLICT DO NOTHING
|]
-- | Remove the given set of term names from the name lookup table
removeTermNames :: [NamedRef Referent.TextReferent] -> Transaction ()
removeTermNames names = do
executeMany sql names
removeScopedTermNames :: BranchHashId -> [NamedRef Referent.TextReferent] -> Transaction ()
removeScopedTermNames bhId names = do
executeMany sql ((Only bhId :.) <$> names)
where
sql =
[here|
DELETE FROM term_name_lookup
DELETE FROM scoped_term_name_lookup
WHERE
reversed_name IS ?
root_branch_hash_id IS ?
AND reversed_name IS ?
AND referent_builtin IS ?
AND referent_component_hash IS ?
AND referent_component_index IS ?
@ -1719,15 +1743,16 @@ removeTermNames names = do
|]
-- | Remove the given set of type names from the name lookup table
removeTypeNames :: [NamedRef (Reference.TextReference)] -> Transaction ()
removeTypeNames names = do
executeMany sql names
removeScopedTypeNames :: BranchHashId -> [NamedRef (Reference.TextReference)] -> Transaction ()
removeScopedTypeNames bhId names = do
executeMany sql ((Only bhId :.) <$> names)
where
sql =
[here|
DELETE FROM type_name_lookup
DELETE FROM scoped_type_name_lookup
WHERE
reversed_name IS ?
root_branch_hash_id IS ?
AND reversed_name IS ?
AND reference_builtin IS ?
AND reference_component_hash IS ?
AND reference_component_index IS ?
@ -1774,65 +1799,39 @@ likeEscape escapeChar pat =
| c == escapeChar -> Text.pack [escapeChar, escapeChar]
| otherwise -> Text.singleton c
-- | Gets the count of all definitions within the given namespace.
-- NOTE: This requires a working name lookup index.
getNamespaceDefinitionCount :: Text -> Transaction Int
getNamespaceDefinitionCount namespace = do
let subnamespace = globEscape namespace <> ".*"
queryOneCol sql (subnamespace, namespace, subnamespace, namespace)
where
sql =
[here|
SELECT COUNT(*) FROM (
SELECT 1 FROM term_name_lookup WHERE namespace GLOB ? OR namespace = ?
UNION ALL
SELECT 1 FROM type_name_lookup WHERE namespace GLOB ? OR namespace = ?
)
|]
-- | Insert the given set of type names into the name lookup table
insertTypeNames :: [NamedRef (Reference.TextReference)] -> Transaction ()
insertTypeNames names =
executeMany sql (NamedRef.toRowWithNamespace <$> names)
where
sql =
[here|
INSERT INTO type_name_lookup (reversed_name, reference_builtin, reference_component_hash, reference_component_index, namespace)
VALUES (?, ?, ?, ?, ?)
ON CONFLICT DO NOTHING
|]
-- | Get the list of term names in the root namespace according to the name lookup index
rootTermNamesByPath :: Maybe Text -> Transaction [NamedRef (Referent.TextReferent, Maybe NamedRef.ConstructorType)]
rootTermNamesByPath mayNamespace = do
let (namespace, subnamespace) = case mayNamespace of
Nothing -> ("", "*")
Just namespace -> (namespace, globEscape namespace <> ".*")
results :: [NamedRef (Referent.TextReferent :. Only (Maybe NamedRef.ConstructorType))] <- queryListRow sql (subnamespace, namespace, subnamespace, namespace)
termNamesWithinNamespace :: BranchHashId -> Maybe Text -> Transaction [NamedRef (Referent.TextReferent, Maybe NamedRef.ConstructorType)]
termNamesWithinNamespace bhId mayNamespace = do
let namespaceGlob = case mayNamespace of
Nothing -> "*"
Just namespace -> globEscape namespace <> ".*"
results :: [NamedRef (Referent.TextReferent :. Only (Maybe NamedRef.ConstructorType))] <- queryListRow sql (bhId, namespaceGlob)
pure (fmap unRow <$> results)
where
unRow (a :. Only b) = (a, b)
sql =
[here|
SELECT reversed_name, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, referent_constructor_type FROM term_name_lookup
WHERE (namespace GLOB ? OR namespace = ?)
ORDER BY (namespace GLOB ? OR namespace = ?) DESC
SELECT reversed_name, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, referent_constructor_type FROM scoped_term_name_lookup
WHERE
root_branch_hash_id = ?
AND namespace GLOB ?
|]
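
The namespaceGlob built above is where the trailing dot on the stored namespace column pays off. A simplified sketch (hypothetical helper; the real code also escapes GLOB metacharacters via globEscape):

{-# LANGUAGE OverloadedStrings #-}

import Data.Text (Text)

-- Assumes the namespace contains no GLOB metacharacters that need escaping.
namespaceGlobFor :: Maybe Text -> Text
namespaceGlobFor Nothing = "*"
namespaceGlobFor (Just namespace) = namespace <> ".*"

For Just "base.List" this yields "base.List.*", which matches the stored namespaces "base.List." and "base.List.Nonempty." but not "base.List1.", so the OR clause of the old query is no longer needed.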
-- | Get the list of type names in the root namespace according to the name lookup index
rootTypeNamesByPath :: Maybe Text -> Transaction [NamedRef Reference.TextReference]
rootTypeNamesByPath mayNamespace = do
let (namespace, subnamespace) = case mayNamespace of
Nothing -> ("", "*")
Just namespace -> (namespace, globEscape namespace <> ".*")
results :: [NamedRef Reference.TextReference] <- queryListRow sql (subnamespace, namespace, subnamespace, namespace)
typeNamesWithinNamespace :: BranchHashId -> Maybe Text -> Transaction [NamedRef Reference.TextReference]
typeNamesWithinNamespace bhId mayNamespace = do
let namespaceGlob = case mayNamespace of
Nothing -> "*"
Just namespace -> globEscape namespace <> ".*"
results :: [NamedRef Reference.TextReference] <- queryListRow sql (bhId, namespaceGlob)
pure results
where
sql =
[here|
SELECT reversed_name, reference_builtin, reference_component_hash, reference_component_index FROM type_name_lookup
WHERE namespace GLOB ? OR namespace = ?
ORDER BY (namespace GLOB ? OR namespace = ?) DESC
SELECT reversed_name, reference_builtin, reference_component_hash, reference_component_index FROM scoped_type_name_lookup
WHERE
root_branch_hash_id = ?
AND namespace GLOB ?
|]
-- | @before x y@ returns whether or not @x@ occurred before @y@, i.e. @x@ is an ancestor of @y@.

View File

@ -229,7 +229,103 @@ CREATE INDEX dependents_by_dependency ON dependents_index (
CREATE INDEX dependencies_by_dependent ON dependents_index (
dependent_object_id,
dependent_component_index
)
);
-- This table allows us to look up which branch hashes have a name lookup.
CREATE TABLE name_lookups (
root_branch_hash_id INTEGER PRIMARY KEY REFERENCES hash(id) ON DELETE CASCADE
);
CREATE TABLE scoped_term_name_lookup (
root_branch_hash_id INTEGER NOT NULL REFERENCES hash(id) ON DELETE CASCADE,
-- The name of the term in reversed form, with a trailing '.':
-- E.g. map.List.base.
--
-- The trailing '.' is helpful when performing suffix queries where we may not know
-- whether the suffix is complete or not, e.g. we could suffix search using any of the
-- following globs and it would still find 'map.List.base.':
-- map.List.base.*
-- map.List.*
-- map.*
reversed_name TEXT NOT NULL,
-- The last name segment of the name. This is used when looking up names for
-- suffixification when building PPEs.
-- E.g. for the name 'base.List.map' this would be 'map'
last_name_segment TEXT NOT NULL,
-- The namespace containing this definition, not reversed, with a trailing '.'
-- The trailing '.' simplifies GLOB queries, so that 'base.*' matches both things in
-- 'base' and 'base.List', but not 'base1', which allows us to avoid an OR in our where
-- clauses which in turn helps the sqlite query planner use indexes more effectively.
--
-- example value: 'base.List.'
namespace TEXT NOT NULL,
referent_builtin TEXT NULL,
referent_component_hash TEXT NULL,
referent_component_index INTEGER NULL,
referent_constructor_index INTEGER NULL,
referent_constructor_type INTEGER NULL,
PRIMARY KEY (root_branch_hash_id, reversed_name, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index)
);
-- This index allows finding all names we need to consider within a given namespace for
-- suffixification of a name.
-- It may seem strange to use last_name_segment rather than a suffix search over reversed_name here,
-- but SQLite will only optimize for a single prefix-glob at once, so we can't glob search
-- over both namespace and reversed_name. We can, however, EXACT match on last_name_segment and
-- then glob search on the namespace prefix, letting SQLite do the final glob search on
-- reversed_name over rows with a matching last segment without an index, which should be plenty fast.
CREATE INDEX scoped_term_names_by_namespace_and_last_name_segment ON scoped_term_name_lookup(root_branch_hash_id, last_name_segment, namespace);
-- This index allows us to find all names with a given ref within a specific namespace
CREATE INDEX scoped_term_name_by_referent_lookup ON scoped_term_name_lookup(root_branch_hash_id, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, namespace);
-- Allows fetching ALL names within a specific namespace prefix. We currently use this to
-- pretty-print on share, but will be replaced with a more precise set of queries soon.
CREATE INDEX scoped_term_names_by_namespace ON scoped_term_name_lookup(root_branch_hash_id, namespace);
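
To make the index comments above concrete: resolving a suffix such as List.map needs an exact match on last_name_segment plus a glob on reversed_name (with a further namespace glob to scope the search). A hypothetical helper computing those two parameters from the suffix segments:

{-# LANGUAGE OverloadedStrings #-}

import Data.List.NonEmpty (NonEmpty (..))
import qualified Data.List.NonEmpty as NEL
import Data.Text (Text)
import qualified Data.Text as Text

-- For the suffix "List.map" the segments, in written order, are "List" :| ["map"].
suffixQueryParams :: NonEmpty Text -> (Text, Text)
suffixQueryParams suffixSegments =
  ( NEL.last suffixSegments, -- exact match on last_name_segment: "map"
    Text.intercalate "." (reverse (NEL.toList suffixSegments)) <> ".*" -- reversed_name glob: "map.List.*"
  )

The glob "map.List.*" matches the stored reversed_name "map.List.base." precisely because of its trailing dot.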
CREATE TABLE scoped_type_name_lookup (
root_branch_hash_id INTEGER NOT NULL REFERENCES hash(id),
-- The name of the term: E.g. List.base
reversed_name TEXT NOT NULL,
-- The last name segment of the name. This is used when looking up names for
-- suffixification when building PPEs.
-- E.g. for the name 'base.List.map' this would be 'map'
last_name_segment TEXT NOT NULL,
-- The namespace containing this definition, not reversed, with a trailing '.'
-- The trailing '.' simplifies GLOB queries, so that 'base.*' matches both things in
-- 'base' and 'base.List', but not 'base1', which allows us to avoid an OR in our where
-- clauses which in turn helps the sqlite query planner use indexes more effectively.
--
-- example value: 'base.List.'
namespace TEXT NOT NULL,
reference_builtin TEXT NULL,
reference_component_hash INTEGER NULL,
reference_component_index INTEGER NULL,
PRIMARY KEY (reversed_name, reference_builtin, reference_component_hash, reference_component_index)
);
-- This index allows finding all names we need to consider within a given namespace for
-- suffixification of a name.
-- It may seem strange to use last_name_segment rather than a suffix search over reversed_name here,
-- but SQLite will only optimize for a single prefix-glob at once, so we can't glob search
-- over both namespace and reversed_name. We can, however, EXACT match on last_name_segment and
-- then glob search on the namespace prefix, letting SQLite do the final glob search on
-- reversed_name over rows with a matching last segment without an index, which should be plenty fast.
CREATE INDEX scoped_type_names_by_namespace_and_last_name_segment ON scoped_type_name_lookup(root_branch_hash_id, last_name_segment, namespace);
-- This index allows us to find all names with a given ref within a specific namespace.
CREATE INDEX scoped_type_name_by_reference_lookup ON scoped_type_name_lookup(root_branch_hash_id, reference_builtin, reference_component_hash, reference_component_index, namespace);
-- Allows fetching ALL names within a specific namespace prefix. We currently use this to
-- pretty-print on share, but will be replaced with a more precise set of queries soon.
CREATE INDEX scoped_type_names_by_namespace ON scoped_type_name_lookup(root_branch_hash_id, namespace)
-- Semicolon intentionally omitted, for the same reason that
-- semicolons in comments will blow up codebase initialization.

View File

@ -523,7 +523,7 @@ vacuum conn =
Right () -> pure True
-- | @VACUUM INTO@
vacuumInto :: Connection -> Text -> IO ()
vacuumInto :: Connection -> FilePath -> IO ()
vacuumInto conn file =
execute conn "VACUUM INTO ?" (Sqlite.Only file)

View File

@ -78,11 +78,12 @@ tlsSignedCertRef = lookupDeclRef "io2.Tls.SignedCert"
tlsPrivateKeyRef = lookupDeclRef "io2.Tls.PrivateKey"
runtimeFailureRef, arithmeticFailureRef, miscFailureRef, stmFailureRef :: Reference
runtimeFailureRef, arithmeticFailureRef, miscFailureRef, stmFailureRef, threadKilledFailureRef :: Reference
runtimeFailureRef = lookupDeclRef "io2.RuntimeFailure"
arithmeticFailureRef = lookupDeclRef "io2.ArithmeticFailure"
miscFailureRef = lookupDeclRef "io2.MiscFailure"
stmFailureRef = lookupDeclRef "io2.STMFailure"
threadKilledFailureRef = lookupDeclRef "io2.ThreadKilledFailure"
fileModeRef, filePathRef, bufferModeRef, seekModeRef, seqViewRef :: Reference
fileModeRef = lookupDeclRef "io2.FileMode"
@ -184,7 +185,8 @@ builtinDataDecls = rs1 ++ rs
(v "io2.RuntimeFailure", runtimeFailure),
(v "io2.ArithmeticFailure", arithmeticFailure),
(v "io2.MiscFailure", miscFailure),
(v "io2.STMFailure", stmFailure)
(v "io2.STMFailure", stmFailure),
(v "io2.ThreadKilledFailure", threadKilledFailure)
] of
Right a -> a
Left e -> error $ "builtinDataDecls: " <> show e
@ -363,6 +365,13 @@ builtinDataDecls = rs1 ++ rs
[]
[]
threadKilledFailure =
DataDeclaration
(Unique "e7e479ebb757edcd5acff958b00aa228ac75b0c53638d44cf9d62fca045c33cf")
()
[]
[]
stdhnd =
DataDeclaration
(Unique "67bf7a8e517cbb1e9f42bc078e35498212d3be3c")

View File

@ -10,6 +10,7 @@ module Unison.Codebase.Init
InitResult (..),
SpecifiedCodebase (..),
MigrationStrategy (..),
BackupStrategy (..),
Pretty,
createCodebase,
initCodebaseAndExit,
@ -47,11 +48,20 @@ data CodebaseLockOption
= DoLock
| DontLock
data BackupStrategy
= -- Create a backup of the codebase in the same directory as the codebase,
-- see 'backupCodebasePath'.
Backup
| -- Don't create a backup when migrating; useful when the caller has
-- already created a copy of the codebase, for instance.
NoBackup
deriving stock (Show, Eq, Ord)
data MigrationStrategy
= -- | Perform a migration immediately if one is required.
MigrateAutomatically
MigrateAutomatically BackupStrategy
| -- | Prompt the user that a migration is about to occur, continue after acknownledgment
MigrateAfterPrompt
MigrateAfterPrompt BackupStrategy
| -- | Triggers an 'OpenCodebaseRequiresMigration' error instead of migrating
DontMigrate
deriving stock (Show, Eq, Ord)
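
A small standalone sketch of the new shape (types copied from this diff with the deriving clauses simplified; willBackup is a hypothetical helper, not part of the commit) showing how a BackupStrategy now rides inside the MigrationStrategy:

data BackupStrategy = Backup | NoBackup
  deriving (Show, Eq, Ord)

data MigrationStrategy
  = MigrateAutomatically BackupStrategy
  | MigrateAfterPrompt BackupStrategy
  | DontMigrate
  deriving (Show, Eq, Ord)

-- Will this strategy ever produce a backup file?
willBackup :: MigrationStrategy -> Bool
willBackup (MigrateAutomatically backup) = backup == Backup
willBackup (MigrateAfterPrompt backup) = backup == Backup
willBackup DontMigrate = False

For example, ucm's Run command later in this diff passes MigrateAutomatically Backup, while a caller that has already copied the codebase itself can pass MigrateAutomatically NoBackup.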

View File

@ -9,7 +9,9 @@
module Unison.Codebase.SqliteCodebase
( Unison.Codebase.SqliteCodebase.init,
MigrationStrategy (..),
BackupStrategy (..),
CodebaseLockOption (..),
copyCodebase,
)
where
@ -27,7 +29,7 @@ import qualified System.Console.ANSI as ANSI
import System.FileLock (SharedExclusive (Exclusive), withTryFileLock)
import qualified System.FilePath as FilePath
import qualified System.FilePath.Posix as FilePath.Posix
import U.Codebase.HashTags (BranchHash, CausalHash, PatchHash (..))
import U.Codebase.HashTags (CausalHash, PatchHash (..))
import qualified U.Codebase.Reflog as Reflog
import qualified U.Codebase.Sqlite.Operations as Ops
import qualified U.Codebase.Sqlite.Queries as Q
@ -47,12 +49,11 @@ import Unison.Codebase.Editor.RemoteRepo
writeToReadGit,
)
import qualified Unison.Codebase.GitError as GitError
import Unison.Codebase.Init (CodebaseLockOption (..), MigrationStrategy (..))
import Unison.Codebase.Init (BackupStrategy (..), CodebaseLockOption (..), MigrationStrategy (..))
import qualified Unison.Codebase.Init as Codebase
import qualified Unison.Codebase.Init.CreateCodebaseError as Codebase1
import Unison.Codebase.Init.OpenCodebaseError (OpenCodebaseError (..))
import qualified Unison.Codebase.Init.OpenCodebaseError as Codebase1
import Unison.Codebase.Path (Path)
import Unison.Codebase.RootBranchCache
import Unison.Codebase.SqliteCodebase.Branch.Cache (newBranchCache)
import qualified Unison.Codebase.SqliteCodebase.Branch.Dependencies as BD
@ -212,12 +213,12 @@ sqliteCodebase debugName root localOrRemote lockOption migrationStrategy action
Migrations.CodebaseRequiresMigration fromSv toSv ->
case migrationStrategy of
DontMigrate -> pure $ Left (OpenCodebaseRequiresMigration fromSv toSv)
MigrateAfterPrompt -> do
MigrateAfterPrompt backupStrategy -> do
let shouldPrompt = True
Migrations.ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer shouldPrompt conn
MigrateAutomatically -> do
Migrations.ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer shouldPrompt backupStrategy conn
MigrateAutomatically backupStrategy -> do
let shouldPrompt = False
Migrations.ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer shouldPrompt conn
Migrations.ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer shouldPrompt backupStrategy conn
case result of
Left err -> pure $ Left err
@ -344,10 +345,6 @@ sqliteCodebase debugName root localOrRemote lockOption migrationStrategy action
referentsByPrefix =
CodebaseOps.referentsByPrefix getDeclType
updateNameLookup :: Path -> Maybe BranchHash -> BranchHash -> Sqlite.Transaction ()
updateNameLookup =
CodebaseOps.updateNameLookupIndex getDeclType
let codebase =
C.Codebase
{ getTerm,
@ -374,7 +371,6 @@ sqliteCodebase debugName root localOrRemote lockOption migrationStrategy action
termsOfTypeImpl,
termsMentioningTypeImpl,
termReferentsByPrefix = referentsByPrefix,
updateNameLookup,
withConnection = withConn,
withConnectionIO = withConnection debugName root
}
@ -588,7 +584,7 @@ viewRemoteBranch' ReadGitRemoteNamespace {repo, sch, path} gitBranchBehavior act
then throwIO (C.GitSqliteCodebaseError (GitError.NoDatabaseFile repo remotePath))
else throwIO exception
result <- sqliteCodebase "viewRemoteBranch.gitCache" remotePath Remote DoLock MigrateAfterPrompt \codebase -> do
result <- sqliteCodebase "viewRemoteBranch.gitCache" remotePath Remote DoLock (MigrateAfterPrompt Codebase.Backup) \codebase -> do
-- try to load the requested branch from it
branch <- time "Git fetch (sch)" $ case sch of
-- no sub-branch was specified, so use the root.
@ -638,7 +634,7 @@ pushGitBranch srcConn repo (PushGitBranchOpts behavior _syncMode) action = Unlif
-- set up the cache dir
throwEitherMWith C.GitProtocolError . withRepo readRepo Git.CreateBranchIfMissing $ \pushStaging -> do
newBranchOrErr <- throwEitherMWith (C.GitSqliteCodebaseError . C.gitErrorFromOpenCodebaseError (Git.gitDirToPath pushStaging) readRepo)
. withOpenOrCreateCodebase "push.dest" (Git.gitDirToPath pushStaging) Remote DoLock MigrateAfterPrompt
. withOpenOrCreateCodebase "push.dest" (Git.gitDirToPath pushStaging) Remote DoLock (MigrateAfterPrompt Codebase.Backup)
$ \(codebaseStatus, destCodebase) -> do
currentRootBranch <-
Codebase1.runTransaction destCodebase CodebaseOps.getRootBranchExists >>= \case
@ -766,3 +762,12 @@ pushGitBranch srcConn repo (PushGitBranchOpts behavior _syncMode) action = Unlif
(successful, _stdout, stderr) <- gitInCaptured remotePath $ ["push", url] ++ Git.gitVerbosity ++ maybe [] (pure @[]) mayGitBranch
when (not successful) . throwIO $ GitError.PushException repo (Text.unpack stderr)
pure True
-- | Given two codebase roots (e.g. "./mycodebase"), safely copy the codebase
-- at the source to the destination.
-- Note: this does not copy the .unisonConfig file.
copyCodebase :: (MonadIO m) => CodebasePath -> CodebasePath -> m ()
copyCodebase src dest = liftIO $ do
createDirectoryIfMissing True (makeCodebaseDirPath dest)
withConnection ("copy-from:" <> src) src $ \srcConn -> do
Sqlite.vacuumInto srcConn (makeCodebasePath dest)
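
copyCodebase leans on vacuumInto (whose signature changes to FilePath earlier in this diff): VACUUM INTO writes a compacted, consistent snapshot of the live database to a fresh file, so the copy is safe even while the source is in use. The same pattern, sketched standalone with the sqlite-simple package (an assumed stand-in here; the codebase goes through its own Unison.Sqlite wrapper, as above):

{-# LANGUAGE OverloadedStrings #-}

import Database.SQLite.Simple (Only (..), close, execute, open)

-- Snapshot a live SQLite database into a new file.
-- Note: VACUUM INTO requires that the destination file not already exist (or be empty).
snapshotDb :: FilePath -> FilePath -> IO ()
snapshotDb src dest = do
  conn <- open src
  execute conn "VACUUM INTO ?" (Only dest)
  close conn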

View File

@ -9,13 +9,13 @@ import qualified Data.Map as Map
import qualified Data.Text as Text
import Data.Time.Clock.POSIX (getPOSIXTime)
import qualified System.Console.Regions as Region
import System.Directory (copyFile)
import System.FilePath ((</>))
import Text.Printf (printf)
import qualified U.Codebase.Reference as C.Reference
import U.Codebase.Sqlite.DbId (SchemaVersion (..))
import qualified U.Codebase.Sqlite.Queries as Q
import Unison.Codebase (CodebasePath)
import Unison.Codebase.Init (BackupStrategy (..))
import Unison.Codebase.Init.OpenCodebaseError (OpenCodebaseError (OpenCodebaseUnknownSchemaVersion))
import qualified Unison.Codebase.Init.OpenCodebaseError as Codebase
import Unison.Codebase.IntegrityCheck (IntegrityResult (..), integrityCheckAllBranches, integrityCheckAllCausals, prettyPrintIntegrityErrors)
@ -27,8 +27,9 @@ import Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema4To5 (migrateSchem
import Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema5To6 (migrateSchema5To6)
import Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema6To7 (migrateSchema6To7)
import Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema7To8 (migrateSchema7To8)
import Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema8To9 (migrateSchema8To9)
import qualified Unison.Codebase.SqliteCodebase.Operations as Ops2
import Unison.Codebase.SqliteCodebase.Paths (backupCodebasePath, codebasePath)
import Unison.Codebase.SqliteCodebase.Paths (backupCodebasePath)
import Unison.Codebase.Type (LocalOrRemote (..))
import qualified Unison.ConstructorType as CT
import Unison.Hash (Hash)
@ -36,11 +37,11 @@ import Unison.Prelude
import qualified Unison.Sqlite as Sqlite
import qualified Unison.Sqlite.Connection as Sqlite.Connection
import Unison.Util.Monoid (foldMapM)
import qualified Unison.Util.Monoid as Monoid
import qualified Unison.Util.Pretty as Pretty
import qualified UnliftIO
-- | Mapping from schema version to the migration required to get there.
-- Each migration may only be run on a schema of its immediate predecessor;
-- e.g. the migration at index 2 must be run on a codebase at version 1.
migrations ::
-- | A 'getDeclType'-like lookup, possibly backed by a cache.
@ -57,7 +58,8 @@ migrations getDeclType termBuffer declBuffer rootCodebasePath =
(5, migrateSchema4To5),
(6, migrateSchema5To6 rootCodebasePath),
(7, migrateSchema6To7),
(8, migrateSchema7To8)
(8, migrateSchema7To8),
(9, migrateSchema8To9)
]
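
A small worked example of how this table is consumed (ensureCodebaseIsUpToDate below filters it with Map.filterWithKey): a codebase sitting at schema version 7 picks up exactly the migrations keyed 8 and 9. A sketch only; the string labels stand in for the real Transaction values:

import qualified Data.Map as Map

migrationKeys :: Map.Map Int String
migrationKeys =
  Map.fromList [(2, "1to2"), (3, "2to3"), (4, "3to4"), (5, "4to5"), (6, "5to6"), (7, "6to7"), (8, "7to8"), (9, "8to9")]

-- Map.keys (Map.filterWithKey (\v _ -> v > 7) migrationKeys) == [8, 9]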
data CodebaseVersionStatus
@ -92,9 +94,10 @@ ensureCodebaseIsUpToDate ::
TVar (Map Hash Ops2.TermBufferEntry) ->
TVar (Map Hash Ops2.DeclBufferEntry) ->
Bool ->
BackupStrategy ->
Sqlite.Connection ->
m (Either Codebase.OpenCodebaseError ())
ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer shouldPrompt conn =
ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer shouldPrompt backupStrategy conn =
(liftIO . UnliftIO.try) do
regionVar <- newEmptyMVar
let finalizeRegion :: IO ()
@ -105,15 +108,20 @@ ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer sh
Region.displayConsoleRegions do
(`UnliftIO.finally` finalizeRegion) do
let migs = migrations getDeclType termBuffer declBuffer root
-- The highest schema that this ucm knows how to migrate to.
let highestKnownSchemaVersion = fst . head $ Map.toDescList migs
currentSchemaVersion <- Sqlite.runTransaction conn Q.schemaVersion
when (currentSchemaVersion > highestKnownSchemaVersion) $ UnliftIO.throwIO $ OpenCodebaseUnknownSchemaVersion (fromIntegral currentSchemaVersion)
backupCodebaseIfNecessary backupStrategy localOrRemote conn currentSchemaVersion highestKnownSchemaVersion root
when shouldPrompt do
putStrLn "Press <enter> to start the migration once all other ucm processes are shutdown..."
void $ liftIO getLine
ranMigrations <-
Sqlite.runWriteTransaction conn \run -> do
schemaVersion <- run Q.schemaVersion
let migs = migrations getDeclType termBuffer declBuffer root
-- The highest schema that this ucm knows how to migrate to.
let currentSchemaVersion = fst . head $ Map.toDescList migs
when (schemaVersion > currentSchemaVersion) $ UnliftIO.throwIO $ OpenCodebaseUnknownSchemaVersion (fromIntegral schemaVersion)
let migrationsToRun = Map.filterWithKey (\v _ -> v > schemaVersion) migs
when (localOrRemote == Local && (not . null) migrationsToRun) $ backupCodebase root shouldPrompt
-- Get the schema version again now that we're in a transaction.
currentSchemaVersion <- run Q.schemaVersion
let migrationsToRun = Map.filterWithKey (\v _ -> v > currentSchemaVersion) migs
-- This is a bit of a hack, hopefully we can remove this when we have a more
-- reliable way to freeze old migration code in time.
-- The problem is that 'saveObject' has been changed to flush temp entity tables,
@ -123,8 +131,8 @@ ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer sh
--
-- Hopefully we can remove this once we've got better methods of freezing migration
-- code in time.
when (schemaVersion < 5) $ run Q.addTempEntityTables
when (schemaVersion < 6) $ run Q.addNamespaceStatsTables
when (currentSchemaVersion < 5) $ run Q.addTempEntityTables
when (currentSchemaVersion < 6) $ run Q.addNamespaceStatsTables
for_ (Map.toAscList migrationsToRun) $ \(SchemaVersion v, migration) -> do
putStrLn $ "🔨 Migrating codebase to version " <> show v <> "..."
run migration
@ -136,13 +144,16 @@ ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer sh
putMVar regionVar region
pure region
result <- do
-- Ideally we'd check everything here, but certain codebases are known to have objects
-- with missing Hash Objects, we'll want to clean that up in a future migration.
-- integrityCheckAllHashObjects,
let checks =
[ -- Ideally we'd check everything here, but certain codebases are known to have objects
-- with missing Hash Objects, we'll want to clean that up in a future migration.
-- integrityCheckAllHashObjects,
integrityCheckAllBranches,
integrityCheckAllCausals
]
Monoid.whenM
(currentSchemaVersion < 7) -- Only certain migrations actually make changes which reasonably need to be checked
[ integrityCheckAllBranches,
integrityCheckAllCausals
]
zip [(1 :: Int) ..] checks & foldMapM \(i, check) -> do
Region.setConsoleRegion
region
@ -163,13 +174,16 @@ ensureCodebaseIsUpToDate localOrRemote root getDeclType termBuffer declBuffer sh
_success <- Sqlite.Connection.vacuum conn
Region.setConsoleRegion region ("🏁 Migrations complete 🏁" :: Text)
-- | Copy the sqlite database to a new file with a unique name based on current time.
backupCodebase :: CodebasePath -> Bool -> IO ()
backupCodebase root shouldPrompt = do
backupPath <- backupCodebasePath <$> getPOSIXTime
copyFile (root </> codebasePath) (root </> backupPath)
putStrLn ("📋 I backed up your codebase to " ++ (root </> backupPath))
putStrLn "⚠️ Please close all other ucm processes and wait for the migration to complete before interacting with your codebase."
when shouldPrompt do
putStrLn "Press <enter> to start the migration once all other ucm processes are shutdown..."
void $ liftIO getLine
-- | If we need to make a backup, then copy the sqlite database to a new file with a unique name based on current time.
backupCodebaseIfNecessary :: BackupStrategy -> LocalOrRemote -> Sqlite.Connection -> SchemaVersion -> SchemaVersion -> CodebasePath -> IO ()
backupCodebaseIfNecessary backupStrategy localOrRemote conn currentSchemaVersion highestKnownSchemaVersion root = do
case (backupStrategy, localOrRemote) of
(NoBackup, _) -> pure ()
(_, Remote) -> pure ()
(Backup, Local)
| (currentSchemaVersion >= highestKnownSchemaVersion) -> pure ()
| otherwise -> do
backupPath <- getPOSIXTime <&> (\t -> root </> backupCodebasePath currentSchemaVersion t)
Sqlite.vacuumInto conn backupPath
putStrLn ("📋 I backed up your codebase to " ++ (root </> backupPath))
putStrLn "⚠️ Please close all other ucm processes and wait for the migration to complete before interacting with your codebase."

View File

@ -1,11 +1,135 @@
-- | @004-add-project-tables.sql@
{-# LANGUAGE QuasiQuotes #-}
module Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema7To8 (migrateSchema7To8) where
import qualified U.Codebase.Sqlite.Queries as Queries
import Data.String.Here.Uninterpolated (here)
import qualified U.Codebase.Sqlite.Queries as Q
import qualified Unison.Sqlite as Sqlite
-- | Adds a table for tracking namespace statistics
-- Adds stats for all existing namespaces, even though missing stats are computed on-demand.
migrateSchema7To8 :: Sqlite.Transaction ()
migrateSchema7To8 = do
Queries.expectSchemaVersion 7
Queries.addProjectTables
Queries.setSchemaVersion 8
Q.expectSchemaVersion 7
createScopedNameLookupTables
Q.setSchemaVersion 8
-- | Create the scoped name lookup tables.
createScopedNameLookupTables :: Sqlite.Transaction ()
createScopedNameLookupTables = do
-- This table allows us to look up which causal hashes have a name lookup.
Sqlite.execute_
[here|
CREATE TABLE name_lookups (
root_branch_hash_id INTEGER PRIMARY KEY REFERENCES hash(id) ON DELETE CASCADE
)
|]
Sqlite.execute_
[here|
CREATE TABLE scoped_term_name_lookup (
root_branch_hash_id INTEGER NOT NULL REFERENCES hash(id) ON DELETE CASCADE,
-- The name of the term in reversed form, with a trailing '.':
-- E.g. map.List.base.
--
-- The trailing '.' is helpful when performing suffix queries where we may not know
-- whether the suffix is complete or not, e.g. we could suffix search using any of the
-- following globs and it would still find 'map.List.base.':
-- map.List.base.*
-- map.List.*
-- map.*
reversed_name TEXT NOT NULL,
-- The last name segment of the name. This is used when looking up names for
-- suffixification when building PPEs.
-- E.g. for the name 'base.List.map' this would be 'map'
last_name_segment TEXT NOT NULL,
-- The namespace containing this definition, not reversed, with a trailing '.'
-- The trailing '.' simplifies GLOB queries, so that 'base.*' matches both things in
-- 'base' and 'base.List', but not 'base1', which allows us to avoid an OR in our where
-- clauses which in turn helps the sqlite query planner use indexes more effectively.
--
-- example value: 'base.List.'
namespace TEXT NOT NULL,
referent_builtin TEXT NULL,
referent_component_hash TEXT NULL,
referent_component_index INTEGER NULL,
referent_constructor_index INTEGER NULL,
referent_constructor_type INTEGER NULL,
PRIMARY KEY (root_branch_hash_id, reversed_name, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index)
)
|]
-- This index allows finding all names we need to consider within a given namespace for
-- suffixification of a name.
-- It may seem strange to use last_name_segment rather than a suffix search over reversed_name here,
-- but SQLite will only optimize for a single prefix-glob at once, so we can't glob search
-- over both namespace and reversed_name. We can, however, EXACT match on last_name_segment and
-- then glob search on the namespace prefix, letting SQLite do the final glob search on
-- reversed_name over rows with a matching last segment without an index, which should be plenty fast.
Sqlite.execute_
[here|
CREATE INDEX scoped_term_names_by_namespace_and_last_name_segment ON scoped_term_name_lookup(root_branch_hash_id, last_name_segment, namespace)
|]
-- This index allows us to find all names with a given ref within a specific namespace
Sqlite.execute_
[here|
CREATE INDEX scoped_term_name_by_referent_lookup ON scoped_term_name_lookup(root_branch_hash_id, referent_builtin, referent_component_hash, referent_component_index, referent_constructor_index, namespace)
|]
-- Allows fetching ALL names within a specific namespace prefix. We currently use this to
-- pretty-print on share, but will be replaced with a more precise set of queries soon.
Sqlite.execute_
[here|
CREATE INDEX scoped_term_names_by_namespace ON scoped_term_name_lookup(root_branch_hash_id, namespace)
|]
Sqlite.execute_
[here|
CREATE TABLE scoped_type_name_lookup (
root_branch_hash_id INTEGER NOT NULL REFERENCES hash(id),
-- The name of the term: E.g. List.base
reversed_name TEXT NOT NULL,
-- The last name segment of the name. This is used when looking up names for
-- suffixification when building PPEs.
-- E.g. for the name 'base.List.map' this would be 'map'
last_name_segment TEXT NOT NULL,
-- The namespace containing this definition, not reversed, with a trailing '.'
-- The trailing '.' simplifies GLOB queries, so that 'base.*' matches both things in
-- 'base' and 'base.List', but not 'base1', which allows us to avoid an OR in our where
-- clauses which in turn helps the sqlite query planner use indexes more effectively.
--
-- example value: 'base.List.'
namespace TEXT NOT NULL,
reference_builtin TEXT NULL,
reference_component_hash INTEGER NULL,
reference_component_index INTEGER NULL,
PRIMARY KEY (reversed_name, reference_builtin, reference_component_hash, reference_component_index)
);
|]
-- This index allows finding all names we need to consider within a given namespace for
-- suffixification of a name.
-- It may seem strange to use last_name_segment rather than a suffix search over reversed_name here,
-- but SQLite will only optimize for a single prefix-glob at once, so we can't glob search
-- over both namespace and reversed_name. We can, however, EXACT match on last_name_segment and
-- then glob search on the namespace prefix, letting SQLite do the final glob search on
-- reversed_name over rows with a matching last segment without an index, which should be plenty fast.
Sqlite.execute_
[here|
CREATE INDEX scoped_type_names_by_namespace_and_last_name_segment ON scoped_type_name_lookup(root_branch_hash_id, last_name_segment, namespace)
|]
-- This index allows us to find all names with a given ref within a specific namespace.
Sqlite.execute_
[here|
CREATE INDEX scoped_type_name_by_reference_lookup ON scoped_type_name_lookup(root_branch_hash_id, reference_builtin, reference_component_hash, reference_component_index, namespace)
|]
-- Allows fetching ALL names within a specific namespace prefix. We currently use this to
-- pretty-print on share, but will be replaced with a more precise set of queries soon.
Sqlite.execute_
[here|
CREATE INDEX scoped_type_names_by_namespace ON scoped_type_name_lookup(root_branch_hash_id, namespace)
|]

View File

@ -0,0 +1,10 @@
module Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema8To9 (migrateSchema8To9) where
import qualified U.Codebase.Sqlite.Queries as Queries
import qualified Unison.Sqlite as Sqlite
migrateSchema8To9 :: Sqlite.Transaction ()
migrateSchema8To9 = do
Queries.expectSchemaVersion 8
Queries.addProjectTables
Queries.setSchemaVersion 9

View File

@ -7,11 +7,9 @@
-- are unified with non-sqlite operations in the Codebase interface, like 'appendReflog'.
module Unison.Codebase.SqliteCodebase.Operations where
import Control.Lens (ifor)
import Data.Bitraversable (bitraverse)
import Data.Either.Extra ()
import qualified Data.List as List
import qualified Data.List.NonEmpty as NEList
import Data.List.NonEmpty.Extra (NonEmpty ((:|)), maximum1)
import qualified Data.Map as Map
import Data.Maybe (fromJust)
@ -19,7 +17,6 @@ import qualified Data.Set as Set
import qualified Data.Text as Text
import qualified U.Codebase.Branch as V2Branch
import qualified U.Codebase.Branch.Diff as BranchDiff
import qualified U.Codebase.Causal as V2Causal
import U.Codebase.HashTags (BranchHash, CausalHash (unCausalHash), PatchHash)
import qualified U.Codebase.Reference as C.Reference
import qualified U.Codebase.Referent as C.Referent
@ -32,8 +29,6 @@ import qualified U.Codebase.Sqlite.Queries as Q
import U.Codebase.Sqlite.V2.HashHandle (v2HashHandle)
import qualified Unison.Builtin as Builtins
import Unison.Codebase.Branch (Branch (..))
import qualified Unison.Codebase.Branch as Branch
import qualified Unison.Codebase.Branch.Names as V1Branch
import Unison.Codebase.Patch (Patch)
import Unison.Codebase.Path (Path)
import qualified Unison.Codebase.Path as Path
@ -622,31 +617,38 @@ namesAtPath namesRootPath relativeToPath = do
Nothing -> Nothing
Just stripped -> Just (Name.makeRelative stripped, ref)
-- | Update the root namespace names index which is used by the share server for serving api
-- requests.
updateNameLookupIndex ::
-- | Add an index for the provided branch hash if one doesn't already exist.
ensureNameLookupForBranchHash ::
(C.Reference.Reference -> Sqlite.Transaction CT.ConstructorType) ->
Path ->
-- | "from" branch, if 'Nothing' use the empty branch
-- | An optional branch which we may already have an index for.
-- This should be a branch which is relatively similar to the branch we're creating a name
-- lookup for, e.g. a recent ancestor of the new branch. The more similar it is,
-- the less work we'll need to do.
Maybe BranchHash ->
-- | "to" branch
BranchHash ->
Sqlite.Transaction ()
updateNameLookupIndex getDeclType pathPrefix mayFromBranchHash toBranchHash = do
fromBranch <- case mayFromBranchHash of
Nothing -> pure V2Branch.empty
Just fromBH -> Ops.expectBranchByBranchHash fromBH
toBranch <- Ops.expectBranchByBranchHash toBranchHash
treeDiff <- BranchDiff.diffBranches fromBranch toBranch
let namePrefix = case pathPrefix of
Path.Empty -> Nothing
(p Path.:< ps) -> Just $ Name.fromSegments (p :| Path.toList ps)
let BranchDiff.NameChanges {termNameAdds, termNameRemovals, typeNameAdds, typeNameRemovals} = BranchDiff.nameChanges namePrefix treeDiff
termNameAddsWithCT <- do
for termNameAdds \(name, ref) -> do
refWithCT <- addReferentCT ref
pure $ toNamedRef (name, refWithCT)
Ops.updateNameIndex (termNameAddsWithCT, toNamedRef <$> termNameRemovals) (toNamedRef <$> typeNameAdds, toNamedRef <$> typeNameRemovals)
ensureNameLookupForBranchHash getDeclType mayFromBranchHash toBranchHash = do
Ops.checkBranchHashNameLookupExists toBranchHash >>= \case
True -> pure ()
False -> do
(fromBranch, mayExistingLookupBH) <- case mayFromBranchHash of
Nothing -> pure (V2Branch.empty, Nothing)
Just fromBH -> do
Ops.checkBranchHashNameLookupExists fromBH >>= \case
True -> (,Just fromBH) <$> Ops.expectBranchByBranchHash fromBH
False -> do
-- TODO: We can probably infer a good starting branch by crawling through
-- history looking for a Branch Hash we already have an index for.
pure (V2Branch.empty, Nothing)
toBranch <- Ops.expectBranchByBranchHash toBranchHash
treeDiff <- BranchDiff.diffBranches fromBranch toBranch
let namePrefix = Nothing
let BranchDiff.NameChanges {termNameAdds, termNameRemovals, typeNameAdds, typeNameRemovals} = BranchDiff.nameChanges namePrefix treeDiff
termNameAddsWithCT <- do
for termNameAdds \(name, ref) -> do
refWithCT <- addReferentCT ref
pure $ toNamedRef (name, refWithCT)
Ops.buildNameLookupForBranchHash mayExistingLookupBH toBranchHash (termNameAddsWithCT, toNamedRef <$> termNameRemovals) (toNamedRef <$> typeNameAdds, toNamedRef <$> typeNameRemovals)
where
toNamedRef :: (Name, ref) -> S.NamedRef ref
toNamedRef (name, ref) = S.NamedRef {reversedSegments = coerce $ Name.reverseSegments name, ref = ref}
@ -657,80 +659,6 @@ updateNameLookupIndex getDeclType pathPrefix mayFromBranchHash toBranchHash = do
ct <- getDeclType ref
pure (referent, Just $ Cv.constructorType1to2 ct)
-- | Compute the root namespace names index which is used by the share server for serving api
-- requests. Using 'updateNameLookupIndex' is preferred whenever possible, since it's
-- considerably faster. This can be used to reset the index if it ever gets out of sync due to
-- a bug.
--
-- This version can be used if you've already got the root Branch pre-loaded, otherwise
-- it's faster to use 'initializeNameLookupIndexFromV2Root'
initializeNameLookupIndexFromV1Branch :: Branch Transaction -> Sqlite.Transaction ()
initializeNameLookupIndexFromV1Branch root = do
Q.dropNameLookupTables
saveRootNamesIndexV1 (V1Branch.toNames . Branch.head $ root)
where
saveRootNamesIndexV1 :: Names -> Transaction ()
saveRootNamesIndexV1 Names {Names.terms, Names.types} = do
let termNames :: [(S.NamedRef (C.Referent.Referent, Maybe C.Referent.ConstructorType))]
termNames = Rel.toList terms <&> \(name, ref) -> S.NamedRef {reversedSegments = nameSegments name, ref = splitReferent ref}
let typeNames :: [(S.NamedRef C.Reference.Reference)]
typeNames =
Rel.toList types
<&> ( \(name, ref) ->
S.NamedRef {reversedSegments = nameSegments name, ref = Cv.reference1to2 ref}
)
Ops.updateNameIndex (termNames, []) (typeNames, [])
where
nameSegments :: Name -> NonEmpty Text
nameSegments = coerce @(NonEmpty NameSegment) @(NonEmpty Text) . Name.reverseSegments
splitReferent :: Referent.Referent -> (C.Referent.Referent, Maybe C.Referent.ConstructorType)
splitReferent referent = case referent of
Referent.Ref {} -> (Cv.referent1to2 referent, Nothing)
Referent.Con _ref ct -> (Cv.referent1to2 referent, Just (Cv.constructorType1to2 ct))
-- | Compute the root namespace names index which is used by the share server for serving api
-- requests. Using 'updateNameLookupIndex' is preferred whenever possible, since it's
-- considerably faster. This can be used to reset the index if it ever gets out of sync due to
-- a bug.
--
-- This version should be used if you don't already have the root Branch pre-loaded,
-- If you do, use 'initializeNameLookupIndexFromV1Branch' instead.
initializeNameLookupIndexFromV2Root :: (C.Reference.Reference -> Sqlite.Transaction CT.ConstructorType) -> Sqlite.Transaction ()
initializeNameLookupIndexFromV2Root getDeclType = do
Q.dropNameLookupTables
rootHash <- Ops.expectRootCausalHash
causalBranch <- Ops.expectCausalBranchByCausalHash rootHash
(termNameMap, typeNameMap) <- nameMapsFromV2Branch [] causalBranch
let expandedTermNames = Map.toList termNameMap >>= (\(name, refs) -> (name,) <$> Set.toList refs)
termNameList <- do
for expandedTermNames \(name, ref) -> do
refWithCT <- addReferentCT ref
pure S.NamedRef {S.reversedSegments = coerce name, S.ref = refWithCT}
let typeNameList = do
(name, refs) <- Map.toList typeNameMap
ref <- Set.toList refs
pure $ S.NamedRef {S.reversedSegments = coerce name, S.ref = ref}
Ops.updateNameIndex (termNameList, []) (typeNameList, [])
where
addReferentCT :: C.Referent.Referent -> Transaction (C.Referent.Referent, Maybe C.Referent.ConstructorType)
addReferentCT referent = case referent of
C.Referent.Ref {} -> pure (referent, Nothing)
C.Referent.Con ref _conId -> do
ct <- getDeclType ref
pure (referent, Just $ Cv.constructorType1to2 ct)
-- Traverse a v2 branch
-- Collects two maps, one with all term names and one with all type names.
-- Note that unlike the `Name` type in `unison-core1`, this list of name segments is
-- in reverse order, e.g. `["map", "List", "base"]`
nameMapsFromV2Branch :: (Monad m) => [NameSegment] -> V2Branch.CausalBranch m -> m (Map (NonEmpty NameSegment) (Set C.Referent.Referent), Map (NonEmpty NameSegment) (Set C.Reference.Reference))
nameMapsFromV2Branch reversedNamePrefix cb = do
b <- V2Causal.value cb
let (shallowTermNames, shallowTypeNames) = (Map.keysSet <$> V2Branch.terms b, Map.keysSet <$> V2Branch.types b)
(prefixedChildTerms, prefixedChildTypes) <-
fold <$> (ifor (V2Branch.children b) $ \nameSegment cb -> (nameMapsFromV2Branch (nameSegment : reversedNamePrefix) cb))
pure (Map.mapKeys (NEList.:| reversedNamePrefix) shallowTermNames <> prefixedChildTerms, Map.mapKeys (NEList.:| reversedNamePrefix) shallowTypeNames <> prefixedChildTypes)
-- | Given a transaction, return a transaction that first checks a semispace cache of the given size.
--
-- The transaction should probably be read-only, as we (of course) don't hit SQLite on a cache hit.

View File

@ -9,6 +9,7 @@ where
import Data.Time (NominalDiffTime)
import System.FilePath ((</>))
import U.Codebase.Sqlite.DbId (SchemaVersion (SchemaVersion))
import Unison.Codebase (CodebasePath)
-- | Prefer makeCodebasePath or makeCodebaseDirPath when possible.
@ -27,6 +28,6 @@ makeCodebaseDirPath :: CodebasePath -> FilePath
makeCodebaseDirPath root = root </> ".unison" </> "v2"
-- | Makes a path to store a backup of a sqlite database, given the schema version being migrated from and the current time.
backupCodebasePath :: NominalDiffTime -> FilePath
backupCodebasePath now =
codebasePath ++ "." ++ show @Int (floor now)
backupCodebasePath :: SchemaVersion -> NominalDiffTime -> FilePath
backupCodebasePath (SchemaVersion schemaVersion) now =
codebasePath ++ ".v" ++ show schemaVersion ++ "." ++ show @Int (floor now)

View File

@ -13,14 +13,13 @@ module Unison.Codebase.Type
)
where
import U.Codebase.HashTags (BranchHash, CausalHash)
import U.Codebase.HashTags (CausalHash)
import qualified U.Codebase.Reference as V2
import Unison.Codebase.Branch (Branch)
import qualified Unison.Codebase.Editor.Git as Git
import Unison.Codebase.Editor.RemoteRepo (ReadGitRemoteNamespace, ReadGitRepo, WriteGitRepo)
import Unison.Codebase.GitError (GitCodebaseError, GitProtocolError)
import Unison.Codebase.Init.OpenCodebaseError (OpenCodebaseError (..))
import Unison.Codebase.Path (Path)
import Unison.Codebase.SqliteCodebase.GitError (GitSqliteCodebaseError (..))
import Unison.Codebase.SyncMode (SyncMode)
import Unison.CodebasePath (CodebasePath)
@ -101,21 +100,6 @@ data Codebase m v a = Codebase
termsMentioningTypeImpl :: Reference -> Sqlite.Transaction (Set Referent.Id),
-- | Get the set of user-defined terms-or-constructors whose hash matches the given prefix.
termReferentsByPrefix :: ShortHash -> Sqlite.Transaction (Set Referent.Id),
-- Updates the root namespace names index from an old BranchHash to a new one.
-- This isn't run automatically because it can be a bit slow.
updateNameLookup ::
-- Path to the root of the _changes_.
-- E.g. if you know that all the changes occur at "base.List", you can pass "base.List"
-- here, and pass the old and new branch hashes for the branch as "base.List".
-- This allows us to avoid searching for changes in areas where it's impossible for it
-- to have occurred.
Path ->
-- The branch hash at 'Path' which the existing index was built from.
-- Pass 'Nothing' to build the index from scratch (i.e. compute a diff from an empty branch).
Maybe BranchHash ->
-- The new branch
BranchHash ->
Sqlite.Transaction (),
-- | Acquire a new connection to the same underlying database file this codebase object connects to.
withConnection :: forall x. (Sqlite.Connection -> m x) -> m x,
-- | Acquire a new connection to the same underlying database file this codebase object connects to.

View File

@ -521,6 +521,8 @@ encodeExn ustk bstk (Left exn) = do
(Rf.stmFailureRef, disp be, unitValue)
| Just (be :: BlockedIndefinitelyOnMVar) <- fromException exn =
(Rf.ioFailureRef, disp be, unitValue)
| Just (ie :: AsyncException) <- fromException exn =
(Rf.threadKilledFailureRef, disp ie, unitValue)
| otherwise = (Rf.miscFailureRef, disp exn, unitValue)
eval ::

View File

@ -83,6 +83,7 @@ library
Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema5To6
Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema6To7
Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema7To8
Unison.Codebase.SqliteCodebase.Migrations.MigrateSchema8To9
Unison.Codebase.SqliteCodebase.Operations
Unison.Codebase.SqliteCodebase.Paths
Unison.Codebase.SqliteCodebase.SyncEphemeral

View File

@ -1,30 +0,0 @@
(library (unison concurrent)
(export
ref-new
ref-read
ref-write
ref-cas
promise-new
promise-read
promise-write
promise-try-read
fork
kill
sleep
try-eval)
(define err "This operation is not supported on the pure Chez Scheme
backend, use the Racket over Chez Scheme backend")
(define (ref-new a) (error err))
(define (ref-read ref) (error err))
(define (ref-write ref a) (error err))
(define (ref-cas ref old-value new-value) (error err))
(define (promise-new) (error err))
(define (promise-read promise) (error err))
(define (promise-try-read promise) (error err))
(define (fork thread-thunk) (error err))
(define (kill thread-id) (error err))
(define (try-eval thunk) (error err)))

View File

@ -118,7 +118,7 @@ main = withCP65001 . runInUnboundThread . Ki.scoped $ \scope -> do
]
)
Run (RunFromSymbol mainName) args -> do
getCodebaseOrExit mCodePathOption SC.MigrateAutomatically \(_, _, theCodebase) -> do
getCodebaseOrExit mCodePathOption (SC.MigrateAutomatically SC.Backup) \(_, _, theCodebase) -> do
RTI.withRuntime False RTI.OneOff Version.gitDescribeWithDate \runtime -> do
withArgs args (execute theCodebase runtime mainName) >>= \case
Left err -> exitError err
@ -130,7 +130,7 @@ main = withCP65001 . runInUnboundThread . Ki.scoped $ \scope -> do
case e of
Left _ -> exitError "I couldn't find that file or it is for some reason unreadable."
Right contents -> do
getCodebaseOrExit mCodePathOption SC.MigrateAutomatically \(initRes, _, theCodebase) -> do
getCodebaseOrExit mCodePathOption (SC.MigrateAutomatically SC.Backup) \(initRes, _, theCodebase) -> do
withRuntimes RTI.OneOff \(rt, sbrt) -> do
let fileEvent = Input.UnisonFileChanged (Text.pack file) contents
let noOpRootNotifier _ = pure ()
@ -156,7 +156,7 @@ main = withCP65001 . runInUnboundThread . Ki.scoped $ \scope -> do
case e of
Left _ -> exitError "I had trouble reading this input."
Right contents -> do
getCodebaseOrExit mCodePathOption SC.MigrateAutomatically \(initRes, _, theCodebase) -> do
getCodebaseOrExit mCodePathOption (SC.MigrateAutomatically SC.Backup) \(initRes, _, theCodebase) -> do
withRuntimes RTI.OneOff \(rt, sbrt) -> do
let fileEvent = Input.UnisonFileChanged (Text.pack "<standard input>") contents
let noOpRootNotifier _ = pure ()
@ -247,7 +247,7 @@ main = withCP65001 . runInUnboundThread . Ki.scoped $ \scope -> do
Nothing -> action
Just fp -> recordRtsStats fp action
Launch isHeadless codebaseServerOpts downloadBase mayStartingPath shouldWatchFiles -> do
getCodebaseOrExit mCodePathOption SC.MigrateAfterPrompt \(initRes, _, theCodebase) -> do
getCodebaseOrExit mCodePathOption (SC.MigrateAfterPrompt SC.Backup) \(initRes, _, theCodebase) -> do
withRuntimes RTI.Persistent \(runtime, sbRuntime) -> do
rootVar <- newEmptyTMVarIO
pathVar <- newTVarIO initialPath
@ -342,7 +342,7 @@ prepareTranscriptDir shouldFork mCodePathOption shouldSaveCodebase = do
case shouldFork of
UseFork -> do
-- A forked codebase does not need to create a codebase, because it already exists
getCodebaseOrExit mCodePathOption SC.MigrateAutomatically $ const (pure ())
getCodebaseOrExit mCodePathOption (SC.MigrateAutomatically SC.Backup) $ const (pure ())
path <- Codebase.getCodebaseDir (fmap codebasePathOptionToPath mCodePathOption)
PT.putPrettyLn $
P.lines
@ -366,7 +366,7 @@ runTranscripts' progName mcodepath transcriptDir markdownFiles = do
currentDir <- getCurrentDirectory
configFilePath <- getConfigFilePath mcodepath
-- We don't need to create a codebase through `getCodebaseOrExit` as we've already done so previously.
and <$> getCodebaseOrExit (Just (DontCreateCodebaseWhenMissing transcriptDir)) SC.MigrateAutomatically \(_, codebasePath, theCodebase) -> do
and <$> getCodebaseOrExit (Just (DontCreateCodebaseWhenMissing transcriptDir)) (SC.MigrateAutomatically SC.Backup) \(_, codebasePath, theCodebase) -> do
TR.withTranscriptRunner Version.gitDescribeWithDate (Just configFilePath) $ \runTranscript -> do
for markdownFiles $ \(MarkdownFile fileName) -> do
transcriptSrc <- readUtf8 fileName

View File

@ -23,3 +23,14 @@ to `Tests.check` and `Tests.checkEqual`).
```ucm
.> run tests
```
```ucm:hide
.> builtins.merge
.> load unison-src/builtin-tests/thread-killed-typeLink-test.u
.> add
```
```ucm
.> run threadKilledTypeLinkTest
```

View File

@ -11,3 +11,9 @@ to `Tests.check` and `Tests.checkEqual`).
()
```
```ucm
.> run threadKilledTypeLinkTest
()
```

View File

@ -0,0 +1,15 @@
-- TODO Move this to concurrency-tests once the JIT supports typeLinks
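-- Sketch of the test below: the forked thread sleeps inside `catchAll`; when it is
-- killed, the resulting `Failure`'s type link is written into a Ref. The main thread
-- kills the forked thread mid-sleep, waits for the handler to run, and then checks
-- that the recorded type link is `ThreadKilledFailure`.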
threadKilledTypeLinkTest = Tests.main do
ref = IO.ref None
t = fork do
match catchAll do sleep_ (400 * millis) with
Left (Failure f _ _) -> unsafeRun! do Ref.write ref (Some f)
_ -> ()
sleep_ (200 * millis)
kill_ t
sleep_ (300 * millis)
v = Ref.read ref
expected = Some (typeLink ThreadKilledFailure)
checkEqual "Thread killed, finalisers with typeLink" v expected

File diff suppressed because it is too large

View File

@ -422,271 +422,272 @@ Let's try it!
312. io2.STM.retry : '{STM} a
313. unique type io2.STMFailure
314. builtin type io2.ThreadId
315. builtin type io2.Tls
316. builtin type io2.Tls.Cipher
317. builtin type io2.Tls.ClientConfig
318. io2.Tls.ClientConfig.certificates.set : [SignedCert]
315. unique type io2.ThreadKilledFailure
316. builtin type io2.Tls
317. builtin type io2.Tls.Cipher
318. builtin type io2.Tls.ClientConfig
319. io2.Tls.ClientConfig.certificates.set : [SignedCert]
-> ClientConfig
-> ClientConfig
319. io2.TLS.ClientConfig.ciphers.set : [Cipher]
320. io2.TLS.ClientConfig.ciphers.set : [Cipher]
-> ClientConfig
-> ClientConfig
320. io2.Tls.ClientConfig.default : Text
321. io2.Tls.ClientConfig.default : Text
-> Bytes
-> ClientConfig
321. io2.Tls.ClientConfig.versions.set : [Version]
322. io2.Tls.ClientConfig.versions.set : [Version]
-> ClientConfig
-> ClientConfig
322. io2.Tls.decodeCert.impl : Bytes
323. io2.Tls.decodeCert.impl : Bytes
-> Either Failure SignedCert
323. io2.Tls.decodePrivateKey : Bytes -> [PrivateKey]
324. io2.Tls.encodeCert : SignedCert -> Bytes
325. io2.Tls.encodePrivateKey : PrivateKey -> Bytes
326. io2.Tls.handshake.impl : Tls ->{IO} Either Failure ()
327. io2.Tls.newClient.impl : ClientConfig
324. io2.Tls.decodePrivateKey : Bytes -> [PrivateKey]
325. io2.Tls.encodeCert : SignedCert -> Bytes
326. io2.Tls.encodePrivateKey : PrivateKey -> Bytes
327. io2.Tls.handshake.impl : Tls ->{IO} Either Failure ()
328. io2.Tls.newClient.impl : ClientConfig
-> Socket
->{IO} Either Failure Tls
328. io2.Tls.newServer.impl : ServerConfig
329. io2.Tls.newServer.impl : ServerConfig
-> Socket
->{IO} Either Failure Tls
329. builtin type io2.Tls.PrivateKey
330. io2.Tls.receive.impl : Tls ->{IO} Either Failure Bytes
331. io2.Tls.send.impl : Tls -> Bytes ->{IO} Either Failure ()
332. builtin type io2.Tls.ServerConfig
333. io2.Tls.ServerConfig.certificates.set : [SignedCert]
330. builtin type io2.Tls.PrivateKey
331. io2.Tls.receive.impl : Tls ->{IO} Either Failure Bytes
332. io2.Tls.send.impl : Tls -> Bytes ->{IO} Either Failure ()
333. builtin type io2.Tls.ServerConfig
334. io2.Tls.ServerConfig.certificates.set : [SignedCert]
-> ServerConfig
-> ServerConfig
334. io2.Tls.ServerConfig.ciphers.set : [Cipher]
335. io2.Tls.ServerConfig.ciphers.set : [Cipher]
-> ServerConfig
-> ServerConfig
335. io2.Tls.ServerConfig.default : [SignedCert]
336. io2.Tls.ServerConfig.default : [SignedCert]
-> PrivateKey
-> ServerConfig
336. io2.Tls.ServerConfig.versions.set : [Version]
337. io2.Tls.ServerConfig.versions.set : [Version]
-> ServerConfig
-> ServerConfig
337. builtin type io2.Tls.SignedCert
338. io2.Tls.terminate.impl : Tls ->{IO} Either Failure ()
339. builtin type io2.Tls.Version
340. unique type io2.TlsFailure
341. builtin type io2.TVar
342. io2.TVar.new : a ->{STM} TVar a
343. io2.TVar.newIO : a ->{IO} TVar a
344. io2.TVar.read : TVar a ->{STM} a
345. io2.TVar.readIO : TVar a ->{IO} a
346. io2.TVar.swap : TVar a -> a ->{STM} a
347. io2.TVar.write : TVar a -> a ->{STM} ()
348. io2.validateSandboxed : [Term] -> a -> Boolean
349. unique type IsPropagated
350. IsPropagated.IsPropagated : IsPropagated
351. unique type IsTest
352. IsTest.IsTest : IsTest
353. unique type Link
354. builtin type Link.Term
355. Link.Term : Term -> Link
356. Link.Term.toText : Term -> Text
357. builtin type Link.Type
358. Link.Type : Type -> Link
359. builtin type List
360. List.++ : [a] -> [a] -> [a]
361. List.+: : a -> [a] -> [a]
362. List.:+ : [a] -> a -> [a]
363. List.at : Nat -> [a] -> Optional a
364. List.cons : a -> [a] -> [a]
365. List.drop : Nat -> [a] -> [a]
366. List.empty : [a]
367. List.size : [a] -> Nat
368. List.snoc : [a] -> a -> [a]
369. List.take : Nat -> [a] -> [a]
370. metadata.isPropagated : IsPropagated
371. metadata.isTest : IsTest
372. builtin type MutableArray
373. MutableArray.copyTo! : MutableArray g a
338. builtin type io2.Tls.SignedCert
339. io2.Tls.terminate.impl : Tls ->{IO} Either Failure ()
340. builtin type io2.Tls.Version
341. unique type io2.TlsFailure
342. builtin type io2.TVar
343. io2.TVar.new : a ->{STM} TVar a
344. io2.TVar.newIO : a ->{IO} TVar a
345. io2.TVar.read : TVar a ->{STM} a
346. io2.TVar.readIO : TVar a ->{IO} a
347. io2.TVar.swap : TVar a -> a ->{STM} a
348. io2.TVar.write : TVar a -> a ->{STM} ()
349. io2.validateSandboxed : [Term] -> a -> Boolean
350. unique type IsPropagated
351. IsPropagated.IsPropagated : IsPropagated
352. unique type IsTest
353. IsTest.IsTest : IsTest
354. unique type Link
355. builtin type Link.Term
356. Link.Term : Term -> Link
357. Link.Term.toText : Term -> Text
358. builtin type Link.Type
359. Link.Type : Type -> Link
360. builtin type List
361. List.++ : [a] -> [a] -> [a]
362. List.+: : a -> [a] -> [a]
363. List.:+ : [a] -> a -> [a]
364. List.at : Nat -> [a] -> Optional a
365. List.cons : a -> [a] -> [a]
366. List.drop : Nat -> [a] -> [a]
367. List.empty : [a]
368. List.size : [a] -> Nat
369. List.snoc : [a] -> a -> [a]
370. List.take : Nat -> [a] -> [a]
371. metadata.isPropagated : IsPropagated
372. metadata.isTest : IsTest
373. builtin type MutableArray
374. MutableArray.copyTo! : MutableArray g a
-> Nat
-> MutableArray g a
-> Nat
-> Nat
->{g, Exception} ()
374. MutableArray.freeze : MutableArray g a
375. MutableArray.freeze : MutableArray g a
-> Nat
-> Nat
->{g} ImmutableArray a
375. MutableArray.freeze! : MutableArray g a
376. MutableArray.freeze! : MutableArray g a
->{g} ImmutableArray a
376. MutableArray.read : MutableArray g a
377. MutableArray.read : MutableArray g a
-> Nat
->{g, Exception} a
377. MutableArray.size : MutableArray g a -> Nat
378. MutableArray.write : MutableArray g a
378. MutableArray.size : MutableArray g a -> Nat
379. MutableArray.write : MutableArray g a
-> Nat
-> a
->{g, Exception} ()
379. builtin type MutableByteArray
380. MutableByteArray.copyTo! : MutableByteArray g
380. builtin type MutableByteArray
381. MutableByteArray.copyTo! : MutableByteArray g
-> Nat
-> MutableByteArray g
-> Nat
-> Nat
->{g, Exception} ()
381. MutableByteArray.freeze : MutableByteArray g
382. MutableByteArray.freeze : MutableByteArray g
-> Nat
-> Nat
->{g} ImmutableByteArray
382. MutableByteArray.freeze! : MutableByteArray g
383. MutableByteArray.freeze! : MutableByteArray g
->{g} ImmutableByteArray
383. MutableByteArray.read16be : MutableByteArray g
384. MutableByteArray.read16be : MutableByteArray g
-> Nat
->{g, Exception} Nat
384. MutableByteArray.read24be : MutableByteArray g
385. MutableByteArray.read24be : MutableByteArray g
-> Nat
->{g, Exception} Nat
385. MutableByteArray.read32be : MutableByteArray g
386. MutableByteArray.read32be : MutableByteArray g
-> Nat
->{g, Exception} Nat
386. MutableByteArray.read40be : MutableByteArray g
387. MutableByteArray.read40be : MutableByteArray g
-> Nat
->{g, Exception} Nat
387. MutableByteArray.read64be : MutableByteArray g
388. MutableByteArray.read64be : MutableByteArray g
-> Nat
->{g, Exception} Nat
388. MutableByteArray.read8 : MutableByteArray g
389. MutableByteArray.read8 : MutableByteArray g
-> Nat
->{g, Exception} Nat
389. MutableByteArray.size : MutableByteArray g -> Nat
390. MutableByteArray.write16be : MutableByteArray g
390. MutableByteArray.size : MutableByteArray g -> Nat
391. MutableByteArray.write16be : MutableByteArray g
-> Nat
-> Nat
->{g, Exception} ()
391. MutableByteArray.write32be : MutableByteArray g
392. MutableByteArray.write32be : MutableByteArray g
-> Nat
-> Nat
->{g, Exception} ()
392. MutableByteArray.write64be : MutableByteArray g
393. MutableByteArray.write64be : MutableByteArray g
-> Nat
-> Nat
->{g, Exception} ()
393. MutableByteArray.write8 : MutableByteArray g
394. MutableByteArray.write8 : MutableByteArray g
-> Nat
-> Nat
->{g, Exception} ()
394. builtin type Nat
395. Nat.* : Nat -> Nat -> Nat
396. Nat.+ : Nat -> Nat -> Nat
397. Nat./ : Nat -> Nat -> Nat
398. Nat.and : Nat -> Nat -> Nat
399. Nat.complement : Nat -> Nat
400. Nat.drop : Nat -> Nat -> Nat
401. Nat.eq : Nat -> Nat -> Boolean
402. Nat.fromText : Text -> Optional Nat
403. Nat.gt : Nat -> Nat -> Boolean
404. Nat.gteq : Nat -> Nat -> Boolean
405. Nat.increment : Nat -> Nat
406. Nat.isEven : Nat -> Boolean
407. Nat.isOdd : Nat -> Boolean
408. Nat.leadingZeros : Nat -> Nat
409. Nat.lt : Nat -> Nat -> Boolean
410. Nat.lteq : Nat -> Nat -> Boolean
411. Nat.mod : Nat -> Nat -> Nat
412. Nat.or : Nat -> Nat -> Nat
413. Nat.popCount : Nat -> Nat
414. Nat.pow : Nat -> Nat -> Nat
415. Nat.shiftLeft : Nat -> Nat -> Nat
416. Nat.shiftRight : Nat -> Nat -> Nat
417. Nat.sub : Nat -> Nat -> Int
418. Nat.toFloat : Nat -> Float
419. Nat.toInt : Nat -> Int
420. Nat.toText : Nat -> Text
421. Nat.trailingZeros : Nat -> Nat
422. Nat.xor : Nat -> Nat -> Nat
423. structural type Optional a
424. Optional.None : Optional a
425. Optional.Some : a -> Optional a
426. builtin type Pattern
427. Pattern.capture : Pattern a -> Pattern a
428. Pattern.isMatch : Pattern a -> a -> Boolean
429. Pattern.join : [Pattern a] -> Pattern a
430. Pattern.many : Pattern a -> Pattern a
431. Pattern.or : Pattern a -> Pattern a -> Pattern a
432. Pattern.replicate : Nat -> Nat -> Pattern a -> Pattern a
433. Pattern.run : Pattern a -> a -> Optional ([a], a)
434. builtin type Ref
435. Ref.read : Ref g a ->{g} a
436. Ref.write : Ref g a -> a ->{g} ()
437. builtin type Request
438. builtin type Scope
439. Scope.array : Nat ->{Scope s} MutableArray (Scope s) a
440. Scope.arrayOf : a
395. builtin type Nat
396. Nat.* : Nat -> Nat -> Nat
397. Nat.+ : Nat -> Nat -> Nat
398. Nat./ : Nat -> Nat -> Nat
399. Nat.and : Nat -> Nat -> Nat
400. Nat.complement : Nat -> Nat
401. Nat.drop : Nat -> Nat -> Nat
402. Nat.eq : Nat -> Nat -> Boolean
403. Nat.fromText : Text -> Optional Nat
404. Nat.gt : Nat -> Nat -> Boolean
405. Nat.gteq : Nat -> Nat -> Boolean
406. Nat.increment : Nat -> Nat
407. Nat.isEven : Nat -> Boolean
408. Nat.isOdd : Nat -> Boolean
409. Nat.leadingZeros : Nat -> Nat
410. Nat.lt : Nat -> Nat -> Boolean
411. Nat.lteq : Nat -> Nat -> Boolean
412. Nat.mod : Nat -> Nat -> Nat
413. Nat.or : Nat -> Nat -> Nat
414. Nat.popCount : Nat -> Nat
415. Nat.pow : Nat -> Nat -> Nat
416. Nat.shiftLeft : Nat -> Nat -> Nat
417. Nat.shiftRight : Nat -> Nat -> Nat
418. Nat.sub : Nat -> Nat -> Int
419. Nat.toFloat : Nat -> Float
420. Nat.toInt : Nat -> Int
421. Nat.toText : Nat -> Text
422. Nat.trailingZeros : Nat -> Nat
423. Nat.xor : Nat -> Nat -> Nat
424. structural type Optional a
425. Optional.None : Optional a
426. Optional.Some : a -> Optional a
427. builtin type Pattern
428. Pattern.capture : Pattern a -> Pattern a
429. Pattern.isMatch : Pattern a -> a -> Boolean
430. Pattern.join : [Pattern a] -> Pattern a
431. Pattern.many : Pattern a -> Pattern a
432. Pattern.or : Pattern a -> Pattern a -> Pattern a
433. Pattern.replicate : Nat -> Nat -> Pattern a -> Pattern a
434. Pattern.run : Pattern a -> a -> Optional ([a], a)
435. builtin type Ref
436. Ref.read : Ref g a ->{g} a
437. Ref.write : Ref g a -> a ->{g} ()
438. builtin type Request
439. builtin type Scope
440. Scope.array : Nat ->{Scope s} MutableArray (Scope s) a
441. Scope.arrayOf : a
-> Nat
->{Scope s} MutableArray (Scope s) a
441. Scope.bytearray : Nat
442. Scope.bytearray : Nat
->{Scope s} MutableByteArray (Scope s)
442. Scope.bytearrayOf : Nat
443. Scope.bytearrayOf : Nat
-> Nat
->{Scope s} MutableByteArray
(Scope s)
443. Scope.ref : a ->{Scope s} Ref {Scope s} a
444. Scope.run : (∀ s. '{g, Scope s} r) ->{g} r
445. structural type SeqView a b
446. SeqView.VElem : a -> b -> SeqView a b
447. SeqView.VEmpty : SeqView a b
448. Socket.toText : Socket -> Text
449. unique type Test.Result
450. Test.Result.Fail : Text -> Result
451. Test.Result.Ok : Text -> Result
452. builtin type Text
453. Text.!= : Text -> Text -> Boolean
454. Text.++ : Text -> Text -> Text
455. Text.drop : Nat -> Text -> Text
456. Text.empty : Text
457. Text.eq : Text -> Text -> Boolean
458. Text.fromCharList : [Char] -> Text
459. Text.fromUtf8.impl : Bytes -> Either Failure Text
460. Text.gt : Text -> Text -> Boolean
461. Text.gteq : Text -> Text -> Boolean
462. Text.lt : Text -> Text -> Boolean
463. Text.lteq : Text -> Text -> Boolean
464. Text.patterns.anyChar : Pattern Text
465. Text.patterns.char : Class -> Pattern Text
466. Text.patterns.charIn : [Char] -> Pattern Text
467. Text.patterns.charRange : Char -> Char -> Pattern Text
468. Text.patterns.digit : Pattern Text
469. Text.patterns.eof : Pattern Text
470. Text.patterns.letter : Pattern Text
471. Text.patterns.literal : Text -> Pattern Text
472. Text.patterns.notCharIn : [Char] -> Pattern Text
473. Text.patterns.notCharRange : Char -> Char -> Pattern Text
474. Text.patterns.punctuation : Pattern Text
475. Text.patterns.space : Pattern Text
476. Text.repeat : Nat -> Text -> Text
477. Text.reverse : Text -> Text
478. Text.size : Text -> Nat
479. Text.take : Nat -> Text -> Text
480. Text.toCharList : Text -> [Char]
481. Text.toLowercase : Text -> Text
482. Text.toUppercase : Text -> Text
483. Text.toUtf8 : Text -> Bytes
484. Text.uncons : Text -> Optional (Char, Text)
485. Text.unsnoc : Text -> Optional (Text, Char)
486. ThreadId.toText : ThreadId -> Text
487. todo : a -> b
488. structural type Tuple a b
489. Tuple.Cons : a -> b -> Tuple a b
490. structural type Unit
491. Unit.Unit : ()
492. Universal.< : a -> a -> Boolean
493. Universal.<= : a -> a -> Boolean
494. Universal.== : a -> a -> Boolean
495. Universal.> : a -> a -> Boolean
496. Universal.>= : a -> a -> Boolean
497. Universal.compare : a -> a -> Int
498. Universal.murmurHash : a -> Nat
499. unsafe.coerceAbilities : (a ->{e1} b) -> a ->{e2} b
500. builtin type Value
501. Value.dependencies : Value -> [Term]
502. Value.deserialize : Bytes -> Either Text Value
503. Value.load : Value ->{IO} Either [Term] a
504. Value.serialize : Value -> Bytes
505. Value.value : a -> Value
444. Scope.ref : a ->{Scope s} Ref {Scope s} a
445. Scope.run : (∀ s. '{g, Scope s} r) ->{g} r
446. structural type SeqView a b
447. SeqView.VElem : a -> b -> SeqView a b
448. SeqView.VEmpty : SeqView a b
449. Socket.toText : Socket -> Text
450. unique type Test.Result
451. Test.Result.Fail : Text -> Result
452. Test.Result.Ok : Text -> Result
453. builtin type Text
454. Text.!= : Text -> Text -> Boolean
455. Text.++ : Text -> Text -> Text
456. Text.drop : Nat -> Text -> Text
457. Text.empty : Text
458. Text.eq : Text -> Text -> Boolean
459. Text.fromCharList : [Char] -> Text
460. Text.fromUtf8.impl : Bytes -> Either Failure Text
461. Text.gt : Text -> Text -> Boolean
462. Text.gteq : Text -> Text -> Boolean
463. Text.lt : Text -> Text -> Boolean
464. Text.lteq : Text -> Text -> Boolean
465. Text.patterns.anyChar : Pattern Text
466. Text.patterns.char : Class -> Pattern Text
467. Text.patterns.charIn : [Char] -> Pattern Text
468. Text.patterns.charRange : Char -> Char -> Pattern Text
469. Text.patterns.digit : Pattern Text
470. Text.patterns.eof : Pattern Text
471. Text.patterns.letter : Pattern Text
472. Text.patterns.literal : Text -> Pattern Text
473. Text.patterns.notCharIn : [Char] -> Pattern Text
474. Text.patterns.notCharRange : Char -> Char -> Pattern Text
475. Text.patterns.punctuation : Pattern Text
476. Text.patterns.space : Pattern Text
477. Text.repeat : Nat -> Text -> Text
478. Text.reverse : Text -> Text
479. Text.size : Text -> Nat
480. Text.take : Nat -> Text -> Text
481. Text.toCharList : Text -> [Char]
482. Text.toLowercase : Text -> Text
483. Text.toUppercase : Text -> Text
484. Text.toUtf8 : Text -> Bytes
485. Text.uncons : Text -> Optional (Char, Text)
486. Text.unsnoc : Text -> Optional (Text, Char)
487. ThreadId.toText : ThreadId -> Text
488. todo : a -> b
489. structural type Tuple a b
490. Tuple.Cons : a -> b -> Tuple a b
491. structural type Unit
492. Unit.Unit : ()
493. Universal.< : a -> a -> Boolean
494. Universal.<= : a -> a -> Boolean
495. Universal.== : a -> a -> Boolean
496. Universal.> : a -> a -> Boolean
497. Universal.>= : a -> a -> Boolean
498. Universal.compare : a -> a -> Int
499. Universal.murmurHash : a -> Nat
500. unsafe.coerceAbilities : (a ->{e1} b) -> a ->{e2} b
501. builtin type Value
502. Value.dependencies : Value -> [Term]
503. Value.deserialize : Bytes -> Either Text Value
504. Value.load : Value ->{IO} Either [Term] a
505. Value.serialize : Value -> Bytes
506. Value.value : a -> Value
.builtin> alias.many 94-104 .mylib

View File

@ -74,7 +74,7 @@ The `builtins.merge` command adds the known builtins to a `builtin` subnamespace
63. Value/ (5 terms)
64. bug (a -> b)
65. crypto/ (12 terms, 1 type)
66. io2/ (131 terms, 31 types)
66. io2/ (131 terms, 32 types)
67. metadata/ (2 terms)
68. todo (a -> b)
69. unsafe/ (1 term)

View File

@ -23,7 +23,7 @@ Technically, the definitions all exist, but they have no names. `builtins.merge`
.foo> ls
1. builtin/ (440 terms, 65 types)
1. builtin/ (440 terms, 66 types)
```
And for a limited time, you can get even more builtin goodies:
@ -35,7 +35,7 @@ And for a limited time, you can get even more builtin goodies:
.foo> ls
1. builtin/ (612 terms, 83 types)
1. builtin/ (612 terms, 84 types)
```
More typically, you'd start out by pulling `base`.

View File

@ -113,13 +113,13 @@ it's still in the `history` of the parent namespace and can be resurrected at an
Note: The most recent namespace hash is immediately below this
message.
⊙ 1. #pfspc3m714
⊙ 1. #9k2k834edc
- Deletes:
feature1.y
⊙ 2. #t9gdv3652e
⊙ 2. #d9u1nnu62h
+ Adds / updates:
@ -130,26 +130,26 @@ it's still in the `history` of the parent namespace and can be resurrected at an
Original name New name(s)
feature1.y master.y
⊙ 3. #fnah4umom7
⊙ 3. #8ir51a10kg
+ Adds / updates:
feature1.y
⊙ 4. #fsd9t403lp
⊙ 4. #v03np1853n
> Moves:
Original name New name
x master.x
⊙ 5. #64tba28sdf
⊙ 5. #1fnkqqd2ae
+ Adds / updates:
x
□ 6. #uql7vkh78v (start of history)
□ 6. #b3jsb4pjcl (start of history)
```
To resurrect an old version of a namespace, you can learn its hash via the `history` command, then use `fork #namespacehash .newname`.

View File

@ -269,7 +269,7 @@ I should be able to move the root into a sub-namespace
.> ls
1. root/ (617 terms, 84 types)
1. root/ (617 terms, 85 types)
.> history
@ -278,13 +278,13 @@ I should be able to move the root into a sub-namespace
□ 1. #g5nn5l3b03 (start of history)
□ 1. #hu6p22qbe4 (start of history)
```
```ucm
.> ls .root.at.path
1. builtin/ (612 terms, 83 types)
1. builtin/ (612 terms, 84 types)
2. existing/ (1 term)
3. happy/ (3 terms, 1 type)
4. history/ (1 term)
@ -294,7 +294,7 @@ I should be able to move the root into a sub-namespace
Note: The most recent namespace hash is immediately below this
message.
⊙ 1. #vt3jsa8k80
⊙ 1. #8rae339vn9
- Deletes:
@ -305,7 +305,7 @@ I should be able to move the root into a sub-namespace
Original name New name
existing.a.termInA existing.b.termInA
⊙ 2. #b2h3s5rv29
⊙ 2. #r697qei02b
+ Adds / updates:
@ -317,26 +317,26 @@ I should be able to move the root into a sub-namespace
happy.b.termInA existing.a.termInA
history.b.termInA existing.a.termInA
⊙ 3. #7v6bvecsm0
⊙ 3. #ca0k6ug92t
+ Adds / updates:
existing.a.termInA existing.b.termInB
⊙ 4. #1uf0leagkk
⊙ 4. #7nnrjj85km
> Moves:
Original name New name
history.a.termInA history.b.termInA
⊙ 5. #a3uao3fp6q
⊙ 5. #01cflse46b
- Deletes:
history.b.termInB
⊙ 6. #umd1mp6mku
⊙ 6. #cna2qqmgcq
+ Adds / updates:
@ -347,13 +347,13 @@ I should be able to move the root into a sub-namespace
Original name New name(s)
happy.b.termInA history.a.termInA
⊙ 7. #dqfd14almm
⊙ 7. #qoce7dt2h1
+ Adds / updates:
history.a.termInA history.b.termInB
⊙ 8. #ljk3oa07ld
⊙ 8. #g8m2g6bd1g
> Moves:
@ -363,7 +363,7 @@ I should be able to move the root into a sub-namespace
happy.a.T.T2 happy.b.T.T2
happy.a.termInA happy.b.termInA
⊙ 9. #hhun973gp5
⊙ 9. #ngl33npfah
+ Adds / updates:
@ -373,7 +373,7 @@ I should be able to move the root into a sub-namespace
happy.a.T.T
⊙ 10. #8ri4h5gjvo
⊙ 10. #1rnd8dpisq
+ Adds / updates:
@ -385,7 +385,7 @@ I should be able to move the root into a sub-namespace
⊙ 11. #ahcsbbqt21
⊙ 11. #7s9j9tscke
```

File diff suppressed because it is too large

View File

@ -59,17 +59,17 @@ y = 2
most recent, along with the command that got us there. Try:
`fork 2 .old`
`fork #s4kjl4lbf3 .old` to make an old namespace
`fork #30tl6rkfqn .old` to make an old namespace
accessible again,
`reset-root #s4kjl4lbf3` to reset the root namespace and
`reset-root #30tl6rkfqn` to reset the root namespace and
its history to that of the
specified namespace.
When Root Hash Action
1. now #hr821c0ji5 add
2. now #s4kjl4lbf3 add
3. now #92606li9fc builtins.merge
1. now #4t4aoo9vnt add
2. now #30tl6rkfqn add
3. now #mfof2amrm2 builtins.merge
4. #sg60bvjo91 history starts here
Tip: Use `diff.namespace 1 7` to compare namespaces between

View File

@ -13,7 +13,7 @@ Let's look at some examples. We'll start with a namespace with just the builtins
□ 1. #3jjj6quqhh (start of history)
□ 1. #uqucjk22if (start of history)
.> fork builtin builtin2
@ -42,21 +42,21 @@ Now suppose we `fork` a copy of builtin, then rename `Nat.+` to `frobnicate`, th
Note: The most recent namespace hash is immediately below this
message.
⊙ 1. #6qm36657l4
⊙ 1. #2g254tkpvt
> Moves:
Original name New name
Nat.frobnicate Nat.+
⊙ 2. #72ip8q9i3l
⊙ 2. #s54qi5ddk7
> Moves:
Original name New name
Nat.+ Nat.frobnicate
□ 3. #3jjj6quqhh (start of history)
□ 3. #uqucjk22if (start of history)
```
If we merge that back into `builtin`, we get that same chain of history:
@ -71,21 +71,21 @@ If we merge that back into `builtin`, we get that same chain of history:
Note: The most recent namespace hash is immediately below this
message.
⊙ 1. #6qm36657l4
⊙ 1. #2g254tkpvt
> Moves:
Original name New name
Nat.frobnicate Nat.+
⊙ 2. #72ip8q9i3l
⊙ 2. #s54qi5ddk7
> Moves:
Original name New name
Nat.+ Nat.frobnicate
□ 3. #3jjj6quqhh (start of history)
□ 3. #uqucjk22if (start of history)
```
Let's try again, but using a `merge.squash` (or just `squash`) instead. The history will be unchanged:
@ -106,7 +106,7 @@ Let's try again, but using a `merge.squash` (or just `squash`) instead. The hist
□ 1. #3jjj6quqhh (start of history)
□ 1. #uqucjk22if (start of history)
```
The churn that happened in `mybuiltin` namespace ended up back in the same spot, so the squash merge of that namespace with our original namespace had no effect.
@ -485,13 +485,13 @@ This checks to see that squashing correctly preserves deletions:
Note: The most recent namespace hash is immediately below this
message.
⊙ 1. #p9ur8e0jlu
⊙ 1. #9ootvo2tgi
- Deletes:
Nat.* Nat.+
□ 2. #3jjj6quqhh (start of history)
□ 2. #uqucjk22if (start of history)
```
Notice that `Nat.+` and `Nat.*` are deleted by the squash, and we see them deleted in one atomic step in the history.