Mirror of https://github.com/MichaelMure/git-bug.git (synced 2024-12-14 08:45:30 +03:00)

commit 8d63c983c9 (parent 4ef92efeb9)

    WIP
@@ -426,7 +426,7 @@ func (bug *Bug) Commit(repo repository.ClockedRepo) error {

// Write a Git commit referencing the tree, with the previous commit as parent
if bug.lastCommit != "" {
hash, err = repo.StoreCommitWithParent(hash, bug.lastCommit)
hash, err = repo.StoreCommit(hash, bug.lastCommit)
} else {
hash, err = repo.StoreCommit(hash)
}
@@ -524,7 +524,7 @@ func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
}

// create a new commit with the correct ancestor
hash, err := repo.StoreCommitWithParent(treeHash, bug.lastCommit)
hash, err := repo.StoreCommit(treeHash, bug.lastCommit)

if err != nil {
return false, err
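Both hunks above reflect the same repository API change that recurs throughout this commit: StoreCommitWithParent disappears in favor of a StoreCommit that takes optional parent hashes. The exact interface is not part of this diff; a minimal sketch of the assumed new signatures, inferred from the call sites in this commit:

// Hypothetical excerpt of the updated repository interface: parents become
// variadic, which makes a separate StoreCommitWithParent unnecessary.
package repository

import "golang.org/x/crypto/openpgp"

type Hash string

type commitStorer interface {
	// StoreCommit writes a commit referencing treeHash, with zero or more parents.
	StoreCommit(treeHash Hash, parents ...Hash) (Hash, error)
	// StoreSignedCommit does the same and signs the commit with the given key.
	StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error)
}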
entity/TODO (new file, 8 lines)
@@ -0,0 +1,8 @@
- is the pack Lamport clock really useful vs only topological sort?
- topological order is enforced on the clocks, so what's the point?
- is EditTime equivalent to PackTime? no, avoid the gaps. Is it better?
- how to do commit signature?
- how to avoid id collision between Operations?
- write tests for actions
- migrate Bug to the new structure
- migrate Identity to the new structure?
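The first two items question whether the dedicated "pack" Lamport clock adds anything over the topological order of the commit DAG. For context, a minimal sketch of how the lamport clocks used in this commit are driven (the same util/lamport calls that appear in entity.go below; the concrete values are assumptions):

package main

import (
	"fmt"

	"github.com/MichaelMure/git-bug/util/lamport"
)

func main() {
	// One clock per entity namespace, e.g. "foos-edit".
	clock := lamport.NewMemClock()

	// A local write increments the clock and stamps the new operationPack.
	editTime, _ := clock.Increment()

	// Reading a pack from a remote witnesses its time, so the next local
	// write is guaranteed to sort after everything already seen.
	_ = clock.Witness(editTime + 40)
	next, _ := clock.Increment() // strictly greater than the witnessed time

	fmt.Println(editTime, next)
}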
entity/dag/common_test.go (new file, 137 lines)
@@ -0,0 +1,137 @@
|
||||
package dag
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/MichaelMure/git-bug/entity"
|
||||
"github.com/MichaelMure/git-bug/identity"
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
)
|
||||
|
||||
// This file contains an example dummy entity to be used in the tests
|
||||
|
||||
/*
|
||||
Operations
|
||||
*/
|
||||
|
||||
type op1 struct {
|
||||
author identity.Interface
|
||||
|
||||
OperationType int `json:"type"`
|
||||
Field1 string `json:"field_1"`
|
||||
}
|
||||
|
||||
func newOp1(author identity.Interface, field1 string) *op1 {
|
||||
return &op1{author: author, OperationType: 1, Field1: field1}
|
||||
}
|
||||
|
||||
func (o op1) Id() entity.Id {
|
||||
data, _ := json.Marshal(o)
|
||||
return entity.DeriveId(data)
|
||||
}
|
||||
|
||||
func (o op1) Author() identity.Interface {
|
||||
return o.author
|
||||
}
|
||||
|
||||
func (o op1) Validate() error { return nil }
|
||||
|
||||
type op2 struct {
|
||||
author identity.Interface
|
||||
|
||||
OperationType int `json:"type"`
|
||||
Field2 string `json:"field_2"`
|
||||
}
|
||||
|
||||
func newOp2(author identity.Interface, field2 string) *op2 {
|
||||
return &op2{author: author, OperationType: 2, Field2: field2}
|
||||
}
|
||||
|
||||
func (o op2) Id() entity.Id {
|
||||
data, _ := json.Marshal(o)
|
||||
return entity.DeriveId(data)
|
||||
}
|
||||
|
||||
func (o op2) Author() identity.Interface {
|
||||
return o.author
|
||||
}
|
||||
|
||||
func (o op2) Validate() error { return nil }
|
||||
|
||||
func unmarshaler(author identity.Interface, raw json.RawMessage) (Operation, error) {
|
||||
var t struct {
|
||||
OperationType int `json:"type"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(raw, &t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch t.OperationType {
|
||||
case 1:
|
||||
op := &op1{}
|
||||
err := json.Unmarshal(raw, &op)
|
||||
op.author = author
|
||||
return op, err
|
||||
case 2:
|
||||
op := &op2{}
|
||||
err := json.Unmarshal(raw, &op)
|
||||
op.author = author
|
||||
return op, err
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown operation type %v", t.OperationType)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Identities + repo + definition
|
||||
*/
|
||||
|
||||
func makeTestContext() (repository.ClockedRepo, identity.Interface, identity.Interface, Definition) {
|
||||
repo := repository.NewMockRepo()
|
||||
|
||||
id1, err := identity.NewIdentity(repo, "name1", "email1")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = id1.Commit(repo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
id2, err := identity.NewIdentity(repo, "name2", "email2")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = id2.Commit(repo)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resolver := identityResolverFunc(func(id entity.Id) (identity.Interface, error) {
|
||||
switch id {
|
||||
case id1.Id():
|
||||
return id1, nil
|
||||
case id2.Id():
|
||||
return id2, nil
|
||||
default:
|
||||
return nil, identity.ErrIdentityNotExist
|
||||
}
|
||||
})
|
||||
|
||||
def := Definition{
|
||||
typename: "foo",
|
||||
namespace: "foos",
|
||||
operationUnmarshaler: unmarshaler,
|
||||
identityResolver: resolver,
|
||||
formatVersion: 1,
|
||||
}
|
||||
|
||||
return repo, id1, id2, def
|
||||
}
|
||||
|
||||
type identityResolverFunc func(id entity.Id) (identity.Interface, error)
|
||||
|
||||
func (fn identityResolverFunc) ResolveIdentity(id entity.Id) (identity.Interface, error) {
|
||||
return fn(id)
|
||||
}
|
@@ -1,4 +1,6 @@
package entity
// Package dag contains the base common code to define an entity stored
// in a chain of git objects, supporting actions like Push, Pull and Merge.
package dag

import (
"encoding/json"
@@ -7,6 +9,8 @@ import (

"github.com/pkg/errors"

"github.com/MichaelMure/git-bug/entity"
"github.com/MichaelMure/git-bug/identity"
"github.com/MichaelMure/git-bug/repository"
"github.com/MichaelMure/git-bug/util/lamport"
)
@@ -15,12 +19,6 @@ const refsPattern = "refs/%s/%s"
const creationClockPattern = "%s-create"
const editClockPattern = "%s-edit"

type Operation interface {
Id() Id
// MarshalJSON() ([]byte, error)
Validate() error
}

// Definition hold the details defining one specialization of an Entity.
type Definition struct {
// the name of the entity (bug, pull-request, ...)
@ -28,29 +26,40 @@ type Definition struct {
|
||||
// the namespace in git (bugs, prs, ...)
|
||||
namespace string
|
||||
// a function decoding a JSON message into an Operation
|
||||
operationUnmarshaler func(raw json.RawMessage) (Operation, error)
|
||||
// the expected format version number
|
||||
operationUnmarshaler func(author identity.Interface, raw json.RawMessage) (Operation, error)
|
||||
// a function loading an identity.Identity from its Id
|
||||
identityResolver identity.Resolver
|
||||
// the expected format version number, that can be used for data migration/upgrade
|
||||
formatVersion uint
|
||||
}
|
||||
|
||||
// Entity is a data structure stored in a chain of git objects, supporting actions like Push, Pull and Merge.
|
||||
type Entity struct {
|
||||
Definition
|
||||
|
||||
ops []Operation
|
||||
// operations that are already stored in the repository
|
||||
ops []Operation
|
||||
// operations not yet stored in the repository
|
||||
staging []Operation
|
||||
|
||||
packClock lamport.Clock
|
||||
// TODO: add here createTime and editTime
|
||||
|
||||
// // TODO: doesn't seems to actually be useful over the topological sort ? Timestamp can be generated from graph depth
|
||||
// // TODO: maybe EditTime is better because it could spread ops in consecutive groups on the logical timeline --> avoid interleaving
|
||||
// packClock lamport.Clock
|
||||
lastCommit repository.Hash
|
||||
}
|
||||
|
||||
// New create an empty Entity
|
||||
func New(definition Definition) *Entity {
|
||||
return &Entity{
|
||||
Definition: definition,
|
||||
packClock: lamport.NewMemClock(),
|
||||
// packClock: lamport.NewMemClock(),
|
||||
}
|
||||
}
|
||||
|
||||
func Read(def Definition, repo repository.ClockedRepo, id Id) (*Entity, error) {
|
||||
// Read will read and decode a stored Entity from a repository
|
||||
func Read(def Definition, repo repository.ClockedRepo, id entity.Id) (*Entity, error) {
|
||||
if err := id.Validate(); err != nil {
|
||||
return nil, errors.Wrap(err, "invalid id")
|
||||
}
|
||||
@ -109,33 +118,34 @@ func read(def Definition, repo repository.ClockedRepo, ref string) (*Entity, err
|
||||
|
||||
oppMap := make(map[repository.Hash]*operationPack)
|
||||
var opsCount int
|
||||
var packClock = lamport.NewMemClock()
|
||||
// var packClock = lamport.NewMemClock()
|
||||
|
||||
for i := len(DFSOrder) - 1; i >= 0; i-- {
|
||||
commit := DFSOrder[i]
|
||||
firstCommit := i == len(DFSOrder)-1
|
||||
isFirstCommit := i == len(DFSOrder)-1
|
||||
isMerge := len(commit.Parents) > 1
|
||||
|
||||
// Verify DAG structure: single chronological root, so only the root
|
||||
// can have no parents
|
||||
if !firstCommit && len(commit.Parents) == 0 {
|
||||
return nil, fmt.Errorf("multiple root in the entity DAG")
|
||||
// can have no parents. Said otherwise, the DAG need to have exactly
|
||||
// one leaf.
|
||||
if !isFirstCommit && len(commit.Parents) == 0 {
|
||||
return nil, fmt.Errorf("multiple leafs in the entity DAG")
|
||||
}
|
||||
|
||||
opp, err := readOperationPack(def, repo, commit.TreeHash)
|
||||
opp, err := readOperationPack(def, repo, commit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check that the lamport clocks are set
|
||||
if firstCommit && opp.CreateTime <= 0 {
|
||||
err = opp.Validate()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Check that the create lamport clock is set (not checked in Validate() as it's optional)
|
||||
if isFirstCommit && opp.CreateTime <= 0 {
|
||||
return nil, fmt.Errorf("creation lamport time not set")
|
||||
}
|
||||
if opp.EditTime <= 0 {
|
||||
return nil, fmt.Errorf("edition lamport time not set")
|
||||
}
|
||||
if opp.PackTime <= 0 {
|
||||
return nil, fmt.Errorf("pack lamport time not set")
|
||||
}
|
||||
|
||||
// make sure that the lamport clocks causality match the DAG topology
|
||||
for _, parentHash := range commit.Parents {
|
||||
@ -150,9 +160,13 @@ func read(def Definition, repo repository.ClockedRepo, ref string) (*Entity, err
|
||||
|
||||
// to avoid an attack where clocks are pushed toward the uint64 rollover, make sure
|
||||
// that the clocks don't jump too far in the future
|
||||
if opp.EditTime-parentPack.EditTime > 10_000 {
|
||||
// we ignore merge commits here to allow merging after a loooong time without breaking anything,
|
||||
// as long as there is one valid chain of small hops, it's fine.
|
||||
if !isMerge && opp.EditTime-parentPack.EditTime > 1_000_000 {
|
||||
return nil, fmt.Errorf("lamport clock jumping too far in the future, likely an attack")
|
||||
}
|
||||
|
||||
// TODO: PackTime is not checked
|
||||
}
|
||||
|
||||
oppMap[commit.Hash] = opp
|
||||
@ -169,10 +183,10 @@ func read(def Definition, repo repository.ClockedRepo, ref string) (*Entity, err
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = packClock.Witness(opp.PackTime)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// err = packClock.Witness(opp.PackTime)
|
||||
// if err != nil {
|
||||
// return nil, err
|
||||
// }
|
||||
}
|
||||
|
||||
// Now that we know that the topological order and clocks are fine, we order the operationPacks
|
||||
@ -185,20 +199,20 @@ func read(def Definition, repo repository.ClockedRepo, ref string) (*Entity, err
|
||||
sort.Slice(oppSlice, func(i, j int) bool {
|
||||
// Primary ordering with the dedicated "pack" Lamport time that encode causality
|
||||
// within the entity
|
||||
if oppSlice[i].PackTime != oppSlice[j].PackTime {
|
||||
return oppSlice[i].PackTime < oppSlice[i].PackTime
|
||||
}
|
||||
// if oppSlice[i].PackTime != oppSlice[j].PackTime {
|
||||
// return oppSlice[i].PackTime < oppSlice[i].PackTime
|
||||
// }
|
||||
// We have equal PackTime, which means we had a concurrent edition. We can't tell which exactly
|
||||
// came first. As a secondary arbitrary ordering, we can use the EditTime. It's unlikely to be
|
||||
// enough but it can give us an edge to approach what really happened.
|
||||
if oppSlice[i].EditTime != oppSlice[j].EditTime {
|
||||
return oppSlice[i].EditTime < oppSlice[j].EditTime
|
||||
}
|
||||
// Well, what now? We still need a total ordering, the most stable possible.
|
||||
// Well, what now? We still need a total ordering and the most stable possible.
|
||||
// As a last resort, we can order based on a hash of the serialized Operations in the
|
||||
// operationPack. It doesn't carry much meaning but it's unbiased and hard to abuse.
|
||||
// This is a lexicographic ordering.
|
||||
return oppSlice[i].Id < oppSlice[j].Id
|
||||
// This is a lexicographic ordering on the stringified ID.
|
||||
return oppSlice[i].Id() < oppSlice[j].Id()
|
||||
})
|
||||
|
||||
// Now that we ordered the operationPacks, we have the order of the Operations
|
||||
@ -213,16 +227,18 @@ func read(def Definition, repo repository.ClockedRepo, ref string) (*Entity, err
|
||||
return &Entity{
|
||||
Definition: def,
|
||||
ops: ops,
|
||||
// packClock: packClock,
|
||||
lastCommit: rootHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Id return the Entity identifier
|
||||
func (e *Entity) Id() Id {
|
||||
func (e *Entity) Id() entity.Id {
|
||||
// id is the id of the first operation
|
||||
return e.FirstOp().Id()
|
||||
}
|
||||
|
||||
// Validate check if the Entity data is valid
|
||||
func (e *Entity) Validate() error {
|
||||
// non-empty
|
||||
if len(e.ops) == 0 && len(e.staging) == 0 {
|
||||
@ -244,7 +260,7 @@ func (e *Entity) Validate() error {
|
||||
}
|
||||
|
||||
// Check that there is no colliding operation's ID
|
||||
ids := make(map[Id]struct{})
|
||||
ids := make(map[entity.Id]struct{})
|
||||
for _, op := range e.Operations() {
|
||||
if _, ok := ids[op.Id()]; ok {
|
||||
return fmt.Errorf("id collision: %s", op.Id())
|
||||
@ -255,12 +271,12 @@ func (e *Entity) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// return the ordered operations
|
||||
// Operations return the ordered operations
|
||||
func (e *Entity) Operations() []Operation {
|
||||
return append(e.ops, e.staging...)
|
||||
}
|
||||
|
||||
// Lookup for the very first operation of the Entity.
|
||||
// FirstOp lookup for the very first operation of the Entity
|
||||
func (e *Entity) FirstOp() Operation {
|
||||
for _, op := range e.ops {
|
||||
return op
|
||||
@ -271,14 +287,29 @@ func (e *Entity) FirstOp() Operation {
|
||||
return nil
|
||||
}
|
||||
|
||||
// LastOp lookup for the very last operation of the Entity
|
||||
func (e *Entity) LastOp() Operation {
|
||||
if len(e.staging) > 0 {
|
||||
return e.staging[len(e.staging)-1]
|
||||
}
|
||||
if len(e.ops) > 0 {
|
||||
return e.ops[len(e.ops)-1]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append add a new Operation to the Entity
|
||||
func (e *Entity) Append(op Operation) {
|
||||
e.staging = append(e.staging, op)
|
||||
}
|
||||
|
||||
// NeedCommit indicate if the in-memory state changed and need to be commit in the repository
|
||||
func (e *Entity) NeedCommit() bool {
|
||||
return len(e.staging) > 0
|
||||
}
|
||||
|
||||
// CommitAdNeeded execute a Commit only if necessary. This function is useful to avoid getting an error if the Entity
|
||||
// is already in sync with the repository.
|
||||
func (e *Entity) CommitAdNeeded(repo repository.ClockedRepo) error {
|
||||
if e.NeedCommit() {
|
||||
return e.Commit(repo)
|
||||
@ -286,6 +317,7 @@ func (e *Entity) CommitAdNeeded(repo repository.ClockedRepo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Commit write the appended operations in the repository
|
||||
// TODO: support commit signature
|
||||
func (e *Entity) Commit(repo repository.ClockedRepo) error {
|
||||
if !e.NeedCommit() {
|
||||
@ -296,11 +328,19 @@ func (e *Entity) Commit(repo repository.ClockedRepo) error {
|
||||
return errors.Wrapf(err, "can't commit a %s with invalid data", e.Definition.typename)
|
||||
}
|
||||
|
||||
// increment the various clocks for this new operationPack
|
||||
packTime, err := e.packClock.Increment()
|
||||
if err != nil {
|
||||
return err
|
||||
var author identity.Interface
|
||||
for _, op := range e.staging {
|
||||
if author != nil && op.Author() != author {
|
||||
return fmt.Errorf("operations with different author")
|
||||
}
|
||||
author = op.Author()
|
||||
}
|
||||
|
||||
// increment the various clocks for this new operationPack
|
||||
// packTime, err := e.packClock.Increment()
|
||||
// if err != nil {
|
||||
// return err
|
||||
// }
|
||||
editTime, err := repo.Increment(fmt.Sprintf(editClockPattern, e.namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
@ -314,13 +354,14 @@ func (e *Entity) Commit(repo repository.ClockedRepo) error {
|
||||
}
|
||||
|
||||
opp := &operationPack{
|
||||
Author: author,
|
||||
Operations: e.staging,
|
||||
CreateTime: creationTime,
|
||||
EditTime: editTime,
|
||||
PackTime: packTime,
|
||||
// PackTime: packTime,
|
||||
}
|
||||
|
||||
treeHash, err := opp.write(e.Definition, repo)
|
||||
treeHash, err := opp.Write(e.Definition, repo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -328,7 +369,7 @@ func (e *Entity) Commit(repo repository.ClockedRepo) error {
|
||||
// Write a Git commit referencing the tree, with the previous commit as parent
|
||||
var commitHash repository.Hash
|
||||
if e.lastCommit != "" {
|
||||
commitHash, err = repo.StoreCommitWithParent(treeHash, e.lastCommit)
|
||||
commitHash, err = repo.StoreCommit(treeHash, e.lastCommit)
|
||||
} else {
|
||||
commitHash, err = repo.StoreCommit(treeHash)
|
||||
}
|
entity/dag/entity_actions.go (new file, 227 lines)
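The new file below carries the git plumbing shared by every DAG entity: listing, fetching, pushing, pulling and merging refs under refs/<namespace>/*. A rough in-package usage sketch (the helper name and the "origin" remote are illustrative, not part of this diff):

// syncFoos shows how a concrete entity could drive the new actions; def and
// repo would come from the caller (e.g. makeTestContext() in common_test.go).
func syncFoos(def Definition, repo repository.ClockedRepo) error {
	// Fetch: remote refs/foos/* -> local refs/remotes/origin/foos/*.
	if _, err := Fetch(def, repo, "origin"); err != nil {
		return err
	}
	// Merge every fetched ref into the matching local ref.
	for result := range MergeAll(def, repo, "origin") {
		if result.Err != nil {
			return result.Err
		}
	}
	// Push: mirror local refs/foos/* back to the remote.
	_, err := Push(def, repo, "origin")
	return err
}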
@@ -0,0 +1,227 @@
|
||||
package dag
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/MichaelMure/git-bug/entity"
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
)
|
||||
|
||||
func ListLocalIds(typename string, repo repository.RepoData) ([]entity.Id, error) {
|
||||
refs, err := repo.ListRefs(fmt.Sprintf("refs/%s/", typename))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return entity.RefsToIds(refs), nil
|
||||
}
|
||||
|
||||
// Fetch retrieve updates from a remote
|
||||
// This does not change the local entity state
|
||||
func Fetch(def Definition, repo repository.Repo, remote string) (string, error) {
|
||||
// "refs/<entity>/*:refs/remotes/<remote>/<entity>/*"
|
||||
fetchRefSpec := fmt.Sprintf("refs/%s/*:refs/remotes/%s/%s/*",
|
||||
def.namespace, remote, def.namespace)
|
||||
|
||||
return repo.FetchRefs(remote, fetchRefSpec)
|
||||
}
|
||||
|
||||
// Push update a remote with the local changes
|
||||
func Push(def Definition, repo repository.Repo, remote string) (string, error) {
|
||||
// "refs/<entity>/*:refs/<entity>/*"
|
||||
refspec := fmt.Sprintf("refs/%s/*:refs/%s/*",
|
||||
def.namespace, def.namespace)
|
||||
|
||||
return repo.PushRefs(remote, refspec)
|
||||
}
|
||||
|
||||
// Pull will do a Fetch + MergeAll
|
||||
// Contrary to MergeAll, this function will return an error if a merge fail.
|
||||
func Pull(def Definition, repo repository.ClockedRepo, remote string) error {
|
||||
_, err := Fetch(def, repo, remote)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for merge := range MergeAll(def, repo, remote) {
|
||||
if merge.Err != nil {
|
||||
return merge.Err
|
||||
}
|
||||
if merge.Status == entity.MergeStatusInvalid {
|
||||
return errors.Errorf("merge failure: %s", merge.Reason)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func MergeAll(def Definition, repo repository.ClockedRepo, remote string) <-chan entity.MergeResult {
|
||||
out := make(chan entity.MergeResult)
|
||||
|
||||
// no caching for the merge, we load everything from git even if that means multiple
|
||||
// copy of the same entity in memory. The cache layer will intercept the results to
|
||||
// invalidate entities if necessary.
|
||||
|
||||
go func() {
|
||||
defer close(out)
|
||||
|
||||
remoteRefSpec := fmt.Sprintf("refs/remotes/%s/%s/", remote, def.namespace)
|
||||
remoteRefs, err := repo.ListRefs(remoteRefSpec)
|
||||
if err != nil {
|
||||
out <- entity.MergeResult{Err: err}
|
||||
return
|
||||
}
|
||||
|
||||
for _, remoteRef := range remoteRefs {
|
||||
out <- merge(def, repo, remoteRef)
|
||||
}
|
||||
}()
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func merge(def Definition, repo repository.ClockedRepo, remoteRef string) entity.MergeResult {
|
||||
id := entity.RefToId(remoteRef)
|
||||
|
||||
if err := id.Validate(); err != nil {
|
||||
return entity.NewMergeInvalidStatus(id, errors.Wrap(err, "invalid ref").Error())
|
||||
}
|
||||
|
||||
remoteEntity, err := read(def, repo, remoteRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeInvalidStatus(id,
|
||||
errors.Wrapf(err, "remote %s is not readable", def.typename).Error())
|
||||
}
|
||||
|
||||
// Check for error in remote data
|
||||
if err := remoteEntity.Validate(); err != nil {
|
||||
return entity.NewMergeInvalidStatus(id,
|
||||
errors.Wrapf(err, "remote %s data is invalid", def.typename).Error())
|
||||
}
|
||||
|
||||
localRef := fmt.Sprintf("refs/%s/%s", def.namespace, id.String())
|
||||
|
||||
localExist, err := repo.RefExist(localRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
// the bug is not local yet, simply create the reference
|
||||
if !localExist {
|
||||
err := repo.CopyRef(remoteRef, localRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
return entity.NewMergeStatus(entity.MergeStatusNew, id, remoteEntity)
|
||||
}
|
||||
|
||||
// var updated bool
|
||||
// err = repo.MergeRef(localRef, remoteRef, func() repository.Hash {
|
||||
// updated = true
|
||||
//
|
||||
// })
|
||||
// if err != nil {
|
||||
// return entity.NewMergeError(err, id)
|
||||
// }
|
||||
//
|
||||
// if updated {
|
||||
// return entity.NewMergeStatus(entity.MergeStatusUpdated, id, )
|
||||
// } else {
|
||||
// return entity.NewMergeStatus(entity.MergeStatusNothing, id, )
|
||||
// }
|
||||
|
||||
localCommit, err := repo.ResolveRef(localRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
remoteCommit, err := repo.ResolveRef(remoteRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
if localCommit == remoteCommit {
|
||||
// nothing to merge
|
||||
return entity.NewMergeStatus(entity.MergeStatusNothing, id, remoteEntity)
|
||||
}
|
||||
|
||||
// fast-forward is possible if otherRef include ref
|
||||
|
||||
remoteCommits, err := repo.ListCommits(remoteRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
fastForwardPossible := false
|
||||
for _, hash := range remoteCommits {
|
||||
if hash == localCommit {
|
||||
fastForwardPossible = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if fastForwardPossible {
|
||||
err = repo.UpdateRef(localRef, remoteCommit)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
return entity.NewMergeStatus(entity.MergeStatusUpdated, id, remoteEntity)
|
||||
}
|
||||
|
||||
// fast-forward is not possible, we need to create a merge commit
|
||||
// For simplicity when reading and to have clocks that record this change, we store
|
||||
// an empty operationPack.
|
||||
// First step is to collect those clocks.
|
||||
|
||||
localEntity, err := read(def, repo, localRef)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
// err = localEntity.packClock.Witness(remoteEntity.packClock.Time())
|
||||
// if err != nil {
|
||||
// return entity.NewMergeError(err, id)
|
||||
// }
|
||||
//
|
||||
// packTime, err := localEntity.packClock.Increment()
|
||||
// if err != nil {
|
||||
// return entity.NewMergeError(err, id)
|
||||
// }
|
||||
|
||||
editTime, err := repo.Increment(fmt.Sprintf(editClockPattern, def.namespace))
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
opp := &operationPack{
|
||||
Operations: nil,
|
||||
CreateTime: 0,
|
||||
EditTime: editTime,
|
||||
// PackTime: packTime,
|
||||
}
|
||||
|
||||
treeHash, err := opp.Write(def, repo)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
// Create the merge commit with two parents
|
||||
newHash, err := repo.StoreCommit(treeHash, localCommit, remoteCommit)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
// finally update the ref
|
||||
err = repo.UpdateRef(localRef, newHash)
|
||||
if err != nil {
|
||||
return entity.NewMergeError(err, id)
|
||||
}
|
||||
|
||||
return entity.NewMergeStatus(entity.MergeStatusUpdated, id, localEntity)
|
||||
}
|
||||
|
||||
func Remove() error {
|
||||
panic("")
|
||||
}
|
entity/dag/entity_test.go (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
package dag
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestWriteRead(t *testing.T) {
|
||||
repo, id1, id2, def := makeTestContext()
|
||||
|
||||
entity := New(def)
|
||||
require.False(t, entity.NeedCommit())
|
||||
|
||||
entity.Append(newOp1(id1, "foo"))
|
||||
entity.Append(newOp2(id1, "bar"))
|
||||
|
||||
require.True(t, entity.NeedCommit())
|
||||
require.NoError(t, entity.CommitAdNeeded(repo))
|
||||
require.False(t, entity.NeedCommit())
|
||||
|
||||
entity.Append(newOp2(id2, "foobar"))
|
||||
require.True(t, entity.NeedCommit())
|
||||
require.NoError(t, entity.CommitAdNeeded(repo))
|
||||
require.False(t, entity.NeedCommit())
|
||||
|
||||
read, err := Read(def, repo, entity.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
assertEqualEntities(t, entity, read)
|
||||
}
|
||||
|
||||
func assertEqualEntities(t *testing.T, a, b *Entity) {
|
||||
// testify doesn't support comparing functions and systematically fail if they are not nil
|
||||
// so we have to set them to nil temporarily
|
||||
|
||||
backOpUnA := a.Definition.operationUnmarshaler
|
||||
backOpUnB := b.Definition.operationUnmarshaler
|
||||
|
||||
a.Definition.operationUnmarshaler = nil
|
||||
b.Definition.operationUnmarshaler = nil
|
||||
|
||||
backIdResA := a.Definition.identityResolver
|
||||
backIdResB := b.Definition.identityResolver
|
||||
|
||||
a.Definition.identityResolver = nil
|
||||
b.Definition.identityResolver = nil
|
||||
|
||||
defer func() {
|
||||
a.Definition.operationUnmarshaler = backOpUnA
|
||||
b.Definition.operationUnmarshaler = backOpUnB
|
||||
a.Definition.identityResolver = backIdResA
|
||||
b.Definition.identityResolver = backIdResB
|
||||
}()
|
||||
|
||||
require.Equal(t, a, b)
|
||||
}
|
||||
|
||||
// // Merge
|
||||
//
|
||||
// merge1 := makeCommit(t, repo)
|
||||
// merge1 = makeCommit(t, repo, merge1)
|
||||
// err = repo.UpdateRef("merge1", merge1)
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// err = repo.UpdateRef("merge2", merge1)
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// // identical merge
|
||||
// err = repo.MergeRef("merge1", "merge2")
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// refMerge1, err := repo.ResolveRef("merge1")
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, merge1, refMerge1)
|
||||
// refMerge2, err := repo.ResolveRef("merge2")
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, merge1, refMerge2)
|
||||
//
|
||||
// // fast-forward merge
|
||||
// merge2 := makeCommit(t, repo, merge1)
|
||||
// merge2 = makeCommit(t, repo, merge2)
|
||||
//
|
||||
// err = repo.UpdateRef("merge2", merge2)
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// err = repo.MergeRef("merge1", "merge2")
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// refMerge1, err = repo.ResolveRef("merge1")
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, merge2, refMerge1)
|
||||
// refMerge2, err = repo.ResolveRef("merge2")
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, merge2, refMerge2)
|
||||
//
|
||||
// // merge commit
|
||||
// merge1 = makeCommit(t, repo, merge1)
|
||||
// err = repo.UpdateRef("merge1", merge1)
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// merge2 = makeCommit(t, repo, merge2)
|
||||
// err = repo.UpdateRef("merge2", merge2)
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// err = repo.MergeRef("merge1", "merge2")
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// refMerge1, err = repo.ResolveRef("merge1")
|
||||
// require.NoError(t, err)
|
||||
// require.NotEqual(t, merge1, refMerge1)
|
||||
// commitRefMerge1, err := repo.ReadCommit(refMerge1)
|
||||
// require.NoError(t, err)
|
||||
// require.ElementsMatch(t, commitRefMerge1.Parents, []Hash{merge1, merge2})
|
||||
// refMerge2, err = repo.ResolveRef("merge2")
|
||||
// require.NoError(t, err)
|
||||
// require.Equal(t, merge2, refMerge2)
|
entity/dag/operation.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package dag

import (
"github.com/MichaelMure/git-bug/entity"
"github.com/MichaelMure/git-bug/identity"
)

// Operation is a piece of data defining a change to reflect on the state of an Entity.
// What this Operation or Entity's state looks like is not of the resort of this package as it only deals with the
// data structure and storage.
type Operation interface {
// Id return the Operation identifier
// Some care need to be taken to define a correct Id derivation and enough entropy in the data used to avoid
// collisions. Notably:
// - the Id of the first Operation will be used as the Id of the Entity. Collision need to be avoided across Entities.
// - collisions can also happen within the set of Operations of an Entity. Simple Operation might not have enough
// entropy to yield unique Ids.
// A common way to derive an Id will be to use the DeriveId function on the serialized operation data.
Id() entity.Id
// Validate check if the Operation data is valid
Validate() error

Author() identity.Interface
}

type operationBase struct {
author identity.Interface

// Not serialized. Store the op's id in memory.
id entity.Id
}
entity/dag/operation_pack.go (new file, 294 lines)
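In operationPack.Write() below, the operations are serialized into a single JSON blob while the metadata (format version, lamport clocks) is encoded directly in the names of extra tree entries that all point at the empty blob. For illustration, a stored pack's tree could look like this (the clock values are made up):

ops            -> blob {"author": ..., "ops": [...]}   (the serialized operations)
version-1      -> empty blob                            (format version)
create-clock-4 -> empty blob                            (only on the root pack)
edit-clock-9   -> empty blob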
@@ -0,0 +1,294 @@
|
||||
package dag
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
|
||||
"github.com/MichaelMure/git-bug/entity"
|
||||
"github.com/MichaelMure/git-bug/identity"
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
"github.com/MichaelMure/git-bug/util/lamport"
|
||||
)
|
||||
|
||||
// TODO: extra data tree
|
||||
const extraEntryName = "extra"
|
||||
|
||||
const opsEntryName = "ops"
|
||||
const versionEntryPrefix = "version-"
|
||||
const createClockEntryPrefix = "create-clock-"
|
||||
const editClockEntryPrefix = "edit-clock-"
|
||||
const packClockEntryPrefix = "pack-clock-"
|
||||
|
||||
// operationPack is a wrapper structure to store multiple operations in a single git blob.
|
||||
// Additionally, it holds and store the metadata for those operations.
|
||||
type operationPack struct {
|
||||
// An identifier, taken from a hash of the serialized Operations.
|
||||
id entity.Id
|
||||
|
||||
// The author of the Operations. Must be the same author for all the Operations.
|
||||
Author identity.Interface
|
||||
// The list of Operation stored in the operationPack
|
||||
Operations []Operation
|
||||
// Encode the entity's logical time of creation across all entities of the same type.
|
||||
// Only exist on the root operationPack
|
||||
CreateTime lamport.Time
|
||||
// Encode the entity's logical time of last edition across all entities of the same type.
|
||||
// Exist on all operationPack
|
||||
EditTime lamport.Time
|
||||
// // Encode the operationPack's logical time of creation withing this entity.
|
||||
// // Exist on all operationPack
|
||||
// PackTime lamport.Time
|
||||
}
|
||||
|
||||
func (opp *operationPack) Id() entity.Id {
|
||||
if opp.id == "" || opp.id == entity.UnsetId {
|
||||
// This means we are trying to get the opp's Id *before* it has been stored.
|
||||
// As the Id is computed based on the actual bytes written on the disk, we are going to predict
|
||||
// those and then get the Id. This is safe as it will be the exact same code writing on disk later.
|
||||
|
||||
data, err := json.Marshal(opp)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
opp.id = entity.DeriveId(data)
|
||||
}
|
||||
|
||||
return opp.id
|
||||
}
|
||||
|
||||
func (opp *operationPack) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(struct {
|
||||
Author identity.Interface `json:"author"`
|
||||
Operations []Operation `json:"ops"`
|
||||
}{
|
||||
Author: opp.Author,
|
||||
Operations: opp.Operations,
|
||||
})
|
||||
}
|
||||
|
||||
func (opp *operationPack) Validate() error {
|
||||
if opp.Author == nil {
|
||||
return fmt.Errorf("missing author")
|
||||
}
|
||||
for _, op := range opp.Operations {
|
||||
if op.Author() != opp.Author {
|
||||
return fmt.Errorf("operation has different author than the operationPack's")
|
||||
}
|
||||
}
|
||||
if opp.EditTime == 0 {
|
||||
return fmt.Errorf("lamport edit time is zero")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (opp *operationPack) Write(def Definition, repo repository.RepoData, parentCommit ...repository.Hash) (repository.Hash, error) {
|
||||
if err := opp.Validate(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// For different reason, we store the clocks and format version directly in the git tree.
|
||||
// Version has to be accessible before any attempt to decode to return early with a unique error.
|
||||
// Clocks could possibly be stored in the git blob but it's nice to separate data and metadata, and
|
||||
// we are storing something directly in the tree already so why not.
|
||||
//
|
||||
// To have a valid Tree, we point the "fake" entries to always the same value, the empty blob.
|
||||
emptyBlobHash, err := repo.StoreData([]byte{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Write the Ops as a Git blob containing the serialized array of operations
|
||||
data, err := json.Marshal(opp)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// compute the Id while we have the serialized data
|
||||
opp.id = entity.DeriveId(data)
|
||||
|
||||
hash, err := repo.StoreData(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Make a Git tree referencing this blob and encoding the other values:
|
||||
// - format version
|
||||
// - clocks
|
||||
tree := []repository.TreeEntry{
|
||||
{ObjectType: repository.Blob, Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(versionEntryPrefix+"%d", def.formatVersion)},
|
||||
{ObjectType: repository.Blob, Hash: hash,
|
||||
Name: opsEntryName},
|
||||
{ObjectType: repository.Blob, Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(editClockEntryPrefix+"%d", opp.EditTime)},
|
||||
// {ObjectType: repository.Blob, Hash: emptyBlobHash,
|
||||
// Name: fmt.Sprintf(packClockEntryPrefix+"%d", opp.PackTime)},
|
||||
}
|
||||
if opp.CreateTime > 0 {
|
||||
tree = append(tree, repository.TreeEntry{
|
||||
ObjectType: repository.Blob,
|
||||
Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(createClockEntryPrefix+"%d", opp.CreateTime),
|
||||
})
|
||||
}
|
||||
|
||||
// Store the tree
|
||||
treeHash, err := repo.StoreTree(tree)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Write a Git commit referencing the tree, with the previous commit as parent
|
||||
// If we have keys, sign.
|
||||
var commitHash repository.Hash
|
||||
|
||||
// Sign the commit if we have a key
|
||||
if opp.Author.SigningKey() != nil {
|
||||
commitHash, err = repo.StoreSignedCommit(treeHash, opp.Author.SigningKey().PGPEntity(), parentCommit...)
|
||||
} else {
|
||||
commitHash, err = repo.StoreCommit(treeHash, parentCommit...)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return commitHash, nil
|
||||
}
|
||||
|
||||
// readOperationPack read the operationPack encoded in git at the given Tree hash.
|
||||
//
|
||||
// Validity of the Lamport clocks is left for the caller to decide.
|
||||
func readOperationPack(def Definition, repo repository.RepoData, commit repository.Commit) (*operationPack, error) {
|
||||
entries, err := repo.ReadTree(commit.TreeHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check the format version first, fail early instead of trying to read something
|
||||
var version uint
|
||||
for _, entry := range entries {
|
||||
if strings.HasPrefix(entry.Name, versionEntryPrefix) {
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, versionEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read format version")
|
||||
}
|
||||
if v > 1<<12 {
|
||||
return nil, fmt.Errorf("format version too big")
|
||||
}
|
||||
version = uint(v)
|
||||
break
|
||||
}
|
||||
}
|
||||
if version == 0 {
|
||||
return nil, entity.NewErrUnknowFormat(def.formatVersion)
|
||||
}
|
||||
if version != def.formatVersion {
|
||||
return nil, entity.NewErrInvalidFormat(version, def.formatVersion)
|
||||
}
|
||||
|
||||
var id entity.Id
|
||||
var author identity.Interface
|
||||
var ops []Operation
|
||||
var createTime lamport.Time
|
||||
var editTime lamport.Time
|
||||
// var packTime lamport.Time
|
||||
|
||||
for _, entry := range entries {
|
||||
switch {
|
||||
case entry.Name == opsEntryName:
|
||||
data, err := repo.ReadData(entry.Hash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read git blob data")
|
||||
}
|
||||
ops, author, err = unmarshallPack(def, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
id = entity.DeriveId(data)
|
||||
|
||||
case strings.HasPrefix(entry.Name, createClockEntryPrefix):
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, createClockEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read creation lamport time")
|
||||
}
|
||||
createTime = lamport.Time(v)
|
||||
|
||||
case strings.HasPrefix(entry.Name, editClockEntryPrefix):
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, editClockEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read edit lamport time")
|
||||
}
|
||||
editTime = lamport.Time(v)
|
||||
|
||||
// case strings.HasPrefix(entry.Name, packClockEntryPrefix):
|
||||
// found &= 1 << 3
|
||||
//
|
||||
// v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, packClockEntryPrefix), 10, 64)
|
||||
// if err != nil {
|
||||
// return nil, errors.Wrap(err, "can't read pack lamport time")
|
||||
// }
|
||||
// packTime = lamport.Time(v)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify signature if we expect one
|
||||
keys := author.ValidKeysAtTime(fmt.Sprintf(editClockPattern, def.namespace), editTime)
|
||||
if len(keys) > 0 {
|
||||
keyring := identity.PGPKeyring(keys)
|
||||
_, err = openpgp.CheckDetachedSignature(keyring, commit.SignedData, commit.Signature)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("signature failure: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return &operationPack{
|
||||
id: id,
|
||||
Author: author,
|
||||
Operations: ops,
|
||||
CreateTime: createTime,
|
||||
EditTime: editTime,
|
||||
// PackTime: packTime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// unmarshallPack delegate the unmarshalling of the Operation's JSON to the decoding
|
||||
// function provided by the concrete entity. This gives access to the concrete type of each
|
||||
// Operation.
|
||||
func unmarshallPack(def Definition, data []byte) ([]Operation, identity.Interface, error) {
|
||||
aux := struct {
|
||||
Author identity.IdentityStub `json:"author"`
|
||||
Operations []json.RawMessage `json:"ops"`
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(data, &aux); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if aux.Author.Id() == "" || aux.Author.Id() == entity.UnsetId {
|
||||
return nil, nil, fmt.Errorf("missing author")
|
||||
}
|
||||
|
||||
author, err := def.identityResolver.ResolveIdentity(aux.Author.Id())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
ops := make([]Operation, 0, len(aux.Operations))
|
||||
|
||||
for _, raw := range aux.Operations {
|
||||
// delegate to specialized unmarshal function
|
||||
op, err := def.operationUnmarshaler(author, raw)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
ops = append(ops, op)
|
||||
}
|
||||
|
||||
return ops, author, nil
|
||||
}
|
entity/dag/operation_pack_test.go (new file, 44 lines)
@@ -0,0 +1,44 @@
package dag

import (
"testing"

"github.com/stretchr/testify/require"
)

func TestOperationPackReadWrite(t *testing.T) {
repo, id1, _, def := makeTestContext()

opp := &operationPack{
Author: id1,
Operations: []Operation{
newOp1(id1, "foo"),
newOp2(id1, "bar"),
},
CreateTime: 123,
EditTime: 456,
}

commitHash, err := opp.Write(def, repo)
require.NoError(t, err)

commit, err := repo.ReadCommit(commitHash)
require.NoError(t, err)

opp2, err := readOperationPack(def, repo, commit)
require.NoError(t, err)

require.Equal(t, opp, opp2)

// make sure we get the same Id with the same data
opp3 := &operationPack{
Author: id1,
Operations: []Operation{
newOp1(id1, "foo"),
newOp2(id1, "bar"),
},
CreateTime: 123,
EditTime: 456,
}
require.Equal(t, opp.Id(), opp3.Id())
}
@@ -1,8 +0,0 @@
// Package entity contains the base common code to define an entity stored
// in a chain of git objects, supporting actions like Push, Pull and Merge.
package entity

// TODO: Bug and Identity are very similar, right ? I expect that this package
// will eventually hold the common code to define an entity and the related
// helpers, errors and so on. When this work is done, it will become easier
// to add new entities, for example to support pull requests.
@@ -1,31 +0,0 @@
package entity

import (
"fmt"

"github.com/MichaelMure/git-bug/repository"
)

func ListLocalIds(typename string, repo repository.RepoData) ([]Id, error) {
refs, err := repo.ListRefs(fmt.Sprintf("refs/%s/", typename))
if err != nil {
return nil, err
}
return RefsToIds(refs), nil
}

func Fetch() {

}

func Pull() {

}

func Push() {

}

func Remove() error {
panic("")
}
@@ -1,107 +0,0 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
)
|
||||
|
||||
// func TestFoo(t *testing.T) {
|
||||
// repo, err := repository.OpenGoGitRepo("~/dev/git-bug", nil)
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// b, err := ReadBug(repo, Id("8b22e548c93a6ed23c31fd4e337c6286c3d1e5c9cae5537bc8e5842e11bd1099"))
|
||||
// require.NoError(t, err)
|
||||
//
|
||||
// fmt.Println(b)
|
||||
// }
|
||||
|
||||
type op1 struct {
|
||||
OperationType int `json:"type"`
|
||||
Field1 string `json:"field_1"`
|
||||
}
|
||||
|
||||
func newOp1(field1 string) *op1 {
|
||||
return &op1{OperationType: 1, Field1: field1}
|
||||
}
|
||||
|
||||
func (o op1) Id() Id {
|
||||
data, _ := json.Marshal(o)
|
||||
return DeriveId(data)
|
||||
}
|
||||
|
||||
func (o op1) Validate() error { return nil }
|
||||
|
||||
type op2 struct {
|
||||
OperationType int `json:"type"`
|
||||
Field2 string `json:"field_2"`
|
||||
}
|
||||
|
||||
func newOp2(field2 string) *op2 {
|
||||
return &op2{OperationType: 2, Field2: field2}
|
||||
}
|
||||
|
||||
func (o op2) Id() Id {
|
||||
data, _ := json.Marshal(o)
|
||||
return DeriveId(data)
|
||||
}
|
||||
|
||||
func (o op2) Validate() error { return nil }
|
||||
|
||||
var def = Definition{
|
||||
typename: "foo",
|
||||
namespace: "foos",
|
||||
operationUnmarshaler: unmarshaller,
|
||||
formatVersion: 1,
|
||||
}
|
||||
|
||||
func unmarshaller(raw json.RawMessage) (Operation, error) {
|
||||
var t struct {
|
||||
OperationType int `json:"type"`
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(raw, &t); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch t.OperationType {
|
||||
case 1:
|
||||
op := &op1{}
|
||||
err := json.Unmarshal(raw, &op)
|
||||
return op, err
|
||||
case 2:
|
||||
op := &op2{}
|
||||
err := json.Unmarshal(raw, &op)
|
||||
return op, err
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown operation type %v", t.OperationType)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteRead(t *testing.T) {
|
||||
repo := repository.NewMockRepo()
|
||||
|
||||
entity := New(def)
|
||||
require.False(t, entity.NeedCommit())
|
||||
|
||||
entity.Append(newOp1("foo"))
|
||||
entity.Append(newOp2("bar"))
|
||||
|
||||
require.True(t, entity.NeedCommit())
|
||||
require.NoError(t, entity.CommitAdNeeded(repo))
|
||||
require.False(t, entity.NeedCommit())
|
||||
|
||||
entity.Append(newOp2("foobar"))
|
||||
require.True(t, entity.NeedCommit())
|
||||
require.NoError(t, entity.CommitAdNeeded(repo))
|
||||
require.False(t, entity.NeedCommit())
|
||||
|
||||
read, err := Read(def, repo, entity.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
fmt.Println(*read)
|
||||
}
|
@@ -8,14 +8,15 @@ import (
type MergeStatus int

const (
_ MergeStatus = iota
MergeStatusNew
MergeStatusInvalid
MergeStatusUpdated
MergeStatusNothing
MergeStatusError
_ MergeStatus = iota
MergeStatusNew // a new Entity was created locally
MergeStatusInvalid // the remote data is invalid
MergeStatusUpdated // a local Entity has been updated
MergeStatusNothing // no changes were made to a local Entity (already up to date)
MergeStatusError // a terminal error happened
)

// MergeResult hold the result of a merge operation on an Entity.
type MergeResult struct {
// Err is set when a terminal error occur in the process
Err error
@@ -55,6 +56,7 @@ func NewMergeError(err error, id Id) MergeResult {
}
}

// TODO: Interface --> *Entity ?
func NewMergeStatus(status MergeStatus, id Id, entity Interface) MergeResult {
return MergeResult{
Id: id,
@@ -1,199 +0,0 @@
|
||||
package entity
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
"github.com/MichaelMure/git-bug/util/lamport"
|
||||
)
|
||||
|
||||
// TODO: extra data tree
|
||||
const extraEntryName = "extra"
|
||||
|
||||
const opsEntryName = "ops"
|
||||
const versionEntryPrefix = "version-"
|
||||
const createClockEntryPrefix = "create-clock-"
|
||||
const editClockEntryPrefix = "edit-clock-"
|
||||
const packClockEntryPrefix = "pack-clock-"
|
||||
|
||||
type operationPack struct {
|
||||
Operations []Operation
|
||||
// Encode the entity's logical time of creation across all entities of the same type.
|
||||
// Only exist on the root operationPack
|
||||
CreateTime lamport.Time
|
||||
// Encode the entity's logical time of last edition across all entities of the same type.
|
||||
// Exist on all operationPack
|
||||
EditTime lamport.Time
|
||||
// Encode the operationPack's logical time of creation withing this entity.
|
||||
// Exist on all operationPack
|
||||
PackTime lamport.Time
|
||||
}
|
||||
|
||||
func (opp operationPack) write(def Definition, repo repository.RepoData) (repository.Hash, error) {
|
||||
// For different reason, we store the clocks and format version directly in the git tree.
|
||||
// Version has to be accessible before any attempt to decode to return early with a unique error.
|
||||
// Clocks could possibly be stored in the git blob but it's nice to separate data and metadata, and
|
||||
// we are storing something directly in the tree already so why not.
|
||||
//
|
||||
// To have a valid Tree, we point the "fake" entries to always the same value, the empty blob.
|
||||
emptyBlobHash, err := repo.StoreData([]byte{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Write the Ops as a Git blob containing the serialized array
|
||||
data, err := json.Marshal(struct {
|
||||
Operations []Operation `json:"ops"`
|
||||
}{
|
||||
Operations: opp.Operations,
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
hash, err := repo.StoreData(data)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Make a Git tree referencing this blob and encoding the other values:
|
||||
// - format version
|
||||
// - clocks
|
||||
tree := []repository.TreeEntry{
|
||||
{ObjectType: repository.Blob, Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(versionEntryPrefix+"%d", def.formatVersion)},
|
||||
{ObjectType: repository.Blob, Hash: hash,
|
||||
Name: opsEntryName},
|
||||
{ObjectType: repository.Blob, Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(editClockEntryPrefix+"%d", opp.EditTime)},
|
||||
{ObjectType: repository.Blob, Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(packClockEntryPrefix+"%d", opp.PackTime)},
|
||||
}
|
||||
if opp.CreateTime > 0 {
|
||||
tree = append(tree, repository.TreeEntry{
|
||||
ObjectType: repository.Blob,
|
||||
Hash: emptyBlobHash,
|
||||
Name: fmt.Sprintf(createClockEntryPrefix+"%d", opp.CreateTime),
|
||||
})
|
||||
}
|
||||
|
||||
// Store the tree
|
||||
return repo.StoreTree(tree)
|
||||
}
|
||||
|
||||
// readOperationPack read the operationPack encoded in git at the given Tree hash.
|
||||
//
|
||||
// Validity of the Lamport clocks is left for the caller to decide.
|
||||
func readOperationPack(def Definition, repo repository.RepoData, treeHash repository.Hash) (*operationPack, error) {
|
||||
entries, err := repo.ReadTree(treeHash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// check the format version first, fail early instead of trying to read something
|
||||
var version uint
|
||||
for _, entry := range entries {
|
||||
if strings.HasPrefix(entry.Name, versionEntryPrefix) {
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, versionEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read format version")
|
||||
}
|
||||
if v > 1<<12 {
|
||||
return nil, fmt.Errorf("format version too big")
|
||||
}
|
||||
version = uint(v)
|
||||
break
|
||||
}
|
||||
}
|
||||
if version == 0 {
|
||||
return nil, NewErrUnknowFormat(def.formatVersion)
|
||||
}
|
||||
if version != def.formatVersion {
|
||||
return nil, NewErrInvalidFormat(version, def.formatVersion)
|
||||
}
|
||||
|
||||
var ops []Operation
|
||||
var createTime lamport.Time
|
||||
var editTime lamport.Time
|
||||
var packTime lamport.Time
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.Name == opsEntryName {
|
||||
data, err := repo.ReadData(entry.Hash)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to read git blob data")
|
||||
}
|
||||
|
||||
ops, err = unmarshallOperations(def, data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, createClockEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read creation lamport time")
|
||||
}
|
||||
createTime = lamport.Time(v)
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, editClockEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read edit lamport time")
|
||||
}
|
||||
editTime = lamport.Time(v)
|
||||
continue
|
||||
}
|
||||
|
||||
if strings.HasPrefix(entry.Name, packClockEntryPrefix) {
|
||||
v, err := strconv.ParseUint(strings.TrimPrefix(entry.Name, packClockEntryPrefix), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "can't read pack lamport time")
|
||||
}
|
||||
packTime = lamport.Time(v)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return &operationPack{
|
||||
Operations: ops,
|
||||
CreateTime: createTime,
|
||||
EditTime: editTime,
|
||||
PackTime: packTime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// unmarshallOperations delegate the unmarshalling of the Operation's JSON to the decoding
|
||||
// function provided by the concrete entity. This gives access to the concrete type of each
|
||||
// Operation.
|
||||
func unmarshallOperations(def Definition, data []byte) ([]Operation, error) {
|
||||
aux := struct {
|
||||
Operations []json.RawMessage `json:"ops"`
|
||||
}{}
|
||||
|
||||
if err := json.Unmarshal(data, &aux); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ops := make([]Operation, 0, len(aux.Operations))
|
||||
|
||||
for _, raw := range aux.Operations {
|
||||
// delegate to specialized unmarshal function
|
||||
op, err := def.operationUnmarshaler(raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ops = append(ops, op)
|
||||
}
|
||||
|
||||
return ops, nil
|
||||
}
|
@@ -2,6 +2,7 @@ package entity

import "strings"

// RefsToIds parse a slice of git references and return the corresponding Entity's Id.
func RefsToIds(refs []string) []Id {
ids := make([]Id, len(refs))

@@ -12,6 +13,7 @@ func RefsToIds(refs []string) []Id {
return ids
}

// RefsToIds parse a git reference and return the corresponding Entity's Id.
func RefToId(ref string) Id {
split := strings.Split(ref, "/")
return Id(split[len(split)-1])
@@ -344,7 +344,7 @@ func (i *Identity) Commit(repo repository.ClockedRepo) error {

var commitHash repository.Hash
if lastCommit != "" {
commitHash, err = repo.StoreCommitWithParent(treeHash, lastCommit)
commitHash, err = repo.StoreCommit(treeHash, lastCommit)
} else {
commitHash, err = repo.StoreCommit(treeHash)
}
@@ -518,6 +518,15 @@ func (i *Identity) Keys() []*Key {
return i.lastVersion().keys
}

// SigningKey return the key that should be used to sign new messages. If no key is available, return nil.
func (i *Identity) SigningKey() *Key {
keys := i.Keys()
if len(keys) > 0 {
return keys[0]
}
return nil
}

// ValidKeysAtTime return the set of keys valid at a given lamport time
func (i *Identity) ValidKeysAtTime(clockName string, time lamport.Time) []*Key {
var result []*Key
@@ -9,7 +9,7 @@ import (
)

func TestPushPull(t *testing.T) {
repoA, repoB, remote := repository.SetupReposAndRemote()
repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
defer repository.CleanupTestRepos(repoA, repoB, remote)

identity1, err := NewIdentity(repoA, "name1", "email1")
@@ -71,6 +71,10 @@ func (IdentityStub) Keys() []*Key {
panic("identities needs to be properly loaded with identity.ReadLocal()")
}

func (i *IdentityStub) SigningKey() *Key {
panic("identities needs to be properly loaded with identity.ReadLocal()")
}

func (IdentityStub) ValidKeysAtTime(_ string, _ lamport.Time) []*Key {
panic("identities needs to be properly loaded with identity.ReadLocal()")
}
@@ -36,18 +36,18 @@ func TestIdentityCommitLoad(t *testing.T) {

// multiple versions

identity, err = NewIdentityFull(repo, "René Descartes", "rene.descartes@example.com", "", "", []*Key{{PubKey: "pubkeyA"}})
identity, err = NewIdentityFull(repo, "René Descartes", "rene.descartes@example.com", "", "", []*Key{generatePublicKey()})
require.NoError(t, err)

idBeforeCommit = identity.Id()

err = identity.Mutate(repo, func(orig *Mutator) {
orig.Keys = []*Key{{PubKey: "pubkeyB"}}
orig.Keys = []*Key{generatePublicKey()}
})
require.NoError(t, err)

err = identity.Mutate(repo, func(orig *Mutator) {
orig.Keys = []*Key{{PubKey: "pubkeyC"}}
orig.Keys = []*Key{generatePublicKey()}
})
require.NoError(t, err)
@ -70,13 +70,13 @@ func TestIdentityCommitLoad(t *testing.T) {
|
||||
|
||||
err = identity.Mutate(repo, func(orig *Mutator) {
|
||||
orig.Email = "rene@descartes.com"
|
||||
orig.Keys = []*Key{{PubKey: "pubkeyD"}}
|
||||
orig.Keys = []*Key{generatePublicKey()}
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = identity.Mutate(repo, func(orig *Mutator) {
|
||||
orig.Email = "rene@descartes.com"
|
||||
orig.Keys = []*Key{{PubKey: "pubkeyD"}, {PubKey: "pubkeyE"}}
|
||||
orig.Keys = []*Key{generatePublicKey(), generatePublicKey()}
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -123,49 +123,45 @@ func commitsAreSet(t *testing.T, identity *Identity) {
|
||||
|
||||
// Test that the correct crypto keys are returned for a given lamport time
|
||||
func TestIdentity_ValidKeysAtTime(t *testing.T) {
|
||||
pubKeyA := generatePublicKey()
|
||||
pubKeyB := generatePublicKey()
|
||||
pubKeyC := generatePublicKey()
|
||||
pubKeyD := generatePublicKey()
|
||||
pubKeyE := generatePublicKey()
|
||||
|
||||
identity := Identity{
|
||||
versions: []*version{
|
||||
{
|
||||
times: map[string]lamport.Time{"foo": 100},
|
||||
keys: []*Key{
|
||||
{PubKey: "pubkeyA"},
|
||||
},
|
||||
keys: []*Key{pubKeyA},
|
||||
},
|
||||
{
|
||||
times: map[string]lamport.Time{"foo": 200},
|
||||
keys: []*Key{
|
||||
{PubKey: "pubkeyB"},
|
||||
},
|
||||
keys: []*Key{pubKeyB},
|
||||
},
|
||||
{
|
||||
times: map[string]lamport.Time{"foo": 201},
|
||||
keys: []*Key{
|
||||
{PubKey: "pubkeyC"},
|
||||
},
|
||||
keys: []*Key{pubKeyC},
|
||||
},
|
||||
{
|
||||
times: map[string]lamport.Time{"foo": 201},
|
||||
keys: []*Key{
|
||||
{PubKey: "pubkeyD"},
|
||||
},
|
||||
keys: []*Key{pubKeyD},
|
||||
},
|
||||
{
|
||||
times: map[string]lamport.Time{"foo": 300},
|
||||
keys: []*Key{
|
||||
{PubKey: "pubkeyE"},
|
||||
},
|
||||
keys: []*Key{pubKeyE},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
require.Nil(t, identity.ValidKeysAtTime("foo", 10))
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 100), []*Key{{PubKey: "pubkeyA"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 140), []*Key{{PubKey: "pubkeyA"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 200), []*Key{{PubKey: "pubkeyB"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 201), []*Key{{PubKey: "pubkeyD"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 202), []*Key{{PubKey: "pubkeyD"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 300), []*Key{{PubKey: "pubkeyE"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 3000), []*Key{{PubKey: "pubkeyE"}})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 100), []*Key{pubKeyA})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 140), []*Key{pubKeyA})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 200), []*Key{pubKeyB})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 201), []*Key{pubKeyD})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 202), []*Key{pubKeyD})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 300), []*Key{pubKeyE})
|
||||
require.Equal(t, identity.ValidKeysAtTime("foo", 3000), []*Key{pubKeyE})
|
||||
}
|
||||
|
||||
// Test the immutable or mutable metadata search
|
||||
|
@ -36,6 +36,9 @@ type Interface interface {
|
||||
// Can be empty.
|
||||
Keys() []*Key
|
||||
|
||||
// SigningKey returns the key that should be used to sign new messages. If no key is available, it returns nil.
|
||||
SigningKey() *Key
|
||||
|
||||
// ValidKeysAtTime return the set of keys valid at a given lamport time for a given clock of another entity
|
||||
// Can be empty.
|
||||
ValidKeysAtTime(clockName string, time lamport.Time) []*Key
|
||||
|
187
identity/key.go
@ -1,18 +1,193 @@
|
||||
package identity
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
"golang.org/x/crypto/openpgp/packet"
|
||||
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
)
|
||||
|
||||
type Key struct {
|
||||
// The GPG fingerprint of the key
|
||||
Fingerprint string `json:"fingerprint"`
|
||||
PubKey string `json:"pub_key"`
|
||||
public *packet.PublicKey
|
||||
private *packet.PrivateKey
|
||||
}
|
||||
|
||||
// GenerateKey generates a keypair (public+private).
|
||||
func GenerateKey() *Key {
|
||||
entity, err := openpgp.NewEntity("", "", "", &packet.Config{
|
||||
// The armored format doesn't include the creation time, which means round-tripped data would not be fully equal.
// We don't care about the creation time, so we can set it to the zero value.
|
||||
Time: func() time.Time {
|
||||
return time.Time{}
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return &Key{
|
||||
public: entity.PrimaryKey,
|
||||
private: entity.PrivateKey,
|
||||
}
|
||||
}
|
||||
|
||||
// generatePublicKey generates only a public key (only useful for testing).
|
||||
// See GenerateKey for the details.
|
||||
func generatePublicKey() *Key {
|
||||
k := GenerateKey()
|
||||
k.private = nil
|
||||
return k
|
||||
}
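A hypothetical sanity test for the two generators above; it would live in identity/key_test.go, where testify's require is already used:

func TestGeneratedKeysCanSign(t *testing.T) {
	// full keypair: both halves are present and the key validates
	k := GenerateKey()
	require.NoError(t, k.Validate())
	require.NotNil(t, k.PGPEntity().PrivateKey)

	// test-only variant: the private half is dropped but the key stays valid
	pub := generatePublicKey()
	require.NoError(t, pub.Validate())
	require.Nil(t, pub.PGPEntity().PrivateKey)
}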
|
||||
|
||||
func (k *Key) MarshalJSON() ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
w, err := armor.Encode(&buf, openpgp.PublicKeyType, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = k.public.Serialize(w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return json.Marshal(buf.String())
|
||||
}
|
||||
|
||||
func (k *Key) UnmarshalJSON(data []byte) error {
|
||||
var armored string
|
||||
err := json.Unmarshal(data, &armored)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
block, err := armor.Decode(strings.NewReader(armored))
|
||||
if err == io.EOF {
|
||||
return fmt.Errorf("no armored data found")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if block.Type != openpgp.PublicKeyType {
|
||||
return fmt.Errorf("invalid key type")
|
||||
}
|
||||
|
||||
reader := packet.NewReader(block.Body)
|
||||
p, err := reader.Next()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read public key packet")
|
||||
}
|
||||
|
||||
public, ok := p.(*packet.PublicKey)
|
||||
if !ok {
|
||||
return errors.New("got no packet.publicKey")
|
||||
}
|
||||
|
||||
// The armored format doesn't include the creation time, which means round-tripped data would not be fully equal.
// We don't care about the creation time, so we can set it to the zero value.
|
||||
public.CreationTime = time.Time{}
|
||||
|
||||
k.public = public
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Key) Validate() error {
|
||||
// Todo
|
||||
if k.public == nil {
|
||||
return fmt.Errorf("nil public key")
|
||||
}
|
||||
if !k.public.CanSign() {
|
||||
return fmt.Errorf("public key can't sign")
|
||||
}
|
||||
|
||||
if k.private != nil {
|
||||
if !k.private.CanSign() {
|
||||
return fmt.Errorf("private key can't sign")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (k *Key) Clone() *Key {
|
||||
clone := *k
|
||||
return &clone
|
||||
clone := &Key{}
|
||||
|
||||
pub := *k.public
|
||||
clone.public = &pub
|
||||
|
||||
if k.private != nil {
|
||||
priv := *k.private
|
||||
clone.private = &priv
|
||||
}
|
||||
|
||||
return clone
|
||||
}
|
||||
|
||||
func (k *Key) EnsurePrivateKey(repo repository.RepoKeyring) error {
|
||||
if k.private != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// item, err := repo.Keyring().Get(k.Fingerprint())
|
||||
// if err != nil {
|
||||
// return fmt.Errorf("no private key found for %s", k.Fingerprint())
|
||||
// }
|
||||
//
|
||||
|
||||
panic("TODO")
|
||||
}
|
||||
|
||||
func (k *Key) Fingerprint() string {
|
||||
return string(k.public.Fingerprint[:])
|
||||
}
|
||||
|
||||
func (k *Key) PGPEntity() *openpgp.Entity {
|
||||
return &openpgp.Entity{
|
||||
PrimaryKey: k.public,
|
||||
PrivateKey: k.private,
|
||||
}
|
||||
}
|
||||
|
||||
var _ openpgp.KeyRing = &PGPKeyring{}
|
||||
|
||||
// PGPKeyring implements an openpgp.KeyRing from a slice of Key.
|
||||
type PGPKeyring []*Key
|
||||
|
||||
func (pk PGPKeyring) KeysById(id uint64) []openpgp.Key {
|
||||
var result []openpgp.Key
|
||||
for _, key := range pk {
|
||||
if key.public.KeyId == id {
|
||||
result = append(result, openpgp.Key{
|
||||
PublicKey: key.public,
|
||||
PrivateKey: key.private,
|
||||
})
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (pk PGPKeyring) KeysByIdUsage(id uint64, requiredUsage byte) []openpgp.Key {
|
||||
// the only usage we care about is the ability to sign, which all keys should already be capable of
|
||||
return pk.KeysById(id)
|
||||
}
|
||||
|
||||
func (pk PGPKeyring) DecryptionKeys() []openpgp.Key {
|
||||
result := make([]openpgp.Key, len(pk))
|
||||
for i, key := range pk {
|
||||
result[i] = openpgp.Key{
|
||||
PublicKey: key.public,
|
||||
PrivateKey: key.private,
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
21
identity/key_test.go
Normal file
@ -0,0 +1,21 @@
|
||||
package identity
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestKeyJSON(t *testing.T) {
|
||||
k := generatePublicKey()
|
||||
|
||||
data, err := json.Marshal(k)
|
||||
require.NoError(t, err)
|
||||
|
||||
var read Key
|
||||
err = json.Unmarshal(data, &read)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, k, &read)
|
||||
}
|
@ -18,29 +18,23 @@ func makeIdentityTestRepo(t *testing.T) repository.ClockedRepo {
|
||||
|
||||
clock1, err := repo.GetOrCreateClock("foo")
|
||||
require.NoError(t, err)
|
||||
err = clock1.Witness(42) // clock goes to 43
|
||||
err = clock1.Witness(42)
|
||||
require.NoError(t, err)
|
||||
|
||||
clock2, err := repo.GetOrCreateClock("bar")
|
||||
require.NoError(t, err)
|
||||
err = clock2.Witness(34) // clock goes to 35
|
||||
err = clock2.Witness(34)
|
||||
require.NoError(t, err)
|
||||
|
||||
return repo
|
||||
}
|
||||
|
||||
func TestVersionSerialize(t *testing.T) {
|
||||
func TestVersionJSON(t *testing.T) {
|
||||
repo := makeIdentityTestRepo(t)
|
||||
|
||||
keys := []*Key{
|
||||
{
|
||||
Fingerprint: "fingerprint1",
|
||||
PubKey: "pubkey1",
|
||||
},
|
||||
{
|
||||
Fingerprint: "fingerprint2",
|
||||
PubKey: "pubkey2",
|
||||
},
|
||||
generatePublicKey(),
|
||||
generatePublicKey(),
|
||||
}
|
||||
|
||||
before, err := newVersion(repo, "name", "email", "login", "avatarUrl", keys)
|
||||
@ -57,8 +51,8 @@ func TestVersionSerialize(t *testing.T) {
|
||||
avatarURL: "avatarUrl",
|
||||
unixTime: time.Now().Unix(),
|
||||
times: map[string]lamport.Time{
|
||||
"foo": 43,
|
||||
"bar": 35,
|
||||
"foo": 42,
|
||||
"bar": 34,
|
||||
},
|
||||
keys: keys,
|
||||
nonce: before.nonce,
|
||||
|
120
repository/common.go
Normal file
@ -0,0 +1,120 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/openpgp"
|
||||
"golang.org/x/crypto/openpgp/armor"
|
||||
"golang.org/x/crypto/openpgp/errors"
|
||||
)
|
||||
|
||||
// nonNativeMerge is an implementation of a branch merge, for the case where
|
||||
// the underlying git implementation doesn't support it natively.
|
||||
func nonNativeMerge(repo RepoData, ref string, otherRef string, treeHashFn func() Hash) error {
|
||||
commit, err := repo.ResolveRef(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
otherCommit, err := repo.ResolveRef(otherRef)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if commit == otherCommit {
|
||||
// nothing to merge
|
||||
return nil
|
||||
}
|
||||
|
||||
// fast-forward is possible if otherRef includes ref
|
||||
|
||||
otherCommits, err := repo.ListCommits(otherRef)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fastForwardPossible := false
|
||||
for _, hash := range otherCommits {
|
||||
if hash == commit {
|
||||
fastForwardPossible = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if fastForwardPossible {
|
||||
return repo.UpdateRef(ref, otherCommit)
|
||||
}
|
||||
|
||||
// fast-forward is not possible, we need to create a merge commit
|
||||
|
||||
// we need a Tree to make the commit, an empty Tree will do
|
||||
emptyTreeHash, err := repo.StoreTree(nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newHash, err := repo.StoreCommit(emptyTreeHash, commit, otherCommit)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return repo.UpdateRef(ref, newHash)
|
||||
}
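A sketch of how an entity could drive the merge above through the MergeRef wrapper added to GoGitRepo later in this commit; the ref names and helper name are invented:

func mergeEntityRef(repo *GoGitRepo, id string) error {
	// we need a tree for a potential merge commit; an empty tree is the
	// simplest valid choice for this sketch
	emptyTree, err := repo.StoreTree(nil)
	if err != nil {
		return err
	}
	local := "refs/bugs/" + id
	remote := "refs/remotes/origin/bugs/" + id
	// the callback only runs when a fast-forward is not possible
	return repo.MergeRef(local, remote, func() Hash {
		return emptyTree
	})
}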
|
||||
|
||||
// nonNativeListCommits is an implementation for ListCommits, for the case where
|
||||
// the underlying git implementation doesn't support it natively.
|
||||
func nonNativeListCommits(repo RepoData, ref string) ([]Hash, error) {
|
||||
var result []Hash
|
||||
|
||||
stack := make([]Hash, 0, 32)
|
||||
visited := make(map[Hash]struct{})
|
||||
|
||||
hash, err := repo.ResolveRef(ref)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stack = append(stack, hash)
|
||||
|
||||
for len(stack) > 0 {
|
||||
// pop
|
||||
hash := stack[len(stack)-1]
|
||||
stack = stack[:len(stack)-1]
|
||||
|
||||
if _, ok := visited[hash]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
// mark as visited
|
||||
visited[hash] = struct{}{}
|
||||
result = append(result, hash)
|
||||
|
||||
commit, err := repo.ReadCommit(hash)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, parent := range commit.Parents {
|
||||
stack = append(stack, parent)
|
||||
}
|
||||
}
|
||||
|
||||
// reverse
|
||||
for i, j := 0, len(result)-1; i < j; i, j = i+1, j-1 {
|
||||
result[i], result[j] = result[j], result[i]
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// deArmorSignature converts an armored (text-serialized) signature into raw binary
|
||||
func deArmorSignature(armoredSig io.Reader) (io.Reader, error) {
|
||||
block, err := armor.Decode(armoredSig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if block.Type != openpgp.SignatureType {
|
||||
return nil, errors.InvalidArgumentError("expected '" + openpgp.SignatureType + "', got: " + block.Type)
|
||||
}
|
||||
return block.Body, nil
|
||||
}
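A sketch of how a caller might check the Commit.SignedData/Signature fields exposed by ReadCommit; the function name is invented, and the keyring can be any openpgp.KeyRing (for instance the identity.PGPKeyring added in this commit):

func checkCommitSignature(repo RepoData, hash Hash, keyring openpgp.KeyRing) error {
	commit, err := repo.ReadCommit(hash)
	if err != nil {
		return err
	}
	if commit.SignedData == nil || commit.Signature == nil {
		// unsigned commit, nothing to verify
		return nil
	}
	_, err = openpgp.CheckDetachedSignature(keyring, commit.SignedData, commit.Signature)
	return err
}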
|
@ -1,570 +0,0 @@
|
||||
// Package repository contains helper methods for working with the Git repo.
|
||||
package repository
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/blevesearch/bleve"
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
|
||||
"github.com/MichaelMure/git-bug/util/lamport"
|
||||
)
|
||||
|
||||
var _ ClockedRepo = &GitRepo{}
|
||||
var _ TestedRepo = &GitRepo{}
|
||||
|
||||
// GitRepo represents an instance of a (local) git repository.
|
||||
type GitRepo struct {
|
||||
gitCli
|
||||
path string
|
||||
|
||||
clocksMutex sync.Mutex
|
||||
clocks map[string]lamport.Clock
|
||||
|
||||
indexesMutex sync.Mutex
|
||||
indexes map[string]bleve.Index
|
||||
|
||||
keyring Keyring
|
||||
localStorage billy.Filesystem
|
||||
}
|
||||
|
||||
func (repo *GitRepo) ReadCommit(hash Hash) (Commit, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
func (repo *GitRepo) ResolveRef(ref string) (Hash, error) {
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
// OpenGitRepo determines if the given working directory is inside of a git repository,
|
||||
// and returns the corresponding GitRepo instance if it is.
|
||||
func OpenGitRepo(path string, clockLoaders []ClockLoader) (*GitRepo, error) {
|
||||
k, err := defaultKeyring()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repo := &GitRepo{
|
||||
gitCli: gitCli{path: path},
|
||||
path: path,
|
||||
clocks: make(map[string]lamport.Clock),
|
||||
indexes: make(map[string]bleve.Index),
|
||||
keyring: k,
|
||||
}
|
||||
|
||||
// Check the repo and retrieve the root path
|
||||
stdout, err := repo.runGitCommand("rev-parse", "--absolute-git-dir")
|
||||
|
||||
// Now dir is fetched with "git rev-parse --git-dir". May be it can
|
||||
// still return nothing in some cases. Then empty stdout check is
|
||||
// kept.
|
||||
if err != nil || stdout == "" {
|
||||
return nil, ErrNotARepo
|
||||
}
|
||||
|
||||
// Fix the path to be sure we are at the root
|
||||
repo.path = stdout
|
||||
repo.gitCli.path = stdout
|
||||
repo.localStorage = osfs.New(filepath.Join(path, "git-bug"))
|
||||
|
||||
for _, loader := range clockLoaders {
|
||||
allExist := true
|
||||
for _, name := range loader.Clocks {
|
||||
if _, err := repo.getClock(name); err != nil {
|
||||
allExist = false
|
||||
}
|
||||
}
|
||||
|
||||
if !allExist {
|
||||
err = loader.Witnesser(repo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
// InitGitRepo create a new empty git repo at the given path
|
||||
func InitGitRepo(path string) (*GitRepo, error) {
|
||||
k, err := defaultKeyring()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repo := &GitRepo{
|
||||
gitCli: gitCli{path: path},
|
||||
path: filepath.Join(path, ".git"),
|
||||
clocks: make(map[string]lamport.Clock),
|
||||
indexes: make(map[string]bleve.Index),
|
||||
keyring: k,
|
||||
localStorage: osfs.New(filepath.Join(path, ".git", "git-bug")),
|
||||
}
|
||||
|
||||
_, err = repo.runGitCommand("init", path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
// InitBareGitRepo create a new --bare empty git repo at the given path
|
||||
func InitBareGitRepo(path string) (*GitRepo, error) {
|
||||
k, err := defaultKeyring()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repo := &GitRepo{
|
||||
gitCli: gitCli{path: path},
|
||||
path: path,
|
||||
clocks: make(map[string]lamport.Clock),
|
||||
indexes: make(map[string]bleve.Index),
|
||||
keyring: k,
|
||||
localStorage: osfs.New(filepath.Join(path, "git-bug")),
|
||||
}
|
||||
|
||||
_, err = repo.runGitCommand("init", "--bare", path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return repo, nil
|
||||
}
|
||||
|
||||
func (repo *GitRepo) Close() error {
|
||||
var firstErr error
|
||||
for _, index := range repo.indexes {
|
||||
err := index.Close()
|
||||
if err != nil && firstErr == nil {
|
||||
firstErr = err
|
||||
}
|
||||
}
|
||||
return firstErr
|
||||
}
|
||||
|
||||
// LocalConfig give access to the repository scoped configuration
|
||||
func (repo *GitRepo) LocalConfig() Config {
|
||||
return newGitConfig(repo.gitCli, false)
|
||||
}
|
||||
|
||||
// GlobalConfig give access to the global scoped configuration
|
||||
func (repo *GitRepo) GlobalConfig() Config {
|
||||
return newGitConfig(repo.gitCli, true)
|
||||
}
|
||||
|
||||
// AnyConfig give access to a merged local/global configuration
|
||||
func (repo *GitRepo) AnyConfig() ConfigRead {
|
||||
return mergeConfig(repo.LocalConfig(), repo.GlobalConfig())
|
||||
}
|
||||
|
||||
// Keyring give access to a user-wide storage for secrets
|
||||
func (repo *GitRepo) Keyring() Keyring {
|
||||
return repo.keyring
|
||||
}
|
||||
|
||||
// GetPath returns the path to the repo.
|
||||
func (repo *GitRepo) GetPath() string {
|
||||
return repo.path
|
||||
}
|
||||
|
||||
// GetUserName returns the name the the user has used to configure git
|
||||
func (repo *GitRepo) GetUserName() (string, error) {
|
||||
return repo.runGitCommand("config", "user.name")
|
||||
}
|
||||
|
||||
// GetUserEmail returns the email address that the user has used to configure git.
|
||||
func (repo *GitRepo) GetUserEmail() (string, error) {
|
||||
return repo.runGitCommand("config", "user.email")
|
||||
}
|
||||
|
||||
// GetCoreEditor returns the name of the editor that the user has used to configure git.
|
||||
func (repo *GitRepo) GetCoreEditor() (string, error) {
|
||||
return repo.runGitCommand("var", "GIT_EDITOR")
|
||||
}
|
||||
|
||||
// GetRemotes returns the configured remotes repositories.
|
||||
func (repo *GitRepo) GetRemotes() (map[string]string, error) {
|
||||
stdout, err := repo.runGitCommand("remote", "--verbose")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
lines := strings.Split(stdout, "\n")
|
||||
remotes := make(map[string]string, len(lines))
|
||||
|
||||
for _, line := range lines {
|
||||
if strings.TrimSpace(line) == "" {
|
||||
continue
|
||||
}
|
||||
elements := strings.Fields(line)
|
||||
if len(elements) != 3 {
|
||||
return nil, fmt.Errorf("git remote: unexpected output format: %s", line)
|
||||
}
|
||||
|
||||
remotes[elements[0]] = elements[1]
|
||||
}
|
||||
|
||||
return remotes, nil
|
||||
}
|
||||
|
||||
// LocalStorage return a billy.Filesystem giving access to $RepoPath/.git/git-bug
|
||||
func (repo *GitRepo) LocalStorage() billy.Filesystem {
|
||||
return repo.localStorage
|
||||
}
|
||||
|
||||
// GetBleveIndex return a bleve.Index that can be used to index documents
|
||||
func (repo *GitRepo) GetBleveIndex(name string) (bleve.Index, error) {
|
||||
repo.indexesMutex.Lock()
|
||||
defer repo.indexesMutex.Unlock()
|
||||
|
||||
if index, ok := repo.indexes[name]; ok {
|
||||
return index, nil
|
||||
}
|
||||
|
||||
path := filepath.Join(repo.path, "indexes", name)
|
||||
|
||||
index, err := bleve.Open(path)
|
||||
if err == nil {
|
||||
repo.indexes[name] = index
|
||||
return index, nil
|
||||
}
|
||||
|
||||
err = os.MkdirAll(path, os.ModeDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
mapping := bleve.NewIndexMapping()
|
||||
mapping.DefaultAnalyzer = "en"
|
||||
|
||||
index, err = bleve.New(path, mapping)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repo.indexes[name] = index
|
||||
|
||||
return index, nil
|
||||
}
|
||||
|
||||
// ClearBleveIndex will wipe the given index
|
||||
func (repo *GitRepo) ClearBleveIndex(name string) error {
|
||||
repo.indexesMutex.Lock()
|
||||
defer repo.indexesMutex.Unlock()
|
||||
|
||||
path := filepath.Join(repo.path, "indexes", name)
|
||||
|
||||
err := os.RemoveAll(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
delete(repo.indexes, name)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// FetchRefs fetch git refs from a remote
|
||||
func (repo *GitRepo) FetchRefs(remote, refSpec string) (string, error) {
|
||||
stdout, err := repo.runGitCommand("fetch", remote, refSpec)
|
||||
|
||||
if err != nil {
|
||||
return stdout, fmt.Errorf("failed to fetch from the remote '%s': %v", remote, err)
|
||||
}
|
||||
|
||||
return stdout, err
|
||||
}
|
||||
|
||||
// PushRefs push git refs to a remote
|
||||
func (repo *GitRepo) PushRefs(remote string, refSpec string) (string, error) {
|
||||
stdout, stderr, err := repo.runGitCommandRaw(nil, "push", remote, refSpec)
|
||||
|
||||
if err != nil {
|
||||
return stdout + stderr, fmt.Errorf("failed to push to the remote '%s': %v", remote, stderr)
|
||||
}
|
||||
return stdout + stderr, nil
|
||||
}
|
||||
|
||||
// StoreData will store arbitrary data and return the corresponding hash
|
||||
func (repo *GitRepo) StoreData(data []byte) (Hash, error) {
|
||||
var stdin = bytes.NewReader(data)
|
||||
|
||||
stdout, err := repo.runGitCommandWithStdin(stdin, "hash-object", "--stdin", "-w")
|
||||
|
||||
return Hash(stdout), err
|
||||
}
|
||||
|
||||
// ReadData will attempt to read arbitrary data from the given hash
|
||||
func (repo *GitRepo) ReadData(hash Hash) ([]byte, error) {
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
|
||||
err := repo.runGitCommandWithIO(nil, &stdout, &stderr, "cat-file", "-p", string(hash))
|
||||
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
|
||||
return stdout.Bytes(), nil
|
||||
}
|
||||
|
||||
// StoreTree will store a mapping key-->Hash as a Git tree
|
||||
func (repo *GitRepo) StoreTree(entries []TreeEntry) (Hash, error) {
|
||||
buffer := prepareTreeEntries(entries)
|
||||
|
||||
stdout, err := repo.runGitCommandWithStdin(&buffer, "mktree")
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return Hash(stdout), nil
|
||||
}
|
||||
|
||||
// StoreCommit will store a Git commit with the given Git tree
|
||||
func (repo *GitRepo) StoreCommit(treeHash Hash) (Hash, error) {
|
||||
stdout, err := repo.runGitCommand("commit-tree", string(treeHash))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return Hash(stdout), nil
|
||||
}
|
||||
|
||||
// StoreCommitWithParent will store a Git commit with the given Git tree
|
||||
func (repo *GitRepo) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error) {
|
||||
stdout, err := repo.runGitCommand("commit-tree", string(treeHash),
|
||||
"-p", string(parent))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return Hash(stdout), nil
|
||||
}
|
||||
|
||||
// UpdateRef will create or update a Git reference
|
||||
func (repo *GitRepo) UpdateRef(ref string, hash Hash) error {
|
||||
_, err := repo.runGitCommand("update-ref", ref, string(hash))
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// RemoveRef will remove a Git reference
|
||||
func (repo *GitRepo) RemoveRef(ref string) error {
|
||||
_, err := repo.runGitCommand("update-ref", "-d", ref)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ListRefs will return a list of Git ref matching the given refspec
|
||||
func (repo *GitRepo) ListRefs(refPrefix string) ([]string, error) {
|
||||
stdout, err := repo.runGitCommand("for-each-ref", "--format=%(refname)", refPrefix)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
split := strings.Split(stdout, "\n")
|
||||
|
||||
if len(split) == 1 && split[0] == "" {
|
||||
return []string{}, nil
|
||||
}
|
||||
|
||||
return split, nil
|
||||
}
|
||||
|
||||
// RefExist will check if a reference exist in Git
|
||||
func (repo *GitRepo) RefExist(ref string) (bool, error) {
|
||||
stdout, err := repo.runGitCommand("for-each-ref", ref)
|
||||
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return stdout != "", nil
|
||||
}
|
||||
|
||||
// CopyRef will create a new reference with the same value as another one
|
||||
func (repo *GitRepo) CopyRef(source string, dest string) error {
|
||||
_, err := repo.runGitCommand("update-ref", dest, source)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// ListCommits will return the list of commit hashes of a ref, in chronological order
|
||||
func (repo *GitRepo) ListCommits(ref string) ([]Hash, error) {
|
||||
stdout, err := repo.runGitCommand("rev-list", "--first-parent", "--reverse", ref)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
split := strings.Split(stdout, "\n")
|
||||
|
||||
casted := make([]Hash, len(split))
|
||||
for i, line := range split {
|
||||
casted[i] = Hash(line)
|
||||
}
|
||||
|
||||
return casted, nil
|
||||
|
||||
}
|
||||
|
||||
// ReadTree will return the list of entries in a Git tree
|
||||
func (repo *GitRepo) ReadTree(hash Hash) ([]TreeEntry, error) {
|
||||
stdout, err := repo.runGitCommand("ls-tree", string(hash))
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return readTreeEntries(stdout)
|
||||
}
|
||||
|
||||
// FindCommonAncestor will return the last common ancestor of two chain of commit
|
||||
func (repo *GitRepo) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error) {
|
||||
stdout, err := repo.runGitCommand("merge-base", string(hash1), string(hash2))
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return Hash(stdout), nil
|
||||
}
|
||||
|
||||
// GetTreeHash return the git tree hash referenced in a commit
|
||||
func (repo *GitRepo) GetTreeHash(commit Hash) (Hash, error) {
|
||||
stdout, err := repo.runGitCommand("rev-parse", string(commit)+"^{tree}")
|
||||
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return Hash(stdout), nil
|
||||
}
|
||||
|
||||
func (repo *GitRepo) AllClocks() (map[string]lamport.Clock, error) {
|
||||
repo.clocksMutex.Lock()
|
||||
defer repo.clocksMutex.Unlock()
|
||||
|
||||
result := make(map[string]lamport.Clock)
|
||||
|
||||
files, err := ioutil.ReadDir(filepath.Join(repo.path, "git-bug", clockPath))
|
||||
if os.IsNotExist(err) {
|
||||
return nil, nil
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
name := file.Name()
|
||||
if c, ok := repo.clocks[name]; ok {
|
||||
result[name] = c
|
||||
} else {
|
||||
c, err := lamport.LoadPersistedClock(repo.LocalStorage(), filepath.Join(clockPath, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
repo.clocks[name] = c
|
||||
result[name] = c
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// GetOrCreateClock return a Lamport clock stored in the Repo.
|
||||
// If the clock doesn't exist, it's created.
|
||||
func (repo *GitRepo) GetOrCreateClock(name string) (lamport.Clock, error) {
|
||||
repo.clocksMutex.Lock()
|
||||
defer repo.clocksMutex.Unlock()
|
||||
|
||||
c, err := repo.getClock(name)
|
||||
if err == nil {
|
||||
return c, nil
|
||||
}
|
||||
if err != ErrClockNotExist {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c, err = lamport.NewPersistedClock(repo.LocalStorage(), filepath.Join(clockPath, name))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
repo.clocks[name] = c
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (repo *GitRepo) getClock(name string) (lamport.Clock, error) {
|
||||
if c, ok := repo.clocks[name]; ok {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
c, err := lamport.LoadPersistedClock(repo.LocalStorage(), filepath.Join(clockPath, name))
|
||||
if err == nil {
|
||||
repo.clocks[name] = c
|
||||
return c, nil
|
||||
}
|
||||
if err == lamport.ErrClockNotExist {
|
||||
return nil, ErrClockNotExist
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Increment is equivalent to c = GetOrCreateClock(name) + c.Increment()
|
||||
func (repo *GitRepo) Increment(name string) (lamport.Time, error) {
|
||||
c, err := repo.GetOrCreateClock(name)
|
||||
if err != nil {
|
||||
return lamport.Time(0), err
|
||||
}
|
||||
return c.Increment()
|
||||
}
|
||||
|
||||
// Witness is equivalent to c = GetOrCreateClock(name) + c.Witness(time)
|
||||
func (repo *GitRepo) Witness(name string, time lamport.Time) error {
|
||||
c, err := repo.GetOrCreateClock(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.Witness(time)
|
||||
}
|
||||
|
||||
// AddRemote add a new remote to the repository
|
||||
// Not in the interface because it's only used for testing
|
||||
func (repo *GitRepo) AddRemote(name string, url string) error {
|
||||
_, err := repo.runGitCommand("remote", "add", name, url)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GetLocalRemote return the URL to use to add this repo as a local remote
|
||||
func (repo *GitRepo) GetLocalRemote() string {
|
||||
return repo.path
|
||||
}
|
||||
|
||||
// EraseFromDisk delete this repository entirely from the disk
|
||||
func (repo *GitRepo) EraseFromDisk() error {
|
||||
err := repo.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := filepath.Clean(strings.TrimSuffix(repo.path, string(filepath.Separator)+".git"))
|
||||
|
||||
// fmt.Println("Cleaning repo:", path)
|
||||
return os.RemoveAll(path)
|
||||
}
|
@ -1,56 +0,0 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// gitCli is a helper to launch CLI git commands
|
||||
type gitCli struct {
|
||||
path string
|
||||
}
|
||||
|
||||
// Run the given git command with the given I/O reader/writers, returning an error if it fails.
|
||||
func (cli gitCli) runGitCommandWithIO(stdin io.Reader, stdout, stderr io.Writer, args ...string) error {
|
||||
// make sure that the working directory for the command
|
||||
// always exist, in particular when running "git init".
|
||||
path := strings.TrimSuffix(cli.path, ".git")
|
||||
|
||||
// fmt.Printf("[%s] Running git %s\n", path, strings.Join(args, " "))
|
||||
|
||||
cmd := exec.Command("git", args...)
|
||||
cmd.Dir = path
|
||||
cmd.Stdin = stdin
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// Run the given git command and return its stdout, or an error if the command fails.
|
||||
func (cli gitCli) runGitCommandRaw(stdin io.Reader, args ...string) (string, string, error) {
|
||||
var stdout bytes.Buffer
|
||||
var stderr bytes.Buffer
|
||||
err := cli.runGitCommandWithIO(stdin, &stdout, &stderr, args...)
|
||||
return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
|
||||
}
|
||||
|
||||
// Run the given git command and return its stdout, or an error if the command fails.
|
||||
func (cli gitCli) runGitCommandWithStdin(stdin io.Reader, args ...string) (string, error) {
|
||||
stdout, stderr, err := cli.runGitCommandRaw(stdin, args...)
|
||||
if err != nil {
|
||||
if stderr == "" {
|
||||
stderr = "Error running git command: " + strings.Join(args, " ")
|
||||
}
|
||||
err = fmt.Errorf(stderr)
|
||||
}
|
||||
return stdout, err
|
||||
}
|
||||
|
||||
// Run the given git command and return its stdout, or an error if the command fails.
|
||||
func (cli gitCli) runGitCommand(args ...string) (string, error) {
|
||||
return cli.runGitCommandWithStdin(nil, args...)
|
||||
}
|
@ -1,221 +0,0 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/blang/semver"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var _ Config = &gitConfig{}
|
||||
|
||||
type gitConfig struct {
|
||||
cli gitCli
|
||||
localityFlag string
|
||||
}
|
||||
|
||||
func newGitConfig(cli gitCli, global bool) *gitConfig {
|
||||
localityFlag := "--local"
|
||||
if global {
|
||||
localityFlag = "--global"
|
||||
}
|
||||
return &gitConfig{
|
||||
cli: cli,
|
||||
localityFlag: localityFlag,
|
||||
}
|
||||
}
|
||||
|
||||
// StoreString store a single key/value pair in the config of the repo
|
||||
func (gc *gitConfig) StoreString(key string, value string) error {
|
||||
_, err := gc.cli.runGitCommand("config", gc.localityFlag, "--replace-all", key, value)
|
||||
return err
|
||||
}
|
||||
|
||||
func (gc *gitConfig) StoreBool(key string, value bool) error {
|
||||
return gc.StoreString(key, strconv.FormatBool(value))
|
||||
}
|
||||
|
||||
func (gc *gitConfig) StoreTimestamp(key string, value time.Time) error {
|
||||
return gc.StoreString(key, strconv.Itoa(int(value.Unix())))
|
||||
}
|
||||
|
||||
// ReadAll read all key/value pair matching the key prefix
|
||||
func (gc *gitConfig) ReadAll(keyPrefix string) (map[string]string, error) {
|
||||
stdout, err := gc.cli.runGitCommand("config", gc.localityFlag, "--includes", "--get-regexp", keyPrefix)
|
||||
|
||||
// / \
|
||||
// / ! \
|
||||
// -------
|
||||
//
|
||||
// There can be a legitimate error here, but I see no portable way to
|
||||
// distinguish them from the git error that say "no matching value exist"
|
||||
if err != nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
lines := strings.Split(stdout, "\n")
|
||||
|
||||
result := make(map[string]string, len(lines))
|
||||
|
||||
for _, line := range lines {
|
||||
if strings.TrimSpace(line) == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
parts := strings.SplitN(line, " ", 2)
|
||||
result[parts[0]] = parts[1]
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (gc *gitConfig) ReadString(key string) (string, error) {
|
||||
stdout, err := gc.cli.runGitCommand("config", gc.localityFlag, "--includes", "--get-all", key)
|
||||
|
||||
// / \
|
||||
// / ! \
|
||||
// -------
|
||||
//
|
||||
// There can be a legitimate error here, but I see no portable way to
|
||||
// distinguish them from the git error that say "no matching value exist"
|
||||
if err != nil {
|
||||
return "", ErrNoConfigEntry
|
||||
}
|
||||
|
||||
lines := strings.Split(stdout, "\n")
|
||||
|
||||
if len(lines) == 0 {
|
||||
return "", ErrNoConfigEntry
|
||||
}
|
||||
if len(lines) > 1 {
|
||||
return "", ErrMultipleConfigEntry
|
||||
}
|
||||
|
||||
return lines[0], nil
|
||||
}
|
||||
|
||||
func (gc *gitConfig) ReadBool(key string) (bool, error) {
|
||||
val, err := gc.ReadString(key)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return strconv.ParseBool(val)
|
||||
}
|
||||
|
||||
func (gc *gitConfig) ReadTimestamp(key string) (time.Time, error) {
|
||||
value, err := gc.ReadString(key)
|
||||
if err != nil {
|
||||
return time.Time{}, err
|
||||
}
|
||||
return ParseTimestamp(value)
|
||||
}
|
||||
|
||||
func (gc *gitConfig) rmSection(keyPrefix string) error {
|
||||
_, err := gc.cli.runGitCommand("config", gc.localityFlag, "--remove-section", keyPrefix)
|
||||
return err
|
||||
}
|
||||
|
||||
func (gc *gitConfig) unsetAll(keyPrefix string) error {
|
||||
_, err := gc.cli.runGitCommand("config", gc.localityFlag, "--unset-all", keyPrefix)
|
||||
return err
|
||||
}
|
||||
|
||||
// return keyPrefix section
|
||||
// example: sectionFromKey(a.b.c.d) return a.b.c
|
||||
func sectionFromKey(keyPrefix string) string {
|
||||
s := strings.Split(keyPrefix, ".")
|
||||
if len(s) == 1 {
|
||||
return keyPrefix
|
||||
}
|
||||
|
||||
return strings.Join(s[:len(s)-1], ".")
|
||||
}
|
||||
|
||||
// rmConfigs with git version lesser than 2.18
|
||||
func (gc *gitConfig) rmConfigsGitVersionLT218(keyPrefix string) error {
|
||||
// try to remove key/value pair by key
|
||||
err := gc.unsetAll(keyPrefix)
|
||||
if err != nil {
|
||||
return gc.rmSection(keyPrefix)
|
||||
}
|
||||
|
||||
m, err := gc.ReadAll(sectionFromKey(keyPrefix))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if section doesn't have any left key/value remove the section
|
||||
if len(m) == 0 {
|
||||
return gc.rmSection(sectionFromKey(keyPrefix))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RmConfigs remove all key/value pair matching the key prefix
|
||||
func (gc *gitConfig) RemoveAll(keyPrefix string) error {
|
||||
// starting from git 2.18.0 sections are automatically deleted when the last existing
|
||||
// key/value is removed. Before 2.18.0 we should remove the section
|
||||
// see https://github.com/git/git/blob/master/Documentation/RelNotes/2.18.0.txt#L379
|
||||
lt218, err := gc.gitVersionLT218()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "getting git version")
|
||||
}
|
||||
|
||||
if lt218 {
|
||||
return gc.rmConfigsGitVersionLT218(keyPrefix)
|
||||
}
|
||||
|
||||
err = gc.unsetAll(keyPrefix)
|
||||
if err != nil {
|
||||
return gc.rmSection(keyPrefix)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (gc *gitConfig) gitVersion() (*semver.Version, error) {
|
||||
versionOut, err := gc.cli.runGitCommand("version")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseGitVersion(versionOut)
|
||||
}
|
||||
|
||||
func parseGitVersion(versionOut string) (*semver.Version, error) {
|
||||
// extract the version and truncate potential bad parts
|
||||
// ex: 2.23.0.rc1 instead of 2.23.0-rc1
|
||||
r := regexp.MustCompile(`(\d+\.){1,2}\d+`)
|
||||
|
||||
extracted := r.FindString(versionOut)
|
||||
if extracted == "" {
|
||||
return nil, fmt.Errorf("unreadable git version %s", versionOut)
|
||||
}
|
||||
|
||||
version, err := semver.Make(extracted)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &version, nil
|
||||
}
|
||||
|
||||
func (gc *gitConfig) gitVersionLT218() (bool, error) {
|
||||
version, err := gc.gitVersion()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
version218string := "2.18.0"
|
||||
gitVersion218, err := semver.Make(version218string)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return version.LT(gitVersion218), nil
|
||||
}
|
@ -1,6 +0,0 @@
|
||||
// Package repository contains helper methods for working with the Git repo.
|
||||
package repository
|
||||
|
||||
// func TestGitRepo(t *testing.T) {
|
||||
// RepoTest(t, CreateTestRepo, CleanupTestRepos)
|
||||
// }
|
@ -1,72 +0,0 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
"github.com/99designs/keyring"
|
||||
)
|
||||
|
||||
// This is intended for testing only
|
||||
|
||||
func CreateTestRepo(bare bool) TestedRepo {
|
||||
dir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var creator func(string) (*GitRepo, error)
|
||||
|
||||
if bare {
|
||||
creator = InitBareGitRepo
|
||||
} else {
|
||||
creator = InitGitRepo
|
||||
}
|
||||
|
||||
repo, err := creator(dir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
config := repo.LocalConfig()
|
||||
if err := config.StoreString("user.name", "testuser"); err != nil {
|
||||
log.Fatal("failed to set user.name for test repository: ", err)
|
||||
}
|
||||
if err := config.StoreString("user.email", "testuser@example.com"); err != nil {
|
||||
log.Fatal("failed to set user.email for test repository: ", err)
|
||||
}
|
||||
|
||||
// make sure we use a mock keyring for testing to not interact with the global system
|
||||
return &replaceKeyring{
|
||||
TestedRepo: repo,
|
||||
keyring: keyring.NewArrayKeyring(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func SetupReposAndRemote() (repoA, repoB, remote TestedRepo) {
|
||||
repoA = CreateGoGitTestRepo(false)
|
||||
repoB = CreateGoGitTestRepo(false)
|
||||
remote = CreateGoGitTestRepo(true)
|
||||
|
||||
err := repoA.AddRemote("origin", remote.GetLocalRemote())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = repoB.AddRemote("origin", remote.GetLocalRemote())
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return repoA, repoB, remote
|
||||
}
|
||||
|
||||
// replaceKeyring allow to replace the Keyring of the underlying repo
|
||||
type replaceKeyring struct {
|
||||
TestedRepo
|
||||
keyring Keyring
|
||||
}
|
||||
|
||||
func (rk replaceKeyring) Keyring() Keyring {
|
||||
return rk.keyring
|
||||
}
|
@ -20,6 +20,7 @@ import (
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/filemode"
|
||||
"github.com/go-git/go-git/v5/plumbing/object"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
|
||||
"github.com/MichaelMure/git-bug/util/lamport"
|
||||
)
|
||||
@ -521,12 +522,13 @@ func (repo *GoGitRepo) ReadTree(hash Hash) ([]TreeEntry, error) {
|
||||
}
|
||||
|
||||
// StoreCommit will store a Git commit with the given Git tree
|
||||
func (repo *GoGitRepo) StoreCommit(treeHash Hash) (Hash, error) {
|
||||
return repo.StoreCommitWithParent(treeHash, "")
|
||||
func (repo *GoGitRepo) StoreCommit(treeHash Hash, parents ...Hash) (Hash, error) {
|
||||
return repo.StoreSignedCommit(treeHash, nil, parents...)
|
||||
}
|
||||
|
||||
// StoreCommit will store a Git commit with the given Git tree
|
||||
func (repo *GoGitRepo) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error) {
|
||||
// StoreSignedCommit will store a Git commit with the given Git tree. If signKey is not nil, the commit
|
||||
// will be signed accordingly.
|
||||
func (repo *GoGitRepo) StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error) {
|
||||
cfg, err := repo.r.Config()
|
||||
if err != nil {
|
||||
return "", err
|
||||
@ -547,8 +549,28 @@ func (repo *GoGitRepo) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash,
|
||||
TreeHash: plumbing.NewHash(treeHash.String()),
|
||||
}
|
||||
|
||||
if parent != "" {
|
||||
commit.ParentHashes = []plumbing.Hash{plumbing.NewHash(parent.String())}
|
||||
for _, parent := range parents {
|
||||
commit.ParentHashes = append(commit.ParentHashes, plumbing.NewHash(parent.String()))
|
||||
}
|
||||
|
||||
// Compute the signature if needed
|
||||
if signKey != nil {
|
||||
// first get the serialized commit
|
||||
encoded := &plumbing.MemoryObject{}
|
||||
if err := commit.Encode(encoded); err != nil {
|
||||
return "", err
|
||||
}
|
||||
r, err := encoded.Reader()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// sign the data
|
||||
var sig bytes.Buffer
|
||||
if err := openpgp.ArmoredDetachSign(&sig, signKey, r, nil); err != nil {
|
||||
return "", err
|
||||
}
|
||||
commit.PGPSignature = sig.String()
|
||||
}
|
||||
|
||||
obj := repo.r.Storer.NewEncodedObject()
|
||||
@ -608,6 +630,13 @@ func (repo *GoGitRepo) UpdateRef(ref string, hash Hash) error {
|
||||
return repo.r.Storer.SetReference(plumbing.NewHashReference(plumbing.ReferenceName(ref), plumbing.NewHash(hash.String())))
|
||||
}
|
||||
|
||||
// MergeRef merges otherRef into ref and updates the reference
|
||||
// If the update is not fast-forward, the callback treeHashFn will be called for the caller to generate
|
||||
// the Tree to store in the merge commit.
|
||||
func (repo *GoGitRepo) MergeRef(ref string, otherRef string, treeHashFn func() Hash) error {
|
||||
return nonNativeMerge(repo, ref, otherRef, treeHashFn)
|
||||
}
|
||||
|
||||
// RemoveRef will remove a Git reference
|
||||
func (repo *GoGitRepo) RemoveRef(ref string) error {
|
||||
return repo.r.Storer.RemoveReference(plumbing.ReferenceName(ref))
|
||||
@ -657,38 +686,16 @@ func (repo *GoGitRepo) CopyRef(source string, dest string) error {
|
||||
|
||||
// ListCommits will return the list of commit hashes of a ref, in chronological order
|
||||
func (repo *GoGitRepo) ListCommits(ref string) ([]Hash, error) {
|
||||
r, err := repo.r.Reference(plumbing.ReferenceName(ref), false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
commit, err := repo.r.CommitObject(r.Hash())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hashes := []Hash{Hash(commit.Hash.String())}
|
||||
|
||||
for {
|
||||
commit, err = commit.Parent(0)
|
||||
if err == object.ErrParentNotFound {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if commit.NumParents() > 1 {
|
||||
return nil, fmt.Errorf("multiple parents")
|
||||
}
|
||||
|
||||
hashes = append([]Hash{Hash(commit.Hash.String())}, hashes...)
|
||||
}
|
||||
|
||||
return hashes, nil
|
||||
return nonNativeListCommits(repo, ref)
|
||||
}
|
||||
|
||||
func (repo *GoGitRepo) ReadCommit(hash Hash) (Commit, error) {
|
||||
commit, err := repo.r.CommitObject(plumbing.NewHash(hash.String()))
|
||||
encoded, err := repo.r.Storer.EncodedObject(plumbing.CommitObject, plumbing.NewHash(hash.String()))
|
||||
if err != nil {
|
||||
return Commit{}, err
|
||||
}
|
||||
|
||||
commit, err := object.DecodeCommit(repo.r.Storer, encoded)
|
||||
if err != nil {
|
||||
return Commit{}, err
|
||||
}
|
||||
@ -698,12 +705,25 @@ func (repo *GoGitRepo) ReadCommit(hash Hash) (Commit, error) {
|
||||
parents[i] = Hash(parentHash.String())
|
||||
}
|
||||
|
||||
return Commit{
|
||||
result := Commit{
|
||||
Hash: hash,
|
||||
Parents: parents,
|
||||
TreeHash: Hash(commit.TreeHash.String()),
|
||||
}, nil
|
||||
}
|
||||
|
||||
if commit.PGPSignature != "" {
|
||||
result.SignedData, err = encoded.Reader()
|
||||
if err != nil {
|
||||
return Commit{}, err
|
||||
}
|
||||
|
||||
result.Signature, err = deArmorSignature(strings.NewReader(commit.PGPSignature))
|
||||
if err != nil {
|
||||
return Commit{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (repo *GoGitRepo) AllClocks() (map[string]lamport.Clock, error) {
|
||||
|
@ -3,6 +3,8 @@ package repository
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
|
||||
"github.com/99designs/keyring"
|
||||
)
|
||||
|
||||
// This is intended for testing only
|
||||
@ -34,7 +36,11 @@ func CreateGoGitTestRepo(bare bool) TestedRepo {
|
||||
log.Fatal("failed to set user.email for test repository: ", err)
|
||||
}
|
||||
|
||||
return repo
|
||||
// make sure we use a mock keyring for testing to not interact with the global system
|
||||
return &replaceKeyring{
|
||||
TestedRepo: repo,
|
||||
keyring: keyring.NewArrayKeyring(nil),
|
||||
}
|
||||
}
|
||||
|
||||
func SetupGoGitReposAndRemote() (repoA, repoB, remote TestedRepo) {
|
||||
|
@ -48,3 +48,13 @@ func defaultKeyring() (Keyring, error) {
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
// replaceKeyring allows replacing the Keyring of the underlying repo
|
||||
type replaceKeyring struct {
|
||||
TestedRepo
|
||||
keyring Keyring
|
||||
}
|
||||
|
||||
func (rk replaceKeyring) Keyring() Keyring {
|
||||
return rk.keyring
|
||||
}
|
||||
|
@ -1,6 +1,7 @@
|
||||
package repository
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha1"
|
||||
"fmt"
|
||||
"strings"
|
||||
@ -10,6 +11,7 @@ import (
|
||||
"github.com/blevesearch/bleve"
|
||||
"github.com/go-git/go-billy/v5"
|
||||
"github.com/go-git/go-billy/v5/memfs"
|
||||
"golang.org/x/crypto/openpgp"
|
||||
|
||||
"github.com/MichaelMure/git-bug/util/lamport"
|
||||
)
|
||||
@ -180,6 +182,7 @@ var _ RepoData = &mockRepoData{}
|
||||
type commit struct {
|
||||
treeHash Hash
|
||||
parents []Hash
|
||||
sig string
|
||||
}
|
||||
|
||||
type mockRepoData struct {
|
||||
@ -198,12 +201,12 @@ func NewMockRepoData() *mockRepoData {
|
||||
}
|
||||
}
|
||||
|
||||
// PushRefs push git refs to a remote
|
||||
func (r *mockRepoData) PushRefs(remote string, refSpec string) (string, error) {
|
||||
func (r *mockRepoData) FetchRefs(remote string, refSpec string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) FetchRefs(remote string, refSpec string) (string, error) {
|
||||
// PushRefs push git refs to a remote
|
||||
func (r *mockRepoData) PushRefs(remote string, refSpec string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
@ -216,7 +219,6 @@ func (r *mockRepoData) StoreData(data []byte) (Hash, error) {
|
||||
|
||||
func (r *mockRepoData) ReadData(hash Hash) ([]byte, error) {
|
||||
data, ok := r.blobs[hash]
|
||||
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown hash")
|
||||
}
|
||||
@ -233,107 +235,6 @@ func (r *mockRepoData) StoreTree(entries []TreeEntry) (Hash, error) {
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) StoreCommit(treeHash Hash) (Hash, error) {
|
||||
rawHash := sha1.Sum([]byte(treeHash))
|
||||
hash := Hash(fmt.Sprintf("%x", rawHash))
|
||||
r.commits[hash] = commit{
|
||||
treeHash: treeHash,
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error) {
|
||||
rawHash := sha1.Sum([]byte(treeHash + parent))
|
||||
hash := Hash(fmt.Sprintf("%x", rawHash))
|
||||
r.commits[hash] = commit{
|
||||
treeHash: treeHash,
|
||||
parents: []Hash{parent},
|
||||
}
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ResolveRef(ref string) (Hash, error) {
|
||||
h, ok := r.refs[ref]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unknown ref")
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) UpdateRef(ref string, hash Hash) error {
|
||||
r.refs[ref] = hash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) RemoveRef(ref string) error {
|
||||
delete(r.refs, ref)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) RefExist(ref string) (bool, error) {
|
||||
_, exist := r.refs[ref]
|
||||
return exist, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) CopyRef(source string, dest string) error {
|
||||
hash, exist := r.refs[source]
|
||||
|
||||
if !exist {
|
||||
return fmt.Errorf("Unknown ref")
|
||||
}
|
||||
|
||||
r.refs[dest] = hash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ListRefs(refPrefix string) ([]string, error) {
|
||||
var keys []string
|
||||
|
||||
for k := range r.refs {
|
||||
if strings.HasPrefix(k, refPrefix) {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ListCommits(ref string) ([]Hash, error) {
|
||||
var hashes []Hash
|
||||
|
||||
hash := r.refs[ref]
|
||||
|
||||
for {
|
||||
commit, ok := r.commits[hash]
|
||||
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
hashes = append([]Hash{hash}, hashes...)
|
||||
|
||||
if len(commit.parents) == 0 {
|
||||
break
|
||||
}
|
||||
hash = commit.parents[0]
|
||||
}
|
||||
|
||||
return hashes, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ReadCommit(hash Hash) (Commit, error) {
|
||||
c, ok := r.commits[hash]
|
||||
if !ok {
|
||||
return Commit{}, fmt.Errorf("unknown commit")
|
||||
}
|
||||
|
||||
return Commit{
|
||||
Hash: hash,
|
||||
Parents: c.parents,
|
||||
TreeHash: c.treeHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ReadTree(hash Hash) ([]TreeEntry, error) {
|
||||
var data string
|
||||
|
||||
@ -357,6 +258,109 @@ func (r *mockRepoData) ReadTree(hash Hash) ([]TreeEntry, error) {
|
||||
return readTreeEntries(data)
|
||||
}
|
||||
|
||||
func (r *mockRepoData) StoreCommit(treeHash Hash, parents ...Hash) (Hash, error) {
|
||||
return r.StoreSignedCommit(treeHash, nil, parents...)
|
||||
}
|
||||
|
||||
func (r *mockRepoData) StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error) {
|
||||
hasher := sha1.New()
|
||||
hasher.Write([]byte(treeHash))
|
||||
for _, parent := range parents {
|
||||
hasher.Write([]byte(parent))
|
||||
}
|
||||
rawHash := hasher.Sum(nil)
|
||||
hash := Hash(fmt.Sprintf("%x", rawHash))
|
||||
c := commit{
|
||||
treeHash: treeHash,
|
||||
parents: parents,
|
||||
}
|
||||
if signKey != nil {
|
||||
// unlike go-git, we only sign the tree hash for simplicity instead of all the fields (parents ...)
|
||||
var sig bytes.Buffer
|
||||
if err := openpgp.DetachSign(&sig, signKey, strings.NewReader(string(treeHash)), nil); err != nil {
|
||||
return "", err
|
||||
}
|
||||
c.sig = sig.String()
|
||||
}
|
||||
r.commits[hash] = c
|
||||
return hash, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ReadCommit(hash Hash) (Commit, error) {
|
||||
c, ok := r.commits[hash]
|
||||
if !ok {
|
||||
return Commit{}, fmt.Errorf("unknown commit")
|
||||
}
|
||||
|
||||
result := Commit{
|
||||
Hash: hash,
|
||||
Parents: c.parents,
|
||||
TreeHash: c.treeHash,
|
||||
}
|
||||
|
||||
if c.sig != "" {
|
||||
result.SignedData = strings.NewReader(string(c.treeHash))
|
||||
result.Signature = strings.NewReader(c.sig)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) GetTreeHash(commit Hash) (Hash, error) {
|
||||
c, ok := r.commits[commit]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unknown commit")
|
||||
}
|
||||
|
||||
return c.treeHash, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ResolveRef(ref string) (Hash, error) {
|
||||
h, ok := r.refs[ref]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("unknown ref")
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) UpdateRef(ref string, hash Hash) error {
|
||||
r.refs[ref] = hash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) RemoveRef(ref string) error {
|
||||
delete(r.refs, ref)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) ListRefs(refPrefix string) ([]string, error) {
|
||||
var keys []string
|
||||
|
||||
for k := range r.refs {
|
||||
if strings.HasPrefix(k, refPrefix) {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
|
||||
return keys, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) RefExist(ref string) (bool, error) {
|
||||
_, exist := r.refs[ref]
|
||||
return exist, nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) CopyRef(source string, dest string) error {
|
||||
hash, exist := r.refs[source]
|
||||
|
||||
if !exist {
|
||||
return fmt.Errorf("Unknown ref")
|
||||
}
|
||||
|
||||
r.refs[dest] = hash
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *mockRepoData) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error) {
|
||||
ancestor1 := []Hash{hash1}
|
||||
|
||||
@ -392,13 +396,8 @@ func (r *mockRepoData) FindCommonAncestor(hash1 Hash, hash2 Hash) (Hash, error)
}
}

func (r *mockRepoData) GetTreeHash(commit Hash) (Hash, error) {
c, ok := r.commits[commit]
if !ok {
return "", fmt.Errorf("unknown commit")
}

return c.treeHash, nil
}

func (r *mockRepoData) ListCommits(ref string) ([]Hash, error) {
return nonNativeListCommits(r, ref)
}

var _ RepoClock = &mockRepoClock{}

@ -1,6 +1,8 @@
package repository

import "testing"
import (
"testing"
)

func TestMockRepo(t *testing.T) {
creator := func(bare bool) TestedRepo { return NewMockRepo() }

@ -3,15 +3,17 @@ package repository

import (
"errors"
"io"

"github.com/blevesearch/bleve"
"github.com/go-git/go-billy/v5"
"golang.org/x/crypto/openpgp"

"github.com/MichaelMure/git-bug/util/lamport"
)

var (
// ErrNotARepo is the error returned when the git repo root wan't be found
// ErrNotARepo is the error returned when the git repo root can't be found
ErrNotARepo = errors.New("not a git repository")
// ErrClockNotExist is the error returned when a clock can't be found
ErrClockNotExist = errors.New("clock doesn't exist")
@ -89,9 +91,11 @@ type RepoBleve interface {
}

type Commit struct {
Hash Hash
Parents []Hash
TreeHash Hash
Hash Hash
Parents []Hash // hashes of the parents, if any
TreeHash Hash // hash of the git Tree
SignedData io.Reader // if signed, reader for the signed data (likely, the serialized commit)
Signature io.Reader // if signed, reader for the (non-armored) signature
}
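// Illustrative sketch (not part of this interface): given a hypothetical keyring of type openpgp.EntityList,
// a caller could check a commit signature roughly like this:
//
//	c, _ := repo.ReadCommit(hash)
//	if c.Signature != nil {
//		_, err := openpgp.CheckDetachedSignature(keyring, c.SignedData, c.Signature)
//		// err == nil means the signature was made by a key present in the keyring
//	}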

// RepoData gives access to the git data storage
@ -116,21 +120,29 @@ type RepoData interface {
ReadTree(hash Hash) ([]TreeEntry, error)

// StoreCommit will store a Git commit with the given Git tree
StoreCommit(treeHash Hash) (Hash, error)
StoreCommit(treeHash Hash, parents ...Hash) (Hash, error)

// StoreCommit will store a Git commit with the given Git tree
StoreCommitWithParent(treeHash Hash, parent Hash) (Hash, error)
// StoreCommit will store a Git commit with the given Git tree. If signKey is not nil, the commit
// will be signed accordingly.
StoreSignedCommit(treeHash Hash, signKey *openpgp.Entity, parents ...Hash) (Hash, error)
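// Note: with parents being variadic, a merge commit can be created by passing several parents, for
// example StoreCommit(mergedTree, parent1, parent2); mergedTree, parent1 and parent2 are placeholders.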

// ReadCommit reads a Git commit and returns some of its characteristics
ReadCommit(hash Hash) (Commit, error)

// GetTreeHash returns the git tree hash referenced in a commit
GetTreeHash(commit Hash) (Hash, error)

// ResolveRef returns the hash of the target commit of the given ref
ResolveRef(ref string) (Hash, error)

// UpdateRef will create or update a Git reference
UpdateRef(ref string, hash Hash) error

// // MergeRef merge other into ref and update the reference
// // If the update is not fast-forward, the callback treeHashFn will be called for the caller to generate
// // the Tree to store in the merge commit.
// MergeRef(ref string, otherRef string, treeHashFn func() Hash) error

// RemoveRef will remove a Git reference
RemoveRef(ref string) error

@ -148,7 +160,6 @@ type RepoData interface {
FindCommonAncestor(commit1 Hash, commit2 Hash) (Hash, error)

// ListCommits will return the list of commit hashes of a ref, in chronological order
// Deprecated
ListCommits(ref string) ([]Hash, error)
}

@ -135,7 +135,8 @@ func RepoDataTest(t *testing.T, repo RepoData) {
require.NoError(t, err)
require.Equal(t, treeHash1, treeHash1Read)

commit2, err := repo.StoreCommitWithParent(treeHash2, commit1)
// commit with a parent
commit2, err := repo.StoreCommit(treeHash2, commit1)
require.NoError(t, err)
require.True(t, commit2.IsValid())

@ -187,7 +188,7 @@ func RepoDataTest(t *testing.T, repo RepoData) {

// Graph

commit3, err := repo.StoreCommitWithParent(treeHash1, commit1)
commit3, err := repo.StoreCommit(treeHash1, commit1)
require.NoError(t, err)

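// commit2 and commit3 both have commit1 as their single parent, so commit1 is their common ancestor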
ancestorHash, err := repo.FindCommonAncestor(commit2, commit3)
@ -237,3 +238,22 @@ func randomData() []byte {
}
return b
}

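// makeCommit is a test helper: it stores a random blob in a one-entry tree and commits that tree with the
// given parents, so tests can build arbitrary commit graphs.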
func makeCommit(t *testing.T, repo RepoData, parents ...Hash) Hash {
blobHash, err := repo.StoreData(randomData())
require.NoError(t, err)

treeHash, err := repo.StoreTree([]TreeEntry{
{
ObjectType: Blob,
Hash: blobHash,
Name: "foo",
},
})
require.NoError(t, err)

commitHash, err := repo.StoreCommit(treeHash, parents...)
require.NoError(t, err)

return commitHash
}

@ -14,11 +14,11 @@ func testClock(t *testing.T, c Clock) {
assert.Equal(t, Time(2), val)
assert.Equal(t, Time(2), c.Time())

err = c.Witness(41)
err = c.Witness(42)
assert.NoError(t, err)
assert.Equal(t, Time(42), c.Time())

err = c.Witness(41)
err = c.Witness(42)
assert.NoError(t, err)
assert.Equal(t, Time(42), c.Time())
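// with the new Witness semantics, witnessing 42 pins the clock to exactly 42, and witnessing the same
// value again is a no-op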
@ -25,6 +25,14 @@

*/

// Note: this code originally comes from Hashicorp's Serf but has been changed since to fit git-bug's needs.

// Note: this Lamport clock implementation is different from the algorithms you can find, notably Wikipedia or the
// original Serf implementation. The reason lies in what constitutes an event in this distributed system.
// Commonly, events happen when messages are sent or received, whereas in git-bug events happen when some data is
// written, but *not* when read. This is why Witness sets the time to the max seen value instead of max seen value +1.
// See https://cs.stackexchange.com/a/133730/129795
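// For example (illustrative): a clock at 4 that witnesses a value of 10 jumps to exactly 10, not 11;
// the next local write will then be stamped 11.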

package lamport

import (
@ -72,12 +80,12 @@ WITNESS:
// If the other value is old, we do not need to do anything
cur := atomic.LoadUint64(&mc.counter)
other := uint64(v)
if other < cur {
if other <= cur {
return nil
}

// Ensure that our local clock is at least as recent as the witnessed value.
if !atomic.CompareAndSwapUint64(&mc.counter, cur, other+1) {
if !atomic.CompareAndSwapUint64(&mc.counter, cur, other) {
// CAS: CompareAndSwap
// The CAS failed, so we just retry. Eventually our CAS should
// succeed or a future witness will pass us by and our witness