Mirror of https://github.com/MichaelMure/git-bug.git (synced 2024-12-13 20:13:15 +03:00)

bug: migrate to the DAG entity structure!

parent 99b9dd84cb
commit 3f6ef50883
@@ -19,7 +19,7 @@ func (createOperationResolver) ID(_ context.Context, obj *bug.CreateOperation) (
}

func (createOperationResolver) Author(_ context.Context, obj *bug.CreateOperation) (models.IdentityWrapper, error) {
	return models.NewLoadedIdentity(obj.Author), nil
	return models.NewLoadedIdentity(obj.Author()), nil
}

func (createOperationResolver) Date(_ context.Context, obj *bug.CreateOperation) (*time.Time, error) {
@@ -36,7 +36,7 @@ func (addCommentOperationResolver) ID(_ context.Context, obj *bug.AddCommentOper
}

func (addCommentOperationResolver) Author(_ context.Context, obj *bug.AddCommentOperation) (models.IdentityWrapper, error) {
	return models.NewLoadedIdentity(obj.Author), nil
	return models.NewLoadedIdentity(obj.Author()), nil
}

func (addCommentOperationResolver) Date(_ context.Context, obj *bug.AddCommentOperation) (*time.Time, error) {
@@ -57,7 +57,7 @@ func (editCommentOperationResolver) Target(_ context.Context, obj *bug.EditComme
}

func (editCommentOperationResolver) Author(_ context.Context, obj *bug.EditCommentOperation) (models.IdentityWrapper, error) {
	return models.NewLoadedIdentity(obj.Author), nil
	return models.NewLoadedIdentity(obj.Author()), nil
}

func (editCommentOperationResolver) Date(_ context.Context, obj *bug.EditCommentOperation) (*time.Time, error) {
@@ -74,7 +74,7 @@ func (labelChangeOperationResolver) ID(_ context.Context, obj *bug.LabelChangeOp
}

func (labelChangeOperationResolver) Author(_ context.Context, obj *bug.LabelChangeOperation) (models.IdentityWrapper, error) {
	return models.NewLoadedIdentity(obj.Author), nil
	return models.NewLoadedIdentity(obj.Author()), nil
}

func (labelChangeOperationResolver) Date(_ context.Context, obj *bug.LabelChangeOperation) (*time.Time, error) {
@@ -91,7 +91,7 @@ func (setStatusOperationResolver) ID(_ context.Context, obj *bug.SetStatusOperat
}

func (setStatusOperationResolver) Author(_ context.Context, obj *bug.SetStatusOperation) (models.IdentityWrapper, error) {
	return models.NewLoadedIdentity(obj.Author), nil
	return models.NewLoadedIdentity(obj.Author()), nil
}

func (setStatusOperationResolver) Date(_ context.Context, obj *bug.SetStatusOperation) (*time.Time, error) {
@@ -112,7 +112,7 @@ func (setTitleOperationResolver) ID(_ context.Context, obj *bug.SetTitleOperatio
}

func (setTitleOperationResolver) Author(_ context.Context, obj *bug.SetTitleOperation) (models.IdentityWrapper, error) {
	return models.NewLoadedIdentity(obj.Author), nil
	return models.NewLoadedIdentity(obj.Author()), nil
}

func (setTitleOperationResolver) Date(_ context.Context, obj *bug.SetTitleOperation) (*time.Time, error) {
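Every hunk above makes the same change: the operation's author used to be an exported struct field (`obj.Author`) and is now an accessor method (`obj.Author()`), since the DAG entity framework keeps operation data behind its interface. A minimal sketch of calling code written against the new accessor; the package and function names are illustrative, not part of the commit:

```go
package example // hypothetical helper package, for illustration only

import (
	"fmt"

	"github.com/MichaelMure/git-bug/bug"
)

// printAuthors lists who touched a bug, going through the Author() accessor
// that the resolvers above now use.
func printAuthors(b *bug.Bug) {
	for _, op := range b.Operations() {
		fmt.Println(op.Author().Name())
	}
}
```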
@@ -14,19 +14,6 @@ type rootQueryResolver struct {
	cache *cache.MultiRepoCache
}

func (r rootQueryResolver) DefaultRepository(_ context.Context) (*models.Repository, error) {
	repo, err := r.cache.DefaultRepo()

	if err != nil {
		return nil, err
	}

	return &models.Repository{
		Cache: r.cache,
		Repo:  repo,
	}, nil
}

func (r rootQueryResolver) Repository(_ context.Context, ref *string) (*models.Repository, error) {
	var repo *cache.RepoCache
	var err error
@@ -294,7 +294,7 @@ func (ge *githubExporter) exportBug(ctx context.Context, b *cache.BugCache, out
			continue
		}

		opAuthor := op.GetAuthor()
		opAuthor := op.Author()
		client, err := ge.getClientForIdentity(opAuthor.Id())
		if err != nil {
			continue
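The exporter keeps the same control flow: resolve the operation's author, look up an API client configured for that identity, and skip the operation if none is available. A condensed sketch of that pattern using the new `Author()` accessor; the client type and lookup function are stand-ins for the bridge's real plumbing:

```go
package example // illustration only

import (
	"github.com/MichaelMure/git-bug/bug"
	"github.com/MichaelMure/git-bug/entity"
)

type apiClient struct{} // stand-in for the bridge's real client type

// exportable keeps only the operations we can act on: those whose author has
// a client (that is, a token) configured, mirroring the exporter loop above.
func exportable(ops []bug.Operation, clientFor func(entity.Id) (*apiClient, error)) []bug.Operation {
	var out []bug.Operation
	for _, op := range ops {
		if _, err := clientFor(op.Author().Id()); err != nil {
			continue // no credentials for this author, skip
		}
		out = append(out, op)
	}
	return out
}
```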
@@ -182,29 +182,24 @@ func TestGithubImporter(t *testing.T) {

		for i, op := range tt.bug.Operations {
			require.IsType(t, ops[i], op)
			require.Equal(t, op.Author().Name(), ops[i].Author().Name())

			switch op.(type) {
			switch op := op.(type) {
			case *bug.CreateOperation:
				require.Equal(t, op.(*bug.CreateOperation).Title, ops[i].(*bug.CreateOperation).Title)
				require.Equal(t, op.(*bug.CreateOperation).Message, ops[i].(*bug.CreateOperation).Message)
				require.Equal(t, op.(*bug.CreateOperation).Author.Name(), ops[i].(*bug.CreateOperation).Author.Name())
				require.Equal(t, op.Title, ops[i].(*bug.CreateOperation).Title)
				require.Equal(t, op.Message, ops[i].(*bug.CreateOperation).Message)
			case *bug.SetStatusOperation:
				require.Equal(t, op.(*bug.SetStatusOperation).Status, ops[i].(*bug.SetStatusOperation).Status)
				require.Equal(t, op.(*bug.SetStatusOperation).Author.Name(), ops[i].(*bug.SetStatusOperation).Author.Name())
				require.Equal(t, op.Status, ops[i].(*bug.SetStatusOperation).Status)
			case *bug.SetTitleOperation:
				require.Equal(t, op.(*bug.SetTitleOperation).Was, ops[i].(*bug.SetTitleOperation).Was)
				require.Equal(t, op.(*bug.SetTitleOperation).Title, ops[i].(*bug.SetTitleOperation).Title)
				require.Equal(t, op.(*bug.SetTitleOperation).Author.Name(), ops[i].(*bug.SetTitleOperation).Author.Name())
				require.Equal(t, op.Was, ops[i].(*bug.SetTitleOperation).Was)
				require.Equal(t, op.Title, ops[i].(*bug.SetTitleOperation).Title)
			case *bug.LabelChangeOperation:
				require.ElementsMatch(t, op.(*bug.LabelChangeOperation).Added, ops[i].(*bug.LabelChangeOperation).Added)
				require.ElementsMatch(t, op.(*bug.LabelChangeOperation).Removed, ops[i].(*bug.LabelChangeOperation).Removed)
				require.Equal(t, op.(*bug.LabelChangeOperation).Author.Name(), ops[i].(*bug.LabelChangeOperation).Author.Name())
				require.ElementsMatch(t, op.Added, ops[i].(*bug.LabelChangeOperation).Added)
				require.ElementsMatch(t, op.Removed, ops[i].(*bug.LabelChangeOperation).Removed)
			case *bug.AddCommentOperation:
				require.Equal(t, op.(*bug.AddCommentOperation).Message, ops[i].(*bug.AddCommentOperation).Message)
				require.Equal(t, op.(*bug.AddCommentOperation).Author.Name(), ops[i].(*bug.AddCommentOperation).Author.Name())
				require.Equal(t, op.Message, ops[i].(*bug.AddCommentOperation).Message)
			case *bug.EditCommentOperation:
				require.Equal(t, op.(*bug.EditCommentOperation).Message, ops[i].(*bug.EditCommentOperation).Message)
				require.Equal(t, op.(*bug.EditCommentOperation).Author.Name(), ops[i].(*bug.EditCommentOperation).Author.Name())
				require.Equal(t, op.Message, ops[i].(*bug.EditCommentOperation).Message)

			default:
				panic("unknown operation type")
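The rewritten assertions rely on a bound type switch: `switch op := op.(type)` re-declares `op` with the concrete type inside each case, which is what lets the repeated `op.(*bug.XxxOperation)` casts disappear. A standalone sketch of the idiom with an illustrative helper name; the field names come from the operations used in the test:

```go
package example // illustration only

import "github.com/MichaelMure/git-bug/bug"

// describe returns a short human-readable label for an operation. Inside each
// case, op already has the concrete type, so fields are reached directly.
func describe(op bug.Operation) string {
	switch op := op.(type) {
	case *bug.CreateOperation:
		return "created: " + op.Title
	case *bug.AddCommentOperation:
		return "commented: " + op.Message
	case *bug.SetTitleOperation:
		return "retitled to: " + op.Title
	default:
		return "other operation"
	}
}
```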
@@ -267,7 +267,7 @@ func (ge *gitlabExporter) exportBug(ctx context.Context, b *cache.BugCache, out
			continue
		}

		opAuthor := op.GetAuthor()
		opAuthor := op.Author()
		client, err := ge.getIdentityClient(opAuthor.Id())
		if err != nil {
			continue
@@ -138,29 +138,24 @@ func TestGitlabImport(t *testing.T) {
		for i, op := range tt.bug.Operations {

			require.IsType(t, ops[i], op)
			require.Equal(t, op.Author().Name(), ops[i].Author().Name())

			switch op.(type) {
			switch op := op.(type) {
			case *bug.CreateOperation:
				require.Equal(t, op.(*bug.CreateOperation).Title, ops[i].(*bug.CreateOperation).Title)
				require.Equal(t, op.(*bug.CreateOperation).Message, ops[i].(*bug.CreateOperation).Message)
				require.Equal(t, op.(*bug.CreateOperation).Author.Name(), ops[i].(*bug.CreateOperation).Author.Name())
				require.Equal(t, op.Title, ops[i].(*bug.CreateOperation).Title)
				require.Equal(t, op.Message, ops[i].(*bug.CreateOperation).Message)
			case *bug.SetStatusOperation:
				require.Equal(t, op.(*bug.SetStatusOperation).Status, ops[i].(*bug.SetStatusOperation).Status)
				require.Equal(t, op.(*bug.SetStatusOperation).Author.Name(), ops[i].(*bug.SetStatusOperation).Author.Name())
				require.Equal(t, op.Status, ops[i].(*bug.SetStatusOperation).Status)
			case *bug.SetTitleOperation:
				require.Equal(t, op.(*bug.SetTitleOperation).Was, ops[i].(*bug.SetTitleOperation).Was)
				require.Equal(t, op.(*bug.SetTitleOperation).Title, ops[i].(*bug.SetTitleOperation).Title)
				require.Equal(t, op.(*bug.SetTitleOperation).Author.Name(), ops[i].(*bug.SetTitleOperation).Author.Name())
				require.Equal(t, op.Was, ops[i].(*bug.SetTitleOperation).Was)
				require.Equal(t, op.Title, ops[i].(*bug.SetTitleOperation).Title)
			case *bug.LabelChangeOperation:
				require.ElementsMatch(t, op.(*bug.LabelChangeOperation).Added, ops[i].(*bug.LabelChangeOperation).Added)
				require.ElementsMatch(t, op.(*bug.LabelChangeOperation).Removed, ops[i].(*bug.LabelChangeOperation).Removed)
				require.Equal(t, op.(*bug.LabelChangeOperation).Author.Name(), ops[i].(*bug.LabelChangeOperation).Author.Name())
				require.ElementsMatch(t, op.Added, ops[i].(*bug.LabelChangeOperation).Added)
				require.ElementsMatch(t, op.Removed, ops[i].(*bug.LabelChangeOperation).Removed)
			case *bug.AddCommentOperation:
				require.Equal(t, op.(*bug.AddCommentOperation).Message, ops[i].(*bug.AddCommentOperation).Message)
				require.Equal(t, op.(*bug.AddCommentOperation).Author.Name(), ops[i].(*bug.AddCommentOperation).Author.Name())
				require.Equal(t, op.Message, ops[i].(*bug.AddCommentOperation).Message)
			case *bug.EditCommentOperation:
				require.Equal(t, op.(*bug.EditCommentOperation).Message, ops[i].(*bug.EditCommentOperation).Message)
				require.Equal(t, op.(*bug.EditCommentOperation).Author.Name(), ops[i].(*bug.EditCommentOperation).Author.Name())
				require.Equal(t, op.Message, ops[i].(*bug.EditCommentOperation).Message)

			default:
				panic("unknown operation type")
@@ -309,7 +309,7 @@ func (je *jiraExporter) exportBug(ctx context.Context, b *cache.BugCache, out ch
			continue
		}

		opAuthor := op.GetAuthor()
		opAuthor := op.Author()
		client, err := je.getClientForIdentity(opAuthor.Id())
		if err != nil {
			out <- core.NewExportError(
bug/bug.go (608 lines changed)

@@ -2,222 +2,62 @@
package bug

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/entity/dag"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/lamport"
)

const bugsRefPattern = "refs/bugs/"
const bugsRemoteRefPattern = "refs/remotes/%s/bugs/"

const opsEntryName = "ops"
const mediaEntryName = "media"

const createClockEntryPrefix = "create-clock-"
const createClockEntryPattern = "create-clock-%d"
const editClockEntryPrefix = "edit-clock-"
const editClockEntryPattern = "edit-clock-%d"

const creationClockName = "bug-create"
const editClockName = "bug-edit"

var ErrBugNotExist = errors.New("bug doesn't exist")

func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}

var _ Interface = &Bug{}
var _ entity.Interface = &Bug{}

// 1: original format
// 2: no more legacy identities
// 3: Ids are generated from the create operation serialized data instead of from the first git commit
// 4: with DAG entity framework
const formatVersion = 4

var def = dag.Definition{
	Typename:             "bug",
	Namespace:            "bugs",
	OperationUnmarshaler: operationUnmarshaller,
	FormatVersion:        formatVersion,
}

var ClockLoader = dag.ClockLoader(def)
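The `def` block above is now the single place where the bug entity describes itself to the generic `entity/dag` framework: a type name, the ref namespace, an operation unmarshaler and the on-disk format version. The commit then derives the remaining plumbing from it (`dag.ClockLoader(def)`, `dag.ListLocalIds(def, ...)`, `dag.Remove(def, ...)`, and the read/merge functions further down). A sketch of that idea from inside the `bug` package; only the `dag` calls shown actually appear in this commit, the helper itself is illustrative:

```go
// housekeeping is an illustrative helper: every call below is parameterized by
// def, so none of this logic is specific to bugs anymore.
func housekeeping(repo repository.ClockedRepo, id entity.Id) error {
	// refs enumeration is driven by def.Namespace
	ids, err := dag.ListLocalIds(def, repo)
	if err != nil {
		return err
	}
	fmt.Printf("%d bugs stored locally\n", len(ids))
	// removal is handled by the framework as well
	return dag.Remove(def, repo, id)
}
```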
// Bug hold the data of a bug thread, organized in a way close to
// how it will be persisted inside Git. This is the data structure
// used to merge two different version of the same Bug.
type Bug struct {
	// A Lamport clock is a logical clock that allow to order event
	// inside a distributed system.
	// It must be the first field in this struct due to https://github.com/golang/go/issues/599
	createTime lamport.Time
	editTime   lamport.Time

	lastCommit repository.Hash

	// all the committed operations
	packs []OperationPack

	// a temporary pack of operations used for convenience to pile up new operations
	// before a commit
	staging OperationPack
	*dag.Entity
}

// NewBug create a new Bug
func NewBug() *Bug {
	// No logical clock yet
	return &Bug{}
	return &Bug{
		Entity: dag.New(def),
	}
}
// ReadLocal will read a local bug from its hash
func ReadLocal(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
	ref := bugsRefPattern + id.String()
	return read(repo, identity.NewSimpleResolver(repo), ref)
}

// ReadLocalWithResolver will read a local bug from its hash
func ReadLocalWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, id entity.Id) (*Bug, error) {
	ref := bugsRefPattern + id.String()
	return read(repo, identityResolver, ref)
}

// ReadRemote will read a remote bug from its hash
func ReadRemote(repo repository.ClockedRepo, remote string, id entity.Id) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
	return read(repo, identity.NewSimpleResolver(repo), ref)
}

// ReadRemoteWithResolver will read a remote bug from its hash
func ReadRemoteWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, remote string, id entity.Id) (*Bug, error) {
	ref := fmt.Sprintf(bugsRemoteRefPattern, remote) + id.String()
	return read(repo, identityResolver, ref)
}

// read will read and parse a Bug from git
func read(repo repository.ClockedRepo, identityResolver identity.Resolver, ref string) (*Bug, error) {
	id := entity.RefToId(ref)

	if err := id.Validate(); err != nil {
		return nil, errors.Wrap(err, "invalid ref ")
	}

	hashes, err := repo.ListCommits(ref)
	if err != nil {
		return nil, ErrBugNotExist
	}
	if len(hashes) == 0 {
		return nil, fmt.Errorf("empty bug")
	}

	bug := Bug{}

	// Load each OperationPack
	for _, hash := range hashes {
		tree, err := readTree(repo, hash)
		if err != nil {
			return nil, err
		}

		// Due to rebase, edit Lamport time are not necessarily ordered
		if tree.editTime > bug.editTime {
			bug.editTime = tree.editTime
		}

		// Update the clocks
		err = repo.Witness(creationClockName, bug.createTime)
		if err != nil {
			return nil, errors.Wrap(err, "failed to update create lamport clock")
		}
		err = repo.Witness(editClockName, bug.editTime)
		if err != nil {
			return nil, errors.Wrap(err, "failed to update edit lamport clock")
		}

		data, err := repo.ReadData(tree.opsEntry.Hash)
		if err != nil {
			return nil, errors.Wrap(err, "failed to read git blob data")
		}

		opp := &OperationPack{}
		err = json.Unmarshal(data, &opp)
		if err != nil {
			return nil, errors.Wrap(err, "failed to decode OperationPack json")
		}

		// tag the pack with the commit hash
		opp.commitHash = hash
		bug.lastCommit = hash

		// if it's the first OperationPack read
		if len(bug.packs) == 0 {
			bug.createTime = tree.createTime
		}

		bug.packs = append(bug.packs, *opp)
	}

	// Bug Id is the Id of the first operation
	if len(bug.packs[0].Operations) == 0 {
		return nil, fmt.Errorf("first OperationPack is empty")
	}
	if id != bug.packs[0].Operations[0].Id() {
		return nil, fmt.Errorf("bug ID doesn't match the first operation ID")
	}

	// Make sure that the identities are properly loaded
	err = bug.EnsureIdentities(identityResolver)
// Read will read a bug from a repository
func Read(repo repository.ClockedRepo, id entity.Id) (*Bug, error) {
	e, err := dag.Read(def, repo, identity.NewSimpleResolver(repo), id)
	if err != nil {
		return nil, err
	}

	return &bug, nil
	return &Bug{Entity: e}, nil
}
// RemoveBug will remove a local bug from its entity.Id
func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
	var fullMatches []string

	refs, err := repo.ListRefs(bugsRefPattern + id.String())
// ReadWithResolver will read a bug from its Id, with a custom identity.Resolver
func ReadWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, id entity.Id) (*Bug, error) {
	e, err := dag.Read(def, repo, identityResolver, id)
	if err != nil {
		return err
		return nil, err
	}
	if len(refs) > 1 {
		return NewErrMultipleMatchBug(entity.RefsToIds(refs))
	}
	if len(refs) == 1 {
		// we have the bug locally
		fullMatches = append(fullMatches, refs[0])
	}

	remotes, err := repo.GetRemotes()
	if err != nil {
		return err
	}

	for remote := range remotes {
		remotePrefix := fmt.Sprintf(bugsRemoteRefPattern+id.String(), remote)
		remoteRefs, err := repo.ListRefs(remotePrefix)
		if err != nil {
			return err
		}
		if len(remoteRefs) > 1 {
			return NewErrMultipleMatchBug(entity.RefsToIds(refs))
		}
		if len(remoteRefs) == 1 {
			// found the bug in a remote
			fullMatches = append(fullMatches, remoteRefs[0])
		}
	}

	if len(fullMatches) == 0 {
		return ErrBugNotExist
	}

	for _, ref := range fullMatches {
		err = repo.RemoveRef(ref)
		if err != nil {
			return err
		}
	}

	return nil
	return &Bug{Entity: e}, nil
}
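ReadLocal/ReadRemote and their variants collapse into two entry points: `Read`, which builds a default identity resolver, and `ReadWithResolver` for callers (such as the cache layer) that already have one. A small illustrative wrapper; only the two `bug` functions are from the commit:

```go
package example // illustration only

import (
	"github.com/MichaelMure/git-bug/bug"
	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)

// loadBug picks the right entry point depending on whether a resolver is
// already available; both functions are introduced in the hunk above.
func loadBug(repo repository.ClockedRepo, resolver identity.Resolver, id entity.Id) (*bug.Bug, error) {
	if resolver == nil {
		return bug.Read(repo, id) // uses identity.NewSimpleResolver(repo) internally
	}
	return bug.ReadWithResolver(repo, resolver, id)
}
```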
type StreamedBug struct {
@@ -225,50 +65,33 @@ type StreamedBug struct {
	Err error
}

// ReadAllLocal read and parse all local bugs
func ReadAllLocal(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAll(repo, identity.NewSimpleResolver(repo), bugsRefPattern)
// ReadAll read and parse all local bugs
func ReadAll(repo repository.ClockedRepo) <-chan StreamedBug {
	return readAll(repo, identity.NewSimpleResolver(repo))
}

// ReadAllLocalWithResolver read and parse all local bugs
func ReadAllLocalWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
	return readAll(repo, identityResolver, bugsRefPattern)
}

// ReadAllRemote read and parse all remote bugs for a given remote
func ReadAllRemote(repo repository.ClockedRepo, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAll(repo, identity.NewSimpleResolver(repo), refPrefix)
}

// ReadAllRemoteWithResolver read and parse all remote bugs for a given remote
func ReadAllRemoteWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver, remote string) <-chan StreamedBug {
	refPrefix := fmt.Sprintf(bugsRemoteRefPattern, remote)
	return readAll(repo, identityResolver, refPrefix)
// ReadAllWithResolver read and parse all local bugs
func ReadAllWithResolver(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
	return readAll(repo, identityResolver)
}

// Read and parse all available bug with a given ref prefix
func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver, refPrefix string) <-chan StreamedBug {
func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver) <-chan StreamedBug {
	out := make(chan StreamedBug)

	go func() {
		defer close(out)

		refs, err := repo.ListRefs(refPrefix)
		if err != nil {
			out <- StreamedBug{Err: err}
			return
		}

		for _, ref := range refs {
			b, err := read(repo, identityResolver, ref)

			if err != nil {
				out <- StreamedBug{Err: err}
				return
		for streamedEntity := range dag.ReadAll(def, repo, identityResolver) {
			if streamedEntity.Err != nil {
				out <- StreamedBug{
					Err: streamedEntity.Err,
				}
			} else {
				out <- StreamedBug{
					Bug: &Bug{Entity: streamedEntity.Entity},
				}
			}

			out <- StreamedBug{Bug: b}
		}
	}()
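`readAll` no longer walks refs and parses operation packs itself; it adapts the stream coming out of `dag.ReadAll` into the package's `StreamedBug`. Consuming the stream looks the same as before; a short sketch where only the `bug` calls and `StreamedBug` fields come from the commit:

```go
package example // illustration only

import (
	"github.com/MichaelMure/git-bug/bug"
	"github.com/MichaelMure/git-bug/repository"
)

// countBugs drains the streamed read and stops at the first error, the same
// way the old ClockLoader Witnesser consumed StreamedBug.
func countBugs(repo repository.ClockedRepo) (int, error) {
	count := 0
	for streamed := range bug.ReadAll(repo) {
		if streamed.Err != nil {
			return count, streamed.Err
		}
		count++
	}
	return count, nil
}
```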
@@ -277,329 +100,47 @@ func readAll(repo repository.ClockedRepo, identityResolver identity.Resolver, re

// ListLocalIds list all the available local bug ids
func ListLocalIds(repo repository.Repo) ([]entity.Id, error) {
	refs, err := repo.ListRefs(bugsRefPattern)
	if err != nil {
		return nil, err
	}

	return entity.RefsToIds(refs), nil
	return dag.ListLocalIds(def, repo)
}

// Validate check if the Bug data is valid
func (bug *Bug) Validate() error {
	// non-empty
	if len(bug.packs) == 0 && bug.staging.IsEmpty() {
		return fmt.Errorf("bug has no operations")
	}

	// check if each pack and operations are valid
	for _, pack := range bug.packs {
		if err := pack.Validate(); err != nil {
			return err
		}
	}

	// check if staging is valid if needed
	if !bug.staging.IsEmpty() {
		if err := bug.staging.Validate(); err != nil {
			return errors.Wrap(err, "staging")
		}
	if err := bug.Entity.Validate(); err != nil {
		return err
	}

	// The very first Op should be a CreateOp
	firstOp := bug.FirstOp()
	if firstOp == nil || firstOp.base().OperationType != CreateOp {
	if firstOp == nil || firstOp.Type() != CreateOp {
		return fmt.Errorf("first operation should be a Create op")
	}

	// Check that there is no more CreateOp op
	// Check that there is no colliding operation's ID
	it := NewOperationIterator(bug)
	createCount := 0
	ids := make(map[entity.Id]struct{})
	for it.Next() {
		if it.Value().base().OperationType == CreateOp {
			createCount++
	for i, op := range bug.Operations() {
		if i == 0 {
			continue
		}
		if _, ok := ids[it.Value().Id()]; ok {
			return fmt.Errorf("id collision: %s", it.Value().Id())
		if op.Type() == CreateOp {
			return fmt.Errorf("only one Create op allowed")
		}
		ids[it.Value().Id()] = struct{}{}
	}

	if createCount != 1 {
		return fmt.Errorf("only one Create op allowed")
	}

	return nil
}
// Append an operation into the staging area, to be committed later
// Append add a new Operation to the Bug
func (bug *Bug) Append(op Operation) {
	if len(bug.packs) == 0 && len(bug.staging.Operations) == 0 {
		if op.base().OperationType != CreateOp {
			panic("first operation should be a Create")
		}
	}
	bug.staging.Append(op)
	bug.Entity.Append(op)
}

// Commit write the staging area in Git and move the operations to the packs
func (bug *Bug) Commit(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return fmt.Errorf("can't commit a bug with no pending operation")
// Operations return the ordered operations
func (bug *Bug) Operations() []Operation {
	source := bug.Entity.Operations()
	result := make([]Operation, len(source))
	for i, op := range source {
		result[i] = op.(Operation)
	}

	if err := bug.Validate(); err != nil {
		return errors.Wrap(err, "can't commit a bug with invalid data")
	}

	// update clocks
	var err error
	bug.editTime, err = repo.Increment(editClockName)
	if err != nil {
		return err
	}
	if bug.lastCommit == "" {
		bug.createTime, err = repo.Increment(creationClockName)
		if err != nil {
			return err
		}
	}

	// Write the Ops as a Git blob containing the serialized array
	hash, err := bug.staging.Write(repo)
	if err != nil {
		return err
	}

	// Make a Git tree referencing this blob
	tree := []repository.TreeEntry{
		// the last pack of ops
		{ObjectType: repository.Blob, Hash: hash, Name: opsEntryName},
	}

	// Store the logical clocks as well
	// --> edit clock for each OperationPack/commits
	// --> create clock only for the first OperationPack/commits
	//
	// To avoid having one blob for each clock value, clocks are serialized
	// directly into the entry name
	emptyBlobHash, err := repo.StoreData([]byte{})
	if err != nil {
		return err
	}
	tree = append(tree, repository.TreeEntry{
		ObjectType: repository.Blob,
		Hash:       emptyBlobHash,
		Name:       fmt.Sprintf(editClockEntryPattern, bug.editTime),
	})
	if bug.lastCommit == "" {
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Blob,
			Hash:       emptyBlobHash,
			Name:       fmt.Sprintf(createClockEntryPattern, bug.createTime),
		})
	}

	// Reference, if any, all the files required by the ops
	// Git will check that they actually exist in the storage and will make sure
	// to push/pull them as needed.
	mediaTree := makeMediaTree(bug.staging)
	if len(mediaTree) > 0 {
		mediaTreeHash, err := repo.StoreTree(mediaTree)
		if err != nil {
			return err
		}
		tree = append(tree, repository.TreeEntry{
			ObjectType: repository.Tree,
			Hash:       mediaTreeHash,
			Name:       mediaEntryName,
		})
	}

	// Store the tree
	hash, err = repo.StoreTree(tree)
	if err != nil {
		return err
	}

	// Write a Git commit referencing the tree, with the previous commit as parent
	if bug.lastCommit != "" {
		hash, err = repo.StoreCommit(hash, bug.lastCommit)
	} else {
		hash, err = repo.StoreCommit(hash)
	}
	if err != nil {
		return err
	}

	bug.lastCommit = hash
	bug.staging.commitHash = hash
	bug.packs = append(bug.packs, bug.staging)
	bug.staging = OperationPack{}

	// Create or update the Git reference for this bug
	// When pushing later, the remote will ensure that this ref update
	// is fast-forward, that is no data has been overwritten
	ref := fmt.Sprintf("%s%s", bugsRefPattern, bug.Id().String())
	return repo.UpdateRef(ref, hash)
}

func (bug *Bug) CommitAsNeeded(repo repository.ClockedRepo) error {
	if !bug.NeedCommit() {
		return nil
	}
	return bug.Commit(repo)
}

func (bug *Bug) NeedCommit() bool {
	return !bug.staging.IsEmpty()
}

// Merge a different version of the same bug by rebasing operations of this bug
// that are not present in the other on top of the chain of operations of the
// other version.
func (bug *Bug) Merge(repo repository.Repo, other Interface) (bool, error) {
	var otherBug = bugFromInterface(other)

	// Note: a faster merge should be possible without actually reading and parsing
	// all operations pack of our side.
	// Reading the other side is still necessary to validate remote data, at least
	// for new operations

	if bug.Id() != otherBug.Id() {
		return false, errors.New("merging unrelated bugs is not supported")
	}

	if len(otherBug.staging.Operations) > 0 {
		return false, errors.New("merging a bug with a non-empty staging is not supported")
	}

	if bug.lastCommit == "" || otherBug.lastCommit == "" {
		return false, errors.New("can't merge a bug that has never been stored")
	}

	ancestor, err := repo.FindCommonAncestor(bug.lastCommit, otherBug.lastCommit)
	if err != nil {
		return false, errors.Wrap(err, "can't find common ancestor")
	}

	ancestorIndex := 0
	newPacks := make([]OperationPack, 0, len(bug.packs))

	// Find the root of the rebase
	for i, pack := range bug.packs {
		newPacks = append(newPacks, pack)

		if pack.commitHash == ancestor {
			ancestorIndex = i
			break
		}
	}

	if len(otherBug.packs) == ancestorIndex+1 {
		// Nothing to rebase, return early
		return false, nil
	}

	// get other bug's extra packs
	for i := ancestorIndex + 1; i < len(otherBug.packs); i++ {
		// clone is probably not necessary
		newPack := otherBug.packs[i].Clone()

		newPacks = append(newPacks, newPack)
		bug.lastCommit = newPack.commitHash
	}

	// rebase our extra packs
	for i := ancestorIndex + 1; i < len(bug.packs); i++ {
		pack := bug.packs[i]

		// get the referenced git tree
		treeHash, err := repo.GetTreeHash(pack.commitHash)
		if err != nil {
			return false, err
		}

		// create a new commit with the correct ancestor
		hash, err := repo.StoreCommit(treeHash, bug.lastCommit)
		if err != nil {
			return false, err
		}

		// replace the pack
		newPack := pack.Clone()
		newPack.commitHash = hash
		newPacks = append(newPacks, newPack)

		// update the bug
		bug.lastCommit = hash
	}

	bug.packs = newPacks

	// Update the git ref
	err = repo.UpdateRef(bugsRefPattern+bug.Id().String(), bug.lastCommit)
	if err != nil {
		return false, err
	}

	return true, nil
}

// Id return the Bug identifier
func (bug *Bug) Id() entity.Id {
	// id is the id of the first operation
	return bug.FirstOp().Id()
}

// CreateLamportTime return the Lamport time of creation
func (bug *Bug) CreateLamportTime() lamport.Time {
	return bug.createTime
}

// EditLamportTime return the Lamport time of the last edit
func (bug *Bug) EditLamportTime() lamport.Time {
	return bug.editTime
}

// Lookup for the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	for _, pack := range bug.packs {
		for _, op := range pack.Operations {
			return op
		}
	}

	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[0]
	}

	return nil
}

// Lookup for the very last operation of the bug.
// For a valid Bug, should never be nil
func (bug *Bug) LastOp() Operation {
	if !bug.staging.IsEmpty() {
		return bug.staging.Operations[len(bug.staging.Operations)-1]
	}

	if len(bug.packs) == 0 {
		return nil
	}

	lastPack := bug.packs[len(bug.packs)-1]

	if len(lastPack.Operations) == 0 {
		return nil
	}

	return lastPack.Operations[len(lastPack.Operations)-1]
	return result
}
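`Operations`, and the `FirstOp`/`LastOp` accessors further down, are thin wrappers: the embedded `dag.Entity` stores generic operations, and the bug package converts them back to its own `Operation` interface with a type assertion so existing callers keep a typed API. An illustrative caller-side sketch; only `FirstOp`, `Operation` and the `CreateOperation.Title` field come from the package:

```go
package example // illustration only

import (
	"fmt"

	"github.com/MichaelMure/git-bug/bug"
)

// createTitle digs the original title out of the first operation, which for a
// valid bug is always a *bug.CreateOperation.
func createTitle(b *bug.Bug) (string, error) {
	create, ok := b.FirstOp().(*bug.CreateOperation)
	if !ok {
		return "", fmt.Errorf("first operation is not a create operation")
	}
	return create.Title, nil
}
```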
// Compile a bug in a easily usable snapshot
@@ -609,13 +150,28 @@ func (bug *Bug) Compile() Snapshot {
		Status: OpenStatus,
	}

	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
	for _, op := range bug.Operations() {
		op.Apply(&snap)
		snap.Operations = append(snap.Operations, op)
	}

	return snap
}

// Lookup for the very first operation of the bug.
// For a valid Bug, this operation should be a CreateOp
func (bug *Bug) FirstOp() Operation {
	if fo := bug.Entity.FirstOp(); fo != nil {
		return fo.(Operation)
	}
	return nil
}

// Lookup for the very last operation of the bug.
// For a valid Bug, should never be nil
func (bug *Bug) LastOp() Operation {
	if lo := bug.Entity.LastOp(); lo != nil {
		return lo.(Operation)
	}
	return nil
}
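`Compile` now folds the DAG-ordered operations into a `Snapshot` instead of driving an `OperationIterator`. Usage is unchanged; a minimal illustrative example (only `Compile`, `Status` and `Operations` come from the package):

```go
package example // illustration only

import (
	"fmt"

	"github.com/MichaelMure/git-bug/bug"
)

// summarize applies every operation to build the current state, then reads it.
func summarize(b *bug.Bug) {
	snap := b.Compile()
	fmt.Printf("status: %v, %d operations\n", snap.Status, len(snap.Operations))
}
```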
@@ -1,12 +1,10 @@
package bug

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/entity/dag"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)
@@ -14,23 +12,23 @@ import (
// Fetch retrieve updates from a remote
// This does not change the local bugs state
func Fetch(repo repository.Repo, remote string) (string, error) {
	return repo.FetchRefs(remote, "bugs")
	return dag.Fetch(def, repo, remote)
}

// Push update a remote with the local changes
func Push(repo repository.Repo, remote string) (string, error) {
	return repo.PushRefs(remote, "bugs")
	return dag.Push(def, repo, remote)
}

// Pull will do a Fetch + MergeAll
// This function will return an error if a merge fail
func Pull(repo repository.ClockedRepo, remote string) error {
func Pull(repo repository.ClockedRepo, remote string, author identity.Interface) error {
	_, err := Fetch(repo, remote)
	if err != nil {
		return err
	}

	for merge := range MergeAll(repo, remote) {
	for merge := range MergeAll(repo, remote, author) {
		if merge.Err != nil {
			return merge.Err
		}
@@ -42,95 +40,19 @@ func Pull(repo repository.ClockedRepo, remote string) error {
	return nil
}

// MergeAll will merge all the available remote bug:
//
// - If the remote has new commit, the local bug is updated to match the same history
//   (fast-forward update)
// - if the local bug has new commits but the remote don't, nothing is changed
// - if both local and remote bug have new commits (that is, we have a concurrent edition),
//   new local commits are rewritten at the head of the remote history (that is, a rebase)
func MergeAll(repo repository.ClockedRepo, remote string) <-chan entity.MergeResult {
	out := make(chan entity.MergeResult)

// MergeAll will merge all the available remote bug
// Note: an author is necessary for the case where a merge commit is created, as this commit will
// have an author and may be signed if a signing key is available.
func MergeAll(repo repository.ClockedRepo, remote string, author identity.Interface) <-chan entity.MergeResult {
	// no caching for the merge, we load everything from git even if that means multiple
	// copy of the same entity in memory. The cache layer will intercept the results to
	// invalidate entities if necessary.
	identityResolver := identity.NewSimpleResolver(repo)

	go func() {
		defer close(out)

		remoteRefSpec := fmt.Sprintf(bugsRemoteRefPattern, remote)
		remoteRefs, err := repo.ListRefs(remoteRefSpec)
		if err != nil {
			out <- entity.MergeResult{Err: err}
			return
		}

		for _, remoteRef := range remoteRefs {
			refSplit := strings.Split(remoteRef, "/")
			id := entity.Id(refSplit[len(refSplit)-1])

			if err := id.Validate(); err != nil {
				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "invalid ref").Error())
				continue
			}

			remoteBug, err := read(repo, identityResolver, remoteRef)

			if err != nil {
				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "remote bug is not readable").Error())
				continue
			}

			// Check for error in remote data
			if err := remoteBug.Validate(); err != nil {
				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "remote bug is invalid").Error())
				continue
			}

			localRef := bugsRefPattern + remoteBug.Id().String()
			localExist, err := repo.RefExist(localRef)

			if err != nil {
				out <- entity.NewMergeError(err, id)
				continue
			}

			// the bug is not local yet, simply create the reference
			if !localExist {
				err := repo.CopyRef(remoteRef, localRef)

				if err != nil {
					out <- entity.NewMergeError(err, id)
					return
				}

				out <- entity.NewMergeNewStatus(id, remoteBug)
				continue
			}

			localBug, err := read(repo, identityResolver, localRef)

			if err != nil {
				out <- entity.NewMergeError(errors.Wrap(err, "local bug is not readable"), id)
				return
			}

			updated, err := localBug.Merge(repo, remoteBug)

			if err != nil {
				out <- entity.NewMergeInvalidStatus(id, errors.Wrap(err, "merge failed").Error())
				return
			}

			if updated {
				out <- entity.NewMergeUpdatedStatus(id, localBug)
			} else {
				out <- entity.NewMergeNothingStatus(id)
			}
		}
	}()

	return out
	return dag.MergeAll(def, repo, identityResolver, remote, author)
}

// RemoveBug will remove a local bug from its entity.Id
func RemoveBug(repo repository.ClockedRepo, id entity.Id) error {
	return dag.Remove(def, repo, id)
}
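`Pull` and `MergeAll` now take an author because the DAG merge can create a real merge commit, which needs an author and may be signed. A sketch of a caller adapting; the remote name and the helper are illustrative, the `bug` calls are from the commit:

```go
package example // illustration only

import (
	"github.com/MichaelMure/git-bug/bug"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)

// syncBugs fetches from a remote and merges everything, reporting the first
// merge failure. bug.Pull would do the same in one call.
func syncBugs(repo repository.ClockedRepo, author identity.Interface) error {
	if _, err := bug.Fetch(repo, "origin"); err != nil {
		return err
	}
	for result := range bug.MergeAll(repo, "origin", author) {
		if result.Err != nil {
			return result.Err
		}
	}
	return nil
}
```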
@ -1,394 +0,0 @@
|
||||
package bug
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/MichaelMure/git-bug/identity"
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
)
|
||||
|
||||
func TestPushPull(t *testing.T) {
|
||||
repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
|
||||
defer repository.CleanupTestRepos(repoA, repoB, remote)
|
||||
|
||||
reneA, err := identity.NewIdentity(repoA, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = reneA.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, bug1.NeedCommit())
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, bug1.NeedCommit())
|
||||
|
||||
// distribute the identity
|
||||
_, err = identity.Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
err = identity.Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// A --> remote --> B
|
||||
_, err = Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bugs := allBugs(t, ReadAllLocal(repoB))
|
||||
|
||||
if len(bugs) != 1 {
|
||||
t.Fatal("Unexpected number of bugs")
|
||||
}
|
||||
|
||||
// B --> remote --> A
|
||||
reneB, err := identity.ReadLocal(repoA, reneA.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
bug2, _, err := Create(reneB, time.Now().Unix(), "bug2", "message")
|
||||
require.NoError(t, err)
|
||||
err = bug2.Commit(repoB)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = Push(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Pull(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bugs = allBugs(t, ReadAllLocal(repoA))
|
||||
|
||||
if len(bugs) != 2 {
|
||||
t.Fatal("Unexpected number of bugs")
|
||||
}
|
||||
}
|
||||
|
||||
func allBugs(t testing.TB, bugs <-chan StreamedBug) []*Bug {
|
||||
var result []*Bug
|
||||
for streamed := range bugs {
|
||||
if streamed.Err != nil {
|
||||
t.Fatal(streamed.Err)
|
||||
}
|
||||
result = append(result, streamed.Bug)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func TestRebaseTheirs(t *testing.T) {
|
||||
_RebaseTheirs(t)
|
||||
}
|
||||
|
||||
func BenchmarkRebaseTheirs(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
_RebaseTheirs(b)
|
||||
}
|
||||
}
|
||||
|
||||
func _RebaseTheirs(t testing.TB) {
|
||||
repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
|
||||
defer repository.CleanupTestRepos(repoA, repoB, remote)
|
||||
|
||||
reneA, err := identity.NewIdentity(repoA, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = reneA.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, bug1.NeedCommit())
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, bug1.NeedCommit())
|
||||
|
||||
// distribute the identity
|
||||
_, err = identity.Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
err = identity.Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// A --> remote
|
||||
|
||||
_, err = Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> B
|
||||
err = Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bug2, err := ReadLocal(repoB, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
assert.False(t, bug2.NeedCommit())
|
||||
|
||||
reneB, err := identity.ReadLocal(repoA, reneA.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message2")
|
||||
require.NoError(t, err)
|
||||
assert.True(t, bug2.NeedCommit())
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message3")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message4")
|
||||
require.NoError(t, err)
|
||||
err = bug2.Commit(repoB)
|
||||
require.NoError(t, err)
|
||||
assert.False(t, bug2.NeedCommit())
|
||||
|
||||
// B --> remote
|
||||
_, err = Push(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> A
|
||||
err = Pull(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bugs := allBugs(t, ReadAllLocal(repoB))
|
||||
|
||||
if len(bugs) != 1 {
|
||||
t.Fatal("Unexpected number of bugs")
|
||||
}
|
||||
|
||||
bug3, err := ReadLocal(repoA, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
if nbOps(bug3) != 4 {
|
||||
t.Fatal("Unexpected number of operations")
|
||||
}
|
||||
}
|
||||
|
||||
func TestRebaseOurs(t *testing.T) {
|
||||
_RebaseOurs(t)
|
||||
}
|
||||
|
||||
func BenchmarkRebaseOurs(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
_RebaseOurs(b)
|
||||
}
|
||||
}
|
||||
|
||||
func _RebaseOurs(t testing.TB) {
|
||||
repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
|
||||
defer repository.CleanupTestRepos(repoA, repoB, remote)
|
||||
|
||||
reneA, err := identity.NewIdentity(repoA, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = reneA.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
// distribute the identity
|
||||
_, err = identity.Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
err = identity.Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// A --> remote
|
||||
_, err = Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> B
|
||||
err = Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message2")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message3")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message4")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message5")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message6")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message7")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message8")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message9")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message10")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> A
|
||||
err = Pull(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bugs := allBugs(t, ReadAllLocal(repoA))
|
||||
|
||||
if len(bugs) != 1 {
|
||||
t.Fatal("Unexpected number of bugs")
|
||||
}
|
||||
|
||||
bug2, err := ReadLocal(repoA, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
if nbOps(bug2) != 10 {
|
||||
t.Fatal("Unexpected number of operations")
|
||||
}
|
||||
}
|
||||
|
||||
func nbOps(b *Bug) int {
|
||||
it := NewOperationIterator(b)
|
||||
counter := 0
|
||||
for it.Next() {
|
||||
counter++
|
||||
}
|
||||
return counter
|
||||
}
|
||||
|
||||
func TestRebaseConflict(t *testing.T) {
|
||||
_RebaseConflict(t)
|
||||
}
|
||||
|
||||
func BenchmarkRebaseConflict(b *testing.B) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
_RebaseConflict(b)
|
||||
}
|
||||
}
|
||||
|
||||
func _RebaseConflict(t testing.TB) {
|
||||
repoA, repoB, remote := repository.SetupGoGitReposAndRemote()
|
||||
defer repository.CleanupTestRepos(repoA, repoB, remote)
|
||||
|
||||
reneA, err := identity.NewIdentity(repoA, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = reneA.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
bug1, _, err := Create(reneA, time.Now().Unix(), "bug1", "message")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
// distribute the identity
|
||||
_, err = identity.Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
err = identity.Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// A --> remote
|
||||
_, err = Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> B
|
||||
err = Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message2")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message3")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message4")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message5")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message6")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message7")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message8")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message9")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug1, reneA, time.Now().Unix(), "message10")
|
||||
require.NoError(t, err)
|
||||
err = bug1.Commit(repoA)
|
||||
require.NoError(t, err)
|
||||
|
||||
bug2, err := ReadLocal(repoB, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
reneB, err := identity.ReadLocal(repoA, reneA.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message11")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message12")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message13")
|
||||
require.NoError(t, err)
|
||||
err = bug2.Commit(repoB)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message14")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message15")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message16")
|
||||
require.NoError(t, err)
|
||||
err = bug2.Commit(repoB)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message17")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message18")
|
||||
require.NoError(t, err)
|
||||
_, err = AddComment(bug2, reneB, time.Now().Unix(), "message19")
|
||||
require.NoError(t, err)
|
||||
err = bug2.Commit(repoB)
|
||||
require.NoError(t, err)
|
||||
|
||||
// A --> remote
|
||||
_, err = Push(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> B
|
||||
err = Pull(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bugs := allBugs(t, ReadAllLocal(repoB))
|
||||
|
||||
if len(bugs) != 1 {
|
||||
t.Fatal("Unexpected number of bugs")
|
||||
}
|
||||
|
||||
bug3, err := ReadLocal(repoB, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
if nbOps(bug3) != 19 {
|
||||
t.Fatal("Unexpected number of operations")
|
||||
}
|
||||
|
||||
// B --> remote
|
||||
_, err = Push(repoB, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
// remote --> A
|
||||
err = Pull(repoA, "origin")
|
||||
require.NoError(t, err)
|
||||
|
||||
bugs = allBugs(t, ReadAllLocal(repoA))
|
||||
|
||||
if len(bugs) != 1 {
|
||||
t.Fatal("Unexpected number of bugs")
|
||||
}
|
||||
|
||||
bug4, err := ReadLocal(repoA, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
if nbOps(bug4) != 19 {
|
||||
t.Fatal("Unexpected number of operations")
|
||||
}
|
||||
}
|
190
bug/bug_test.go
190
bug/bug_test.go
@ -1,190 +0,0 @@
|
||||
package bug
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/MichaelMure/git-bug/identity"
|
||||
"github.com/MichaelMure/git-bug/repository"
|
||||
)
|
||||
|
||||
func TestBugId(t *testing.T) {
|
||||
repo := repository.NewMockRepo()
|
||||
|
||||
bug1 := NewBug()
|
||||
|
||||
rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = rene.Commit(repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
|
||||
|
||||
bug1.Append(createOp)
|
||||
|
||||
err = bug1.Commit(repo)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bug1.Id()
|
||||
}
|
||||
|
||||
func TestBugValidity(t *testing.T) {
|
||||
repo := repository.NewMockRepo()
|
||||
|
||||
bug1 := NewBug()
|
||||
|
||||
rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = rene.Commit(repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
|
||||
|
||||
if bug1.Validate() == nil {
|
||||
t.Fatal("Empty bug should be invalid")
|
||||
}
|
||||
|
||||
bug1.Append(createOp)
|
||||
|
||||
if bug1.Validate() != nil {
|
||||
t.Fatal("Bug with just a CreateOp should be valid")
|
||||
}
|
||||
|
||||
err = bug1.Commit(repo)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
bug1.Append(createOp)
|
||||
|
||||
if bug1.Validate() == nil {
|
||||
t.Fatal("Bug with multiple CreateOp should be invalid")
|
||||
}
|
||||
|
||||
err = bug1.Commit(repo)
|
||||
if err == nil {
|
||||
t.Fatal("Invalid bug should not commit")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBugCommitLoad(t *testing.T) {
|
||||
repo := repository.NewMockRepo()
|
||||
|
||||
bug1 := NewBug()
|
||||
|
||||
rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = rene.Commit(repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
|
||||
setTitleOp := NewSetTitleOp(rene, time.Now().Unix(), "title2", "title1")
|
||||
addCommentOp := NewAddCommentOp(rene, time.Now().Unix(), "message2", nil)
|
||||
|
||||
bug1.Append(createOp)
|
||||
bug1.Append(setTitleOp)
|
||||
|
||||
require.True(t, bug1.NeedCommit())
|
||||
|
||||
err = bug1.Commit(repo)
|
||||
require.Nil(t, err)
|
||||
require.False(t, bug1.NeedCommit())
|
||||
|
||||
bug2, err := ReadLocal(repo, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
equivalentBug(t, bug1, bug2)
|
||||
|
||||
// add more op
|
||||
|
||||
bug1.Append(addCommentOp)
|
||||
|
||||
require.True(t, bug1.NeedCommit())
|
||||
|
||||
err = bug1.Commit(repo)
|
||||
require.Nil(t, err)
|
||||
require.False(t, bug1.NeedCommit())
|
||||
|
||||
bug3, err := ReadLocal(repo, bug1.Id())
|
||||
require.NoError(t, err)
|
||||
equivalentBug(t, bug1, bug3)
|
||||
}
|
||||
|
||||
func equivalentBug(t *testing.T, expected, actual *Bug) {
|
||||
require.Equal(t, len(expected.packs), len(actual.packs))
|
||||
|
||||
for i := range expected.packs {
|
||||
for j := range expected.packs[i].Operations {
|
||||
actual.packs[i].Operations[j].base().id = expected.packs[i].Operations[j].base().id
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestBugRemove(t *testing.T) {
|
||||
repo := repository.CreateGoGitTestRepo(false)
|
||||
remoteA := repository.CreateGoGitTestRepo(true)
|
||||
remoteB := repository.CreateGoGitTestRepo(true)
|
||||
defer repository.CleanupTestRepos(repo, remoteA, remoteB)
|
||||
|
||||
err := repo.AddRemote("remoteA", remoteA.GetLocalRemote())
|
||||
require.NoError(t, err)
|
||||
|
||||
err = repo.AddRemote("remoteB", remoteB.GetLocalRemote())
|
||||
require.NoError(t, err)
|
||||
|
||||
// generate a bunch of bugs
|
||||
rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
|
||||
require.NoError(t, err)
|
||||
err = rene.Commit(repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
b := NewBug()
|
||||
createOp := NewCreateOp(rene, time.Now().Unix(), "title", fmt.Sprintf("message%v", i), nil)
|
||||
b.Append(createOp)
|
||||
err = b.Commit(repo)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// and one more for testing
|
||||
b := NewBug()
|
||||
createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
|
||||
b.Append(createOp)
|
||||
err = b.Commit(repo)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = Push(repo, "remoteA")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = Push(repo, "remoteB")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = Fetch(repo, "remoteA")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = Fetch(repo, "remoteB")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = RemoveBug(repo, b.Id())
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = ReadLocal(repo, b.Id())
|
||||
require.Error(t, ErrBugNotExist, err)
|
||||
|
||||
_, err = ReadRemote(repo, "remoteA", b.Id())
|
||||
require.Error(t, ErrBugNotExist, err)
|
||||
|
||||
_, err = ReadRemote(repo, "remoteB", b.Id())
|
||||
require.Error(t, ErrBugNotExist, err)
|
||||
|
||||
ids, err := ListLocalIds(repo)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, ids, 100)
|
||||
}
|
@ -1,40 +0,0 @@
package bug

import (
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)

// ClockLoader is the repository.ClockLoader for the Bug entity
var ClockLoader = repository.ClockLoader{
	Clocks: []string{creationClockName, editClockName},
	Witnesser: func(repo repository.ClockedRepo) error {
		// We don't care about the actual identity so an IdentityStub will do
		resolver := identity.NewStubResolver()
		for b := range ReadAllLocalWithResolver(repo, resolver) {
			if b.Err != nil {
				return b.Err
			}

			createClock, err := repo.GetOrCreateClock(creationClockName)
			if err != nil {
				return err
			}
			err = createClock.Witness(b.Bug.createTime)
			if err != nil {
				return err
			}

			editClock, err := repo.GetOrCreateClock(editClockName)
			if err != nil {
				return err
			}
			err = editClock.Witness(b.Bug.editTime)
			if err != nil {
				return err
			}
		}

		return nil
	},
}
17 bug/err.go Normal file
@ -0,0 +1,17 @@
package bug

import (
	"errors"

	"github.com/MichaelMure/git-bug/entity"
)

var ErrBugNotExist = errors.New("bug doesn't exist")

func NewErrMultipleMatchBug(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("bug", matching)
}

func NewErrMultipleMatchOp(matching []entity.Id) *entity.ErrMultipleMatch {
	return entity.NewErrMultipleMatch("operation", matching)
}
@ -1,84 +0,0 @@
package bug

import (
	"fmt"
	"strings"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/repository"
	"github.com/MichaelMure/git-bug/util/lamport"
)

type gitTree struct {
	opsEntry   repository.TreeEntry
	createTime lamport.Time
	editTime   lamport.Time
}

func readTree(repo repository.RepoData, hash repository.Hash) (*gitTree, error) {
	tree := &gitTree{}

	entries, err := repo.ReadTree(hash)
	if err != nil {
		return nil, errors.Wrap(err, "can't list git tree entries")
	}

	opsFound := false

	for _, entry := range entries {
		if entry.Name == opsEntryName {
			tree.opsEntry = entry
			opsFound = true
			continue
		}
		if strings.HasPrefix(entry.Name, createClockEntryPrefix) {
			n, err := fmt.Sscanf(entry.Name, createClockEntryPattern, &tree.createTime)
			if err != nil {
				return nil, errors.Wrap(err, "can't read create lamport time")
			}
			if n != 1 {
				return nil, fmt.Errorf("could not parse create time lamport value")
			}
		}
		if strings.HasPrefix(entry.Name, editClockEntryPrefix) {
			n, err := fmt.Sscanf(entry.Name, editClockEntryPattern, &tree.editTime)
			if err != nil {
				return nil, errors.Wrap(err, "can't read edit lamport time")
			}
			if n != 1 {
				return nil, fmt.Errorf("could not parse edit time lamport value")
			}
		}
	}

	if !opsFound {
		return nil, errors.New("invalid tree, missing the ops entry")
	}

	return tree, nil
}

func makeMediaTree(pack OperationPack) []repository.TreeEntry {
	var tree []repository.TreeEntry
	counter := 0
	added := make(map[repository.Hash]interface{})

	for _, ops := range pack.Operations {
		for _, file := range ops.GetFiles() {
			if _, has := added[file]; !has {
				tree = append(tree, repository.TreeEntry{
					ObjectType: repository.Blob,
					Hash:       file,
					// The name is not important here, we only need to
					// reference the blob.
					Name: fmt.Sprintf("file%d", counter),
				})
				counter++
				added[file] = struct{}{}
			}
		}
	}

	return tree
}
@ -1,27 +0,0 @@
package bug

import (
	"github.com/MichaelMure/git-bug/identity"
)

// EnsureIdentities walk the graph of operations and make sure that all Identity
// are properly loaded. That is, it replace all the IdentityStub with the full
// Identity, loaded through a Resolver.
func (bug *Bug) EnsureIdentities(resolver identity.Resolver) error {
	it := NewOperationIterator(bug)

	for it.Next() {
		op := it.Value()
		base := op.base()

		if stub, ok := base.Author.(*identity.IdentityStub); ok {
			i, err := resolver.ResolveIdentity(stub.Id())
			if err != nil {
				return err
			}

			base.Author = i
		}
	}
	return nil
}
@ -16,17 +16,15 @@ type Interface interface {
	// Append an operation into the staging area, to be committed later
	Append(op Operation)

	// Operations return the ordered operations
	Operations() []Operation

	// Indicate that the in-memory state changed and need to be commit in the repository
	NeedCommit() bool

	// Commit write the staging area in Git and move the operations to the packs
	Commit(repo repository.ClockedRepo) error

	// Merge a different version of the same bug by rebasing operations of this bug
	// that are not present in the other on top of the chain of operations of the
	// other version.
	Merge(repo repository.Repo, other Interface) (bool, error)

	// Lookup for the very first operation of the bug.
	// For a valid Bug, this operation should be a CreateOp
	FirstOp() Operation
@ -24,23 +24,19 @@ type AddCommentOperation struct {
// Sign-post method for gqlgen
func (op *AddCommentOperation) IsOperation() {}

func (op *AddCommentOperation) base() *OpBase {
	return &op.OpBase
}

func (op *AddCommentOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

func (op *AddCommentOperation) Apply(snapshot *Snapshot) {
	snapshot.addActor(op.Author)
	snapshot.addParticipant(op.Author)
	snapshot.addActor(op.Author_)
	snapshot.addParticipant(op.Author_)

	commentId := entity.CombineIds(snapshot.Id(), op.Id())
	comment := Comment{
		id:       commentId,
		Message:  op.Message,
		Author:   op.Author,
		Author:   op.Author_,
		Files:    op.Files,
		UnixTime: timestamp.Timestamp(op.UnixTime),
	}
@ -59,7 +55,7 @@ func (op *AddCommentOperation) GetFiles() []repository.Hash {
}

func (op *AddCommentOperation) Validate() error {
	if err := opBaseValidate(op, AddCommentOp); err != nil {
	if err := op.OpBase.Validate(op, AddCommentOp); err != nil {
		return err
	}

@ -32,8 +32,8 @@ func TestAddCommentSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	assert.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	assert.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	assert.Equal(t, before, &after)
}
@ -30,12 +30,8 @@ type CreateOperation struct {
// Sign-post method for gqlgen
func (op *CreateOperation) IsOperation() {}

func (op *CreateOperation) base() *OpBase {
	return &op.OpBase
}

func (op *CreateOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

// OVERRIDE
@ -61,8 +57,8 @@ func (op *CreateOperation) Apply(snapshot *Snapshot) {

	snapshot.id = op.Id()

	snapshot.addActor(op.Author)
	snapshot.addParticipant(op.Author)
	snapshot.addActor(op.Author_)
	snapshot.addParticipant(op.Author_)

	snapshot.Title = op.Title

@ -70,12 +66,12 @@ func (op *CreateOperation) Apply(snapshot *Snapshot) {
	comment := Comment{
		id:       commentId,
		Message:  op.Message,
		Author:   op.Author,
		Author:   op.Author_,
		UnixTime: timestamp.Timestamp(op.UnixTime),
	}

	snapshot.Comments = []Comment{comment}
	snapshot.Author = op.Author
	snapshot.Author = op.Author_
	snapshot.CreateTime = op.Time()

	snapshot.Timeline = []TimelineItem{
@ -90,7 +86,7 @@ func (op *CreateOperation) GetFiles() []repository.Hash {
}

func (op *CreateOperation) Validate() error {
	if err := opBaseValidate(op, CreateOp); err != nil {
	if err := op.OpBase.Validate(op, CreateOp); err != nil {
		return err
	}

@ -79,8 +79,8 @@ func TestCreateSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	require.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	require.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	require.Equal(t, before, &after)
}
@ -27,19 +27,15 @@ type EditCommentOperation struct {
// Sign-post method for gqlgen
func (op *EditCommentOperation) IsOperation() {}

func (op *EditCommentOperation) base() *OpBase {
	return &op.OpBase
}

func (op *EditCommentOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

func (op *EditCommentOperation) Apply(snapshot *Snapshot) {
	// Todo: currently any message can be edited, even by a different author
	// crypto signature are needed.

	snapshot.addActor(op.Author)
	snapshot.addActor(op.Author_)

	var target TimelineItem

@ -85,7 +81,7 @@ func (op *EditCommentOperation) GetFiles() []repository.Hash {
}

func (op *EditCommentOperation) Validate() error {
	if err := opBaseValidate(op, EditCommentOp); err != nil {
	if err := op.OpBase.Validate(op, EditCommentOp); err != nil {
		return err
	}

@ -97,8 +97,8 @@ func TestEditCommentSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	require.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	require.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	require.Equal(t, before, &after)
}
@ -24,17 +24,13 @@ type LabelChangeOperation struct {
// Sign-post method for gqlgen
func (op *LabelChangeOperation) IsOperation() {}

func (op *LabelChangeOperation) base() *OpBase {
	return &op.OpBase
}

func (op *LabelChangeOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

// Apply apply the operation
func (op *LabelChangeOperation) Apply(snapshot *Snapshot) {
	snapshot.addActor(op.Author)
	snapshot.addActor(op.Author_)

	// Add in the set
AddLoop:
@ -66,7 +62,7 @@ AddLoop:

	item := &LabelChangeTimelineItem{
		id:       op.Id(),
		Author:   op.Author,
		Author:   op.Author_,
		UnixTime: timestamp.Timestamp(op.UnixTime),
		Added:    op.Added,
		Removed:  op.Removed,
@ -76,7 +72,7 @@ AddLoop:
}

func (op *LabelChangeOperation) Validate() error {
	if err := opBaseValidate(op, LabelChangeOp); err != nil {
	if err := op.OpBase.Validate(op, LabelChangeOp); err != nil {
		return err
	}

@ -31,8 +31,8 @@ func TestLabelChangeSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	require.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	require.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	require.Equal(t, before, &after)
}
@ -19,12 +19,8 @@ type NoOpOperation struct {
// Sign-post method for gqlgen
func (op *NoOpOperation) IsOperation() {}

func (op *NoOpOperation) base() *OpBase {
	return &op.OpBase
}

func (op *NoOpOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

func (op *NoOpOperation) Apply(snapshot *Snapshot) {
@ -32,7 +28,7 @@ func (op *NoOpOperation) Apply(snapshot *Snapshot) {
}

func (op *NoOpOperation) Validate() error {
	return opBaseValidate(op, NoOpOp)
	return op.OpBase.Validate(op, NoOpOp)
}

// UnmarshalJSON is a two step JSON unmarshaling

@ -33,8 +33,8 @@ func TestNoopSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	assert.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	assert.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	assert.Equal(t, before, &after)
}
@ -20,38 +20,25 @@ type SetMetadataOperation struct {
// Sign-post method for gqlgen
func (op *SetMetadataOperation) IsOperation() {}

func (op *SetMetadataOperation) base() *OpBase {
	return &op.OpBase
}

func (op *SetMetadataOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

func (op *SetMetadataOperation) Apply(snapshot *Snapshot) {
	for _, target := range snapshot.Operations {
		if target.Id() == op.Target {
			base := target.base()

			if base.extraMetadata == nil {
				base.extraMetadata = make(map[string]string)
			}

			// Apply the metadata in an immutable way: if a metadata already
			// exist, it's not possible to override it.
			for key, val := range op.NewMetadata {
				if _, exist := base.extraMetadata[key]; !exist {
					base.extraMetadata[key] = val
				}
			for key, value := range op.NewMetadata {
				target.setExtraMetadataImmutable(key, value)
			}

			return
		}
	}
}

func (op *SetMetadataOperation) Validate() error {
	if err := opBaseValidate(op, SetMetadataOp); err != nil {
	if err := op.OpBase.Validate(op, SetMetadataOp); err != nil {
		return err
	}

@ -120,8 +120,8 @@ func TestSetMetadataSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	require.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	require.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	require.Equal(t, before, &after)
}
@ -21,21 +21,17 @@ type SetStatusOperation struct {
// Sign-post method for gqlgen
func (op *SetStatusOperation) IsOperation() {}

func (op *SetStatusOperation) base() *OpBase {
	return &op.OpBase
}

func (op *SetStatusOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

func (op *SetStatusOperation) Apply(snapshot *Snapshot) {
	snapshot.Status = op.Status
	snapshot.addActor(op.Author)
	snapshot.addActor(op.Author_)

	item := &SetStatusTimelineItem{
		id:       op.Id(),
		Author:   op.Author,
		Author:   op.Author_,
		UnixTime: timestamp.Timestamp(op.UnixTime),
		Status:   op.Status,
	}
@ -44,7 +40,7 @@ func (op *SetStatusOperation) Apply(snapshot *Snapshot) {
}

func (op *SetStatusOperation) Validate() error {
	if err := opBaseValidate(op, SetStatusOp); err != nil {
	if err := op.OpBase.Validate(op, SetStatusOp); err != nil {
		return err
	}

@ -31,8 +31,8 @@ func TestSetStatusSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	require.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	require.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	require.Equal(t, before, &after)
}
@ -24,21 +24,17 @@ type SetTitleOperation struct {
// Sign-post method for gqlgen
func (op *SetTitleOperation) IsOperation() {}

func (op *SetTitleOperation) base() *OpBase {
	return &op.OpBase
}

func (op *SetTitleOperation) Id() entity.Id {
	return idOperation(op)
	return idOperation(op, &op.OpBase)
}

func (op *SetTitleOperation) Apply(snapshot *Snapshot) {
	snapshot.Title = op.Title
	snapshot.addActor(op.Author)
	snapshot.addActor(op.Author_)

	item := &SetTitleTimelineItem{
		id:       op.Id(),
		Author:   op.Author,
		Author:   op.Author_,
		UnixTime: timestamp.Timestamp(op.UnixTime),
		Title:    op.Title,
		Was:      op.Was,
@ -48,7 +44,7 @@ func (op *SetTitleOperation) Apply(snapshot *Snapshot) {
}

func (op *SetTitleOperation) Validate() error {
	if err := opBaseValidate(op, SetTitleOp); err != nil {
	if err := op.OpBase.Validate(op, SetTitleOp); err != nil {
		return err
	}

@ -132,19 +128,17 @@ func (s *SetTitleTimelineItem) IsAuthored() {}

// Convenience function to apply the operation
func SetTitle(b Interface, author identity.Interface, unixTime int64, title string) (*SetTitleOperation, error) {
	it := NewOperationIterator(b)

	var lastTitleOp Operation
	for it.Next() {
		op := it.Value()
		if op.base().OperationType == SetTitleOp {
	var lastTitleOp *SetTitleOperation
	for _, op := range b.Operations() {
		switch op := op.(type) {
		case *SetTitleOperation:
			lastTitleOp = op
		}
	}

	var was string
	if lastTitleOp != nil {
		was = lastTitleOp.(*SetTitleOperation).Title
		was = lastTitleOp.Title
	} else {
		was = b.FirstOp().(*CreateOperation).Title
	}

@ -31,8 +31,8 @@ func TestSetTitleSerialize(t *testing.T) {
	before.Id()

	// Replace the identity stub with the real thing
	require.Equal(t, rene.Id(), after.base().Author.Id())
	after.Author = rene
	require.Equal(t, rene.Id(), after.Author().Id())
	after.Author_ = rene

	require.Equal(t, before, &after)
}
152 bug/operation.go
@ -8,6 +8,7 @@ import (
	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/entity/dag"
	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)
@ -29,34 +30,31 @@ const (

// Operation define the interface to fulfill for an edit operation of a Bug
type Operation interface {
	// base return the OpBase of the Operation, for package internal use
	base() *OpBase
	// Id return the identifier of the operation, to be used for back references
	Id() entity.Id
	dag.Operation

	// Type return the type of the operation
	Type() OperationType

	// Time return the time when the operation was added
	Time() time.Time
	// GetFiles return the files needed by this operation
	GetFiles() []repository.Hash
	// Apply the operation to a Snapshot to create the final state
	Apply(snapshot *Snapshot)
	// Validate check if the operation is valid (ex: a title is a single line)
	Validate() error
	// SetMetadata store arbitrary metadata about the operation
	SetMetadata(key string, value string)
	// GetMetadata retrieve arbitrary metadata about the operation
	GetMetadata(key string) (string, bool)
	// AllMetadata return all metadata for this operation
	AllMetadata() map[string]string
	// GetAuthor return the author identity
	GetAuthor() identity.Interface

	setExtraMetadataImmutable(key string, value string)

	// sign-post method for gqlgen
	IsOperation()
}

func idOperation(op Operation) entity.Id {
	base := op.base()

func idOperation(op Operation, base *OpBase) entity.Id {
	if base.id == "" {
		// something went really wrong
		panic("op's id not set")
@ -77,10 +75,69 @@ func idOperation(op Operation) entity.Id {
	return base.id
}

func operationUnmarshaller(author identity.Interface, raw json.RawMessage) (dag.Operation, error) {
	var t struct {
		OperationType OperationType `json:"type"`
	}

	if err := json.Unmarshal(raw, &t); err != nil {
		return nil, err
	}

	var op Operation

	switch t.OperationType {
	case AddCommentOp:
		op = &AddCommentOperation{}
	case CreateOp:
		op = &CreateOperation{}
	case EditCommentOp:
		op = &EditCommentOperation{}
	case LabelChangeOp:
		op = &LabelChangeOperation{}
	case NoOpOp:
		op = &NoOpOperation{}
	case SetMetadataOp:
		op = &SetMetadataOperation{}
	case SetStatusOp:
		op = &SetStatusOperation{}
	case SetTitleOp:
		op = &SetTitleOperation{}
	default:
		panic(fmt.Sprintf("unknown operation type %v", t.OperationType))
	}

	err := json.Unmarshal(raw, &op)
	if err != nil {
		return nil, err
	}

	switch op := op.(type) {
	case *AddCommentOperation:
		op.Author_ = author
	case *CreateOperation:
		op.Author_ = author
	case *LabelChangeOperation:
		op.Author_ = author
	case *NoOpOperation:
		op.Author_ = author
	case *SetMetadataOperation:
		op.Author_ = author
	case *SetStatusOperation:
		op.Author_ = author
	case *SetTitleOperation:
		op.Author_ = author
	default:
		panic(fmt.Sprintf("unknown operation type %T", op))
	}

	return op, nil
}

// OpBase implement the common code for all operations
type OpBase struct {
	OperationType OperationType      `json:"type"`
	Author        identity.Interface `json:"author"`
	Author_       identity.Interface `json:"author"`
	// TODO: part of the data model upgrade, this should eventually be a timestamp + lamport
	UnixTime int64             `json:"timestamp"`
	Metadata map[string]string `json:"metadata,omitempty"`
@ -95,15 +152,15 @@ type OpBase struct {
func newOpBase(opType OperationType, author identity.Interface, unixTime int64) OpBase {
	return OpBase{
		OperationType: opType,
		Author:        author,
		Author_:       author,
		UnixTime:      unixTime,
		id:            entity.UnsetId,
	}
}

func (op *OpBase) UnmarshalJSON(data []byte) error {
func (base *OpBase) UnmarshalJSON(data []byte) error {
	// Compute the Id when loading the op from disk.
	op.id = entity.DeriveId(data)
	base.id = entity.DeriveId(data)

	aux := struct {
		OperationType OperationType `json:"type"`
@ -122,39 +179,43 @@ func (op *OpBase) UnmarshalJSON(data []byte) error {
		return err
	}

	op.OperationType = aux.OperationType
	op.Author = author
	op.UnixTime = aux.UnixTime
	op.Metadata = aux.Metadata
	base.OperationType = aux.OperationType
	base.Author_ = author
	base.UnixTime = aux.UnixTime
	base.Metadata = aux.Metadata

	return nil
}

func (base *OpBase) Type() OperationType {
	return base.OperationType
}

// Time return the time when the operation was added
func (op *OpBase) Time() time.Time {
	return time.Unix(op.UnixTime, 0)
func (base *OpBase) Time() time.Time {
	return time.Unix(base.UnixTime, 0)
}

// GetFiles return the files needed by this operation
func (op *OpBase) GetFiles() []repository.Hash {
func (base *OpBase) GetFiles() []repository.Hash {
	return nil
}

// Validate check the OpBase for errors
func opBaseValidate(op Operation, opType OperationType) error {
	if op.base().OperationType != opType {
		return fmt.Errorf("incorrect operation type (expected: %v, actual: %v)", opType, op.base().OperationType)
func (base *OpBase) Validate(op Operation, opType OperationType) error {
	if base.OperationType != opType {
		return fmt.Errorf("incorrect operation type (expected: %v, actual: %v)", opType, base.OperationType)
	}

	if op.Time().Unix() == 0 {
		return fmt.Errorf("time not set")
	}

	if op.base().Author == nil {
	if base.Author_ == nil {
		return fmt.Errorf("author not set")
	}

	if err := op.base().Author.Validate(); err != nil {
	if err := op.Author().Validate(); err != nil {
		return errors.Wrap(err, "author")
	}

@ -168,46 +229,55 @@ func opBaseValidate(op Operation, opType OperationType) error {
}

// SetMetadata store arbitrary metadata about the operation
func (op *OpBase) SetMetadata(key string, value string) {
	if op.Metadata == nil {
		op.Metadata = make(map[string]string)
func (base *OpBase) SetMetadata(key string, value string) {
	if base.Metadata == nil {
		base.Metadata = make(map[string]string)
	}

	op.Metadata[key] = value
	op.id = entity.UnsetId
	base.Metadata[key] = value
	base.id = entity.UnsetId
}

// GetMetadata retrieve arbitrary metadata about the operation
func (op *OpBase) GetMetadata(key string) (string, bool) {
	val, ok := op.Metadata[key]
func (base *OpBase) GetMetadata(key string) (string, bool) {
	val, ok := base.Metadata[key]

	if ok {
		return val, true
	}

	// extraMetadata can't replace the original operations value if any
	val, ok = op.extraMetadata[key]
	val, ok = base.extraMetadata[key]

	return val, ok
}

// AllMetadata return all metadata for this operation
func (op *OpBase) AllMetadata() map[string]string {
func (base *OpBase) AllMetadata() map[string]string {
	result := make(map[string]string)

	for key, val := range op.extraMetadata {
	for key, val := range base.extraMetadata {
		result[key] = val
	}

	// Original metadata take precedence
	for key, val := range op.Metadata {
	for key, val := range base.Metadata {
		result[key] = val
	}

	return result
}

// GetAuthor return author identity
func (op *OpBase) GetAuthor() identity.Interface {
	return op.Author
func (base *OpBase) setExtraMetadataImmutable(key string, value string) {
	if base.extraMetadata == nil {
		base.extraMetadata = make(map[string]string)
	}
	if _, exist := base.extraMetadata[key]; !exist {
		base.extraMetadata[key] = value
	}
}

// Author return author identity
func (base *OpBase) Author() identity.Interface {
	return base.Author_
}
@ -1,72 +0,0 @@
package bug

type OperationIterator struct {
	bug       *Bug
	packIndex int
	opIndex   int
}

func NewOperationIterator(bug Interface) *OperationIterator {
	return &OperationIterator{
		bug:       bugFromInterface(bug),
		packIndex: 0,
		opIndex:   -1,
	}
}

func (it *OperationIterator) Next() bool {
	// Special case of the staging area
	if it.packIndex == len(it.bug.packs) {
		pack := it.bug.staging
		it.opIndex++
		return it.opIndex < len(pack.Operations)
	}

	if it.packIndex >= len(it.bug.packs) {
		return false
	}

	pack := it.bug.packs[it.packIndex]

	it.opIndex++

	if it.opIndex < len(pack.Operations) {
		return true
	}

	// Note: this iterator doesn't handle the empty pack case
	it.opIndex = 0
	it.packIndex++

	// Special case of the non-empty staging area
	if it.packIndex == len(it.bug.packs) && len(it.bug.staging.Operations) > 0 {
		return true
	}

	return it.packIndex < len(it.bug.packs)
}

func (it *OperationIterator) Value() Operation {
	// Special case of the staging area
	if it.packIndex == len(it.bug.packs) {
		pack := it.bug.staging

		if it.opIndex >= len(pack.Operations) {
			panic("Iterator is not valid anymore")
		}

		return pack.Operations[it.opIndex]
	}

	if it.packIndex >= len(it.bug.packs) {
		panic("Iterator is not valid anymore")
	}

	pack := it.bug.packs[it.packIndex]

	if it.opIndex >= len(pack.Operations) {
		panic("Iterator is not valid anymore")
	}

	return pack.Operations[it.opIndex]
}
@ -1,79 +0,0 @@
package bug

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)

func ExampleOperationIterator() {
	b := NewBug()

	// add operations

	it := NewOperationIterator(b)

	for it.Next() {
		// do something with each operations
		_ = it.Value()
	}
}

func TestOpIterator(t *testing.T) {
	repo := repository.NewMockRepo()

	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
	require.NoError(t, err)
	err = rene.Commit(repo)
	require.NoError(t, err)

	unix := time.Now().Unix()

	createOp := NewCreateOp(rene, unix, "title", "message", nil)
	addCommentOp := NewAddCommentOp(rene, unix, "message2", nil)
	setStatusOp := NewSetStatusOp(rene, unix, ClosedStatus)
	labelChangeOp := NewLabelChangeOperation(rene, unix, []Label{"added"}, []Label{"removed"})

	var i int
	genTitleOp := func() Operation {
		i++
		return NewSetTitleOp(rene, unix, fmt.Sprintf("title%d", i), "")
	}

	bug1 := NewBug()

	// first pack
	bug1.Append(createOp)
	bug1.Append(addCommentOp)
	bug1.Append(setStatusOp)
	bug1.Append(labelChangeOp)
	err = bug1.Commit(repo)
	require.NoError(t, err)

	// second pack
	bug1.Append(genTitleOp())
	bug1.Append(genTitleOp())
	bug1.Append(genTitleOp())
	err = bug1.Commit(repo)
	require.NoError(t, err)

	// staging
	bug1.Append(genTitleOp())
	bug1.Append(genTitleOp())
	bug1.Append(genTitleOp())

	it := NewOperationIterator(bug1)

	counter := 0
	for it.Next() {
		_ = it.Value()
		counter++
	}

	require.Equal(t, 10, counter)
}
@ -1,187 +0,0 @@
package bug

import (
	"encoding/json"
	"fmt"

	"github.com/pkg/errors"

	"github.com/MichaelMure/git-bug/entity"
	"github.com/MichaelMure/git-bug/repository"
)

// 1: original format
// 2: no more legacy identities
// 3: Ids are generated from the create operation serialized data instead of from the first git commit
const formatVersion = 3

// OperationPack represent an ordered set of operation to apply
// to a Bug. These operations are stored in a single Git commit.
//
// These commits will be linked together in a linear chain of commits
// inside Git to form the complete ordered chain of operation to
// apply to get the final state of the Bug
type OperationPack struct {
	Operations []Operation

	// Private field so not serialized
	commitHash repository.Hash
}

func (opp *OperationPack) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Version    uint        `json:"version"`
		Operations []Operation `json:"ops"`
	}{
		Version:    formatVersion,
		Operations: opp.Operations,
	})
}

func (opp *OperationPack) UnmarshalJSON(data []byte) error {
	aux := struct {
		Version    uint              `json:"version"`
		Operations []json.RawMessage `json:"ops"`
	}{}

	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}

	if aux.Version < formatVersion {
		return entity.NewErrOldFormatVersion(aux.Version)
	}
	if aux.Version > formatVersion {
		return entity.NewErrNewFormatVersion(aux.Version)
	}

	for _, raw := range aux.Operations {
		var t struct {
			OperationType OperationType `json:"type"`
		}

		if err := json.Unmarshal(raw, &t); err != nil {
			return err
		}

		// delegate to specialized unmarshal function
		op, err := opp.unmarshalOp(raw, t.OperationType)
		if err != nil {
			return err
		}

		opp.Operations = append(opp.Operations, op)
	}

	return nil
}

func (opp *OperationPack) unmarshalOp(raw []byte, _type OperationType) (Operation, error) {
	switch _type {
	case AddCommentOp:
		op := &AddCommentOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case CreateOp:
		op := &CreateOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case EditCommentOp:
		op := &EditCommentOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case LabelChangeOp:
		op := &LabelChangeOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case NoOpOp:
		op := &NoOpOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case SetMetadataOp:
		op := &SetMetadataOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case SetStatusOp:
		op := &SetStatusOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	case SetTitleOp:
		op := &SetTitleOperation{}
		err := json.Unmarshal(raw, &op)
		return op, err
	default:
		return nil, fmt.Errorf("unknown operation type %v", _type)
	}
}

// Append a new operation to the pack
func (opp *OperationPack) Append(op Operation) {
	opp.Operations = append(opp.Operations, op)
}

// IsEmpty tell if the OperationPack is empty
func (opp *OperationPack) IsEmpty() bool {
	return len(opp.Operations) == 0
}

// IsValid tell if the OperationPack is considered valid
func (opp *OperationPack) Validate() error {
	if opp.IsEmpty() {
		return fmt.Errorf("empty")
	}

	for _, op := range opp.Operations {
		if err := op.Validate(); err != nil {
			return errors.Wrap(err, "op")
		}
	}

	return nil
}

// Write will serialize and store the OperationPack as a git blob and return
// its hash
func (opp *OperationPack) Write(repo repository.ClockedRepo) (repository.Hash, error) {
	// make sure we don't write invalid data
	err := opp.Validate()
	if err != nil {
		return "", errors.Wrap(err, "validation error")
	}

	// First, make sure that all the identities are properly Commit as well
	// TODO: this might be downgraded to "make sure it exist in git" but then, what make
	// sure no data is lost on identities ?
	for _, op := range opp.Operations {
		if op.base().Author.NeedCommit() {
			return "", fmt.Errorf("identity need commmit")
		}
	}

	data, err := json.Marshal(opp)
	if err != nil {
		return "", err
	}

	hash, err := repo.StoreData(data)
	if err != nil {
		return "", err
	}

	return hash, nil
}

// Make a deep copy
func (opp *OperationPack) Clone() OperationPack {

	clone := OperationPack{
		Operations: make([]Operation, len(opp.Operations)),
		commitHash: opp.commitHash,
	}

	for i, op := range opp.Operations {
		clone.Operations[i] = op
	}

	return clone
}
@ -1,78 +0,0 @@
package bug

import (
	"encoding/json"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/MichaelMure/git-bug/identity"
	"github.com/MichaelMure/git-bug/repository"
)

func TestOperationPackSerialize(t *testing.T) {
	opp := &OperationPack{}

	repo := repository.NewMockRepo()

	rene, err := identity.NewIdentity(repo, "René Descartes", "rene@descartes.fr")
	require.NoError(t, err)

	createOp := NewCreateOp(rene, time.Now().Unix(), "title", "message", nil)
	setTitleOp := NewSetTitleOp(rene, time.Now().Unix(), "title2", "title1")
	addCommentOp := NewAddCommentOp(rene, time.Now().Unix(), "message2", nil)
	setStatusOp := NewSetStatusOp(rene, time.Now().Unix(), ClosedStatus)
	labelChangeOp := NewLabelChangeOperation(rene, time.Now().Unix(), []Label{"added"}, []Label{"removed"})

	opp.Append(createOp)
	opp.Append(setTitleOp)
	opp.Append(addCommentOp)
	opp.Append(setStatusOp)
	opp.Append(labelChangeOp)

	opMeta := NewSetTitleOp(rene, time.Now().Unix(), "title3", "title2")
	opMeta.SetMetadata("key", "value")
	opp.Append(opMeta)

	require.Equal(t, 1, len(opMeta.Metadata))

	opFile := NewAddCommentOp(rene, time.Now().Unix(), "message", []repository.Hash{
		"abcdef",
		"ghijkl",
	})
	opp.Append(opFile)

	require.Equal(t, 2, len(opFile.Files))

	data, err := json.Marshal(opp)
	require.NoError(t, err)

	var opp2 *OperationPack
	err = json.Unmarshal(data, &opp2)
	require.NoError(t, err)

	ensureIds(opp)
	ensureAuthors(t, opp, opp2)

	require.Equal(t, opp, opp2)
}

func ensureIds(opp *OperationPack) {
	for _, op := range opp.Operations {
		op.Id()
	}
}

func ensureAuthors(t *testing.T, opp1 *OperationPack, opp2 *OperationPack) {
	require.Equal(t, len(opp1.Operations), len(opp2.Operations))
	for i := 0; i < len(opp1.Operations); i++ {
		op1 := opp1.Operations[i]
		op2 := opp2.Operations[i]

		// ensure we have equivalent authors (IdentityStub vs Identity) then
		// enforce equality
		require.Equal(t, op1.base().Author.Id(), op2.base().Author.Id())
		op1.base().Author = op2.base().Author
	}
}
@ -45,7 +45,7 @@ func TestValidate(t *testing.T) {
		NewSetStatusOp(makeIdentity(t, "René \nDescartes", "rene@descartes.fr"), unix, ClosedStatus),
		NewSetStatusOp(makeIdentity(t, "René Descartes", "rene@\ndescartes.fr"), unix, ClosedStatus),
		&CreateOperation{OpBase: OpBase{
			Author:        rene,
			Author_:       rene,
			UnixTime:      0,
			OperationType: CreateOp,
		},
@ -121,7 +121,7 @@ func TestID(t *testing.T) {
	require.NoError(t, id2.Validate())
	require.Equal(t, id1, id2)

	b2, err := ReadLocal(repo, b.Id())
	b2, err := Read(repo, b.Id())
	require.NoError(t, err)

	op3 := b2.FirstOp()
@ -7,11 +7,11 @@ func (b BugsByCreationTime) Len() int {
}

func (b BugsByCreationTime) Less(i, j int) bool {
	if b[i].createTime < b[j].createTime {
	if b[i].CreateLamportTime() < b[j].CreateLamportTime() {
		return true
	}

	if b[i].createTime > b[j].createTime {
	if b[i].CreateLamportTime() > b[j].CreateLamportTime() {
		return false
	}

@ -35,11 +35,11 @@ func (b BugsByEditTime) Len() int {
}

func (b BugsByEditTime) Less(i, j int) bool {
	if b[i].editTime < b[j].editTime {
	if b[i].EditLamportTime() < b[j].EditLamportTime() {
		return true
	}

	if b[i].editTime > b[j].editTime {
	if b[i].EditLamportTime() > b[j].EditLamportTime() {
		return false
	}
@ -50,9 +50,3 @@ func (b *WithSnapshot) Commit(repo repository.ClockedRepo) error {
	b.snap.id = b.Bug.Id()
	return nil
}

// Merge intercept Bug.Merge() and clear the snapshot
func (b *WithSnapshot) Merge(repo repository.Repo, other Interface) (bool, error) {
	b.snap = nil
	return b.Bug.Merge(repo, other)
}
4 cache/bug_cache.go vendored
@ -51,9 +51,7 @@ func (c *BugCache) ResolveOperationWithMetadata(key string, value string) (entit
	// preallocate but empty
	matching := make([]entity.Id, 0, 5)

	it := bug.NewOperationIterator(c.bug)
	for it.Next() {
		op := it.Value()
	for _, op := range c.bug.Operations() {
		opValue, ok := op.GetMetadata(key)
		if ok && value == opValue {
			matching = append(matching, op.Id())
2 cache/bug_excerpt.go vendored
@ -87,7 +87,7 @@ func NewBugExcerpt(b bug.Interface, snap *bug.Snapshot) *BugExcerpt {
	}

	switch snap.Author.(type) {
	case *identity.Identity, *IdentityCache:
	case *identity.Identity, *identity.IdentityStub, *IdentityCache:
		e.AuthorId = snap.Author.Id()
	default:
		panic("unhandled identity type")
2 cache/repo_cache.go vendored
@ -195,7 +195,7 @@ func (c *RepoCache) buildCache() error {

	c.bugExcerpts = make(map[entity.Id]*BugExcerpt)

	allBugs := bug.ReadAllLocal(c.repo)
	allBugs := bug.ReadAll(c.repo)

	// wipe the index just to be sure
	err := c.repo.ClearBleveIndex("bug")
2 cache/repo_cache_bug.go vendored
@ -151,7 +151,7 @@ func (c *RepoCache) ResolveBug(id entity.Id) (*BugCache, error) {
	}
	c.muBug.RUnlock()

	b, err := bug.ReadLocalWithResolver(c.repo, newIdentityCacheResolver(c), id)
	b, err := bug.ReadWithResolver(c.repo, newIdentityCacheResolver(c), id)
	if err != nil {
		return nil, err
	}
15 cache/repo_cache_common.go vendored
@ -95,6 +95,12 @@ func (c *RepoCache) MergeAll(remote string) <-chan entity.MergeResult {
	go func() {
		defer close(out)

		author, err := c.GetUserIdentity()
		if err != nil {
			out <- entity.NewMergeError(err, "")
			return
		}

		results := identity.MergeAll(c.repo, remote)
		for result := range results {
			out <- result
@ -112,7 +118,7 @@ func (c *RepoCache) MergeAll(remote string) <-chan entity.MergeResult {
			}
		}

		results = bug.MergeAll(c.repo, remote)
		results = bug.MergeAll(c.repo, remote, author)
		for result := range results {
			out <- result

@ -130,11 +136,10 @@ func (c *RepoCache) MergeAll(remote string) <-chan entity.MergeResult {
			}
		}

		err := c.write()

		// No easy way out here ..
		err = c.write()
		if err != nil {
			panic(err)
			out <- entity.NewMergeError(err, "")
			return
		}
	}()
4 cache/repo_cache_test.go vendored
@ -123,6 +123,10 @@ func TestPushPull(t *testing.T) {
	require.NoError(t, err)
	err = cacheA.SetUserIdentity(reneA)
	require.NoError(t, err)
	isaacB, err := cacheB.NewIdentity("Isaac Newton", "isaac@newton.uk")
	require.NoError(t, err)
	err = cacheB.SetUserIdentity(isaacB)
	require.NoError(t, err)

	// distribute the identity
	_, err = cacheA.Push("origin")
@ -1,9 +0,0 @@
- is the pack Lamport clock really useful vs only topological sort?
- topological order is enforced on the clocks, so what's the point?
- is EditTime equivalent to PackTime? no, avoid the gaps. Is it better?
--> PackTime is contained within a bug and might avoid extreme reordering?
- how to do commit signature?
- how to avoid id collision between Operations?
- write tests for actions
- migrate Bug to the new structure
- migrate Identity to the new structure?
@ -42,13 +42,15 @@ func (mr MergeResult) String() string {
	case MergeStatusNothing:
		return "nothing to do"
	case MergeStatusError:
		return fmt.Sprintf("merge error on %s: %s", mr.Id, mr.Err.Error())
		if mr.Id != "" {
			return fmt.Sprintf("merge error on %s: %s", mr.Id, mr.Err.Error())
		}
		return fmt.Sprintf("merge error: %s", mr.Err.Error())
	default:
		panic("unknown merge status")
	}
}

// TODO: Interface --> *Entity ?
func NewMergeNewStatus(id Id, entity Interface) MergeResult {
	return MergeResult{
		Id:     id,
1 go.sum
@ -575,6 +575,7 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -111,52 +111,6 @@ func generateRandomBugsWithSeed(opts Options, seed int64) []*bug.Bug {
	return result
}

func GenerateRandomOperationPacks(packNumber int, opNumber int) []*bug.OperationPack {
	return GenerateRandomOperationPacksWithSeed(packNumber, opNumber, time.Now().UnixNano())
}

func GenerateRandomOperationPacksWithSeed(packNumber int, opNumber int, seed int64) []*bug.OperationPack {
	// Note: this is a bit crude, only generate a Create + Comments

	panic("this piece of code needs to be updated to make sure that the identities " +
		"are properly commit before usage. That is, generateRandomPersons() need to be called.")

	rand.Seed(seed)
	fake.Seed(seed)

	result := make([]*bug.OperationPack, packNumber)

	for i := 0; i < packNumber; i++ {
		opp := &bug.OperationPack{}

		var op bug.Operation

		op = bug.NewCreateOp(
			randomPerson(),
			time.Now().Unix(),
			fake.Sentence(),
			paragraphs(),
			nil,
		)

		opp.Append(op)

		for j := 0; j < opNumber-1; j++ {
			op = bug.NewAddCommentOp(
				randomPerson(),
				time.Now().Unix(),
				paragraphs(),
				nil,
			)
			opp.Append(op)
		}

		result[i] = opp
	}

	return result
}

func person(repo repository.RepoClock) (*identity.Identity, error) {
	return identity.NewIdentity(repo, fake.FullName(), fake.EmailAddress())
}
@ -14,7 +14,7 @@ func TestReadBugs(t *testing.T) {

	random_bugs.FillRepoWithSeed(repo, 15, 42)

	bugs := bug.ReadAllLocal(repo)
	bugs := bug.ReadAll(repo)
	for b := range bugs {
		if b.Err != nil {
			t.Fatal(b.Err)
@ -30,7 +30,7 @@ func benchmarkReadBugs(bugNumber int, t *testing.B) {
	t.ResetTimer()

	for n := 0; n < t.N; n++ {
		bugs := bug.ReadAllLocal(repo)
		bugs := bug.ReadAll(repo)
		for b := range bugs {
			if b.Err != nil {
				t.Fatal(b.Err)