Change --imports-per-dev to return imports through time
Signed-off-by: Vadim Markovtsev <vadim@athenian.co>
vmarkovtsev committed Dec 24, 2019
1 parent 1fca413 commit 8c012d1
Showing 9 changed files with 459 additions and 234 deletions.
299 changes: 175 additions & 124 deletions internal/pb/pb.pb.go

Large diffs are not rendered by default.

8 changes: 7 additions & 1 deletion internal/pb/pb.proto
@@ -182,8 +182,12 @@ message TyposDataset {
     repeated Typo typos = 1;
 }
 
+message ImportsPerTick {
+    map<int32, int64> counts = 1;
+}
+
 message ImportsPerLanguage {
-    map<string, int64> counts = 1;
+    map<string, ImportsPerTick> ticks = 1;
 }
 
 message ImportsPerDeveloper {
@@ -193,6 +197,8 @@ message ImportsPerDeveloper {
 message ImportsPerDeveloperResults {
     repeated ImportsPerDeveloper imports = 1;
     repeated string author_index = 2;
+    // how long each tick is, as an int64 nanosecond count (Go's time.Duration)
+    int64 tick_size = 3;
 }
 
 message AnalysisResults {
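The schema change above replaces the flat per-language counts map with a map of per-tick counts, so a consumer now walks string key → tick index → import count and converts tick indices to wall-clock offsets via tick_size. Below is a minimal, self-contained Go sketch of that traversal; the struct names only mirror the protobuf messages above rather than the actual generated types in internal/pb, and the assumption that the string keys are import names carries over from the old counts map, not from this diff.

package main

import (
	"fmt"
	"time"
)

// Hand-written mirrors of the messages above (illustrative only; the real
// types are generated into internal/pb/pb.pb.go).
type ImportsPerTick struct {
	Counts map[int32]int64 // tick index -> number of imports seen in that tick
}

type ImportsPerLanguage struct {
	Ticks map[string]*ImportsPerTick // string key (presumably the import name) -> counts through time
}

type ImportsPerDeveloperResults struct {
	AuthorIndex []string
	TickSize    int64 // serialized time.Duration, i.e. nanoseconds per tick
}

func main() {
	res := ImportsPerDeveloperResults{
		AuthorIndex: []string{"vadim"},
		TickSize:    int64(24 * time.Hour), // one tick == one day
	}
	golang := ImportsPerLanguage{
		Ticks: map[string]*ImportsPerTick{
			"os": {Counts: map[int32]int64{0: 1, 5: 2}},
		},
	}
	tick := time.Duration(res.TickSize)
	for name, perTick := range golang.Ticks {
		for idx, n := range perTick.Counts {
			fmt.Printf("%s: %d import(s) around +%v\n", name, n, time.Duration(idx)*tick)
		}
	}
}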
23 changes: 12 additions & 11 deletions leaves/burndown.go
@@ -47,6 +47,9 @@ type BurndownAnalysis struct {
     // PeopleNumber is the number of developers for which to collect the burndown stats. 0 disables it.
     PeopleNumber int
 
+    // TickSize indicates the size of each time granule: day, hour, week, etc.
+    TickSize time.Duration
+
     // HibernationThreshold sets the hibernation threshold for the underlying
     // RBTree allocator. It is useful to trade CPU time for reduced peak memory consumption
     // if there are many branches.
Expand Down Expand Up @@ -103,8 +106,6 @@ type BurndownAnalysis struct {
// previousTick is the tick from the previous sample period -
// different from TicksSinceStart.previousTick.
previousTick int
// tickSize indicates the size of each tick.
tickSize time.Duration
// references IdentityDetector.ReversedPeopleDict
reversedPeopleDict []string

Expand Down Expand Up @@ -140,7 +141,7 @@ type BurndownResult struct {
// Pipeline.Initialize(facts map[string]interface{}). Thus it can be obtained via
// facts[FactIdentityDetectorReversedPeopleDict].
reversedPeopleDict []string
// tickSize references TicksSinceStart.tickSize
// TickSize references TicksSinceStart.TickSize
tickSize time.Duration
// sampling and granularity are copied from BurndownAnalysis and stored for service purposes
// such as merging several results together.
Expand Down Expand Up @@ -294,7 +295,7 @@ func (analyser *BurndownAnalysis) Configure(facts map[string]interface{}) error
analyser.Debug = val
}
if val, exists := facts[items.FactTickSize].(time.Duration); exists {
analyser.tickSize = val
analyser.TickSize = val
}
return nil
}
Expand Down Expand Up @@ -329,10 +330,10 @@ func (analyser *BurndownAnalysis) Initialize(repository *git.Repository) error {
analyser.Granularity)
analyser.Sampling = analyser.Granularity
}
if analyser.tickSize == 0 {
if analyser.TickSize == 0 {
def := items.DefaultTicksSinceStartTickSize * time.Hour
analyser.l.Warnf("tick size was not set, adjusted to %v\n", def)
analyser.tickSize = items.DefaultTicksSinceStartTickSize * time.Hour
analyser.TickSize = items.DefaultTicksSinceStartTickSize * time.Hour
}
analyser.repository = repository
analyser.globalHistory = sparseHistory{}
Expand Down Expand Up @@ -570,7 +571,7 @@ func (analyser *BurndownAnalysis) Finalize() interface{} {
FileOwnership: fileOwnership,
PeopleHistories: peopleHistories,
PeopleMatrix: peopleMatrix,
tickSize: analyser.tickSize,
tickSize: analyser.TickSize,
reversedPeopleDict: analyser.reversedPeopleDict,
sampling: analyser.Sampling,
granularity: analyser.Granularity,
Expand Down Expand Up @@ -653,12 +654,12 @@ func (analyser *BurndownAnalysis) MergeResults(
bar1.tickSize, bar2.tickSize)
}
// for backwards-compatibility, if no tick size is present set to default
analyser.tickSize = bar1.tickSize
if analyser.tickSize == 0 {
analyser.tickSize = items.DefaultTicksSinceStartTickSize * time.Hour
analyser.TickSize = bar1.tickSize
if analyser.TickSize == 0 {
analyser.TickSize = items.DefaultTicksSinceStartTickSize * time.Hour
}
merged := BurndownResult{
tickSize: analyser.tickSize,
tickSize: analyser.TickSize,
}
if bar1.sampling < bar2.sampling {
merged.sampling = bar1.sampling
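The burndown changes promote the private tickSize field to an exported TickSize, so callers can set it directly, while Configure keeps reading facts[items.FactTickSize] and Initialize still falls back to the 24-hour default. Here is a minimal sketch of that fallback chain, using stand-in names (factTickSize, defaultTickSizeHours) instead of the real items package:

package main

import (
	"fmt"
	"time"
)

// Stand-ins for items.FactTickSize and items.DefaultTicksSinceStartTickSize.
const (
	factTickSize         = "TicksSinceStart.TickSize"
	defaultTickSizeHours = 24
)

type BurndownAnalysis struct {
	TickSize time.Duration // exported by this commit (was the private tickSize)
}

// configure mimics Configure + Initialize: a facts value overrides the field,
// and a zero value afterwards is replaced by the 24-hour default with a warning.
func (a *BurndownAnalysis) configure(facts map[string]interface{}) {
	if val, exists := facts[factTickSize].(time.Duration); exists {
		a.TickSize = val
	}
	if a.TickSize == 0 {
		def := defaultTickSizeHours * time.Hour
		fmt.Printf("tick size was not set, adjusted to %v\n", def)
		a.TickSize = def
	}
}

func main() {
	a := &BurndownAnalysis{}
	a.configure(map[string]interface{}{factTickSize: time.Hour})
	fmt.Println(a.TickSize) // 1h0m0s

	b := &BurndownAnalysis{}
	b.configure(nil) // no fact set -> warning + 24h default
	fmt.Println(b.TickSize)
}

The same default shows up in MergeResults above: per the "for backwards-compatibility" comment, older serialized results that carry no tick size are merged as if they used 24-hour ticks.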
18 changes: 9 additions & 9 deletions leaves/burndown_test.go
@@ -87,7 +87,7 @@ func TestBurndownConfigure(t *testing.T) {
     assert.True(t, bd.HibernationToDisk)
     assert.Equal(t, bd.HibernationDirectory, "xxx")
     assert.Equal(t, bd.Debug, true)
-    assert.Equal(t, bd.tickSize, 24*time.Hour)
+    assert.Equal(t, bd.TickSize, 24*time.Hour)
     assert.Equal(t, bd.reversedPeopleDict, bd.Requires())
     facts[ConfigBurndownTrackPeople] = false
     facts[identity.FactIdentityDetectorPeopleCount] = 50
Expand Down Expand Up @@ -480,7 +480,7 @@ func bakeBurndownForSerialization(t *testing.T, firstAuthor, secondAuthor int) (
Sampling: 30,
PeopleNumber: 2,
TrackFiles: true,
tickSize: 24 * time.Hour,
TickSize: 24 * time.Hour,
}
assert.Nil(t, bd.Initialize(test.Repository))
deps := map[string]interface{}{}
Expand Down Expand Up @@ -1153,7 +1153,7 @@ func TestBurndownMergeGlobalHistory(t *testing.T) {
res2.PeopleMatrix[1][2] = 700
res2.PeopleMatrix[1][3] = 800
bd := BurndownAnalysis{
tickSize: 24 * time.Hour,
TickSize: 24 * time.Hour,
}
merged := bd.MergeResults(res1, res2, &c1, &c2).(BurndownResult)
assert.Equal(t, merged.granularity, 19)
Expand Down Expand Up @@ -1212,7 +1212,7 @@ func TestBurndownMergeGlobalHistory_withDifferentTickSizes(t *testing.T) {
RunTime: 100000,
}
bd := BurndownAnalysis{
tickSize: 24 * time.Hour,
TickSize: 24 * time.Hour,
}
merged := bd.MergeResults(res1, res2, &c1, &c2)
assert.IsType(t, errors.New(""), merged)
Expand Down Expand Up @@ -1253,7 +1253,7 @@ func TestBurndownMergeNils(t *testing.T) {
RunTime: 100000,
}
bd := BurndownAnalysis{
tickSize: 24 * time.Hour,
TickSize: 24 * time.Hour,
}
merged := bd.MergeResults(res1, res2, &c1, &c2).(BurndownResult)
assert.Equal(t, merged.granularity, 19)
Expand Down Expand Up @@ -1499,14 +1499,14 @@ func TestBurndownMergeMatrices(t *testing.T) {
CommitsNumber: 6982,
RunTime: 1567214,
}
bd := BurndownAnalysis{tickSize: 24 * time.Hour}
nh := bd.mergeMatrices(h, nil, 30, 30, 30, 30, bd.tickSize, cr, cr)
bd := BurndownAnalysis{TickSize: 24 * time.Hour}
nh := bd.mergeMatrices(h, nil, 30, 30, 30, 30, bd.TickSize, cr, cr)
for y, row := range nh {
for x, v := range row {
assert.InDelta(t, v, h[y][x], 1, fmt.Sprintf("y=%d x=%d", y, x))
}
}
nh = bd.mergeMatrices(h, h, 30, 30, 30, 30, bd.tickSize, cr, cr)
nh = bd.mergeMatrices(h, h, 30, 30, 30, 30, bd.TickSize, cr, cr)
for y, row := range nh {
for x, v := range row {
assert.InDelta(t, v, h[y][x]*2, 1, fmt.Sprintf("y=%d x=%d", y, x))
@@ -1559,7 +1559,7 @@ func TestBurndownMergePeopleHistories(t *testing.T) {
         RunTime: 100000,
     }
     bd := BurndownAnalysis{
-        tickSize: 24 * time.Hour,
+        TickSize: 24 * time.Hour,
     }
     merged := bd.MergeResults(res1, res2, &c1, &c2).(BurndownResult)
     mh := [][]int64{
4 changes: 2 additions & 2 deletions leaves/devs.go
Expand Up @@ -33,7 +33,7 @@ type DevsAnalysis struct {
ticks map[int]map[int]*DevTick
// reversedPeopleDict references IdentityDetector.ReversedPeopleDict
reversedPeopleDict []string
// tickSize references TicksSinceStart.tickSize
// TickSize references TicksSinceStart.TickSize
tickSize time.Duration

l core.Logger
@@ -47,7 +47,7 @@ type DevsResult struct {
 
     // reversedPeopleDict references IdentityDetector.ReversedPeopleDict
     reversedPeopleDict []string
-    // tickSize references TicksSinceStart.tickSize
+    // TickSize references TicksSinceStart.TickSize
     tickSize time.Duration
 }
 
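In DevsResult (and BurndownResult) the tick size stays a Go time.Duration; it only becomes the int64 tick_size protobuf field at serialization time, as a raw nanosecond count per the proto comment above. A small round-trip sketch of that convention (plain Go, no protobuf dependency, names are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	tickSize := 24 * time.Hour // what the analysis holds internally

	// Serialize: time.Duration is an int64 nanosecond count, which is exactly
	// what the `int64 tick_size = 3` field in ImportsPerDeveloperResults stores.
	wire := int64(tickSize)

	// Deserialize on the reader's side.
	restored := time.Duration(wire)

	fmt.Println(wire)     // 86400000000000
	fmt.Println(restored) // 24h0m0s
}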
(4 more changed files were not loaded in this view.)
