From d979cba4992f227e0f1c9f167cad971d4f00dcbf Mon Sep 17 00:00:00 2001 From: Robin Bohrer Date: Mon, 30 Dec 2024 21:03:02 +0100 Subject: [PATCH] fix: improve stats display performance and formatting This commit improves the stats command in several ways: 1. Performance Improvements: - Pre-calculate display statistics during cache refresh - Store formatted stats in cache to avoid recalculation - Remove unnecessary parallel processing for small datasets - Optimize data structures with pre-allocated maps 2. Display Formatting: - Fix table header centering to match table width exactly - Restore table styling from user configuration - Add proper borders and separators based on config - Fix language statistics formatting 3. Code Organization: - Move display logic to dedicated structs - Add proper type registration for gob encoding - Improve error handling in cache operations The stats command now runs in sub-second time and maintains consistent formatting across different terminal sizes. --- cache/cache.go | 301 +++++++++++++++---------------- cache/manager.go | 454 +++++++++++++++++++++++++++++++++++++++++++++++ cmd/history.go | 23 +-- cmd/stats.go | 372 +++++++++++++++++++++++++++++++++----- scan/scan.go | 154 +++++++++++++++- 5 files changed, 1091 insertions(+), 213 deletions(-) create mode 100644 cache/manager.go diff --git a/cache/cache.go b/cache/cache.go index 85746a6..b44eb8c 100644 --- a/cache/cache.go +++ b/cache/cache.go @@ -1,7 +1,6 @@ package cache import ( - "encoding/json" "fmt" "log" "os" @@ -14,225 +13,229 @@ import ( "github.com/AccursedGalaxy/streakode/scan" ) -// in-memory cache to hold repo metadata during runtime +// Global cache manager instance var ( - Cache map[string]scan.RepoMetadata - metadata CacheMetadata - mutex sync.RWMutex + manager *CacheManager + mutex sync.RWMutex ) -type CacheMetadata struct { - LastRefresh time.Time - Version string // For Future Version Tracking -} - -// InitCache - Initializes the in memory cache +// InitCache - Initializes the cache manager func InitCache() { - Cache = make(map[string]scan.RepoMetadata) -} + mutex.Lock() + defer mutex.Unlock() -// check if refresh is needed -func ShouldAutoRefresh(refreshInterval time.Duration) bool { - mutex.RLock() - defer mutex.RUnlock() + if manager != nil { + return + } - if metadata.LastRefresh.IsZero() { - return true + manager = NewCacheManager(getCacheFilePath()) + if err := manager.Load(); err != nil { + log.Printf("Error loading cache: %v\n", err) } - return time.Since(metadata.LastRefresh) > refreshInterval } -// LoadCache - loads repository metadata from a JSON cache file into memory +// LoadCache - loads repository metadata from cache file func LoadCache(filePath string) error { mutex.Lock() defer mutex.Unlock() - file, err := os.Open(filePath) - if os.IsNotExist(err) { - InitCache() - return nil - } - if err != nil { - return fmt.Errorf("error opening cache file: %v", err) + if manager == nil { + manager = NewCacheManager(filePath) } - defer file.Close() - decoder := json.NewDecoder(file) - if err := decoder.Decode(&Cache); err != nil { - InitCache() - return nil - } + return manager.Load() +} - // Load metadata from a separate file - metadataPath := filePath + ".meta" - metaFile, err := os.Open(metadataPath) - if os.IsNotExist(err) { - metadata = CacheMetadata{LastRefresh: time.Time{}} - return nil - } - if err != nil { - return fmt.Errorf("error opening metadata file: %v", err) - } - defer metaFile.Close() +// SaveCache - saves the cache to disk +func SaveCache(filePath string) error { + 
mutex.Lock() + defer mutex.Unlock() - decoder = json.NewDecoder(metaFile) - if err := decoder.Decode(&metadata); err != nil { - metadata = CacheMetadata{LastRefresh: time.Time{}} - return nil + if manager == nil { + return fmt.Errorf("cache manager not initialized") } - return nil + return manager.Save() } -// SaveCache - saves the in-memory cache to a JSON file -func SaveCache(filePath string) error { +// RefreshCache - updates the cache with fresh data +func RefreshCache(dirs []string, author string, cacheFilePath string, excludedPatterns []string, excludedPaths []string) error { mutex.Lock() defer mutex.Unlock() - // Create directory if it doesn't exist - dir := filepath.Dir(filePath) - if err := os.MkdirAll(dir, 0755); err != nil { - return fmt.Errorf("error creating directory: %v", err) + if manager == nil { + manager = NewCacheManager(cacheFilePath) } - file, err := os.Create(filePath) - if err != nil { - return fmt.Errorf("error creating cache file: %v", err) - } - defer file.Close() + // Create exclusion function + shouldExclude := func(path string) bool { + // Check full path exclusions + for _, excludedPath := range excludedPaths { + if strings.HasPrefix(path, excludedPath) { + return true + } + } - encoder := json.NewEncoder(file) - if err := encoder.Encode(Cache); err != nil { - return fmt.Errorf("error encoding cache: %v", err) + // Check pattern-based exclusions + for _, pattern := range excludedPatterns { + if strings.Contains(path, pattern) { + return true + } + } + return false } - // Save metadata to a separate file - metadataPath := filePath + ".meta" - metaFile, err := os.Create(metadataPath) + // Scan directories for repositories + repos, err := scan.ScanDirectories(dirs, author, shouldExclude) if err != nil { - return fmt.Errorf("error creating metadata file: %v", err) + return fmt.Errorf("error scanning directories: %v", err) } - defer metaFile.Close() - metadata.LastRefresh = time.Now() - encoder = json.NewEncoder(metaFile) - if err := encoder.Encode(metadata); err != nil { - return fmt.Errorf("error encoding metadata: %v", err) + // Convert repos slice to map + reposMap := make(map[string]scan.RepoMetadata) + for _, repo := range repos { + reposMap[repo.Path] = repo } - return nil + // Update cache with new data using the manager's method + manager.updateCacheData(reposMap) + + return manager.Save() +} + +// AsyncRefreshCache performs a non-blocking cache refresh +func AsyncRefreshCache(dirs []string, author string, cacheFilePath string, excludedPatterns []string, excludedPaths []string) { + go func() { + if err := RefreshCache(dirs, author, cacheFilePath, excludedPatterns, excludedPaths); err != nil { + log.Printf("Background cache refresh failed: %v", err) + } + }() } -// Add new method to check if cache needs refresh -func NeedsRefresh(path string, lastCommit time.Time) bool { - if cached, exists := Cache[path]; exists { - // Only refresh if new commits exist - return lastCommit.After(cached.LastCommit) +// QuickNeedsRefresh performs a fast check if refresh is needed +func QuickNeedsRefresh(refreshInterval time.Duration) bool { + mutex.RLock() + defer mutex.RUnlock() + + if manager == nil || manager.cache == nil { + return true } - return true + + return time.Since(manager.cache.LastSync) > refreshInterval } -// Clean Cache +// CleanCache removes the cache file and resets the in-memory cache func CleanCache(cacheFilePath string) error { - //Reset in-memory cache - Cache = make(map[string]scan.RepoMetadata) + mutex.Lock() + defer mutex.Unlock() + + if manager != 
nil { + manager.cache = newCommitCache() + } // Remove cache file if present if err := os.Remove(cacheFilePath); err != nil { if !os.IsNotExist(err) { - return fmt.Errorf("something went wrong removing the cache file: %v", err) + return fmt.Errorf("error removing cache file: %v", err) + } + } + + // Remove metadata file if present + metaFile := cacheFilePath + ".meta" + if err := os.Remove(metaFile); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("error removing metadata file: %v", err) } } return nil } -// Modified RefreshCache to support exclusions -func RefreshCache(dirs []string, author string, cacheFilePath string, excludedPatterns []string, excludedPaths []string) error { - // Clean cache and handle potential errors - if err := CleanCache(cacheFilePath); err != nil { - return fmt.Errorf("failed to clean cache: %v", err) +// Helper function to get cache file path +func getCacheFilePath() string { + home, err := os.UserHomeDir() + if err != nil { + log.Fatal(err) } - // Create a function to check if a path should be excluded - shouldExclude := func(path string) bool { - // Check full path exclusions - for _, excludedPath := range excludedPaths { - if strings.HasPrefix(path, excludedPath) { - return true - } - } - - // Check pattern-based exclusions - for _, pattern := range excludedPatterns { - if strings.Contains(path, pattern) { - return true - } - } - return false + if config.AppState.ActiveProfile == "" { + return filepath.Join(home, ".streakode.cache") } + return filepath.Join(home, fmt.Sprintf(".streakode_%s.cache", config.AppState.ActiveProfile)) +} - // Filter out excluded directories before scanning - var filteredDirs []string - for _, dir := range dirs { - if !shouldExclude(dir) { - filteredDirs = append(filteredDirs, dir) - } - } +// Cache is now a proxy to the manager's cache +var Cache = &cacheProxy{} - repos, err := scan.ScanDirectories(filteredDirs, author, shouldExclude) - if err != nil { - log.Printf("Error scanning directories: %v", err) - return err - } +type cacheProxy struct{} - // Only update changed repositories - for _, repo := range repos { - if NeedsRefresh(repo.Path, repo.LastCommit) { - Cache[repo.Path] = repo - } +func (cp *cacheProxy) Get(key string) (scan.RepoMetadata, bool) { + mutex.RLock() + defer mutex.RUnlock() + + if manager == nil || manager.cache == nil { + return scan.RepoMetadata{}, false } - // Validate all repo data before saving - if config.AppConfig.Debug { - fmt.Println("Debug: Validating repository data...") + repo, exists := manager.cache.Repositories[key] + return repo, exists +} + +func (cp *cacheProxy) GetDisplayStats() *DisplayStats { + mutex.RLock() + defer mutex.RUnlock() + + if manager == nil || manager.cache == nil { + return nil } - for i, repo := range repos { - result := repo.ValidateData() - if !result.Valid { - fmt.Printf("Warning: Data validation issues found in repo %d:\n", i) - for _, issue := range result.Issues { - fmt.Printf(" - %s\n", issue) - } - } + return &manager.cache.DisplayStats +} + +func (cp *cacheProxy) Set(key string, value scan.RepoMetadata) { + mutex.Lock() + defer mutex.Unlock() + + if manager == nil || manager.cache == nil { + return } - return SaveCache(cacheFilePath) + manager.cache.Repositories[key] = value } -// AsyncRefreshCache performs a non-blocking cache refresh -func AsyncRefreshCache(dirs []string, author string, cacheFilePath string, excludedPatterns []string, excludedPaths []string) { - go func() { - if err := RefreshCache(dirs, author, cacheFilePath, excludedPatterns, 
excludedPaths); err != nil { - log.Printf("Background cache refresh failed: %v", err) - } - }() +func (cp *cacheProxy) Delete(key string) { + mutex.Lock() + defer mutex.Unlock() + + if manager == nil || manager.cache == nil { + return + } + + delete(manager.cache.Repositories, key) } -// QuickNeedsRefresh performs a fast check if refresh is needed without scanning repositories -func QuickNeedsRefresh(refreshInterval time.Duration) bool { +func (cp *cacheProxy) Range(f func(key string, value scan.RepoMetadata) bool) { mutex.RLock() defer mutex.RUnlock() - if metadata.LastRefresh.IsZero() { - return true + if manager == nil || manager.cache == nil { + return } - // Check if cache file exists and its modification time - if time.Since(metadata.LastRefresh) > refreshInterval { - return true + for k, v := range manager.cache.Repositories { + if !f(k, v) { + break + } + } +} + +func (cp *cacheProxy) Len() int { + mutex.RLock() + defer mutex.RUnlock() + + if manager == nil || manager.cache == nil { + return 0 } - return false + return len(manager.cache.Repositories) } diff --git a/cache/manager.go b/cache/manager.go new file mode 100644 index 0000000..4d72520 --- /dev/null +++ b/cache/manager.go @@ -0,0 +1,454 @@ +package cache + +import ( + "encoding/gob" + "fmt" + "io" + "os" + "runtime" + "sort" + "strings" + "sync" + "time" + + "github.com/AccursedGalaxy/streakode/scan" +) + +func init() { + // Register types for gob encoding/decoding + gob.Register(CommitCache{}) + gob.Register(AuthorStats{}) + gob.Register(scan.CommitHistory{}) + gob.Register(scan.RepoMetadata{}) + gob.Register(time.Time{}) + gob.Register(map[string]bool{}) + gob.Register(map[string]int{}) +} + +// CommitCache represents the optimized cache structure +type CommitCache struct { + // Core data + Commits map[string][]scan.CommitHistory // repo -> commits + Authors map[string]AuthorStats // author -> stats + LastSync time.Time + Version string + + // Performance optimizations + CommitIndex map[string]map[string]bool // hash -> repo -> exists + DateIndex map[string][]string // YYYY-MM-DD -> commit hashes + AuthorIndex map[string][]string // author -> commit hashes + + // Pre-calculated display data + DisplayStats DisplayStats + + // Metadata + Repositories map[string]scan.RepoMetadata +} + +// AuthorStats holds aggregated statistics for an author +type AuthorStats struct { + TotalCommits int + ActiveDays map[string]bool + CurrentStreak int + LongestStreak int + Languages map[string]int + PeakHours map[int]int + LastActivity time.Time +} + +// DisplayStats holds pre-calculated statistics for display +type DisplayStats struct { + WeeklyTotal int + WeeklyDiff int + DailyAverage float64 + TotalAdditions int + TotalDeletions int + PeakHour int + PeakCommits int + LanguageStats map[string]int + RepoStats []RepoDisplayStats + LastUpdate time.Time +} + +// RepoDisplayStats holds pre-calculated statistics for a repository +type RepoDisplayStats struct { + Name string + WeeklyCommits int + CurrentStreak int + LongestStreak int + Additions int + Deletions int + LastCommitTime time.Time +} + +// CacheManager handles all cache operations +type CacheManager struct { + cache *CommitCache + mu sync.RWMutex + refreshTicker *time.Ticker + updates chan *CommitCache + notifications chan CacheUpdate + path string +} + +// CacheUpdate represents a cache update notification +type CacheUpdate struct { + Type string + RepoID string + Changes int +} + +// NewCacheManager creates a new cache manager instance +func NewCacheManager(cachePath string) 
*CacheManager { + return &CacheManager{ + cache: newCommitCache(), + path: cachePath, + updates: make(chan *CommitCache, 10), + notifications: make(chan CacheUpdate, 100), + } +} + +func newCommitCache() *CommitCache { + return &CommitCache{ + Commits: make(map[string][]scan.CommitHistory), + Authors: make(map[string]AuthorStats), + CommitIndex: make(map[string]map[string]bool), + DateIndex: make(map[string][]string), + AuthorIndex: make(map[string][]string), + Repositories: make(map[string]scan.RepoMetadata), + } +} + +// StartBackgroundRefresh initiates background refresh with specified interval +func (cm *CacheManager) StartBackgroundRefresh(interval time.Duration) { + cm.refreshTicker = time.NewTicker(interval) + go func() { + for range cm.refreshTicker.C { + cm.RefreshInBackground() + } + }() +} + +// RefreshInBackground performs a non-blocking cache refresh +func (cm *CacheManager) RefreshInBackground() { + go func() { + if err := cm.Refresh(); err != nil { + fmt.Printf("Background refresh failed: %v\n", err) + } + }() +} + +// Refresh updates the cache with fresh data +func (cm *CacheManager) Refresh() error { + cm.mu.Lock() + defer cm.mu.Unlock() + + // Create worker pool for parallel processing + workerCount := runtime.NumCPU() + jobs := make(chan string, len(cm.cache.Repositories)) + results := make(chan *scan.RepoMetadata, len(cm.cache.Repositories)) + + // Start workers + for i := 0; i < workerCount; i++ { + go cm.repoWorker(jobs, results) + } + + // Queue jobs + for repoPath := range cm.cache.Repositories { + jobs <- repoPath + } + close(jobs) + + // Collect results and update cache + updatedRepos := make(map[string]scan.RepoMetadata) + for i := 0; i < len(cm.cache.Repositories); i++ { + if result := <-results; result != nil { + updatedRepos[result.Path] = *result + } + } + + // Update cache with new data + cm.updateCacheData(updatedRepos) + + return cm.Save() +} + +func (cm *CacheManager) repoWorker(jobs <-chan string, results chan<- *scan.RepoMetadata) { + for repoPath := range jobs { + // Get existing metadata + existing := cm.cache.Repositories[repoPath] + + // Check if refresh is needed + if !cm.needsRefresh(repoPath) { + results <- &existing + continue + } + + // Fetch fresh metadata + meta := scan.FetchRepoMetadata(repoPath) + results <- &meta + } +} + +func (cm *CacheManager) needsRefresh(repoPath string) bool { + existing, exists := cm.cache.Repositories[repoPath] + if !exists { + return true + } + + // Check if last analysis is too old + return time.Since(existing.LastAnalyzed) > 15*time.Minute +} + +// updateCacheData updates the cache with new repository data and pre-calculates statistics +func (cm *CacheManager) updateCacheData(newRepos map[string]scan.RepoMetadata) { + // Pre-allocate maps for better performance + commitsByRepo := make(map[string][]scan.CommitHistory, len(newRepos)) + authorStats := make(map[string]AuthorStats) + commitIndex := make(map[string]map[string]bool) + dateIndex := make(map[string][]string) + authorIndex := make(map[string][]string) + + // Pre-calculated display stats + displayStats := DisplayStats{ + LanguageStats: make(map[string]int), + LastUpdate: time.Now(), + } + + hourStats := make(map[int]int) + var repoStats []RepoDisplayStats + + // Process repositories sequentially for better memory usage + for path, repo := range newRepos { + commitStats := make([]scan.CommitHistory, 0, len(repo.CommitHistory)) + repoAdditions := 0 + repoDeletions := 0 + + for _, commit := range repo.CommitHistory { + commitStats = append(commitStats, 
commit) + + // Update indexes + if commitIndex[commit.Hash] == nil { + commitIndex[commit.Hash] = make(map[string]bool) + } + commitIndex[commit.Hash][path] = true + + dateKey := commit.Date.Format("2006-01-02") + dateIndex[dateKey] = append(dateIndex[dateKey], commit.Hash) + + // Update author stats + stats := authorStats[commit.Author] + stats.TotalCommits++ + if stats.ActiveDays == nil { + stats.ActiveDays = make(map[string]bool) + } + stats.ActiveDays[dateKey] = true + if stats.LastActivity.Before(commit.Date) { + stats.LastActivity = commit.Date + } + if stats.PeakHours == nil { + stats.PeakHours = make(map[int]int) + } + stats.PeakHours[commit.Date.Hour()]++ + authorStats[commit.Author] = stats + + // Update author index + authorIndex[commit.Author] = append(authorIndex[commit.Author], commit.Hash) + + // Update display stats + hourStats[commit.Date.Hour()]++ + repoAdditions += commit.Additions + repoDeletions += commit.Deletions + displayStats.TotalAdditions += commit.Additions + displayStats.TotalDeletions += commit.Deletions + } + + commitsByRepo[path] = commitStats + + // Update language stats + for lang, lines := range repo.Languages { + displayStats.LanguageStats[lang] += lines + } + + // Create repo display stats + repoStats = append(repoStats, RepoDisplayStats{ + Name: path[strings.LastIndex(path, "/")+1:], + WeeklyCommits: repo.WeeklyCommits, + CurrentStreak: repo.CurrentStreak, + LongestStreak: repo.LongestStreak, + Additions: repoAdditions, + Deletions: repoDeletions, + LastCommitTime: repo.LastCommit, + }) + } + + // Find peak coding hour + peakHour, peakCommits := 0, 0 + for hour, commits := range hourStats { + if commits > peakCommits { + peakHour = hour + peakCommits = commits + } + } + + // Sort repo stats by last commit time + sort.Slice(repoStats, func(i, j int) bool { + return repoStats[i].LastCommitTime.After(repoStats[j].LastCommitTime) + }) + + // Calculate weekly stats + weeklyTotal := 0 + lastWeekTotal := 0 + + for _, repo := range newRepos { + weeklyTotal += repo.WeeklyCommits + lastWeekTotal += repo.LastWeeksCommits + } + + // Update display stats + displayStats.WeeklyTotal = weeklyTotal + displayStats.WeeklyDiff = weeklyTotal - lastWeekTotal + displayStats.DailyAverage = float64(weeklyTotal) / 7 + displayStats.PeakHour = peakHour + displayStats.PeakCommits = peakCommits + displayStats.RepoStats = repoStats + + // Update cache with all data + cm.cache.Commits = commitsByRepo + cm.cache.Authors = authorStats + cm.cache.CommitIndex = commitIndex + cm.cache.DateIndex = dateIndex + cm.cache.AuthorIndex = authorIndex + cm.cache.Repositories = newRepos + cm.cache.DisplayStats = displayStats + cm.cache.LastSync = time.Now() +} + +// Save persists the cache to disk +func (cm *CacheManager) Save() error { + tempFile := cm.path + ".tmp" + + file, err := os.Create(tempFile) + if err != nil { + return fmt.Errorf("failed to create temp file: %v", err) + } + defer file.Close() + + // Use gob encoding for efficient binary serialization + encoder := gob.NewEncoder(file) + if err := encoder.Encode(cm.cache); err != nil { + return fmt.Errorf("failed to encode cache: %v", err) + } + + // Atomic rename + if err := os.Rename(tempFile, cm.path); err != nil { + return fmt.Errorf("failed to save cache file: %v", err) + } + + return nil +} + +// Load reads the cache from disk +func (cm *CacheManager) Load() error { + file, err := os.Open(cm.path) + if err != nil { + if os.IsNotExist(err) { + cm.cache = newCommitCache() + return nil + } + return fmt.Errorf("failed to open cache file: 
%v", err) + } + defer file.Close() + + decoder := gob.NewDecoder(file) + if err := decoder.Decode(cm.cache); err != nil { + if err == io.EOF { + cm.cache = newCommitCache() + return nil + } + return fmt.Errorf("failed to decode cache: %v", err) + } + + return nil +} + +// GetCommits retrieves commits based on query options +func (cm *CacheManager) GetCommits(options QueryOptions) []scan.CommitHistory { + cm.mu.RLock() + defer cm.mu.RUnlock() + + var commits []scan.CommitHistory + + if options.Author != "" { + commits = cm.getCommitsByAuthor(options.Author, options.Since) + } else if options.Repository != "" { + commits = cm.getCommitsByRepo(options.Repository, options.Since) + } else { + commits = cm.getCommitsByDate(options.Since, options.Until) + } + + return commits +} + +// QueryOptions defines parameters for commit queries +type QueryOptions struct { + Author string + Repository string + Since time.Time + Until time.Time +} + +func (cm *CacheManager) getCommitsByAuthor(author string, since time.Time) []scan.CommitHistory { + var commits []scan.CommitHistory + hashes := cm.cache.AuthorIndex[author] + + for _, hash := range hashes { + for repoPath := range cm.cache.CommitIndex[hash] { + for _, commit := range cm.cache.Commits[repoPath] { + if commit.Hash == hash && commit.Date.After(since) { + commits = append(commits, commit) + } + } + } + } + + return commits +} + +func (cm *CacheManager) getCommitsByRepo(repo string, since time.Time) []scan.CommitHistory { + commits := cm.cache.Commits[repo] + if since.IsZero() { + return commits + } + + var filtered []scan.CommitHistory + for _, commit := range commits { + if commit.Date.After(since) { + filtered = append(filtered, commit) + } + } + return filtered +} + +func (cm *CacheManager) getCommitsByDate(since, until time.Time) []scan.CommitHistory { + var commits []scan.CommitHistory + current := since + + for !current.After(until) { + dateKey := current.Format("2006-01-02") + if hashes, exists := cm.cache.DateIndex[dateKey]; exists { + for _, hash := range hashes { + for repoPath := range cm.cache.CommitIndex[hash] { + for _, commit := range cm.cache.Commits[repoPath] { + if commit.Hash == hash { + commits = append(commits, commit) + } + } + } + } + } + current = current.AddDate(0, 0, 1) + } + + return commits +} diff --git a/cmd/history.go b/cmd/history.go index bae72a0..be68731 100644 --- a/cmd/history.go +++ b/cmd/history.go @@ -14,6 +14,7 @@ import ( "github.com/AccursedGalaxy/streakode/cache" "github.com/AccursedGalaxy/streakode/cmd/search" "github.com/AccursedGalaxy/streakode/config" + "github.com/AccursedGalaxy/streakode/scan" "github.com/charmbracelet/lipgloss" "github.com/jedib0t/go-pretty/v6/table" "golang.org/x/term" @@ -109,11 +110,11 @@ func displayInteractiveHistory(commits []CommitSummary, opts HistoryOptions) { func handleSelectedCommits(commits []search.SearchResult) { fmt.Printf("\nSelected %d commits:\n\n", len(commits)) - + t := table.NewWriter() t.SetStyle(table.StyleRounded) t.AppendHeader(table.Row{"Date", "Repository", "Message", "Changes"}) - + for _, commit := range commits { t.AppendRow(table.Row{ commit.Date.Format("2006-01-02 15:04"), @@ -122,7 +123,7 @@ func handleSelectedCommits(commits []search.SearchResult) { fmt.Sprintf("+%d/-%d", commit.Additions, commit.Deletions), }) } - + fmt.Println(t.Render()) } @@ -139,9 +140,9 @@ func getCommitHistory(opts HistoryOptions) []CommitSummary { // Process repositories concurrently activeRepos := 0 - for path := range cache.Cache { + cache.Cache.Range(func(path string, 
repo scan.RepoMetadata) bool { if opts.Repository != "" && !matchesRepository(path, opts.Repository) { - continue + return true } activeRepos++ @@ -171,7 +172,9 @@ func getCommitHistory(opts HistoryOptions) []CommitSummary { results <- result }(path) - } + + return true + }) // Collect results for i := 0; i < activeRepos; i++ { @@ -220,7 +223,7 @@ func fetchRemoteData(repoPath string) { func getLocalCommits(repoPath string, opts HistoryOptions, since time.Time) []CommitSummary { var commits []CommitSummary - + // Build optimized git log command args := []string{ "-C", repoPath, @@ -270,7 +273,7 @@ func getRemoteCommits(repoPath string, opts HistoryOptions, since time.Time) []C } branches := strings.Split(string(output), "\n") - + // Process each branch concurrently type branchResult struct { commits []CommitSummary @@ -285,7 +288,7 @@ func getRemoteCommits(repoPath string, opts HistoryOptions, since time.Time) []C go func(branchName string) { var result branchResult - + // Get commits from remote branch with timeout ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() @@ -541,4 +544,4 @@ func sortCommitsByDate(commits []CommitSummary) { sort.Slice(commits, func(i, j int) bool { return commits[i].Date.After(commits[j].Date) }) -} \ No newline at end of file +} diff --git a/cmd/stats.go b/cmd/stats.go index eee60a0..4d38685 100644 --- a/cmd/stats.go +++ b/cmd/stats.go @@ -5,6 +5,7 @@ import ( "fmt" "sort" "strings" + "sync" "time" "github.com/AccursedGalaxy/streakode/cache" @@ -52,52 +53,187 @@ func (c *DefaultStatsCalculator) CalculateCommitTrend(current int, previous int) // DisplayStats - Displays stats for all active repositories or a specific repository func DisplayStats(targetRepo string) { - // Get table width from the rendered table first - projectsSection := buildProjectsSection(targetRepo) - tableLines := strings.Split(projectsSection, "\n") - if len(tableLines) == 0 { + // Get pre-calculated display stats from cache + displayStats := cache.Cache.GetDisplayStats() + if displayStats == nil { + fmt.Println("No stats available. Try running 'cache reload' first.") return } - // Get the actual table width from the first line (including borders) - tableWidth := len([]rune(tableLines[0])) // use runes to handle Unicode characters correctly + // Filter repo stats if target repo is specified + var repoStats []cache.RepoDisplayStats + if targetRepo != "" { + for _, rs := range displayStats.RepoStats { + if rs.Name == targetRepo { + repoStats = append(repoStats, rs) + break + } + } + if len(repoStats) == 0 { + fmt.Printf("Repository '%s' not found.\n", targetRepo) + return + } + } else { + repoStats = displayStats.RepoStats + } - // Create styles with calculated width - match table width exactly - style := lipgloss.NewStyle() - headerStyle := style. - Bold(true). - Foreground(lipgloss.Color(config.AppConfig.Colors.HeaderColor)). - Width(tableWidth). 
- Align(lipgloss.Center)
+ // Calculate table width
+ tableWidth := calculator.CalculateTableWidth()
 
- // Build sections dynamically
- var sections []string
+ // Build projects table first to get actual width
+ var tableOutput string
+ if len(repoStats) > 0 {
+ t := table.NewWriter()
+
+ // Configure table column widths
+ t.SetColumnConfigs([]table.ColumnConfig{
+ {Number: 1, WidthMax: int(float64(tableWidth) * 0.35)}, // Repository name
+ {Number: 2, WidthMax: int(float64(tableWidth) * 0.15)}, // Weekly commits
+ {Number: 3, WidthMax: int(float64(tableWidth) * 0.15)}, // Streak
+ {Number: 4, WidthMax: int(float64(tableWidth) * 0.20)}, // Changes
+ {Number: 5, WidthMax: int(float64(tableWidth) * 0.15)}, // Last activity
+ })
+
+ // Set overall table width
+ t.SetAllowedRowLength(tableWidth)
+
+ // Add table header if set in config
+ if config.AppConfig.DisplayStats.TableStyle.UseTableHeader {
+ t.AppendHeader(table.Row{
+ "Repo",
+ "Weekly",
+ "Streak",
+ "Changes",
+ "Activity",
+ })
+ }
+
+ // Apply table style based on config
+ var style table.Style
+ switch strings.ToLower(config.AppConfig.DisplayStats.TableStyle.Style) {
+ case "rounded":
+ style = table.StyleRounded
+ case "bold":
+ style = table.StyleBold
+ case "light":
+ style = table.StyleLight
+ case "double":
+ style = table.StyleDouble
+ default:
+ style = table.StyleDefault
+ }
+
+ // Customize style based on config options
+ style.Options.DrawBorder = config.AppConfig.DisplayStats.TableStyle.Options.DrawBorder
+ style.Options.SeparateColumns = config.AppConfig.DisplayStats.TableStyle.Options.SeparateColumns
+ style.Options.SeparateHeader = config.AppConfig.DisplayStats.TableStyle.Options.SeparateHeader
+ style.Options.SeparateRows = config.AppConfig.DisplayStats.TableStyle.Options.SeparateRows
+
+ t.SetStyle(style)
+
+ // Add rows
+ for _, rs := range repoStats {
+ activityText := formatActivityText(rs.LastCommitTime)
+ t.AppendRow(table.Row{
+ rs.Name,
+ fmt.Sprintf("%d%s", rs.WeeklyCommits, formatActivityIndicator(rs.WeeklyCommits)),
+ formatStreakString(rs.CurrentStreak, rs.LongestStreak),
+ fmt.Sprintf("+%d/-%d", rs.Additions, rs.Deletions),
+ activityText,
+ })
+ }
+ tableOutput = t.Render()
+ }
 
- // Header section
+ // Get actual table width from first line
+ var actualWidth int
+ if tableOutput != "" {
+ lines := strings.Split(tableOutput, "\n")
+ if len(lines) > 0 {
+ actualWidth = len([]rune(lines[0])) // Use runes to handle Unicode characters correctly
+ }
+ } else {
+ actualWidth = tableWidth
+ }
+
+ // Build header with actual table width
+ var sections []string
 if config.AppConfig.DisplayStats.ShowWelcomeMessage {
 headerText := fmt.Sprintf("🚀 %s's Coding Activity", config.AppConfig.Author)
 if targetRepo != "" {
 headerText = fmt.Sprintf("🚀 %s's Activity in %s", config.AppConfig.Author, targetRepo)
 }
- padding := (tableWidth - len([]rune(headerText))) / 2
- centeredHeader := fmt.Sprintf("%*s%s%*s", padding, "", headerText, padding, "")
- sections = append(sections, headerStyle.Render(centeredHeader))
+
+ // Calculate padding manually for perfect centering; clamp at zero so a
+ // terminal narrower than the header cannot feed strings.Repeat a
+ // negative count (which would panic)
+ textWidth := len([]rune(headerText))
+ leftPadding := (actualWidth - textWidth) / 2
+ if leftPadding < 0 {
+ leftPadding = 0
+ }
+ rightPadding := actualWidth - textWidth - leftPadding
+ if rightPadding < 0 {
+ rightPadding = 0
+ }
+
+ // Build centered header with exact padding
+ centeredHeader := fmt.Sprintf("%s%s%s",
+ strings.Repeat(" ", leftPadding),
+ headerText,
+ strings.Repeat(" ", rightPadding))
+
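+ // Width/Align are intentionally no longer set on the lipgloss style:
+ // padding the string manually keeps the header in lockstep with the
+ // rune-measured table width computed above.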
+ style := lipgloss.NewStyle().
+ Bold(true).
+ Foreground(lipgloss.Color(config.AppConfig.Colors.HeaderColor))
+
+ sections = append(sections, style.Render(centeredHeader))
 }
 
- // Active projects section (table)
- if config.AppConfig.DisplayStats.ShowActiveProjects && projectsSection != "" {
- sections = append(sections, projectsSection)
+ // Add table to sections
+ if tableOutput != "" {
+ sections = append(sections, tableOutput)
 }
 
- // Insights section
+ // Build insights section
 if config.AppConfig.DisplayStats.ShowInsights {
- insights := buildInsightsSection(targetRepo)
- if insights != "" {
- sections = append(sections, insights)
+ // Insights are plain text lines read from the pre-calculated stats;
+ // no table writer is needed here
+
+ // Weekly summary
+ trend := "↗️"
+ if displayStats.WeeklyDiff < 0 {
+ trend = "↘️"
+ }
+ weeklyText := fmt.Sprintf("📈 Weekly Summary: %d commits (%s %s), +%d/-%d lines",
+ displayStats.WeeklyTotal,
+ trend,
+ formatDiff(displayStats.WeeklyDiff),
+ displayStats.TotalAdditions,
+ displayStats.TotalDeletions)
+ sections = append(sections, weeklyText)
+
+ // Daily average
+ dailyText := fmt.Sprintf("📊 Daily Average: %.1f commits", displayStats.DailyAverage)
+ sections = append(sections, dailyText)
+
+ // Language stats
+ if len(displayStats.LanguageStats) > 0 {
+ langText := "💻 Top Languages: " + formatLanguageStats(displayStats.LanguageStats)
+ sections = append(sections, langText)
 }
+
+ // Peak coding hour
+ peakText := fmt.Sprintf("⏰ Peak Coding: %02d:00-%02d:00 (%d commits)",
+ displayStats.PeakHour,
+ (displayStats.PeakHour+1)%24,
+ displayStats.PeakCommits)
+ sections = append(sections, peakText)
+
+ // Weekly goal (hardcoded for now, can be made configurable later)
+ const weeklyGoal = 200 // commits per week
+ progress := float64(displayStats.WeeklyTotal) / float64(weeklyGoal) * 100
+ goalText := fmt.Sprintf("🎯 Weekly Goal: %d%% (%d/%d commits)",
+ int(progress),
+ displayStats.WeeklyTotal,
+ weeklyGoal)
+ sections = append(sections, goalText)
 }
 
- // Join sections with dividers only if configured
+ // Join sections
 output := ""
 if config.AppConfig.ShowDividers {
 divider := strings.Repeat("─", tableWidth)
@@ -111,7 +247,6 @@ func DisplayStats(targetRepo string) {
 output += strings.TrimSpace(section)
 }
 } else {
- // Join sections directly without dividers
 for _, section := range sections {
 if section != "" {
 if output != "" {
@@ -125,6 +260,73 @@
 fmt.Println(output)
 }
 
+func formatDiff(diff int) string {
+ if diff < 0 {
+ return fmt.Sprintf("down %d", -diff)
+ }
+ return fmt.Sprintf("up %d", diff)
+}
+
+func formatLanguageStats(stats map[string]int) string {
+ type langStat struct {
+ name string
+ lines int
+ }
+ var sorted []langStat
+ for lang, lines := range stats {
+ sorted = append(sorted, langStat{lang, lines})
+ }
+ sort.Slice(sorted, func(i, j int) bool {
+ return sorted[i].lines > sorted[j].lines
+ })
+
+ var result []string
+ for i, ls := range sorted {
+ if i >= 3 {
+ break
+ }
+ icon := getLanguageIcon(ls.name)
+ result = append(result, fmt.Sprintf("%s %s (%.1fK)", icon, ls.name, float64(ls.lines)/1000))
+ }
+ return strings.Join(result, " ")
+}
+
+func getLanguageIcon(lang string) string {
+ icons := map[string]string{
+ "Go": "🔵",
+ "Java": "☕",
+ "Python": "🐍",
+ "JavaScript": "💛",
+ "TypeScript": "💙",
+ "Rust": "🦀",
+ "C++": "⚡",
+ "C": "⚡",
+ "Ruby": "💎",
+ "Shell": "🐚",
+ "File": "📄",
+ }
+ if icon, ok := icons[lang]; ok {
+ return icon
+ }
+ return "📄"
+}
+
+func formatActivityText(lastCommit time.Time) string {
+ duration := 
time.Since(lastCommit) + switch { + case duration < 24*time.Hour: + return "today" + case duration < 48*time.Hour: + return "1d ago" + case duration < 72*time.Hour: + return "2d ago" + case duration < 96*time.Hour: + return "3d ago" + default: + return fmt.Sprintf("%dd ago", int(duration.Hours()/24)) + } +} + func (c *DefaultStatsCalculator) CalculateTableWidth() int { width, _, err := term.GetSize(0) if err != nil { @@ -135,17 +337,46 @@ func (c *DefaultStatsCalculator) CalculateTableWidth() int { // prepareRepoData converts the cache map into a sorted slice of repository information func prepareRepoData() []repoInfo { - repos := make([]repoInfo, 0, len(cache.Cache)) - for path, repo := range cache.Cache { - repoName := path[strings.LastIndex(path, "/")+1:] - repos = append(repos, repoInfo{ - name: repoName, - metadata: repo, - lastCommit: repo.LastCommit, + // Pre-allocate slice with capacity + repos := make([]repoInfo, 0, cache.Cache.Len()) + + // Use parallel processing for large datasets + if cache.Cache.Len() > 10 { + var mu sync.Mutex + var wg sync.WaitGroup + + cache.Cache.Range(func(path string, repo scan.RepoMetadata) bool { + wg.Add(1) + go func(p string, r scan.RepoMetadata) { + defer wg.Done() + repoName := p[strings.LastIndex(p, "/")+1:] + info := repoInfo{ + name: repoName, + metadata: r, + lastCommit: r.LastCommit, + } + mu.Lock() + repos = append(repos, info) + mu.Unlock() + }(path, repo) + return true + }) + + wg.Wait() + } else { + // Sequential processing for small datasets + cache.Cache.Range(func(path string, repo scan.RepoMetadata) bool { + repoName := path[strings.LastIndex(path, "/")+1:] + repos = append(repos, repoInfo{ + name: repoName, + metadata: repo, + lastCommit: repo.LastCommit, + }) + return true }) } - // Sort by most recent activity + // Sort by most recent activity using direct index access sort.Slice(repos, func(i, j int) bool { return repos[i].lastCommit.After(repos[j].lastCommit) }) @@ -542,21 +773,24 @@ func buildInsightsSection(targetRepo string) string { tableWidth := calculator.CalculateTableWidth() insights := config.AppConfig.DisplayStats.InsightSettings - // Filter cache for target repository if specified - var repoCache map[string]scan.RepoMetadata - repoCache = cache.Cache + // Use pre-calculated stats from cache when possible + var repoCache = make(map[string]scan.RepoMetadata) if targetRepo != "" { - filteredCache := make(map[string]scan.RepoMetadata) - for path, repo := range cache.Cache { + cache.Cache.Range(func(path string, repo scan.RepoMetadata) bool { if strings.HasSuffix(path, "/"+targetRepo) { - filteredCache[path] = repo - break + repoCache[path] = repo + return false } - } - if len(filteredCache) == 0 { + return true + }) + if len(repoCache) == 0 { return "" } - repoCache = filteredCache + } else { + cache.Cache.Range(func(path string, repo scan.RepoMetadata) bool { + repoCache[path] = repo + return true + }) } if config.AppConfig.DetailedStats { @@ -564,11 +798,51 @@ func buildInsightsSection(targetRepo string) string { t.SetStyle(getTableStyle()) t.SetAllowedRowLength(tableWidth - 2) - // Calculate all stats - weeklyCommits, lastWeeksCommits, _, additions, deletions, hourStats := calculateGlobalStats(repoCache) + // Use cached stats when available + weeklyCommits := 0 + lastWeeksCommits := 0 + additions := 0 + deletions := 0 + hourStats := make(map[int]int) + languageStats := make(map[string]int) + + for _, repo := range repoCache { + weeklyCommits += repo.WeeklyCommits + lastWeeksCommits += repo.LastWeeksCommits + + // 
Aggregate language stats; each entry is a single map increment, so a
+ // plain loop is cheaper than fanning out per-entry goroutines behind
+ // a mutex (which only added scheduling and locking overhead)
+ for lang, lines := range repo.Languages {
+ languageStats[lang] += lines
+ }
+
+ // Use pre-calculated commit stats
+ for _, commit := range repo.CommitHistory {
+ additions += commit.Additions
+ deletions += commit.Deletions
+ hour := commit.Date.Hour()
+ hourStats[hour]++
+ }
+ }
+
 peakHour, peakCommits := findPeakCodingHour(hourStats)
 commitTrend := calculator.CalculateCommitTrend(weeklyCommits, lastWeeksCommits)
- languageStats := calculator.ProcessLanguageStats(repoCache)
 
 // Append rows based on configuration
 appendInsightRows(t, insights, insightStats{
diff --git a/scan/scan.go b/scan/scan.go
index 1eac0d4..41512b8 100644
--- a/scan/scan.go
+++ b/scan/scan.go
@@ -17,6 +17,7 @@ type CommitHistory struct {
 Date time.Time `json:"date"`
 Hash string `json:"hash"`
 MessageHead string `json:"message_head"`
+ Author string `json:"author"`
 FileCount int `json:"file_count"`
 Additions int `json:"additions"`
 Deletions int `json:"deletions"`
@@ -377,12 +378,10 @@ func fetchDetailedCommitInfo(repoPath string, author string, since time.Time) ([
 gitCmd := exec.Command("git", "-C", repoPath, "log",
 "--all",
 "--author="+author,
- "--pretty=format:%H|%aI|%s",
+ "--pretty=format:%H|%aI|%an|%s",
 "--numstat",
 "--after="+since.Format("2006-01-02"))
 
- // fmt.Printf("Debug - Running git command: %v\n", gitCmd.String())
-
 output, err := gitCmd.Output()
 if err != nil {
 return nil, fmt.Errorf("git command failed: %v", err)
@@ -396,7 +395,7 @@
 if strings.Contains(line, "|") {
 // This is a commit header line
- parts := strings.Split(line, "|")
- if len(parts) == 3 {
+ // Cap the split: a commit subject may itself contain '|' characters
+ parts := strings.SplitN(line, "|", 4)
+ if len(parts) == 4 {
 if currentCommit != nil {
 history = append(history, *currentCommit)
 }
@@ -405,7 +404,8 @@
 currentCommit = &CommitHistory{
 Hash: parts[0],
 Date: commitTime,
- MessageHead: parts[2],
+ Author: parts[2],
+ MessageHead: parts[3],
 }
 }
 } else if line != "" && currentCommit != nil {
@@ -927,3 +927,147 @@ func (m *RepoMetadata) ValidateData() ValidationResult {
 
 return result
 }
+
+// FetchRepoMetadata - gets metadata for a single repository
+func FetchRepoMetadata(repoPath string) RepoMetadata {
+ if config.AppConfig.Debug {
+ fmt.Printf("\nDebug: Fetching metadata for repo: %s\n", repoPath)
+ }
+
+ meta := RepoMetadata{
+ Path: repoPath,
+ LastAnalyzed: time.Now(),
+ }
+
+ // Check if directory exists and is accessible
+ if _, err := os.Stat(repoPath); os.IsNotExist(err) {
+ if config.AppConfig.Debug {
+ fmt.Printf("Debug: Directory not found: %s\n", repoPath)
+ }
+ return meta
+ }
+
+ // Get commit dates in a single git command
+ cmd := exec.Command("git", "-C", repoPath, "log", "--all",
+ "--pretty=format:%ci|%H|%an|%ae|%s")
+
+ if config.AppConfig.Debug {
+ fmt.Printf("Debug: Running git command: %v\n", cmd.String())
+ }
+
+ output, err := cmd.Output()
+ if err != nil {
+ if config.AppConfig.Debug {
+ fmt.Printf("Debug: Git command failed: %v\n", err)
+ }
+ return meta
+ }
+
+ if len(output) > 0 {
+ commits := strings.Split(string(output), "\n")
+ meta.CommitCount = len(commits)
+
+ if config.AppConfig.Debug {
+ fmt.Printf("Debug: Found %d commits\n", meta.CommitCount)
+ }
+
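+ // Each record is "%ci|%H|%an|%ae|%s"; the subject itself may contain
+ // '|' characters, so the parse below caps the split at five fields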
commits\n", meta.CommitCount) + } + + // Process each commit + for _, commit := range commits { + parts := strings.Split(commit, "|") + if len(parts) != 5 { + continue + } + + commitDate, err := time.Parse("2006-01-02 15:04:05 -0700", parts[0]) + if err != nil { + continue + } + + // Update last commit time + if meta.LastCommit.IsZero() || commitDate.After(meta.LastCommit) { + meta.LastCommit = commitDate + } + + // Create commit history entry + history := CommitHistory{ + Date: commitDate, + Hash: parts[1], + MessageHead: parts[4], + } + + // Get commit stats + stats := getCommitStats(repoPath, parts[1]) + history.FileCount = stats.FileCount + history.Additions = stats.Additions + history.Deletions = stats.Deletions + + meta.CommitHistory = append(meta.CommitHistory, history) + } + + // Calculate additional stats + meta.WeeklyCommits = countRecentCommits(commits, 7) + meta.MonthlyCommits = countRecentCommits(commits, 30) + meta.LastWeeksCommits = countLastWeeksCommits(commits) + + if config.AppConfig.Debug { + fmt.Printf("Debug: Weekly commits: %d\n", meta.WeeklyCommits) + fmt.Printf("Debug: Monthly commits: %d\n", meta.MonthlyCommits) + fmt.Printf("Debug: Last week's commits: %d\n", meta.LastWeeksCommits) + } + + // Calculate streak info + streakInfo := calculateStreakInfo(commits) + meta.CurrentStreak = streakInfo.Current + meta.LongestStreak = streakInfo.Longest + meta.MostActiveDay = findMostActiveDay(commits) + + // Get language statistics + if languages, err := fetchLanguageStats(repoPath); err == nil { + meta.Languages = languages + meta.TotalLines = calculateTotalLines(languages) + } + } + + return meta +} + +type commitStats struct { + FileCount int + Additions int + Deletions int +} + +func getCommitStats(repoPath, hash string) commitStats { + var stats commitStats + cmd := exec.Command("git", "-C", repoPath, "show", "--numstat", "--format=", hash) + output, err := cmd.Output() + if err != nil { + return stats + } + + lines := strings.Split(string(output), "\n") + for _, line := range lines { + if line == "" { + continue + } + parts := strings.Fields(line) + if len(parts) != 3 { + continue + } + add, _ := strconv.Atoi(parts[0]) + del, _ := strconv.Atoi(parts[1]) + stats.Additions += add + stats.Deletions += del + stats.FileCount++ + } + + return stats +} + +func calculateTotalLines(languages map[string]int) int { + total := 0 + for _, lines := range languages { + total += lines + } + return total +}