Mirror of https://github.com/ivuorinen/gibidify.git
feat: many features, check TODO.md
cli/processor.go (new file, 210 lines)
@@ -0,0 +1,210 @@
package cli

import (
    "context"
    "os"
    "sync"

    "github.com/sirupsen/logrus"

    "github.com/ivuorinen/gibidify/config"
    "github.com/ivuorinen/gibidify/fileproc"
    "github.com/ivuorinen/gibidify/utils"
)

// Processor handles the main file processing logic.
type Processor struct {
    flags        *Flags
    backpressure *fileproc.BackpressureManager
    ui           *UIManager
}

// NewProcessor creates a new processor with the given flags.
func NewProcessor(flags *Flags) *Processor {
    ui := NewUIManager()

    // Configure UI based on flags
    ui.SetColorOutput(!flags.NoColors)
    ui.SetProgressOutput(!flags.NoProgress)

    return &Processor{
        flags:        flags,
        backpressure: fileproc.NewBackpressureManager(),
        ui:           ui,
    }
}

// Process executes the main file processing workflow.
func (p *Processor) Process(ctx context.Context) error {
    // Configure file type registry
    p.configureFileTypes()

    // Print startup info with colors
    p.ui.PrintHeader("🚀 Starting gibidify")
    p.ui.PrintInfo("Format: %s", p.flags.Format)
    p.ui.PrintInfo("Source: %s", p.flags.SourceDir)
    p.ui.PrintInfo("Destination: %s", p.flags.Destination)
    p.ui.PrintInfo("Workers: %d", p.flags.Concurrency)

    // Collect files with progress indication
    p.ui.PrintInfo("📁 Collecting files...")
    files, err := p.collectFiles()
    if err != nil {
        return err
    }

    // Show collection results
    p.ui.PrintSuccess("Found %d files to process", len(files))

    // Process files
    return p.processFiles(ctx, files)
}

// configureFileTypes configures the file type registry.
func (p *Processor) configureFileTypes() {
    if config.GetFileTypesEnabled() {
        fileproc.ConfigureFromSettings(
            config.GetCustomImageExtensions(),
            config.GetCustomBinaryExtensions(),
            config.GetCustomLanguages(),
            config.GetDisabledImageExtensions(),
            config.GetDisabledBinaryExtensions(),
            config.GetDisabledLanguageExtensions(),
        )
    }
}

// collectFiles collects all files to be processed.
func (p *Processor) collectFiles() ([]string, error) {
    files, err := fileproc.CollectFiles(p.flags.SourceDir)
    if err != nil {
        return nil, utils.WrapError(err, utils.ErrorTypeProcessing, utils.CodeProcessingCollection, "error collecting files")
    }
    logrus.Infof("Found %d files to process", len(files))
    return files, nil
}

// processFiles processes the collected files.
func (p *Processor) processFiles(ctx context.Context, files []string) error {
    outFile, err := p.createOutputFile()
    if err != nil {
        return err
    }
    defer func() {
        utils.LogError("Error closing output file", outFile.Close())
    }()

    // Initialize back-pressure and channels
    p.ui.PrintInfo("⚙️ Initializing processing...")
    p.backpressure.LogBackpressureInfo()
    fileCh, writeCh := p.backpressure.CreateChannels()
    writerDone := make(chan struct{})

    // Start writer
    go fileproc.StartWriter(outFile, writeCh, writerDone, p.flags.Format, p.flags.Prefix, p.flags.Suffix)

    // Start workers
    var wg sync.WaitGroup
    p.startWorkers(ctx, &wg, fileCh, writeCh)

    // Start progress bar
    p.ui.StartProgress(len(files), "📝 Processing files")

    // Send files to workers
    if err := p.sendFiles(ctx, files, fileCh); err != nil {
        p.ui.FinishProgress()
        return err
    }

    // Wait for completion
    p.waitForCompletion(&wg, writeCh, writerDone)
    p.ui.FinishProgress()

    p.logFinalStats()
    p.ui.PrintSuccess("Processing completed. Output saved to %s", p.flags.Destination)
    return nil
}

// createOutputFile creates the output file.
func (p *Processor) createOutputFile() (*os.File, error) {
    outFile, err := os.Create(p.flags.Destination) // #nosec G304 - destination is user-provided CLI arg
    if err != nil {
        return nil, utils.WrapError(err, utils.ErrorTypeIO, utils.CodeIOFileCreate, "failed to create output file").WithFilePath(p.flags.Destination)
    }
    return outFile, nil
}

// startWorkers starts the worker goroutines.
func (p *Processor) startWorkers(ctx context.Context, wg *sync.WaitGroup, fileCh chan string, writeCh chan fileproc.WriteRequest) {
    for range p.flags.Concurrency {
        wg.Add(1)
        go p.worker(ctx, wg, fileCh, writeCh)
    }
}

// worker is the worker goroutine function.
func (p *Processor) worker(ctx context.Context, wg *sync.WaitGroup, fileCh chan string, writeCh chan fileproc.WriteRequest) {
    defer wg.Done()
    for {
        select {
        case <-ctx.Done():
            return
        case filePath, ok := <-fileCh:
            if !ok {
                return
            }
            p.processFile(filePath, writeCh)
        }
    }
}

// processFile processes a single file.
func (p *Processor) processFile(filePath string, writeCh chan fileproc.WriteRequest) {
    absRoot, err := utils.GetAbsolutePath(p.flags.SourceDir)
    if err != nil {
        utils.LogError("Failed to get absolute path", err)
        return
    }
    fileproc.ProcessFile(filePath, writeCh, absRoot)

    // Update progress bar
    p.ui.UpdateProgress(1)
}

// sendFiles sends files to the worker channels with back-pressure handling.
func (p *Processor) sendFiles(ctx context.Context, files []string, fileCh chan string) error {
    defer close(fileCh)

    for _, fp := range files {
        // Check if we should apply back-pressure
        if p.backpressure.ShouldApplyBackpressure(ctx) {
            p.backpressure.ApplyBackpressure(ctx)
        }

        // Wait for channel space if needed
        p.backpressure.WaitForChannelSpace(ctx, fileCh, nil)

        select {
        case <-ctx.Done():
            return ctx.Err()
        case fileCh <- fp:
        }
    }
    return nil
}

// waitForCompletion waits for all workers to complete.
func (p *Processor) waitForCompletion(wg *sync.WaitGroup, writeCh chan fileproc.WriteRequest, writerDone chan struct{}) {
    wg.Wait()
    close(writeCh)
    <-writerDone
}

// logFinalStats logs the final back-pressure statistics.
func (p *Processor) logFinalStats() {
    stats := p.backpressure.GetStats()
    if stats.Enabled {
        logrus.Infof("Back-pressure stats: processed=%d files, memory=%dMB/%dMB",
            stats.FilesProcessed, stats.CurrentMemoryUsage/1024/1024, stats.MaxMemoryUsage/1024/1024)
    }
}