@@ -95,7 +95,8 @@ func GetByteBuffer() []byte {
 
 // PutByteBuffer returns a byte buffer to the pool
 func PutByteBuffer(buf []byte) {
-	ByteBufferPool.Put(buf[:0]) // Reset length but keep capacity
+	bufCopy := make([]byte, 0, cap(buf))
+	ByteBufferPool.Put(&bufCopy)
 }
 
 // GetStringBuffer retrieves a string slice from the pool
@@ -105,7 +106,8 @@ func GetStringBuffer() []string {
 
 // PutStringBuffer returns a string slice to the pool
 func PutStringBuffer(slice []string) {
-	StringBufferPool.Put(slice[:0]) // Reset length but keep capacity
+	sliceCopy := make([]string, 0, cap(slice))
+	StringBufferPool.Put(&sliceCopy)
 }
 
 // GetExtendedPosBuffer retrieves an ExtendedPos slice from the pool
@@ -115,7 +117,8 @@ func GetExtendedPosBuffer() []ExtendedPos {
 
 // PutExtendedPosBuffer returns an ExtendedPos slice to the pool
 func PutExtendedPosBuffer(slice []ExtendedPos) {
-	ExtendedPosPool.Put(slice[:0]) // Reset length but keep capacity
+	sliceCopy := make([]ExtendedPos, 0, cap(slice))
+	ExtendedPosPool.Put(&sliceCopy)
 }
 
 const (
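
The three Put helpers above now store a pointer to a slice in the pool instead of the slice value itself, which matches the usual sync.Pool guidance of pooling pointer-shaped values. The pool declarations are not part of this diff, so the following is only a minimal sketch of how ByteBufferPool and its Get/Put pair are assumed to fit together; the 4 KiB starting capacity and the main function are illustrative, not the repository's code.

```go
package main

import (
	"fmt"
	"sync"
)

// Assumed pool shape: it stores *[]byte, matching the &bufCopy handed to Put above.
var ByteBufferPool = sync.Pool{
	New: func() interface{} {
		buf := make([]byte, 0, 4096) // illustrative starting capacity
		return &buf
	},
}

// GetByteBuffer retrieves a byte buffer from the pool (signature as in the hunk above).
func GetByteBuffer() []byte {
	return *ByteBufferPool.Get().(*[]byte)
}

// PutByteBuffer returns a byte buffer to the pool, mirroring the hunk above:
// a fresh empty slice with the same capacity is pooled behind a pointer.
func PutByteBuffer(buf []byte) {
	bufCopy := make([]byte, 0, cap(buf))
	ByteBufferPool.Put(&bufCopy)
}

func main() {
	buf := GetByteBuffer()
	buf = append(buf, "hello"...)
	fmt.Println(string(buf), cap(buf))
	PutByteBuffer(buf) // buf must not be reused after it is returned to the pool
}
```
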
@@ -596,7 +599,11 @@ func (p *Parser) readFileEfficiently(path string) ([]byte, error) {
 	if err != nil {
 		return nil, err
 	}
-	defer f.Close()
+	defer func() {
+		if closeErr := f.Close(); closeErr != nil {
+			log.Printf("Error closing file: %v", closeErr)
+		}
+	}()
 
 	// Get file size to allocate buffer exactly once
 	info, err := f.Stat()
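
The hunk above replaces a bare defer f.Close() with a closure that logs the close error. Only a few lines of readFileEfficiently appear in this diff, so the sketch below is a free-function approximation of the surrounding helper, reconstructed from the visible comments (open, deferred close with logging, Stat to size the buffer once); everything outside the hunk is an assumption, not the repository's code.

```go
package main

import (
	"io"
	"log"
	"os"
)

// readFileEfficiently approximates the helper touched above: read a whole file
// into a buffer sized exactly once from Stat. The read step itself is assumed.
func readFileEfficiently(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Surface Close errors instead of silently discarding them, as in the hunk.
		if closeErr := f.Close(); closeErr != nil {
			log.Printf("Error closing file: %v", closeErr)
		}
	}()

	// Get file size to allocate buffer exactly once.
	info, err := f.Stat()
	if err != nil {
		return nil, err
	}

	buf := make([]byte, info.Size())
	if _, err := io.ReadFull(f, buf); err != nil {
		return nil, err
	}
	return buf, nil
}

func main() {
	data, err := readFileEfficiently("go.mod") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d bytes", len(data))
}
```
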
@@ -730,88 +737,6 @@ func (p *Parser) ProcessResults() {
 	}
 }
 
-func (p *Parser) parseDir(dir string) error {
-	fset := token.NewFileSet()
-	pkgs, err := parser.ParseDir(fset, dir, func(info os.FileInfo) bool {
-		valid, name := true, info.Name()
-
-		if p.ignoreTests {
-			if strings.HasSuffix(name, testSuffix) {
-				valid = false
-			}
-		}
-
-		if p.ignoreRegex != nil {
-			if p.ignoreRegex.MatchString(dir + name) {
-				valid = false
-			}
-		} else if len(p.ignore) != 0 {
-			// Fallback to non-compiled regex if compilation failed
-			match, err := regexp.MatchString(p.ignore, dir + name)
-			if err != nil {
-				log.Fatal(err)
-				return true
-			}
-			if match {
-				valid = false
-			}
-		}
-
-		return valid
-	}, 0)
-	if err != nil {
-		return err
-	}
-
-	// Process files concurrently with a workgroup
-	var wg sync.WaitGroup
-
-	// Create a flattened list of all files
-	type fileInfo struct {
-		pkg      string
-		fileName string
-		file     *ast.File
-	}
-
-	// Pre-allocate the slice with expected capacity to avoid resizing
-	files := make([]fileInfo, 0, len(pkgs)*10) // Assuming average of 10 files per package
-	for pkgName, pkg := range pkgs {
-		for fn, f := range pkg.Files {
-			files = append(files, fileInfo{
-				pkg:      pkgName,
-				fileName: fn,
-				file:     f,
-			})
-		}
-	}
-
-	// Process files concurrently using a semaphore to limit concurrency
-	sem := make(chan struct{}, p.maxConcurrency)
-	for _, fi := range files {
-		wg.Add(1)
-		sem <- struct{}{} // acquire semaphore
-
-		go func(pkg, fn string, f *ast.File) {
-			defer func() {
-				<-sem // release semaphore
-				wg.Done()
-			}()
-
-			// Create a separate visitor for this file with pre-compiled regex
-			ast.Walk(&treeVisitor{
-				fileSet:     fset,
-				packageName: pkg,
-				fileName:    fn,
-				p:           p,
-				ignoreRegex: p.ignoreStringsRegex,
-			}, f)
-		}(fi.pkg, fi.fileName, fi.file)
-	}
-
-	wg.Wait()
-	return nil
-}
-
 // Strings maps string literals to their positions in the code.
 type Strings map[string][]ExtendedPos
 
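
The removed parseDir above bounded its per-file goroutines with a buffered channel used as a counting semaphore plus a sync.WaitGroup. For reference, here is a minimal standalone sketch of that idiom; the item list, the print statement standing in for the ast.Walk call, and the fixed concurrency limit (in place of p.maxConcurrency) are all illustrative.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	const maxConcurrency = 4 // illustrative limit; parseDir used p.maxConcurrency
	items := []string{"a.go", "b.go", "c.go", "d.go", "e.go"}

	var wg sync.WaitGroup
	sem := make(chan struct{}, maxConcurrency)

	for _, it := range items {
		wg.Add(1)
		sem <- struct{}{} // acquire semaphore

		go func(name string) {
			defer func() {
				<-sem // release semaphore
				wg.Done()
			}()
			fmt.Println("processing", name) // stand-in for walking one file's AST
		}(it)
	}

	wg.Wait()
}
```
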