@@ -47,9 +47,7 @@ static uint64_t trace_work(void *data, uint64_t delay)
 	struct dma_trace_buf *buffer = &d->dmatb;
 	struct dma_sg_config *config = &d->config;
 	unsigned long flags;
-	int32_t offset = 0;
 	uint32_t avail = buffer->avail;
-	uint32_t bytes_copied = 0;
-	uint32_t size;
+	int32_t size;
 	uint32_t hsize;
 	uint32_t lsize;
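With the drain loop below removed, the offset and bytes_copied accumulators are redundant: each pass copies a single section tracked entirely in size, which becomes signed so a negative return from dma_copy_to_host_nowait() can actually be detected.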
@@ -58,53 +56,51 @@ static uint64_t trace_work(void *data, uint64_t delay)
 	if (avail == 0)
 		return DMA_TRACE_US;
 
+	/* make sure we don't write more than the buffer size */
+	if (avail > DMA_TRACE_LOCAL_SIZE)
+		avail = DMA_TRACE_LOCAL_SIZE;
+
 	/* copy to host in sections if we wrap */
-	while (avail > 0) {
-
-		lsize = hsize = avail;
-
-		/* host buffer wrap ? */
-		if (d->host_offset + buffer->avail > d->host_size)
-			hsize = d->host_offset + buffer->avail - d->host_size;
-
-		/* local buffer wrap ? */
-		if (buffer->r_ptr > buffer->w_ptr)
-			lsize = buffer->end_addr - buffer->r_ptr;
-
-		/* get smallest size */
-		if (hsize < lsize)
-			size = hsize;
-		else
-			size = lsize;
-
-		/* writeback trace data */
-		dcache_writeback_region((void *)buffer->r_ptr, size);
-
-		/* copy this section to host */
-		offset = dma_copy_to_host(&d->dc, config, d->host_offset,
-			buffer->r_ptr, size);
-		if (offset < 0) {
-			trace_buffer_error("ebb");
-			goto out;
-		}
-
-		/* update host pointer and check for wrap */
-		d->host_offset += size;
-		if (d->host_offset + size >= d->host_size)
-			d->host_offset = 0;
-
-		/* update local pointer and check for wrap */
-		buffer->r_ptr += size;
-		if (buffer->r_ptr >= buffer->end_addr)
-			buffer->r_ptr = buffer->addr;
-
-		avail -= size;
-		bytes_copied += size;
+	lsize = hsize = avail;
+
+	/* host buffer wrap ? */
+	if (d->host_offset + avail > d->host_size)
+		hsize = d->host_size - d->host_offset;
+
+	/* local buffer wrap ? */
+	if (buffer->r_ptr + avail > buffer->end_addr)
+		lsize = buffer->end_addr - buffer->r_ptr;
+
+	/* get smallest size */
+	if (hsize < lsize)
+		size = hsize;
+	else
+		size = lsize;
+
+	/* writeback trace data */
+	dcache_writeback_region((void *)buffer->r_ptr, size);
+
+	/* copy this section to host */
+	size = dma_copy_to_host_nowait(&d->dc, config, d->host_offset,
+		buffer->r_ptr, size);
+	if (size < 0) {
+		trace_buffer_error("ebb");
+		goto out;
 	}
 
+	/* update host pointer and check for wrap */
+	d->host_offset += size;
+	if (d->host_offset + size >= d->host_size)
+		d->host_offset = 0;
+
+	/* update local pointer and check for wrap */
+	buffer->r_ptr += size;
+	if (buffer->r_ptr >= buffer->end_addr)
+		buffer->r_ptr = buffer->addr;
+
 out:
 	spin_lock_irq(&d->lock, flags);
-	buffer->avail -= bytes_copied;
+	buffer->avail -= size;
 	spin_unlock_irq(&d->lock, flags);
 
 	/* reschedule the trace copying work */
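The rewrite drops the drain-everything while loop: each pass copies one contiguous section, clamped to whichever wrap point (host or local) is nearer, and the rescheduled work picks up the remainder. It also fixes the old hsize computation, which yielded the overflow past the end of the host buffer rather than the room left before it. A minimal standalone sketch of the clamping, with hypothetical helper and parameter names standing in for the dma_trace_data and dma_trace_buf fields:

#include <stdint.h>

/* hypothetical helper: clamp a copy to the nearer wrap point */
static uint32_t trace_copy_size(uint32_t avail,
	uint32_t host_offset, uint32_t host_size,
	uintptr_t r_ptr, uintptr_t end_addr)
{
	uint32_t hsize = avail;
	uint32_t lsize = avail;

	/* room left before the host buffer wraps */
	if (host_offset + avail > host_size)
		hsize = host_size - host_offset;

	/* room left before the local buffer wraps */
	if (r_ptr + avail > end_addr)
		lsize = (uint32_t)(end_addr - r_ptr);

	/* copy only the smaller contiguous run */
	return hsize < lsize ? hsize : lsize;
}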
@@ -172,32 +168,28 @@ int dma_trace_enable(struct dma_trace_data *d)
 {
 	/* validate DMA context */
 	if (d->dc.dmac == NULL || d->dc.chan < 0) {
-		trace_buffer_error("eem");
+		trace_error_atomic(TRACE_CLASS_BUFFER, "eem");
 		return -ENODEV;
 	}
 
 	/* TODO: fix crash when enabled */
-	// d->enabled = 1;
+	d->enabled = 1;
 	work_schedule_default(&d->dmat_work, DMA_TRACE_US);
 	return 0;
 }
 
-void dtrace_event(const char *e, uint32_t length)
+static void dtrace_add_event(const char *e, uint32_t length)
 {
-	struct dma_trace_buf *buffer = NULL;
-	int margin = 0;
-	unsigned long flags;
+	struct dma_trace_buf *buffer = &trace_data->dmatb;
+	int margin;
 
-	if (trace_data == NULL || length == 0)
-		return;
+	margin = buffer->end_addr - buffer->w_ptr;
 
-	buffer = &trace_data->dmatb;
-	if (buffer == NULL)
+	/* validate */
+	if (margin <= 0) {
+		trace_buffer_error("emm");
 		return;
-
-	spin_lock_irq(&trace_data->lock, flags);
-
-	margin = buffer->end_addr - buffer->w_ptr;
+	}
 
 	/* check for buffer wrap */
 	if (margin > length) {
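dtrace_add_event() now computes the free margin up front and leaves all locking to its callers. The wrap copy that follows the context above (unchanged, so outside this hunk) splits an oversized event across the end of the ring. A self-contained sketch under assumed types, with the dma_trace_buf pointer fields simplified to plain char pointers:

#include <stdint.h>
#include <string.h>

struct trace_ring {		/* hypothetical stand-in for dma_trace_buf */
	char *addr;		/* ring base */
	char *end_addr;		/* one past the last byte */
	char *w_ptr;		/* next write position */
	uint32_t avail;		/* bytes not yet copied to the host */
};

static void ring_add(struct trace_ring *b, const char *e, uint32_t length)
{
	uint32_t margin = (uint32_t)(b->end_addr - b->w_ptr);

	if (margin > length) {
		/* event fits before the end of the ring */
		memcpy(b->w_ptr, e, length);
		b->w_ptr += length;
	} else {
		/* fill to the end, then wrap to the base */
		memcpy(b->w_ptr, e, margin);
		memcpy(b->addr, e + margin, length - margin);
		b->w_ptr = b->addr + (length - margin);
	}

	b->avail += length;
}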
@@ -216,9 +208,45 @@ void dtrace_event(const char *e, uint32_t length)
 	}
 
 	buffer->avail += length;
+}
+
+void dtrace_event(const char *e, uint32_t length)
+{
+	struct dma_trace_buf *buffer = NULL;
+	unsigned long flags;
+
+	if (trace_data == NULL || length == 0)
+		return;
+
+	if (!trace_data->enabled)
+		return;
+
+	buffer = &trace_data->dmatb;
+	if (buffer == NULL)
+		return;
+
+	spin_lock_irq(&trace_data->lock, flags);
+	dtrace_add_event(e, length);
 	spin_unlock_irq(&trace_data->lock, flags);
 
 	/* schedule copy now if buffer > 50% full */
 	if (trace_data->enabled && buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2))
 		work_reschedule_default(&trace_data->dmat_work, 100);
 }
+
+void dtrace_event_atomic(const char *e, uint32_t length)
+{
+	struct dma_trace_buf *buffer = NULL;
+
+	if (trace_data == NULL || length == 0)
+		return;
+
+	if (!trace_data->enabled)
+		return;
+
+	buffer = &trace_data->dmatb;
+	if (buffer == NULL)
+		return;
+
+	dtrace_add_event(e, length);
+}
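The result is one unlocked worker shared by two entry points: dtrace_event() serialises normal writers with the trace spinlock, while dtrace_event_atomic() takes no lock and relies on the caller already running with exclusion, which is what lets trace_error_atomic() work from paths where grabbing the lock would be unsafe. A hedged usage sketch; the calling contexts here are assumptions, not part of this diff:

/* thread context: the wrapper takes trace_data->lock internally */
void thread_context_example(void)
{
	dtrace_event("abc", 3);
}

/* IRQ or error context: re-taking a non-recursive spinlock could
 * deadlock, so use the lock-free variant and rely on the caller's
 * exclusion
 */
void irq_context_example(void)
{
	dtrace_event_atomic("edf", 3);
}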