 
 static struct dma_trace_data *trace_data = NULL;
 
-static int dma_trace_new_buffer(struct dma_trace_data *d, uint32_t buffer_size)
-{
-	struct dma_trace_buf *buffer = &d->dmatb;
-
-	trace_buffer("nlb");
-
-	/* validate request */
-	if (buffer_size == 0 || buffer_size > HEAP_BUFFER_SIZE) {
-		trace_buffer_error("ebg");
-		trace_value(buffer_size);
-		return -ENOMEM;
-	}
-
-	/* allocate new buffer */
-	buffer->addr = rballoc(RZONE_RUNTIME, RFLAGS_NONE, buffer_size);
-	if (buffer->addr == NULL) {
-		trace_buffer_error("ebm");
-		return -ENOMEM;
-	}
-
-	bzero(buffer->addr, buffer_size);
-
-	buffer->size = buffer_size;
-	buffer->w_ptr = buffer->r_ptr = buffer->addr;
-	buffer->end_addr = buffer->addr + buffer->size;
-	buffer->avail = 0;
-
-	return 0;
-}
-
-static void trace_send(struct dma_trace_data *d)
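+/* work queue callback: drains the local trace buffer to the host over DMA */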
+static uint64_t trace_work(void *data, uint64_t delay)
 {
+	struct dma_trace_data *d = (struct dma_trace_data *)data;
 	struct dma_trace_buf *buffer = &d->dmatb;
 	struct dma_sg_config *config = &d->config;
 	unsigned long flags;
 	int32_t offset = 0;
 	uint32_t avail = buffer->avail;
-	uint32_t bytes_copied = avail;
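+	/* count only bytes actually copied, so a DMA error cannot over-drain avail */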
+	uint32_t bytes_copied = 0;
 	uint32_t size;
 	uint32_t hsize;
 	uint32_t lsize;
 
 	/* any data to copy ? */
 	if (avail == 0)
-		return;
+		return DMA_TRACE_US;
 
 	/* copy to host in sections if we wrap */
 	while (avail > 0) {
@@ -106,12 +77,15 @@ static void trace_send(struct dma_trace_data *d)
 		else
 			size = lsize;
 
+		/* writeback trace data */
+		dcache_writeback_region((void *)buffer->r_ptr, size);
+
 		/* copy this section to host */
-		offset = dma_copy_to_host(config, d->host_offset,
+		offset = dma_copy_to_host(&d->dc, config, d->host_offset,
 			buffer->r_ptr, size);
 		if (offset < 0) {
 			trace_buffer_error("ebb");
-			return;
+			goto out;
 		}
 
 		/* update host pointer and check for wrap */
@@ -125,44 +99,55 @@ static void trace_send(struct dma_trace_data *d)
 			buffer->r_ptr = buffer->addr;
 
 		avail -= size;
+		bytes_copied += size;
 	}
 
+out:
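+	/* publish progress under the lock; may be partial after a DMA error */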
 	spin_lock_irq(&d->lock, flags);
 	buffer->avail -= bytes_copied;
 	spin_unlock_irq(&d->lock, flags);
-}
-
-static uint64_t trace_work(void *data, uint64_t delay)
-{
-	struct dma_trace_data *d = (struct dma_trace_data *)data;
-
-	trace_send(d);
-
 	/* reschedule the trace copying work */
 	return DMA_TRACE_US;
 }
 
 int dma_trace_init(struct dma_trace_data *d)
 {
-	int err;
+	struct dma_trace_buf *buffer = &d->dmatb;
+	int ret;
 
 	trace_buffer("dtn");
 
-	/* init buffer elems */
-	list_init(&d->config.elem_list);
+	/* allocate new buffer */
+	buffer->addr = rballoc(RZONE_RUNTIME, RFLAGS_NONE, DMA_TRACE_LOCAL_SIZE);
+	if (buffer->addr == NULL) {
+		trace_buffer_error("ebm");
+		return -ENOMEM;
+	}
 
-	/* allocate local DMA buffer */
-	err = dma_trace_new_buffer(d, DMA_TRACE_LOCAL_SIZE);
-	if (err < 0) {
-		trace_buffer_error("ePb");
-		return err;
+	/* init DMA copy context */
+	ret = dma_copy_new(&d->dc, PLATFORM_TRACE_DMAC);
+	if (ret < 0) {
+		trace_buffer_error("edm");
+		rfree(buffer->addr);
+		return ret;
 	}
 
+	bzero(buffer->addr, DMA_TRACE_LOCAL_SIZE);
+
+	/* initialise the DMA buffer */
+	buffer->size = DMA_TRACE_LOCAL_SIZE;
+	buffer->w_ptr = buffer->r_ptr = buffer->addr;
+	buffer->end_addr = buffer->addr + buffer->size;
+	buffer->avail = 0;
 	d->host_offset = 0;
-	trace_data = d;
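+	/* tracing stays off until dma_trace_enable() is called */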
+	d->enabled = 0;
 
+	list_init(&d->config.elem_list);
 	work_init(&d->dmat_work, trace_work, d, WORK_ASYNC);
 	spinlock_init(&d->lock);
+	trace_data = d;
+
 	return 0;
 }
 
@@ -183,10 +168,18 @@ int dma_trace_host_buffer(struct dma_trace_data *d, struct dma_sg_elem *elem,
 	return 0;
 }
 
-void dma_trace_config_ready(struct dma_trace_data *d)
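+/* start periodic trace copying once the DMA context is valid */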
+int dma_trace_enable(struct dma_trace_data *d)
 {
+	/* validate DMA context */
+	if (d->dc.dmac == NULL || d->dc.chan < 0) {
+		trace_buffer_error("eem");
+		return -ENODEV;
+	}
+
+	/* TODO: fix crash when enabled */
+	//d->enabled = 1;
 	work_schedule_default(&d->dmat_work, DMA_TRACE_US);
-	d->ready = 1;
+	return 0;
 }
 
 void dtrace_event(const char *e, uint32_t length)
@@ -226,6 +219,6 @@ void dtrace_event(const char *e, uint32_t length)
 	spin_unlock_irq(&trace_data->lock, flags);
 
 	/* schedule copy now if buffer > 50% full */
-	if (trace_data->ready && buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2))
+	if (trace_data->enabled && buffer->avail >= (DMA_TRACE_LOCAL_SIZE / 2))
 		work_reschedule_default(&trace_data->dmat_work, 100);
 }
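
A minimal usage sketch of the reworked API (illustrative only, not part of
the commit). It assumes the call sequence implied by the diff: init
allocates the local buffer and DMA copy context, the host buffer is wired
up separately via dma_trace_host_buffer(), and enable validates the
context before scheduling trace_work. platform_trace_start() is a
hypothetical caller.

static struct dma_trace_data dt;

void platform_trace_start(void)
{
	/* allocate local buffer and claim PLATFORM_TRACE_DMAC */
	if (dma_trace_init(&dt) < 0)
		return;

	/* ... host shares its buffer via dma_trace_host_buffer() ... */

	/* checks d->dc.dmac/chan, then schedules trace_work every DMA_TRACE_US */
	if (dma_trace_enable(&dt) < 0)
		return;

	/* producers append events; trace_work drains them to the host */
	dtrace_event("hello", 5);
}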