@@ -221,46 +221,62 @@ static void scm_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
 	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
 	 * below)
 	 */
+	struct cpuinfo_arc_cache *p_l2 = &l2_info;
 	static DEFINE_SPINLOCK(lock);
 	unsigned long flags;
 	unsigned int cmd;
+	unsigned long csz;
 	u64 end;
 
 	if (sz == 0)
 		return;
 
-	cmd = ARC_CLN_CACHE_CMD_INCR;	/* Iterate over all available ways */
-	if (op == OP_INV) {
-		/* Invalidate any line in the cache whose block address is in the range */
-		cmd |= ARC_CLN_CACHE_CMD_OP_ADDR_INV;
-	} else if (op == OP_FLUSH) {
-		/* Writeback any line in the cache whose block address is in the range */
-		cmd |= ARC_CLN_CACHE_CMD_OP_ADDR_CLN;
-	} else { /* OP_FLUSH_N_INV */
-		/* Writeback any line in the cache whose block address is in
-		   the range, then invalidate. */
-		cmd |= ARC_CLN_CACHE_CMD_OP_ADDR_CLN_INV;
-	}
-
-	/*
-	 * Lower bits are ignored, no need to clip
-	 * The range specified by [{CACHE_ADDR_LO1, CACHE_ADDR_LO0,},
-	 * {CACHE_ADDR_HI1, CACHE_ADDR_HI0}] is inclusive for L2$
+	/* The number of lookups required to execute these operations is never
+	 * larger than the number of lines in the cache. If the requested region
+	 * is larger than the L2$, process it in a loop, one chunk of at most
+	 * the L2$ size at a time; some cache lines may then be processed twice.
 	 */
-	end = paddr + sz - 1;
+	while (sz) {
+		csz = sz;
+		if (csz > (p_l2->sz_k - p_l2->line_len))
+			csz = p_l2->sz_k - p_l2->line_len;
+
+		cmd = ARC_CLN_CACHE_CMD_INCR;	/* Iterate over all available ways */
+		if (op == OP_INV) {
+			/* Invalidate any line in the cache whose block address is in the range */
+			cmd |= ARC_CLN_CACHE_CMD_OP_ADDR_INV;
+		} else if (op == OP_FLUSH) {
+			/* Writeback any line in the cache whose block address is in the range */
+			cmd |= ARC_CLN_CACHE_CMD_OP_ADDR_CLN;
+		} else { /* OP_FLUSH_N_INV */
+			/* Writeback any line in the cache whose block address is in
+			 * the range, then invalidate. */
+			cmd |= ARC_CLN_CACHE_CMD_OP_ADDR_CLN_INV;
+		}
 
-	spin_lock_irqsave(&lock, flags);
+		/*
+		 * Lower bits are ignored, no need to clip
+		 * The range specified by [{CACHE_ADDR_LO1, CACHE_ADDR_LO0},
+		 * {CACHE_ADDR_HI1, CACHE_ADDR_HI0}] is inclusive for L2$
+		 */
+		end = paddr + csz - 1;
 
-	arc_cln_write_reg(ARC_CLN_CACHE_ADDR_LO0, (u32)paddr);
-	arc_cln_write_reg(ARC_CLN_CACHE_ADDR_LO1, (u64)paddr >> 32ULL);
+		spin_lock_irqsave(&lock, flags);
+
+		arc_cln_write_reg(ARC_CLN_CACHE_ADDR_LO0, (u32)paddr);
+		arc_cln_write_reg(ARC_CLN_CACHE_ADDR_LO1, (u64)paddr >> 32ULL);
 
-	arc_cln_write_reg(ARC_CLN_CACHE_ADDR_HI0, (u32)end);
-	arc_cln_write_reg(ARC_CLN_CACHE_ADDR_HI1, (u64)end >> 32ULL);
+		arc_cln_write_reg(ARC_CLN_CACHE_ADDR_HI0, (u32)end);
+		arc_cln_write_reg(ARC_CLN_CACHE_ADDR_HI1, (u64)end >> 32ULL);
 
-	arc_cln_write_reg(ARC_CLN_CACHE_CMD, cmd);
-	while (arc_cln_read_reg(ARC_CLN_CACHE_STATUS) & ARC_CLN_CACHE_STATUS_BUSY);
+		arc_cln_write_reg(ARC_CLN_CACHE_CMD, cmd);
+		while (arc_cln_read_reg(ARC_CLN_CACHE_STATUS) & ARC_CLN_CACHE_STATUS_BUSY);
 
-	spin_unlock_irqrestore(&lock, flags);
+		spin_unlock_irqrestore(&lock, flags);
+
+		sz -= csz;
+		paddr += csz;
+	}
 }
 
 /*
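
The core of the change is the chunking loop: each ARC_CLN_CACHE_CMD walks every way for the programmed address range, so the patch bounds a single command to less than one full cache worth of data. Below is a minimal stand-alone model of that pattern; L2_SIZE, L2_LINE_LEN and op_chunk() are hypothetical stand-ins for the p_l2 fields and the register-write/busy-poll sequence, not kernel names.

#include <stdio.h>

#define L2_SIZE		(256 * 1024)	/* assumed L2$ capacity, bytes */
#define L2_LINE_LEN	64		/* assumed L2$ line length, bytes */

/* Stub for programming CACHE_ADDR_{LO,HI} and polling the BUSY bit. */
static void op_chunk(unsigned long long lo, unsigned long long hi)
{
	printf("cache op on inclusive range [%#llx, %#llx]\n", lo, hi);
}

static void op_rgn(unsigned long long paddr, unsigned long sz)
{
	unsigned long csz;

	if (sz == 0)
		return;

	/* Bound each hardware command to less than one full cache of data,
	 * splitting larger requests into successive chunks. */
	while (sz) {
		csz = sz;
		if (csz > L2_SIZE - L2_LINE_LEN)
			csz = L2_SIZE - L2_LINE_LEN;

		op_chunk(paddr, paddr + csz - 1);	/* end is inclusive */

		sz -= csz;
		paddr += csz;
	}
}

int main(void)
{
	op_rgn(0x80000000ULL, 600 * 1024);	/* 600 KiB -> three chunks */
	return 0;
}

With the assumed 256 KiB cache, the 600 KiB request is issued as chunks of 262080, 262080 and 90240 bytes, matching the sz -= csz / paddr += csz advance in the patch.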
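The LO/HI writes split each 64-bit physical address across paired 32-bit registers, and because the hardware range is inclusive the end address is paddr + csz - 1. A small sketch of that split, using a hypothetical reg_write() in place of arc_cln_write_reg():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for arc_cln_write_reg(). */
static void reg_write(const char *name, uint32_t val)
{
	printf("%s <- 0x%08" PRIx32 "\n", name, val);
}

static void set_inclusive_range(uint64_t paddr, uint64_t end)
{
	reg_write("CACHE_ADDR_LO0", (uint32_t)paddr);		/* low 32 bits */
	reg_write("CACHE_ADDR_LO1", (uint32_t)(paddr >> 32));	/* high 32 bits */
	reg_write("CACHE_ADDR_HI0", (uint32_t)end);
	reg_write("CACHE_ADDR_HI1", (uint32_t)(end >> 32));
}

int main(void)
{
	/* A 64 KiB range that straddles the 4 GiB boundary: HI1 becomes 1. */
	set_inclusive_range(0xFFFFF000ULL, 0xFFFFF000ULL + 0x10000 - 1);
	return 0;
}

As the patch's comment notes, the low bits of the range are ignored by the controller, so the addresses need no explicit clipping to line boundaries.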