/*
 * xtensa/core-macros.h -- C specific definitions
 *                         that depend on CORE configuration
 *
 * Copyright (c) 2012 Tensilica Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
29 #ifndef XTENSA_CACHE_H
30 #define XTENSA_CACHE_H
32 #include <xtensa/config/core.h>
34 /* Only define things for C code. */
35 #if !defined(_ASMLANGUAGE) && !defined(_NOCLANGUAGE) && !defined(__ASSEMBLER__)
/***************************   CACHE   ***************************/

/* All the macros are in the lower case now and some of them
 * share the name with the existing functions from hal.h.
 * Including this header file will define XTHAL_USE_CACHE_MACROS
 * which directs hal.h not to use the functions.
 */

/*
 *  Single-cache-line operations in C-callable inline assembly.
 *  Essentially macro versions (uppercase) of:
 *
 *	xthal_icache_line_invalidate(void *addr);
 *	xthal_icache_line_lock(void *addr);
 *	xthal_icache_line_unlock(void *addr);
 *	xthal_icache_sync(void);
 *
 *  NOTE: unlike the above functions, the following macros do NOT
 *  execute the xthal_icache_sync() as part of each line operation.
 *  This sync must be called explicitly by the caller.  This is to
 *  allow better optimization when operating on more than one line.
 *
 *	xthal_dcache_line_invalidate(void *addr);
 *	xthal_dcache_line_writeback(void *addr);
 *	xthal_dcache_line_writeback_inv(void *addr);
 *	xthal_dcache_line_lock(void *addr);
 *	xthal_dcache_line_unlock(void *addr);
 *	xthal_dcache_sync(void);
 *	xthal_dcache_line_prefetch_for_write(void *addr);
 *	xthal_dcache_line_prefetch_for_read(void *addr);
 *
 *  All are made memory-barriers, given that's how they're typically used
 *  (ops operate on a whole line, so clobbers all memory not just *addr).
 *
 *  NOTE: All the block cache ops and line prefetches are implemented
 *  using intrinsics so they are better optimized regarding memory barriers etc.
 *
 *  All block downgrade functions exist in two forms: with and without
 *  the 'max' parameter: This parameter allows compiler to optimize
 *  the functions whenever the parameter is smaller than the cache size.
 *
 *	xthal_dcache_block_invalidate(void *addr, unsigned size);
 *	xthal_dcache_block_writeback(void *addr, unsigned size);
 *	xthal_dcache_block_writeback_inv(void *addr, unsigned size);
 *	xthal_dcache_block_invalidate_max(void *addr, unsigned size, unsigned max);
 *	xthal_dcache_block_writeback_max(void *addr, unsigned size, unsigned max);
 *	xthal_dcache_block_writeback_inv_max(void *addr, unsigned size, unsigned max);
 *
 *	xthal_dcache_block_prefetch_for_read(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_for_write(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_modify(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_read_write(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_for_read_grp(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_for_write_grp(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_modify_grp(void *addr, unsigned size);
 *	xthal_dcache_block_prefetch_read_write_grp(void *addr, unsigned size)
 *
 *	xthal_dcache_block_wait();
 *	xthal_dcache_block_required_wait();
 *	xthal_dcache_block_abort();
 *	xthal_dcache_block_prefetch_end();
 *	xthal_dcache_block_newgrp();
 */
/*** INSTRUCTION CACHE ***/

/* Signals to <xtensa/hal.h> that macro versions of the cache operations
 * are provided by this header, so hal.h must not declare/use the
 * equivalent out-of-line functions. */
#define XTHAL_USE_CACHE_MACROS
#if XCHAL_ICACHE_SIZE > 0
/* Invalidate the single I-cache line containing 'addr'.
 * NOTE: does not perform the isync; caller must invoke
 * xthal_icache_sync() after the last line operation. */
# define xthal_icache_line_invalidate(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ihi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
# define xthal_icache_line_invalidate(addr) do {/*nothing*/} while(0)
#endif
#if XCHAL_ICACHE_SIZE > 0 && XCHAL_ICACHE_LINE_LOCKABLE
/* Prefetch-and-lock / unlock the I-cache line containing 'addr'.
 * No isync here; caller must invoke xthal_icache_sync() when done. */
# define xthal_icache_line_lock(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ipfl %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_icache_line_unlock(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("ihu %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
# define xthal_icache_line_lock(addr) do {/*nothing*/} while(0)
# define xthal_icache_line_unlock(addr) do {/*nothing*/} while(0)
#endif
/*
 * Even if a config doesn't have caches, an isync is still needed
 * when instructions in any memory are modified, whether by a loader
 * or self-modifying code.  Therefore, this macro always produces
 * an isync, whether or not an icache is present.
 */
#define xthal_icache_sync() \
		__asm__ __volatile__("isync":::"memory")
/*** DATA CACHE ***/

#if XCHAL_DCACHE_SIZE > 0

# include <xtensa/tie/xt_datacache.h>

/* Single D-cache-line operations.  No dsync is issued per line;
 * caller invokes xthal_dcache_sync() after the last line op. */
# define xthal_dcache_line_invalidate(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_line_writeback(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhwb %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_line_writeback_inv(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhwbi %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_sync() \
		__asm__ __volatile__("" /*"dsync"?*/:::"memory")
# define xthal_dcache_line_prefetch_for_read(addr) do { \
		XT_DPFR((const int*)addr, 0); \
		} while(0)
#else
# define xthal_dcache_line_invalidate(addr) do {/*nothing*/} while(0)
# define xthal_dcache_line_writeback(addr) do {/*nothing*/} while(0)
# define xthal_dcache_line_writeback_inv(addr) do {/*nothing*/} while(0)
# define xthal_dcache_sync() __asm__ __volatile__("":::"memory")
# define xthal_dcache_line_prefetch_for_read(addr) do {/*nothing*/} while(0)
#endif
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_LINE_LOCKABLE
/* Prefetch-and-lock / unlock the D-cache line containing 'addr'. */
# define xthal_dcache_line_lock(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dpfl %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
# define xthal_dcache_line_unlock(addr) do { void *__a = (void*)(addr); \
		__asm__ __volatile__("dhu %0, 0" :: "a"(__a) : "memory"); \
		} while(0)
#else
# define xthal_dcache_line_lock(addr) do {/*nothing*/} while(0)
# define xthal_dcache_line_unlock(addr) do {/*nothing*/} while(0)
#endif
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_DCACHE_IS_WRITEBACK
/* Prefetch-for-write only makes sense on a writeback D-cache. */
# define xthal_dcache_line_prefetch_for_write(addr) do { \
		XT_DPFW((const int*)addr, 0); \
		} while(0)
#else
# define xthal_dcache_line_prefetch_for_write(addr) do {/*nothing*/} while(0)
#endif
187 /***** Block Operations *****/
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS

/* upgrades: prefetch at most 'size' bytes into the cache */

# define _XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, type) \
	{ \
	type((const int*)addr, size); \
	}

/* downgrades: operate on at most one whole D-cache worth of data
 * per intrinsic call, looping until 'size' bytes are covered */

# define _XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type) \
	unsigned _s = size; \
	unsigned _a = addr; \
	do { \
		unsigned __s = (_s > XCHAL_DCACHE_SIZE) ? \
				XCHAL_DCACHE_SIZE : _s; \
		type((const int*)_a, __s); \
		_s -= __s; \
		_a += __s; \
	} while(_s > 0);

/* when 'max' is a compile-time bound <= cache size, the loop above
 * can be elided entirely */
# define _XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, type, max) \
	if (max <= XCHAL_DCACHE_SIZE) { \
		unsigned _s = size; \
		unsigned _a = addr; \
		type((const int*)_a, _s); \
	} \
	else { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, type); \
	}

# define xthal_dcache_block_invalidate(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHI_B); \
		} while(0)
# define xthal_dcache_block_writeback(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWB_B); \
		} while(0)
# define xthal_dcache_block_writeback_inv(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE(addr, size, XT_DHWBI_B); \
		} while(0)

# define xthal_dcache_block_invalidate_max(addr, size, max) do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHI_B, max); \
		} while(0)
# define xthal_dcache_block_writeback_max(addr, size, max) do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWB_B, max); \
		} while(0)
# define xthal_dcache_block_writeback_inv_max(addr, size, max) do { \
		_XTHAL_DCACHE_BLOCK_DOWNGRADE_MAX(addr, size, XT_DHWBI_B, max); \
		} while(0)

/* upgrades that are performed even with write-thru caches */

# define xthal_dcache_block_prefetch_read_write(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \
		} while(0)
# define xthal_dcache_block_prefetch_read_write_grp(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \
		} while(0)
# define xthal_dcache_block_prefetch_for_read(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_B); \
		} while(0)
# define xthal_dcache_block_prefetch_for_read_grp(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFR_BF); \
		} while(0)

/* abort all or end optional block cache operations */
# define xthal_dcache_block_abort() do { \
		XT_PFEND_A(); \
		} while(0)
# define xthal_dcache_block_end() do { \
		XT_PFEND_O(); \
		} while(0)

/* wait for all/required block cache operations to finish */
# define xthal_dcache_block_wait() do { \
		XT_PFWAIT_A(); \
		} while(0)
# define xthal_dcache_block_required_wait() do { \
		XT_PFWAIT_R(); \
		} while(0)
/* Start a new group */
# define xthal_dcache_block_newgrp() do { \
		XT_PFNXT_F(); \
		} while(0)
#else
# define xthal_dcache_block_invalidate(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback_inv(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_invalidate_max(addr, size, max) do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback_max(addr, size, max) do {/*nothing*/} while(0)
# define xthal_dcache_block_writeback_inv_max(addr, size, max) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_read_write(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_read_write_grp(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_for_read(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_for_read_grp(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_end() do {/*nothing*/} while(0)
# define xthal_dcache_block_abort() do {/*nothing*/} while(0)
# define xthal_dcache_block_wait() do {/*nothing*/} while(0)
# define xthal_dcache_block_required_wait() do {/*nothing*/} while(0)
# define xthal_dcache_block_newgrp() do {/*nothing*/} while(0)
#endif
#if XCHAL_DCACHE_SIZE > 0 && XCHAL_HAVE_CACHE_BLOCKOPS && XCHAL_DCACHE_IS_WRITEBACK
/* write-oriented block prefetches: only meaningful on writeback caches */
# define xthal_dcache_block_prefetch_for_write(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_B); \
		} while(0)
# define xthal_dcache_block_prefetch_modify(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_B); \
		} while(0)
# define xthal_dcache_block_prefetch_for_write_grp(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFW_BF); \
		} while(0)
# define xthal_dcache_block_prefetch_modify_grp(addr, size) do { \
		_XTHAL_DCACHE_BLOCK_UPGRADE(addr, size, XT_DPFM_BF); \
		} while(0)
#else
# define xthal_dcache_block_prefetch_for_write(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_modify(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_for_write_grp(addr, size) do {/*nothing*/} while(0)
# define xthal_dcache_block_prefetch_modify_grp(addr, size) do {/*nothing*/} while(0)
#endif
/***************************   INTERRUPTS   ***************************/

/*
 *  Macro versions of:
 *	unsigned  xthal_get_intenable( void );
 *	void      xthal_set_intenable( unsigned );
 *	unsigned  xthal_get_interrupt( void );
 *	void      xthal_set_intset( unsigned );
 *	void      xthal_set_intclear( unsigned );
 *	unsigned  xthal_get_ccount(void);
 *	void      xthal_set_ccompare(int, unsigned);
 *	unsigned  xthal_get_ccompare(int);
 *
 *  NOTE: for {set,get}_ccompare, the first argument MUST be a decimal constant.
 */
#if XCHAL_HAVE_INTERRUPTS
/* Getters are GCC statement-expressions yielding the SR value;
 * setters are wsr's with a "memory" clobber so they order against
 * surrounding accesses. */
# define XTHAL_GET_INTENABLE()	({ int __intenable; \
				__asm__("rsr.intenable %0" : "=a"(__intenable)); \
				__intenable; })
# define XTHAL_SET_INTENABLE(v)	do { int __intenable = (int)(v); \
			__asm__ __volatile__("wsr.intenable %0" :: "a"(__intenable):"memory"); \
				} while(0)
# define XTHAL_GET_INTERRUPT()	({ int __interrupt; \
				__asm__("rsr.interrupt %0" : "=a"(__interrupt)); \
				__interrupt; })
# define XTHAL_SET_INTSET(v)	do { int __interrupt = (int)(v); \
			__asm__ __volatile__("wsr.intset %0" :: "a"(__interrupt):"memory"); \
				} while(0)
# define XTHAL_SET_INTCLEAR(v)	do { int __interrupt = (int)(v); \
			__asm__ __volatile__("wsr.intclear %0" :: "a"(__interrupt):"memory"); \
				} while(0)
# define XTHAL_GET_CCOUNT()	({ int __ccount; \
				__asm__("rsr.ccount %0" : "=a"(__ccount)); \
				__ccount; })
# define XTHAL_SET_CCOUNT(v)	do { int __ccount = (int)(v); \
			__asm__ __volatile__("wsr.ccount %0" :: "a"(__ccount):"memory"); \
				} while(0)
/* 'n' must be a decimal constant: it is pasted into the SR name */
# define _XTHAL_GET_CCOMPARE(n)	({ int __ccompare; \
				__asm__("rsr.ccompare" #n " %0" : "=a"(__ccompare)); \
				__ccompare; })
# define XTHAL_GET_CCOMPARE(n)	_XTHAL_GET_CCOMPARE(n)
# define _XTHAL_SET_CCOMPARE(n,v)	do { int __ccompare = (int)(v); \
			__asm__ __volatile__("wsr.ccompare" #n " %0 ; esync" :: "a"(__ccompare):"memory"); \
				} while(0)
# define XTHAL_SET_CCOMPARE(n,v)	_XTHAL_SET_CCOMPARE(n,v)
#else
# define XTHAL_GET_INTENABLE()		0
# define XTHAL_SET_INTENABLE(v)		do {/*nothing*/} while(0)
# define XTHAL_GET_INTERRUPT()		0
# define XTHAL_SET_INTSET(v)		do {/*nothing*/} while(0)
# define XTHAL_SET_INTCLEAR(v)		do {/*nothing*/} while(0)
# define XTHAL_GET_CCOUNT()		0
# define XTHAL_SET_CCOUNT(v)		do {/*nothing*/} while(0)
# define XTHAL_GET_CCOMPARE(n)		0
# define XTHAL_SET_CCOMPARE(n,v)	do {/*nothing*/} while(0)
#endif
/***************************   MISC   ***************************/

/*
 *  Macro or inline versions of:
 *	void	  xthal_clear_regcached_code( void );
 *	unsigned  xthal_get_prid( void );
 *	unsigned  xthal_compare_and_set( int *addr, int testval, int setval );
 */

/* NOTE(review): the configuration guards below were reconstructed from the
 * upstream Xtensa HAL header; confirm against the shipped core-macros.h. */
#if XCHAL_HAVE_LOOPS
/* Writing LCOUNT flushes any cached loop state. */
# define XTHAL_CLEAR_REGCACHED_CODE() \
		__asm__ __volatile__("wsr.lcount %0" :: "a"(0) : "memory")
#else
# define XTHAL_CLEAR_REGCACHED_CODE() do {/*nothing*/} while(0)
#endif

#if XCHAL_HAVE_PRID
# define XTHAL_GET_PRID()	({ int __prid; \
				__asm__("rsr.prid %0" : "=a"(__prid)); \
				__prid; })
#else
# define XTHAL_GET_PRID()	0
#endif
/* Atomically: if (*addr == testval) *addr = setval.
 * Returns the value read from *addr (== testval on success).
 * Uses S32C1I when available; otherwise masks interrupts around a
 * load/compare/store sequence; otherwise (no interrupts) a plain
 * non-atomic sequence. */
static inline unsigned XTHAL_COMPARE_AND_SET( int *addr, int testval, int setval )
{
    int result;

#if XCHAL_HAVE_S32C1I && XCHAL_HW_MIN_VERSION_MAJOR >= 2200
    __asm__ __volatile__ (
	"   wsr.scompare1 %2 \n"
	"   s32c1i %0, %3, 0 \n"
	    : "=a"(result) : "0" (setval), "a" (testval), "a" (addr)
	    : "memory");
#elif XCHAL_HAVE_INTERRUPTS
    int tmp;
    __asm__ __volatile__ (
        "   rsil   %4, 15 \n"		// %4 == saved ps
        "   l32i   %0, %3, 0 \n"	// %0 == value to test, return val
        "   bne    %2, %0, 9f \n"	// test
        "   s32i   %1, %3, 0 \n"	// write the new value
	"9: wsr.ps %4 ; rsync \n"	// restore the PS
	    : "=a"(result)
	    : "0" (setval), "a" (testval), "a" (addr), "a" (tmp)
	    : "memory");
#else
    __asm__ __volatile__ (
        "   l32i  %0, %3, 0 \n"		// %0 == value to test, return val
        "   bne   %2, %0, 9f \n"	// test
        "   s32i  %1, %3, 0 \n"		// write the new value
	"9: \n"
	    : "=a"(result) : "0" (setval), "a" (testval), "a" (addr)
	    : "memory");
#endif
    return result;
}
431 #if XCHAL_HAVE_EXTERN_REGS
433 static inline unsigned XTHAL_RER (unsigned int reg)
437 __asm__ __volatile__ (
439 : "=a" (result) : "a" (reg) : "memory");
444 static inline void XTHAL_WER (unsigned reg, unsigned value)
446 __asm__ __volatile__ (
448 : : "a" (value), "a" (reg) : "memory");
451 #endif /* XCHAL_HAVE_EXTERN_REGS */
455 #endif /*XTENSA_CACHE_H*/