imx233: make sure dma descriptors are cache friendly
Because DMA descriptors need to be committed to and discarded from the cache, nasty side effects can occur on adjacent data if the descriptors are not cache aligned and/or their size is not a multiple of the cache line size. The same applies to DMA buffers, which are still potentially broken. Add a macro to ensure that these constraints cannot be broken by mistake in the future.

Change-Id: I1dd69a5a9c29796c156d953eaa57c0d281e79846
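The hunk below only shows the macro being used; its definition is not part of this excerpt. A minimal sketch of what such a compile-time check could look like, assuming C11 _Static_assert is available and CACHEALIGN_SIZE names the cache line size (32 bytes on imx233):

/* Assumed stand-in for the target's cache line size; 32 bytes on imx233. */
#ifndef CACHEALIGN_SIZE
#define CACHEALIGN_SIZE 32
#endif

/* Fail the build if a struct's size is not a multiple of the cache line,
 * so that committing or discarding one descriptor from the cache can
 * never clobber adjacent data sharing the same line. */
#define __ENSURE_STRUCT_CACHE_FRIENDLY(name) \
    _Static_assert(sizeof(name) % CACHEALIGN_SIZE == 0, \
                   "struct size is not a multiple of the cache line size");

It is used at file scope right after the struct definition, as the diff below does for struct i2c_dma_command_t. Note that this only covers the size half of the constraint; the alignment half is handled by CACHEALIGN_ATTR on the struct declaration itself.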
parent 1adc474771
commit 1b6e8cba62
5 changed files with 34 additions and 7 deletions
@@ -34,7 +34,11 @@ struct i2c_dma_command_t
     struct apb_dma_command_t dma;
     /* PIO words */
     uint32_t ctrl0;
-};
+    /* padded to next multiple of cache line size (32 bytes) */
+    uint32_t pad[4];
+} __attribute__((packed)) CACHEALIGN_ATTR;
+
+__ENSURE_STRUCT_CACHE_FRIENDLY(struct i2c_dma_command_t)
 
 #define I2C_NR_STAGES 4
 /* Current transfer */
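The pad[4] sizing works out if, as is typical, apb_dma_command_t is the three-word APB DMA descriptor (next pointer, command word, buffer pointer): three words plus the single ctrl0 PIO word is 16 bytes, and four more uint32_t words bring sizeof(struct i2c_dma_command_t) to exactly 32 bytes, one full cache line. CACHEALIGN_ATTR then guarantees the struct starts on a line boundary, so cache maintenance on the descriptor touches no neighboring data.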