@@ -111,14 +111,14 @@ static void alloc_memset_region(void *ptr, uint32_t bytes, uint32_t val)
 #endif
 
 /* allocate from system memory pool */
-static void *rmalloc_sys(int zone, size_t bytes)
+static void *rmalloc_sys(int zone, int core, size_t bytes)
 {
 	void *ptr;
 	struct mm_heap *cpu_heap;
 	size_t alignment = 0;
 
-	/* use the heap dedicated for the current core */
-	cpu_heap = memmap.system + cpu_get_id();
+	/* use the heap dedicated for the selected core */
+	cpu_heap = cache_to_uncache(memmap.system + core);
 
 	/* align address to dcache line size */
 	if (cpu_heap->info.used % PLATFORM_DCACHE_ALIGN)
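
Now that one core may allocate from another core's heap, the heap descriptor has to be accessed through the uncached alias: if the bookkeeping fields (info.used, info.free) were updated through the allocating core's dcache, the owning core could read stale values. cache_to_uncache() is the platform helper that performs that remapping. A minimal sketch of the idea, assuming a platform where the cached and uncached SRAM windows differ only by a fixed base offset; the base addresses below are illustrative placeholders, not the real platform values, and the actual helper lives in the platform headers:

#include <stdint.h>

/* Placeholder bases for illustration only; real values are platform-defined. */
#define SRAM_CACHED_BASE	0x9e000000
#define SRAM_UNCACHED_BASE	0xbe000000

/* Remap a pointer from the cached SRAM alias to the uncached alias, so
 * accesses bypass the local dcache and are visible to all cores.
 */
static inline void *cache_to_uncache_sketch(void *addr)
{
	return (void *)((uintptr_t)addr - SRAM_CACHED_BASE + SRAM_UNCACHED_BASE);
}
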
@@ -434,7 +434,7 @@ void *rmalloc(int zone, uint32_t caps, size_t bytes)
 
 	switch (zone & RZONE_TYPE_MASK) {
 	case RZONE_SYS:
-		ptr = rmalloc_sys(zone, bytes);
+		ptr = rmalloc_sys(zone, cpu_get_id(), bytes);
 		break;
 	case RZONE_RUNTIME:
 		ptr = rmalloc_runtime(zone, caps, bytes);
@@ -460,6 +460,22 @@ void *rzalloc(int zone, uint32_t caps, size_t bytes)
 	return ptr;
 }
 
+void *rzalloc_core_sys(int core, size_t bytes)
+{
+	uint32_t flags;
+	void *ptr = NULL;
+
+	spin_lock_irq(&memmap.lock, flags);
+
+	ptr = rmalloc_sys(RZONE_SYS, core, bytes);
+	if (ptr)
+		bzero(ptr, bytes);
+
+	spin_unlock_irq(&memmap.lock, flags);
+
+	return ptr;
+}
+
 /* allocates continuous buffers */
 void *rballoc(int zone, uint32_t caps, size_t bytes)
 {
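
The new rzalloc_core_sys() entry point is intended for cross-core setup: for example, the primary core can hand a zeroed, system-zone block to a secondary core before starting it. It takes memmap.lock internally, so callers do not wrap it in their own locking. A hedged usage sketch; struct core_context and secondary_core are illustrative placeholders, not part of this change:

/* Core 0 prepares a zeroed per-core context for a secondary core
 * before bringing it online.
 */
struct core_context *ctx;
int secondary_core = 1;	/* placeholder core id */

ctx = rzalloc_core_sys(secondary_core, sizeof(*ctx));
if (!ctx)
	panic(SOF_IPC_PANIC_MEM);

As with other RZONE_SYS allocations, the block is not individually freeable; the system heap is only reset wholesale (see free_heap() below).
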
@@ -567,7 +583,7 @@ void free_heap(int zone)
 		panic(SOF_IPC_PANIC_MEM);
 	}
 
-	cpu_heap = memmap.system + cpu_get_id();
+	cpu_heap = cache_to_uncache(memmap.system + cpu_get_id());
 	cpu_heap->info.used = 0;
 	cpu_heap->info.free = cpu_heap->size;
 }