| /* |
| * |
| * (C) COPYRIGHT 2010-2019 ARM Limited. All rights reserved. |
| * |
| * This program is free software and is provided to you under the terms of the |
| * GNU General Public License version 2 as published by the Free Software |
| * Foundation, and any use by you of this program is subject to the terms |
| * of such GNU licence. |
| * |
| * This program is distributed in the hope that it will be useful, |
| * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| * GNU General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program; if not, you can access it online at |
| * http://www.gnu.org/licenses/gpl-2.0.html. |
| * |
| * SPDX-License-Identifier: GPL-2.0 |
| * |
| */ |
| |
| /* |
| * THIS FILE IS AUTOGENERATED BY mali_trace_generator.py. |
| * DO NOT EDIT. |
| */ |
| |
| #if !defined(_KBASE_TRACEPOINTS_H) |
| #define _KBASE_TRACEPOINTS_H |
| |
| /* Tracepoints are abstract callbacks notifying that some important |
| * software or hardware event has happened. |
| * |
| * In this particular implementation, it results into a MIPE |
| * timeline event and, in some cases, it also fires an ftrace event |
| * (a.k.a. Gator events, see details below). |
| */ |
| |
| #include "mali_kbase.h" |
| #include "mali_kbase_gator.h" |
| |
| #include <linux/types.h> |
| #include <linux/atomic.h> |
| |
| /* clang-format off */ |
| |
| struct kbase_tlstream; |
| |
| extern const size_t __obj_stream_offset; |
| extern const size_t __aux_stream_offset; |
| |
/* This macro dispatches a kbase_tlstream from
 * a kbase_device instance. Only AUX or OBJ
 * streams can be dispatched. It is aware of
 * kbase_timeline binary representation and
 * relies on offset variables:
 * __obj_stream_offset and __aux_stream_offset.
 *
 * The kbdev argument is parenthesized so that any expression (e.g. *pdev)
 * expands correctly; stype cannot be, as it is token-pasted with ##.
 */
#define __TL_DISPATCH_STREAM(kbdev, stype) \
	((struct kbase_tlstream *) \
	((u8 *)(kbdev)->timeline + __ ## stype ## _stream_offset))
| |
| struct tp_desc; |
| |
| /* Descriptors of timeline messages transmitted in object events stream. */ |
| extern const char *obj_desc_header; |
| extern const size_t obj_desc_header_size; |
| /* Descriptors of timeline messages transmitted in auxiliary events stream. */ |
| extern const char *aux_desc_header; |
| extern const size_t aux_desc_header_size; |
| |
/* MIPE representation of the atom lifecycle states. */
#define TL_ATOM_STATE_IDLE 0
#define TL_ATOM_STATE_READY 1
#define TL_ATOM_STATE_DONE 2
#define TL_ATOM_STATE_POSTED 3

/* Job slot events, mirrored from the Gator event numbering. */
#define TL_JS_EVENT_START GATOR_JOB_SLOT_START
#define TL_JS_EVENT_STOP GATOR_JOB_SLOT_STOP
#define TL_JS_EVENT_SOFT_STOP GATOR_JOB_SLOT_SOFT_STOPPED

/* Flag bit in kbdev->timeline_is_enabled marking timeline streaming active.
 * Must be an unsigned constant: left-shifting signed 1 into the sign bit
 * ((1 << 31) with 32-bit int) is undefined behaviour (C99 6.5.7).
 */
#define TLSTREAM_ENABLED (1u << 31)
| |
| void __kbase_tlstream_tl_new_ctx( |
| struct kbase_tlstream *stream, |
| const void *ctx, |
| u32 ctx_nr, |
| u32 tgid); |
| void __kbase_tlstream_tl_new_gpu( |
| struct kbase_tlstream *stream, |
| const void *gpu, |
| u32 gpu_id, |
| u32 core_count); |
| void __kbase_tlstream_tl_new_lpu( |
| struct kbase_tlstream *stream, |
| const void *lpu, |
| u32 lpu_nr, |
| u32 lpu_fn); |
| void __kbase_tlstream_tl_new_atom( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u32 atom_nr); |
| void __kbase_tlstream_tl_new_as( |
| struct kbase_tlstream *stream, |
| const void *address_space, |
| u32 as_nr); |
| void __kbase_tlstream_tl_del_ctx( |
| struct kbase_tlstream *stream, |
| const void *ctx); |
| void __kbase_tlstream_tl_del_atom( |
| struct kbase_tlstream *stream, |
| const void *atom); |
| void __kbase_tlstream_tl_lifelink_lpu_gpu( |
| struct kbase_tlstream *stream, |
| const void *lpu, |
| const void *gpu); |
| void __kbase_tlstream_tl_lifelink_as_gpu( |
| struct kbase_tlstream *stream, |
| const void *address_space, |
| const void *gpu); |
| void __kbase_tlstream_tl_ret_ctx_lpu( |
| struct kbase_tlstream *stream, |
| const void *ctx, |
| const void *lpu); |
| void __kbase_tlstream_tl_ret_atom_ctx( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| const void *ctx); |
| void __kbase_tlstream_tl_ret_atom_lpu( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| const void *lpu, |
| const char *attrib_match_list); |
| void __kbase_tlstream_tl_nret_ctx_lpu( |
| struct kbase_tlstream *stream, |
| const void *ctx, |
| const void *lpu); |
| void __kbase_tlstream_tl_nret_atom_ctx( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| const void *ctx); |
| void __kbase_tlstream_tl_nret_atom_lpu( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| const void *lpu); |
| void __kbase_tlstream_tl_ret_as_ctx( |
| struct kbase_tlstream *stream, |
| const void *address_space, |
| const void *ctx); |
| void __kbase_tlstream_tl_nret_as_ctx( |
| struct kbase_tlstream *stream, |
| const void *address_space, |
| const void *ctx); |
| void __kbase_tlstream_tl_ret_atom_as( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| const void *address_space); |
| void __kbase_tlstream_tl_nret_atom_as( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| const void *address_space); |
| void __kbase_tlstream_tl_attrib_atom_config( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u64 descriptor, |
| u64 affinity, |
| u32 config); |
| void __kbase_tlstream_tl_attrib_atom_priority( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u32 prio); |
| void __kbase_tlstream_tl_attrib_atom_state( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u32 state); |
| void __kbase_tlstream_tl_attrib_atom_prioritized( |
| struct kbase_tlstream *stream, |
| const void *atom); |
| void __kbase_tlstream_tl_attrib_atom_jit( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u64 edit_addr, |
| u64 new_addr, |
| u32 jit_flags, |
| u64 mem_flags, |
| u32 j_id, |
| u64 com_pgs, |
| u64 extent, |
| u64 va_pgs); |
| void __kbase_tlstream_tl_jit_usedpages( |
| struct kbase_tlstream *stream, |
| u64 used_pages, |
| u32 j_id); |
| void __kbase_tlstream_tl_attrib_atom_jitallocinfo( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u64 va_pgs, |
| u64 com_pgs, |
| u64 extent, |
| u32 j_id, |
| u32 bin_id, |
| u32 max_allocs, |
| u32 jit_flags, |
| u32 usg_id); |
| void __kbase_tlstream_tl_attrib_atom_jitfreeinfo( |
| struct kbase_tlstream *stream, |
| const void *atom, |
| u32 j_id); |
| void __kbase_tlstream_tl_attrib_as_config( |
| struct kbase_tlstream *stream, |
| const void *address_space, |
| u64 transtab, |
| u64 memattr, |
| u64 transcfg); |
| void __kbase_tlstream_tl_event_lpu_softstop( |
| struct kbase_tlstream *stream, |
| const void *lpu); |
| void __kbase_tlstream_tl_event_atom_softstop_ex( |
| struct kbase_tlstream *stream, |
| const void *atom); |
| void __kbase_tlstream_tl_event_atom_softstop_issue( |
| struct kbase_tlstream *stream, |
| const void *atom); |
| void __kbase_tlstream_tl_event_atom_softjob_start( |
| struct kbase_tlstream *stream, |
| const void *atom); |
| void __kbase_tlstream_tl_event_atom_softjob_end( |
| struct kbase_tlstream *stream, |
| const void *atom); |
| void __kbase_tlstream_jd_gpu_soft_reset( |
| struct kbase_tlstream *stream, |
| const void *gpu); |
| void __kbase_tlstream_aux_pm_state( |
| struct kbase_tlstream *stream, |
| u32 core_type, |
| u64 core_state_bitset); |
| void __kbase_tlstream_aux_pagefault( |
| struct kbase_tlstream *stream, |
| u32 ctx_nr, |
| u32 as_nr, |
| u64 page_cnt_change); |
| void __kbase_tlstream_aux_pagesalloc( |
| struct kbase_tlstream *stream, |
| u32 ctx_nr, |
| u64 page_cnt); |
| void __kbase_tlstream_aux_devfreq_target( |
| struct kbase_tlstream *stream, |
| u64 target_freq); |
| void __kbase_tlstream_aux_protected_enter_start( |
| struct kbase_tlstream *stream, |
| const void *gpu); |
| void __kbase_tlstream_aux_protected_enter_end( |
| struct kbase_tlstream *stream, |
| const void *gpu); |
| void __kbase_tlstream_aux_protected_leave_start( |
| struct kbase_tlstream *stream, |
| const void *gpu); |
| void __kbase_tlstream_aux_protected_leave_end( |
| struct kbase_tlstream *stream, |
| const void *gpu); |
| void __kbase_tlstream_aux_jit_stats( |
| struct kbase_tlstream *stream, |
| u32 ctx_nr, |
| u32 bid, |
| u32 max_allocs, |
| u32 allocs, |
| u32 va_pages, |
| u32 ph_pages); |
| void __kbase_tlstream_aux_event_job_slot( |
| struct kbase_tlstream *stream, |
| const void *ctx, |
| u32 slot_nr, |
| u32 atom_nr, |
| u32 event); |
| void __kbase_tlstream_tl_new_kcpuqueue( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| const void *ctx, |
| u32 kcpuq_num_pending_cmds); |
| void __kbase_tlstream_tl_ret_kcpuqueue_ctx( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| const void *ctx); |
| void __kbase_tlstream_tl_del_kcpuqueue( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_nret_kcpuqueue_ctx( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| const void *ctx); |
| void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 fence); |
| void __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 fence); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 cqs_obj_gpu_addr, |
| u32 cqs_obj_compare_value); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 cqs_obj_gpu_addr); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 debugcopy_dst_size); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 map_import_buf_gpu_addr); |
| void __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 map_import_buf_gpu_addr); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 jit_alloc_gpu_alloc_addr_dest, |
| u64 jit_alloc_va_pages, |
| u64 jit_alloc_commit_pages, |
| u64 jit_alloc_extent, |
| u32 jit_alloc_jit_id, |
| u32 jit_alloc_bin_id, |
| u32 jit_alloc_max_allocations, |
| u32 jit_alloc_flags, |
| u32 jit_alloc_usage_id); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u32 jit_alloc_jit_id); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 jit_alloc_gpu_alloc_addr, |
| u64 jit_alloc_mmu_flags); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue, |
| u64 jit_free_pages_used); |
| void __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| void __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier( |
| struct kbase_tlstream *stream, |
| const void *kcpu_queue); |
| |
| struct kbase_tlstream; |
| |
/**
 * KBASE_TLSTREAM_TL_NEW_CTX -
 * object ctx is created
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @ctx_nr: Kernel context number
 * @tgid: Thread Group Id
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NEW_CTX( \
	kbdev, \
	ctx, \
	ctx_nr, \
	tgid \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, ctx_nr, tgid); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NEW_GPU -
 * object gpu is created
 *
 * @kbdev: Kbase device
 * @gpu: Name of the GPU object
 * @gpu_id: GPU identifier
 * @core_count: Number of cores this GPU hosts
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NEW_GPU( \
	kbdev, \
	gpu, \
	gpu_id, \
	core_count \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				gpu, gpu_id, core_count); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NEW_LPU -
 * object lpu is created
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 * @lpu_nr: Sequential number assigned to the newly created LPU
 * @lpu_fn: Property describing functional abilities of this LPU
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NEW_LPU( \
	kbdev, \
	lpu, \
	lpu_nr, \
	lpu_fn \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu, lpu_nr, lpu_fn); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NEW_ATOM -
 * object atom is created
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @atom_nr: Sequential number of an atom
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NEW_ATOM( \
	kbdev, \
	atom, \
	atom_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_atom( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, atom_nr); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NEW_AS -
 * address space object is created
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @as_nr: Address space number
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NEW_AS( \
	kbdev, \
	address_space, \
	as_nr \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_new_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, as_nr); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_DEL_CTX -
 * context is destroyed
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_DEL_CTX( \
	kbdev, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_del_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_DEL_ATOM -
 * atom is destroyed
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_DEL_ATOM( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_del_atom( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU -
 * lpu is deleted with gpu
 *
 * @kbdev: Kbase device
 * @lpu: Name of the Logical Processing Unit object
 * @gpu: Name of the GPU object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_LIFELINK_LPU_GPU( \
	kbdev, \
	lpu, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_lifelink_lpu_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				lpu, gpu); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_LIFELINK_AS_GPU -
 * address space is deleted with gpu
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @gpu: Name of the GPU object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_LIFELINK_AS_GPU( \
	kbdev, \
	address_space, \
	gpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_lifelink_as_gpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, gpu); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_RET_CTX_LPU -
 * context is retained by lpu
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @lpu: Name of the Logical Processing Unit object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_RET_CTX_LPU( \
	kbdev, \
	ctx, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_ctx_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, lpu); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_RET_ATOM_CTX -
 * atom is retained by context
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @ctx: Name of the context object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_CTX( \
	kbdev, \
	atom, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, ctx); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_RET_ATOM_LPU -
 * atom is retained by lpu
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @lpu: Name of the Logical Processing Unit object
 * @attrib_match_list: List containing match operator attributes
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_LPU( \
	kbdev, \
	atom, \
	lpu, \
	attrib_match_list \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, lpu, attrib_match_list); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NRET_CTX_LPU -
 * context is released by lpu
 *
 * @kbdev: Kbase device
 * @ctx: Name of the context object
 * @lpu: Name of the Logical Processing Unit object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NRET_CTX_LPU( \
	kbdev, \
	ctx, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_ctx_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				ctx, lpu); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_CTX -
 * atom is released by context
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @ctx: Name of the context object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_CTX( \
	kbdev, \
	atom, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, ctx); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_LPU -
 * atom is released by lpu
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @lpu: Name of the Logical Processing Unit object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_LPU( \
	kbdev, \
	atom, \
	lpu \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_lpu( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, lpu); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_RET_AS_CTX -
 * address space is retained by context
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @ctx: Name of the context object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_RET_AS_CTX( \
	kbdev, \
	address_space, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_as_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, ctx); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NRET_AS_CTX -
 * address space is released by context
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @ctx: Name of the context object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NRET_AS_CTX( \
	kbdev, \
	address_space, \
	ctx \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_as_ctx( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, ctx); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_RET_ATOM_AS -
 * atom is retained by address space
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @address_space: Name of the address space object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_RET_ATOM_AS( \
	kbdev, \
	atom, \
	address_space \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_ret_atom_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, address_space); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_NRET_ATOM_AS -
 * atom is released by address space
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @address_space: Name of the address space object
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_NRET_ATOM_AS( \
	kbdev, \
	atom, \
	address_space \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_nret_atom_as( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, address_space); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG -
 * atom job slot attributes
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @descriptor: Job descriptor address
 * @affinity: Job affinity
 * @config: Job config
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_CONFIG( \
	kbdev, \
	atom, \
	descriptor, \
	affinity, \
	config \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_config( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, descriptor, affinity, config); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY -
 * atom priority
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @prio: Atom priority
 *
 * Unlike most tracepoints in this file, this one is gated on the
 * BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS flag rather than
 * TLSTREAM_ENABLED, so it fires only when latency tracing is requested.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITY( \
	kbdev, \
	atom, \
	prio \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_priority( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, prio); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE -
 * atom state
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @state: Atom state (one of the TL_ATOM_STATE_* values)
 *
 * Gated on the BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS flag rather than
 * TLSTREAM_ENABLED, so it fires only when latency tracing is requested.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_STATE( \
	kbdev, \
	atom, \
	state \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_state( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, state); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED -
 * atom caused priority change
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 *
 * Gated on the BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS flag rather than
 * TLSTREAM_ENABLED, so it fires only when latency tracing is requested.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_PRIORITIZED( \
	kbdev, \
	atom \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \
			__kbase_tlstream_tl_attrib_atom_prioritized( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT -
 * jit done for atom
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @edit_addr: Address edited by jit
 * @new_addr: Address placed into the edited location
 * @jit_flags: Flags specifying the special requirements for
 * the JIT allocation.
 * @mem_flags: Flags defining the properties of a memory region
 * @j_id: Unique ID provided by the caller, this is used
 * to pair allocation and free requests.
 * @com_pgs: The minimum number of physical pages which
 * should back the allocation.
 * @extent: Granularity of physical pages to grow the
 * allocation by during a fault.
 * @va_pgs: The minimum number of virtual pages required
 *
 * Gated on the BASE_TLSTREAM_JOB_DUMPING_ENABLED flag rather than
 * TLSTREAM_ENABLED, so it fires only when job dumping is enabled.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT( \
	kbdev, \
	atom, \
	edit_addr, \
	new_addr, \
	jit_flags, \
	mem_flags, \
	j_id, \
	com_pgs, \
	extent, \
	va_pgs \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & BASE_TLSTREAM_JOB_DUMPING_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jit( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, edit_addr, new_addr, jit_flags, mem_flags, j_id, com_pgs, extent, va_pgs); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_JIT_USEDPAGES -
 * used pages for jit
 *
 * @kbdev: Kbase device
 * @used_pages: Number of pages used for jit
 * @j_id: Unique ID provided by the caller, this is used
 * to pair allocation and free requests.
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_JIT_USEDPAGES( \
	kbdev, \
	used_pages, \
	j_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_jit_usedpages( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				used_pages, j_id); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO -
 * Information about JIT allocations
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @va_pgs: The minimum number of virtual pages required
 * @com_pgs: The minimum number of physical pages which
 * should back the allocation.
 * @extent: Granularity of physical pages to grow the
 * allocation by during a fault.
 * @j_id: Unique ID provided by the caller, this is used
 * to pair allocation and free requests.
 * @bin_id: The JIT allocation bin, used in conjunction with
 * max_allocations to limit the number of each
 * type of JIT allocation.
 * @max_allocs: Maximum allocations allowed in this bin.
 * @jit_flags: Flags specifying the special requirements for
 * the JIT allocation.
 * @usg_id: A hint about which allocation should be reused.
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITALLOCINFO( \
	kbdev, \
	atom, \
	va_pgs, \
	com_pgs, \
	extent, \
	j_id, \
	bin_id, \
	max_allocs, \
	jit_flags, \
	usg_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jitallocinfo( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, va_pgs, com_pgs, extent, j_id, bin_id, max_allocs, jit_flags, usg_id); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO -
 * Information about JIT frees
 *
 * @kbdev: Kbase device
 * @atom: Atom identifier
 * @j_id: Unique ID provided by the caller, this is used
 * to pair allocation and free requests.
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_ATOM_JITFREEINFO( \
	kbdev, \
	atom, \
	j_id \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_atom_jitfreeinfo( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				atom, j_id); \
	} while (0)
| |
/**
 * KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG -
 * address space attributes
 *
 * @kbdev: Kbase device
 * @address_space: Name of the address space object
 * @transtab: Configuration of the TRANSTAB register
 * @memattr: Configuration of the MEMATTR register
 * @transcfg: Configuration of the TRANSCFG register (or zero if not present)
 *
 * Emits into the device's object (OBJ) stream. Does nothing unless the
 * TLSTREAM_ENABLED bit of kbdev->timeline_is_enabled is set.
 */
#define KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG( \
	kbdev, \
	address_space, \
	transtab, \
	memattr, \
	transcfg \
	) \
	do { \
		int enabled = atomic_read(&kbdev->timeline_is_enabled); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_tl_attrib_as_config( \
				__TL_DISPATCH_STREAM(kbdev, obj), \
				address_space, transtab, memattr, transcfg); \
	} while (0)
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP - |
| * softstop event on given lpu |
| * |
| * @kbdev: Kbase device |
| * @lpu: Name of the Logical Processing Unit object |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_LPU_SOFTSTOP( \ |
| kbdev, \ |
| lpu \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_lpu_softstop( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| lpu); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX - |
| * atom softstopped |
| * |
| * @kbdev: Kbase device |
| * @atom: Atom identifier |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_EX( \ |
| kbdev, \ |
| atom \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_atom_softstop_ex( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| atom); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE - |
| * atom softstop issued |
| * |
| * @kbdev: Kbase device |
| * @atom: Atom identifier |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTSTOP_ISSUE( \ |
| kbdev, \ |
| atom \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_atom_softstop_issue( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| atom); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START - |
| * atom soft job has started |
| * |
| * @kbdev: Kbase device |
| * @atom: Atom identifier |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START( \ |
| kbdev, \ |
| atom \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_atom_softjob_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| atom); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END - |
| * atom soft job has completed |
| * |
| * @kbdev: Kbase device |
| * @atom: Atom identifier |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END( \ |
| kbdev, \ |
| atom \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_atom_softjob_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| atom); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_JD_GPU_SOFT_RESET - |
| * gpu soft reset |
| * |
| * @kbdev: Kbase device |
| * @gpu: Name of the GPU object |
| */ |
| #define KBASE_TLSTREAM_JD_GPU_SOFT_RESET( \ |
| kbdev, \ |
| gpu \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_jd_gpu_soft_reset( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| gpu); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PM_STATE - |
| * PM state |
| * |
| * @kbdev: Kbase device |
| * @core_type: Core type (shader, tiler, l2 cache, l3 cache) |
| * @core_state_bitset: 64bits bitmask reporting power state of the cores |
| * (1-ON, 0-OFF) |
| */ |
| #define KBASE_TLSTREAM_AUX_PM_STATE( \ |
| kbdev, \ |
| core_type, \ |
| core_state_bitset \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_aux_pm_state( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| core_type, core_state_bitset); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PAGEFAULT - |
| * Page fault |
| * |
| * @kbdev: Kbase device |
| * @ctx_nr: Kernel context number |
| * @as_nr: Address space number |
| * @page_cnt_change: Number of pages to be added |
| */ |
| #define KBASE_TLSTREAM_AUX_PAGEFAULT( \ |
| kbdev, \ |
| ctx_nr, \ |
| as_nr, \ |
| page_cnt_change \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_aux_pagefault( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| ctx_nr, as_nr, page_cnt_change); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PAGESALLOC - |
| * Total alloc pages change |
| * |
| * @kbdev: Kbase device |
| * @ctx_nr: Kernel context number |
| * @page_cnt: Number of pages used by the context |
| */ |
| #define KBASE_TLSTREAM_AUX_PAGESALLOC( \ |
| kbdev, \ |
| ctx_nr, \ |
| page_cnt \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_aux_pagesalloc( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| ctx_nr, page_cnt); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_DEVFREQ_TARGET - |
| * New device frequency target |
| * |
| * @kbdev: Kbase device |
| * @target_freq: New target frequency |
| */ |
| #define KBASE_TLSTREAM_AUX_DEVFREQ_TARGET( \ |
| kbdev, \ |
| target_freq \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_aux_devfreq_target( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| target_freq); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START - |
| * enter protected mode start |
| * |
| * @kbdev: Kbase device |
| * @gpu: Name of the GPU object |
| */ |
| #define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_START( \ |
| kbdev, \ |
| gpu \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \ |
| __kbase_tlstream_aux_protected_enter_start( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| gpu); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END - |
| * enter protected mode end |
| * |
| * @kbdev: Kbase device |
| * @gpu: Name of the GPU object |
| */ |
| #define KBASE_TLSTREAM_AUX_PROTECTED_ENTER_END( \ |
| kbdev, \ |
| gpu \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \ |
| __kbase_tlstream_aux_protected_enter_end( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| gpu); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START - |
| * leave protected mode start |
| * |
| * @kbdev: Kbase device |
| * @gpu: Name of the GPU object |
| */ |
| #define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_START( \ |
| kbdev, \ |
| gpu \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \ |
| __kbase_tlstream_aux_protected_leave_start( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| gpu); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END - |
| * leave protected mode end |
| * |
| * @kbdev: Kbase device |
| * @gpu: Name of the GPU object |
| */ |
| #define KBASE_TLSTREAM_AUX_PROTECTED_LEAVE_END( \ |
| kbdev, \ |
| gpu \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & BASE_TLSTREAM_ENABLE_LATENCY_TRACEPOINTS) \ |
| __kbase_tlstream_aux_protected_leave_end( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| gpu); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_JIT_STATS - |
| * per-bin JIT statistics |
| * |
| * @kbdev: Kbase device |
| * @ctx_nr: Kernel context number |
| * @bid: JIT bin id |
| * @max_allocs: Maximum allocations allowed in this bin. |
| * @allocs: Number of active allocations in this bin |
| * @va_pages: Number of virtual pages allocated in this bin |
| * @ph_pages: Number of physical pages allocated in this bin |
| */ |
| #define KBASE_TLSTREAM_AUX_JIT_STATS( \ |
| kbdev, \ |
| ctx_nr, \ |
| bid, \ |
| max_allocs, \ |
| allocs, \ |
| va_pages, \ |
| ph_pages \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_aux_jit_stats( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| ctx_nr, bid, max_allocs, allocs, va_pages, ph_pages); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT - |
| * event on a given job slot |
| * |
| * @kbdev: Kbase device |
| * @ctx: Name of the context object |
| * @slot_nr: Job slot number |
| * @atom_nr: Sequential number of an atom |
| * @event: Event type. One of TL_JS_EVENT values |
| */ |
| #define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT( \ |
| kbdev, \ |
| ctx, \ |
| slot_nr, \ |
| atom_nr, \ |
| event \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_aux_event_job_slot( \ |
| __TL_DISPATCH_STREAM(kbdev, aux), \ |
| ctx, slot_nr, atom_nr, event); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_NEW_KCPUQUEUE - |
| * New KCPU Queue |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @ctx: Name of the context object |
| * @kcpuq_num_pending_cmds: Number of commands already enqueued |
| * in the KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_NEW_KCPUQUEUE( \ |
| kbdev, \ |
| kcpu_queue, \ |
| ctx, \ |
| kcpuq_num_pending_cmds \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_new_kcpuqueue( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, ctx, kcpuq_num_pending_cmds); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX - |
| * Context retains KCPU Queue |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @ctx: Name of the context object |
| */ |
| #define KBASE_TLSTREAM_TL_RET_KCPUQUEUE_CTX( \ |
| kbdev, \ |
| kcpu_queue, \ |
| ctx \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_ret_kcpuqueue_ctx( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, ctx); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_DEL_KCPUQUEUE - |
| * Delete KCPU Queue |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_DEL_KCPUQUEUE( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_del_kcpuqueue( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX - |
| * Context releases KCPU Queue |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @ctx: Name of the context object |
| */ |
| #define KBASE_TLSTREAM_TL_NRET_KCPUQUEUE_CTX( \ |
| kbdev, \ |
| kcpu_queue, \ |
| ctx \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_nret_kcpuqueue_ctx( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, ctx); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL - |
| * KCPU Queue enqueues Signal on Fence |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @fence: Fence object handle |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_SIGNAL( \ |
| kbdev, \ |
| kcpu_queue, \ |
| fence \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_signal( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, fence); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT - |
| * KCPU Queue enqueues Wait on Fence |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @fence: Fence object handle |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_FENCE_WAIT( \ |
| kbdev, \ |
| kcpu_queue, \ |
| fence \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_enqueue_fence_wait( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, fence); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT - |
| * Begin array of KCPU Queue enqueues Wait on Cross Queue Sync Object |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_WAIT( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_wait( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT - |
| * Array item of KCPU Queue enqueues Wait on Cross Queue Sync Object |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @cqs_obj_gpu_addr: CQS Object GPU ptr |
| * @cqs_obj_compare_value: Semaphore value that should be exceeded |
| * for the WAIT to pass |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_WAIT( \ |
| kbdev, \ |
| kcpu_queue, \ |
| cqs_obj_gpu_addr, \ |
| cqs_obj_compare_value \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_wait( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, cqs_obj_gpu_addr, cqs_obj_compare_value); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT - |
| * End array of KCPU Queue enqueues Wait on Cross Queue Sync Object |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_WAIT( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_wait( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET - |
| * Begin array of KCPU Queue enqueues Set on Cross Queue Sync Object |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_CQS_SET( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_cqs_set( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET - |
| * Array item of KCPU Queue enqueues Set on Cross Queue Sync Object |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @cqs_obj_gpu_addr: CQS Object GPU ptr |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_CQS_SET( \ |
| kbdev, \ |
| kcpu_queue, \ |
| cqs_obj_gpu_addr \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_cqs_set( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, cqs_obj_gpu_addr); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET - |
| * End array of KCPU Queue enqueues Set on Cross Queue Sync Object |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_CQS_SET( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_cqs_set( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY - |
| * Begin array of KCPU Queue enqueues Debug Copy |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_debugcopy( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY - |
| * Array item of KCPU Queue enqueues Debug Copy |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @debugcopy_dst_size: Debug Copy destination size |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \ |
| kbdev, \ |
| kcpu_queue, \ |
| debugcopy_dst_size \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_debugcopy( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, debugcopy_dst_size); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY - |
| * End array of KCPU Queue enqueues Debug Copy |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_DEBUGCOPY( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_debugcopy( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT - |
| * KCPU Queue enqueues Map Import |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @map_import_buf_gpu_addr: Map import buffer GPU ptr |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_MAP_IMPORT( \ |
| kbdev, \ |
| kcpu_queue, \ |
| map_import_buf_gpu_addr \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_enqueue_map_import( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, map_import_buf_gpu_addr); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT - |
| * KCPU Queue enqueues Unmap Import |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @map_import_buf_gpu_addr: Map import buffer GPU ptr |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_ENQUEUE_UNMAP_IMPORT( \ |
| kbdev, \ |
| kcpu_queue, \ |
| map_import_buf_gpu_addr \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_enqueue_unmap_import( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, map_import_buf_gpu_addr); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC - |
| * Begin array of KCPU Queue enqueues JIT Alloc |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_alloc( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC - |
| * Array item of KCPU Queue enqueues JIT Alloc |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @jit_alloc_gpu_alloc_addr_dest: The GPU virtual address to write |
| * the JIT allocated GPU virtual address to |
| * @jit_alloc_va_pages: The minimum number of virtual pages required |
| * @jit_alloc_commit_pages: The minimum number of physical pages which |
| * should back the allocation |
| * @jit_alloc_extent: Granularity of physical pages to grow the allocation |
| * by during a fault |
| * @jit_alloc_jit_id: Unique ID provided by the caller, this is used |
| * to pair allocation and free requests. Zero is not a valid value |
| * @jit_alloc_bin_id: The JIT allocation bin, used in conjunction with |
| * max_allocations to limit the number of each type of JIT allocation |
| * @jit_alloc_max_allocations: The maximum number of allocations |
| * allowed within the bin specified by bin_id. Should be the same for all |
| * JIT allocations within the same bin. |
| * @jit_alloc_flags: Flags specifying the special requirements for the |
| * JIT allocation |
| * @jit_alloc_usage_id: A hint about which allocation should be |
| * reused. The kernel should attempt to use a previous allocation with the same |
| * usage_id |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \ |
| kbdev, \ |
| kcpu_queue, \ |
| jit_alloc_gpu_alloc_addr_dest, \ |
| jit_alloc_va_pages, \ |
| jit_alloc_commit_pages, \ |
| jit_alloc_extent, \ |
| jit_alloc_jit_id, \ |
| jit_alloc_bin_id, \ |
| jit_alloc_max_allocations, \ |
| jit_alloc_flags, \ |
| jit_alloc_usage_id \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_alloc( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, jit_alloc_gpu_alloc_addr_dest, jit_alloc_va_pages, jit_alloc_commit_pages, jit_alloc_extent, jit_alloc_jit_id, jit_alloc_bin_id, jit_alloc_max_allocations, jit_alloc_flags, jit_alloc_usage_id); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC - |
| * End array of KCPU Queue enqueues JIT Alloc |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_ALLOC( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_alloc( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE - |
| * Begin array of KCPU Queue enqueues JIT Free |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_ENQUEUE_JIT_FREE( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_enqueue_jit_free( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE - |
| * Array item of KCPU Queue enqueues JIT Free |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @jit_alloc_jit_id: Unique ID provided by the caller, this is used |
| * to pair allocation and free requests. Zero is not a valid value |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_ENQUEUE_JIT_FREE( \ |
| kbdev, \ |
| kcpu_queue, \ |
| jit_alloc_jit_id \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_enqueue_jit_free( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, jit_alloc_jit_id); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE - |
| * End array of KCPU Queue enqueues JIT Free |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_ENQUEUE_JIT_FREE( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_enqueue_jit_free( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START - |
| * KCPU Queue starts a Signal on Fence |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END - |
| * KCPU Queue ends a Signal on Fence |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_SIGNAL_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_fence_signal_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START - |
| * KCPU Queue starts a Wait on Fence |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END - |
| * KCPU Queue ends a Wait on Fence |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_FENCE_WAIT_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_fence_wait_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START - |
| * KCPU Queue starts a Wait on an array of Cross Queue Sync Objects |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END - |
| * KCPU Queue ends a Wait on an array of Cross Queue Sync Objects |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_WAIT_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_wait_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START - |
| * KCPU Queue starts a Set on an array of Cross Queue Sync Objects |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END - |
| * KCPU Queue ends a Set on an array of Cross Queue Sync Objects |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_CQS_SET_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_cqs_set_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START - |
| * KCPU Queue starts an array of Debug Copys |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END - |
| * KCPU Queue ends an array of Debug Copys |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_DEBUGCOPY_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_debugcopy_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START - |
| * KCPU Queue starts a Map Import |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END - |
| * KCPU Queue ends a Map Import |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_MAP_IMPORT_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_map_import_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START - |
| * KCPU Queue starts an Unmap Import |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END - |
| * KCPU Queue ends an Unmap Import |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_UNMAP_IMPORT_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_unmap_import_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START - |
| * KCPU Queue starts an array of JIT Allocs |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_ALLOC_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_jit_alloc_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - |
| * Begin array of KCPU Queue ends an array of JIT Allocs |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_alloc_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - |
| * Array item of KCPU Queue ends an array of JIT Allocs |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @jit_alloc_gpu_alloc_addr: The JIT allocated GPU virtual address |
| * @jit_alloc_mmu_flags: The MMU flags for the JIT allocation |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \ |
| kbdev, \ |
| kcpu_queue, \ |
| jit_alloc_gpu_alloc_addr, \ |
| jit_alloc_mmu_flags \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_alloc_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, jit_alloc_gpu_alloc_addr, jit_alloc_mmu_flags); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END - |
| * End array of KCPU Queue ends an array of JIT Allocs |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_ALLOC_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_alloc_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START - |
| * KCPU Queue starts an array of JIT Frees |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_JIT_FREE_START( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_jit_free_start( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END - |
| * Begin array of KCPU Queue ends an array of JIT Frees |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_BEGIN_KCPUQUEUE_EXECUTE_JIT_FREE_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_begin_kcpuqueue_execute_jit_free_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END - |
| * Array item of KCPU Queue ends an array of JIT Frees |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| * @jit_free_pages_used: The actual number of pages used by the JIT |
| * allocation |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_ITEM_KCPUQUEUE_EXECUTE_JIT_FREE_END( \ |
| kbdev, \ |
| kcpu_queue, \ |
| jit_free_pages_used \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_item_kcpuqueue_execute_jit_free_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue, jit_free_pages_used); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END - |
| * End array of KCPU Queue ends an array of JIT Frees |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_ARRAY_END_KCPUQUEUE_EXECUTE_JIT_FREE_END( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_array_end_kcpuqueue_execute_jit_free_end( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| /** |
| * KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER - |
| * KCPU Queue executes an Error Barrier |
| * |
| * @kbdev: Kbase device |
| * @kcpu_queue: KCPU queue |
| */ |
| #define KBASE_TLSTREAM_TL_EVENT_KCPUQUEUE_EXECUTE_ERRORBARRIER( \ |
| kbdev, \ |
| kcpu_queue \ |
| ) \ |
| do { \ |
| int enabled = atomic_read(&kbdev->timeline_is_enabled); \ |
| if (enabled & TLSTREAM_ENABLED) \ |
| __kbase_tlstream_tl_event_kcpuqueue_execute_errorbarrier( \ |
| __TL_DISPATCH_STREAM(kbdev, obj), \ |
| kcpu_queue); \ |
| } while (0) |
| |
| |
/* Gator tracepoints are hooked into the TLSTREAM interface.
 * When any of the following tracepoints is called, the
 * corresponding Gator tracepoint is called as well.
 */
| |
| #if defined(CONFIG_MALI_GATOR_SUPPORT) |
| /* `event` is one of TL_JS_EVENT values here. |
| * The values of TL_JS_EVENT are guaranteed to match |
| * with corresponding GATOR_JOB_SLOT values. |
| */ |
/* Job slot event: always forwarded to Gator; streamed to the
 * timeline only when tracing is enabled.
 */
#undef KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT
#define KBASE_TLSTREAM_AUX_EVENT_JOB_SLOT(kbdev, \
	context, slot_nr, atom_nr, event) \
	do { \
		/* Parenthesize macro arguments so any caller expression works. */ \
		int enabled = atomic_read(&(kbdev)->timeline_is_enabled); \
		kbase_trace_mali_job_slots_event((kbdev)->id, \
			GATOR_MAKE_EVENT(event, slot_nr), \
			context, (u8)(atom_nr)); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_event_job_slot( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				context, slot_nr, atom_nr, event); \
	} while (0)
| |
/* Power management state change: always forwarded to Gator;
 * streamed to the timeline only when tracing is enabled.
 */
#undef KBASE_TLSTREAM_AUX_PM_STATE
#define KBASE_TLSTREAM_AUX_PM_STATE(kbdev, core_type, state) \
	do { \
		/* Parenthesize the macro argument so any caller expression works. */ \
		int enabled = atomic_read(&(kbdev)->timeline_is_enabled); \
		kbase_trace_mali_pm_status((kbdev)->id, \
			core_type, state); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pm_state( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				core_type, state); \
	} while (0)
| |
/* Page fault page-count change: always forwarded to Gator;
 * streamed to the timeline only when tracing is enabled.
 */
#undef KBASE_TLSTREAM_AUX_PAGEFAULT
#define KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, \
	ctx_nr, as_nr, page_cnt_change) \
	do { \
		/* Parenthesize the macro argument so any caller expression works. */ \
		int enabled = atomic_read(&(kbdev)->timeline_is_enabled); \
		kbase_trace_mali_page_fault_insert_pages((kbdev)->id, \
			as_nr, \
			page_cnt_change); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagefault( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, as_nr, page_cnt_change); \
	} while (0)
| |
/* kbase_trace_mali_total_alloc_pages_change is handled differently here.
 * We stream the total amount of pages allocated for `kbdev` rather
 * than `page_count`, which is per-context.
 */
#undef KBASE_TLSTREAM_AUX_PAGESALLOC
#define KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, ctx_nr, page_cnt) \
	do { \
		/* Parenthesize the macro argument so any caller expression works. */ \
		int enabled = atomic_read(&(kbdev)->timeline_is_enabled); \
		u32 global_pages_count = \
			atomic_read(&(kbdev)->memdev.used_pages); \
		\
		kbase_trace_mali_total_alloc_pages_change((kbdev)->id, \
			global_pages_count); \
		if (enabled & TLSTREAM_ENABLED) \
			__kbase_tlstream_aux_pagesalloc( \
				__TL_DISPATCH_STREAM(kbdev, aux), \
				ctx_nr, page_cnt); \
	} while (0)
| #endif /* CONFIG_MALI_GATOR_SUPPORT */ |
| |
| /* clang-format on */ |
| #endif |