blob: 62d1c88a8d262c23330c1ceec04154a04acf87ea [file] [log] [blame]
/****************************************************************************
*
* The MIT License (MIT)
*
* Copyright (c) 2014 - 2020 Vivante Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*****************************************************************************
*
* The GPL License (GPL)
*
* Copyright (C) 2014 - 2020 Vivante Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
*****************************************************************************
*
* Note: This software is released under dual MIT and GPL licenses. A
* recipient may use this file under the terms of either the MIT license or
* GPL License. If you wish to use only one license not the other, you can
* indicate your decision by deleting one of the above license notices in your
* version of this file.
*
*****************************************************************************/
#include "gc_hal_kernel_precomp.h"
#if gcdDEC_ENABLE_AHB
#include "viv_dec300_main.h"
#endif
#if gcdCAPTURE_ONLY_MODE
#include "arch/gc_hal_kernel_context.h"
#endif
#define _GC_OBJ_ZONE gcvZONE_KERNEL
/*******************************************************************************
***** Version Signature *******************************************************/
/* Two-level stringification: the outer macro expands its argument (the
 * gcvVERSION_* numbers) before the inner macro stringizes it. */
#define _gcmTXT2STR(t) #t
#define gcmTXT2STR(t) _gcmTXT2STR(t)
/* Version signature embedded in the binary; the "$VERSION$...$" wrapper
 * makes it discoverable by scanning the object for the marker string. */
const char * _VERSION = "\n\0$VERSION$"
                        gcmTXT2STR(gcvVERSION_MAJOR) "."
                        gcmTXT2STR(gcvVERSION_MINOR) "."
                        gcmTXT2STR(gcvVERSION_PATCH) ":"
                        gcmTXT2STR(gcvVERSION_BUILD) "$\n";
/******************************************************************************\
******************************* gckKERNEL API Code ******************************
\******************************************************************************/
#if gcmIS_DEBUG(gcdDEBUG_TRACE)
#define gcmDEFINE2TEXT(d) #d
/* Human-readable names of dispatch commands, used by debug tracing.
 * NOTE(review): entries appear to be indexed by the gcvHAL_* command code
 * value — keep the order in sync with the command-code enum; confirm. */
gctCONST_STRING _DispatchText[] =
{
    gcmDEFINE2TEXT(gcvHAL_CHIP_INFO),
    gcmDEFINE2TEXT(gcvHAL_VERSION),
    gcmDEFINE2TEXT(gcvHAL_SET_TIMEOUT),
    gcmDEFINE2TEXT(gcvHAL_QUERY_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_IDENTITY),
    gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_OPTION),
    gcmDEFINE2TEXT(gcvHAL_QUERY_CHIP_FREQUENCY),
    gcmDEFINE2TEXT(gcvHAL_ALLOCATE_NON_PAGED_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_FREE_NON_PAGED_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_WRAP_USER_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_RELEASE_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_LOCK_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_UNLOCK_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_EXPORT_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_NAME_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_IMPORT_VIDEO_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_MAP_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_UNMAP_MEMORY),
    gcmDEFINE2TEXT(gcvHAL_CACHE),
    gcmDEFINE2TEXT(gcvHAL_ATTACH),
    gcmDEFINE2TEXT(gcvHAL_DETACH),
    gcmDEFINE2TEXT(gcvHAL_EVENT_COMMIT),
    gcmDEFINE2TEXT(gcvHAL_COMMIT),
    gcmDEFINE2TEXT(gcvHAL_COMMIT_DONE),
    gcmDEFINE2TEXT(gcvHAL_USER_SIGNAL),
    gcmDEFINE2TEXT(gcvHAL_SIGNAL),
    gcmDEFINE2TEXT(gcvHAL_WRITE_DATA),
    gcmDEFINE2TEXT(gcvHAL_READ_REGISTER),
    gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER),
    gcmDEFINE2TEXT(gcvHAL_READ_REGISTER_EX),
    gcmDEFINE2TEXT(gcvHAL_WRITE_REGISTER_EX),
    gcmDEFINE2TEXT(gcvHAL_GET_PROFILE_SETTING),
    gcmDEFINE2TEXT(gcvHAL_SET_PROFILE_SETTING),
    gcmDEFINE2TEXT(gcvHAL_READ_PROFILER_REGISTER_SETTING),
    gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS_PART1),
    gcmDEFINE2TEXT(gcvHAL_READ_ALL_PROFILE_REGISTERS_PART2),
    gcmDEFINE2TEXT(gcvHAL_PROFILE_REGISTERS_2D),
    gcmDEFINE2TEXT(gcvHAL_SET_POWER_MANAGEMENT_STATE),
    gcmDEFINE2TEXT(gcvHAL_QUERY_POWER_MANAGEMENT_STATE),
    gcmDEFINE2TEXT(gcvHAL_CONFIG_POWER_MANAGEMENT),
    gcmDEFINE2TEXT(gcvHAL_GET_BASE_ADDRESS),
    gcmDEFINE2TEXT(gcvHAL_SET_IDLE),
    gcmDEFINE2TEXT(gcvHAL_RESET),
    gcmDEFINE2TEXT(gcvHAL_SET_DEBUG_LEVEL_ZONE),
    gcmDEFINE2TEXT(gcvHAL_DEBUG_DUMP),
    gcmDEFINE2TEXT(gcvHAL_UPDATE_DEBUG_CALLBACK),
    gcmDEFINE2TEXT(gcvHAL_CONFIG_CTX_FRAMEWORK),
    gcmDEFINE2TEXT(gcvHAL_DUMP_GPU_STATE),
    gcmDEFINE2TEXT(gcvHAL_DUMP_EVENT),
    gcmDEFINE2TEXT(gcvHAL_DUMP_GPU_PROFILE),
    gcmDEFINE2TEXT(gcvHAL_TIMESTAMP),
    gcmDEFINE2TEXT(gcvHAL_DATABASE),
    gcmDEFINE2TEXT(gcvHAL_GET_FRAME_INFO),
    gcmDEFINE2TEXT(gcvHAL_QUERY_COMMAND_BUFFER),
    gcmDEFINE2TEXT(gcvHAL_SET_FSCALE_VALUE),
    gcmDEFINE2TEXT(gcvHAL_GET_FSCALE_VALUE),
    gcmDEFINE2TEXT(gcvHAL_QUERY_RESET_TIME_STAMP),
    gcmDEFINE2TEXT(gcvHAL_CREATE_NATIVE_FENCE),
    gcmDEFINE2TEXT(gcvHAL_WAIT_NATIVE_FENCE),
    gcmDEFINE2TEXT(gcvHAL_SHBUF),
    gcmDEFINE2TEXT(gcvHAL_GET_GRAPHIC_BUFFER_FD),
    gcmDEFINE2TEXT(gcvHAL_SET_VIDEO_MEMORY_METADATA),
    gcmDEFINE2TEXT(gcvHAL_GET_VIDEO_MEMORY_FD),
    gcmDEFINE2TEXT(gcvHAL_DESTROY_MMU),
    gcmDEFINE2TEXT(gcvHAL_WAIT_FENCE),
    gcmDEFINE2TEXT(gcvHAL_DEVICE_MUTEX),
    gcmDEFINE2TEXT(gcvHAL_DEC200_TEST),
    gcmDEFINE2TEXT(gcvHAL_DEC300_READ),
    gcmDEFINE2TEXT(gcvHAL_DEC300_WRITE),
    gcmDEFINE2TEXT(gcvHAL_DEC300_FLUSH),
    gcmDEFINE2TEXT(gcvHAL_DEC300_FLUSH_WAIT),
};
#endif
#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC
void
_MonitorTimerFunction(
    gctPOINTER Data
    )
{
    /* Periodic GPU watchdog. Samples progress every timeOut/2 ms; when the
     * GPU state (commit stamp, wait-link address, pending-event mask) stays
     * unchanged for a full timeout period with interrupts still pending,
     * triggers gckKERNEL_Recovery(). Re-arms itself at the end. */
    gckKERNEL kernel = (gckKERNEL)Data;
    gctINT32 pendingInterrupt;
    gctBOOL reset = gcvFALSE;
    gctINT32 mask;
    gctUINT32 advance = kernel->timeOut/2;

    if (kernel->monitorTimerStop)
    {
        /* Stop requested: return without re-arming the timer. */
        return;
    }

    gckOS_AtomGet(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt);

    if (pendingInterrupt < 0)
    {
        /* Counter went negative: more interrupts were acknowledged than
         * expected. Log it and increment the atom back to zero. */
        gctINT i = 0 - pendingInterrupt;
        gctINT pendingMask;

        gcmkVERIFY_OK(gckOS_AtomGet(
            kernel->os,
            kernel->hardware->pendingEvent,
            &pendingMask
            ));

        gcmkPRINT("[galcore]: Number of pending interrupt is %d mask is %x",
                  pendingInterrupt, pendingMask);

        while (i--)
        {
            /* Ignore counting which should not exist. */
            gckOS_AtomIncrement(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt);
        }

        gckOS_AtomGet(kernel->os, kernel->eventObj->interruptCount, &pendingInterrupt);
    }

    if (kernel->monitoring == gcvFALSE)
    {
        if (pendingInterrupt)
        {
            /* Begin to monitor GPU state. */
            kernel->monitoring = gcvTRUE;

            /* Record current state so the next tick can detect progress. */
            kernel->lastCommitStamp = kernel->eventObj->lastCommitStamp;
            kernel->restoreAddress = kernel->hardware->lastWaitLink;
            gcmkVERIFY_OK(gckOS_AtomGet(
                kernel->os,
                kernel->hardware->pendingEvent,
                &kernel->restoreMask
                ));

            /* Clear timeout. */
            kernel->timer = 0;
        }
    }
    else
    {
        if (pendingInterrupt)
        {
            gcmkVERIFY_OK(gckOS_AtomGet(
                kernel->os,
                kernel->hardware->pendingEvent,
                &mask
                ));

            if (kernel->eventObj->lastCommitStamp == kernel->lastCommitStamp
             && kernel->hardware->lastWaitLink == kernel->restoreAddress
             && mask == kernel->restoreMask
            )
            {
                /* GPU state is not changed, accumulate timeout. */
                kernel->timer += advance;

                if (kernel->timer >= kernel->timeOut)
                {
                    /* GPU stuck, trigger reset. */
                    reset = gcvTRUE;
                }
            }
            else
            {
                /* GPU state changed, cancel current timeout. */
                kernel->monitoring = gcvFALSE;
            }
        }
        else
        {
            /* GPU finished all jobs, cancel current timeout. */
            kernel->monitoring = gcvFALSE;
        }
    }

    if (reset)
    {
        gckKERNEL_Recovery(kernel);

        /* Work in this timeout is done. */
        kernel->monitoring = gcvFALSE;
    }

    /* Re-arm for the next sampling period. */
    gcmkVERIFY_OK(gckOS_StartTimer(kernel->os, kernel->monitorTimer, advance));
}
#endif
void
_DumpDriverConfigure(
    IN gckKERNEL Kernel
    )
{
    /* Print the driver build version and OS-layer module parameters
     * (part of the stuck/recovery diagnostic dump). */
    gcmkPRINT_N(0, "**************************\n");
    gcmkPRINT_N(0, "*** GPU DRV CONFIG ***\n");
    gcmkPRINT_N(0, "**************************\n");

    gcmkPRINT("Galcore version %d.%d.%d.%d\n",
              gcvVERSION_MAJOR, gcvVERSION_MINOR, gcvVERSION_PATCH, gcvVERSION_BUILD);

    gckOS_DumpParam();
}
void
_DumpState(
    IN gckKERNEL Kernel
    )
{
    /* Dump the full diagnostic state of one core: debug registers, pending
     * events, process databases and (when available) the command queue. */

    /* Dump GPU Debug registers. */
    gcmkVERIFY_OK(gckHARDWARE_DumpGPUState(Kernel->hardware));

    /* Dump Pending event. */
    gcmkVERIFY_OK(gckEVENT_Dump(Kernel->eventObj));

    /* Dump Process DB. */
    gcmkVERIFY_OK(gckKERNEL_DumpProcessDB(Kernel));

    /* Fix: the recorder dump used to dereference Kernel->command->recorder
     * BEFORE the NULL check below; keep all command accesses guarded. */
    if (Kernel->command)
    {
#if gcdRECORD_COMMAND
        /* Dump record. */
        gckRECORDER_Dump(Kernel->command->recorder);
#endif

        gcmkVERIFY_OK(gckCOMMAND_DumpExecutingBuffer(Kernel->command));
    }
}
gceSTATUS
gckKERNEL_GetHardwareType(
    IN gckKERNEL Kernel,
    OUT gceHARDWARE_TYPE *Type
    )
{
    /* Return the hardware type of the core owned by this kernel object. */
    gceHARDWARE_TYPE type;

    gcmkHEADER();

    /* Verify the arguments (the output pointer was previously dereferenced
     * unchecked; follow the file's gcmkVERIFY_ARGUMENT convention). */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
    gcmkVERIFY_ARGUMENT(Type != gcvNULL);

    type = Kernel->hardware->type;

    *Type = type;

    gcmkFOOTER_ARG("type=%d", type);
    return gcvSTATUS_OK;
}
gceSTATUS
_SetRecovery(
    IN gckKERNEL Kernel,
    IN gctBOOL Recovery,
    IN gctUINT32 StuckDump
    )
{
    /* Record whether automatic GPU recovery is enabled. */
    Kernel->recovery = Recovery;

    if (!Recovery)
    {
        /* With recovery disabled, force at least the user-command level of
         * stuck dumping so hangs remain diagnosable. */
        Kernel->stuckDump = gcmMAX(StuckDump, gcvSTUCK_DUMP_USER_COMMAND);
    }

    return gcvSTATUS_OK;
}
/*******************************************************************************
**
**  gckKERNEL_Construct
**
**  Construct a new gckKERNEL object.
**
**  INPUT:
**
**      gckOS Os
**          Pointer to an gckOS object.
**
**      gceCORE Core
**          Specified core.
**
**      gctUINT ChipID
**          Chip ID of the specified core.
**
**      IN gctPOINTER Context
**          Pointer to a driver defined context.
**
**      IN gckDEVICE Device
**          Pointer to the gckDEVICE object the core belongs to.
**
**      IN gckDB SharedDB,
**          Pointer to a shared DB, or gcvNULL to create a private one.
**
**  OUTPUT:
**
**      gckKERNEL * Kernel
**          Pointer to a variable that will hold the pointer to the gckKERNEL
**          object.
*/
gceSTATUS
gckKERNEL_Construct(
    IN gckOS Os,
    IN gceCORE Core,
    IN gctUINT ChipID,
    IN gctPOINTER Context,
    IN gckDEVICE Device,
    IN gckDB SharedDB,
    OUT gckKERNEL * Kernel
    )
{
    gckKERNEL kernel = gcvNULL;
    gceSTATUS status;
    gctSIZE_T i;
    gctPOINTER pointer = gcvNULL;
    /* Fix: zero-initialize; gckOS_QueryOption may fail without writing its
     * output, and the value was previously converted before the status
     * check (read of uninitialized memory). */
    gctUINT64 data = 0;
    gctUINT32 recovery;
    gctUINT32 stuckDump;
    gctUINT64 dynamicMap = 1;

    gcmkHEADER_ARG("Os=%p Context=%p", Os, Context);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Os, gcvOBJ_OS);
    gcmkVERIFY_ARGUMENT(Kernel != gcvNULL);

    /* Allocate the gckKERNEL object. */
    gcmkONERROR(gckOS_Allocate(Os,
                               gcmSIZEOF(struct _gckKERNEL),
                               &pointer));

    /* Zero the object. */
    gckOS_ZeroMemory(pointer, gcmSIZEOF(struct _gckKERNEL));

    kernel = pointer;

    /* Initialize the gckKERNEL object. */
    kernel->object.type = gcvOBJ_KERNEL;
    kernel->os = Os;
    kernel->core = Core;
    kernel->device = Device;
    kernel->chipID = ChipID;
    kernel->threadInitialized = gcvTRUE;

#if gcdENABLE_TRUST_APPLICATION
    /* Connect to security service for this GPU. */
    gcmkONERROR(gckKERNEL_SecurityOpen(kernel, kernel->core, &kernel->securityChannel));
#endif

    if (SharedDB == gcvNULL)
    {
        /* No shared DB supplied: create and own a private one. */
        gcmkONERROR(gckOS_Allocate(Os,
                                   gcmSIZEOF(struct _gckDB),
                                   &pointer));

        kernel->db = pointer;
        kernel->dbCreated = gcvTRUE;
        kernel->db->freeDatabase = gcvNULL;
        kernel->db->freeRecord = gcvNULL;
        kernel->db->dbMutex = gcvNULL;
        kernel->db->lastDatabase = gcvNULL;
        kernel->db->idleTime = 0;
        kernel->db->lastIdle = 0;
        kernel->db->lastSlowdown = 0;

        for (i = 0; i < gcmCOUNTOF(kernel->db->db); ++i)
        {
            kernel->db->db[i] = gcvNULL;
        }

        /* Construct a database mutex. */
        gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->dbMutex));

        /* Construct a video memory name database. */
        gcmkONERROR(gckKERNEL_CreateIntegerDatabase(
            kernel,
            512,
            &kernel->db->nameDatabase
            ));

        /* Construct a video memory name database mutex. */
        gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->nameDatabaseMutex));

        /* Construct a pointer name database. */
        gcmkONERROR(gckKERNEL_CreateIntegerDatabase(
            kernel,
            512,
            &kernel->db->pointerDatabase
            ));

        /* Initialize video memory node list. */
        gcsLIST_Init(&kernel->db->videoMemList);

        gcmkONERROR(gckOS_CreateMutex(Os, &kernel->db->videoMemListMutex));
    }
    else
    {
        /* Attach to the database shared with the other cores. */
        kernel->db = SharedDB;
        kernel->dbCreated = gcvFALSE;
    }

    for (i = 0; i < gcmCOUNTOF(kernel->timers); ++i)
    {
        kernel->timers[i].startTime = 0;
        kernel->timers[i].stopTime = 0;
    }

    gcmkONERROR(gckOS_CreateMutex(Os, &kernel->vidMemBlockMutex));

    /* Save context. */
    kernel->context = Context;

    /* Construct atom holding number of clients. */
    kernel->atomClients = gcvNULL;
    gcmkONERROR(gckOS_AtomConstruct(Os, &kernel->atomClients));

    /* Defaults: recovery enabled, no stuck dump. */
    kernel->recovery = gcvTRUE;
    kernel->stuckDump = gcvSTUCK_DUMP_NONE;

    /* Override default recovery and stuckDump setting. */
    status = gckOS_QueryOption(Os, "recovery", &data);

    if (gcmIS_SUCCESS(status))
    {
        /* Fix: convert only after the query succeeded. */
        recovery = (gctUINT32)data;

        status = gckOS_QueryOption(Os, "stuckDump", &data);
        stuckDump = (gctUINT32)data;

        gcmkASSERT(status == gcvSTATUS_OK);

        _SetRecovery(kernel, recovery, stuckDump);
    }

    status = gckOS_QueryOption(Os, "sRAMLoopMode", &data);
    kernel->sRAMLoopMode = (status == gcvSTATUS_OK) ? data : 0;

    /* Need the kernel reference before gckKERNEL_Construct() completes.
       gckOS_MapPagesEx() is called to map kernel virtual command buffers. */
    *Kernel = kernel;

    {
        /* Construct the gckHARDWARE object. */
        gcmkONERROR(
            gckHARDWARE_Construct(Os, kernel->device, kernel->core, &kernel->hardware));

        /* Set pointer to gckKERNEL object in gckHARDWARE object. */
        kernel->hardware->kernel = kernel;

        kernel->sRAMIndex = 0;
        kernel->extSRAMIndex = 0;

        /* Mirror the per-bank SRAM settings discovered by hardware. */
        for (i = gcvSRAM_INTERNAL0; i < gcvSRAM_INTER_COUNT; i++)
        {
            kernel->sRAMVidMem[i] = kernel->hardware->sRAMVidMem[i];
            kernel->sRAMPhysical[i] = kernel->hardware->sRAMPhysical[i];
            kernel->sRAMPhysFaked[i] = gcvFALSE;
        }

        kernel->timeOut = kernel->hardware->type == gcvHARDWARE_2D
                        ? gcdGPU_2D_TIMEOUT
                        : gcdGPU_TIMEOUT
                        ;

#if gcdSHARED_PAGETABLE
        /* Construct the gckMMU object. */
        gcmkONERROR(
            gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
#else
        if (Device == gcvNULL)
        {
            /* Construct the gckMMU object. */
            gcmkONERROR(
                gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));
        }
        else
        {
            /* Reuse the device-level MMU for this hardware type, creating
             * and registering it on first use. */
            gcmkONERROR(gckDEVICE_GetMMU(Device, kernel->hardware->type, &kernel->mmu));

            if (kernel->mmu == gcvNULL)
            {
                gcmkONERROR(
                    gckMMU_Construct(kernel, gcdMMU_SIZE, &kernel->mmu));

                gcmkONERROR(
                    gckDEVICE_SetMMU(Device, kernel->hardware->type, kernel->mmu));
            }
        }

        gcmkONERROR(
            gckMMU_SetupSRAM(kernel->mmu, kernel->hardware, kernel->device));

        /* Best effort: dynamicMap keeps its default (enabled) on failure. */
        status = gckOS_QueryOption(Os, "mmuDynamicMap", &dynamicMap);

        if (dynamicMap && kernel->hardware->mmuVersion && !kernel->mmu->dynamicAreaSetuped)
        {
            gcmkONERROR(
                gckMMU_SetupDynamicSpace(kernel->mmu));

            kernel->mmu->dynamicAreaSetuped = gcvTRUE;
        }

        if (kernel->hardware->mmuVersion > 0)
        {
            /* Flush MTLB table. */
            gcmkONERROR(gckVIDMEM_NODE_CleanCache(
                kernel,
                kernel->mmu->mtlbVideoMem,
                0,
                kernel->mmu->mtlbLogical,
                kernel->mmu->mtlbSize
                ));
        }
#endif

        kernel->contiguousBaseAddress = kernel->mmu->contiguousBaseAddress;
        kernel->externalBaseAddress = kernel->mmu->externalBaseAddress;

        /* Construct the gckCOMMAND object, either MCFE or wait-link FE can exist. */
        if (gckHARDWARE_IsFeatureAvailable(kernel->hardware, gcvFEATURE_MCFE))
        {
            /* Construct the gckCOMMAND object for multi-channel FE. */
            gcmkONERROR(gckCOMMAND_Construct(kernel, gcvHW_FE_MULTI_CHANNEL, &kernel->command));

            /* Construct gckEVENT for multi-channel FE. */
            gcmkONERROR(gckEVENT_Construct(kernel, kernel->command, &kernel->eventObj));
        }
        else
        {
            /* Construct the gckCOMMAND object for legacy wait-link FE. */
            gcmkONERROR(gckCOMMAND_Construct(kernel, gcvHW_FE_WAIT_LINK, &kernel->command));

            /* Construct the gckEVENT object. */
            gcmkONERROR(gckEVENT_Construct(kernel, kernel->command, &kernel->eventObj));
        }

        if (gckHARDWARE_IsFeatureAvailable(kernel->hardware, gcvFEATURE_ASYNC_BLIT))
        {
            /* Construct the gckCOMMAND object for BLT engine. */
            gcmkONERROR(gckCOMMAND_Construct(kernel, gcvHW_FE_ASYNC, &kernel->asyncCommand));

            /* Construct gckEVENT for BLT. */
            gcmkONERROR(gckEVENT_Construct(kernel, kernel->asyncCommand, &kernel->asyncEvent));
        }

        gcmkVERIFY_OK(gckOS_GetTime(&kernel->resetTimeStamp));

        /* Post construct hardware elements after MMU settle. */
        gcmkONERROR(gckHARDWARE_PostConstruct(kernel->hardware));

        /* Initialize the GPU. */
        gcmkONERROR(
            gckHARDWARE_InitializeHardware(kernel->hardware));

#if gcdDVFS
        if (gckHARDWARE_IsFeatureAvailable(kernel->hardware,
                                           gcvFEATURE_DYNAMIC_FREQUENCY_SCALING))
        {
            gcmkONERROR(gckDVFS_Construct(kernel->hardware, &kernel->dvfs));
            gcmkONERROR(gckDVFS_Start(kernel->dvfs));
        }
#endif

#if COMMAND_PROCESSOR_VERSION == 1
        if (kernel->command)
        {
            /* Start the command queue. */
            gcmkONERROR(gckCOMMAND_Start(kernel->command));
        }

        if (kernel->asyncCommand)
        {
            /* Start the async command queue. */
            gcmkONERROR(gckCOMMAND_Start(kernel->asyncCommand));
        }
#endif
    }

#if VIVANTE_PROFILER
    /* Initialize profile setting */
    kernel->profileEnable = gcvFALSE;
    kernel->profileCleanRegister = gcvTRUE;
#endif

#if gcdLINUX_SYNC_FILE
    gcmkONERROR(gckOS_CreateSyncTimeline(Os, Core, &kernel->timeline));
#endif

#if gcdSECURITY
    /* Connect to security service for this GPU. */
    gcmkONERROR(gckKERNEL_SecurityOpen(kernel, kernel->core, &kernel->securityChannel));
#endif

#if gcdGPU_TIMEOUT && gcdINTERRUPT_STATISTIC
    if (kernel->timeOut)
    {
        /* Create and arm the GPU stuck-monitor timer. */
        gcmkVERIFY_OK(gckOS_CreateTimer(
            Os,
            (gctTIMERFUNCTION)_MonitorTimerFunction,
            (gctPOINTER)kernel,
            &kernel->monitorTimer
            ));

        kernel->monitoring = gcvFALSE;

        kernel->monitorTimerStop = gcvFALSE;

        gcmkVERIFY_OK(gckOS_StartTimer(
            Os,
            kernel->monitorTimer,
            100
            ));
    }
#endif

    /* Return pointer to the gckKERNEL object. */
    *Kernel = kernel;

    /* Success. */
    gcmkFOOTER_ARG("*Kernel=%p", *Kernel);
    return gcvSTATUS_OK;

OnError:
    *Kernel = gcvNULL;

    /* Fix: only touch kernel fields when the allocation itself succeeded;
     * the original unconditionally dereferenced kernel->core, a NULL
     * dereference when gckOS_Allocate failed. */
    if (kernel != gcvNULL)
    {
        gckOS_SetGPUPower(Os, kernel->core, gcvFALSE, gcvFALSE);

        gckKERNEL_Destroy(kernel);
    }

    /* Return the error. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
** gckKERNEL_Destroy
**
** Destroy an gckKERNEL object.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object to destroy.
**
** OUTPUT:
**
** Nothing.
*/
gceSTATUS
gckKERNEL_Destroy(
    IN gckKERNEL Kernel
    )
{
    /* Tear down a gckKERNEL in the reverse order of construction:
     * monitor timer, command/event objects, hardware, MMU, client atom,
     * then (if owned) the process database, and finally the object itself. */
    gctSIZE_T i;
    gcsDATABASE_PTR database, databaseNext;
    gcsDATABASE_RECORD_PTR record, recordNext;

    gcmkHEADER_ARG("Kernel=%p", Kernel);

    /* Verify the arguments. */
    gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);

#if QNX_SINGLE_THREADED_DEBUGGING
    gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->debugMutex));
#endif

    if (Kernel->monitorTimer)
    {
        /* Stop and destroy monitor timer. */
        gcmkVERIFY_OK(gckOS_StopTimer(Kernel->os, Kernel->monitorTimer));
        gcmkVERIFY_OK(gckOS_DestroyTimer(Kernel->os, Kernel->monitorTimer));
    }

    {
        if (Kernel->command)
        {
            /* Destroy the gckCOMMAND object. */
            gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->command));
        }

        if (Kernel->asyncCommand)
        {
            /* Destroy the async (BLT) command queue. */
            gcmkVERIFY_OK(gckCOMMAND_Destroy(Kernel->asyncCommand));
        }

        if (Kernel->asyncEvent)
        {
            gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->asyncEvent));
        }

        if (Kernel->eventObj)
        {
            /* Destroy the gckEVENT object. */
            gcmkVERIFY_OK(gckEVENT_Destroy(Kernel->eventObj));
        }

        /* Destroy hardware resources before destroying MMU. */
        gcmkVERIFY_OK(gckHARDWARE_PreDestroy(Kernel->hardware));

        if (Kernel->mmu)
        {
#if gcdSHARED_PAGETABLE
            /* Destroy the gckMMU object. */
            gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
#else
            /* Only the core that constructed the MMU destroys it; other
             * cores merely reference it through gckDEVICE. */
            if (Kernel->mmu->hardware == Kernel->hardware)
            {
                /* Destroy the gckMMU object. */
                gcmkVERIFY_OK(gckMMU_Destroy(Kernel->mmu));
            }
#endif
        }

        /* Destroy the gckHARDWARE object. */
        gcmkVERIFY_OK(gckHARDWARE_Destroy(Kernel->hardware));
    }

    if (Kernel->atomClients)
    {
        /* Destroy the client atom. */
        gcmkVERIFY_OK(gckOS_AtomDestroy(Kernel->os, Kernel->atomClients));
    }

    gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->vidMemBlockMutex));

    /* Destroy the database (only if this kernel created it). */
    if (Kernel->dbCreated)
    {
        for (i = 0; i < gcmCOUNTOF(Kernel->db->db); ++i)
        {
            if (Kernel->db->db[i] != gcvNULL)
            {
                gcmkVERIFY_OK(
                    gckKERNEL_DestroyProcessDB(Kernel, Kernel->db->db[i]->processID));
            }
        }

        /* Free all databases. */
        for (database = Kernel->db->freeDatabase;
             database != gcvNULL;
             database = databaseNext)
        {
            databaseNext = database->next;

            if (database->counterMutex)
            {
                gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, database->counterMutex));
            }

            gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, database));
        }

        if (Kernel->db->lastDatabase != gcvNULL)
        {
            if (Kernel->db->lastDatabase->counterMutex)
            {
                gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->lastDatabase->counterMutex));
            }

            gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db->lastDatabase));
        }

        /* Free all database records. */
        for (record = Kernel->db->freeRecord; record != gcvNULL; record = recordNext)
        {
            recordNext = record->next;

            gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, record));
        }

        if (Kernel->db->dbMutex)
        {
            /* Destroy the database mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->dbMutex));
        }

        if (Kernel->db->nameDatabase)
        {
            /* Destroy video memory name database. */
            gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->nameDatabase));
        }

        if (Kernel->db->nameDatabaseMutex)
        {
            /* Destroy video memory name database mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->nameDatabaseMutex));
        }

        if (Kernel->db->pointerDatabase)
        {
            /* Destroy id-pointer database. */
            gcmkVERIFY_OK(gckKERNEL_DestroyIntegerDatabase(Kernel, Kernel->db->pointerDatabase));
        }

        if (Kernel->db->videoMemListMutex)
        {
            /* Destroy video memory list mutex. */
            gcmkVERIFY_OK(gckOS_DeleteMutex(Kernel->os, Kernel->db->videoMemListMutex));
        }

        /* Destroy the database. */
        gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel->db));

        /* Notify stuck timer to quit.
         * NOTE(review): this flag is set only when dbCreated and after the
         * monitor timer was already stopped/destroyed above — confirm it
         * should not be set unconditionally before the stop. */
        Kernel->monitorTimerStop = gcvTRUE;
    }

#if gcdDVFS
    if (Kernel->dvfs)
    {
        gcmkVERIFY_OK(gckDVFS_Stop(Kernel->dvfs));
        gcmkVERIFY_OK(gckDVFS_Destroy(Kernel->dvfs));
    }
#endif

#if gcdLINUX_SYNC_FILE
    if (Kernel->timeline)
    {
        gcmkVERIFY_OK(gckOS_DestroySyncTimeline(Kernel->os, Kernel->timeline));
    }
#endif

#if gcdSECURITY
    if (Kernel->securityChannel)
    {
        gcmkVERIFY_OK(gckKERNEL_SecurityClose(Kernel->securityChannel));
    }
#endif

    /* Mark the gckKERNEL object as unknown. */
    Kernel->object.type = gcvOBJ_UNKNOWN;

    /* Free the gckKERNEL object. */
    gcmkVERIFY_OK(gcmkOS_SAFE_FREE(Kernel->os, Kernel));

    /* Success. */
    gcmkFOOTER_NO();
    return gcvSTATUS_OK;
}
/*******************************************************************************
**
** gckKERNEL_AllocateVideoMemory
**
** Walk requested pools to allocate video memory.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** OUTPUT:
**
** gckVIDMEM_NODE * NodeObject
**      Pointer to a variable receiving video memory representation.
*/
gceSTATUS
gckKERNEL_AllocateVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 Alignment,
    IN gceVIDMEM_TYPE Type,
    IN gctUINT32 Flag,
    IN OUT gctSIZE_T * Bytes,
    IN OUT gcePOOL * Pool,
    OUT gckVIDMEM_NODE * NodeObject
    )
{
    /* Walk the requested pool and its fallbacks (local -> SRAM -> system ->
     * virtual) until an allocation succeeds. *Pool and *Bytes are updated
     * to the pool actually used and the (possibly aligned-up) size. */
    gceSTATUS status;
    gcePOOL pool;
    gckVIDMEM videoMemory;
    gctINT loopCount;
    gckVIDMEM_NODE nodeObject = gcvNULL;
    gctBOOL contiguous = gcvFALSE;
    gctBOOL cacheable = gcvFALSE;
    gctBOOL secure = gcvFALSE;
    gctBOOL fastPools = gcvFALSE;
    gctBOOL virtualPool4K = gcvFALSE;
    gctBOOL hasFastPools = gcvFALSE;
    gctSIZE_T bytes = *Bytes;

    gcmkHEADER_ARG("Kernel=%p *Pool=%d *Bytes=%lu Alignment=%lu Type=%d",
                   Kernel, *Pool, *Bytes, Alignment, Type);

    *NodeObject = gcvNULL;

    /* Check flags. */
    contiguous = Flag & gcvALLOC_FLAG_CONTIGUOUS;
    cacheable = Flag & gcvALLOC_FLAG_CACHEABLE;
    secure = Flag & gcvALLOC_FLAG_SECURITY;

    /* FAST_POOLS and 4K_PAGES steer the walk locally; strip them from the
     * flags passed down to the allocators. */
    if (Flag & gcvALLOC_FLAG_FAST_POOLS)
    {
        fastPools = gcvTRUE;
        Flag &= ~gcvALLOC_FLAG_FAST_POOLS;
    }

    if (Flag & gcvALLOC_FLAG_4K_PAGES)
    {
        virtualPool4K = gcvTRUE;
        Flag &= ~gcvALLOC_FLAG_4K_PAGES;
    }

#if gcdALLOC_ON_FAULT
    if (Type == gcvVIDMEM_COLOR_BUFFER)
    {
        Flag |= gcvALLOC_FLAG_ALLOC_ON_FAULT;
    }
#endif

    /* Fault-backed allocations only make sense in the virtual pool. */
    if (Flag & gcvALLOC_FLAG_ALLOC_ON_FAULT)
    {
        *Pool = gcvPOOL_VIRTUAL;
    }

    if (Flag & gcvALLOC_FLAG_DMABUF_EXPORTABLE)
    {
        gctSIZE_T pageSize = 0;
        gckOS_GetPageSize(Kernel->os, &pageSize);

        /* Usually, the exported dmabuf might be later imported to DRM,
        ** while DRM requires input size to be page aligned.
        */
        bytes = gcmALIGN(bytes, pageSize);
    }

    if (Type == gcvVIDMEM_TYPE_COMMAND)
    {
#if gcdALLOC_CMD_FROM_RESERVE || gcdSECURITY || gcdDISABLE_GPU_VIRTUAL_ADDRESS || !USE_KERNEL_VIRTUAL_BUFFERS
        Flag |= gcvALLOC_FLAG_CONTIGUOUS;
#endif
    }

    if (Type == gcvVIDMEM_TYPE_TILE_STATUS)
    {
        gctBOOL tileStatusInVirtual;

        {
            tileStatusInVirtual =
                gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_MC20);
        }

        if (!tileStatusInVirtual)
        {
            /* Must be contiguous if not support virtual tile status. */
            Flag |= gcvALLOC_FLAG_CONTIGUOUS;
        }
    }

    /* NOTE(review): the case labels below are gceSURF_TYPE values while the
     * switch expression is a gceVIDMEM_TYPE — the enums may only coincide by
     * value; confirm the intended mapping. */
    if (*Pool == gcvPOOL_DEFAULT) {
        switch (Type)
        {
        case gcvSURF_TYPE_UNKNOWN:
        case gcvSURF_DEPTH:
        case gcvSURF_RENDER_TARGET:
        case gcvSURF_TEXTURE:
            *Pool = gcvPOOL_VIRTUAL;
            break;
        default:
            break;
        }
    }

/* Retry label: reached again after an OOM broadcast freed some memory. */
AllocateMemory:

#if gcdCAPTURE_ONLY_MODE
    if (*Pool != gcvPOOL_VIRTUAL)
    {
        *Pool = gcvPOOL_SYSTEM;
    }
#endif

    /* Get initial pool. */
    switch (pool = *Pool)
    {
    case gcvPOOL_DEFAULT:
    case gcvPOOL_LOCAL:
        pool = gcvPOOL_LOCAL_INTERNAL;
        loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
        break;

    case gcvPOOL_UNIFIED:
        pool = gcvPOOL_SYSTEM;
        loopCount = (gctINT) gcvPOOL_NUMBER_OF_POOLS;
        break;

    default:
        /* Explicit pool request: try exactly that one pool. */
        loopCount = 1;
        break;
    }

    while (loopCount-- > 0)
    {
        if (pool == gcvPOOL_VIRTUAL)
        {
            /* Try contiguous virtual first. */
            /* NOTE(review): with both of the #if branches compiled in, the
             * bare block below is NOT the else of the size-limit check and
             * runs even after status was set to OUT_OF_MEMORY — confirm. */
#if gcdCONTIGUOUS_SIZE_LIMIT
            if (bytes > gcdCONTIGUOUS_SIZE_LIMIT && contiguous == gcvFALSE)
            {
                status = gcvSTATUS_OUT_OF_MEMORY;
            }
            else
#endif
#if gcdENABLE_GPU_1M_PAGE
            if (!virtualPool4K && Kernel->core != gcvCORE_VG && Kernel->hardware->mmuVersion)
            {
                /* Create a gckVIDMEM_NODE from contiguous memory. */
                status = gckVIDMEM_NODE_AllocateVirtualChunk(
                    Kernel,
                    pool,
                    Type,
                    Flag | gcvALLOC_FLAG_CONTIGUOUS,
                    &bytes,
                    &nodeObject);

                if (gcmIS_SUCCESS(status))
                {
                    /* Memory allocated. */
                    break;
                }
            }
#endif
            {
                /* Create a gckVIDMEM_NODE from contiguous memory. */
                status = gckVIDMEM_NODE_AllocateVirtual(
                    Kernel,
                    pool,
                    Type,
                    Flag | gcvALLOC_FLAG_CONTIGUOUS,
                    &bytes,
                    &nodeObject);
            }

            if (gcmIS_SUCCESS(status))
            {
                /* Memory allocated. */
                break;
            }

            /* Caller demanded contiguous memory: no non-contiguous fallback. */
            if (contiguous)
            {
                break;
            }

#if gcdENABLE_GPU_1M_PAGE
            /* Try non-contiguous virtual chunk. */
            if (!virtualPool4K && Kernel->hardware->mmuVersion && Kernel->core != gcvCORE_VG)
            {
                /* Create a gckVIDMEM_NODE from non-contiguous 1M chunks. */
                status = gckVIDMEM_NODE_AllocateVirtualChunk(
                    Kernel,
                    pool,
                    Type,
                    Flag | gcvALLOC_FLAG_NON_CONTIGUOUS,
                    &bytes,
                    &nodeObject);

                if (gcmIS_SUCCESS(status))
                {
                    /* Memory allocated. */
                    break;
                }
            }
#endif
            /* Try non-contiguous virtual. */
            /* Create a gckVIDMEM_NODE for virtual memory. */
            gcmkONERROR(
                gckVIDMEM_NODE_AllocateVirtual(Kernel,
                                               pool,
                                               Type,
                                               Flag | gcvALLOC_FLAG_NON_CONTIGUOUS,
                                               &bytes, &nodeObject));

            /* Success. */
            break;
        }
        /* gcvPOOL_SYSTEM/gcvPOOL_SRAM can't be cacheable. */
        else if (cacheable == gcvFALSE && secure == gcvFALSE)
        {
#ifdef EMULATOR
            /* Cmodel only support 1 SRAM currently. */
            Kernel->sRAMIndex = 0;
            Kernel->extSRAMIndex = 0;
#endif
            /* Get pointer to gckVIDMEM object for pool. */
            status = gckKERNEL_GetVideoMemoryPool(Kernel, pool, &videoMemory);

            if (gcmIS_SUCCESS(status))
            {
                /* Allocate memory. */
                if ((Flag & videoMemory->capability) != Flag)
                {
                    status = gcvSTATUS_NOT_SUPPORTED;
                }
#if defined(gcdLINEAR_SIZE_LIMIT)
                /* 512 KB */
                else if (bytes > gcdLINEAR_SIZE_LIMIT)
                {
                    status = gcvSTATUS_OUT_OF_MEMORY;
                }
#endif
                else
                {
                    /* At least one fast (linear) pool was actually tried. */
                    hasFastPools = gcvTRUE;
                    status = gckVIDMEM_NODE_AllocateLinear(Kernel,
                                                           videoMemory,
                                                           pool,
                                                           Type,
                                                           Flag,
                                                           Alignment,
                                                           (pool == gcvPOOL_SYSTEM ||
                                                            pool == gcvPOOL_INTERNAL_SRAM ||
                                                            pool == gcvPOOL_EXTERNAL_SRAM),
                                                           &bytes,
                                                           &nodeObject);
                }

                if (gcmIS_SUCCESS(status))
                {
                    /* Memory allocated. */
                    break;
                }
#if gcdCAPTURE_ONLY_MODE
                else
                {
                    gcmkPRINT("Capture only mode: Out of Memory");
                }
#endif
            }
        }

        /* Advance to the next fallback pool. */
        if (pool == gcvPOOL_LOCAL_INTERNAL)
        {
            /* Advance to external memory. */
            pool = gcvPOOL_LOCAL_EXTERNAL;
        }
        else
        if (pool == gcvPOOL_LOCAL_EXTERNAL)
        {
            if (Kernel->sRAMLoopMode)
            {
                /* Advance to Internal SRAM memory block. */
                pool = gcvPOOL_INTERNAL_SRAM;
            }
            else
            {
                /* Advance to contiguous reserved memory. */
                pool = gcvPOOL_SYSTEM;
            }
        }
        else
        if (pool == gcvPOOL_INTERNAL_SRAM)
        {
            /* Cycle over remaining real SRAM banks before giving up. */
            if (Kernel->sRAMIndex < gcvSRAM_INTER_COUNT - 1 && !Kernel->sRAMPhysFaked[Kernel->sRAMIndex])
            {
                Kernel->sRAMIndex++;
                loopCount++;
            }
            else
            {
                /* Advance to contiguous reserved memory. */
                pool = gcvPOOL_SYSTEM;
            }
        }
        else
        if (pool == gcvPOOL_SYSTEM)
        {
            /* Do not go ahead to try relative slow pools */
            if (fastPools && hasFastPools)
            {
                status = gcvSTATUS_OUT_OF_MEMORY;
                break;
            }

            /* Advance to virtual memory. */
            pool = gcvPOOL_VIRTUAL;
        }
        else
        {
            /* Out of pools. */
            gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
        }
    }

    if (nodeObject == gcvNULL)
    {
        if (contiguous)
        {
            /* Broadcast OOM message. */
            status = gckOS_Broadcast(Kernel->os, Kernel->hardware, gcvBROADCAST_OUT_OF_MEMORY);

            if (gcmIS_SUCCESS(status))
            {
                /* Get some memory. */
                gckOS_Delay(gcvNULL, 1);

                goto AllocateMemory;
            }
        }

        /* Nothing allocated. */
        gcmkONERROR(gcvSTATUS_OUT_OF_MEMORY);
    }

#if gcdCAPTURE_ONLY_MODE
    nodeObject->captureSize = bytes;
#endif

    /* Return node and pool used for allocation. */
    *Pool = pool;
    *Bytes = bytes;
    *NodeObject = nodeObject;

    /* Return status. */
    gcmkFOOTER_ARG("*Pool=%d *NodeObject=%p", *Pool, *NodeObject);
    return gcvSTATUS_OK;

OnError:
    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
** _AllocateLinearMemory
**
** Private function to allocate the requested amount of video memory, output
** video memory handle.
*/
gceSTATUS
_AllocateLinearMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    /* Dispatch handler for gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY: allocates
     * video memory, wraps it in a per-process handle and records the
     * allocation in the process database. */
    gceSTATUS status;
    gckVIDMEM_NODE nodeObject = gcvNULL;
    gctUINT32 handle = 0;
    gceDATABASE_TYPE dbType;
    gcePOOL pool = (gcePOOL)Interface->u.AllocateLinearVideoMemory.pool;
    gctSIZE_T bytes = (gctSIZE_T)Interface->u.AllocateLinearVideoMemory.bytes;
    gctUINT32 alignment = Interface->u.AllocateLinearVideoMemory.alignment;
    /* Low byte carries the gceVIDMEM_TYPE; higher bits are masked off —
     * presumably extra qualifiers; confirm against the user-side encoding. */
    gceVIDMEM_TYPE type = (Interface->u.AllocateLinearVideoMemory.type & 0xFF);
    gctUINT32 flag = Interface->u.AllocateLinearVideoMemory.flag;
    gctUINT64 mappingInOne = 1;
    gctBOOL isContiguous;

    gcmkHEADER_ARG("Kernel=%p pool=%d bytes=%lu alignment=%lu type=%d",
                   Kernel, pool, bytes, alignment, type);

    gcmkVERIFY_ARGUMENT(bytes != 0);

    /* NOTE(review): these write per-request SRAM indices into the shared
     * gckKERNEL object — racy if multiple clients allocate concurrently;
     * confirm this path is serialized by the dispatcher. */
    if (Interface->u.AllocateLinearVideoMemory.sRAMIndex >= 0)
    {
        Kernel->sRAMIndex = Interface->u.AllocateLinearVideoMemory.sRAMIndex;
    }

    if (Interface->u.AllocateLinearVideoMemory.extSRAMIndex >= 0)
    {
        Kernel->extSRAMIndex = Interface->u.AllocateLinearVideoMemory.extSRAMIndex;
    }

    gckOS_QueryOption(Kernel->os, "allMapInOne", &mappingInOne);
    if (mappingInOne == 0)
    {
        /* TODO: it should page align if driver uses dynamic mapping for mapped user memory.
         * it should be adjusted with different os.
         */
        alignment = gcmALIGN(alignment, 4096);
    }

    /* Allocate video memory node. */
    gcmkONERROR(
        gckKERNEL_AllocateVideoMemory(Kernel,
                                      alignment,
                                      type,
                                      flag,
                                      &bytes,
                                      &pool,
                                      &nodeObject));

    /* Allocate handle for this video memory. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Allocate(Kernel, nodeObject, &handle));

    /* Return node and pool used for allocation. */
    Interface->u.AllocateLinearVideoMemory.node = handle;
    Interface->u.AllocateLinearVideoMemory.pool = pool;
    Interface->u.AllocateLinearVideoMemory.bytes = bytes;

    /* Encode surface type and pool to database type. */
    dbType = gcvDB_VIDEO_MEMORY
           | (type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
           | (pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);

    /* Record in process db. */
    gcmkONERROR(
        gckKERNEL_AddProcessDB(Kernel,
                               ProcessID,
                               dbType,
                               gcmINT2PTR(handle),
                               gcvNULL,
                               bytes));

    gcmkONERROR(gckVIDMEM_NODE_IsContiguous(Kernel, nodeObject, &isContiguous));

    if (isContiguous)
    {
        /* Record in process db. */
        gcmkONERROR(
            gckKERNEL_AddProcessDB(Kernel,
                                   ProcessID,
                                   gcvDB_CONTIGUOUS,
                                   gcmINT2PTR(handle),
                                   gcvNULL,
                                   bytes));
    }

    /* NOTE(review): '&' treats gcvVIDMEM_TYPE_COMMAND as a bit mask while
     * gceVIDMEM_TYPE reads like a plain enum — confirm '==' is not intended
     * (same pattern appears in _ReleaseVideoMemory). */
    if (type & gcvVIDMEM_TYPE_COMMAND)
    {
        /* Record in process db. */
        gcmkONERROR(
            gckKERNEL_AddProcessDB(Kernel,
                                   ProcessID,
                                   gcvDB_COMMAND_BUFFER,
                                   gcmINT2PTR(handle),
                                   gcvNULL,
                                   bytes));
    }

    /* Return status. */
    gcmkFOOTER_ARG("pool=%d node=0x%x", pool, handle);
    return gcvSTATUS_OK;

OnError:
    if (handle)
    {
        /* Destroy handle allocated. */
        gcmkVERIFY_OK(gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, handle));
    }

    if (nodeObject)
    {
        /* Free video memory allocated. */
        gcmkVERIFY_OK(gckVIDMEM_NODE_Dereference(Kernel, nodeObject));
    }

    /* Return the status. */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
** _ReleaseVideoMemory
**
** Release handle of a video memory.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** gctUINT32 ProcessID
** ProcessID of current process.
**
** gctUINT32 Handle
** Handle of video memory.
**
** OUTPUT:
**
** Nothing.
*/
gceSTATUS
_ReleaseVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gctUINT32 Handle
    )
{
    /* Undo _AllocateLinearMemory for one handle: remove the process-database
     * records, then drop the handle and the node reference. */
    gceSTATUS status;
    gckVIDMEM_NODE nodeObject;
    gceDATABASE_TYPE type;
    gctBOOL isContiguous;

    gcmkHEADER_ARG("Kernel=%p ProcessID=%d Handle=%d",
                   Kernel, ProcessID, Handle);

    /* Translate the per-process handle back to the node object. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Handle, &nodeObject));

    /* Rebuild the encoded database type used at allocation time. */
    type = gcvDB_VIDEO_MEMORY
         | (nodeObject->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
         | (nodeObject->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);

    gcmkONERROR(
        gckKERNEL_RemoveProcessDB(Kernel,
                                  ProcessID,
                                  type,
                                  gcmINT2PTR(Handle)));

    gcmkONERROR(gckVIDMEM_NODE_IsContiguous(Kernel, nodeObject, &isContiguous));

    if (isContiguous)
    {
        /* Best effort: the extra record's removal status is ignored. */
        gckKERNEL_RemoveProcessDB(Kernel,
                                  ProcessID,
                                  gcvDB_CONTIGUOUS,
                                  gcmINT2PTR(Handle));
    }

    if (nodeObject->type & gcvVIDMEM_TYPE_COMMAND)
    {
        gckKERNEL_RemoveProcessDB(Kernel,
                                  ProcessID,
                                  gcvDB_COMMAND_BUFFER,
                                  gcmINT2PTR(Handle));
    }

    /* Drop the handle first, then the underlying node reference. */
    gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, Handle);

    gckVIDMEM_NODE_Dereference(Kernel, nodeObject);

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
** _LockVideoMemory
**
** Lock a video memory node. It will generate a cpu virtual address used
** by software and a GPU address used by GPU.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** gceCORE Core
** GPU to which video memory is locked.
**
** gcsHAL_INTERFACE * Interface
** Pointer to a gcsHAL_INTERFACE structure that defines the command to
** be dispatched.
**
** OUTPUT:
**
** gcsHAL_INTERFACE * Interface
** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
** returned.
*/
/* Lock a video memory node: produce a CPU virtual address for software and
 * a GPU address for the hardware, record the lock in the process database,
 * and take a handle reference. On failure every partial step is rolled back.
 */
static gceSTATUS
_LockVideoMemory(
    IN gckKERNEL Kernel,
    IN gceCORE Core,
    IN gctUINT32 ProcessID,
    IN OUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gctUINT32 handle;
    gckVIDMEM_NODE nodeObject = gcvNULL;
    gctBOOL referenced = gcvFALSE;
    /* Sentinel: stays gcvINVALID_ADDRESS until the GPU lock succeeds. */
    gctUINT32 address = gcvINVALID_ADDRESS;
    gctPOINTER logical = gcvNULL;
    gctPHYS_ADDR_T physical = gcvINVALID_PHYSICAL_ADDRESS;
    gctUINT32 gid = 0;
    gctBOOL asynchronous = gcvFALSE;

    gcmkHEADER_ARG("Kernel=%p ProcessID=%d",
                   Kernel, ProcessID);

    handle = Interface->u.LockVideoMemory.node;

    gcmkONERROR(
        gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, handle, &nodeObject));

    /* Ref node. */
    gcmkONERROR(gckVIDMEM_NODE_Reference(Kernel, nodeObject));
    referenced = gcvTRUE;

#if gcdCAPTURE_ONLY_MODE
    if (Interface->u.LockVideoMemory.queryCapSize)
    {
        /* NOTE(review): capture-mode query returns while still holding the
         * node reference and without gcmkFOOTER — assumed intentional for
         * capture bookkeeping; confirm against capture-mode callers. */
        Interface->u.LockVideoMemory.captureSize = nodeObject->captureSize;
        return gcvSTATUS_OK;
    }
    else
    {
        nodeObject->captureLogical = Interface->u.LockVideoMemory.captureLogical;
    }
#endif

    /* Lock for userspace CPU access. */
    gcmkONERROR(
        gckVIDMEM_NODE_LockCPU(Kernel,
                               nodeObject,
                               Interface->u.LockVideoMemory.cacheable,
                               gcvTRUE,
                               &logical));

    /* Lock for GPU address. */
    gcmkONERROR(gckVIDMEM_NODE_Lock(Kernel, nodeObject, &address));

    /* Get CPU physical address. */
    gcmkONERROR(gckVIDMEM_NODE_GetPhysical(Kernel, nodeObject, 0, &physical));
    gcmkONERROR(gckVIDMEM_NODE_GetGid(Kernel, nodeObject, &gid));

    Interface->u.LockVideoMemory.address = address;
    Interface->u.LockVideoMemory.memory = gcmPTR_TO_UINT64(logical);
    Interface->u.LockVideoMemory.physicalAddress = physical;
    Interface->u.LockVideoMemory.gid = gid;

    /* Record the lock in the process database. */
    gcmkONERROR(
        gckKERNEL_AddProcessDB(Kernel,
                               ProcessID,
                               gcvDB_VIDEO_MEMORY_LOCKED,
                               gcmINT2PTR(handle),
                               logical,
                               0));

    /* Ref handle. */
    gckVIDMEM_HANDLE_Reference(Kernel, ProcessID, handle);

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    if (logical)
    {
        gckVIDMEM_NODE_UnlockCPU(Kernel, nodeObject, ProcessID, gcvTRUE, gcvFALSE);
    }

    /* Fix: 'address' is initialized to gcvINVALID_ADDRESS (non-zero), so the
     * previous 'if (address)' ran the GPU unlock even when the GPU lock had
     * failed or never happened. Only unlock when a real address was produced.
     */
    if (address != gcvINVALID_ADDRESS)
    {
        gckVIDMEM_NODE_Unlock(Kernel, nodeObject, ProcessID, &asynchronous);

        if (asynchronous)
        {
            /* Second call performs the deferred part of the unlock. */
            gckVIDMEM_NODE_Unlock(Kernel, nodeObject, ProcessID, gcvNULL);
        }
    }

    if (referenced)
    {
        gckVIDMEM_NODE_Dereference(Kernel, nodeObject);
    }

    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
** _UnlockVideoMemory
**
** Unlock a video memory node.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** gctUINT32 ProcessID
** ProcessID of current process.
**
** gcsHAL_INTERFACE * Interface
** Pointer to a gcsHAL_INTERFACE structure that defines the command to
** be dispatched.
**
** OUTPUT:
**
** gcsHAL_INTERFACE * Interface
** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
** returned.
*/
static gceSTATUS
_UnlockVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN OUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE nodeObject;
    gcuVIDMEM_NODE_PTR node;
    gckVIDMEM_BLOCK vidMemBlock = gcvNULL;
    gctSIZE_T bytes;
    /* Default stays in effect if the OS option below is not present. */
    gctUINT64 mappingInOne = 1;

    gcmkHEADER_ARG("Kernel=%p ProcessID=%d",
                   Kernel, ProcessID);

    /* Preset outputs so failure paths report a well-defined result. */
    Interface->u.UnlockVideoMemory.pool = gcvPOOL_UNKNOWN;
    Interface->u.UnlockVideoMemory.bytes = 0;

    gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
        Kernel,
        ProcessID,
        (gctUINT32)Interface->u.UnlockVideoMemory.node,
        &nodeObject
        ));

    /* Best-effort query; return value intentionally ignored. */
    gckOS_QueryOption(Kernel->os, "allMapInOne", &mappingInOne);

    /* Unlock CPU. */
    gcmkONERROR(gckVIDMEM_NODE_UnlockCPU(
        Kernel, nodeObject, ProcessID, gcvTRUE, mappingInOne == 1));

    /* Unlock video memory; 'asynchroneous' tells the caller whether the GPU
     * side of the unlock was deferred (to be completed by the bottom half). */
    gcmkONERROR(gckVIDMEM_NODE_Unlock(
        Kernel,
        nodeObject,
        ProcessID,
        &Interface->u.UnlockVideoMemory.asynchroneous
        ));

    /* Leave deref handle and deref node in later operation. */
    node = nodeObject->node;
    vidMemBlock = node->VirtualChunk.parent;

    /* Determine the allocation size from whichever union member is active,
     * discriminated by the parent object's type tag (project idiom). */
    if (node->VidMem.parent->object.type == gcvOBJ_VIDMEM)
    {
        bytes = node->VidMem.bytes;
    }
    else if (vidMemBlock && vidMemBlock->object.type == gcvOBJ_VIDMEM_BLOCK)
    {
        bytes = node->VirtualChunk.bytes;
    }
    else
    {
        bytes = node->Virtual.bytes;
    }

    Interface->u.UnlockVideoMemory.pool = nodeObject->pool;
    Interface->u.UnlockVideoMemory.bytes = bytes;

#if gcdCAPTURE_ONLY_MODE
    Interface->u.UnlockVideoMemory.captureLogical = nodeObject->captureLogical;
#endif

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    /*
     * Unlikely to fail except for a bad node or an already-unlocked node;
     * no error rollback is required for those two conditions.
     */
    gcmkFOOTER();
    return status;
}
/*******************************************************************************
**
** _BottomHalfUnlockVideoMemory
**
** Unlock video memory from gpu.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** gctUINT32 ProcessID
** Process ID owning this memory.
**
**      gceVIDMEM_TYPE Type
**          Video memory allocation type.
**
** gctPOINTER Pointer
** Video memory to be unlock.
*/
static gceSTATUS
_BottomHalfUnlockVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gceVIDMEM_TYPE Type,
    IN gctUINT32 Node
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE nodeObj = gcvNULL;

    /* Note: 'Type' is unused here; kept for interface compatibility. */

    /* Drop the locked-memory record for this process first. */
    gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
        Kernel,
        ProcessID,
        gcvDB_VIDEO_MEMORY_LOCKED,
        gcmINT2PTR(Node)
        ));

    /* Resolve the userspace handle to its node object. */
    gcmkONERROR(gckVIDMEM_HANDLE_Lookup(
        Kernel,
        ProcessID,
        Node,
        &nodeObj
        ));

    /* Release the handle reference taken at lock time. */
    gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, Node);

    /* Complete the GPU unlock synchronously. */
    gcmkONERROR(gckVIDMEM_NODE_Unlock(Kernel, nodeObj, ProcessID, gcvNULL));

    /* Release the node reference taken at lock time. */
    gcmkONERROR(gckVIDMEM_NODE_Dereference(Kernel, nodeObj));

    return gcvSTATUS_OK;

OnError:
    return status;
}
static gceSTATUS
_WrapUserMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE wrappedNode = gcvNULL;
    gceDATABASE_TYPE dbType;
    gctUINT32 nodeHandle = 0;

    /* Wrap the user-supplied memory description into a video memory node. */
    gcmkONERROR(
        gckVIDMEM_NODE_WrapUserMemory(Kernel,
                                      &Interface->u.WrapUserMemory.desc,
                                      Interface->u.WrapUserMemory.type,
                                      &wrappedNode,
                                      &Interface->u.WrapUserMemory.bytes));

    /* Create handle representation for userspace. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Allocate(Kernel,
                                  wrappedNode,
                                  &nodeHandle));

    /* Compose the database type from base type, vidmem type and pool. */
    dbType = gcvDB_VIDEO_MEMORY
           | (wrappedNode->type << gcdDB_VIDEO_MEMORY_TYPE_SHIFT)
           | (wrappedNode->pool << gcdDB_VIDEO_MEMORY_POOL_SHIFT);

    /* Track the wrapped allocation in the per-process database. */
    gcmkONERROR(
        gckKERNEL_AddProcessDB(Kernel,
                               ProcessID,
                               dbType,
                               gcmINT2PTR(nodeHandle),
                               gcvNULL,
                               (gctSIZE_T)Interface->u.WrapUserMemory.bytes));

    /* Hand the new handle back to userspace. */
    Interface->u.WrapUserMemory.node = nodeHandle;
    return gcvSTATUS_OK;

OnError:
    /* Roll back: drop the handle first, then the node reference. */
    if (nodeHandle)
    {
        gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, nodeHandle);
    }

    if (wrappedNode)
    {
        gckVIDMEM_NODE_Dereference(Kernel, wrappedNode);
    }

    return status;
}
static gceSTATUS
_ExportVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE exportNode = gcvNULL;

    /* Resolve the node handle, then export it as a file descriptor. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Lookup(Kernel,
                                ProcessID,
                                Interface->u.ExportVideoMemory.node,
                                &exportNode));

    gcmkONERROR(
        gckVIDMEM_NODE_Export(Kernel,
                              exportNode,
                              Interface->u.ExportVideoMemory.flags,
                              gcvNULL,
                              &Interface->u.ExportVideoMemory.fd));

OnError:
    /* On success this returns the (OK) status of the last call above. */
    return status;
}
static gceSTATUS
_NameVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE namedNode = gcvNULL;

    /* Resolve the handle, then assign the node a global name usable by
     * other processes for import. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Lookup(Kernel,
                                ProcessID,
                                Interface->u.NameVideoMemory.handle,
                                &namedNode));

    gcmkONERROR(
        gckVIDMEM_NODE_Name(Kernel,
                            namedNode,
                            &Interface->u.NameVideoMemory.name));

OnError:
    /* On success this returns the (OK) status of the last call above. */
    return status;
}
static gceSTATUS
_ImportVideoMemory(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE importedNode = gcvNULL;
    gctUINT32 nodeHandle = 0;

    /* Import a node previously published under a global name. */
    gcmkONERROR(
        gckVIDMEM_NODE_Import(Kernel,
                              Interface->u.ImportVideoMemory.name,
                              &importedNode));

    /* Create handle representation for userspace. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Allocate(Kernel,
                                  importedNode,
                                  &nodeHandle));

    /* Track the import in the per-process database (size recorded as 0). */
    gcmkONERROR(
        gckKERNEL_AddProcessDB(Kernel,
                               ProcessID,
                               gcvDB_VIDEO_MEMORY,
                               gcmINT2PTR(nodeHandle),
                               gcvNULL,
                               0));

    Interface->u.ImportVideoMemory.handle = nodeHandle;
    return gcvSTATUS_OK;

OnError:
    /* Roll back: drop the handle first, then the node reference. */
    if (nodeHandle)
    {
        gckVIDMEM_HANDLE_Dereference(Kernel, ProcessID, nodeHandle);
    }

    if (importedNode)
    {
        gckVIDMEM_NODE_Dereference(Kernel, importedNode);
    }

    return status;
}
/*******************************************************************************
**
** gckKERNEL_SetVidMemMetadata
**
** Set/Get metadata to/from gckVIDMEM_NODE object.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** gctUINT32 ProcessID
** ProcessID of current process.
**
** INOUT:
**
** gcsHAL_INTERFACE * Interface
** Pointer to a interface structure
*/
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include <linux/dma-buf.h>
gceSTATUS
_SetVidMemMetadata(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    INOUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status = gcvSTATUS_NOT_SUPPORTED;
    gckVIDMEM_NODE nodeObj = gcvNULL;

    gcmkHEADER_ARG("Kernel=%p ProcessID=%d", Kernel, ProcessID);

    gcmkONERROR(gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Interface->u.SetVidMemMetadata.node, &nodeObj));

    if (Interface->u.SetVidMemMetadata.readback)
    {
        /* Readback mode: copy the node's stored metadata out to userspace. */
        Interface->u.SetVidMemMetadata.ts_fd = nodeObj->metadata.ts_fd;
        Interface->u.SetVidMemMetadata.fc_enabled = nodeObj->metadata.fc_enabled;
        Interface->u.SetVidMemMetadata.fc_value = nodeObj->metadata.fc_value;
        Interface->u.SetVidMemMetadata.fc_value_upper = nodeObj->metadata.fc_value_upper;
        Interface->u.SetVidMemMetadata.compressed = nodeObj->metadata.compressed;
        Interface->u.SetVidMemMetadata.compress_format = nodeObj->metadata.compress_format;
    }
    else
    {
#ifdef gcdANDROID
        /* Android: lazily resolve the tile-status GPU address from the
         * companion tsNode the first time metadata is set. */
        if (nodeObj->metadata.ts_address == 0 && nodeObj->tsNode != NULL)
        {
            gctUINT32 PhysicalAddress = 0;

            /* Lock for GPU address. */
            gcmkONERROR(gckVIDMEM_NODE_Lock(Kernel, nodeObj->tsNode, &PhysicalAddress));

            nodeObj->metadata.ts_address = (
                PhysicalAddress + Kernel->hardware->baseAddress);

            gcmkONERROR(gckVIDMEM_NODE_Unlock(Kernel, nodeObj->tsNode, ProcessID, gcvNULL));
        }
#else
        /* Non-Android: validate the user-supplied tile-status dma-buf fd.
         * The get/put pair only verifies the fd refers to a real dma-buf;
         * no long-term reference is kept here. */
        nodeObj->metadata.ts_fd = Interface->u.SetVidMemMetadata.ts_fd;

        if (nodeObj->metadata.ts_fd >= 0)
        {
            nodeObj->metadata.ts_dma_buf = dma_buf_get(nodeObj->metadata.ts_fd);

            if (IS_ERR(nodeObj->metadata.ts_dma_buf))
            {
                gcmkONERROR(gcvSTATUS_NOT_FOUND);
            }

            dma_buf_put(nodeObj->metadata.ts_dma_buf);
        }
        else
        {
            nodeObj->metadata.ts_dma_buf = NULL;
        }
#endif

        /* Store the fast-clear / compression metadata on the node. */
        nodeObj->metadata.fc_enabled = Interface->u.SetVidMemMetadata.fc_enabled;
        nodeObj->metadata.fc_value = Interface->u.SetVidMemMetadata.fc_value;
        nodeObj->metadata.fc_value_upper = Interface->u.SetVidMemMetadata.fc_value_upper;
        nodeObj->metadata.compressed = Interface->u.SetVidMemMetadata.compressed;
        nodeObj->metadata.compress_format = Interface->u.SetVidMemMetadata.compress_format;
    }

OnError:
    /* Success also flows through here; 'status' is OK from the last
     * successful gcmkONERROR expression in that case. */
    gcmkFOOTER();
    return status;
}
#else
/* Stub used when the kernel lacks CONFIG_DMA_SHARED_BUFFER: metadata
 * set/get requires dma-buf support, so report the operation unsupported. */
gceSTATUS
_SetVidMemMetadata(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    INOUT gcsHAL_INTERFACE * Interface
    )
{
    gcmkFATAL("The kernel did NOT support CONFIG_DMA_SHARED_BUFFER");
    return gcvSTATUS_NOT_SUPPORTED;
}
#endif
static gceSTATUS
_GetVideoMemoryFd(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE fdNode = gcvNULL;

    /* Resolve the handle and create a file descriptor for the node. */
    gcmkONERROR(
        gckVIDMEM_HANDLE_Lookup(Kernel,
                                ProcessID,
                                Interface->u.GetVideoMemoryFd.handle,
                                &fdNode));

    gcmkONERROR(
        gckVIDMEM_NODE_GetFd(Kernel,
                             fdNode,
                             &Interface->u.GetVideoMemoryFd.fd));

    /* No process-database record is needed: the OS closes all fds
    ** automatically when the process exits.
    */

OnError:
    /* On success this returns the (OK) status of the last call above. */
    return status;
}
gceSTATUS
gckKERNEL_QueryDatabase(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN OUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gctINT idx;
    /* Per-pool database types queried in the loop below. */
    gceDATABASE_TYPE poolTypes[2] = {
        gcvDB_VIDEO_MEMORY | (gcvPOOL_SYSTEM << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
        gcvDB_VIDEO_MEMORY | (gcvPOOL_VIRTUAL << gcdDB_VIDEO_MEMORY_POOL_SHIFT),
    };

    gcmkHEADER();

    /* Total video memory usage. */
    gcmkONERROR(
        gckKERNEL_QueryProcessDB(Kernel,
                                 Interface->u.Database.processID,
                                 !Interface->u.Database.validProcessID,
                                 gcvDB_VIDEO_MEMORY,
                                 &Interface->u.Database.vidMem));

    /* Non-paged memory usage. */
    gcmkONERROR(
        gckKERNEL_QueryProcessDB(Kernel,
                                 Interface->u.Database.processID,
                                 !Interface->u.Database.validProcessID,
                                 gcvDB_NON_PAGED,
                                 &Interface->u.Database.nonPaged));

    /* Contiguous memory usage. */
    gcmkONERROR(
        gckKERNEL_QueryProcessDB(Kernel,
                                 Interface->u.Database.processID,
                                 !Interface->u.Database.validProcessID,
                                 gcvDB_CONTIGUOUS,
                                 &Interface->u.Database.contiguous));

    /* GPU idle time. */
    gcmkONERROR(
        gckKERNEL_QueryProcessDB(Kernel,
                                 Interface->u.Database.processID,
                                 !Interface->u.Database.validProcessID,
                                 gcvDB_IDLE,
                                 &Interface->u.Database.gpuIdle));

    /* Per-pool video memory usage (system, then virtual). */
    for (idx = 0; idx < 2; idx++)
    {
        gcmkONERROR(
            gckKERNEL_QueryProcessDB(Kernel,
                                     Interface->u.Database.processID,
                                     !Interface->u.Database.validProcessID,
                                     poolTypes[idx],
                                     &Interface->u.Database.vidMemPool[idx]));
    }

#if gcmIS_DEBUG(gcdDEBUG_TRACE)
    gckKERNEL_DumpVidMemUsage(Kernel, Interface->u.Database.processID);
#endif

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    gcmkFOOTER();
    return status;
}
gceSTATUS
gckKERNEL_ConfigPowerManagement(
    IN gckKERNEL Kernel,
    IN OUT gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gctBOOL enable = Interface->u.ConfigPowerManagement.enable;

    gcmkHEADER();

    /* Apply the requested power-management setting to the hardware. */
    gcmkONERROR(gckHARDWARE_EnablePowerManagement(Kernel->hardware, enable));

    if (!enable)
    {
        /* With power management disabled, force the GPU to stay on. */
        gcmkONERROR(
            gckHARDWARE_SetPowerState(Kernel->hardware, gcvPOWER_ON));
    }

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:
    gcmkFOOTER();
    return status;
}
/* Perform a CPU cache maintenance operation (flush/clean/invalidate/barrier)
 * on the memory backing a video memory node. Pool-managed (gcvOBJ_VIDMEM)
 * nodes are skipped with a one-time notice; virtual-chunk and virtual nodes
 * resolve to their physical handle and offset before dispatching to the OS
 * cache primitives. */
static gceSTATUS
gckKERNEL_CacheOperation(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gctUINT32 Node,
    IN gceCACHEOPERATION Operation,
    IN gctPOINTER Logical,
    IN gctSIZE_T Bytes
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE nodeObject = gcvNULL;
    gcuVIDMEM_NODE_PTR node = gcvNULL;
    gckVIDMEM_BLOCK vidMemBlock = gcvNULL;
    /* Offset stays 0 for plain virtual nodes. */
    gctSIZE_T offset = 0;
    void *memHandle;

    gcmkHEADER_ARG("Kernel=%p pid=%u Node=%u op=%d Logical=%p Bytes=0x%lx",
                   Kernel, ProcessID, Node, Operation, Logical, Bytes);

    gcmkONERROR(gckVIDMEM_HANDLE_Lookup(Kernel,
                                        ProcessID,
                                        Node,
                                        &nodeObject));

    node = nodeObject->node;
    vidMemBlock = node->VirtualChunk.parent;

    /* Discriminate the node kind by the parent object's type tag. */
    if (node->VidMem.parent->object.type == gcvOBJ_VIDMEM)
    {
        /* One-time notice that pool memory cache ops are no-ops here. */
        static gctBOOL printed;

        if (!printed)
        {
            printed = gcvTRUE;
            gcmkPRINT("[galcore]: %s: Flush Video Memory", __FUNCTION__);
        }

        gcmkFOOTER_NO();
        return gcvSTATUS_OK;
    }
    else if (vidMemBlock && vidMemBlock->object.type == gcvOBJ_VIDMEM_BLOCK)
    {
        /* Chunk within a larger block: operate at the chunk's offset. */
        memHandle = vidMemBlock->physical;
        offset = node->VirtualChunk.offset;
    }
    else
    {
        memHandle = node->Virtual.physical;
    }

    switch (Operation)
    {
    case gcvCACHE_FLUSH:
        /* Clean and invalidate the cache. */
        status = gckOS_CacheFlush(Kernel->os,
                                  ProcessID,
                                  memHandle,
                                  offset,
                                  Logical,
                                  Bytes);
        break;

    case gcvCACHE_CLEAN:
        /* Clean the cache. */
        status = gckOS_CacheClean(Kernel->os,
                                  ProcessID,
                                  memHandle,
                                  offset,
                                  Logical,
                                  Bytes);
        break;

    case gcvCACHE_INVALIDATE:
        /* Invalidate the cache. */
        status = gckOS_CacheInvalidate(Kernel->os,
                                       ProcessID,
                                       memHandle,
                                       offset,
                                       Logical,
                                       Bytes);
        break;

    case gcvCACHE_MEMORY_BARRIER:
        status = gckOS_MemoryBarrier(Kernel->os, Logical);
        break;

    default:
        gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
        break;
    }

OnError:
    /* Success also flows through here with the status of the chosen op. */
    gcmkFOOTER();
    return status;
}
/* Wait until the GPU has passed this node's fence on every engine.
 * For each engine, the fence memory is invalidated and the node's commit
 * stamp compared against the fence value; if the GPU is behind, the caller
 * is parked on the engine's waiting list until signaled or timed out.
 * A timeout/wait status from the last engine is propagated to the caller.
 */
static gceSTATUS
_WaitFence(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gcsHAL_INTERFACE * Interface
    )
{
    gceSTATUS status;
    gckVIDMEM_NODE node = gcvNULL;
    gctBOOL referenced = gcvFALSE;
    gckCOMMAND command = Kernel->command;
    gckCOMMAND asyncCommand = Kernel->asyncCommand;
    gckFENCE fence = gcvNULL;
    gctUINT i;

    gcmkASSERT(command != gcvNULL);

    gcmkONERROR(
        gckVIDMEM_HANDLE_Lookup(Kernel,
                                ProcessID,
                                Interface->u.WaitFence.handle,
                                &node));

    /* Keep the node alive while we wait on its fences. */
    gcmkONERROR(gckVIDMEM_NODE_Reference(Kernel, node));
    referenced = gcvTRUE;

    /* Wait for fence of all engines. */
    for (i = 0; i < gcvENGINE_GPU_ENGINE_COUNT; i++)
    {
        gckFENCE_SYNC sync = &node->sync[i];

        if (i == gcvENGINE_RENDER)
        {
            fence = command->fence;
        }
        else
        {
            fence = asyncCommand->fence;
        }

        /* Re-read the fence value the GPU last wrote. */
        gcmkONERROR(gckVIDMEM_NODE_InvalidateCache(
            Kernel,
            fence->videoMem,
            0,
            fence->logical,
            8
            ));

        if (sync->commitStamp <= *(gctUINT64_PTR)fence->logical)
        {
            /* GPU already passed this node's commit on this engine. */
            continue;
        }
        else
        {
            /* Reset the signal, then park on the fence waiting list. */
            gckOS_Signal(Kernel->os, sync->signal, gcvFALSE);

            gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, &fence->mutex, gcvINFINITE));

            /* Add to waiting list. */
            gcsLIST_AddTail(&sync->head, &fence->waitingList);

            gcmkASSERT(sync->inList == gcvFALSE);

            sync->inList = gcvTRUE;

            gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, &fence->mutex));

            /* Wait; a timeout here is carried in 'status' and propagated. */
            status = gckOS_WaitSignal(
                Kernel->os,
                sync->signal,
                gcvTRUE,
                Interface->u.WaitFence.timeOut
                );

            gcmkVERIFY_OK(gckOS_AcquireMutex(Kernel->os, &fence->mutex, gcvINFINITE));

            /* Remove ourselves if the fence interrupt did not already. */
            if (sync->inList)
            {
                gcsLIST_Del(&sync->head);
                sync->inList = gcvFALSE;
            }

            gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, &fence->mutex));
        }
    }

    gckVIDMEM_NODE_Dereference(Kernel, node);

    /* Propagate the last wait status (e.g. timeout), matching the original
     * fall-through behavior. */
    return status;

OnError:
    /* Fix: the reference taken above previously leaked on every error path;
     * release it here. */
    if (referenced)
    {
        gckVIDMEM_NODE_Dereference(Kernel, node);
    }

    return status;
}
static gceSTATUS
_Commit(
IN gckDEVICE Device,
IN gceHARDWARE_TYPE HwType,
IN gceENGINE Engine,
IN gctUINT32 ProcessId,
IN OUT gcsHAL_COMMIT * Commit
)
{
gceSTATUS status;
gcsHAL_SUBCOMMIT *subCommit = &Commit->subCommit;
gcsHAL_SUBCOMMIT _subCommit;
gctPOINTER userPtr = gcvNULL;
gctBOOL needCopy = gcvFALSE;
gckKERNEL kernel;
gcmkVERIFY_OK(gckOS_QueryNeedCopy(Device->os, ProcessId, &needCopy));
do
{
gckCOMMAND command;
gckEVENT eventObj;
gctUINT64 next;
/* Skip the first nested sub-commit struct. */
if (userPtr)
{
/* Copy/map sub-commit from user. */
if (needCopy)
{
subCommit = &_subCommit;
status = gckOS_CopyFromUserData(
Device->os,
subCommit,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT)
);
}
else
{
status = gckOS_MapUserPointer(
Device->os,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT),
(gctPOINTER *)&subCommit
);
}
if (gcmIS_ERROR(status))
{
userPtr = gcvNULL;
gcmkONERROR(status);
}
}
if (subCommit->coreId >= gcvCORE_COUNT)
{
gcmkONERROR(gcvSTATUS_INVALID_ARGUMENT);
}
/* Determine the objects. */
if (HwType == gcvHARDWARE_3D || HwType == gcvHARDWARE_3D2D || HwType == gcvHARDWARE_VIP)
{
kernel = Device->coreInfoArray[subCommit->coreId].kernel;
}
else
{
kernel = Device->map[HwType].kernels[subCommit->coreId];
}
if (Engine == gcvENGINE_BLT)
{
command = kernel->asyncCommand;
eventObj = kernel->asyncEvent;
}
else
{
command = kernel->command;
eventObj = kernel->eventObj;
}
{
/* Commit command buffers. */
status = gckCOMMAND_Commit(command,
subCommit,
ProcessId,
Commit->shared,
&Commit->commitStamp,
&Commit->contextSwitched);
if (status != gcvSTATUS_INTERRUPTED)
{
gcmkONERROR(status);
}
/* Commit events. */
status = gckEVENT_Commit(
eventObj,
gcmUINT64_TO_PTR(subCommit->queue),
kernel->hardware->options.powerManagement
);
if (status != gcvSTATUS_INTERRUPTED)
{
gcmkONERROR(status);
}
}
next = subCommit->next;
/* Unmap user pointer if mapped. */
if (!needCopy && userPtr)
{
gcmkVERIFY_OK(gckOS_UnmapUserPointer(
Device->os,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT),
subCommit
));
}
/* Advance to next sub-commit from user. */
userPtr = gcmUINT64_TO_PTR(next);
}
while (userPtr);
subCommit = &Commit->subCommit;
userPtr = gcvNULL;
if (HwType == gcvHARDWARE_3D || HwType == gcvHARDWARE_3D2D || HwType == gcvHARDWARE_VIP)
{
kernel = Device->coreInfoArray[subCommit->coreId].kernel;
}
else
{
kernel = Device->map[HwType].kernels[subCommit->coreId];
}
if (!kernel->hardware->options.gpuProfiler || !kernel->profileEnable)
{
return gcvSTATUS_OK;
}
do
{
gctUINT64 next;
/* Skip the first nested sub-commit struct. */
if (userPtr)
{
/* Copy/map sub-commit from user. */
if (needCopy)
{
subCommit = &_subCommit;
status = gckOS_CopyFromUserData(
Device->os,
subCommit,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT)
);
}
else
{
status = gckOS_MapUserPointer(
Device->os,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT),
(gctPOINTER *)&subCommit
);
}
if (gcmIS_ERROR(status))
{
userPtr = gcvNULL;
gcmkONERROR(status);
}
}
if (HwType == gcvHARDWARE_3D || HwType == gcvHARDWARE_3D2D || HwType == gcvHARDWARE_VIP)
{
kernel = Device->coreInfoArray[subCommit->coreId].kernel;
}
else
{
kernel = Device->map[HwType].kernels[subCommit->coreId];
}
if ((kernel->hardware->options.gpuProfiler == gcvTRUE) &&
(kernel->profileEnable == gcvTRUE))
{
gcmkONERROR(gckCOMMAND_Stall(kernel->command, gcvTRUE));
if (kernel->command->currContext)
{
gcmkONERROR(gckHARDWARE_UpdateContextProfile(
kernel->hardware,
kernel->command->currContext));
}
}
next = subCommit->next;
/* Unmap user pointer if mapped. */
if (!needCopy && userPtr)
{
gcmkVERIFY_OK(gckOS_UnmapUserPointer(
Device->os,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT),
subCommit
));
}
/* Advance to next sub-commit from user. */
userPtr = gcmUINT64_TO_PTR(next);
}
while (userPtr);
return gcvSTATUS_OK;
OnError:
if (!needCopy && userPtr)
{
gckOS_UnmapUserPointer(
Device->os,
userPtr,
gcmSIZEOF(gcsHAL_SUBCOMMIT),
subCommit
);
}
return status;
}
#ifdef __linux__
/* Per-fd bookkeeping for an exported graphic buffer. Freed via the fd
 * release callback (_FdReleaseGraphicBuffer). NOTE(review): the struct tag
 * spells "GRRAPHIC" and "PARCLE" (presumably "graphic"/"parcel"); renaming
 * would touch all users, so the typos are kept. */
typedef struct _gcsGRRAPHIC_BUFFER_PARCLE
{
    /* Common fd-private header; its release callback tears this parcel down. */
    gcsFDPRIVATE base;
    /* Kernel that owns the referenced nodes. */
    gckKERNEL kernel;
    /* Up to three referenced video memory nodes backing the buffer. */
    gckVIDMEM_NODE node[3];
    /* Optional mapped shared buffer (0/NULL if none). */
    gctSHBUF shBuf;
    /* Optional user signal id (0 if none). */
    gctINT32 signal;
}
gcsGRAPHIC_BUFFER_PARCLE;
static void
_ReleaseGraphicBuffer(
    gckKERNEL Kernel,
    gcsGRAPHIC_BUFFER_PARCLE * Parcle
    )
{
    gctUINT index;

    /* Drop every node reference held by this parcel. */
    for (index = 0; index < 3; index++)
    {
        if (Parcle->node[index])
        {
            gckVIDMEM_NODE_Dereference(Kernel, Parcle->node[index]);
        }
    }

    /* Release the shared buffer mapping, if any. */
    if (Parcle->shBuf)
    {
        gckKERNEL_DestroyShBuffer(Kernel, Parcle->shBuf);
    }

    /* Destroy the mapped user signal, if any. */
    if (Parcle->signal)
    {
        gckOS_DestroyUserSignal(Kernel->os, Parcle->signal);
    }

    /* Finally free the parcel itself. */
    gcmkOS_SAFE_FREE(Kernel->os, Parcle);
}
static gctINT
_FdReleaseGraphicBuffer(
    gcsFDPRIVATE_PTR Private
    )
{
    /* fd release callback: recover the parcel and tear it down. */
    gcsGRAPHIC_BUFFER_PARCLE * parcel = (gcsGRAPHIC_BUFFER_PARCLE *) Private;

    _ReleaseGraphicBuffer(parcel->kernel, parcel);

    return 0;
}
/* Bundle up to three video memory nodes, an optional shared buffer and an
 * optional user signal into a parcel, and expose it as a file descriptor
 * so lifetime follows the fd. On error, all partially-taken references are
 * released via _ReleaseGraphicBuffer. */
static gceSTATUS
_GetGraphicBufferFd(
    IN gckKERNEL Kernel,
    IN gctUINT32 ProcessID,
    IN gctUINT32 Node[3],
    IN gctUINT64 ShBuf,
    IN gctUINT64 Signal,
    OUT gctINT32 * Fd
    )
{
    gceSTATUS status;
    gctUINT i;
    gcsGRAPHIC_BUFFER_PARCLE * parcle = gcvNULL;

    gcmkONERROR(gckOS_Allocate(
        Kernel->os,
        gcmSIZEOF(gcsGRAPHIC_BUFFER_PARCLE),
        (gctPOINTER *)&parcle
        ));

    gckOS_ZeroMemory(parcle, sizeof(gcsGRAPHIC_BUFFER_PARCLE));

    /* Wire the fd release callback and owning kernel. */
    parcle->base.release = _FdReleaseGraphicBuffer;
    parcle->kernel = Kernel;

    /* Reference each provided node; a zero handle terminates the list. */
    for (i = 0; i < 3 && Node[i] != 0; i++)
    {
        gckVIDMEM_NODE nodeObject = gcvNULL;

        gcmkONERROR(
            gckVIDMEM_HANDLE_Lookup(Kernel, ProcessID, Node[i], &nodeObject));

        gcmkONERROR(gckVIDMEM_NODE_Reference(Kernel, nodeObject));

        parcle->node[i] = nodeObject;
    }

    if (ShBuf)
    {
        gctSHBUF shBuf = gcmUINT64_TO_PTR(ShBuf);

        gcmkONERROR(gckKERNEL_MapShBuffer(Kernel, shBuf));
        parcle->shBuf = shBuf;
    }

    if (Signal)
    {
        gctSIGNAL signal = gcmUINT64_TO_PTR(Signal);

        /* NOTE(review): gckOS_MapSignal returns the mapped signal in
         * 'signal', but the parcel stores the original 'Signal' id —
         * presumably user signals are identified by integer id; confirm
         * against gckOS_DestroyUserSignal in _ReleaseGraphicBuffer. */
        gcmkONERROR(
            gckOS_MapSignal(Kernel->os,
                            signal,
                            (gctHANDLE)(gctUINTPTR_T)ProcessID,
                            &signal));

        parcle->signal= (gctINT32)Signal;
    }

    /* Publish the parcel as a file descriptor. */
    gcmkONERROR(gckOS_GetFd("viv-gr", &parcle->base, Fd));

    return gcvSTATUS_OK;

OnError:
    if (parcle)
    {
        /* Releases any references taken above and frees the parcel. */
        _ReleaseGraphicBuffer(Kernel, parcle);
    }
    return status;
}
#endif
/*******************************************************************************
**
** gckKERNEL_Dispatch
**
** Dispatch a command received from the user HAL layer.
**
** INPUT:
**
** gckKERNEL Kernel
** Pointer to an gckKERNEL object.
**
** gcsHAL_INTERFACE * Interface
** Pointer to a gcsHAL_INTERFACE structure that defines the command to
** be dispatched.
**
** OUTPUT:
**
** gcsHAL_INTERFACE * Interface
** Pointer to a gcsHAL_INTERFACE structure that receives any data to be
** returned.
*/
gceSTATUS
gckKERNEL_Dispatch(
IN gckKERNEL Kernel,
IN gckDEVICE Device,
IN OUT gcsHAL_INTERFACE * Interface
)
{
gceSTATUS status = gcvSTATUS_OK;
gctPHYS_ADDR physical = gcvNULL;
gctSIZE_T bytes;
gctPOINTER logical = gcvNULL;
#if (gcdENABLE_3D)
gckCONTEXT context = gcvNULL;
#endif
gckKERNEL kernel = Kernel;
gctUINT32 processID;
#if !USE_NEW_LINUX_SIGNAL
gctSIGNAL signal;
#endif
gctBOOL powerMutexAcquired = gcvFALSE;
gctBOOL commitMutexAcquired = gcvFALSE;
gctBOOL idle = gcvFALSE;
gcmkHEADER_ARG("Kernel=%p Interface=%p", Kernel, Interface);
/* Verify the arguments. */
gcmkVERIFY_OBJECT(Kernel, gcvOBJ_KERNEL);
gcmkVERIFY_ARGUMENT(Interface != gcvNULL);
#if gcmIS_DEBUG(gcdDEBUG_TRACE)
gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_KERNEL,
"Dispatching command %d (%s)",
Interface->command, _DispatchText[Interface->command]);
gcmSTATIC_ASSERT(gcvHAL_DESTROY_MMU == gcmCOUNTOF(_DispatchText) - 1,
"DispatchText array does not match command codes");
#endif
#if QNX_SINGLE_THREADED_DEBUGGING
gckOS_AcquireMutex(Kernel->os, Kernel->debugMutex, gcvINFINITE);
#endif
/* Get the current process ID. */
gcmkONERROR(gckOS_GetProcessID(&processID));
/* Dispatch on command. */
switch (Interface->command)
{
case gcvHAL_GET_BASE_ADDRESS:
/* Get base address. */
Interface->u.GetBaseAddress.baseAddress = Kernel->hardware->baseAddress;
Interface->u.GetBaseAddress.flatMappingRangeCount = Kernel->mmu->gpuPhysicalRangeCount;
if (Kernel->mmu->gpuPhysicalRangeCount)
{
gckOS_MemCopy(Interface->u.GetBaseAddress.flatMappingRanges, Kernel->mmu->gpuPhysicalRanges,
gcmSIZEOF(gcsFLAT_MAPPING_RANGE) * Kernel->mmu->gpuPhysicalRangeCount);
}
break;
case gcvHAL_QUERY_VIDEO_MEMORY:
/* Query video memory size. */
gcmkONERROR(gckKERNEL_QueryVideoMemory(Kernel, Interface));
break;
case gcvHAL_QUERY_CHIP_IDENTITY:
/* Query chip identity. */
gcmkONERROR(
gckHARDWARE_QueryChipIdentity(
Kernel->hardware,
&Interface->u.QueryChipIdentity));
break;
case gcvHAL_QUERY_CHIP_FREQUENCY:
/* Query chip clock. */
gcmkONERROR(
gckHARDWARE_QueryFrequency(Kernel->hardware));
Interface->u.QueryChipFrequency.mcClk = Kernel->hardware->mcClk;
Interface->u.QueryChipFrequency.shClk = Kernel->hardware->shClk;
break;
case gcvHAL_MAP_MEMORY:
physical = gcmINT2PTR(Interface->u.MapMemory.physName);
/* Map memory. */
gcmkONERROR(
gckKERNEL_MapMemory(Kernel,
physical,
(gctSIZE_T) Interface->u.MapMemory.bytes,
&logical));
Interface->u.MapMemory.logical = gcmPTR_TO_UINT64(logical);
gcmkVERIFY_OK(
gckKERNEL_AddProcessDB(Kernel,
processID, gcvDB_MAP_MEMORY,
logical,
physical,
(gctSIZE_T) Interface->u.MapMemory.bytes));
break;
case gcvHAL_UNMAP_MEMORY:
physical = gcmINT2PTR(Interface->u.UnmapMemory.physName);
gcmkVERIFY_OK(
gckKERNEL_RemoveProcessDB(Kernel,
processID, gcvDB_MAP_MEMORY,
gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical)));
/* Unmap memory. */
gcmkONERROR(
gckKERNEL_UnmapMemory(Kernel,
physical,
(gctSIZE_T) Interface->u.UnmapMemory.bytes,
gcmUINT64_TO_PTR(Interface->u.UnmapMemory.logical),
processID));
break;
case gcvHAL_ALLOCATE_NON_PAGED_MEMORY:
bytes = (gctSIZE_T) Interface->u.AllocateNonPagedMemory.bytes;
/* Allocate non-paged memory. */
gcmkONERROR(
gckOS_AllocateNonPagedMemory(
Kernel->os,
gcvTRUE,
gcvALLOC_FLAG_CONTIGUOUS,
&bytes,
&physical,
&logical));
Interface->u.AllocateNonPagedMemory.bytes = bytes;
Interface->u.AllocateNonPagedMemory.logical = gcmPTR_TO_UINT64(logical);
Interface->u.AllocateNonPagedMemory.physName = gcmPTR_TO_NAME(physical);
gcmkVERIFY_OK(
gckKERNEL_AddProcessDB(Kernel,
processID, gcvDB_NON_PAGED,
logical,
gcmINT2PTR(Interface->u.AllocateNonPagedMemory.physName),
bytes));
break;
case gcvHAL_FREE_NON_PAGED_MEMORY:
physical = gcmNAME_TO_PTR(Interface->u.FreeNonPagedMemory.physName);
gcmkVERIFY_OK(
gckKERNEL_RemoveProcessDB(Kernel,
processID, gcvDB_NON_PAGED,
gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical)));
/* Free non-paged memory. */
gcmkONERROR(
gckOS_FreeNonPagedMemory(Kernel->os,
physical,
gcmUINT64_TO_PTR(Interface->u.FreeNonPagedMemory.logical),
(gctSIZE_T) Interface->u.FreeNonPagedMemory.bytes));
gcmRELEASE_NAME(Interface->u.FreeNonPagedMemory.physName);
break;
case gcvHAL_ALLOCATE_LINEAR_VIDEO_MEMORY:
/* Allocate memory. */
gcmkONERROR(_AllocateLinearMemory(Kernel, processID, Interface));
break;
case gcvHAL_RELEASE_VIDEO_MEMORY:
/* Release video memory. */
gcmkONERROR(_ReleaseVideoMemory(
Kernel, processID,
(gctUINT32)Interface->u.ReleaseVideoMemory.node
));
break;
case gcvHAL_LOCK_VIDEO_MEMORY:
/* Lock video memory. */
gcmkONERROR(_LockVideoMemory(Kernel, Kernel->core, processID, Interface));
break;
case gcvHAL_UNLOCK_VIDEO_MEMORY:
/* Unlock video memory. */
gcmkONERROR(_UnlockVideoMemory(Kernel, processID, Interface));
break;
case gcvHAL_BOTTOM_HALF_UNLOCK_VIDEO_MEMORY:
gcmkERR_BREAK(_BottomHalfUnlockVideoMemory(Kernel, processID,
Interface->u.BottomHalfUnlockVideoMemory.type,
Interface->u.BottomHalfUnlockVideoMemory.node));
break;
case gcvHAL_EVENT_COMMIT:
if (!Interface->commitMutex)
{
gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
Kernel->device->commitMutex,
gcvINFINITE
));
commitMutexAcquired = gcvTRUE;
}
/* Commit an event queue. */
if (Interface->engine == gcvENGINE_BLT)
{
if (!gckHARDWARE_IsFeatureAvailable(Kernel->hardware, gcvFEATURE_ASYNC_BLIT))
{
gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
}
gcmkONERROR(gckEVENT_Commit(
Kernel->asyncEvent, gcmUINT64_TO_PTR(Interface->u.Event.queue), gcvFALSE));
}
else
{
gcmkONERROR(gckEVENT_Commit(
Kernel->eventObj, gcmUINT64_TO_PTR(Interface->u.Event.queue), gcvFALSE));
}
if (!Interface->commitMutex)
{
gcmkONERROR(gckOS_ReleaseMutex(Kernel->os, Kernel->device->commitMutex));
commitMutexAcquired = gcvFALSE;
}
break;
case gcvHAL_COMMIT:
if (!Interface->commitMutex)
{
gcmkONERROR(gckOS_AcquireMutex(Kernel->os,
Device->commitMutex,
gcvINFINITE
));
commitMutexAcquired = gcvTRUE;
}
gcmkONERROR(_Commit(Device,
Kernel->hardware->type,
Interface->engine,
processID,
&Interface->u.Commit));
if (!Interface->commitMutex)
{
gcmkVERIFY_OK(gckOS_ReleaseMutex(Kernel->os, Device->commitMutex));
commitMutexAcquired = gcvFALSE;
}
break;
#if !USE_NEW_LINUX_SIGNAL
case gcvHAL_USER_SIGNAL:
/* Dispatch depends on the user signal subcommands. */
switch(Interface->u.UserSignal.command)
{
case gcvUSER_SIGNAL_CREATE:
/* Create a signal used in the user space. */
gcmkONERROR(
gckOS_CreateUserSignal(Kernel->os,
Interface->u.UserSignal.manualReset,
&Interface->u.UserSignal.id));
gcmkVERIFY_OK(
gckKERNEL_AddProcessDB(Kernel,
processID, gcvDB_SIGNAL,
gcmINT2PTR(Interface->u.UserSignal.id),
gcvNULL,
0));
break;
case gcvUSER_SIGNAL_DESTROY:
gcmkVERIFY_OK(gckKERNEL_RemoveProcessDB(
Kernel,
processID, gcvDB_SIGNAL,
gcmINT2PTR(Interface->u