/* --- doxygen page header (scrape residue), preserved as a comment ---

   Logo Search packages:
   Sourcecode: maxdb-7.5.00 version File versions

   ven71.c

   Go to the documentation of this file.
*/
/*!
  @file           ven71.c
  @author         JoergM
  @brief          Kernel RunTime: Dispatcher and Queue Management
  @see            

\if EMIT_LICENCE

    ========== licence begin  GPL
    Copyright (c) 2001-2005 SAP AG

    This program is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
    ========== licence end



\endif
*/

#define MOD__  "ven71.c:"

#include "gen00.h"
#include "heo00.h"
#include "geo002.h"
#include "heo52.h"
#include "geo50_0.h"
#include "geo007_1.h"
#include "gen71.h"
#include "hen71.h"
#include "hen40.h"
#include "geo00_2.h"
#include "gen55.h" /* e55_remactive() */
#include "heo56.h" /* vsleep() */
#include "gen72.h" /* e72_wake() */
#include "gen73.h" /* e73_dl_enqu(), e73_dl_dequ(), e73_ioc_enqu() */ 
#include "gen75.h"
#include "RunTime/System/RTESys_MicroTime.h"
#include "hsp77.h"
#include "RunTime/System/RTESys_MemoryBarrier.h"
#include "RunTime/System/RTESys_AtomicOperation.h"

/*
 *  DEFINES
 */

#define INFINIT_TIME_VAL         (SAPDB_UInt8)-1

#define  WRN_INTERNAL_MICRO_TIMER_FAILED              N(903), WRN_TYPE,_T("TASKING "),_T("Dispatcher Timing wrong %s %qu > %s %qu")

#define DISPATCHER_LOOPS    \
        (this_ukt->NumRegWait && (XPARAM(ulRegionCollisionLoop) > \
                                 XPARAM(ulDispLoopsBefSemop))) \
        ? XPARAM(ulRegionCollisionLoop) : XPARAM(ulDispLoopsBefSemop) ;

/*
 *  At system termination, ignore all requests except for bufwriter.
 */
#define IGNORE_REQUEST_AT_SYSTEM_END() \
    if ( (KGS->state >= SERVER_SHUTDOWNREINIT) &&\
         (this_ukt->curr_task->type != TT_TW) ) continue ;


/* MEM_SYNC: only needed for the lock-free (no-spinlock) UKT-to-UKT
 * queue variant.  On ALPHA/I386 it routes the task pointer through
 * e76_mem_sync() (presumably to force a memory synchronization before
 * the pointer is used — confirm in ven76.c); elsewhere it is a no-op
 * that yields the task pointer unchanged. */
#ifdef SAPDB_NO_SPINLOCK_FOR_UKT2UKT
#if defined(ALPHA) || defined(I386)
extern  void                 *e76_mem_sync( struct TASK_TYPE * pTask );
#define MEM_SYNC( task_ )      e76_mem_sync( task_ )
#else
#define MEM_SYNC( task_ )      task_
#endif
#endif

/* Task search modes (see en71_findcom and friends). */
#define FIND_ALL_TASKS        1
#define FIND_PRIO_TASKS       2

/* local functions */

static  void       en71_findcom ( ten50_UKT_Control *this_ukt,
                                  time_t *new_timeout );
static  void       en71_freeUktOwnedRegions (ten50_UKT_Control * this_ukt);
static  void       en71_shutdownKill ( ten50_UKT_Control *this_ukt );
static SAPDB_Bool  en71TaskMoveCheck( ten50_UKT_Control**   srcUKT,
                                      ten50_UKT_Control**   dstUKT );
static void        en71MoveTask ( struct TASK_TYPE*       tcb,
                                  struct DOUBLY_LINKED*   request );
static void        en71SendMoveRequest ( ten50_UKT_Control*   srcUKT,
                                         ten50_UKT_Control*   dstUKT,
                                         SAPDB_UInt8          stopWaitForBestFitTaskAt );
static void        en71SendMoveMeassIntRequest();

static void        en71_StopUkt(SAPDB_Int4 uktIndex);

#ifdef DEBUG_SLEEP_STATE
static  void       en71_debugSleepState ( ten50_UKT_Control *this_ukt );
#endif

#ifndef SAPDB_NO_SPINLOCK_FOR_UKT2UKT
static  int        en71_xxxDequeue( struct XXX_QUEUE_HEAD * queueHead, struct XXX_QUEUE * copyOfItemToDequeue );
#endif

#ifdef DEB_RUNQ
#include "gen38.h" /* nocheck */
/* Debug trace for run-queue enqueue: logs the enqueued task, the
 * originating task (if any), the request type and the resulting
 * run-queue length. */
#define PRINT_ENQU(_Task, _FromTask, _Req)                                        \
{         if ( _FromTask )                                                        \
            {                                                                     \
                DBG1 (( MF__,"T%d added from T%d(%s) to r_que, len %d\n",         \
                  _Task->index,                                                   \
                  this_ukt->RunQueue[this_ukt->RunQueueLen].FromTask->index ,     \
                  e38_req_name ( _Req->req_type ),                                \
                  this_ukt->RunQueueLen+1 ));                                     \
            } else {                                                              \
                DBG1 (( MF__,"T%d added from T? to r_que, len %d\n",              \
                    _Task->index, this_ukt->RunQueueLen+1 ));                     \
            }                                                                     \
}

/* Sanity check (debug build): abort if the current task does not belong
 * to this UKT, then trace the task about to run. */
#define PRINT_RUN_TASK                                                            \
        if ( this_ukt->curr_task->ukt != this_ukt )                               \
          {                                                                       \
            DBG1 (( MF__,"T%d NOT in this UKP!\n", this_ukt->curr_task->index));  \
            vabort ( WRITE_CORE ) ;                                               \
          }                                                                       \
        DBG1 (( MF__,"RUN T%3d (%d) QueueLen: %d\n", this_ukt->curr_task->index,  \
                  this_ukt->curr_task->ulDynPrio, this_ukt->RunQueueLen )) ;
#else
/* Non-debug build: PRINT_ENQU expands to nothing.  It is declared as a
 * function-like macro so the argument list of an invocation such as
 * "PRINT_ENQU(t, f, r) ;" is swallowed too; the original object-like
 * definition left "(t, f, r) ;" behind as a (harmless but accidental)
 * comma-expression statement. */
#define PRINT_ENQU(_Task, _FromTask, _Req)
/* The ownership sanity check is kept even in non-debug builds. */
#define PRINT_RUN_TASK                                                            \
        if ( this_ukt->curr_task->ukt != this_ukt )                               \
          {                                                                       \
            DBG1 (( MF__,"T%d NOT in this UKP!\n", this_ukt->curr_task->index));  \
            vabort ( WRITE_CORE ) ;                                               \
          } 
#endif

/* Enqueue _Task into this UKT's run queue with request _Req coming from
 * _FromTask (may be NULL).  The dynamic priority is the caller-supplied
 * base plus PRIO_FACTOR when the task's prio flag is set.  When task
 * timing statistics are enabled, the enqueue timestamp is recorded so
 * the "runnable" wait time can be computed later.
 * NOTE(review): assumes 'this_ukt' is in scope at the expansion site
 * and that RunQueue has room (no bounds check here). */
#define ENQU_RUNQUEUE(_Task, _FromTask, _Req, _BasePrio)                          \
{                                                                                 \
    _Task->is_in_queue = TASK_IS_IN_RUN_QUEUE ;                                   \
    _Task->ulDynPrio = _BasePrio + _Task->prio_flag * PRIO_FACTOR ;               \
    this_ukt->RunQueue[this_ukt->RunQueueLen].Task = _Task;                       \
    this_ukt->RunQueue[this_ukt->RunQueueLen].FromTask = _FromTask;               \
    this_ukt->RunQueue[this_ukt->RunQueueLen].Req  = _Req;                        \
    PRINT_ENQU(_Task, _FromTask, _Req) ;                                          \
    this_ukt->RunQueueLen++ ;                                                     \
    if (( _Task->MoveTimeCollectionEnabled ) || ( _Task->TimeCollectionEnabled )) \
    {                                                                             \
        _Task->TaskStateStat.runqueueEnqueueTime = RTESys_MicroSecTimer();        \
    }                                                                             \
} 
        
/* Enqueue _Task into this UKT's run queue without an external request:
 * the task's own embedded queue element (_Task->qa) is used as the
 * request, with its request type cleared.  Otherwise mirrors
 * ENQU_RUNQUEUE (see above for the timing-statistics bookkeeping).
 * Fix: the original had whitespace AFTER the line-continuation
 * backslash on two lines ("\ " / "\    "); a backslash must be the last
 * character on the line to splice it, so the macro definition was
 * broken.  The stray trailing blanks are removed here. */
#define ENQU_RUNQUEUE_NO_REQUEST(_Task, _BasePrio)                                \
{                                                                                 \
    _Task->is_in_queue = TASK_IS_IN_RUN_QUEUE ;                                   \
    _Task->ulDynPrio   = _BasePrio + _Task->prio_flag * PRIO_FACTOR ;             \
    _Task->qa.req_type = 0;                                                       \
    this_ukt->RunQueue[this_ukt->RunQueueLen].Task = _Task;                       \
    this_ukt->RunQueue[this_ukt->RunQueueLen].FromTask = NULL;                    \
    this_ukt->RunQueue[this_ukt->RunQueueLen].Req  = &_Task->qa;                  \
    DBG1 (( MF__,"T%d added from T? to r_que, len %d\n",                          \
    _Task->index, this_ukt->RunQueueLen+1 ));                                     \
    this_ukt->RunQueueLen++ ;                                                     \
    if (( _Task->MoveTimeCollectionEnabled ) || ( _Task->TimeCollectionEnabled )) \
    {                                                                             \
        _Task->TaskStateStat.runqueueEnqueueTime = RTESys_MicroSecTimer();        \
    }                                                                             \
}

#define BASE_PRIO_U2U (XPARAM(lPrioBaseU2U))
#define BASE_PRIO_RAV (XPARAM(lPrioBaseRAV))
#define BASE_PRIO_IOC (XPARAM(lPrioBaseIOC))
#define BASE_PRIO_COM (XPARAM(lPrioBaseCOM))
#define BASE_PRIO_REX (XPARAM(lPrioBaseREX))
#define PRIO_FACTOR   (XPARAM(lPrioFactor))

/* Unlink 'task' from the UKT's singly linked chain of communication
 * tasks: 'this_com_task' points at the predecessor's next_in_com slot,
 * which is advanced past 'task'; the task's own link is cleared, the
 * last-element pointer is fixed up if 'task' was the tail, and the
 * element count is decremented.
 * NOTE(review): the assignment after the 'if' IS its body despite the
 * misleading flush-left indentation — only last_com_task is guarded. */
#define REMOVE_TASK_FROM_COM_QUEU( this_com_task, task ) \
    {   *this_com_task = task->next_in_com ;             \
        task->next_in_com = 0 ;                          \
        if ( this_ukt->last_com_task == task )           \
        this_ukt->last_com_task = NULL  ;                \
        this_ukt->act_com_elem-- ;                       \
    }

/* imported data */

extern  int                             e60_dbgdbglvl ;

/* local data */

static void en71_HandleTaskStatisticReset( ten50_UKT_Control *this_ukt,
                                           struct TASK_TYPE  *pTaskCtrl );

static void en71_HandleUKTStatisticReset( ten50_UKT_Control *this_ukt );

static void en71_DoEnterDispatcherTiming( ten50_UKT_Control *this_ukt,
                                          struct TASK_TYPE  *pTaskCtrl );

static void en71_DoLeaveDispatcherTiming(ten50_UKT_Control *this_ukt);

/*
 * ===========================================================================
 */
#if  defined(IA64) || defined(HP_IA64) || defined(_M_IA64)
extern void *RTE_IA64GetBsp(void);
#endif

00216 void    en71Dispatcher ( ten50_UKT_Control *this_ukt)
{
#undef  MF__
#define MF__ MOD__"en71_dispatcher"
    int                 rc ;
    ULONG              loop_count ;
    time_t             new_timeout ;
    struct TASK_TYPE   *pTaskCtrl           = this_ukt->curr_task;
    struct TASK_TYPE   *lastRunningTaskCtrl = pTaskCtrl;
    SAPDB_Bool         timeCollectionEnabled = KGS->fTimeMeasure;
#ifdef DEBUG_RTE
char eBuffer[256];

sp77sprintf(eBuffer, sizeof(eBuffer), "ENTER en71Dispatcher UKT%d curr_task T%d", 
       this_ukt->index, 
       (this_ukt->curr_task ? this_ukt->curr_task->index : 0) );
MSGD(( INFO_TEST_WILDCARD, eBuffer ));
#endif

    this_ukt->m_IsWaitingForNextScheduledTask = true; /* enforce spinlocks to use giveup timeslice while in dispatcher */

    en71_HandleUKTStatisticReset(this_ukt);

    /* PTS 1110829 */
    if ( 0 != pTaskCtrl )
    {
        DBGIN_T(pTaskCtrl->index);

        pTaskCtrl->disp_counter++;

        en71_HandleTaskStatisticReset(this_ukt, pTaskCtrl);

        pTaskCtrl->pCurrStackPointer = (SAPDB_UInt4 *)&rc;
#ifdef DEBUG_RTE
        if ( ((SAPDB_Byte *)pTaskCtrl->pCurrStackPointer) < pTaskCtrl->StackChunkStart
          || ((SAPDB_Byte *)pTaskCtrl->pCurrStackPointer) > pTaskCtrl->StackChunkEnd )
        {
            char sBuffer[256];
            tsp00_TaskId taskId;
            vgetpid(&taskId); 
            sp77sprintf(sBuffer, sizeof(sBuffer),
                            "Stack chunk UKT %d task T%d mismatch %p not [%p,%p] (T%d)", 
                            this_ukt->index, pTaskCtrl->index,
                            pTaskCtrl->pCurrStackPointer, pTaskCtrl->StackChunkStart,
                            pTaskCtrl->StackChunkEnd, taskId );
            MSGD(( INFO_TEST_WILDCARD, sBuffer ));
            while ( ((SAPDB_Byte *)pTaskCtrl->pCurrStackPointer) < pTaskCtrl->StackChunkStart
                 || ((SAPDB_Byte *)pTaskCtrl->pCurrStackPointer) > pTaskCtrl->StackChunkEnd )
              sleep(0);
        }
#endif
        /* PTS 1110953 + 1141648 */
#if defined(PA20W)
        if ( pTaskCtrl->maxStackPointer < (SAPDB_UInt4 *)&rc )
        {
            pTaskCtrl->maxStackPointer = (SAPDB_UInt4 *)&rc;
        }
#else
        if ( pTaskCtrl->maxStackPointer > (SAPDB_UInt4 *)&rc )
        {
            pTaskCtrl->maxStackPointer = (SAPDB_UInt4 *)&rc;
        }
#endif
#if  defined(IA64) || defined(HP_IA64) || defined(_M_IA64)
        pTaskCtrl->pCurrBackingStorePointer = (SAPDB_Byte *)RTE_IA64GetBsp();
        if ( pTaskCtrl->maxBackingStorePointer < pTaskCtrl->pCurrBackingStorePointer )
        {
            pTaskCtrl->maxBackingStorePointer = pTaskCtrl->pCurrBackingStorePointer;
        }
#endif

        en71_DoEnterDispatcherTiming(this_ukt, pTaskCtrl);
    }
    else
    {
        DBGIN_T(0);
    }

    this_ukt->disp_counter ++ ;


    loop_count = DISPATCHER_LOOPS ;

    for ( ; ; )
    {
        if ( loop_count > 0 )
        {
            --loop_count;
        }

        if ( KGS->state >= SERVER_SHUTDOWNREINIT ) 
        {
            MSGD (( INFO_DISP_SHUTKILL_STATE1, KGS->state ));
            en71_shutdownKill ( this_ukt );
        }

    /*
     *  Copy the debuglevel from the shared segment to the data segment.
     *  The debuglevel could be changed by the console driver utility.
     */
        e60_dbgdbglvl = KGS->debuglevel ;

        /******************************************************************/
        /*  the xxx queues are used for requests from other UKP's         */
        /*  and are as such only of interest when more than one UKP       */
        /*  is active                                                     */
        /******************************************************************/
        {
            if ( KGS->ulNumOfUKTs > 1 )
            {
                struct XXX_QUEUE_HEAD           * lxx ;
                struct XXX_QUEUE                * lxxx ;
                int                               ix ; 
                int                               EntryFound ;
#ifndef SAPDB_NO_SPINLOCK_FOR_UKT2UKT
                struct XXX_QUEUE                  activeRequest ;
#endif

                DBG1 (( MF__,"searching in XXX-Queue \n" ));
                /*
                 *  Step through the columns of the matrix.
                 */
                for ( ix = KGS->ulNumOfUKTs - 1 ; ix >= 0 ; ix -- )
                {
                    do 
                    {
                      lxx = this_ukt->xxx[ix];
                      if   ( ! lxx ) break ;

#ifndef SAPDB_NO_SPINLOCK_FOR_UKT2UKT

                      EntryFound = en71_xxxDequeue(lxx, &activeRequest);
                      if ( !EntryFound )
                      {
                          break; /* next XXX queue */
                      }

                      lxxx = &activeRequest;
#ifdef SAPDB_SLOW
/* SAPDB_SLOW */          if ( ! lxxx->FromTask )
/* SAPDB_SLOW */          {
/* SAPDB_SLOW */              MSGD(( INFO_REMOTEFUNC_NOT_SET, 0, lxxx->func_param )) ;
/* SAPDB_SLOW */              break;
/* SAPDB_SLOW */          }
/* SAPDB_SLOW */
/* SAPDB_SLOW */          if ( lxxx->task->ukt != this_ukt )
/* SAPDB_SLOW */          {
/* SAPDB_SLOW */              MSGD(( INFO_REMOTEFUNC_NOT_SET, -1, lxxx->func_param )) ;
/* SAPDB_SLOW */              break;
/* SAPDB_SLOW */          }
/* SAPDB_SLOW */
/* SAPDB_SLOW */          if ( ! lxxx->to_execute )
/* SAPDB_SLOW */          {
/* SAPDB_SLOW */              MSGD(( INFO_REMOTEFUNC_NOT_SET, lxxx->FromTask->index, lxxx->func_param )) ;
/* SAPDB_SLOW */              break;
/* SAPDB_SLOW */          }
#endif /* SAPDB_SLOW */

                      IGNORE_REQUEST_AT_SYSTEM_END() ;

                      if( this_ukt != lxxx->task->ukt )
                      {
                          /* The load balancing has moved the task to another UKT during
                             'Ukt2Ukt' enqueue operation. Now we forward this request.. */
                          en71EnqueueUkt2Ukt ( this_ukt , lxxx->task, lxxx->FromTask, 
                                               lxxx->to_execute, lxxx->func_param );
                          this_ukt->curr_task = NULL;
                      }
                      else
                      {
                        /* this_ukt->curr_task may be reset to 0 by the call... */
                        this_ukt->curr_task = lxxx->task;
                        lxxx->to_execute ( lxxx->func_param, lxxx->FromTask );
                      }

#else

                      /* Code without using spinlocks for XXX queue */
                      EntryFound = FALSE ;
                      /*
                       *  lxxx is a pointer to the first queue element.
                       */
                      lxxx = lxx->xx_work ;

                      if ( ( this_ukt->curr_task = MEM_SYNC ( lxxx->task ) ) != 0 )
                      {
                        int fLoopEnd ;
                        int LoopCnt = 0 ;
                        VOIDFUNC_PTR pSvFuncToExecute ;
                        /*
                         *  Dequeue the request
                         */
                        IGNORE_REQUEST_AT_SYSTEM_END() ;

                        if( this_ukt != this_ukt->curr_task->ukt )
                        {
                            en71EnqueueUkt2Ukt ( this_ukt , tcb , lxxx->FromTask, 
                                                 lxxx->to_execute, lxxx->func_param );
                            this_ukt->curr_task = NULL;
                        }
                        else
                        {
                            /*
                            *  Execute the request
                            */
                            /* PTS 1106134 */
                            do
                            {
                                if ( (pSvFuncToExecute = lxxx->to_execute) == NULL )
                                {
                                    MSGD(( INFO_REMOTEFUNC_NOT_SET, lxxx->FromTask->index, lxxx->func_param )) ;
                                    fLoopEnd = ++LoopCnt == 10 ;
                                    sqlyieldthread();
                                }
                                else
                                {
                                    pSvFuncToExecute = lxxx->to_execute;
                                    fLoopEnd = TRUE ;
                                }
                            }
                            while ( !fLoopEnd ) ;

                            pSvFuncToExecute ( lxxx->func_param, lxxx->FromTask );
                            lxxx->task = 0 ;
                            lxxx->to_execute = NULL; /* PTS 1000746 */
                            lxx->xx_work = lxxx->next ;
                            EntryFound = TRUE  ;
                        }

#endif /* SAPDB_NO_SPINLOCK_FOR_UKT2UKT */

                        /*
                         *  On behalf of the request a task could become runnable
                         *  otherwise this_ukt->curr_task is reset to 0
                         */
                        if ( this_ukt->curr_task )
                        {
                            DBG1 (( MF__,"XXX task    T%d  request %d \n", 
                                    this_ukt->curr_task->index , 
                                    (&this_ukt->curr_task->qa)->req_type ));
                            ENQU_RUNQUEUE(this_ukt->curr_task, lxxx->FromTask, 
                                    (&this_ukt->curr_task->qa), BASE_PRIO_U2U ) ;
                        }

#ifdef SAPDB_NO_SPINLOCK_FOR_UKT2UKT
                      } /* if ( lxxx->task != 0 ) */
#endif /* SAPDB_NO_SPINLOCK_FOR_UKT2UKT */

                    } while ( EntryFound ) ;
                } /* for */             
            } /* if ulNumOfUKTs > 1 */
        } /* XXX queue */

        /******************************************************************/
        /* search AIOC queue, add elemets to RUN_QUEU                     */
        /******************************************************************/
        while ( (this_ukt->curr_quu = this_ukt->aioc_queue.last->request) )
        {
            DBG1 (( MF__,"queue: %s, last->request = 0x%x\n", "AIOC", 
                                 this_ukt->aioc_queue.last->request )) ;
            this_ukt->aioc_queue.last->request = 0 ;                   
            this_ukt->aioc_queue.last    = this_ukt->aioc_queue.last->next; 
            IGNORE_REQUEST_AT_SYSTEM_END() ;

            DBG1 (( MF__,"queue: %s, task    T%d  request %d\n",
                    "AIOC", this_ukt->curr_quu->taskid ->index , 
                    this_ukt->curr_quu->req_type ));

            if ( this_ukt->curr_quu->req_type == REQ_ASYNIO )
            {
                this_ukt->curr_task = this_ukt->curr_quu->taskid ;

                DBG1 (( MF__,"REQ_ASYNIO, this_ukt 0x%x, quu->ukt 0x%x \n",
                                this_ukt , this_ukt->curr_task->ukt ));

                this_ukt->curr_quu->args.vai_parms.finished = TRUE ;
                /* task searches for finished io req. on call of 'vasynwait' */
                if ( this_ukt->curr_task->state != TSK_ASYNWAIT_READ &&
                     this_ukt->curr_task->state != TSK_ASYNWAIT_WRITE ) 
                {
                    DBG1 (( MF__,"REQ_ASYNIO continue, state is %s\n", 
                              e38_tskstate_name(this_ukt->curr_task->state) ));
                    continue ;
                }
            }
            ENQU_RUNQUEUE( this_ukt->curr_quu->taskid, NULL, 
                            this_ukt->curr_quu, BASE_PRIO_IOC) ; 
        }

        /******************************************************************/
        /* search IOC queue, add elemets to RUN_QUEU                      */
        /******************************************************************/
#if defined(OLD_SPINLOCK_IOC_QUEUE) || !defined(RTESYS_HAS_NATIVE_ATOMIC_OPERATION)
#ifdef  DEBUG_RTE
        DBG1 (( MF__,"searching in IOC-Queue \n" ));

        WAIT_UNTIL_ALONE ( this_ukt->ioc_queue.exclusive );
        {
            struct IOC_QUEUE *ioptr ;
            for ( ioptr = this_ukt->ioc_queue.last ; 
                    ioptr->request != NULL; 
                        ioptr = ioptr->next )
            {
                DBG1 (( MF__,"IOC-QUEUE 0x%lx 0x%lx\n",
                    (long) ioptr , (long) ioptr->request ));
                if ( ioptr->next == this_ukt->ioc_queue.last
                  || ioptr->next == ioptr )
                {
                    break;
                }
            }
        }
        CLEARLOCK ( this_ukt->ioc_queue.exclusive );
#endif /*DEBUG_RTE*/

        while ( 0 != ( this_ukt->curr_quu = this_ukt->ioc_queue.last->request ) )
        {
            RTESys_ReadMemoryBarrier();

            /* dequeue the request */
            this_ukt->ioc_queue.last->request = 0 ;
            this_ukt->ioc_queue.last = this_ukt->ioc_queue.last->next ;
            this_ukt->curr_task      = this_ukt->curr_quu->taskid ;

#else
        {
            struct  DOUBLY_LINKED * lastIocElement;

            while ( 0 != (lastIocElement = this_ukt->ioc_queue.last) )
            {
                struct  DOUBLY_LINKED * firstIocElement;

                while ( !RTESys_CmpxchgPointer( (void **)&this_ukt->ioc_queue.last,
                                                lastIocElement,
                                                0,
                                                (void **)&lastIocElement ) );
                
                RTESys_ReadMemoryBarrier();

                firstIocElement = lastIocElement;
                while ( firstIocElement->backward )
                {
                    firstIocElement->backward->forward = firstIocElement;
                    firstIocElement = firstIocElement->backward;
                }

                while ( firstIocElement )
                {
                    this_ukt->curr_quu = firstIocElement;
                    firstIocElement = firstIocElement->forward;
            
                    this_ukt->curr_task      = this_ukt->curr_quu->taskid ;
#endif

                    if ( this_ukt->curr_task) 
                    { /* since REQ_NEW_MOVE_TASK_MEAS_INTERVAL_EO00 and REQ_MOVE_TASK_EO00 no task might be specified */
                        this_ukt->curr_task->is_in_queue = TASK_IS_IN_NON_QUEUE ;
                        IGNORE_REQUEST_AT_SYSTEM_END() ;

                        /* The load balancing has moved the task to another UKT during
                           'ioc' enqueue operation. Now we forward this request.. */
                        if( this_ukt != this_ukt->curr_task->ukt )
                        {
                            en71_iocEnquAndWake ( this_ukt->curr_task->ukt, this_ukt->curr_task, 
                                                  this_ukt->curr_quu );
                            this_ukt->curr_task = 0;
                            continue;
                        }
                    }

                    /*
                     *  INSERT_MOVING_TASK requests
                     */
                    if (this_ukt->curr_quu->req_type == REQ_INSERT_MOVING_TASK_E000)
                    {
                        struct  TASK_TYPE * tmp_tcb;

                        MSGD(( INFO_TASK_MOVE_REQUEST_REC, this_ukt->curr_task->index ));

                        /* integrate task into UKTs task chain! */
                        tmp_tcb = this_ukt->pFirstTaskCtrl;

                        if ( tmp_tcb ) tmp_tcb->prev_task   = this_ukt->curr_task;

                        this_ukt->pFirstTaskCtrl       = this_ukt->curr_task;
                        this_ukt->curr_task->next_task = tmp_tcb;
                        this_ukt->curr_task->prev_task = NULL;

                        if (this_ukt->curr_task->prio_flag)  this_ukt->SelfDispatch++;

                        en71AlterTaskCnt( this_ukt->curr_task, ADD_ALL_COUNTER );

                        this_ukt->curr_quu->req_type     = this_ukt->curr_quu->req_type_org;
                        this_ukt->curr_quu->req_type_org = 0;
                    }

                    /*
                     *  NEW_MOVE_TASK_MEAS_INTERVAL requests
                     */
                    if (this_ukt->curr_quu->req_type == REQ_NEW_MOVE_TASK_MEAS_INTERVAL_EO00)
                    {   
                        tsp00_Int4                           idx;
                        struct REQ_PARMS_MOVE_TASK_MESS_INT* reqArg;
                        SAPDB_UInt8                          runnableMicroSeconds;
                        struct TASK_TYPE*                    pTaskCtrl;
                        SAPDB_UInt8                          timeNow = RTESys_MicroSecTimer();
                        tsp00_Int4                           runQueueLen = this_ukt->RunQueueLen;

                        for ( idx = 0; idx < runQueueLen; idx++ )
                        {
                            pTaskCtrl = this_ukt->RunQueue[idx].Task;

                            if (( pTaskCtrl->moveLock.type.longTerm != 0 ) ||
                                ( pTaskCtrl->TaskStateStat.runqueueEnqueueTime == 0 ))
                                continue;

                            runnableMicroSeconds = timeNow - pTaskCtrl->TaskStateStat.runqueueEnqueueTime;

                            /* accumulate the tasks time waititng in the run queue to be scheduled */
                            pTaskCtrl->runnableMicroSeconds[this_ukt->currTimeVar] += runnableMicroSeconds;

                            /* Has the tasks a new runnable time maximum? */
                            if ( pTaskCtrl->maxRunnableMicroSeconds[this_ukt->currTimeVar] < runnableMicroSeconds )
                                pTaskCtrl->maxRunnableMicroSeconds[this_ukt->currTimeVar] = runnableMicroSeconds;

                            /* set the UKTs runnable time values */
                            this_ukt->runnableMicroSeconds[this_ukt->currTimeVar] += runnableMicroSeconds;

                            if ( this_ukt->maxRunnableMicroSeconds[this_ukt->currTimeVar] < runnableMicroSeconds )
                                this_ukt->maxRunnableMicroSeconds[this_ukt->currTimeVar] = runnableMicroSeconds;
                        }

                        reqArg = &this_ukt->curr_quu->args.mtmi_parms;
                        /* skip old requests */
                        if ( reqArg->taskMoveIntervalStartTime == KGS->taskMoveIntervalStartTimes[KGS->currTimeVar])
                        {
                            this_ukt->currTimeVar                                     = reqArg->timeVar;
                            this_ukt->taskMoveIntervalStartTime                       = reqArg->taskMoveIntervalStartTime;
                            this_ukt->UKTSleepTimeMicroSeconds[this_ukt->currTimeVar] = 0;
                            this_ukt->runnableMicroSeconds[this_ukt->currTimeVar]     = 0;
                            this_ukt->maxRunnableMicroSeconds[this_ukt->currTimeVar]  = 0;
                            this_ukt->moveTaskToUKT                                   = NULL; /* drop old move task requests */
                        }
                        /* free the queue element which woke us */
                        e73_dl_enqu ( &KGS->freelist, this_ukt->curr_quu );
                        this_ukt->curr_task = 0 ;
                        continue ;
                    }

                    /*
                     *  MOVE_TASK requests
                     */
                    if (this_ukt->curr_quu->req_type == REQ_MOVE_TASK_EO00)
                    {   
                        struct REQ_PARMS_MOVE_TASK *reqArg;

                        reqArg = &this_ukt->curr_quu->args.mt_parms;
                        /*  skip old requests */
                        if ( reqArg->taskMoveIntervalStartTime == KGS->taskMoveIntervalStartTimes[KGS->currTimeVar])
                        {
                            this_ukt->stopWaitForBestFitTaskAt = reqArg->stopWaitForBestFitTaskAt;
                            this_ukt->moveTaskToUKT            = reqArg->moveTaskToUKT;
                        }
                        /* free the queue element which woke us */
                        e73_dl_enqu ( &KGS->freelist, this_ukt->curr_quu );
                        this_ukt->curr_task = 0 ;
                        continue ;
                    }

                    /*
                     *  CONNECT requests
                     */
                    if (this_ukt->curr_quu->req_type == REQ_CONNECT)
                    {
                        if ( this_ukt->curr_task->state == TSK_CONNECTWAIT )
                        {
                            ENQU_RUNQUEUE(this_ukt->curr_task, NULL, 
                                          this_ukt->curr_quu, BASE_PRIO_COM )  ;
                        }
                        else
                        {
                            /* - don't enqueue into the run-queue if the task
                                 is not waiting for a connection */
                            e73_dl_enqu ( &this_ukt->curr_task->dlq_connreq, this_ukt->curr_quu );
                        }

                        this_ukt->curr_task = 0 ;
                        continue ;
                    }

                    /*
                     *  RELEASE requests
                     */
                    if ( this_ukt->curr_quu->req_type == REQ_RELEASE )
                    {
                        e75_release_request ( this_ukt->curr_quu );
                        this_ukt->curr_task = 0 ;
                        continue ;
                    }

#ifdef DEBUG_RTE
                    if ( this_ukt->curr_quu->req_type == REQ_VBLOCKIO )
                    {
                        DBG1 (( MF__,"REQ_VBLOCKIO, this_ukt 0x%x, quu->ukt 0x%x \n",
                                    this_ukt , this_ukt->curr_task->ukt ));
                    }
#endif /* DEBUG_RTE */

                    if ( this_ukt->curr_quu->req_type == REQ_ASYNIO )
                    {
                        DBG1 (( MF__,"REQ_ASYNIO, this_ukt 0x%x, quu->ukt 0x%x \n",
                                    this_ukt , this_ukt->curr_task->ukt ));
                        this_ukt->curr_quu->args.vai_parms.finished = TRUE ;
                        /* task searches for finished io req. on call of 'vasynwait' */
                        if ( this_ukt->curr_task->state != TSK_ASYNWAIT_READ &&
                             this_ukt->curr_task->state != TSK_ASYNWAIT_WRITE ) continue ;
                    }

                    /*
                           *  RESUME Tracewriter
                           */
                          if ( this_ukt->curr_quu->req_type == REQ_RESUME_TW_EO00 )
                          {
                                DBG1 (( MF__,"REQ_RESUME_TW, this_ukt 0x%x, quu->ukt 0x%x \n",
                                          this_ukt , this_ukt->curr_task->ukt ));

                        RELEASE_FREELIST_ELEM (this_ukt->curr_quu);

                        /* --- tracewriter suspended and not already triggered? */
                        if ( KGS->tw->state == TSK_VSUSPEND && Trace_Flush (KGS->tw->index))
                        {
                                      DBG1 (( MF__,"REQ_RESUME_TW, this_ukt 0x%x, quu->ukt 0x%x \n",
                                                this_ukt , this_ukt->curr_task->ukt ));

                            /* --- resume tracewriter by 'REQ_VRESUME' and 'qa' */
                            this_ukt->curr_quu              = &KGS->tw->qa;
                            this_ukt->curr_quu->req_type    = REQ_VRESUME;
                            KGS->tw->resume_count = 0; /* This is done instead of increment the counter by one as */
                                                       /* vresume would do */
                            ++KGS->tw->totalResumerCount;
                        }
                        else
                        {
                            this_ukt->curr_task = 0;
                            continue ;
                        }
                    }

                          DBG1 (( MF__,"IOC task    T%d  request %d \n", 
                                      this_ukt->curr_task->index ,  
                                      this_ukt->curr_quu->req_type ));
                    ENQU_RUNQUEUE(this_ukt->curr_task, NULL, 
                            this_ukt->curr_quu,  BASE_PRIO_IOC) ;
#if !defined(OLD_SPINLOCK_IOC_QUEUE) && defined(RTESYS_HAS_NATIVE_ATOMIC_OPERATION)
                }
            }
#endif
        }
        /* IOC queue */

        /******************************************************************/
        /* search COM queue, add elemets to RUN_QUEU                      */
        /******************************************************************/
        new_timeout = MAXTIMEVALUE ;
        /* PTS 1110829 adopted to NT behaviour */
        if ( BASE_PRIO_COM > 0 || this_ukt->RunQueueLen <= 1 )
        {
            en71_findcom ( this_ukt, &new_timeout );
        }

        if ( KGS->state >= SERVER_SHUTDOWNREINIT ) 
        {
            MSGD (( INFO_DISP_SHUTKILL_STATE1, KGS->state ));
            en71_shutdownKill ( this_ukt );
        }

        /******************************************************************/
        /* search RUN queue, search for higest prio Task                  */
        /******************************************************************/
        if ( this_ukt->RunQueueLen > 0 )
        { 
            tsp00_Int4        idx, runIndex = 0;
            struct TASK_TYPE* highestPrioTask;
            struct TASK_TYPE* tempTaskCtrl;
            struct TASK_TYPE* moveTask = NULL;
            struct RUN_QUEUE* runQueue               = this_ukt->RunQueue;
            tsp00_Int4        runQueueLen            = this_ukt->RunQueueLen;

            if (( this_ukt->moveTaskToUKT != NULL ) && ( this_ukt->numOfMovableTasks != 0 ))
            {
                SAPDB_UInt8  maxRunnableMicroSeconds = 0;
                SAPDB_UInt2  prevTimeVar             = PREV_TIME_VAR(this_ukt->currTimeVar);
                SAPDB_UInt8  taskMaxRunnableMicroSeconds;

                for ( idx = 0; idx < runQueueLen; idx++ )
                {
                    tempTaskCtrl                = runQueue[idx].Task;
                    taskMaxRunnableMicroSeconds = tempTaskCtrl->maxRunnableMicroSeconds[prevTimeVar];  

                    if (( tempTaskCtrl->moveLock.val == 0 ) && ( tempTaskCtrl != lastRunningTaskCtrl ))
                    {
                        if ( this_ukt->stopWaitForBestFitTaskAt > KGS->current_time )
                        { /* we are searching for the UKTs task with maximum runnable time */
                            if ( taskMaxRunnableMicroSeconds == this_ukt->maxRunnableMicroSeconds[prevTimeVar] )
                            {
                                moveTask = tempTaskCtrl;
                                break;
                            }
                        }
                        else if (( maxRunnableMicroSeconds  
                                 < taskMaxRunnableMicroSeconds ) &&
                                 ( this_ukt->moveTaskToUKT->maxRunnableMicroSeconds[prevTimeVar] 
                                 < taskMaxRunnableMicroSeconds ))
                        {
                            moveTask                = tempTaskCtrl;
                            maxRunnableMicroSeconds = taskMaxRunnableMicroSeconds;
                        }
                    }
                }
            }

            for ( idx = 0, this_ukt->SelfDispatch = 0, highestPrioTask = runQueue[0].Task; 
                  idx < runQueueLen; 
                  idx++ ) 
            {
                struct DOUBLY_LINKED*    tmpRequest;

                tempTaskCtrl = runQueue[idx].Task;
                tmpRequest   = runQueue[idx].Req;

                if ( tempTaskCtrl == moveTask )
                {
                    tempTaskCtrl->is_in_queue = TASK_IS_IN_NON_QUEUE;
                    tempTaskCtrl->ulDynPrio   = 0; 

                    en71MoveTask ( tempTaskCtrl, tmpRequest );

                    if ( (this_ukt->RunQueueLen = --runQueueLen) != 0 )
                    {
                        if ( runQueueLen > idx )
                        {
                            if ( idx == 0 )
                                highestPrioTask = runQueue[runQueueLen].Task;

                            runQueue[idx--] = runQueue [runQueueLen];
                        }
                        else
                        {
                            tempTaskCtrl = runQueue[--idx].Task;
                            tmpRequest   = runQueue[idx].Req;
                        }
                        continue;
                    }
                    else
                    {   
                        highestPrioTask = NULL;
                        break; /* no more entries in run queue */
                    }
                }

                tempTaskCtrl->ulDynPrio++;

                if ( tempTaskCtrl->prio_flag ) 
                    this_ukt->SelfDispatch++;

                if ( REQ_RESCHEDULE_EO00 == runQueue[idx].Req->req_type ) /* Yield request? */
                    tempTaskCtrl->yieldRequestAtPos = idx;

                if ( highestPrioTask->ulDynPrio < tempTaskCtrl->ulDynPrio )
                { 
                    highestPrioTask = tempTaskCtrl;
                    runIndex        = idx;
                }
            }

            if ( highestPrioTask )
            {
                if ( -1 != highestPrioTask->yieldRequestAtPos )
                {   
                    /* Swap run queue index because yield has to be served first */
                    runIndex = highestPrioTask->yieldRequestAtPos;
                    highestPrioTask->yieldRequestAtPos = -1;
                }

                highestPrioTask->is_in_queue = TASK_IS_IN_NON_QUEUE;
                highestPrioTask->ulDynPrio   = 0; 

                if ( highestPrioTask->prio_flag ) this_ukt->SelfDispatch--;

                if ( pTaskCtrl 
                  && pTaskCtrl != highestPrioTask
                  && pTaskCtrl->YieldTask )
                {
                    /* we will spend some unexpected time in the run queue as runnable... */
                    /* to allow dispatcher to move us later we reenable timing collection temporary */
                    /* We also have to repeat the dispatcherEnterTime measurement which we skipped before... */
                    pTaskCtrl->YieldTask = false;
                    en71_DoEnterDispatcherTiming(this_ukt, pTaskCtrl);
                }

                this_ukt->curr_task = highestPrioTask ;
                this_ukt->curr_quu  = runQueue[runIndex].Req ; 

                if ( --this_ukt->RunQueueLen )
                    runQueue[runIndex] = runQueue[this_ukt->RunQueueLen] ; 


                PRINT_RUN_TASK ;
                this_ukt->state = KT_RUNNING ;

                /* if we rescheduled ourself(task yield), time collection may be suppressed */
                en71_DoLeaveDispatcherTiming(this_ukt);
                this_ukt->curr_task->YieldTask = false;
#ifdef DEBUG_RTE
                sp77sprintf(eBuffer, sizeof(eBuffer),
                            "LEAVE en71Dispatcher UKT%d curr_task T%d", 
                           this_ukt->index, 
                           (this_ukt->curr_task ? this_ukt->curr_task->index : 0) );
                MSGD(( INFO_TEST_WILDCARD, eBuffer ));
#endif
                /* Wild running kernel... Do not leave dispatcher if not trace writer... */
                if ( KGS->state >= SERVER_SHUTDOWNREINIT ) 
                {
                    MSGD (( INFO_DISP_SHUTKILL_STATE1, KGS->state ));
                    en71_shutdownKill (this_ukt);
                }

                this_ukt->m_IsWaitingForNextScheduledTask = false; /* allow spinlocks to yield again */
                return ;
            }
        }

        if ( !loop_count 
          && this_ukt->fSemAlreadyPosted == FALSE )
        {
            loop_count = DISPATCHER_LOOPS ;

            this_ukt->ukp_timeout = new_timeout ;
            DBG1 (( MF__,"SLEEP, timeout %d seconds \n", (int) (new_timeout - KGS->current_time) ));

WAIT_AGAIN :

#ifdef DEBUG_SLEEP_STATE
            en71_debugSleepState ( this_ukt );
#endif
            this_ukt->state = KT_SLEEPING;

            this_ukt->SleepingStateEnterMicroSeconds = RTESys_MicroSecTimer();

            if ( this_ukt->RunningStateEnterMicroSeconds )      /* if it is set, it marks the time after wakeup below... */
            {
                this_ukt->UKTRunningStat.ulCount++;
                this_ukt->UKTRunningStat.ulAbsMicroSeconds += (this_ukt->SleepingStateEnterMicroSeconds - this_ukt->RunningStateEnterMicroSeconds) ;
            }

            rc = semop ( this_ukt->semid , &semrcv , 1 );
 
            this_ukt->RunningStateEnterMicroSeconds                   = RTESys_MicroSecTimer();;
            this_ukt->UKTSleepTimeMicroSeconds[this_ukt->currTimeVar] =
                this_ukt->RunningStateEnterMicroSeconds - this_ukt->SleepingStateEnterMicroSeconds;

            /* UKTIOWaitStat.ulAbsMicroSeconds only counts the micro seconds spended waiting for DeviIO */
            if ( this_ukt->wait_for_ioproc_io )
            {
                this_ukt->UKTIOWaitStat.ulCount++;
                this_ukt->UKTIOWaitStat.ulAbsMicroSeconds += (this_ukt->RunningStateEnterMicroSeconds - this_ukt->SleepingStateEnterMicroSeconds) ;
            }

            if ( rc )
            {
                if ( errno == EINTR )
                { 
                    if ( this_ukt->aio.aio_in_progress <= 0 )
                    {
#ifndef LINUX
                        MSGD (( INFO_DISP_IGN_SIGNAL )) ;
#endif
                        goto WAIT_AGAIN ;
                    }
                    else
                    {
                        DBG1 (( MF__,"AWOKE, perhaps aio_complete\n")) ;
                    }
                }
                else
                {
                    /* The UKT semaphore is unusable... We cannot wake up tracewriter
                       so we let him die... */
                    en81_NotifyCoordinator( KGS->tw->ukt == this_ukt );
                    if ( KGS->state < SERVER_SHUTDOWNREINIT )
                    {
                        MSGCD (( ERR_DISP_SEMOP_ERR, sqlerrs() ))
                    }
                    en71_StopUkt( THREAD_INDEX(this_ukt) );
                }
            }
            DBG1 (( MF__,"AWOKE \n" ));

            this_ukt->ukp_timeout = MAXTIMEVALUE ;
            this_ukt->state = KT_RUNNING;
        }
        this_ukt->fSemAlreadyPosted = FALSE;
    } /* for (;;) */
    /*NOTREACHED*/
}

/*
 * ===========================================================================
 */

#ifdef DEBUG_SLEEP_STATE
#undef  MF__
#define MF__ MOD__"en71_debugSleepState"
static  void    en71_debugSleepState ( ten50_UKT_Control *this_ukt )
{
    struct TASK_TYPE *taskCtrl ;

    DBGIN;

    /* report how many tasks still wait for i/o handled by io worker threads */
    if ( this_ukt->wait_for_ioproc_io )
    {
        DBG1(( MF__, "disp sleeps, %d tasks are waiting for io\n", 
                this_ukt->wait_for_ioproc_io ));
    }

    if ( this_ukt->ActTasksVwait || this_ukt->ActTasksPrio )
    {
        DBG1(( MF__, "disp sleeps, %d tasks are waiting, %d prios\n",
               this_ukt->ActTasksVwait , this_ukt->ActTasksPrio ));

        /* dump wait/prio state for every task of this UKT */
        for ( taskCtrl = this_ukt->pFirstTaskCtrl ; taskCtrl ; taskCtrl = taskCtrl->next_task )
        {
            if ( taskCtrl->state == TSK_VWAIT )
            {
                if ( taskCtrl->SetPrioToTask )
                {
                    DBG1(( MF__, "T%d is waiting for T%d (%s) prio %d\n",
                      taskCtrl->index, taskCtrl->SetPrioToTask , 
                      e38_tskstate_name((KGS->pFirstTaskCtrl + taskCtrl->SetPrioToTask -1)->state),
                      (KGS->pFirstTaskCtrl + taskCtrl->SetPrioToTask -1)->prio_flag ));
                }
            }
            else if ( taskCtrl->prio_flag )
            {
                DBG1(( MF__, "T%d (%s) prio %d\n", taskCtrl->index,
                      e38_tskstate_name(taskCtrl->state), taskCtrl->prio_flag )) ;
            }
        }
    }
    DBGOUT;
}
#endif

/*
 * ===========================================================================
 */

#undef  MF__
#define MF__ MOD__"en71_findcom"
/*!
  @brief  Check one task's timeout and reduce *new_timeout if that task's
          timeout expires earlier. Utility tasks (TT_UT) never contribute
          to the dispatcher timeout.
 */
static void en71_reduceTimeout ( struct TASK_TYPE *tcb,
                                 time_t           *new_timeout )
{
    if (  (tcb->task_timeout < *new_timeout)
       && (tcb->type != TT_UT) )
    {
        *new_timeout = tcb->task_timeout ;
        DBG1 (( MF__,"reducing timeout to %d \n", *new_timeout ));
    }
}

/*!
  @brief  Scan the UKT's COM queue: every task that is waiting for
          communication (TSK_VRECEIVE/TSK_VRELEASE/TSK_CONNECTWAIT) and has
          a pending request is removed from the COM queue and enqueued into
          the run queue. All other tasks only contribute to the calculation
          of the next timeout. At system end (state >= SERVER_SHUTDOWNREINIT)
          tasks are removed without being awoken.
  @param  this_ukt    [in]     the UKT whose COM queue is scanned
  @param  new_timeout [in/out] reduced to the earliest task timeout found
 */
static  void    en71_findcom ( ten50_UKT_Control *this_ukt,
                               time_t *new_timeout )
{
    struct TASK_TYPE            **stcb ;
    struct TASK_TYPE            *tcb ;

    DBGIN;
    DBG1 (( MF__,"searching in COM-Queue \n" ));
    /* walk the singly linked COM queue; stcb always points to the link
       field referencing the current tcb, so removal is O(1) */
    for ( stcb = &this_ukt->com_queue ; *stcb ; )
    {
        tcb = *stcb ;
        if ( (tcb->state != TSK_VRECEIVE) 
          && (tcb->state != TSK_VRELEASE)
          && (tcb->state != TSK_CONNECTWAIT) )
        {
            /* task is not waiting for communication: it only influences
               the time when the next timeout expires */
            en71_reduceTimeout ( tcb, new_timeout );
            stcb = & tcb->next_in_com ;
            continue;
        }

        if ( KGS->state < SERVER_SHUTDOWNREINIT )
        {
            if ( e75_has_com ( tcb ) )
            {
                REMOVE_TASK_FROM_COM_QUEU ( stcb , tcb );
                DBG1 (( MF__,"COM task  T%d \n", tcb->index ));
                if ( this_ukt->last_com_task == tcb ) this_ukt->last_com_task = NULL ;

                en71LongTermMoveLock( tcb, WAITING_FOR_NEW_COMMAND, false );
                ENQU_RUNQUEUE_NO_REQUEST(tcb, BASE_PRIO_COM) ;
            }
            else
            {
                /* no pending communication: account for the task's timeout
                   and advance to the next queue element */
                en71_reduceTimeout ( tcb, new_timeout );
                stcb = & tcb->next_in_com ;
            }
        }
        else
        {
            /*
             *  Never awake a task at system end.
             */
            REMOVE_TASK_FROM_COM_QUEU ( stcb , tcb );
        }
    }
}

/*
 * ===========================================================================
 */

/*!
  @brief  Bring this UKT into shutdown/kill state: release all of its
          tasks, free all regions owned by its tasks and, unless the UKT
          hosts the tracewriter, notify the coordinator and stop the UKT
          thread.
  @param  this_ukt [in] the UKT to shut down
 */
static  void    en71_shutdownKill (ten50_UKT_Control *this_ukt)
{
#undef  MF__
#define MF__ MOD__"en71_shutdownKill"
    DBGIN;

    /*
     *  Release all tasks of this UKP.
     */
    e75_release_all_ukp_tasks ();

    /*
     *  Free all regions held by this process.
     */
    en71_freeUktOwnedRegions ( this_ukt);

    /*
     *  All processes not containing the tracewriter
     *  should exit as immediately as possible.
     */
    if ( KGS->tw->ukt != this_ukt )
    {
        en81_NotifyCoordinator(0);
        en71_StopUkt( THREAD_INDEX(this_ukt) );
    }
    DBGOUT;
}

/*
 * ===========================================================================
 */

/*!
  @brief  During shutdown, release all regions (kernel semaphores) still
          owned by tasks of the given UKT. If the tracewriter is found
          waiting on such a region, it is made the new owner and woken up
          so it can still flush the trace.
  @param  this_ukt [in] UKT whose tasks' regions are to be freed
 */
static void en71_freeUktOwnedRegions (ten50_UKT_Control *this_ukt )
{
#undef  MF__
#define MF__ MOD__"en71_freeUktOwnedRegions"
    int                               idx ;
    struct DDB4_SEMA                * sem ;
    struct TASK_TYPE                * tcb ;
    struct N_QUEUE                  * qp ;

    DBGIN;

    for ( idx = 0 ; idx < XPARAM(ulNoOfRegions) ; idx ++ )
    {
        sem = KGS->semaphore + idx ;

        tcb = (struct TASK_TYPE *)sem->owner ;

        /* only regions currently owned by a task of this UKT are of interest */
        if ( tcb == NULL || tcb->ukt != this_ukt ) continue ;
        /* regions owned by the tracewriter itself are left untouched */
        if ( tcb == KGS->tw ) continue ;

        /* clear list and check for waiting tracewriter */
        WAIT_UNTIL_ALONE( sem->semaliste.sem_exclusive );
            sem->owner = 0;
            /* drop all waiters; if the tracewriter is among them it
               becomes the new owner of the region */
            for ( qp = sem->semaliste.last ; qp && qp->taskid ; qp = qp->next )
            {
                /* the tracewriter is already waiting... */
                if ( qp->taskid == KGS->tw )
                {
                    sem->owner = KGS->tw; /* new owner tracewriter */
                }
                qp->taskid = 0;
                sem->semaliste.last = qp;
            }
        CLEARLOCK ( sem->semaliste.sem_exclusive );

        /* if tracewriter is new owner wake him up */
        if ( sem->owner )
        {
            /* fake an vendexcl by current semaphore owning task... */
            this_ukt->curr_task = tcb;

            if ( KGS->tw->ukt == this_ukt )
            {
                /* tracewriter lives in this UKT: enqueue directly into
                   the local run queue with an exclusive request */
                KGS->tw->qa.req_type = REQ_EXCLUSIVE ;
                e71_EnquRav ( KGS->tw, (tsp00_Bool)true );
            }
            else
            {
                /* NOTE(review): idx+1 (the 1-based region number) is smuggled
                   through the void* parameter; presumably e55_remactive
                   decodes it back to an index - confirm */
                en71EnqueueUkt2Ukt ( this_ukt , KGS->tw , tcb, e55_remactive, (void *)(idx+1) );
            }
        }
    }
    DBGOUT;
}

/*
 * ===========================================================================
 */

01230 void    en71_iocEnquAndWake ( 
ten50_UKT_Control               * ukt ,
struct TASK_TYPE                * tcb ,
struct DOUBLY_LINKED            * quu )
{
#undef  MF__
#define MF__ MOD__"en71_iocEnquAndWake"
    en71_ioc ( ukt, tcb, quu );
    e72_wake ( ukt );
}
/*
 * ===========================================================================
 */

01244 void    en71_ioc ( 
ten50_UKT_Control               * ukt ,
struct TASK_TYPE                * tcb ,
struct DOUBLY_LINKED            * quu )
{
#undef  MF__
#define MF__ MOD__"en71_ioc"

    DBGIN;
    /*
     *  Enter queue element in IOC queue
     *  this is the only enque-procedure that may be used by other
     *  processes.......................
     */
    DBG1 (( MF__,"called, task T%d   kt tid %ld \n",
            tcb->index , (long) ukt->tid ));
    DBG1 (( MF__,"request 0x%x  req_type %d \n",
                quu , quu->req_type ));

    quu->taskid = tcb ;

    e73_ioc_enqu( & ukt->ioc_queue , quu );
    tcb->QueStat.ulUKTQueCount++ ;
    tcb->is_in_queue = TASK_IS_IN_IOC_QUEUE ;

    DBGOUT;
}

/*
 * ===========================================================================
 */

void    en71_iocNoTask ( 
ten50_UKT_Control               * ukt ,
struct DOUBLY_LINKED            * quu )

{
#undef  MF__
#define MF__ MOD__"en71_iocNoTask"

    DBGIN;

    /* Taskless variant of en71_ioc(): the queue element carries no owning
       task. Like en71_ioc(), this enqueue routine may also be used by
       threads other than the target UKT itself. */
    quu->taskid = NULL;
    e73_ioc_enqu( &ukt->ioc_queue , quu );

    DBGOUT;
}

/*
 * ===========================================================================
 */
 /*!
   @brief  Prio a task while recording the lock root the caller waits for.
           When a prio of 2 is being set, the current task remembers 'root'
           in lWaitForRoot before delegating to vprio().
  */
 void v2prio ( tsp00_Int4 task_id ,
              UINT1 prio ,
              tsp00_Bool set_prio ,
              int root , 
              int leaf, 
              int locktype )
{
#undef  MF__
#define MF__ MOD__"v2prio"
    ten50_UKT_Control *this_ukt = THIS_UKT_CTRL;

    DBGIN;
    DBG1 (( MF__,"T%d tries to prio Task T%d (%d/%d/%d)\n",
            this_ukt->curr_task->index , task_id, root, leaf, locktype ));

    if ( set_prio && ( prio == 2 ) )
    {
        this_ukt->curr_task->lWaitForRoot = root ;
    }

    vprio ( task_id, prio, set_prio ) ;
    DBGOUT;
}

/*
 * ===========================================================================
 */
 /*!
   @brief  Set or reset the priority flag of a task.
           Implements a small save/restore state machine: a prio of 2
           overrides and saves any previously set prio; when it is reset,
           the saved prio is restored. Setting a prio on a task that is
           already queued boosts its dynamic priority and/or the UKT's
           self-dispatch counter so the dispatcher notices it earlier.
   @param  task_id  [in] 1-based index of the task to (un)prio
   @param  prio     [in] priority value to set
   @param  set_prio [in] true to set the prio, false to reset it
  */
 void vprio ( tsp00_Int4 task_id ,
             UINT1 prio ,
             tsp00_Bool set_prio )
{
#undef  MF__
#define MF__ MOD__"vprio"
ten50_UKT_Control               * this_ukt = THIS_UKT_CTRL;
    struct TASK_TYPE *tcb;
    
    DBGIN;
    /* prio handling is disabled entirely when the prio factor is 0 */
    if ( ! XPARAM(lPrioFactor) )
        return;
    
    /* remember which task the caller is waiting for */
    if ( set_prio ) this_ukt->curr_task->lWaitForTask = task_id ;
    
    tcb    = KGS->pFirstTaskCtrl + ( task_id - 1 ) ;
    /* nothing to do: reset of an unset flag, or re-set of the same prio */
    if ( ( !tcb->prio_flag         && !set_prio ) ||
        ( tcb->prio_flag == prio  &&  set_prio ) )
        return ;
    
    if ( set_prio )
    { 
        if ( prio == 2 )
        { 
            /* prio 2 overrides: save the current prio for later restore */
            tcb->save_prio = tcb->prio_flag ;
            DBG5 (( MF__, "Saving PRIO %d, NEW_PRIO is %d\n",
                tcb->prio_flag, prio ));
        }
        else
            if ( tcb->prio_flag == 2 )
            { 
                /* prio 2 is active: only remember the requested prio,
                   don't replace the active one */
                tcb->save_prio = prio ;
                DBG4 (( MF__, "Saving PRIO %d, ACT_PRIO is 2\n", prio ));
                return ;
            }
    }
    else               /* "set_prio" is FALSE, looking for "SavePrioFlag" */
    {
        if ( tcb->save_prio )
        { 
            /* restore the prio that was shadowed by prio 2 */
            tcb->prio_flag = tcb->save_prio;
            tcb->save_prio = 0 ;
            return ;
        }
    }       

    tcb->prio_flag = set_prio ? prio : 0 ;
    if ( set_prio )
    { 
        tcb->PrioStat.ulTotalCount++ ;
        tcb->ukt->ActTasksPrio++ ;
        /* a task cannot boost itself; only already queued tasks need
           extra dispatcher attention */
        if ( tcb != this_ukt->curr_task )
        {
            switch ( tcb->is_in_queue )
            { 
            case TASK_IS_IN_NON_QUEUE : 
                break ;
            case TASK_IS_IN_IOC_QUEUE :
                tcb->ukt->SelfDispatch++;
                break ;
            case TASK_IS_IN_RUN_QUEUE :
                /* already runnable: raise its dynamic priority directly */
                tcb->ulDynPrio += prio * XPARAM(lPrioFactor) ;
                tcb->ukt->SelfDispatch++;
                break ;
            }
            if ( tcb->ukt != this_ukt ) 
                tcb->PrioStat.ulOtherUkt ++;
        }
    }
    else tcb->ukt->ActTasksPrio-- ;

    DBGOUT;
}

/*
 * ===========================================================================
 */

01401 void    e71_com ( struct TASK_TYPE * tcb )
{
#undef  MF__
#define MF__ MOD__"e71_com"
    ten50_UKT_Control * this_ukt;
    struct TASK_TYPE ** stcb ;

    DBGIN_T(tcb->index);
    /*
     *  Enter the task in the COM queue.
     *  The COM queue is a chained list of tcbs (not circular).
     *  Search the list to check whether the task is already in there.
     *  If not found, append it to the end.
     */
#ifdef SAPDB_SLOW
    this_ukt = THIS_UKT_CTRL;
    if ( tcb->ukt != this_ukt )
    {
        MSGCD (( ERR_WRONG_COM_ENQUE )) ;
        return ;
    }
#else
    this_ukt = tcb->ukt;
#endif

    this_ukt->act_com_elem++ ;

#ifdef FOR_LATER_OPTIMIZATION
    if ( this_ukt->last_com_task )
      stcb = & this_ukt->last_com_task->next_in_com ;
    else
#endif
    for ( stcb = &this_ukt->com_queue ;
          *stcb ;
          stcb = &(*stcb)->next_in_com )
    {
        if ( *stcb == tcb )
        {
            DBG1 (( MF__,"e71_com: was already present T%d \n", tcb->index ));
            return ;
        }
    }
    * stcb = tcb ;
    tcb->next_in_com = NULL ;
    this_ukt->last_com_task = tcb ;

    tcb->QueStat.ulCOMQueCount ++ ;
    tcb->command_count++ ; /* or count in ven53c / vreceive() */
    tcb->ukt->cmd_counter++ ; /* or count in ven53c / vreceive() */
    tcb->is_in_queue = TASK_IS_IN_COM_QUEUE ;

    DBGOUT;
}

/*
 * ===========================================================================
 */
 /*!
   @brief  Enqueue a task into the run queue of its own UKT using the base
           priority for receive/answer requests; a set Prio flag gives an
           additional dynamic priority boost.
  */
 void e71_EnquRav ( 
  struct TASK_TYPE * tcb ,
  boolean            Prio )
{
#undef  MF__
#define MF__ MOD__"e71_EnquRav"
  ten50_UKT_Control * this_ukt = tcb->ukt;

  DBGIN;

  ENQU_RUNQUEUE(tcb,
                this_ukt->curr_task,
                &tcb->qa,
                BASE_PRIO_RAV) ;

  /* boolean Prio contributes either 0 or one full PRIO_FACTOR */
  tcb->ulDynPrio += Prio * PRIO_FACTOR ;
  DBGOUT;
}

/*
 * ===========================================================================
 */

 /*!
   @brief  Enqueue a task into the run queue of its own UKT using the base
           priority for re-execution requests; a set Prio flag gives an
           additional dynamic priority boost.
  */
 void e71_EnquRex ( struct TASK_TYPE * tcb ,
                    boolean            Prio )
{
#undef  MF__
#define MF__ MOD__"e71_EnquRex"
  ten50_UKT_Control * this_ukt = tcb->ukt;

  DBGIN;

  ENQU_RUNQUEUE( tcb, 
                 this_ukt->curr_task, 
                 &tcb->qa,
                 BASE_PRIO_REX) ;

  /* boolean Prio contributes either 0 or one full PRIO_FACTOR */
  tcb->ulDynPrio += Prio * PRIO_FACTOR ;
  DBGOUT;
}
/*
 * ===========================================================================
 */

01502 void e71_YieldTaskEnquRex ( ten50_UKT_Control * this_ukt,
                            tsp00_Bool          Prio )
{
#undef  MF__
#define MF__ MOD__"e71_YieldTaskEnquRex"
  struct TASK_TYPE *tcb = this_ukt->curr_task;

  DBGIN;

  tcb->YieldTask                                     = true;
  this_ukt->RunQueue[this_ukt->RunQueueLen].Task     = tcb;
  this_ukt->RunQueue[this_ukt->RunQueueLen].FromTask = tcb;
  this_ukt->RunQueue[this_ukt->RunQueueLen].Req      = &tcb->qa;
  this_ukt->RunQueueLen++;

  tcb->is_in_queue                                   = TASK_IS_IN_RUN_QUEUE ;
  tcb->ulDynPrio                                     = BASE_PRIO_REX + ( (tcb->prio_flag+Prio) * PRIO_FACTOR);

  DBGOUT;
}
/*
 * ===========================================================================
 */

01526 void    en71EnqueueUkt2Ukt ( 
ten50_UKT_Control               * this_ukt ,
struct TASK_TYPE                * tcb ,
struct TASK_TYPE                * fromtcb ,
VOIDFUNC_PTR                    func ,
void                            *func_param )
{
#undef  MF__
#define MF__ MOD__"en71EnqueueUkt2Ukt"
    struct XXX_QUEUE_HEAD * xxxqh ;
    struct XXX_QUEUE      * xxxq ;
    ten50_UKT_Control     * ukt  = tcb->ukt;


    DBGIN_T(this_ukt->curr_task->index);
    DBG1 (( MF__,"wakes T%d \n", tcb->index ));

    xxxqh             = ukt->xxx [ this_ukt->index - 1 ];

#ifndef SAPDB_NO_SPINLOCK_FOR_UKT2UKT
    WAIT_UNTIL_ALONE ( xxxqh->exclusive );
    /*
     * we are alone now
     */
    xxxq              = xxxqh->xx_request ;
    xxxq->to_execute  = func ;
    xxxq->func_param  = func_param ;
    xxxq->FromTask    = fromtcb;
    xxxq->task        = tcb;
    xxxqh->xx_request = xxxq->next ;

    CLEARLOCK ( xxxqh->exclusive );
#else
    while ( xxxqh->xx_request->to_execute != NULL )
    {
         /* PTS 1000746 */
        xxxqh->num_overflows++ ;
    }
    xxxq              = xxxqh->xx_request ;
    xxxq->to_execute  = func ;
    xxxq->func_param  = func_param ;
    xxxq->FromTask    = fromtcb;
    xxxq->task        = MEM_SYNC ( tcb );
    xxxqh->xx_request = xxxq->next ;
#endif

    if ( tcb->prio_flag && XPARAM(fDynDispQueSrch) )
      ukt->SelfDispatch ++ ; 
    tcb->QueStat.ulUToUQueCount++ ;

    e72_wake ( ukt );
    DBGOUT_T(this_ukt->curr_task->index );
}

#ifndef SAPDB_NO_SPINLOCK_FOR_UKT2UKT
/*
  Function: en71_xxxDequeue
  Description: Dequeue from given XXX queue using spinlock protected code
  Argument: queueHead [in] Pointer to queue head
            copyOfItemToDequeue [out] filled with content of item to dequeue
  Return value: false if nothing to dequeue, true else
 */
static int
en71_xxxDequeue( struct XXX_QUEUE_HEAD * queueHead,
                 struct XXX_QUEUE      * copyOfItemToDequeue )
{
    struct XXX_QUEUE * workItem;
    int                gotItem = 0;

    WAIT_UNTIL_ALONE ( queueHead->exclusive );

    /* inside the spinlock: copy the first filled element (if any) to the
       caller's buffer, mark the slot free and advance the work pointer */
    workItem = queueHead->xx_work;
    if ( workItem != 0 && workItem->task != 0 )
    {
        memcpy(copyOfItemToDequeue, workItem, sizeof(*copyOfItemToDequeue));
        workItem->task       = 0;
        workItem->to_execute = 0;
        queueHead->xx_work   = workItem->next;
        gotItem = 1;
    }

    CLEARLOCK ( queueHead->exclusive );

    return gotItem;
}
#endif

/*
 * =====================================================================
 */

void vdelay_commit ( RTE_TaskId pid )
{
#undef  MF__
#define MF__ MOD__"vdelay_commit"

    DBGIN;
    /* when delayed commit is configured, yield the calling task once
       via a zero-length sleep */
    if ( XPARAM(fDelayCommit) )
    {
        vsleep ( pid, 0 );
    }
    DBGOUT;
}

/*!
  @brief  Delay the calling (log writer) task by re-enqueueing it into its
          UKT's run queue when the UKT is busy (delay already active, or
          lDelayLW configured and more than 4 COM+REX elements pending).
          The task's prio_flag is tuned so it is scheduled at a suitable
          position, then control is given back to the dispatcher.
  @param  pid [in] task id of the caller (currently unused in the body;
                   the current task is taken from the UKT context)
 */
void vdelay_lw ( RTE_TaskId pid )
{
#undef  MF__
#define MF__ MOD__"vdelay_lw"
    ten50_UKT_Control * this_ukt = THIS_UKT_CTRL;
    
    DBGIN;
    if ( this_ukt->delay_active 
      || (XPARAM(lDelayLW) && ( this_ukt->act_com_elem + this_ukt->act_rex_elem > 4 )) )
    { 
        struct DOUBLY_LINKED            * lquu ;
        
        /* put the current task to sleep state and prepare its own queue
           element as a REQ_VSLEEP request */
        this_ukt->curr_task->state = TSK_VSLEEP ;
        this_ukt->curr_task->TaskStateStat.VsleepStat.ulCount++;
        lquu = & this_ukt->curr_task->qa ;
        lquu->req_type = REQ_VSLEEP ;
        lquu->taskid   = this_ukt->curr_task ;
        
        DBG1 (( MF__,"before : act_rex_elem = %d, act_com_elem = %d\n",
            this_ukt->act_rex_elem, this_ukt->act_com_elem )) ;
        
        if ( !this_ukt->delay_active )
        {
            /* first delay: choose how many task switches the delay lasts
               (configured value, or derived from the active task count) */
            this_ukt->end_switch_cnt = XPARAM(lDelayLW) > 1
                                            ? XPARAM(lDelayLW)
                                            : this_ukt->activeTasks > DEF_KERNEL_TASKS
                                                ? this_ukt->activeTasks - DEF_KERNEL_TASKS
                                                : this_ukt->activeTasks ;
            
            /* choose the prio to use while delaying, depending on queue load */
            if ( this_ukt->act_com_elem > 10 ) 
            {
                this_ukt->delay_prio = this_ukt->curr_task->prio_flag = 1 ;/* first in rex */
            }
            else 
            {
                this_ukt->delay_prio = this_ukt->curr_task->prio_flag = 
                    this_ukt->act_rex_elem > 10 ? 10 : 0 ;
            }
        }
        else
        {
            /* delay already active: reuse delay_prio unless the REX queue
               has drained below it */
            this_ukt->curr_task->prio_flag = 
                ( this_ukt->delay_prio > 1 
                && this_ukt->act_rex_elem < this_ukt->delay_prio ) 
                    ? 0 : this_ukt->delay_prio ;
        }
        
        /* yield: re-enqueue ourselves and return to the dispatcher */
        e71_YieldTaskEnquRex ( this_ukt, this_ukt->curr_task->prio_flag );
        this_ukt->curr_task->prio_flag = 0 ;
        this_ukt->delay_active++ ;
        GOTO_DISP(&this_ukt);
        this_ukt->delay_active-- ;
        DBG1 (( MF__,"after  : act_rex_elem = %d, act_com_elem = %d\n",
            this_ukt->act_rex_elem, this_ukt->act_com_elem )) ;
        if ( !this_ukt->delay_active )
        { 
            this_ukt->end_switch_cnt = 0 ;
        }
    }
    DBGOUT;
}

/*
 * =====================================================================
 */

01697 time_t en71LoadBalancing()
  {
#undef  MF__
#define MF__ MOD__"en71LoadBalancing"

  static SAPDB_Bool    measInterval = false;
  tsp00_Longuint       sleepTime = XPARAM(uLoadBalancingCheck);

  if ( KGS->state == SERVER_WARM )
    {
    if ( KGS->taskMoveMeasIntervals < 2 )
      { /* we don't have enough statistic data! */
      en71SendMoveMeassIntRequest ();  
      sleepTime = XPARAM(uLoadBalancingCheck);
      }
    else if ( measInterval == true ) 
      {
      en71SendMoveMeassIntRequest();

      if ( KGS->taskMoved ) 
        { /* A task has been moved during last measurement interval. Interval */
          /* statistics will be ignored. Don't make a move check next time. */
        KGS->taskMoved = false;
        sleepTime      = XPARAM(uLoadBalancingCheck);
        }
      else
        {
        measInterval  = false; /* Next time we do a move check. */
        sleepTime     = XPARAM(uLoadBalancingCheck) / 2;
        }
      }
    else
      {
      ten50_UKT_Control *srcUKT, *dstUKT;

      if (en71TaskMoveCheck( &srcUKT, &dstUKT ))
      {
        en71SendMoveRequest ( srcUKT, dstUKT, XPARAM(uLoadBalancingCheck) / 2 + KGS->current_time );

        sleepTime = XPARAM(uLoadBalancingCheck);
      }
      else
        sleepTime = XPARAM(uLoadBalancingCheck) - (XPARAM(uLoadBalancingCheck) / 2);

      measInterval = true;   /* Next time we start a new measurement interval. */
      }
    }

  return sleepTime;
  }

/*
 * =====================================================================
 */

01752 void en71TempMoveLock( struct TASK_TYPE   *tcb,
                       SAPDB_UInt2        cnt,
                       SAPDB_Bool         setLock )
{
#undef  MF__
#define MF__ MOD__"en71TempMoveLock"
    if (setLock)
    {
        if ( tcb->moveLock.val == 0 )
            tcb->ukt->numOfMovableTasks--;

        tcb->moveLock.type.temporary += cnt;
    }
    else if ( tcb->moveLock.type.temporary != 0 )
    {
#if defined (CHECK_MOVELOCK_UNDERRUN)
        if (tcb->moveLock.type.temporary < cnt )
             vabort ( WRITE_CORE ) ;
#endif
        tcb->moveLock.type.temporary -= cnt;

        if ( tcb->moveLock.val == 0 )
            tcb->ukt->numOfMovableTasks++;
    }
}

/*
 * =====================================================================
 */

01782 void en71LongTermMoveLock( struct TASK_TYPE   *tcb,
                           SAPDB_UInt2        lockType,
                           SAPDB_Bool         setLock )
{
#undef  MF__
#define MF__ MOD__"en71LongTermMoveLock"

    if (setLock)
    {
        if ( tcb->moveLock.val == 0 )
            tcb->ukt->numOfMovableTasks--;

        tcb->moveLock.type.longTerm |= lockType;
    }
    else if (( tcb->moveLock.type.longTerm & lockType ) != 0 )
    {
        tcb->moveLock.type.longTerm &= ~lockType;

        if ( tcb->moveLock.val == 0 )
            tcb->ukt->numOfMovableTasks++;
    }
}

/*
 * =====================================================================
 */

01809 void  en71AlterTaskCnt( struct TASK_TYPE   *tcb,
                        SAPDB_UInt2        alterType )
{
#undef  MF__
#define MF__ MOD__"en71AlterTaskCnt"
    ten50_UKT_Control   *this_ukt = tcb->ukt;
    SAPDB_Int2          alterVal  = COUNTER_SUB & alterType ? -1 : 1;

    if ( alterType & TOTAL_COUNTER )
    {
        /* alter total counter  */
        switch ( tcb->type )
        {
        case TT_EV:
            this_ukt->numConnectableTasks += alterVal;
            this_ukt->numEventTasks       += alterVal;
            break;
        case TT_UT:
            this_ukt->numConnectableTasks += alterVal;
            this_ukt->numUtilityTasks     += alterVal;
            break;
        case TT_US:
            this_ukt->numConnectableTasks += alterVal;
            this_ukt->numUserTasks        += alterVal;
            break;
        case TT_SV:
            this_ukt->numServTasks        += alterVal;
            break;
        }
    }

    if ( alterType & ACTIVE_COUNTER )
    {
        if ( tcb->moveLock.val == 0 )
            this_ukt->numOfMovableTasks += alterVal;

        this_ukt->activeTasks += alterVal;

        switch ( tcb->type )
        {
        case TT_EV:
            this_ukt->activeEventTasks    += alterVal;
            break;
        case TT_UT:
            this_ukt->activeUtilityTasks  += alterVal;
            break;
        case TT_US:
            this_ukt->activeUserTasks     += alterVal;
            break;
        case TT_SV:
            this_ukt->activeServTasks     += alterVal;
            break;
        }
    }
}

/*
 * =====================================================================
 */

/*
  Broadcasts a "new measurement interval" request to every UKT and
  advances the global interval time variable. Bails out without doing
  anything if any UKT appears stuck (has not dispatched since the last
  interval request and is not sleeping), because a non-responding UKT
  would drain the free request element list.
 */
static void en71SendMoveMeassIntRequest()
{
#undef  MF__
#define MF__ MOD__"en71SendMoveMeassIntRequest"
  ten50_UKT_Control*                   tmpUKT;
  struct DOUBLY_LINKED*                request;
  struct REQ_PARMS_MOVE_TASK_MESS_INT  *reqArg;

  for ( tmpUKT = KGS->first_kp; tmpUKT <= KGS->last_kp; tmpUKT++ )
  {
    /* - This check is needed, because a non-responding UKT leads to an empty
         queue free element list ( e73_dl_dequ ) */
    if (( tmpUKT->dispCounterDuringMesIntReq == tmpUKT->disp_counter ) &&
        ( tmpUKT->state                      != KT_SLEEPING ))
        return;
  }

  /* start a new interval: rotate the time variable and record its start time */
  KGS->currTimeVar                                  = NEXT_TIME_VAR(KGS->currTimeVar);
  KGS->taskMoveIntervalStartTimes[KGS->currTimeVar] = KGS->current_time;
  KGS->taskMoveMeasIntervals++;

  for ( tmpUKT =  KGS->first_kp; tmpUKT <= KGS->last_kp; tmpUKT++ )
  {
      /* send new measurement interval to UKT */
      /* NOTE(review): e73_dl_dequ result is used unchecked - presumably the
         guard loop above guarantees a free element is available; confirm. */
      request                            = e73_dl_dequ(&KGS->freelist);
      request->req_type                  = REQ_NEW_MOVE_TASK_MEAS_INTERVAL_EO00;
      reqArg                             = &request->args.mtmi_parms;
      reqArg->timeVar                    = KGS->currTimeVar;
      reqArg->taskMoveIntervalStartTime  = KGS->taskMoveIntervalStartTimes[KGS->currTimeVar];

      /* --- store to use during next interval request */
      tmpUKT->dispCounterDuringMesIntReq = tmpUKT->disp_counter;

      en71_iocNoTask ( tmpUKT, request );
      e72_wake ( tmpUKT );
  }
}

/*
 * =====================================================================
 */

/*
  Sends a "move one task" request to the source UKT, naming the
  destination UKT and the deadline until which the source UKT may wait
  for the best fitting task. Bails out if the source UKT appears stuck
  (has not dispatched since the previous move request and is not
  sleeping), because a non-responding UKT would drain the free request
  element list.
 */
static void en71SendMoveRequest ( ten50_UKT_Control*   srcUKT,
                                  ten50_UKT_Control*   dstUKT,
                                  SAPDB_UInt8          stopWaitForBestFitTaskAt )
{
#undef  MF__
#define MF__ MOD__"en71SendMoveRequest"
    
    struct DOUBLY_LINKED*                request;
    struct REQ_PARMS_MOVE_TASK           *reqArg;
    
  /* - This check is needed, because a non-responding UKT leads to an empty
       queue free element list ( e73_dl_dequ ) */
    if (( srcUKT->dispCounterDuringSendMoveReq == srcUKT->disp_counter ) &&
        ( srcUKT->state                        != KT_SLEEPING ))
      return;
    

    /* send move task */
    /* NOTE(review): e73_dl_dequ result is used unchecked - presumably the
       guard above guarantees a free element is available; confirm. */
    request                            = e73_dl_dequ(&KGS-> freelist);
    request->req_type                  = REQ_MOVE_TASK_EO00;
    reqArg                             = &request->args.mt_parms;
    reqArg->moveTaskToUKT              = dstUKT;
    reqArg->stopWaitForBestFitTaskAt   = stopWaitForBestFitTaskAt;
    reqArg->taskMoveIntervalStartTime  = KGS->taskMoveIntervalStartTimes[KGS->currTimeVar];
    
    /* --- store to use during next request */
    srcUKT->dispCounterDuringSendMoveReq = srcUKT->disp_counter;

    en71_iocNoTask ( srcUKT, request );
    e72_wake ( srcUKT );
}


/*
 * =====================================================================
 */

/*
  Moves a task from its current UKT to the destination UKT previously
  stored in ukt->moveTaskToUKT: records the move in the global move
  statistics ring, unlinks the task from its UKT's task list, updates
  the task counters and forwards the request to the destination UKT as
  an "insert moving task" request.
 */
static void en71MoveTask ( struct TASK_TYPE*       tcb,
                           struct DOUBLY_LINKED*   request )
{
#undef  MF__
#define MF__ MOD__"en71MoveTask"
  ten50_UKT_Control*   ukt = tcb->ukt;
  SAPDB_UInt2          entry;
  SAPDB_UInt2          timeVar;

  /* alter move task static entry - the ring index is shared, so reserve
     an entry under the exclusive lock */
  WAIT_UNTIL_ALONE ( KGS->lastTaskMoves.exclusive );
  entry                        = KGS->lastTaskMoves.currEntry;
  KGS->lastTaskMoves.currEntry = NEXT_ENTRY( entry, NUM_OF_TASK_MOVE_STAT_RECS);
  CLEARLOCK ( KGS->lastTaskMoves.exclusive );

  /* the statistics of the previous (completed) interval describe why this move happens */
  timeVar = PREV_TIME_VAR(ukt->currTimeVar);

  KGS->lastTaskMoves.entries[entry].moveTime                    = KGS->current_time;
  KGS->lastTaskMoves.entries[entry].task                        = tcb;
  KGS->lastTaskMoves.entries[entry].taskMaxRunnableMicroSeconds = tcb->maxRunnableMicroSeconds[timeVar];
  KGS->lastTaskMoves.entries[entry].srcUKT                      = ukt;
  KGS->lastTaskMoves.entries[entry].srcMaxRunnableMicroSeconds  = ukt->maxRunnableMicroSeconds[timeVar];
  KGS->lastTaskMoves.entries[entry].dstUKT                      = ukt->moveTaskToUKT;
  KGS->lastTaskMoves.entries[entry].dstMaxRunnableMicroSeconds  = ukt->moveTaskToUKT->maxRunnableMicroSeconds[timeVar];

  /* remove this task from the source UKT's doubly linked task list */
  if ( tcb->next_task ) tcb->next_task->prev_task = tcb->prev_task;
  if ( tcb->prev_task ) tcb->prev_task->next_task = tcb->next_task;
  else                  ukt->pFirstTaskCtrl       = tcb->next_task;

  tcb->next_task = NULL;
  tcb->prev_task = NULL;

  /* tell the load balancer to discard this interval's statistics */
  KGS->taskMoved = true;

  MSGD(( INFO_TASK_MOVE_REQUEST_TO, tcb->index, tcb->ukt->index,
         ukt->moveTaskToUKT->index ));

  /* remove the task from the source UKT's total and active counters */
  en71AlterTaskCnt( tcb, SUB_ALL_COUNTER );

  /* hand the task over and clear the source UKT's pending move state */
  tcb->ukt                      = ukt->moveTaskToUKT;
  ukt->moveTaskToUKT            = NULL;
  ukt->stopWaitForBestFitTaskAt = 0;

  /* reuse the request element: the destination UKT inserts the task and
     then continues with the original request type */
  request->req_type_org = request->req_type;
  request->req_type     = REQ_INSERT_MOVING_TASK_E000;

  en71_iocEnquAndWake ( tcb->ukt, tcb, request );
}


/*
 * =====================================================================
 */

/*
  Searches for a UKT pair suitable for task balancing, based on the
  per-interval maximum runnable times of the previous interval:
  - *srcUKT: the UKT with the largest maximum runnable time that has at
    least two active tasks and at least one movable task;
  - *dstUKT: a different UKT, open for moved tasks, whose maximum
    runnable time is smaller than the source's (beyond the configured
    percent band) and minimal among the candidates.
  Returns true when both UKTs were found; false otherwise (*srcUKT may
  then be non-NULL, but the caller only uses the pair on true).
 */
static SAPDB_Bool en71TaskMoveCheck( ten50_UKT_Control**   srcUKT,
                                     ten50_UKT_Control**   dstUKT )
{
#undef  MF__
#define MF__ MOD__"en71TaskMoveCheck"
  ten50_UKT_Control*              tmpUKT;
  SAPDB_UInt8                     microSecondsCompare;
  SAPDB_UInt2                     timeVar, prevTimeVar;

  timeVar     = KGS->currTimeVar;
  prevTimeVar = PREV_TIME_VAR(timeVar);

  /* ---- pass 1: pick the most loaded source UKT ---- */
  for ( *srcUKT =  NULL, microSecondsCompare = 0,
        tmpUKT  =  KGS->first_kp;
        tmpUKT  <= KGS->last_kp;
        tmpUKT++ )
  {
    if ( tmpUKT->maxRunnableMicroSeconds[prevTimeVar] == 0 ) /* This UKT has no problem! */
        continue;

    /* the UKT has not changed its time counter variable for a long time, 
       might be running in a tight loop */
    if ((tmpUKT->taskMoveIntervalStartTime != KGS->taskMoveIntervalStartTimes[prevTimeVar]) &&
        (tmpUKT->taskMoveIntervalStartTime != KGS->taskMoveIntervalStartTimes[timeVar]))
      {
        continue;
      }

    /* Is there at least one movable task and are there two or more active tasks inside the UKT */
    if (( tmpUKT->activeTasks > 1 ) && ( tmpUKT->numOfMovableTasks != 0 )) 
    {
      /* The time between the following values is treated as equal if the
         difference is within a 'XPARAM(uLoadBalancingEquality)' percent range. */
      switch ( TASK_MOVE_TIME_COMP( microSecondsCompare, 
                                    tmpUKT->maxRunnableMicroSeconds[prevTimeVar], 
                                    XPARAM(uLoadBalancingEquality )))
      {
        case TASK_MOVE_TIME_COMP_GREATER:
          *srcUKT           = tmpUKT;
          microSecondsCompare = (*srcUKT)->maxRunnableMicroSeconds[prevTimeVar];
          break;
        case TASK_MOVE_TIME_COMP_EQUAL:
          /* tie break: prefer the UKT with the larger accumulated runnable time */
          if (( *srcUKT == NULL ) ||( tmpUKT->runnableMicroSeconds[prevTimeVar] > (*srcUKT)->runnableMicroSeconds[prevTimeVar] ))
          {
            *srcUKT           = tmpUKT;
            microSecondsCompare = (*srcUKT)->maxRunnableMicroSeconds[prevTimeVar];
          }
        break;
      }
    }
  }

  if ( (*srcUKT) == NULL )
    return false;

  /* ---- pass 2: pick the least loaded destination UKT ---- */
  for ( *dstUKT = NULL, microSecondsCompare = INFINIT_TIME_VAL,
        tmpUKT =  KGS->first_kp;
        tmpUKT <= KGS->last_kp;
        tmpUKT++ )
  {
    /* the UKT has not changed its time counter variable for a long time,
       might be running in a tight loop */
    if ((tmpUKT->taskMoveIntervalStartTime != KGS->taskMoveIntervalStartTimes[prevTimeVar]) &&
        (tmpUKT->taskMoveIntervalStartTime != KGS->taskMoveIntervalStartTimes[timeVar]))
    {
      continue;
    } 
        
      if (( tmpUKT != *srcUKT ) && ( tmpUKT->tasksAllowedToMoveTo == true ))
    {
      /* The time between the following UKTs is treated as equal if the
         difference is within a 'XPARAM(uLoadBalancingDiff)' percent range. */
      if ( TASK_MOVE_TIME_COMP( (*srcUKT)->maxRunnableMicroSeconds[prevTimeVar], 
                                tmpUKT->maxRunnableMicroSeconds[prevTimeVar], 
                                XPARAM(uLoadBalancingDiff)) == TASK_MOVE_TIME_COMP_LESS )
      {
        /* The time between the following values is treated as equal if the
           difference is within a 'XPARAM(uLoadBalancingEquality)' percent range. */
        switch ( TASK_MOVE_TIME_COMP( microSecondsCompare, 
                                      tmpUKT->maxRunnableMicroSeconds[prevTimeVar], 
                                      XPARAM(uLoadBalancingEquality )))
        {
          case TASK_MOVE_TIME_COMP_LESS:
            *dstUKT           = tmpUKT;
            microSecondsCompare = (*dstUKT)->maxRunnableMicroSeconds[prevTimeVar];
            break;
          case TASK_MOVE_TIME_COMP_EQUAL:
            /* tie break: prefer the UKT that slept longest (most idle capacity) */
            if (( *dstUKT == NULL ) || ( tmpUKT->UKTSleepTimeMicroSeconds[prevTimeVar] > (*dstUKT)->UKTSleepTimeMicroSeconds[prevTimeVar] ))
            {
              *dstUKT           = tmpUKT;
              microSecondsCompare = (*dstUKT)->maxRunnableMicroSeconds[prevTimeVar];
            }
          break;
        }
      }
    }
  }

  if ( *dstUKT != NULL )
    return true; 

  return false;
}

/* Spin flag for "stopped" UKT threads; volatile so the loop below is not
   optimized away. Not reset anywhere in this file. */
volatile int en71_neverStop = 1;

/*
  Instead of terminating the UKT thread, park the UKT in an endless
  yield loop: the thread stays alive but does no further work.
 */
static void en71_StopUkt(SAPDB_Int4 uktIndex)
{
    char uktName[20];

    sp77sprintf( uktName, sizeof(uktName), "UKT%d", uktIndex );
    MSGCD (( INFO_PROCESS_STOPPED, uktName ));

    for (;;)
    {
        if ( !en71_neverStop )
        {
            break;
        }
        sqlyieldthread();
    }
}
/* UKT statistic record is resetted independend from current setting of 'TimeEnabled' */
static void en71_HandleUKTStatisticReset( ten50_UKT_Control *this_ukt )
{
    if ( this_ukt->SleepStatRecResetPending )
    {
        memset(&this_ukt->UKTRunningStat, 0, sizeof(teo52_AbsoluteTime));
        memset(&this_ukt->UKTIOWaitStat, 0, sizeof(teo52_AbsoluteTime));
        this_ukt->RunningStateEnterMicroSeconds = 0;
#ifndef HAS_CONSOLE_PIPE
        {
            /* If we would have a console pipe, the internal MicroSecTimer */
            /* could be used, but if we use shared memory to collect information */
            /* the MicroSecondTimer cannot be used, since it is process local. To */
            /* provide propper adjustment time() is used, which is identical for all processes */
            struct timeval  tv ;

            (void) gettimeofday ( &tv , (void *)0 );

            this_ukt->SysIdleTimeMeasureBeginMicroSeconds = tv.tv_sec * ((SAPDB_UInt8)1000000) + tv.tv_usec;
        }
#endif
        this_ukt->IdleTimeMeasureBeginMicroseconds = RTESys_MicroSecTimer();
        this_ukt->SleepStatRecResetPending = false;
    }
    else if ( 0 == this_ukt->IdleTimeMeasureBeginMicroseconds )
    {
#ifndef HAS_CONSOLE_PIPE
        /* If we would have a console pipe, the internal MicroSecTimer */
        /* could be used, but if we use shared memory to collect information */
        /* the MicroSecondTimer cannot be used, since it is process local. To */
        /* provide propper adjustment KGS->current_time is used, which is identical for all processes */
        struct timeval  tv ;

        (void) gettimeofday ( &tv , (void *)0 );

        this_ukt->SysIdleTimeMeasureBeginMicroSeconds = tv.tv_sec * ((SAPDB_UInt8)1000000) + tv.tv_usec;
#endif
        this_ukt->IdleTimeMeasureBeginMicroseconds = RTESys_MicroSecTimer();
    }
}

/* Task statistic record is resetted independend from current setting of 'TimeEnabled' */
/* Task statistic record is reset independent of the current 'TimeEnabled'
   setting. Performs a pending task statistics reset (full resets also clear
   queue/prio statistics and counters) and any pending queue statistics reset. */
static void en71_HandleTaskStatisticReset( ten50_UKT_Control *this_ukt
                                         , struct TASK_TYPE  *pTaskCtrl )
{
    if ( pTaskCtrl->TaskStatRecResetPending != EO52_NO_RESET_PENDING )
    {
        memset( &(pTaskCtrl->TaskStateStat), 0, sizeof(teo52_TaskStateStatisticRec) );

        if ( pTaskCtrl->TaskStatRecResetPending == EO52_RESET_TASK_FULL_PENDING )
        {
            /* full reset: queue and priority statistics plus the counters */
            memset ( &pTaskCtrl->QueStat, 0, sizeof ( teo52_TaskQueueStatistic ) );
            pTaskCtrl->QueueStatRecResetPending = false;
            memset ( &pTaskCtrl->PrioStat, 0, sizeof ( teo52_TaskPrioStatistic ) );

            pTaskCtrl->disp_counter   = 1; /* we entered the dispatcher */
            pTaskCtrl->command_count  = 0;
            pTaskCtrl->self_susp_cnt  = 0;
            pTaskCtrl->ulBegExclCalls = 0;
        }

        pTaskCtrl->TaskStatRecResetPending = EO52_NO_RESET_PENDING;
    }

    /* a queue-only reset may be pending independently */
    if ( pTaskCtrl->QueueStatRecResetPending )
    {
        memset ( &pTaskCtrl->QueStat, 0, sizeof ( teo52_TaskQueueStatistic ) );
        pTaskCtrl->QueueStatRecResetPending = false;
    }
}

/* 
  >>>>>  Enter dispatcher timing >>>>>>>>
 */
/* 
  >>>>>  Enter dispatcher timing >>>>>>>>
  Refreshes the per-task time collection switches and, when timing is
  enabled and the task is not a yield task, records the microsecond
  timestamp of entering the dispatcher.
 */
static void en71_DoEnterDispatcherTiming( ten50_UKT_Control *this_ukt,
                                          struct TASK_TYPE  *pTaskCtrl )
{
    SAPDB_UInt8 enterTime;

    pTaskCtrl->TimeCollectionEnabled     = KGS->fTimeMeasure;
    pTaskCtrl->MoveTimeCollectionEnabled = XPARAM(uLoadBalancingCheck);

    if ( pTaskCtrl->YieldTask )
    {
        return;
    }
    if ( !pTaskCtrl->MoveTimeCollectionEnabled && !pTaskCtrl->TimeCollectionEnabled )
    {
        return;
    }

    enterTime = RTESys_MicroSecTimer();

    if ( pTaskCtrl->is_in_queue == TASK_IS_IN_RUN_QUEUE )
    {
        /* self dispatching: reset the run queue enqueue time to prevent a
           negative difference against the dispatcher enter time */
        pTaskCtrl->TaskStateStat.runqueueEnqueueTime = enterTime;
    }
    pTaskCtrl->TaskStateStat.dispatcherEnterTime = enterTime;
}

/* 
  <<<<<<  Leave dispatcher timing <<<<<<<<
*/
/* 
  <<<<<<  Leave dispatcher timing <<<<<<<<
  Records the dispatcher leave timestamp for the current task, corrects
  any backward clock leaps, and accumulates the time the task spent
  waiting in the run queue into the task's and the UKT's runnable time
  statistics for the current interval variable.
*/
static void en71_DoLeaveDispatcherTiming(ten50_UKT_Control *this_ukt)
{
    /* leaving is possible for current task only... */
    struct TASK_TYPE  *pTaskCtrl = this_ukt->curr_task;

    if ( !pTaskCtrl->YieldTask 
     && ( pTaskCtrl->MoveTimeCollectionEnabled || pTaskCtrl->TimeCollectionEnabled ) )
    {
        pTaskCtrl->TaskStateStat.dispatcherLeaveTime = RTESys_MicroSecTimer();

        if ( pTaskCtrl->TaskStateStat.dispatcherEnterTime > pTaskCtrl->TaskStateStat.dispatcherLeaveTime )
        {
            MSGD (( WRN_INTERNAL_MICRO_TIMER_FAILED, 
                "Enter", pTaskCtrl->TaskStateStat.dispatcherEnterTime, 
                "Leave", pTaskCtrl->TaskStateStat.dispatcherLeaveTime )); /* --- we should never get here */
            /* System clock might have been changed; correct this leap!! */
            pTaskCtrl->TaskStateStat.dispatcherEnterTime = pTaskCtrl->TaskStateStat.dispatcherLeaveTime;
        }

        if ( pTaskCtrl->TaskStateStat.runqueueEnqueueTime )
        {
            SAPDB_UInt8         runnableMicroSeconds;
            SAPDB_UInt2         currUKTTimeVar = this_ukt->currTimeVar;

            if ( pTaskCtrl->TaskStateStat.runqueueEnqueueTime > pTaskCtrl->TaskStateStat.dispatcherLeaveTime )
            {
                MSGD (( WRN_INTERNAL_MICRO_TIMER_FAILED,
                        "Enqueue", pTaskCtrl->TaskStateStat.runqueueEnqueueTime, 
                        "Leave", pTaskCtrl->TaskStateStat.dispatcherLeaveTime )); /* --- we should never get here */
                /* System clock might have been changed; correct this leap!! */
                pTaskCtrl->TaskStateStat.runqueueEnqueueTime = pTaskCtrl->TaskStateStat.dispatcherLeaveTime;
            }

            /* PTS 1118094 problem with several requests pending: never count
               time from before the dispatcher was entered */
            if ( pTaskCtrl->TaskStateStat.dispatcherEnterTime > pTaskCtrl->TaskStateStat.runqueueEnqueueTime )
            {
                pTaskCtrl->TaskStateStat.runqueueEnqueueTime = pTaskCtrl->TaskStateStat.dispatcherEnterTime;
            }

            /* time this task spent runnable (waiting in the run queue) */
            runnableMicroSeconds = 
                pTaskCtrl->TaskStateStat.dispatcherLeaveTime  - pTaskCtrl->TaskStateStat.runqueueEnqueueTime;

            if ( pTaskCtrl->lastTimeVar != currUKTTimeVar )
            {   /* since the last running state the counter variable has changed! */
                /* start the task's runnable time values for the new interval */
                pTaskCtrl->runnableMicroSeconds[currUKTTimeVar]    = runnableMicroSeconds;
                pTaskCtrl->maxRunnableMicroSeconds[currUKTTimeVar] = runnableMicroSeconds;
                pTaskCtrl->lastTimeVar                             = currUKTTimeVar;
            }
            else
            {
                /* accumulate the task's time waiting in the run queue to be scheduled */
                pTaskCtrl->runnableMicroSeconds[currUKTTimeVar] += runnableMicroSeconds;

                /* Has the task a new runnable time maximum? */
                if ( pTaskCtrl->maxRunnableMicroSeconds[currUKTTimeVar] < runnableMicroSeconds )
                     pTaskCtrl->maxRunnableMicroSeconds[currUKTTimeVar] = runnableMicroSeconds;
            }

            /* set the UKT's runnable time values */
            this_ukt->runnableMicroSeconds[currUKTTimeVar] += runnableMicroSeconds;

            /* long term move lock set? Locked tasks do not influence the UKT
               maximum used by the load balancer */
            if ( pTaskCtrl->moveLock.type.longTerm == 0 ) 
            { /* Is there a new UKT maximum of the task's time staying in the run queue? */
                if ( this_ukt->maxRunnableMicroSeconds[currUKTTimeVar] < runnableMicroSeconds )
                    this_ukt->maxRunnableMicroSeconds[currUKTTimeVar] = runnableMicroSeconds;
            }
        }
    }
}

Generated by  Doxygen 1.6.0   Back to index