Changeset 100 in 3DVCSoftware for trunk/source/App


Ignore:
Timestamp:
9 Aug 2012, 12:53:16 (12 years ago)
Author:
tech
Message:

Adopted modifications:

  • disparity vector generation (A0097)
  • inter-view motion prediction modification (A0049)
  • simplification of disparity vector derivation (A0126)
  • region boundary chain coding (A0070)
  • residual skip intra (A0087)
  • VSO modification (A0033/A0093)

Plus cleanups and bug fixes.

Update of cfg files (A0033 modification 2)

Location:
trunk/source/App
Files:
11 edited

Legend:

Unmodified
Added
Removed
  • trunk/source/App/TAppDecoder/TAppDecCfg.cpp

    r56 r100  
    115115  size_t iAppendLength = strlen(pchStringToAppend);
    116116
    117   rpchOutputFileName = (Char*) malloc(iInLength+iAppendLength+1);                                                                                               
    118   Char* pCDot = strrchr(pchInputFileName,'.');                          
    119   pCDot = pCDot ? pCDot : pchInputFileName + iInLength;                         
     117  rpchOutputFileName = (Char*) malloc(iInLength+iAppendLength+1);                       
     118  Char* pCDot = strrchr(pchInputFileName,'.');        
     119  pCDot = pCDot ? pCDot : pchInputFileName + iInLength;       
    120120  size_t iCharsToDot = pCDot - pchInputFileName ;
    121   size_t iCharsToEnd = iInLength - iCharsToDot;                                
     121  size_t iCharsToEnd = iInLength - iCharsToDot;        
    122122  strncpy(rpchOutputFileName                            ,  pchInputFileName            , iCharsToDot  );
    123123  strncpy(rpchOutputFileName+ iCharsToDot               ,  pchStringToAppend           , iAppendLength);
    124   strncpy(rpchOutputFileName+ iCharsToDot+iAppendLength ,  pchInputFileName+iCharsToDot, iCharsToEnd  );                               
    125   rpchOutputFileName[iInLength+iAppendLength] = '\0';                          
     124  strncpy(rpchOutputFileName+ iCharsToDot+iAppendLength ,  pchInputFileName+iCharsToDot, iCharsToEnd  );       
     125  rpchOutputFileName[iInLength+iAppendLength] = '\0';        
    126126}
    127127
  • trunk/source/App/TAppDecoder/TAppDecTop.h

    r77 r100  
    8989  Void  decode            (); ///< main decoding function
    9090#if VIDYO_VPS_INTEGRATION
    91   Void  increaseNumberOfViews   (UInt layerId, UInt viewId, UInt isDepth);
     91  Void  increaseNumberOfViews  (UInt layerId, UInt viewId, UInt isDepth);
    9292#else
    93   Void  increaseNumberOfViews   (Int newNumberOfViewDepth);
     93  Void  increaseNumberOfViews  (Int newNumberOfViewDepth);
    9494#endif
    9595  TDecTop* getTDecTop     ( Int viewId, Bool isDepth );
  • trunk/source/App/TAppEncoder/TAppEncCfg.cpp

    r58 r100  
    7474{
    7575  m_aidQP = NULL;
     76#if FIXES
     77  m_aidQPdepth = NULL;
     78#endif
    7679}
    7780
     
    8285    delete[] m_aidQP; m_aidQP = NULL;
    8386  }
     87
     88#if FIXES
     89  if ( m_aidQPdepth )
     90  {
     91    delete[] m_aidQPdepth; m_aidQPdepth = NULL;
     92  }
     93#endif
     94
    8495  for(Int i = 0; i< m_pchInputFileList.size(); i++ )
    8596  {
     
    306317#if HHI_VSO
    307318  ("VSOConfig",                       m_pchVSOConfig            , (Char *) 0    , "VSO configuration")
    308     ("VSO",                             m_bUseVSO                 , false         , "Use VSO" )
    309     // GT: For development, will be removed later
     319  ("VSO",                             m_bUseVSO                 , false         , "Use VSO" )   
    310320  ("VSOMode",                         m_uiVSOMode               , (UInt)   4    , "VSO Mode")
     321  ("LambdaScaleVSO",                  m_dLambdaScaleVSO         , (Double) 1    , "Lambda Scaling for VSO")
     322
    311323#if HHI_VSO_LS_TABLE
    312   ("LambdaScaleVSO",                  m_dLambdaScaleVSO         , (Double) 1  , "Lambda Scaling for VSO")
    313 #else
    314   ("LambdaScaleVSO",                  m_dLambdaScaleVSO         , (Double) 0.5  , "Lambda Scaling for VSO")
    315 #endif
    316     ("ForceLambdaScaleVSO",             m_bForceLambdaScaleVSO    , false         , "Force using Lambda Scale VSO also in non-VSO-Mode")
     324  ("VSOLSTable",                      m_bVSOLSTable             , true          , "Depth QP dependent video/depth rate allocation by Lagrange multiplier" )   
     325#endif
     326
     327#if SAIT_VSO_EST_A0033
     328  ("UseEstimatedVSD",                 m_bUseEstimatedVSD        , true          , "Model based VSD estimation instead of rendering based for some encoder decisions" )     
     329#endif
     330#if LGE_VSO_EARLY_SKIP_A0093
     331  ("VSOEarlySkip",                    m_bVSOEarlySkip           , true          , "Early skip of VSO computation if synthesis error assumed to be zero" )     
     332#endif
     333  ("ForceLambdaScaleVSO",             m_bForceLambdaScaleVSO    , false         , "Force using Lambda Scale VSO also in non-VSO-Mode")
    317334#if HHI_VSO_DIST_INT
    318   ("AllowNegDist",                    m_bAllowNegDist           , true         , "Allow negative Distortion in VSO")
     335  ("AllowNegDist",                    m_bAllowNegDist           , true          , "Allow negative Distortion in VSO")
    319336#endif
    320337
     
    667684     0.753550, 0.800000 
    668685  };
    669   AOT( (m_aiQP[1] < 0) || (m_aiQP[1] > 51));
    670   m_dLambdaScaleVSO *= adLambdaScaleTable[m_aiQP[1]];
     686  if ( m_bVSOLSTable )
     687  {
     688    AOT( (m_aiQP[1] < 0) || (m_aiQP[1] > 51));
     689    m_dLambdaScaleVSO *= adLambdaScaleTable[m_aiQP[1]];
     690  }
    671691#endif
    672692#endif
     
    15621582    printf("VSO Negative Distortion      : %d\n",    m_bAllowNegDist ? 1 : 0);
    15631583#endif
     1584#if HHI_VSO_LS_TABLE
     1585    printf("VSO LS Table                 : %d\n",    m_bVSOLSTable ? 1 : 0);   
     1586#endif
     1587#if SAIT_VSO_EST_A0033
     1588    printf("VSO Estimated VSD            : %d\n",    m_bUseEstimatedVSD ? 1 : 0);       
     1589#endif
     1590#if LGE_VSO_EARLY_SKIP_A0093
     1591    printf("VSO Early Skip               : %d\n",    m_bVSOEarlySkip ? 1 : 0);   
     1592#endif
     1593   
    15641594  }
    15651595#endif
     
    16581688#if HHI_VSO
    16591689  printf("VSO:%d ", m_bUseVSO             );
    1660 #endif
     1690#endif 
    16611691#if HHI_DMM_WEDGE_INTRA || HHI_DMM_PRED_TEX
    16621692  printf("DMM:%d ", m_bUseDMM );
  • trunk/source/App/TAppEncoder/TAppEncCfg.h

    r56 r100  
    216216#endif
    217217#if HHI_INTER_VIEW_RESIDUAL_PRED
    218   UInt      m_uiMultiviewResPredMode;                         ///< using multiview residual prediction
     218  UInt      m_uiMultiviewResPredMode;          ///< using multiview residual prediction
    219219#endif
    220220
    221221#if FAST_DECISION_FOR_MRG_RD_COST
    222   Bool      m_useFastDecisionForMerge;                        ///< flag for using Fast Decision Merge RD-Cost
    223 #endif
    224   Bool      m_bUseCbfFastMode;                              ///< flag for using Cbf Fast PU Mode Decision
    225   Int       m_iSliceMode;           ///< 0: Disable all Recon slice limits, 1 : Maximum number of largest coding units per slice, 2: Maximum number of bytes in a slice
    226   Int       m_iSliceArgument;       ///< If m_iSliceMode==1, m_iSliceArgument=max. # of largest coding units. If m_iSliceMode==2, m_iSliceArgument=max. # of bytes.
    227   Int       m_iEntropySliceMode;    ///< 0: Disable all entropy slice limits, 1 : Maximum number of largest coding units per slice, 2: Constraint based entropy slice
    228   Int       m_iEntropySliceArgument;///< If m_iEntropySliceMode==1, m_iEntropySliceArgument=max. # of largest coding units. If m_iEntropySliceMode==2, m_iEntropySliceArgument=max. # of bins.
    229 
    230   Int       m_iSliceGranularity;///< 0: Slices always end at LCU borders. 1-3: slices may end at a depth of 1-3 below LCU level.
    231   Bool m_bLFCrossSliceBoundaryFlag;  ///< 0: Cross-slice-boundary in-loop filtering 1: non-cross-slice-boundary in-loop filtering
    232   Int  m_iTileBehaviorControlPresentFlag; //!< 1: tile behavior control parameters are in PPS 0: tile behavior control parameters are not in PPS
    233   Bool m_bLFCrossTileBoundaryFlag;  //!< 1: Cross-tile-boundary in-loop filtering 0: non-cross-tile-boundary in-loop filtering
     222  Bool      m_useFastDecisionForMerge;         ///< flag for using Fast Decision Merge RD-Cost
     223#endif
     224  Bool      m_bUseCbfFastMode;                 ///< flag for using Cbf Fast PU Mode Decision
     225  Int       m_iSliceMode;                      ///< 0: Disable all Recon slice limits, 1 : Maximum number of largest coding units per slice, 2: Maximum number of bytes in a slice
     226  Int       m_iSliceArgument;                  ///< If m_iSliceMode==1, m_iSliceArgument=max. # of largest coding units. If m_iSliceMode==2, m_iSliceArgument=max. # of bytes.
     227  Int       m_iEntropySliceMode;               ///< 0: Disable all entropy slice limits, 1 : Maximum number of largest coding units per slice, 2: Constraint based entropy slice
     228  Int       m_iEntropySliceArgument;           ///< If m_iEntropySliceMode==1, m_iEntropySliceArgument=max. # of largest coding units. If m_iEntropySliceMode==2, m_iEntropySliceArgument=max. # of bins.
     229
     230  Int       m_iSliceGranularity;               ///< 0: Slices always end at LCU borders. 1-3: slices may end at a depth of 1-3 below LCU level.
     231  Bool      m_bLFCrossSliceBoundaryFlag;       ///< 0: Cross-slice-boundary in-loop filtering 1: non-cross-slice-boundary in-loop filtering
     232  Int       m_iTileBehaviorControlPresentFlag; //!< 1: tile behavior control parameters are in PPS 0: tile behavior control parameters are not in PPS
     233  Bool      m_bLFCrossTileBoundaryFlag;        //!< 1: Cross-tile-boundary in-loop filtering 0: non-cross-tile-boundary in-loop filtering
    234234  Int       m_iColumnRowInfoPresent;
    235235  Int       m_iUniformSpacingIdr;
     
    280280  Char*     m_pchVSOConfig;
    281281  Bool      m_bUseVSO;                                    ///< flag for using View Synthesis Optimization
    282 
     282#if HHI_VSO_LS_TABLE
     283  Bool      m_bVSOLSTable;                                ///< Depth QP dependent Lagrange parameter optimization (m23714)
     284#endif
     285#if LGE_VSO_EARLY_SKIP_A0093
     286  Bool      m_bVSOEarlySkip;                              ///< Early skip of VSO computation (JCT3V-A0093 modification 4)
     287#endif
    283288  //// Used for development by GT, might be removed later
    284289  Double    m_dLambdaScaleVSO;                            ///< Scaling factor for Lambda in VSO mode
     
    289294  UInt      m_uiVSOMode;                                  ///< Number of VSO Mode, 1 = , 2 = simple, org vs. ren, 3 = simple, ren vs. ren, 4 = full 
    290295#endif
     296#if SAIT_VSO_EST_A0033
     297  Bool      m_bUseEstimatedVSD;                           ///< Flag for using model based VSD estimation instead of VSO for some encoder decisions (JCT3V-A0033 modification 3)
     298  Double    m_dDispCoeff;
     299#endif
    291300
    292301  // coding tools (depth intra modes)
  • trunk/source/App/TAppEncoder/TAppEncTop.cpp

    r77 r100  
    203203    m_acTEncTopList[iViewIdx]->setLambdaScaleVSO               ( 1     );
    204204    m_acTEncTopList[iViewIdx]->setVSOMode                      ( 0     );
    205     m_acTEncTopList[iViewIdx]->setUseVSO                       ( false ); //GT: might be enabled later for VSO Mode 4
     205    m_acTEncTopList[iViewIdx]->setUseVSO                       ( false );
     206#if SAIT_VSO_EST_A0033
     207    m_acTEncTopList[iViewIdx]->setUseEstimatedVSD              ( false );
     208#endif
    206209#endif
    207210
     
    517520#endif
    518521      m_acTEncDepthTopList[iViewIdx]->setVSOMode                      ( m_uiVSOMode );
     522
     523#if SAIT_VSO_EST_A0033
     524      m_acTEncDepthTopList[iViewIdx]->setUseEstimatedVSD              ( m_bUseEstimatedVSD );
     525#endif
    519526#endif
    520527
     
    648655    if ( m_uiVSOMode == 4 )
    649656    {
     657#if HHI_VSO_SPEEDUP_A033
     658#if LGE_VSO_EARLY_SKIP_A0093
     659      m_cRendererModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, g_uiMaxCUHeight , LOG2_DISP_PREC_LUT, 0, m_bVSOEarlySkip );
     660#else
     661      m_cRendererModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, g_uiMaxCUHeight , LOG2_DISP_PREC_LUT, 0 );
     662#endif
     663#else
    650664      m_cRendererModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, LOG2_DISP_PREC_LUT, 0 );
     665#endif
    651666
    652667      for ( Int iViewNum = 0; iViewNum < m_iNumberOfViews; iViewNum++ )
     
    11411156};
    11421157
     1158#if SAIT_VSO_EST_A0033
     1159TComPicYuv* TAppEncTop::xGetPicYuvFromViewTemp( Int iViewIdx, Int iPoc, Bool bDepth, Bool bRecon )
     1160{
     1161  TComPic*    pcPic = xGetPicFromView( iViewIdx, iPoc, bDepth);
     1162  TComPicYuv* pcPicYuv = NULL;
     1163
     1164  if (pcPic != NULL)
     1165  {
     1166    if( bRecon )
     1167    {
     1168      if ( pcPic->getReconMark() )
     1169      {
     1170        pcPicYuv = pcPic->getPicYuvRec();
     1171      }
     1172    }
     1173    else
     1174    {
     1175      pcPicYuv = pcPic->getPicYuvOrg();
     1176    }
     1177  };
     1178
     1179  return pcPicYuv;
     1180};
     1181#endif
     1182
    11431183/**
    11441184 *
     
    12091249#endif
    12101250#if HHI_VSO
     1251#if HHI_VSO_SPEEDUP_A033
     1252Void TAppEncTop::setupRenModel( Int iPoc, Int iEncViewIdx, Int iEncContent, Int iHorOffset )
     1253{
     1254  m_cRendererModel.setHorOffset( iHorOffset );
     1255#else
    12111256Void TAppEncTop::setupRenModel( Int iPoc, Int iEncViewIdx, Int iEncContent )
    12121257{
     1258#endif
    12131259  Int iEncViewSIdx = m_cCameraData.getBaseId2SortedId()[ iEncViewIdx ];
    12141260
  • trunk/source/App/TAppEncoder/TAppEncTop.h

    r77 r100  
    6363private:
    6464  // class interface
    65   std::vector<TEncTop*>                           m_acTEncTopList ;
    66   std::vector<TEncTop*>                           m_acTEncDepthTopList ;
    67   std::vector<TVideoIOYuv*>             m_acTVideoIOYuvInputFileList;  ///< input YUV file
     65  std::vector<TEncTop*>      m_acTEncTopList ;
     66  std::vector<TEncTop*>      m_acTEncDepthTopList ;
     67  std::vector<TVideoIOYuv*>  m_acTVideoIOYuvInputFileList;  ///< input YUV file
    6868  std::vector<TVideoIOYuv*>  m_acTVideoIOYuvDepthInputFileList;
    69   std::vector<TVideoIOYuv*>             m_acTVideoIOYuvReconFileList;  ///< output reconstruction file
     69  std::vector<TVideoIOYuv*>  m_acTVideoIOYuvReconFileList;  ///< output reconstruction file
    7070  std::vector<TVideoIOYuv*>  m_acTVideoIOYuvDepthReconFileList;
    7171
     
    7373  std::vector< TComList<TComPicYuv*>* >  m_picYuvDepthRec;         
    7474
    75   std::vector<Int>              m_frameRcvd;                  ///< number of received frames
    76   std::vector<Int>              m_depthFrameRcvd;   
     75  std::vector<Int>           m_frameRcvd;                  ///< number of received frames
     76  std::vector<Int>           m_depthFrameRcvd;   
    7777
    7878  unsigned                   m_essentialBytes;
     
    124124  virtual ~TAppEncTop();
    125125 
     126#if SAIT_VSO_EST_A0033
     127  TComPicYuv* xGetPicYuvFromViewTemp( Int iViewIdx, Int iPoc, Bool bDepth, Bool bRecon );
     128#endif
     129
    126130  Void        encode      ();                               ///< main encoding function
    127131  TEncTop*    getTEncTop( Int viewId, Bool isDepth );   
     
    135139#endif
    136140#if HHI_VSO
     141#if HHI_VSO_SPEEDUP_A033
     142  Void                  setupRenModel    ( Int iPoc, Int iEncViewIdx, Int iEncContent, Int iHorOffset );
     143#else
    137144  Void                  setupRenModel    ( Int iPoc, Int iEncViewIdx, Int iEncContent );
     145#endif
    138146#endif
    139147 
  • trunk/source/App/TAppRenderer/RendererMain.cpp

    r56 r100  
    4242
    4343int main(int argc, char* argv[])
    44         {
     44  {
    4545  TAppRendererTop  cTAppRendererTop;
    4646
  • trunk/source/App/TAppRenderer/TAppRendererCfg.cpp

    r56 r100  
    5353// ====================================================================================================================
    5454
    55 #define MAX_INPUT_VIEW_NUM                                      10
     55#define MAX_INPUT_VIEW_NUM          10
    5656#define MAX_OUTPUT_VIEW_NUM         64
    5757
  • trunk/source/App/TAppRenderer/TAppRendererCfg.h

    r56 r100  
    8080  ////camera specification ////
    8181  Char*               m_pchCameraParameterFile;         ///< camera parameter file
    82   Char*               m_pchSynthViewCameraNumbers;            ///< numbers of views to synthesize
     82  Char*               m_pchSynthViewCameraNumbers;      ///< numbers of views to synthesize
    8383  Char*               m_pchViewConfig;                  ///< String to setup renderer
    84   Char*               m_pchBaseViewCameraNumbers;             ///< numbers of base views
     84  Char*               m_pchBaseViewCameraNumbers;       ///< numbers of base views
    8585
    8686  // derived
     
    8989  Bool                m_bUseSetupString;                ///< true if setup string is used
    9090
    91   Int                 m_iNumberOfInputViews;                                            ///< number of input Views
    92   Int                 m_iNumberOfOutputViews;                                           ///< number views to synthesize
     91  Int                 m_iNumberOfInputViews;            ///< number of input Views
     92  Int                 m_iNumberOfOutputViews;           ///< number views to synthesize
    9393
    9494  //// renderer Modes ////
    95   Int                 m_iRenderDirection;                  ///< 0: interpolate, 1: extrapolate from left, 2: extrapolate from right
     95  Int                 m_iRenderDirection;               ///< 0: interpolate, 1: extrapolate from left, 2: extrapolate from right
    9696
    9797  Int                 m_iLog2SamplingFactor;            ///< factor for horizontal upsampling before processing
  • trunk/source/App/TAppRenderer/TAppRendererTop.cpp

    r81 r100  
    7070    pcVideoInput->open( m_pchVideoInputFileList[iViewIdx], false, iFileBitDepth, iInteralBitDepth );  // read mode
    7171    pcDepthInput->open( m_pchDepthInputFileList[iViewIdx], false, iFileBitDepth, iInteralBitDepth );  // read mode
    72 #if HHI_FIX
    7372    pcVideoInput->skipFrames(m_iFrameSkip, m_iSourceWidth, m_iSourceHeight  );
    7473    pcDepthInput->skipFrames(m_iFrameSkip, m_iSourceWidth, m_iSourceHeight  );
    75 #endif
     74
    7675    m_apcTVideoIOYuvVideoInput.push_back( pcVideoInput );
    7776    m_apcTVideoIOYuvDepthInput.push_back( pcDepthInput );
     
    186185  while ( ( ( iNumOfRenderedFrames < m_iFramesToBeRendered ) || ( m_iFramesToBeRendered == 0 ) ) && !bAnyEOS )
    187186  {
    188 
    189 #if HHI_FIX
    190187    if ( iFrame >= m_iFrameSkip )
    191188    {
    192 #endif
    193     // read in depth and video
    194     for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    195     {
    196       m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
    197 
    198       apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
    199 
    200       bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
    201 
    202       m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
    203       apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
    204       bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
    205 
    206       if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
     189      // read in depth and video
     190      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    207191      {
    208         m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
     192        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
     193
     194        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
     195
     196        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
     197
     198        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
     199        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
     200        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
     201
     202        if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
     203        {
     204          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
     205        }
    209206      }
    210207    }
    211 
    212 #if HHI_FIX
    213     }
    214     else
    215 #else
    216     if ( iFrame < m_iFrameSkip ) // Skip Frames
    217 #endif
    218    
     208    else   
    219209    {
    220210      std::cout << "Skipping Frame " << iFrame << std::endl;
     
    536526
    537527    AOT( m_iLog2SamplingFactor != 0 );
     528#if LGE_VSO_EARLY_SKIP_A0093
     529    cCurModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin, false );
     530#else
    538531    cCurModel.create( m_cRenModStrParser.getNumOfBaseViews(), m_cRenModStrParser.getNumOfModels(), m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin );
     532#endif
     533
     534#if HHI_VSO_SPEEDUP_A033
     535    cCurModel.setHorOffset( 0 );
     536#endif
    539537
    540538    for ( Int iViewIdx = 0; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
     
    563561    {
    564562
    565 #if HHI_FIX
    566563      if ( iFrame >= m_iFrameSkip )
    567564      {     
    568 #endif
    569       // read in depth and video
    570       for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    571       {
    572         m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
    573         bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
    574 
    575         m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
    576         bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
    577       }
    578 #if HHI_FIX
     565        // read in depth and video
     566        for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
     567        {
     568          m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
     569          bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
     570
     571          m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
     572          bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
     573        }
    579574      }
    580575      else
    581 #else
    582       if ( iFrame < m_iFrameSkip )
    583 #endif
    584576      {
    585 #if HHI_FIX
    586577        iFrame++;
    587 #endif
    588578        continue;
    589579      }
     
    598588      }
    599589
    600 #if HHI_FIX
    601590      m_cCameraData.update( (UInt) ( iFrame - m_iFrameSkip ));
    602 #endif
    603591
    604592      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    605593      {
    606 #if HHI_FIX
    607 #else
    608         m_cCameraData.update( (UInt)iFrame );
    609 #endif
    610 
    611594        // setup virtual views
    612595        Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
     
    726709
    727710  AOT( m_iLog2SamplingFactor != 0 );
     711#if HHI_VSO_SPEEDUP_A033
     712  cCurModel.setHorOffset( 0 );
     713#endif
     714#if LGE_VSO_EARLY_SKIP_A0093
     715  cCurModel.create( m_iNumberOfInputViews, m_iNumberOfOutputViews, m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin, false );
     716#else
    728717  cCurModel.create( m_iNumberOfInputViews, m_iNumberOfOutputViews, m_iSourceWidth, m_iSourceHeight, m_iShiftPrecision, m_iBlendHoleMargin );
     718#endif
    729719
    730720  for ( UInt uiBaseView = 0; uiBaseView < m_iNumberOfInputViews; uiBaseView++ )
     
    788778  {
    789779
    790 #if HHI_FIX
    791780    if ( iFrame >= m_iFrameSkip )
    792781    {     
    793 #endif
    794     // read in depth and video
    795     for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    796     {
    797       m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
    798       bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
    799 
    800       m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
    801       bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
    802 
    803       if ( iFrame >= m_iFrameSkip )
     782      // read in depth and video
     783      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    804784      {
    805         Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
    806         cCurModel.setBaseView( iBaseViewSIdx, apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], NULL, NULL );
     785        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
     786        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
     787
     788        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
     789        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
     790
     791        if ( iFrame >= m_iFrameSkip )
     792        {
     793          Int iBaseViewSIdx = m_cCameraData.getBaseId2SortedId()[iBaseViewIdx];
     794          cCurModel.setBaseView( iBaseViewSIdx, apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], NULL, NULL );
     795        }
    807796      }
    808797    }
    809 
    810 #if HHI_FIX
    811     }
    812798    else
    813 #else
    814     if ( iFrame < m_iFrameSkip ) // Skip Frames
    815 #endif
    816799    {
    817800      iFrame++;
    818801      continue;
    819802    }
    820 
    821 #if HHI_FIX
    822803    m_cCameraData.update( (UInt) (iFrame - m_iFrameSkip ));
    823 #else
    824     m_cCameraData.update( (UInt)iFrame );
    825 #endif
    826 
    827804    for(Int iSynthViewIdx=0; iSynthViewIdx < m_iNumberOfOutputViews; iSynthViewIdx++ )
    828805    {
     
    972949  {
    973950
    974 #if HHI_FIX
     951
    975952    if ( iFrame >= m_iFrameSkip )
    976953    {     
    977 #endif
    978     // read in depth and video
    979     for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    980     {
    981       m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
    982       apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
    983       bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
    984 
    985       m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
    986       apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
    987       bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
    988 
    989       if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
     954      // read in depth and video
     955      for(Int iBaseViewIdx=0; iBaseViewIdx < m_iNumberOfInputViews; iBaseViewIdx++ )
    990956      {
    991         m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
     957        m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->read( apcPicYuvBaseVideo[iBaseViewIdx], aiPad  ) ;
     958        apcPicYuvBaseVideo[iBaseViewIdx]->extendPicBorder();
     959        bAnyEOS |= m_apcTVideoIOYuvVideoInput[iBaseViewIdx]->isEof();
     960
     961        m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->read( apcPicYuvBaseDepth[iBaseViewIdx], aiPad  ) ;
     962        apcPicYuvBaseDepth[iBaseViewIdx]->extendPicBorder();
     963        bAnyEOS |= m_apcTVideoIOYuvDepthInput[iBaseViewIdx]->isEof();
     964
     965        if ( m_bTempDepthFilter && (iFrame >= m_iFrameSkip) )
     966        {
     967          m_pcRenTop->temporalFilterVSRS( apcPicYuvBaseVideo[iBaseViewIdx], apcPicYuvBaseDepth[iBaseViewIdx], apcPicYuvLastBaseVideo[iBaseViewIdx], apcPicYuvLastBaseDepth[iBaseViewIdx], ( iFrame == m_iFrameSkip) );
     968        }
    992969      }
    993970    }
    994 
    995 #if HHI_FIX
    996     }
    997971    else
    998 #else
    999     if ( iFrame < m_iFrameSkip ) // Skip Frames
    1000 #endif
    1001972    {
    1002973      std::cout << "Skipping Frame " << iFrame << std::endl;
     
    1005976      continue;
    1006977    }
    1007 
    1008 #if HHI_FIX
    1009978    m_cCameraData.update( (UInt) ( iFrame - m_iFrameSkip ) );
    1010 #else
    1011     m_cCameraData.update( (UInt)iFrame );
    1012 #endif
    1013979
    1014980    for(Int iViewIdx=1; iViewIdx < m_iNumberOfInputViews; iViewIdx++ )
  • trunk/source/App/TAppRenderer/TAppRendererTop.h

    r56 r100  
    6060private:
    6161  // class interface
    62   std::vector<TVideoIOYuv*>              m_apcTVideoIOYuvVideoInput;
     62  std::vector<TVideoIOYuv*>    m_apcTVideoIOYuvVideoInput;
    6363  std::vector<TVideoIOYuv*>    m_apcTVideoIOYuvDepthInput;
    64   std::vector<TVideoIOYuv*>              m_apcTVideoIOYuvSynthOutput;
     64  std::vector<TVideoIOYuv*>    m_apcTVideoIOYuvSynthOutput;
    6565
    6666  // RendererInterface
Note: See TracChangeset for help on using the changeset viewer.