libflame
revision_anchor
|
Go to the source code of this file.
References FLA_Check_error_level(), FLA_LQ_UT_check(), and FLA_LQ_UT_internal().
{
  FLA_Error r_val;

  // Check parameters.
  if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
    FLA_LQ_UT_check( A, T );

  // Invoke FLA_LQ_UT_internal() with the standard control tree.
  //r_val = FLA_LQ_UT_blk_var1( A, T, fla_lqut_cntl_leaf );
  r_val = FLA_LQ_UT_internal( A, T, fla_lqut_cntl_leaf );

  return r_val;
}
FLA_Error FLA_LQ_UT_create_T | ( | FLA_Obj | A, |
FLA_Obj * | T | ||
) |
References FLA_Obj_create(), FLA_Obj_datatype(), FLA_Obj_min_dim(), FLA_Obj_row_stride(), and FLA_Query_blocksize().
{ FLA_Datatype datatype; dim_t b_alg, k; dim_t rs_T, cs_T; // Query the datatype of A. datatype = FLA_Obj_datatype( A ); // Query the blocksize from the library. b_alg = FLA_Query_blocksize( datatype, FLA_DIMENSION_MIN ); // Scale the blocksize by a pre-set global constant. b_alg = ( dim_t )( ( ( double ) b_alg ) * FLA_LQ_INNER_TO_OUTER_B_RATIO ); // Query the minimum dimension of A. k = FLA_Obj_min_dim( A ); // Figure out whether T should be row-major or column-major. if ( FLA_Obj_row_stride( A ) == 1 ) { rs_T = 1; cs_T = b_alg; } else // if ( FLA_Obj_col_stride( A ) == 1 ) { rs_T = k; cs_T = 1; } // Create a b_alg x k matrix to hold the block Householder transforms that // will be accumulated within the LQ factorization algorithm. FLA_Obj_create( datatype, b_alg, k, rs_T, cs_T, T ); return FLA_SUCCESS; }
// Form the unitary matrix Q defined by the Householder transforms produced
// by an LQ (UT) factorization: A holds the Householder vectors in/above its
// diagonal, T holds the accumulated block transforms, and Q is overwritten
// with the resulting unitary matrix.
FLA_Error FLA_LQ_UT_form_Q( FLA_Obj A, FLA_Obj T, FLA_Obj Q )
{
  FLA_Error r_val = FLA_SUCCESS;
  FLA_Obj   QTL, QTR,
            QBL, QBR;
  FLA_Obj   QT;
  FLA_Obj   W;
  dim_t     m_A;

  if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING )
    FLA_LQ_UT_form_Q_check( A, T, Q );

  // Zero out the lower triangle of Q.
  FLA_Setr( FLA_LOWER_TRIANGULAR, FLA_ZERO, Q );

  // Set the diagonal to one.
  FLA_Set_diag( FLA_ONE, Q );

  // If A and Q are different objects, copy the Householder vectors
  // from A to QT, and zero out the upper triangle of QBR. If they
  // are the same object, we don't need to do the copy, and don't
  // need to zero anything out since the user should only have A and
  // Q be the same object if A is square, since Q needs to be square
  // (specifically, dim(Q) needs to equal n(A)).
  if ( FLA_Obj_is( A, Q ) == FALSE )
  {
    m_A = FLA_Obj_length( A );

    FLA_Part_2x2( Q,    &QTL, &QTR,
                        &QBL, &QBR,     m_A, m_A, FLA_TL );

    FLA_Merge_1x2( QTL, QTR, &QT );

    // Copy the Householder vectors in A to QT.
    FLA_Copyr( FLA_UPPER_TRIANGULAR, A, QT );

    // Zero out the upper triangle of QBR.  (Its lower triangle and unit
    // diagonal were already set by the two calls above.)
    FLA_Setr( FLA_UPPER_TRIANGULAR, FLA_ZERO, QBR );
  }

  // Create workspace for applying the block Householder transforms.
  FLA_Apply_Q_UT_create_workspace( T, Q, &W );

  // Overwrite Q, which currently contains Householder vectors in the
  // strictly upper triangle and identity in the lower triangle, with
  // the unitary matrix associated with those Householder transforms.
  r_val = FLA_LQ_UT_form_Q_blk_var1( Q, T, W );

  // Free the temporary workspace.
  FLA_Obj_free( &W );

  return r_val;
}
// Blocked variant 1 for forming Q from an LQ (UT) factorization.
// Walks the matrix from the bottom-right to the top-left, applying the
// block Householder transforms in reverse of the order in which they were
// computed (backward accumulation).  W is workspace sized for Apply_Q_UT.
FLA_Error FLA_LQ_UT_form_Q_blk_var1( FLA_Obj A, FLA_Obj T, FLA_Obj W )
{
  FLA_Obj ATL, ATR, A00, A01, A02,
          ABL, ABR, A10, A11, A12,
                    A20, A21, A22;
  FLA_Obj TL,  TR,  T0,  T1,  T2;
  FLA_Obj T1T,
          T2B;
  FLA_Obj WTL, WTR,
          WBL, WBR;
  FLA_Obj AR1, AR2;
  dim_t   b, b_alg;
  dim_t   m_BR, n_BR;

  // T's row dimension is the algorithmic blocksize used by the factorization.
  b_alg = FLA_Obj_length( T );

  // If A is longer than T is wide, then we need to position ourselves
  // carefully within the matrix for the initial partitioning.
  if ( FLA_Obj_length( A ) > FLA_Obj_width( T ) )
  {
    m_BR = FLA_Obj_length( A ) - FLA_Obj_width( T );
    n_BR = FLA_Obj_width( A ) - FLA_Obj_width( T );
  }
  else
  {
    m_BR = 0;
    n_BR = 0;
  }

  FLA_Part_2x2( A,    &ATL, &ATR,
                      &ABL, &ABR,     m_BR, n_BR, FLA_BR );

  FLA_Part_1x2( T,    &TL,  &TR,      0, FLA_RIGHT );

  while ( FLA_Obj_length( ATL ) > 0 )
  {
    b = min( b_alg, FLA_Obj_min_dim( ATL ) );

    // Since T was filled from left to right, and since we need to access them
    // in reverse order, we need to handle the case where the last block is
    // smaller than the other b x b blocks.
    if ( FLA_Obj_width( TR ) == 0 && FLA_Obj_width( T ) % b_alg > 0 )
      b = FLA_Obj_width( T ) % b_alg;

    FLA_Repart_2x2_to_3x3( ATL, /**/ ATR,       &A00, &A01, /**/ &A02,
                                                &A10, &A11, /**/ &A12,
                        /* ************* */   /* ******************** */
                           ABL, /**/ ABR,       &A20, &A21, /**/ &A22,
                           b, b, FLA_TL );

    FLA_Repart_1x2_to_1x3( TL,  /**/ TR,        &T0, &T1, /**/ &T2,
                           b, FLA_LEFT );

    /*------------------------------------------------------------*/

    // Only the top b x b panel of T1 holds the current block transform.
    FLA_Part_2x1( T1,   &T1T,
                        &T2B,   b, FLA_TOP );

    FLA_Part_2x2( W,    &WTL, &WTR,
                        &WBL, &WBR,    b, FLA_Obj_length( A21 ), FLA_TL );

    // Use an unblocked algorithm for the first (or only) block.
    if ( FLA_Obj_length( ABR ) == 0 )
    {
      FLA_LQ_UT_form_Q_opt_var1( A11, T1T );
    }
    else
    {
      // View the current block row as AR1 = [ A11 A12 ] and the rows
      // below it as AR2 = [ A21 A22 ].
      FLA_Merge_1x2( A11, A12, &AR1 );
      FLA_Merge_1x2( A21, A22, &AR2 );

      // Apply the block Householder transforms to A21 and A22.
      FLA_Apply_Q_UT( FLA_RIGHT, FLA_CONJ_TRANSPOSE, FLA_FORWARD, FLA_ROWWISE,
                      AR1, T1T, WTL, AR2 );

      // Apply H to the current block panel consisting of A11 and A12.
      FLA_LQ_UT_form_Q_opt_var1( AR1, T1T );
    }

    /*------------------------------------------------------------*/

    FLA_Cont_with_3x3_to_2x2( &ATL, /**/ &ATR,       A00, /**/ A01, A02,
                            /* ************** */  /* ****************** */
                                                     A10, /**/ A11, A12,
                              &ABL, /**/ &ABR,       A20, /**/ A21, A22,
                              FLA_BR );

    FLA_Cont_with_1x3_to_1x2( &TL,  /**/ &TR,        T0, /**/ T1, T2,
                              FLA_RIGHT );
  }

  return FLA_SUCCESS;
}
// Single-precision complex unblocked kernel for forming Q from an LQ (UT)
// factorization.  buff_A (m_A x n_A, strides rs_A/cs_A) holds Householder
// vectors above its diagonal; buff_T (strides rs_T/cs_T) holds the tau
// scalars on its diagonal.  Transforms are accumulated backward (last to
// first), overwriting buff_A with the rows of Q.
FLA_Error FLA_LQ_UT_form_Q_opc_var1( int m_A, int n_A,
                                     scomplex* buff_A, int rs_A, int cs_A,
                                     scomplex* buff_T, int rs_T, int cs_T )
{
  scomplex zero    = bli_c0();
  scomplex one     = bli_c1();
  int      min_m_n = min( m_A, n_A );
  int      i;

  for ( i = min_m_n - 1; i >= 0; --i )
  {
    scomplex* alpha11 = buff_A + (i  )*cs_A + (i  )*rs_A;
    scomplex* a21     = buff_A + (i  )*cs_A + (i+1)*rs_A;
    scomplex* a12t    = buff_A + (i+1)*cs_A + (i  )*rs_A;
    scomplex* A22     = buff_A + (i+1)*cs_A + (i+1)*rs_A;

    scomplex* tau11   = buff_T + (i  )*cs_T + (i  )*rs_T;

    scomplex  minus_inv_tau11;

    int       n_ahead = n_A - i - 1;
    int       m_ahead = m_A - i - 1;

    // Apply H(i) from the right to the trailing rows [ a21 A22 ].
    FLA_Apply_H2_UT_r_opc_var1( m_ahead, n_ahead,
                                tau11, a12t, cs_A,
                                a21, rs_A,
                                A22, rs_A, cs_A );

    // NOTE(review): only the real part of tau11 is read here, which
    // assumes the UT tau values are real-valued — confirm against the
    // Householder generation routine before relying on this elsewhere.
    minus_inv_tau11.real = -one.real / tau11->real;
    minus_inv_tau11.imag = zero.imag;

    // Overwrite the diagonal element with its value in Q: 1 - 1/tau.
    alpha11->real = one.real + minus_inv_tau11.real;
    alpha11->imag = zero.imag;

    // Conjugate a12t, then scale it by -1/tau to form this row of Q.
    bli_cconjv( n_ahead,
                a12t, cs_A );
    bli_cscalv( BLIS_NO_CONJUGATE,
                n_ahead,
                &minus_inv_tau11,
                a12t, cs_A );
  }

  return FLA_SUCCESS;
}
FLA_Error FLA_LQ_UT_form_Q_opd_var1 | ( | int | m_A, |
int | n_A, | ||
double * | buff_A, | ||
int | rs_A, | ||
int | cs_A, | ||
double * | buff_T, | ||
int | rs_T, | ||
int | cs_T | ||
) |
References bli_d1(), bli_dscalv(), BLIS_NO_CONJUGATE, and FLA_Apply_H2_UT_r_opd_var1().
Referenced by FLA_LQ_UT_form_Q_opt_var1().
{ double one = bli_d1(); int min_m_n = min( m_A, n_A ); int i; for ( i = min_m_n - 1; i >= 0; --i ) { double* alpha11 = buff_A + (i )*cs_A + (i )*rs_A; double* a21 = buff_A + (i )*cs_A + (i+1)*rs_A; double* a12t = buff_A + (i+1)*cs_A + (i )*rs_A; double* A22 = buff_A + (i+1)*cs_A + (i+1)*rs_A; double* tau11 = buff_T + (i )*cs_T + (i )*rs_T; double minus_inv_tau11; int n_ahead = n_A - i - 1; int m_ahead = m_A - i - 1; FLA_Apply_H2_UT_r_opd_var1( m_ahead, n_ahead, tau11, a12t, cs_A, a21, rs_A, A22, rs_A, cs_A ); minus_inv_tau11 = -one / *tau11; *alpha11 = one + minus_inv_tau11; bli_dscalv( BLIS_NO_CONJUGATE, n_ahead, &minus_inv_tau11, a12t, cs_A ); } return FLA_SUCCESS; }
FLA_Error FLA_LQ_UT_form_Q_ops_var1 | ( | int | m_A, |
int | n_A, | ||
float * | buff_A, | ||
int | rs_A, | ||
int | cs_A, | ||
float * | buff_T, | ||
int | rs_T, | ||
int | cs_T | ||
) |
References bli_d1(), bli_sscalv(), BLIS_NO_CONJUGATE, and FLA_Apply_H2_UT_r_ops_var1().
Referenced by FLA_LQ_UT_form_Q_opt_var1().
{ float one = bli_d1(); int min_m_n = min( m_A, n_A ); int i; for ( i = min_m_n - 1; i >= 0; --i ) { float* alpha11 = buff_A + (i )*cs_A + (i )*rs_A; float* a21 = buff_A + (i )*cs_A + (i+1)*rs_A; float* a12t = buff_A + (i+1)*cs_A + (i )*rs_A; float* A22 = buff_A + (i+1)*cs_A + (i+1)*rs_A; float* tau11 = buff_T + (i )*cs_T + (i )*rs_T; float minus_inv_tau11; int n_ahead = n_A - i - 1; int m_ahead = m_A - i - 1; FLA_Apply_H2_UT_r_ops_var1( m_ahead, n_ahead, tau11, a12t, cs_A, a21, rs_A, A22, rs_A, cs_A ); minus_inv_tau11 = -one / *tau11; *alpha11 = one + minus_inv_tau11; bli_sscalv( BLIS_NO_CONJUGATE, n_ahead, &minus_inv_tau11, a12t, cs_A ); } return FLA_SUCCESS; }
FLA_Error FLA_LQ_UT_form_Q_opt_var1 | ( | FLA_Obj | A, |
FLA_Obj | T | ||
) |
References FLA_LQ_UT_form_Q_opc_var1(), FLA_LQ_UT_form_Q_opd_var1(), FLA_LQ_UT_form_Q_ops_var1(), FLA_LQ_UT_form_Q_opz_var1(), FLA_Obj_col_stride(), FLA_Obj_datatype(), FLA_Obj_length(), FLA_Obj_row_stride(), and FLA_Obj_width().
Referenced by FLA_LQ_UT_form_Q_blk_var1().
{ FLA_Datatype datatype; int m_A, n_A; int rs_A, cs_A; int rs_T, cs_T; datatype = FLA_Obj_datatype( A ); m_A = FLA_Obj_length( A ); n_A = FLA_Obj_width( A ); rs_A = FLA_Obj_row_stride( A ); cs_A = FLA_Obj_col_stride( A ); rs_T = FLA_Obj_row_stride( T ); cs_T = FLA_Obj_col_stride( T ); switch ( datatype ) { case FLA_FLOAT: { float* buff_A = ( float* ) FLA_FLOAT_PTR( A ); float* buff_T = ( float* ) FLA_FLOAT_PTR( T ); FLA_LQ_UT_form_Q_ops_var1( m_A, n_A, buff_A, rs_A, cs_A, buff_T, rs_T, cs_T ); break; } case FLA_DOUBLE: { double* buff_A = ( double* ) FLA_DOUBLE_PTR( A ); double* buff_T = ( double* ) FLA_DOUBLE_PTR( T ); FLA_LQ_UT_form_Q_opd_var1( m_A, n_A, buff_A, rs_A, cs_A, buff_T, rs_T, cs_T ); break; } case FLA_COMPLEX: { scomplex* buff_A = ( scomplex* ) FLA_COMPLEX_PTR( A ); scomplex* buff_T = ( scomplex* ) FLA_COMPLEX_PTR( T ); FLA_LQ_UT_form_Q_opc_var1( m_A, n_A, buff_A, rs_A, cs_A, buff_T, rs_T, cs_T ); break; } case FLA_DOUBLE_COMPLEX: { dcomplex* buff_A = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( A ); dcomplex* buff_T = ( dcomplex* ) FLA_DOUBLE_COMPLEX_PTR( T ); FLA_LQ_UT_form_Q_opz_var1( m_A, n_A, buff_A, rs_A, cs_A, buff_T, rs_T, cs_T ); break; } } return FLA_SUCCESS; }
// Double-precision complex unblocked kernel for forming Q from an LQ (UT)
// factorization.  buff_A (m_A x n_A, strides rs_A/cs_A) holds Householder
// vectors above its diagonal; buff_T (strides rs_T/cs_T) holds the tau
// scalars on its diagonal.  Transforms are accumulated backward (last to
// first), overwriting buff_A with the rows of Q.
FLA_Error FLA_LQ_UT_form_Q_opz_var1( int m_A, int n_A,
                                     dcomplex* buff_A, int rs_A, int cs_A,
                                     dcomplex* buff_T, int rs_T, int cs_T )
{
  dcomplex zero    = bli_z0();
  dcomplex one     = bli_z1();
  int      min_m_n = min( m_A, n_A );
  int      i;

  for ( i = min_m_n - 1; i >= 0; --i )
  {
    dcomplex* alpha11 = buff_A + (i  )*cs_A + (i  )*rs_A;
    dcomplex* a21     = buff_A + (i  )*cs_A + (i+1)*rs_A;
    dcomplex* a12t    = buff_A + (i+1)*cs_A + (i  )*rs_A;
    dcomplex* A22     = buff_A + (i+1)*cs_A + (i+1)*rs_A;

    dcomplex* tau11   = buff_T + (i  )*cs_T + (i  )*rs_T;

    dcomplex  minus_inv_tau11;

    int       n_ahead = n_A - i - 1;
    int       m_ahead = m_A - i - 1;

    // Apply H(i) from the right to the trailing rows [ a21 A22 ].
    FLA_Apply_H2_UT_r_opz_var1( m_ahead, n_ahead,
                                tau11, a12t, cs_A,
                                a21, rs_A,
                                A22, rs_A, cs_A );

    // NOTE(review): only the real part of tau11 is read here, which
    // assumes the UT tau values are real-valued — confirm against the
    // Householder generation routine before relying on this elsewhere.
    minus_inv_tau11.real = -one.real / tau11->real;
    minus_inv_tau11.imag = zero.imag;

    // Overwrite the diagonal element with its value in Q: 1 - 1/tau.
    alpha11->real = one.real + minus_inv_tau11.real;
    alpha11->imag = zero.imag;

    // Conjugate a12t, then scale it by -1/tau to form this row of Q.
    bli_zconjv( n_ahead,
                a12t, cs_A );
    bli_zscalv( BLIS_NO_CONJUGATE,
                n_ahead,
                &minus_inv_tau11,
                a12t, cs_A );
  }

  return FLA_SUCCESS;
}
// Dispatch an LQ (UT) factorization of A (with accumulation matrix T)
// according to the control tree cntl: either enqueue/execute a
// hierarchical macro subproblem, or invoke the variant (unblocked,
// optimized-unblocked, or blocked) that cntl selects.
FLA_Error FLA_LQ_UT_internal( FLA_Obj A, FLA_Obj T, fla_lqut_t* cntl )
{
  FLA_Error r_val = FLA_SUCCESS;

  if ( FLA_Check_error_level() == FLA_FULL_ERROR_CHECKING )
    FLA_LQ_UT_internal_check( A, T, cntl );

  // Hierarchical subproblems are either enqueued on the SuperMatrix
  // runtime's queue or executed immediately, depending on whether the
  // queue is enabled.
  if      ( FLA_Cntl_matrix_type( cntl ) == FLA_HIER &&
            FLA_Cntl_variant( cntl ) == FLA_SUBPROBLEM )
  {
    if ( FLASH_Queue_get_enabled( ) )
    {
      // Enqueue
      ENQUEUE_FLASH_LQ_UT_macro( A, T, cntl );
    }
    else
    {
      // Execute
      r_val = FLA_LQ_UT_macro_task( A, T, cntl );
    }
  }
  else
  {
    // Flat case: select the implementation variant named by cntl.
    if      ( FLA_Cntl_variant( cntl ) == FLA_UNBLOCKED_VARIANT1 )
    {
      r_val = FLA_LQ_UT_unb_var1( A, T );
    }
    else if ( FLA_Cntl_variant( cntl ) == FLA_UNB_OPT_VARIANT1 )
    {
      r_val = FLA_LQ_UT_opt_var1( A, T );
    }
    else if ( FLA_Cntl_variant( cntl ) == FLA_BLOCKED_VARIANT1 )
    {
      r_val = FLA_LQ_UT_blk_var1( A, T, cntl );
    }
    else if ( FLA_Cntl_variant( cntl ) == FLA_UNBLOCKED_VARIANT2 )
    {
      r_val = FLA_LQ_UT_unb_var2( A, T );
    }
    else if ( FLA_Cntl_variant( cntl ) == FLA_UNB_OPT_VARIANT2 )
    {
      r_val = FLA_LQ_UT_opt_var2( A, T );
    }
    else if ( FLA_Cntl_variant( cntl ) == FLA_BLOCKED_VARIANT2 )
    {
      r_val = FLA_LQ_UT_blk_var2( A, T, cntl );
    }
    else if ( FLA_Cntl_variant( cntl ) == FLA_BLOCKED_VARIANT3 )
    {
      r_val = FLA_LQ_UT_blk_var3( A, T, cntl );
    }
    else
    {
      FLA_Check_error_code( FLA_NOT_YET_IMPLEMENTED );
    }
  }

  return r_val;
}
FLA_Error FLA_LQ_UT_recover_tau | ( | FLA_Obj | T, |
FLA_Obj | tau | ||
) |
References FLA_Check_error_level(), FLA_Cont_with_1x3_to_1x2(), FLA_Cont_with_3x1_to_2x1(), FLA_LQ_UT_recover_tau_check(), FLA_LQ_UT_recover_tau_submatrix(), FLA_Obj_length(), FLA_Obj_width(), FLA_Part_1x2(), FLA_Part_2x1(), FLA_Repart_1x2_to_1x3(), and FLA_Repart_2x1_to_3x1().
{ FLA_Obj TL, TR, T0, T1, T2; FLA_Obj tT, t0, tB, t1, t2; dim_t b_alg, b; if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING ) FLA_LQ_UT_recover_tau_check( T, t ); b_alg = FLA_Obj_length( T ); FLA_Part_1x2( T, &TL, &TR, 0, FLA_LEFT ); FLA_Part_2x1( t, &tT, &tB, 0, FLA_TOP ); while ( FLA_Obj_width( TL ) < FLA_Obj_width( T ) ){ b = min( FLA_Obj_width( TR ), b_alg ); FLA_Repart_1x2_to_1x3( TL, /**/ TR, &T0, /**/ &T1, &T2, b, FLA_RIGHT ); FLA_Repart_2x1_to_3x1( tT, &t0, /* ** */ /* ** */ &t1, tB, &t2, b, FLA_BOTTOM ); /*------------------------------------------------------------*/ FLA_LQ_UT_recover_tau_submatrix( T1, t1 ); /*------------------------------------------------------------*/ FLA_Cont_with_1x3_to_1x2( &TL, /**/ &TR, T0, T1, /**/ T2, FLA_LEFT ); FLA_Cont_with_3x1_to_2x1( &tT, t0, t1, /* ** */ /* ** */ &tB, t2, FLA_TOP ); } return FLA_SUCCESS; }
References FLA_Apply_Q_UT(), FLA_Apply_Q_UT_create_workspace(), FLA_Check_error_level(), FLA_Copy_external(), FLA_LQ_UT_solve_check(), FLA_Obj_free(), FLA_Obj_length(), FLA_ONE, FLA_Part_1x2(), FLA_Part_2x1(), FLA_Set(), FLA_Trsm_external(), and FLA_ZERO.
{ FLA_Obj W; FLA_Obj AL, AR; FLA_Obj XT, XB; // Check parameters. if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING ) FLA_LQ_UT_solve_check( A, T, B, X ); FLA_Apply_Q_UT_create_workspace( T, X, &W ); FLA_Part_1x2( A, &AL, &AR, FLA_Obj_length( A ), FLA_LEFT ); FLA_Part_2x1( X, &XT, &XB, FLA_Obj_length( B ), FLA_TOP ); FLA_Copy_external( B, XT ); FLA_Trsm_external( FLA_LEFT, FLA_LOWER_TRIANGULAR, FLA_NO_TRANSPOSE, FLA_NONUNIT_DIAG, FLA_ONE, AL, XT ); FLA_Set( FLA_ZERO, XB ); FLA_Apply_Q_UT( FLA_LEFT, FLA_NO_TRANSPOSE, FLA_FORWARD, FLA_ROWWISE, A, T, W, X ); FLA_Obj_free( &W ); return FLA_SUCCESS; }
FLA_Error FLASH_LQ_UT | ( | FLA_Obj | A, |
FLA_Obj | TW | ||
) |
References FLA_Abort(), FLA_Check_error_level(), FLA_LQ_UT_check(), FLA_LQ_UT_internal(), FLA_Print_message(), FLASH_Obj_depth(), FLASH_Obj_scalar_length_tl(), FLASH_Obj_scalar_min_dim(), FLASH_Obj_scalar_width_tl(), FLASH_Queue_begin(), and FLASH_Queue_end().
{ FLA_Error r_val; dim_t b_alg, b_flash; // Check parameters. if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING ) FLA_LQ_UT_check( A, TW ); // *** The current hierarchical LQ_UT algorithm assumes that the matrix // has a hierarchical depth of 1. We check for that here, because we // anticipate that we'll use a more general algorithm in the future, and // we don't want to forget to remove the constraint. *** if ( FLASH_Obj_depth( A ) != 1 ) { FLA_Print_message( "FLASH_LQ_UT() currently only supports matrices of depth 1", __FILE__, __LINE__ ); FLA_Abort(); } // Inspect the length of TTL to get the blocksize used by the LQ // factorization, which will be our inner blocksize for Apply_Q_UT. b_alg = FLASH_Obj_scalar_length_tl( TW ); b_flash = FLASH_Obj_scalar_width_tl( TW ); // The traditional (non-incremental) LQ_UT algorithm-by-blocks requires // that the algorithmic blocksize be equal to the storage blocksize. if ( b_alg != b_flash ) { FLA_Print_message( "FLASH_LQ_UT() requires that b_alg == b_store", __FILE__, __LINE__ ); FLA_Abort(); } // The traditional (non-incremental) LQ_UT algorithm-by-blocks requires // that min_dim(A) % b_flash == 0. if ( FLASH_Obj_scalar_min_dim( A ) % b_flash != 0 ) { FLA_Print_message( "FLASH_LQ_UT() requires that min_dim( A ) %% b_store == 0", __FILE__, __LINE__ ); FLA_Abort(); } // Begin a parallel region. FLASH_Queue_begin(); // Invoke FLA_LQ_UT_internal() with hierarchical control tree. r_val = FLA_LQ_UT_internal( A, TW, flash_lqut_cntl ); // End the parallel region. FLASH_Queue_end(); return r_val; }
FLA_Error FLASH_LQ_UT_create_hier_matrices | ( | FLA_Obj | A_flat, |
dim_t | depth, | ||
dim_t * | b_flash, | ||
FLA_Obj * | A, | ||
FLA_Obj * | TW | ||
) |
References FLA_Abort(), FLA_Obj_datatype(), FLA_Obj_min_dim(), FLA_Print_message(), FLASH_Obj_create_ext(), and FLASH_Obj_create_hier_copy_of_flat().
{ FLA_Datatype datatype; dim_t m, n; dim_t min_m_n; // *** The current LQ_UT algorithm implemented assumes that // the matrix has a hierarchical depth of 1. We check for that here // because we anticipate that we'll use a more general algorithm in the // future, and we don't want to forget to remove the constraint. *** if ( depth != 1 ) { FLA_Print_message( "FLASH_LQ_UT() currently only supports matrices of depth 1", __FILE__, __LINE__ ); FLA_Abort(); } // Create hierarchical copy of matrix A_flat. FLASH_Obj_create_hier_copy_of_flat( A_flat, depth, b_flash, A ); // Query the datatype of matrix A_flat. datatype = FLA_Obj_datatype( A_flat ); // Query the minimum dimension of A_flat. min_m_n = FLA_Obj_min_dim( A_flat ); // Set the m and n dimensions of TW to be min_m_n. m = min_m_n; n = min_m_n; // Create hierarchical matrices T and W. FLASH_Obj_create_ext( datatype, m, n, depth, b_flash, b_flash, TW ); return FLA_SUCCESS; }
References FLA_Check_error_level(), FLA_LQ_UT_solve_check(), FLA_Obj_length(), FLA_ONE, FLA_Part_1x2(), FLA_Part_2x1(), FLA_ZERO, FLASH_Apply_Q_UT(), FLASH_Apply_Q_UT_create_workspace(), FLASH_Copy(), FLASH_Obj_free(), FLASH_Set(), and FLASH_Trsm().
{ FLA_Obj W; FLA_Obj AL, AR; FLA_Obj XT, XB; // Check parameters. if ( FLA_Check_error_level() >= FLA_MIN_ERROR_CHECKING ) FLA_LQ_UT_solve_check( A, T, B, X ); FLASH_Apply_Q_UT_create_workspace( T, X, &W ); FLA_Part_1x2( A, &AL, &AR, FLA_Obj_length( A ), FLA_LEFT ); FLA_Part_2x1( X, &XT, &XB, FLA_Obj_length( B ), FLA_TOP ); FLASH_Copy( B, XT ); FLASH_Trsm( FLA_LEFT, FLA_LOWER_TRIANGULAR, FLA_NO_TRANSPOSE, FLA_NONUNIT_DIAG, FLA_ONE, AL, XT ); FLASH_Set( FLA_ZERO, XB ); FLASH_Apply_Q_UT( FLA_LEFT, FLA_NO_TRANSPOSE, FLA_FORWARD, FLA_ROWWISE, A, T, W, X ); FLASH_Obj_free( &W ); return FLA_SUCCESS; }