libflame
Functions

void bli_sinvscalm  (conj_t conj, int m, int n, float *alpha,    float *a,    int a_rs, int a_cs)
void bli_dinvscalm  (conj_t conj, int m, int n, double *alpha,   double *a,   int a_rs, int a_cs)
void bli_csinvscalm (conj_t conj, int m, int n, float *alpha,    scomplex *a, int a_rs, int a_cs)
void bli_cinvscalm  (conj_t conj, int m, int n, scomplex *alpha, scomplex *a, int a_rs, int a_cs)
void bli_zdinvscalm (conj_t conj, int m, int n, double *alpha,   dcomplex *a, int a_rs, int a_cs)
void bli_zinvscalm  (conj_t conj, int m, int n, dcomplex *alpha, dcomplex *a, int a_rs, int a_cs)
void bli_cinvscalm ( conj_t    conj,
                     int       m,
                     int       n,
                     scomplex* alpha,
                     scomplex* a,
                     int       a_rs,
                     int       a_cs )
References bli_cinvert2s(), bli_cscal(), bli_is_row_storage(), bli_is_vector(), bli_vector_dim(), bli_vector_inc(), bli_zero_dim2(), and BLIS_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
    scomplex  alpha_inv;
    scomplex* a_begin;
    int       lda, inca;
    int       n_iter;
    int       n_elem;
    int       j;

    // Return early if possible.
    if ( bli_zero_dim2( m, n ) ) return;
    if ( bli_ceq1( alpha ) ) return;

    // Handle cases where A is a vector to ensure that the underlying scal
    // gets invoked only once.
    if ( bli_is_vector( m, n ) )
    {
        // Initialize with values appropriate for a vector.
        n_iter = 1;
        n_elem = bli_vector_dim( m, n );
        lda    = 1; // multiplied by zero when n_iter == 1; not needed.
        inca   = bli_vector_inc( BLIS_NO_TRANSPOSE, m, n, a_rs, a_cs );
    }
    else // matrix case
    {
        // Initialize with optimal values for column-major storage.
        n_iter = n;
        n_elem = m;
        lda    = a_cs;
        inca   = a_rs;

        // An optimization: if A is row-major, then let's access the matrix
        // by rows instead of by columns to increase spatial locality.
        if ( bli_is_row_storage( a_rs, a_cs ) )
        {
            bli_swap_ints( n_iter, n_elem );
            bli_swap_ints( lda, inca );
        }
    }

    bli_cinvert2s( conj, alpha, &alpha_inv );

    for ( j = 0; j < n_iter; j++ )
    {
        a_begin = a + j*lda;

        bli_cscal( n_elem, &alpha_inv, a_begin, inca );
    }
}
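Usage sketch (not part of the generated documentation): inverse-scaling a small column-major scomplex matrix by a complex scalar, conjugating alpha. It assumes the bli_* prototypes and the scomplex type are visible via the FLAME.h umbrella header, that scomplex exposes real/imag fields, and that BLIS_CONJUGATE/BLIS_NO_CONJUGATE are the conj_t values; verify these names against your libflame headers.

#include "FLAME.h"  // assumed umbrella header; pulls in the bli_* prototypes in typical builds

int main( void )
{
    int      m = 3, n = 2;
    scomplex alpha = { 2.0F, -1.0F };
    scomplex A[ 3 * 2 ];                      // column-major: a_rs = 1, a_cs = m

    // Fill A with arbitrary values (field names real/imag assumed).
    for ( int i = 0; i < m * n; i++ )
    {
        A[ i ].real = ( float )( i + 1 );
        A[ i ].imag = 1.0F;
    }

    // A := A / conj(alpha), element by element.
    bli_cinvscalm( BLIS_CONJUGATE, m, n, &alpha, A, 1, m );

    return 0;
}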
void bli_csinvscalm ( conj_t    conj,
                      int       m,
                      int       n,
                      float*    alpha,
                      scomplex* a,
                      int       a_rs,
                      int       a_cs )
References bli_csscal(), bli_is_row_storage(), bli_is_vector(), bli_sinvert2s(), bli_vector_dim(), bli_vector_inc(), bli_zero_dim2(), and BLIS_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
    float     alpha_inv;
    scomplex* a_begin;
    int       lda, inca;
    int       n_iter;
    int       n_elem;
    int       j;

    // Return early if possible.
    if ( bli_zero_dim2( m, n ) ) return;
    if ( bli_seq1( alpha ) ) return;

    // Handle cases where A is a vector to ensure that the underlying scal
    // gets invoked only once.
    if ( bli_is_vector( m, n ) )
    {
        // Initialize with values appropriate for a vector.
        n_iter = 1;
        n_elem = bli_vector_dim( m, n );
        lda    = 1; // multiplied by zero when n_iter == 1; not needed.
        inca   = bli_vector_inc( BLIS_NO_TRANSPOSE, m, n, a_rs, a_cs );
    }
    else // matrix case
    {
        // Initialize with optimal values for column-major storage.
        n_iter = n;
        n_elem = m;
        lda    = a_cs;
        inca   = a_rs;

        // An optimization: if A is row-major, then let's access the matrix
        // by rows instead of by columns to increase spatial locality.
        if ( bli_is_row_storage( a_rs, a_cs ) )
        {
            bli_swap_ints( n_iter, n_elem );
            bli_swap_ints( lda, inca );
        }
    }

    bli_sinvert2s( conj, alpha, &alpha_inv );

    for ( j = 0; j < n_iter; j++ )
    {
        a_begin = a + j*lda;

        bli_csscal( n_elem, &alpha_inv, a_begin, inca );
    }
}
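Usage sketch (hypothetical, assuming the same FLAME.h header and type conventions as in the previous example): a real float alpha divides a complex matrix, so conjugation of alpha is a no-op and BLIS_NO_CONJUGATE is passed.

#include "FLAME.h"  // assumed umbrella header exposing the bli_* prototypes

int main( void )
{
    int      m = 4, n = 3;
    float    alpha = 4.0F;                      // real divisor; conjugation has no effect
    scomplex A[ 4 * 3 ] = { { 8.0F, -8.0F } };  // column-major: a_rs = 1, a_cs = m

    // A := A / alpha; real and imaginary parts are each divided by 4.0f.
    bli_csinvscalm( BLIS_NO_CONJUGATE, m, n, &alpha, A, 1, m );

    return 0;
}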
void bli_dinvscalm ( conj_t  conj,
                     int     m,
                     int     n,
                     double* alpha,
                     double* a,
                     int     a_rs,
                     int     a_cs )
References bli_dinvert2s(), bli_dscal(), bli_is_row_storage(), bli_is_vector(), bli_vector_dim(), bli_vector_inc(), bli_zero_dim2(), and BLIS_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
    double  alpha_inv;
    double* a_begin;
    int     lda, inca;
    int     n_iter;
    int     n_elem;
    int     j;

    // Return early if possible.
    if ( bli_zero_dim2( m, n ) ) return;
    if ( bli_deq1( alpha ) ) return;

    // Handle cases where A is a vector to ensure that the underlying scal
    // gets invoked only once.
    if ( bli_is_vector( m, n ) )
    {
        // Initialize with values appropriate for a vector.
        n_iter = 1;
        n_elem = bli_vector_dim( m, n );
        lda    = 1; // multiplied by zero when n_iter == 1; not needed.
        inca   = bli_vector_inc( BLIS_NO_TRANSPOSE, m, n, a_rs, a_cs );
    }
    else // matrix case
    {
        // Initialize with optimal values for column-major storage.
        n_iter = n;
        n_elem = m;
        lda    = a_cs;
        inca   = a_rs;

        // An optimization: if A is row-major, then let's access the matrix
        // by rows instead of by columns to increase spatial locality.
        if ( bli_is_row_storage( a_rs, a_cs ) )
        {
            bli_swap_ints( n_iter, n_elem );
            bli_swap_ints( lda, inca );
        }
    }

    bli_dinvert2s( conj, alpha, &alpha_inv );

    for ( j = 0; j < n_iter; j++ )
    {
        a_begin = a + j*lda;

        bli_dscal( n_elem, &alpha_inv, a_begin, inca );
    }
}
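Usage sketch (hypothetical, same header assumption as above): row-major storage is expressed purely through the strides, a_rs = n and a_cs = 1, which triggers the row-wise access path shown in the body above.

#include "FLAME.h"  // assumed umbrella header exposing the bli_* prototypes

int main( void )
{
    int    m = 2, n = 5;
    double alpha = 0.5;
    double A[ 2 * 5 ] = { 1.0, 2.0, 3.0, 4.0, 5.0,
                          6.0, 7.0, 8.0, 9.0, 10.0 };  // row-major: a_rs = n, a_cs = 1

    // A := A / alpha (every element is doubled here since alpha = 0.5).
    bli_dinvscalm( BLIS_NO_CONJUGATE, m, n, &alpha, A, n, 1 );

    return 0;
}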
void bli_sinvscalm ( conj_t conj,
                     int    m,
                     int    n,
                     float* alpha,
                     float* a,
                     int    a_rs,
                     int    a_cs )
References bli_is_row_storage(), bli_is_vector(), bli_sinvert2s(), bli_sscal(), bli_vector_dim(), bli_vector_inc(), bli_zero_dim2(), and BLIS_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
    float  alpha_inv;
    float* a_begin;
    int    lda, inca;
    int    n_iter;
    int    n_elem;
    int    j;

    // Return early if possible.
    if ( bli_zero_dim2( m, n ) ) return;
    if ( bli_seq1( alpha ) ) return;

    // Handle cases where A is a vector to ensure that the underlying scal
    // gets invoked only once.
    if ( bli_is_vector( m, n ) )
    {
        // Initialize with values appropriate for a vector.
        n_iter = 1;
        n_elem = bli_vector_dim( m, n );
        lda    = 1; // multiplied by zero when n_iter == 1; not needed.
        inca   = bli_vector_inc( BLIS_NO_TRANSPOSE, m, n, a_rs, a_cs );
    }
    else // matrix case
    {
        // Initialize with optimal values for column-major storage.
        n_iter = n;
        n_elem = m;
        lda    = a_cs;
        inca   = a_rs;

        // An optimization: if A is row-major, then let's access the matrix
        // by rows instead of by columns to increase spatial locality.
        if ( bli_is_row_storage( a_rs, a_cs ) )
        {
            bli_swap_ints( n_iter, n_elem );
            bli_swap_ints( lda, inca );
        }
    }

    bli_sinvert2s( conj, alpha, &alpha_inv );

    for ( j = 0; j < n_iter; j++ )
    {
        a_begin = a + j*lda;

        bli_sscal( n_elem, &alpha_inv, a_begin, inca );
    }
}
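Usage sketch (hypothetical, same header assumption as above): when one dimension is 1 the routine takes the vector branch and performs a single underlying bli_sscal call.

#include "FLAME.h"  // assumed umbrella header exposing the bli_* prototypes

int main( void )
{
    int   m = 8, n = 1;                  // an m-by-1 "matrix", i.e., a column vector
    float alpha = 2.0F;
    float x[ 8 ] = { 1.0F, 2.0F, 3.0F, 4.0F, 5.0F, 6.0F, 7.0F, 8.0F };

    // x := x / alpha; the vector branch issues exactly one bli_sscal call.
    bli_sinvscalm( BLIS_NO_CONJUGATE, m, n, &alpha, x, 1, m );

    return 0;
}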
void bli_zdinvscalm ( conj_t    conj,
                      int       m,
                      int       n,
                      double*   alpha,
                      dcomplex* a,
                      int       a_rs,
                      int       a_cs )
References bli_dinvert2s(), bli_is_row_storage(), bli_is_vector(), bli_vector_dim(), bli_vector_inc(), bli_zdscal(), bli_zero_dim2(), and BLIS_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
    double    alpha_inv;
    dcomplex* a_begin;
    int       lda, inca;
    int       n_iter;
    int       n_elem;
    int       j;

    // Return early if possible.
    if ( bli_zero_dim2( m, n ) ) return;
    if ( bli_deq1( alpha ) ) return;

    // Handle cases where A is a vector to ensure that the underlying scal
    // gets invoked only once.
    if ( bli_is_vector( m, n ) )
    {
        // Initialize with values appropriate for a vector.
        n_iter = 1;
        n_elem = bli_vector_dim( m, n );
        lda    = 1; // multiplied by zero when n_iter == 1; not needed.
        inca   = bli_vector_inc( BLIS_NO_TRANSPOSE, m, n, a_rs, a_cs );
    }
    else // matrix case
    {
        // Initialize with optimal values for column-major storage.
        n_iter = n;
        n_elem = m;
        lda    = a_cs;
        inca   = a_rs;

        // An optimization: if A is row-major, then let's access the matrix
        // by rows instead of by columns to increase spatial locality.
        if ( bli_is_row_storage( a_rs, a_cs ) )
        {
            bli_swap_ints( n_iter, n_elem );
            bli_swap_ints( lda, inca );
        }
    }

    bli_dinvert2s( conj, alpha, &alpha_inv );

    for ( j = 0; j < n_iter; j++ )
    {
        a_begin = a + j*lda;

        bli_zdscal( n_elem, &alpha_inv, a_begin, inca );
    }
}
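Usage sketch (hypothetical, same header assumption as above): a real double alpha divides a double-complex matrix; the scaling is applied to both the real and imaginary parts via bli_zdscal.

#include "FLAME.h"  // assumed umbrella header exposing the bli_* prototypes

int main( void )
{
    int      m = 3, n = 3;
    double   alpha = 10.0;                       // real divisor for a dcomplex matrix
    dcomplex A[ 3 * 3 ] = { { 5.0, -5.0 } };     // column-major: a_rs = 1, a_cs = m

    // A := A / alpha, applied to both real and imaginary parts.
    bli_zdinvscalm( BLIS_NO_CONJUGATE, m, n, &alpha, A, 1, m );

    return 0;
}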
void bli_zinvscalm ( conj_t    conj,
                     int       m,
                     int       n,
                     dcomplex* alpha,
                     dcomplex* a,
                     int       a_rs,
                     int       a_cs )
References bli_is_row_storage(), bli_is_vector(), bli_vector_dim(), bli_vector_inc(), bli_zero_dim2(), bli_zinvert2s(), bli_zscal(), and BLIS_NO_TRANSPOSE.
Referenced by FLA_Inv_scal_external(), and FLA_Inv_scalc_external().
{
    dcomplex  alpha_inv;
    dcomplex* a_begin;
    int       lda, inca;
    int       n_iter;
    int       n_elem;
    int       j;

    // Return early if possible.
    if ( bli_zero_dim2( m, n ) ) return;
    if ( bli_zeq1( alpha ) ) return;

    // Handle cases where A is a vector to ensure that the underlying scal
    // gets invoked only once.
    if ( bli_is_vector( m, n ) )
    {
        // Initialize with values appropriate for a vector.
        n_iter = 1;
        n_elem = bli_vector_dim( m, n );
        lda    = 1; // multiplied by zero when n_iter == 1; not needed.
        inca   = bli_vector_inc( BLIS_NO_TRANSPOSE, m, n, a_rs, a_cs );
    }
    else // matrix case
    {
        // Initialize with optimal values for column-major storage.
        n_iter = n;
        n_elem = m;
        lda    = a_cs;
        inca   = a_rs;

        // An optimization: if A is row-major, then let's access the matrix
        // by rows instead of by columns to increase spatial locality.
        if ( bli_is_row_storage( a_rs, a_cs ) )
        {
            bli_swap_ints( n_iter, n_elem );
            bli_swap_ints( lda, inca );
        }
    }

    bli_zinvert2s( conj, alpha, &alpha_inv );

    for ( j = 0; j < n_iter; j++ )
    {
        a_begin = a + j*lda;

        bli_zscal( n_elem, &alpha_inv, a_begin, inca );
    }
}
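Usage sketch (hypothetical, same header and conj_t assumptions as above): because a_rs and a_cs are general strides, the routine can operate on a submatrix of a larger array; here a 2x2 block of a 4x4 column-major matrix is inverse-scaled in place by a conjugated complex alpha.

#include "FLAME.h"  // assumed umbrella header exposing the bli_* prototypes

int main( void )
{
    int      ld = 4;                              // leading dimension of the parent matrix
    dcomplex B[ 4 * 4 ] = { { 1.0, 1.0 } };       // column-major parent: rs = 1, cs = ld
    dcomplex alpha = { 0.0, 2.0 };

    // Scale the 2x2 block whose top-left element is B(1,1) (0-based):
    // B11 := B11 / conj(alpha).
    dcomplex* B11 = B + 1 + 1 * ld;
    bli_zinvscalm( BLIS_CONJUGATE, 2, 2, &alpha, B11, 1, ld );

    return 0;
}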