200 changes: 198 additions & 2 deletions kernel/power/dgemm_kernel_power10.c
@@ -188,7 +188,7 @@ CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, FLOAT * A, FLOAT * B,
v4sf_t *rowC;
v4sf_t result[4];
__vector_quad acc0, acc1, acc2, acc3, acc4,acc5,acc6,acc7;
-BLASLONG l = 0;
+BLASLONG l = 1;
vec_t *rowA = (vec_t *) & AO[0];
__vector_pair rowB, rowB1;
rowB = *((__vector_pair *)((void *)&BO[0]));
@@ -201,7 +201,203 @@ CNAME (BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, FLOAT * A, FLOAT * B,
__builtin_mma_xvf64ger (&acc5, rowB1, rowA[2]);
__builtin_mma_xvf64ger (&acc6, rowB, rowA[3]);
__builtin_mma_xvf64ger (&acc7, rowB1, rowA[3]);
-for (l = 1; l < temp; l++)

Review comment: Can we add a macro like the one at https://github.com/OpenMathLib/OpenBLAS/blob/develop/kernel/power/sgemm_kernel_power10.c#L128 to reduce the number of lines?
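
A minimal sketch of such a macro, assuming the same unroll pattern as the loop below; the name KERNEL_MMA and the do/while wrapper are illustrative, not the sgemm macro verbatim:

#define KERNEL_MMA(i)                                                           \
do {                                                                            \
    vec_t *rA = (vec_t *)&AO[(l + (i)) << 3];                                   \
    __vector_pair rB   = *((__vector_pair *)((void *)&BO[(l + (i)) << 3]));     \
    __vector_pair rB_1 = *((__vector_pair *)((void *)&BO[((l + (i)) << 3) + 4])); \
    __builtin_mma_xvf64gerpp (&acc0, rB,   rA[0]);                              \
    __builtin_mma_xvf64gerpp (&acc1, rB_1, rA[0]);                              \
    __builtin_mma_xvf64gerpp (&acc2, rB,   rA[1]);                              \
    __builtin_mma_xvf64gerpp (&acc3, rB_1, rA[1]);                              \
    __builtin_mma_xvf64gerpp (&acc4, rB,   rA[2]);                              \
    __builtin_mma_xvf64gerpp (&acc5, rB_1, rA[2]);                              \
    __builtin_mma_xvf64gerpp (&acc6, rB,   rA[3]);                              \
    __builtin_mma_xvf64gerpp (&acc7, rB_1, rA[3]);                              \
} while (0)

/* The 16-way unrolled body then collapses to: */
for (l = 1; l + 15 < temp; l += 16)
{
    KERNEL_MMA(0);  KERNEL_MMA(1);  KERNEL_MMA(2);  KERNEL_MMA(3);
    KERNEL_MMA(4);  KERNEL_MMA(5);  KERNEL_MMA(6);  KERNEL_MMA(7);
    KERNEL_MMA(8);  KERNEL_MMA(9);  KERNEL_MMA(10); KERNEL_MMA(11);
    KERNEL_MMA(12); KERNEL_MMA(13); KERNEL_MMA(14); KERNEL_MMA(15);
}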

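/* acc0..acc7 were seeded above with xvf64ger for the peeled first
   iteration (hence l starts at 1); the unrolled loop below folds in
   the remaining k-steps with the accumulating xvf64gerpp form. */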
for (l = 1; l + 15 < temp; l += 16)
{

vec_t *rowA0 = (vec_t *)&AO[(l + 0) << 3];
__vector_pair rowB0 = *((__vector_pair *)((void *)&BO[(l + 0) << 3]));
__vector_pair rowB0_1 = *((__vector_pair *)((void *)&BO[((l + 0) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB0, rowA0[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB0_1, rowA0[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB0, rowA0[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB0_1, rowA0[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB0, rowA0[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB0_1, rowA0[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB0, rowA0[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB0_1, rowA0[3]);

vec_t *rowA1 = (vec_t *)&AO[(l + 1) << 3];
__vector_pair rowB1 = *((__vector_pair *)((void *)&BO[(l + 1) << 3]));
__vector_pair rowB1_1 = *((__vector_pair *)((void *)&BO[((l + 1) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB1, rowA1[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB1_1, rowA1[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB1, rowA1[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB1_1, rowA1[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB1, rowA1[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB1_1, rowA1[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB1, rowA1[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB1_1, rowA1[3]);

vec_t *rowA2 = (vec_t *)&AO[(l + 2) << 3];
__vector_pair rowB2 = *((__vector_pair *)((void *)&BO[(l + 2) << 3]));
__vector_pair rowB2_1 = *((__vector_pair *)((void *)&BO[((l + 2) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB2, rowA2[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB2_1, rowA2[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB2, rowA2[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB2_1, rowA2[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB2, rowA2[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB2_1, rowA2[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB2, rowA2[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB2_1, rowA2[3]);

vec_t *rowA3 = (vec_t *)&AO[(l + 3) << 3];
__vector_pair rowB3 = *((__vector_pair *)((void *)&BO[(l + 3) << 3]));
__vector_pair rowB3_1 = *((__vector_pair *)((void *)&BO[((l + 3) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB3, rowA3[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB3_1, rowA3[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB3, rowA3[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB3_1, rowA3[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB3, rowA3[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB3_1, rowA3[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB3, rowA3[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB3_1, rowA3[3]);

vec_t *rowA4 = (vec_t *)&AO[(l + 4) << 3];
__vector_pair rowB4 = *((__vector_pair *)((void *)&BO[(l + 4) << 3]));
__vector_pair rowB4_1 = *((__vector_pair *)((void *)&BO[((l + 4) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB4, rowA4[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB4_1, rowA4[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB4, rowA4[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB4_1, rowA4[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB4, rowA4[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB4_1, rowA4[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB4, rowA4[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB4_1, rowA4[3]);

vec_t *rowA5 = (vec_t *)&AO[(l + 5) << 3];
__vector_pair rowB5 = *((__vector_pair *)((void *)&BO[(l + 5) << 3]));
__vector_pair rowB5_1 = *((__vector_pair *)((void *)&BO[((l + 5) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB5, rowA5[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB5_1, rowA5[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB5, rowA5[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB5_1, rowA5[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB5, rowA5[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB5_1, rowA5[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB5, rowA5[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB5_1, rowA5[3]);

vec_t *rowA6 = (vec_t *)&AO[(l + 6) << 3];
__vector_pair rowB6 = *((__vector_pair *)((void *)&BO[(l + 6) << 3]));
__vector_pair rowB6_1 = *((__vector_pair *)((void *)&BO[((l + 6) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB6, rowA6[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB6_1, rowA6[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB6, rowA6[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB6_1, rowA6[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB6, rowA6[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB6_1, rowA6[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB6, rowA6[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB6_1, rowA6[3]);

vec_t *rowA7 = (vec_t *)&AO[(l + 7) << 3];
__vector_pair rowB7 = *((__vector_pair *)((void *)&BO[(l + 7) << 3]));
__vector_pair rowB7_1 = *((__vector_pair *)((void *)&BO[((l + 7) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB7, rowA7[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB7_1, rowA7[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB7, rowA7[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB7_1, rowA7[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB7, rowA7[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB7_1, rowA7[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB7, rowA7[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB7_1, rowA7[3]);

vec_t *rowA8 = (vec_t *)&AO[(l + 8) << 3];
__vector_pair rowB8 = *((__vector_pair *)((void *)&BO[(l + 8) << 3]));
__vector_pair rowB8_1 = *((__vector_pair *)((void *)&BO[((l + 8) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB8, rowA8[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB8_1, rowA8[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB8, rowA8[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB8_1, rowA8[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB8, rowA8[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB8_1, rowA8[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB8, rowA8[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB8_1, rowA8[3]);

vec_t *rowA9 = (vec_t *)&AO[(l + 9) << 3];
__vector_pair rowB9 = *((__vector_pair *)((void *)&BO[(l + 9) << 3]));
__vector_pair rowB9_1 = *((__vector_pair *)((void *)&BO[((l + 9) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB9, rowA9[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB9_1, rowA9[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB9, rowA9[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB9_1, rowA9[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB9, rowA9[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB9_1, rowA9[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB9, rowA9[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB9_1, rowA9[3]);

vec_t *rowA10 = (vec_t *)&AO[(l + 10) << 3];
__vector_pair rowB10 = *((__vector_pair *)((void *)&BO[(l + 10) << 3]));
__vector_pair rowB10_1 = *((__vector_pair *)((void *)&BO[((l + 10) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB10, rowA10[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB10_1, rowA10[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB10, rowA10[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB10_1, rowA10[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB10, rowA10[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB10_1, rowA10[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB10, rowA10[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB10_1, rowA10[3]);

vec_t *rowA11 = (vec_t *)&AO[(l + 11) << 3];
__vector_pair rowB11 = *((__vector_pair *)((void *)&BO[(l + 11) << 3]));
__vector_pair rowB11_1 = *((__vector_pair *)((void *)&BO[((l + 11) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB11, rowA11[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB11_1, rowA11[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB11, rowA11[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB11_1, rowA11[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB11, rowA11[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB11_1, rowA11[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB11, rowA11[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB11_1, rowA11[3]);

vec_t *rowA12 = (vec_t *)&AO[(l + 12) << 3];
__vector_pair rowB12 = *((__vector_pair *)((void *)&BO[(l + 12) << 3]));
__vector_pair rowB12_1 = *((__vector_pair *)((void *)&BO[((l + 12) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB12, rowA12[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB12_1, rowA12[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB12, rowA12[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB12_1, rowA12[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB12, rowA12[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB12_1, rowA12[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB12, rowA12[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB12_1, rowA12[3]);

vec_t *rowA13 = (vec_t *)&AO[(l + 13) << 3];
__vector_pair rowB13 = *((__vector_pair *)((void *)&BO[(l + 13) << 3]));
__vector_pair rowB13_1 = *((__vector_pair *)((void *)&BO[((l + 13) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB13, rowA13[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB13_1, rowA13[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB13, rowA13[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB13_1, rowA13[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB13, rowA13[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB13_1, rowA13[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB13, rowA13[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB13_1, rowA13[3]);

vec_t *rowA14 = (vec_t *)&AO[(l + 14) << 3];
__vector_pair rowB14 = *((__vector_pair *)((void *)&BO[(l + 14) << 3]));
__vector_pair rowB14_1 = *((__vector_pair *)((void *)&BO[((l + 14) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB14, rowA14[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB14_1, rowA14[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB14, rowA14[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB14_1, rowA14[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB14, rowA14[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB14_1, rowA14[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB14, rowA14[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB14_1, rowA14[3]);

vec_t *rowA15 = (vec_t *)&AO[(l + 15) << 3];
__vector_pair rowB15 = *((__vector_pair *)((void *)&BO[(l + 15) << 3]));
__vector_pair rowB15_1 = *((__vector_pair *)((void *)&BO[((l + 15) << 3) + 4]));
__builtin_mma_xvf64gerpp(&acc0, rowB15, rowA15[0]);
__builtin_mma_xvf64gerpp(&acc1, rowB15_1, rowA15[0]);
__builtin_mma_xvf64gerpp(&acc2, rowB15, rowA15[1]);
__builtin_mma_xvf64gerpp(&acc3, rowB15_1, rowA15[1]);
__builtin_mma_xvf64gerpp(&acc4, rowB15, rowA15[2]);
__builtin_mma_xvf64gerpp(&acc5, rowB15_1, rowA15[2]);
__builtin_mma_xvf64gerpp(&acc6, rowB15, rowA15[3]);
__builtin_mma_xvf64gerpp(&acc7, rowB15_1, rowA15[3]);

}
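/* Remainder loop: the (temp - 1) % 16 trailing iterations left over
   from the 16-way unrolled loop above. */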
for (; l < temp; l++)
{
rowA = (vec_t *) & AO[l << 3];
rowB = *((__vector_pair *)((void *)&BO[l << 3]));
…
45 changes: 45 additions & 0 deletions kernel/power/dgemv_n_microk_power10.c
@@ -25,8 +25,53 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/

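/* Advertise these optimized kernels so the common dgemv_n driver uses
   them in place of its generic fallbacks. */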
#define HAVE_KERNEL_4x2 1
#define HAVE_KERNEL_4x1 1
#define HAVE_KERNEL_4x4 1

static void dgemv_kernel_4x2(BLASLONG n, FLOAT *a0, FLOAT *a1, FLOAT *xo, FLOAT *y, FLOAT alpha)
{
    FLOAT x0 = xo[0] * alpha;
    FLOAT x1 = xo[1] * alpha;
    __vector double v_x0 = {x0, x0};
    __vector double v_x1 = {x1, x1};
    __vector double *v_y = (__vector double *)y;
    __vector double *va0 = (__vector double *)a0;
    __vector double *va1 = (__vector double *)a1;
    /* Each __vector double holds two doubles, and each iteration
       updates two vectors, i.e. four elements of y. */
    for (int i = 0; i < n/2; i += 2)
    {
        v_y[i]   += va0[i]   * v_x0 + va1[i]   * v_x1;
        v_y[i+1] += va0[i+1] * v_x0 + va1[i+1] * v_x1;
    }
}

static void dgemv_kernel_4x1(BLASLONG n, FLOAT *a0, FLOAT *xo, FLOAT *y, FLOAT alpha)
{
    FLOAT x0 = xo[0] * alpha;
    __vector double v_x0 = {x0, x0};
    __vector double *v_y = (__vector double *)y;
    __vector double *va0 = (__vector double *)a0;
    /* Same scheme as the 4x2 kernel, with a single column of A. */
    for (int i = 0; i < n/2; i += 2)
    {
        v_y[i]   += va0[i]   * v_x0;
        v_y[i+1] += va0[i+1] * v_x0;
    }
}
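
For reference, a scalar sketch of what these two kernels compute (assuming n is a multiple of 4, as the 4x naming and the two-vector loop step imply):

/* dgemv_kernel_4x2: y += alpha * (x[0] * A[:,0] + x[1] * A[:,1]) */
for (BLASLONG i = 0; i < n; i++)
    y[i] += alpha * (xo[0] * a0[i] + xo[1] * a1[i]);

/* dgemv_kernel_4x1: y += alpha * x[0] * A[:,0] */
for (BLASLONG i = 0; i < n; i++)
    y[i] += alpha * xo[0] * a0[i];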


static void dgemv_kernel_4x4 (long n, double *ap, long lda, double *x, double *y, double alpha)
{
double *a0;
…