git: fcbf47a8eb7c - main - databases/caterva: Rename patch files

From: Po-Chuan Hsieh <sunpoet@FreeBSD.org>
Date: Wed, 05 Apr 2023 17:33:39 UTC
The branch main has been updated by sunpoet:

URL: https://cgit.FreeBSD.org/ports/commit/?id=fcbf47a8eb7c959b31e76d35b4da52fec4017f49

commit fcbf47a8eb7c959b31e76d35b4da52fec4017f49
Author:     Po-Chuan Hsieh <sunpoet@FreeBSD.org>
AuthorDate: 2023-04-05 17:12:49 +0000
Commit:     Po-Chuan Hsieh <sunpoet@FreeBSD.org>
CommitDate: 2023-04-05 17:29:44 +0000

    databases/caterva: Rename patch files
---
 ...patch-contribs_c-blosc2_blosc_shuffle-altivec.c | 134 --------------
 ...tch-contribs_c-blosc2_blosc_transpose-altivec.h |  58 -------
 ..._blosc_bitshuffle-altivec.c => patch-powerpc64} | 192 +++++++++++++++++++++
 3 files changed, 192 insertions(+), 192 deletions(-)

diff --git a/databases/caterva/files/patch-contribs_c-blosc2_blosc_shuffle-altivec.c b/databases/caterva/files/patch-contribs_c-blosc2_blosc_shuffle-altivec.c
deleted file mode 100644
index 69a9ed1dbe69..000000000000
--- a/databases/caterva/files/patch-contribs_c-blosc2_blosc_shuffle-altivec.c
+++ /dev/null
@@ -1,134 +0,0 @@
---- contribs/c-blosc2/blosc/shuffle-altivec.c.orig	2023-03-23 20:25:30 UTC
-+++ contribs/c-blosc2/blosc/shuffle-altivec.c
-@@ -25,7 +25,7 @@ shuffle2_altivec(uint8_t* const dest, const uint8_t* c
-                  const int32_t vectorizable_elements, const int32_t total_elements){
-   static const int32_t bytesoftype = 2;
-   uint32_t i, j;
--  __vector uint8_t xmm0[2];
-+  __vector unsigned char xmm0[2];
- 
-   for (j = 0; j < vectorizable_elements; j += 16){
-     /* Fetch 16 elements (32 bytes) */
-@@ -47,7 +47,7 @@ shuffle4_altivec(uint8_t* const dest, const uint8_t* c
-                  const int32_t vectorizable_elements, const int32_t total_elements){
-   static const int32_t bytesoftype = 4;
-   int32_t i, j;
--  __vector uint8_t xmm0[4];
-+  __vector unsigned char xmm0[4];
- 
-   for (j = 0; j < vectorizable_elements; j += 16)
-   {
-@@ -73,7 +73,7 @@ shuffle8_altivec(uint8_t* const dest, const uint8_t* c
-                  const int32_t vectorizable_elements, const int32_t total_elements) {
-   static const uint8_t bytesoftype = 8;
-   int32_t i, j;
--  __vector uint8_t xmm0[8];
-+  __vector unsigned char xmm0[8];
- 
-   for (j = 0; j < vectorizable_elements; j += 16)
-   {
-@@ -96,7 +96,7 @@ shuffle16_altivec(uint8_t* const dest, const uint8_t* 
-                   const int32_t vectorizable_elements, const int32_t total_elements) {
-   static const int32_t bytesoftype = 16;
-   int32_t i, j;
--  __vector uint8_t xmm0[16];
-+  __vector unsigned char xmm0[16];
- 
-   for (j = 0; j < vectorizable_elements; j += 16)
-   {
-@@ -121,7 +121,7 @@ shuffle16_tiled_altivec(uint8_t* const dest, const uin
-                         const int32_t bytesoftype) {
-   int32_t j, k;
-   const int32_t vecs_per_el_rem = bytesoftype & 0xF;
--  __vector uint8_t xmm[16];
-+  __vector unsigned char xmm[16];
- 
-   for (j = 0; j < vectorizable_elements; j += 16) {
-     /* Advance the offset into the type by the vector size (in bytes), unless this is
-@@ -152,7 +152,7 @@ unshuffle2_altivec(uint8_t* const dest, const uint8_t*
-                    const int32_t vectorizable_elements, const int32_t total_elements) {
-   static const int32_t bytesoftype = 2;
-   uint32_t i, j;
--  __vector uint8_t xmm0[2], xmm1[2];
-+  __vector unsigned char xmm0[2], xmm1[2];
- 
-   for (j = 0; j < vectorizable_elements; j += 16) {
-     /* Load 16 elements (32 bytes) into 2 vectors registers. */
-@@ -176,7 +176,7 @@ unshuffle4_altivec(uint8_t* const dest, const uint8_t*
-                    const int32_t vectorizable_elements, const int32_t total_elements) {
-   static const int32_t bytesoftype = 4;
-   uint32_t i, j;
--  __vector uint8_t xmm0[4], xmm1[4];
-+  __vector unsigned char xmm0[4], xmm1[4];
- 
-   for (j = 0; j < vectorizable_elements; j += 16) {
-     /* Load 16 elements (64 bytes) into 4 vectors registers. */
-@@ -191,11 +191,11 @@ unshuffle4_altivec(uint8_t* const dest, const uint8_t*
-     /* Shuffle 2-byte words */
-     for (i = 0; i < 2; i++) {
-       /* Compute the low 32 bytes */
--      xmm0[i] = (__vector uint8_t) vec_vmrghh((__vector uint16_t)xmm1[i * 2],
--                                              (__vector uint16_t) xmm1[i * 2 + 1]);
-+      xmm0[i] = (__vector unsigned char) vec_vmrghh((__vector unsigned short)xmm1[i * 2],
-+                                              (__vector unsigned short) xmm1[i * 2 + 1]);
-       /* Compute the hi 32 bytes */
--      xmm0[i+2] = (__vector uint8_t) vec_vmrglh((__vector uint16_t)xmm1[i * 2],
--                                                (__vector uint16_t)xmm1[i * 2 + 1]);
-+      xmm0[i+2] = (__vector unsigned char) vec_vmrglh((__vector unsigned short)xmm1[i * 2],
-+                                                (__vector unsigned short)xmm1[i * 2 + 1]);
-     }
-     /* Store the result vectors in proper order */
-     vec_xst(xmm0[0], bytesoftype * j, dest);
-@@ -211,7 +211,7 @@ unshuffle8_altivec(uint8_t* const dest, const uint8_t*
-                    const int32_t vectorizable_elements, const int32_t total_elements) {
-   static const uint8_t bytesoftype = 8;
-   uint32_t i, j;
--  __vector uint8_t xmm0[8], xmm1[8];
-+  __vector unsigned char xmm0[8], xmm1[8];
- 
-   // Initialize permutations for writing
-   for (j = 0; j < vectorizable_elements; j += 16) {
-@@ -225,17 +225,17 @@ unshuffle8_altivec(uint8_t* const dest, const uint8_t*
-     }
-     /* Shuffle 2-byte words */
-     for (i = 0; i < 4; i++) {
--      xmm0[i] = (__vector uint8_t)vec_vmrghh((__vector uint16_t)xmm1[i * 2],
--                                             (__vector uint16_t)xmm1[i * 2 + 1]);
--      xmm0[4 + i] = (__vector uint8_t)vec_vmrglh((__vector uint16_t)xmm1[i * 2],
--                                                 (__vector uint16_t)xmm1[i * 2 + 1]);
-+      xmm0[i] = (__vector unsigned char)vec_vmrghh((__vector unsigned short)xmm1[i * 2],
-+                                             (__vector unsigned short)xmm1[i * 2 + 1]);
-+      xmm0[4 + i] = (__vector unsigned char)vec_vmrglh((__vector unsigned short)xmm1[i * 2],
-+                                                 (__vector unsigned short)xmm1[i * 2 + 1]);
-     }
-     /* Shuffle 4-byte dwords */
-     for (i = 0; i < 4; i++) {
--      xmm1[i] = (__vector uint8_t)vec_vmrghw((__vector uint32_t)xmm0[i * 2],
--                                             (__vector uint32_t)xmm0[i * 2 + 1]);
--      xmm1[4 + i] = (__vector uint8_t)vec_vmrglw((__vector uint32_t)xmm0[i * 2],
--                                                 (__vector uint32_t)xmm0[i * 2 + 1]);
-+      xmm1[i] = (__vector unsigned char)vec_vmrghw((__vector unsigned int)xmm0[i * 2],
-+                                             (__vector unsigned int)xmm0[i * 2 + 1]);
-+      xmm1[4 + i] = (__vector unsigned char)vec_vmrglw((__vector unsigned int)xmm0[i * 2],
-+                                                 (__vector unsigned int)xmm0[i * 2 + 1]);
-     }
-     /* Store the result vectors in proper order */
-     vec_xst(xmm1[0], bytesoftype * j, dest);
-@@ -256,7 +256,7 @@ unshuffle16_altivec(uint8_t* const dest, const uint8_t
-                     const int32_t vectorizable_elements, const int32_t total_elements) {
-   static const int32_t bytesoftype = 16;
-   uint32_t i, j;
--  __vector uint8_t xmm0[16];
-+  __vector unsigned char xmm0[16];
- 
-   for (j = 0; j < vectorizable_elements; j += 16) {
-     /* Load 16 elements (64 bytes) into 4 vectors registers. */
-@@ -280,7 +280,7 @@ unshuffle16_tiled_altivec(uint8_t* const dest, const u
-                           const int32_t bytesoftype) {
-   int32_t i, j, offset_into_type;
-   const int32_t vecs_per_el_rem = bytesoftype &  0xF;
--  __vector uint8_t xmm[16];
-+  __vector unsigned char xmm[16];
- 
- 
-   /* Advance the offset into the type by the vector size (in bytes), unless this is
diff --git a/databases/caterva/files/patch-contribs_c-blosc2_blosc_transpose-altivec.h b/databases/caterva/files/patch-contribs_c-blosc2_blosc_transpose-altivec.h
deleted file mode 100644
index 060dceb6dde1..000000000000
--- a/databases/caterva/files/patch-contribs_c-blosc2_blosc_transpose-altivec.h
+++ /dev/null
@@ -1,58 +0,0 @@
---- contribs/c-blosc2/blosc/transpose-altivec.h.orig	2023-03-23 20:13:07 UTC
-+++ contribs/c-blosc2/blosc/transpose-altivec.h
-@@ -15,18 +15,18 @@ extern "C" {
- extern "C" {
- #endif
- 
--static const __vector uint8_t even = (const __vector uint8_t) {
-+static const __vector unsigned char even = (const __vector unsigned char) {
-   0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
-   0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e};
- 
--static const __vector uint8_t odd = (const __vector uint8_t) {
-+static const __vector unsigned char odd = (const __vector unsigned char) {
-   0x01, 0x03, 0x05, 0x07, 0x09, 0x0b, 0x0d, 0x0f,
-   0x11, 0x13, 0x15, 0x17, 0x19, 0x1b, 0x1d, 0x1f};
- 
- 
- /* Transpose inplace 2 vectors of 16 bytes in src into dst. */
--static void transpose2x16(__vector uint8_t *xmm0) {
--  __vector uint8_t xmm1[2];
-+static void transpose2x16(__vector unsigned char *xmm0) {
-+  __vector unsigned char xmm1[2];
-   xmm1[0] = vec_perm(xmm0[0], xmm0[1], even);
-   xmm1[1] = vec_perm(xmm0[0], xmm0[1], odd);
- 
-@@ -38,8 +38,8 @@ static void transpose2x16(__vector uint8_t *xmm0) {
- 
- /* Transpose inplace 4 vectors of 16 bytes in src into dst.
-  * Total cost: 8 calls to vec_perm. */
--static void transpose4x16(__vector uint8_t *xmm0) {
--  __vector uint8_t xmm1[4];
-+static void transpose4x16(__vector unsigned char *xmm0) {
-+  __vector unsigned char xmm1[4];
- 
-   /* Transpose vectors 0-1*/
-   xmm1[0] = vec_perm(xmm0[0], xmm0[1], even);
-@@ -56,8 +56,8 @@ static void transpose4x16(__vector uint8_t *xmm0) {
- 
- /* Transpose inplace 8 vectors of 16 bytes in src into dst.
-  * Total cost: 24 calls to vec_perm. */
--static void transpose8x16(__vector uint8_t *xmm0) {
--  __vector uint8_t xmm1[8];
-+static void transpose8x16(__vector unsigned char *xmm0) {
-+  __vector unsigned char xmm1[8];
- 
-   /* Transpose vectors 0-1*/
-   for (int i = 0; i < 8; i += 2){
-@@ -85,8 +85,8 @@ static void transpose8x16(__vector uint8_t *xmm0) {
- 
- /* Transpose inplace 16 vectors of 16 bytes in src into dst.
-  * Total cost: 64 calls to vec_perm. */
--static void transpose16x16(__vector uint8_t * xmm0){
--  __vector uint8_t xmm1[16];
-+static void transpose16x16(__vector unsigned char * xmm0){
-+  __vector unsigned char xmm1[16];
-   /* Transpose vectors 0-1*/
-   for (int i = 0; i < 16; i += 2){
-     xmm1[i] = vec_perm(xmm0[i], xmm0[i+1], even);
diff --git a/databases/caterva/files/patch-contribs_c-blosc2_blosc_bitshuffle-altivec.c b/databases/caterva/files/patch-powerpc64
similarity index 53%
rename from databases/caterva/files/patch-contribs_c-blosc2_blosc_bitshuffle-altivec.c
rename to databases/caterva/files/patch-powerpc64
index a5f4cc517c65..e14cc95a5ec9 100644
--- a/databases/caterva/files/patch-contribs_c-blosc2_blosc_bitshuffle-altivec.c
+++ b/databases/caterva/files/patch-powerpc64
@@ -219,3 +219,195 @@
            oui16 = (uint16_t*)&out_b[ii + (jj>>3) + kk * elem_size];
            *oui16 = tmp[4];
          }
+--- contribs/c-blosc2/blosc/shuffle-altivec.c.orig	2023-03-23 20:25:30 UTC
++++ contribs/c-blosc2/blosc/shuffle-altivec.c
+@@ -25,7 +25,7 @@ shuffle2_altivec(uint8_t* const dest, const uint8_t* c
+                  const int32_t vectorizable_elements, const int32_t total_elements){
+   static const int32_t bytesoftype = 2;
+   uint32_t i, j;
+-  __vector uint8_t xmm0[2];
++  __vector unsigned char xmm0[2];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16){
+     /* Fetch 16 elements (32 bytes) */
+@@ -47,7 +47,7 @@ shuffle4_altivec(uint8_t* const dest, const uint8_t* c
+                  const int32_t vectorizable_elements, const int32_t total_elements){
+   static const int32_t bytesoftype = 4;
+   int32_t i, j;
+-  __vector uint8_t xmm0[4];
++  __vector unsigned char xmm0[4];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16)
+   {
+@@ -73,7 +73,7 @@ shuffle8_altivec(uint8_t* const dest, const uint8_t* c
+                  const int32_t vectorizable_elements, const int32_t total_elements) {
+   static const uint8_t bytesoftype = 8;
+   int32_t i, j;
+-  __vector uint8_t xmm0[8];
++  __vector unsigned char xmm0[8];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16)
+   {
+@@ -96,7 +96,7 @@ shuffle16_altivec(uint8_t* const dest, const uint8_t* 
+                   const int32_t vectorizable_elements, const int32_t total_elements) {
+   static const int32_t bytesoftype = 16;
+   int32_t i, j;
+-  __vector uint8_t xmm0[16];
++  __vector unsigned char xmm0[16];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16)
+   {
+@@ -121,7 +121,7 @@ shuffle16_tiled_altivec(uint8_t* const dest, const uin
+                         const int32_t bytesoftype) {
+   int32_t j, k;
+   const int32_t vecs_per_el_rem = bytesoftype & 0xF;
+-  __vector uint8_t xmm[16];
++  __vector unsigned char xmm[16];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16) {
+     /* Advance the offset into the type by the vector size (in bytes), unless this is
+@@ -152,7 +152,7 @@ unshuffle2_altivec(uint8_t* const dest, const uint8_t*
+                    const int32_t vectorizable_elements, const int32_t total_elements) {
+   static const int32_t bytesoftype = 2;
+   uint32_t i, j;
+-  __vector uint8_t xmm0[2], xmm1[2];
++  __vector unsigned char xmm0[2], xmm1[2];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16) {
+     /* Load 16 elements (32 bytes) into 2 vectors registers. */
+@@ -176,7 +176,7 @@ unshuffle4_altivec(uint8_t* const dest, const uint8_t*
+                    const int32_t vectorizable_elements, const int32_t total_elements) {
+   static const int32_t bytesoftype = 4;
+   uint32_t i, j;
+-  __vector uint8_t xmm0[4], xmm1[4];
++  __vector unsigned char xmm0[4], xmm1[4];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16) {
+     /* Load 16 elements (64 bytes) into 4 vectors registers. */
+@@ -191,11 +191,11 @@ unshuffle4_altivec(uint8_t* const dest, const uint8_t*
+     /* Shuffle 2-byte words */
+     for (i = 0; i < 2; i++) {
+       /* Compute the low 32 bytes */
+-      xmm0[i] = (__vector uint8_t) vec_vmrghh((__vector uint16_t)xmm1[i * 2],
+-                                              (__vector uint16_t) xmm1[i * 2 + 1]);
++      xmm0[i] = (__vector unsigned char) vec_vmrghh((__vector unsigned short)xmm1[i * 2],
++                                              (__vector unsigned short) xmm1[i * 2 + 1]);
+       /* Compute the hi 32 bytes */
+-      xmm0[i+2] = (__vector uint8_t) vec_vmrglh((__vector uint16_t)xmm1[i * 2],
+-                                                (__vector uint16_t)xmm1[i * 2 + 1]);
++      xmm0[i+2] = (__vector unsigned char) vec_vmrglh((__vector unsigned short)xmm1[i * 2],
++                                                (__vector unsigned short)xmm1[i * 2 + 1]);
+     }
+     /* Store the result vectors in proper order */
+     vec_xst(xmm0[0], bytesoftype * j, dest);
+@@ -211,7 +211,7 @@ unshuffle8_altivec(uint8_t* const dest, const uint8_t*
+                    const int32_t vectorizable_elements, const int32_t total_elements) {
+   static const uint8_t bytesoftype = 8;
+   uint32_t i, j;
+-  __vector uint8_t xmm0[8], xmm1[8];
++  __vector unsigned char xmm0[8], xmm1[8];
+ 
+   // Initialize permutations for writing
+   for (j = 0; j < vectorizable_elements; j += 16) {
+@@ -225,17 +225,17 @@ unshuffle8_altivec(uint8_t* const dest, const uint8_t*
+     }
+     /* Shuffle 2-byte words */
+     for (i = 0; i < 4; i++) {
+-      xmm0[i] = (__vector uint8_t)vec_vmrghh((__vector uint16_t)xmm1[i * 2],
+-                                             (__vector uint16_t)xmm1[i * 2 + 1]);
+-      xmm0[4 + i] = (__vector uint8_t)vec_vmrglh((__vector uint16_t)xmm1[i * 2],
+-                                                 (__vector uint16_t)xmm1[i * 2 + 1]);
++      xmm0[i] = (__vector unsigned char)vec_vmrghh((__vector unsigned short)xmm1[i * 2],
++                                             (__vector unsigned short)xmm1[i * 2 + 1]);
++      xmm0[4 + i] = (__vector unsigned char)vec_vmrglh((__vector unsigned short)xmm1[i * 2],
++                                                 (__vector unsigned short)xmm1[i * 2 + 1]);
+     }
+     /* Shuffle 4-byte dwords */
+     for (i = 0; i < 4; i++) {
+-      xmm1[i] = (__vector uint8_t)vec_vmrghw((__vector uint32_t)xmm0[i * 2],
+-                                             (__vector uint32_t)xmm0[i * 2 + 1]);
+-      xmm1[4 + i] = (__vector uint8_t)vec_vmrglw((__vector uint32_t)xmm0[i * 2],
+-                                                 (__vector uint32_t)xmm0[i * 2 + 1]);
++      xmm1[i] = (__vector unsigned char)vec_vmrghw((__vector unsigned int)xmm0[i * 2],
++                                             (__vector unsigned int)xmm0[i * 2 + 1]);
++      xmm1[4 + i] = (__vector unsigned char)vec_vmrglw((__vector unsigned int)xmm0[i * 2],
++                                                 (__vector unsigned int)xmm0[i * 2 + 1]);
+     }
+     /* Store the result vectors in proper order */
+     vec_xst(xmm1[0], bytesoftype * j, dest);
+@@ -256,7 +256,7 @@ unshuffle16_altivec(uint8_t* const dest, const uint8_t
+                     const int32_t vectorizable_elements, const int32_t total_elements) {
+   static const int32_t bytesoftype = 16;
+   uint32_t i, j;
+-  __vector uint8_t xmm0[16];
++  __vector unsigned char xmm0[16];
+ 
+   for (j = 0; j < vectorizable_elements; j += 16) {
+     /* Load 16 elements (64 bytes) into 4 vectors registers. */
+@@ -280,7 +280,7 @@ unshuffle16_tiled_altivec(uint8_t* const dest, const u
+                           const int32_t bytesoftype) {
+   int32_t i, j, offset_into_type;
+   const int32_t vecs_per_el_rem = bytesoftype &  0xF;
+-  __vector uint8_t xmm[16];
++  __vector unsigned char xmm[16];
+ 
+ 
+   /* Advance the offset into the type by the vector size (in bytes), unless this is
+--- contribs/c-blosc2/blosc/transpose-altivec.h.orig	2023-03-23 20:13:07 UTC
++++ contribs/c-blosc2/blosc/transpose-altivec.h
+@@ -15,18 +15,18 @@ extern "C" {
+ extern "C" {
+ #endif
+ 
+-static const __vector uint8_t even = (const __vector uint8_t) {
++static const __vector unsigned char even = (const __vector unsigned char) {
+   0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
+   0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e};
+ 
+-static const __vector uint8_t odd = (const __vector uint8_t) {
++static const __vector unsigned char odd = (const __vector unsigned char) {
+   0x01, 0x03, 0x05, 0x07, 0x09, 0x0b, 0x0d, 0x0f,
+   0x11, 0x13, 0x15, 0x17, 0x19, 0x1b, 0x1d, 0x1f};
+ 
+ 
+ /* Transpose inplace 2 vectors of 16 bytes in src into dst. */
+-static void transpose2x16(__vector uint8_t *xmm0) {
+-  __vector uint8_t xmm1[2];
++static void transpose2x16(__vector unsigned char *xmm0) {
++  __vector unsigned char xmm1[2];
+   xmm1[0] = vec_perm(xmm0[0], xmm0[1], even);
+   xmm1[1] = vec_perm(xmm0[0], xmm0[1], odd);
+ 
+@@ -38,8 +38,8 @@ static void transpose2x16(__vector uint8_t *xmm0) {
+ 
+ /* Transpose inplace 4 vectors of 16 bytes in src into dst.
+  * Total cost: 8 calls to vec_perm. */
+-static void transpose4x16(__vector uint8_t *xmm0) {
+-  __vector uint8_t xmm1[4];
++static void transpose4x16(__vector unsigned char *xmm0) {
++  __vector unsigned char xmm1[4];
+ 
+   /* Transpose vectors 0-1*/
+   xmm1[0] = vec_perm(xmm0[0], xmm0[1], even);
+@@ -56,8 +56,8 @@ static void transpose4x16(__vector uint8_t *xmm0) {
+ 
+ /* Transpose inplace 8 vectors of 16 bytes in src into dst.
+  * Total cost: 24 calls to vec_perm. */
+-static void transpose8x16(__vector uint8_t *xmm0) {
+-  __vector uint8_t xmm1[8];
++static void transpose8x16(__vector unsigned char *xmm0) {
++  __vector unsigned char xmm1[8];
+ 
+   /* Transpose vectors 0-1*/
+   for (int i = 0; i < 8; i += 2){
+@@ -85,8 +85,8 @@ static void transpose8x16(__vector uint8_t *xmm0) {
+ 
+ /* Transpose inplace 16 vectors of 16 bytes in src into dst.
+  * Total cost: 64 calls to vec_perm. */
+-static void transpose16x16(__vector uint8_t * xmm0){
+-  __vector uint8_t xmm1[16];
++static void transpose16x16(__vector unsigned char * xmm0){
++  __vector unsigned char xmm1[16];
+   /* Transpose vectors 0-1*/
+   for (int i = 0; i < 16; i += 2){
+     xmm1[i] = vec_perm(xmm0[i], xmm0[i+1], even);
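
For context: the AltiVec hunks consolidated into patch-powerpc64 replace typedef'd element types (__vector uint8_t / uint16_t / uint32_t) with the built-in keywords (__vector unsigned char / unsigned short / unsigned int), since some compilers reject typedef names after the __vector keyword. A minimal sketch of the accepted spelling, assuming a POWER toolchain with AltiVec/VSX enabled (illustrative only, not taken from the port):

    #include <altivec.h>
    #include <stdint.h>

    /* Copy 16 bytes through a vector register. */
    static void copy16(uint8_t *dst, const uint8_t *src) {
      /* __vector unsigned char is portable; __vector uint8_t may be rejected. */
      __vector unsigned char v = vec_xl(0, src);
      vec_xst(v, 0, dst);
    }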