author      Stephan Hartmann <sultan@gentoo.org>    2023-01-15 19:20:17 +0100
committer   Stephan Hartmann <sultan@gentoo.org>    2023-01-15 19:20:17 +0100
commit      5566428d4e16ff47a6f54ad495d2dd95b6db89b5 (patch)
tree        0096d42530c52a8559ab8a3774f5b82523da34c2 /www-client/chromium
parent      profiles/arch/powerpc/ppc64: www-client/chromium[-system-libpng] works now (diff)
download    gentoo-5566428d4e16ff47a6f54ad495d2dd95b6db89b5.tar.gz
            gentoo-5566428d4e16ff47a6f54ad495d2dd95b6db89b5.tar.bz2
            gentoo-5566428d4e16ff47a6f54ad495d2dd95b6db89b5.zip
www-client/chromium: move ppc64le to devspace
Signed-off-by: Stephan Hartmann <sultan@gentoo.org>
Diffstat (limited to 'www-client/chromium')
-rw-r--r--  www-client/chromium/Manifest                                      |   1
-rw-r--r--  www-client/chromium/chromium-109.0.5414.74-r1.ebuild              |  13
-rw-r--r--  www-client/chromium/files/ppc64le/chromium-ppc64-libpng.patch     | 914
-rw-r--r--  www-client/chromium/files/ppc64le/fix-breakpad-compile.patch      |  29
-rw-r--r--  www-client/chromium/files/ppc64le/fix-swiftshader-compile.patch   |  26
-rw-r--r--  www-client/chromium/files/ppc64le/libpng-pdfium-compile-98.patch  |  13
6 files changed, 7 insertions, 989 deletions
diff --git a/www-client/chromium/Manifest b/www-client/chromium/Manifest
index a32f139bb2c3..63b5b3af45d6 100644
--- a/www-client/chromium/Manifest
+++ b/www-client/chromium/Manifest
@@ -4,6 +4,7 @@ DIST chromium-109-patchset-2.tar.xz 7280 BLAKE2B b7f86dbb6c40005df67764a7dea57c8
DIST chromium-109.0.5414.74.tar.xz 1747778008 BLAKE2B dc2893c59398caf72546c42ada961215ce1b25785da66272b72fe14ac0db00ac4282b0de5ee65cca932e273fe69f41704b4ee2aa173d21da535708e8d0aab633 SHA512 79ff744b813e833ab4f257361a2f1847a57df579b9c925bad7575484f079c4c1b0b1c59c2c48e683bcee27a79e4bfd87f96c83fa28e0209ba4d3a71340842579
DIST chromium-110-patchset-2.tar.xz 11196 BLAKE2B 97a4d65f9cdc1cdec15ddcee8561b6fef9d6ce8ea667c4c779da56b3665ff8fd67f2e81d8b55cf9709575c96e29545c4ab81b02dcb5b3fced7185cb9f43d63d2 SHA512 cff28487a6739cdffb007566c1b71977c87394bd2912e9b3ee32c9349bb0c1d882e252a2264132d908393bc5440520e3ea93736e1a0f155211280cab7d605037
DIST chromium-110.0.5481.24.tar.xz 1703153748 BLAKE2B 8fa030928d51d9ea0a6aa4a674bb1e731e89833da146575f1e617e2e4fb4a7d55f963ba855341f2564488b4464b52d4aaa9d0f789e4f7f1dcb35df6f5e0232aa SHA512 49ff6fcfcc4a1cc30e073fa421fced527e2eb7248a1dfde5da91768a91ea69aa113934f80865ffe590a3c417634c5f7a2fe04145b199a2b4d751ea386f297e9a
+DIST chromium-ppc64le-gentoo-patches-1.tar.xz 5636 BLAKE2B 1d898939df023c59285b27bee552470483ea06375d1ee8d6947b89c5927c23cc7bfec6b49f3b376ece931d11a56f8e2a45791e0f92ad61974fc8c34c1082d89c SHA512 8a71cb007e47cda8e5fe5d185729389e65c48bd322c8ee8b3986bee8571427b959628f2666bda646a3f89ae64197c0957d3626845ff03461dbd5dee4c964d07c
DIST chromium-profiler-0.2.tar 259952640 BLAKE2B 5e82389fce00b267280d633b26551231c558b80f0c92cd7d60271095a3503365ab673e7a46dc5f74f8a82db1f21262cb24dcc0531d954bab5fb3b175dab3394d SHA512 9fa89a5ab728419683e7495ae676567480987d68a2d589f2aa97bfcbcf123f3c8f97f8bca5f65e5e3f6e41475535add473ded83b9c7a33bb4746a9d05b298a6c
DIST chromium_108.0.5359.71-2raptor0~deb11u1.debian.tar.xz 439020 BLAKE2B ef900e3978b03c7330147b338266694f2ea4bafee8a34fb871bd7548a05c9276c25e8c34dd26875ca552db758590c4e4a80ae411c2fe658e485d9dca6e55adae SHA512 99486b35c5eb718e25faaddd526970969f5d8c4c74f3c17f64f6fbe25f45a0764643bf746644b205ac27d13912f36eba6f497a0552d7d96d1af4a569d895e64c
DIST chromium_109.0.5414.74-2raptor0~deb11u1.debian.tar.xz 445284 BLAKE2B 838df6439816d36df2e9bb9224745aa0c0a3f6c3e19ec2b29b78b8e2be8c59e8df9dde7fdba49a73ee7bdd2282016f0366500f5bd1579b870a6aab25f4bb115f SHA512 6e6523d5f2aed2d931074af1dddea7312d5c8ed09d2b143dd9309db634547c37b67ba3c905fefa67097fb44c64f4d45431eedeed7b33d8b07eb2bae73bdc7f16
diff --git a/www-client/chromium/chromium-109.0.5414.74-r1.ebuild b/www-client/chromium/chromium-109.0.5414.74-r1.ebuild
index 36936ce4427f..4f28d615a369 100644
--- a/www-client/chromium/chromium-109.0.5414.74-r1.ebuild
+++ b/www-client/chromium/chromium-109.0.5414.74-r1.ebuild
@@ -23,7 +23,10 @@ PATCHSET_URI_PPC64="https://quickbuild.io/~raptor-engineering-public"
PATCHSET_NAME_PPC64="chromium_109.0.5414.74-2raptor0~deb11u1.debian"
SRC_URI="https://commondatastorage.googleapis.com/chromium-browser-official/${P}.tar.xz
https://github.com/stha09/chromium-patches/releases/download/${PATCHSET_NAME}/${PATCHSET_NAME}.tar.xz
- ppc64? ( ${PATCHSET_URI_PPC64}/+archive/ubuntu/chromium/+files/${PATCHSET_NAME_PPC64}.tar.xz )
+ ppc64? (
+ ${PATCHSET_URI_PPC64}/+archive/ubuntu/chromium/+files/${PATCHSET_NAME_PPC64}.tar.xz
+ https://dev.gentoo.org/~sultan/distfiles/www-client/chromium/chromium-ppc64le-gentoo-patches-1.tar.xz
+ )
pgo? ( https://github.com/elkablo/chromium-profiler/releases/download/v0.2/chromium-profiler-0.2.tar )"
LICENSE="BSD"
@@ -348,15 +351,11 @@ src_prepare() {
if use ppc64 ; then
local p
for p in $(grep -v "^#" "${WORKDIR}"/debian/patches/series | grep "^ppc64le" || die); do
- if [[ $p =~ "fix-breakpad-compile.patch" ]]; then
- eapply "${FILESDIR}/ppc64le/fix-breakpad-compile.patch"
- else
+ if [[ ! $p =~ "fix-breakpad-compile.patch" ]]; then
eapply "${WORKDIR}/debian/patches/${p}"
fi
done
- eapply "${FILESDIR}/ppc64le/libpng-pdfium-compile-98.patch"
- eapply "${FILESDIR}/ppc64le/fix-swiftshader-compile.patch"
- eapply "${FILESDIR}/ppc64le/chromium-ppc64-libpng.patch"
+ PATCHES+=( "${WORKDIR}/ppc64le" )
fi
default
diff --git a/www-client/chromium/files/ppc64le/chromium-ppc64-libpng.patch b/www-client/chromium/files/ppc64le/chromium-ppc64-libpng.patch
deleted file mode 100644
index 610ac650230c..000000000000
--- a/www-client/chromium/files/ppc64le/chromium-ppc64-libpng.patch
+++ /dev/null
@@ -1,914 +0,0 @@
---- /dev/null
-+++ b/third_party/libpng/powerpc/filter_vsx_intrinsics.c
-@@ -0,0 +1,768 @@
-+/* filter_vsx_intrinsics.c - PowerPC optimised filter functions
-+ *
-+ * Copyright (c) 2018 Cosmin Truta
-+ * Copyright (c) 2017 Glenn Randers-Pehrson
-+ * Written by Vadim Barkov, 2017.
-+ *
-+ * This code is released under the libpng license.
-+ * For conditions of distribution and use, see the disclaimer
-+ * and license in png.h
-+ */
-+
-+#include <stdio.h>
-+#include <stdint.h>
-+#include "../pngpriv.h"
-+
-+#ifdef PNG_READ_SUPPORTED
-+
-+/* This code requires -maltivec and -mvsx on the command line: */
-+#if PNG_POWERPC_VSX_IMPLEMENTATION == 1 /* intrinsics code from pngpriv.h */
-+
-+#include <altivec.h>
-+
-+#if PNG_POWERPC_VSX_OPT > 0
-+
-+#ifndef __VSX__
-+# error "This code requires VSX support (POWER7 and later). Please provide -mvsx compiler flag."
-+#endif
-+
-+#define vec_ld_unaligned(vec,data) vec = vec_vsx_ld(0,data)
-+#define vec_st_unaligned(vec,data) vec_vsx_st(vec,0,data)
-+
-+
-+/* Functions in this file look at most 3 pixels (a,b,c) to predict the 4th (d).
-+ * They're positioned like this:
-+ * prev: c b
-+ * row: a d
-+ * The Sub filter predicts d=a, Avg d=(a+b)/2, and Paeth predicts d to be
-+ * whichever of a, b, or c is closest to p=a+b-c.
-+ * ( this is taken from ../intel/filter_sse2_intrinsics.c )
-+ */
-+
-+#define vsx_declare_common_vars(row_info,row,prev_row,offset) \
-+ png_byte i;\
-+ png_bytep rp = row + offset;\
-+ png_const_bytep pp = prev_row;\
-+ size_t unaligned_top = 16 - (((size_t)rp % 16));\
-+ size_t istop;\
-+ if(unaligned_top == 16)\
-+ unaligned_top = 0;\
-+ istop = row_info->rowbytes;\
-+ if((unaligned_top < istop))\
-+ istop -= unaligned_top;\
-+ else{\
-+ unaligned_top = istop;\
-+ istop = 0;\
-+ }
-+
-+void png_read_filter_row_up_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ vector unsigned char rp_vec;
-+ vector unsigned char pp_vec;
-+ vsx_declare_common_vars(row_info,row,prev_row,0)
-+
-+ /* Altivec operations require 16-byte aligned data
-+ * but input can be unaligned. So we calculate
-+ * unaligned part as usual.
-+ */
-+ for (i = 0; i < unaligned_top; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*pp++)) & 0xff);
-+ rp++;
-+ }
-+
-+ /* Using SIMD while we can */
-+ while( istop >= 16 )
-+ {
-+ rp_vec = vec_ld(0,rp);
-+ vec_ld_unaligned(pp_vec,pp);
-+
-+ rp_vec = vec_add(rp_vec,pp_vec);
-+
-+ vec_st(rp_vec,0,rp);
-+
-+ pp += 16;
-+ rp += 16;
-+ istop -= 16;
-+ }
-+
-+ if(istop > 0)
-+ {
-+ /* If byte count of row is not divisible by 16
-+ * we will process remaining part as usual
-+ */
-+ for (i = 0; i < istop; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*pp++)) & 0xff);
-+ rp++;
-+ }
-+}
-+
-+}
-+
-+static const vector unsigned char VSX_LEFTSHIFTED1_4 = {16,16,16,16, 0, 1, 2, 3,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_LEFTSHIFTED2_4 = {16,16,16,16,16,16,16,16, 4, 5, 6, 7,16,16,16,16};
-+static const vector unsigned char VSX_LEFTSHIFTED3_4 = {16,16,16,16,16,16,16,16,16,16,16,16, 8, 9,10,11};
-+
-+static const vector unsigned char VSX_LEFTSHIFTED1_3 = {16,16,16, 0, 1, 2,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_LEFTSHIFTED2_3 = {16,16,16,16,16,16, 3, 4, 5,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_LEFTSHIFTED3_3 = {16,16,16,16,16,16,16,16,16, 6, 7, 8,16,16,16,16};
-+static const vector unsigned char VSX_LEFTSHIFTED4_3 = {16,16,16,16,16,16,16,16,16,16,16,16, 9,10,11,16};
-+
-+static const vector unsigned char VSX_NOT_SHIFTED1_4 = {16,16,16,16, 4, 5, 6, 7,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_NOT_SHIFTED2_4 = {16,16,16,16,16,16,16,16, 8, 9,10,11,16,16,16,16};
-+static const vector unsigned char VSX_NOT_SHIFTED3_4 = {16,16,16,16,16,16,16,16,16,16,16,16,12,13,14,15};
-+
-+static const vector unsigned char VSX_NOT_SHIFTED1_3 = {16,16,16, 3, 4, 5,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_NOT_SHIFTED2_3 = {16,16,16,16,16,16, 6, 7, 8,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_NOT_SHIFTED3_3 = {16,16,16,16,16,16,16,16,16, 9,10,11,16,16,16,16};
-+static const vector unsigned char VSX_NOT_SHIFTED4_3 = {16,16,16,16,16,16,16,16,16,16,16,16,12,13,14,16};
-+
-+static const vector unsigned char VSX_CHAR_ZERO = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-+#ifdef __LITTLE_ENDIAN__
-+
-+static const vector unsigned char VSX_CHAR_TO_SHORT1_4 = { 4,16, 5,16, 6,16, 7,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT2_4 = { 8,16, 9,16,10,16,11,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT3_4 = {12,16,13,16,14,16,15,16,16,16,16,16,16,16,16,16};
-+
-+static const vector unsigned char VSX_SHORT_TO_CHAR1_4 = {16,16,16,16, 0, 2, 4, 6,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR2_4 = {16,16,16,16,16,16,16,16, 0, 2, 4, 6,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR3_4 = {16,16,16,16,16,16,16,16,16,16,16,16, 0, 2, 4, 6};
-+
-+static const vector unsigned char VSX_CHAR_TO_SHORT1_3 = { 3,16, 4,16, 5,16,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT2_3 = { 6,16, 7,16, 8,16,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT3_3 = { 9,16,10,16,11,16,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT4_3 = {12,16,13,16,14,16,16,16,16,16,16,16,16,16,16,16};
-+
-+static const vector unsigned char VSX_SHORT_TO_CHAR1_3 = {16,16,16, 0, 2, 4,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR2_3 = {16,16,16,16,16,16, 0, 2, 4,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR3_3 = {16,16,16,16,16,16,16,16,16, 0, 2, 4,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR4_3 = {16,16,16,16,16,16,16,16,16,16,16,16, 0, 2, 4,16};
-+
-+#elif defined(__BIG_ENDIAN__)
-+
-+static const vector unsigned char VSX_CHAR_TO_SHORT1_4 = {16, 4,16, 5,16, 6,16, 7,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT2_4 = {16, 8,16, 9,16,10,16,11,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT3_4 = {16,12,16,13,16,14,16,15,16,16,16,16,16,16,16,16};
-+
-+static const vector unsigned char VSX_SHORT_TO_CHAR1_4 = {16,16,16,16, 1, 3, 5, 7,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR2_4 = {16,16,16,16,16,16,16,16, 1, 3, 5, 7,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR3_4 = {16,16,16,16,16,16,16,16,16,16,16,16, 1, 3, 5, 7};
-+
-+static const vector unsigned char VSX_CHAR_TO_SHORT1_3 = {16, 3,16, 4,16, 5,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT2_3 = {16, 6,16, 7,16, 8,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT3_3 = {16, 9,16,10,16,11,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_CHAR_TO_SHORT4_3 = {16,12,16,13,16,14,16,16,16,16,16,16,16,16,16,16};
-+
-+static const vector unsigned char VSX_SHORT_TO_CHAR1_3 = {16,16,16, 1, 3, 5,16,16,16,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR2_3 = {16,16,16,16,16,16, 1, 3, 5,16,16,16,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR3_3 = {16,16,16,16,16,16,16,16,16, 1, 3, 5,16,16,16,16};
-+static const vector unsigned char VSX_SHORT_TO_CHAR4_3 = {16,16,16,16,16,16,16,16,16,16,16,16, 1, 3, 5,16};
-+
-+#endif
-+
-+#define vsx_char_to_short(vec,offset,bpp) (vector unsigned short)vec_perm((vec),VSX_CHAR_ZERO,VSX_CHAR_TO_SHORT##offset##_##bpp)
-+#define vsx_short_to_char(vec,offset,bpp) vec_perm(((vector unsigned char)(vec)),VSX_CHAR_ZERO,VSX_SHORT_TO_CHAR##offset##_##bpp)
-+
-+#ifdef PNG_USE_ABS
-+# define vsx_abs(number) abs(number)
-+#else
-+# define vsx_abs(number) (number > 0) ? (number) : -(number)
-+#endif
-+
-+void png_read_filter_row_sub4_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ png_byte bpp = 4;
-+
-+ vector unsigned char rp_vec;
-+ vector unsigned char part_vec;
-+
-+ vsx_declare_common_vars(row_info,row,prev_row,bpp)
-+
-+ PNG_UNUSED(pp)
-+
-+ /* Altivec operations require 16-byte aligned data
-+ * but input can be unaligned. So we calculate
-+ * unaligned part as usual.
-+ */
-+ for (i = 0; i < unaligned_top; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
-+ rp++;
-+ }
-+
-+ /* Using SIMD while we can */
-+ while( istop >= 16 )
-+ {
-+ for(i=0;i < bpp ; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
-+ rp++;
-+ }
-+ rp -= bpp;
-+
-+ rp_vec = vec_ld(0,rp);
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_4);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_4);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_4);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ vec_st(rp_vec,0,rp);
-+
-+ rp += 16;
-+ istop -= 16;
-+ }
-+
-+ if(istop > 0)
-+ for (i = 0; i < istop % 16; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp - bpp))) & 0xff);
-+ rp++;
-+ }
-+
-+}
-+
-+void png_read_filter_row_sub3_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ png_byte bpp = 3;
-+
-+ vector unsigned char rp_vec;
-+ vector unsigned char part_vec;
-+
-+ vsx_declare_common_vars(row_info,row,prev_row,bpp)
-+
-+ PNG_UNUSED(pp)
-+
-+ /* Altivec operations require 16-byte aligned data
-+ * but input can be unaligned. So we calculate
-+ * unaligned part as usual.
-+ */
-+ for (i = 0; i < unaligned_top; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
-+ rp++;
-+ }
-+
-+ /* Using SIMD while we can */
-+ while( istop >= 16 )
-+ {
-+ for(i=0;i < bpp ; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
-+ rp++;
-+ }
-+ rp -= bpp;
-+
-+ rp_vec = vec_ld(0,rp);
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_3);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_3);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_3);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED4_3);
-+ rp_vec = vec_add(rp_vec,part_vec);
-+
-+ vec_st(rp_vec,0,rp);
-+ rp += 15;
-+ istop -= 16;
-+
-+ /* Since 16 % bpp = 16 % 3 = 1, last element of array must
-+ * be proceeded manually
-+ */
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
-+ rp++;
-+ }
-+
-+ if(istop > 0)
-+ for (i = 0; i < istop % 16; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) + (int)(*(rp-bpp))) & 0xff);
-+ rp++;
-+ }
-+}
-+
-+void png_read_filter_row_avg4_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ png_byte bpp = 4;
-+
-+ vector unsigned char rp_vec;
-+ vector unsigned char pp_vec;
-+ vector unsigned char pp_part_vec;
-+ vector unsigned char rp_part_vec;
-+ vector unsigned char avg_vec;
-+
-+ vsx_declare_common_vars(row_info,row,prev_row,bpp)
-+ rp -= bpp;
-+ if(istop >= bpp)
-+ istop -= bpp;
-+
-+ for (i = 0; i < bpp; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ ((int)(*pp++) / 2 )) & 0xff);
-+
-+ rp++;
-+ }
-+
-+ /* Altivec operations require 16-byte aligned data
-+ * but input can be unaligned. So we calculate
-+ * unaligned part as usual.
-+ */
-+ for (i = 0; i < unaligned_top; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+
-+ rp++;
-+ }
-+
-+ /* Using SIMD while we can */
-+ while( istop >= 16 )
-+ {
-+ for(i=0;i < bpp ; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+
-+ rp++;
-+ }
-+ rp -= bpp;
-+ pp -= bpp;
-+
-+ vec_ld_unaligned(pp_vec,pp);
-+ rp_vec = vec_ld(0,rp);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_4);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED1_4);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_4);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED2_4);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_4);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED3_4);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ vec_st(rp_vec,0,rp);
-+
-+ rp += 16;
-+ pp += 16;
-+ istop -= 16;
-+ }
-+
-+ if(istop > 0)
-+ for (i = 0; i < istop % 16; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+
-+ rp++;
-+ }
-+}
-+
-+void png_read_filter_row_avg3_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ png_byte bpp = 3;
-+
-+ vector unsigned char rp_vec;
-+ vector unsigned char pp_vec;
-+ vector unsigned char pp_part_vec;
-+ vector unsigned char rp_part_vec;
-+ vector unsigned char avg_vec;
-+
-+ vsx_declare_common_vars(row_info,row,prev_row,bpp)
-+ rp -= bpp;
-+ if(istop >= bpp)
-+ istop -= bpp;
-+
-+ for (i = 0; i < bpp; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ ((int)(*pp++) / 2 )) & 0xff);
-+
-+ rp++;
-+ }
-+
-+ /* Altivec operations require 16-byte aligned data
-+ * but input can be unaligned. So we calculate
-+ * unaligned part as usual.
-+ */
-+ for (i = 0; i < unaligned_top; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+
-+ rp++;
-+ }
-+
-+ /* Using SIMD while we can */
-+ while( istop >= 16 )
-+ {
-+ for(i=0;i < bpp ; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+
-+ rp++;
-+ }
-+ rp -= bpp;
-+ pp -= bpp;
-+
-+ vec_ld_unaligned(pp_vec,pp);
-+ rp_vec = vec_ld(0,rp);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED1_3);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED1_3);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED2_3);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED2_3);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED3_3);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED3_3);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ rp_part_vec = vec_perm(rp_vec,VSX_CHAR_ZERO,VSX_LEFTSHIFTED4_3);
-+ pp_part_vec = vec_perm(pp_vec,VSX_CHAR_ZERO,VSX_NOT_SHIFTED4_3);
-+ avg_vec = vec_avg(rp_part_vec,pp_part_vec);
-+ avg_vec = vec_sub(avg_vec, vec_and(vec_xor(rp_part_vec,pp_part_vec),vec_splat_u8(1)));
-+ rp_vec = vec_add(rp_vec,avg_vec);
-+
-+ vec_st(rp_vec,0,rp);
-+
-+ rp += 15;
-+ pp += 15;
-+ istop -= 16;
-+
-+ /* Since 16 % bpp = 16 % 3 = 1, last element of array must
-+ * be proceeded manually
-+ */
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+ rp++;
-+ }
-+
-+ if(istop > 0)
-+ for (i = 0; i < istop % 16; i++)
-+ {
-+ *rp = (png_byte)(((int)(*rp) +
-+ (int)(*pp++ + *(rp-bpp)) / 2 ) & 0xff);
-+
-+ rp++;
-+ }
-+}
-+
-+/* Bytewise c ? t : e. */
-+#define if_then_else(c,t,e) vec_sel(e,t,c)
-+
-+#define vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp) {\
-+ c = *(pp - bpp);\
-+ a = *(rp - bpp);\
-+ b = *pp++;\
-+ p = b - c;\
-+ pc = a - c;\
-+ pa = vsx_abs(p);\
-+ pb = vsx_abs(pc);\
-+ pc = vsx_abs(p + pc);\
-+ if (pb < pa) pa = pb, a = b;\
-+ if (pc < pa) a = c;\
-+ a += *rp;\
-+ *rp++ = (png_byte)a;\
-+ }
-+
-+void png_read_filter_row_paeth4_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ png_byte bpp = 4;
-+
-+ int a, b, c, pa, pb, pc, p;
-+ vector unsigned char rp_vec;
-+ vector unsigned char pp_vec;
-+ vector unsigned short a_vec,b_vec,c_vec,nearest_vec;
-+ vector signed short pa_vec,pb_vec,pc_vec,smallest_vec;
-+
-+ vsx_declare_common_vars(row_info,row,prev_row,bpp)
-+ rp -= bpp;
-+ if(istop >= bpp)
-+ istop -= bpp;
-+
-+ /* Process the first pixel in the row completely (this is the same as 'up'
-+ * because there is only one candidate predictor for the first row).
-+ */
-+ for(i = 0; i < bpp ; i++)
-+ {
-+ *rp = (png_byte)( *rp + *pp);
-+ rp++;
-+ pp++;
-+ }
-+
-+ for(i = 0; i < unaligned_top ; i++)
-+ {
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+
-+ while( istop >= 16)
-+ {
-+ for(i = 0; i < bpp ; i++)
-+ {
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+
-+ rp -= bpp;
-+ pp -= bpp;
-+ rp_vec = vec_ld(0,rp);
-+ vec_ld_unaligned(pp_vec,pp);
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_4),1,4);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED1_4),1,4);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_4),1,4);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,1,4)));
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_4),2,4);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED2_4),2,4);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_4),2,4);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,2,4)));
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_4),3,4);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED3_4),3,4);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_4),3,4);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,3,4)));
-+
-+ vec_st(rp_vec,0,rp);
-+
-+ rp += 16;
-+ pp += 16;
-+ istop -= 16;
-+ }
-+
-+ if(istop > 0)
-+ for (i = 0; i < istop % 16; i++)
-+ {
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+}
-+
-+void png_read_filter_row_paeth3_vsx(png_row_infop row_info, png_bytep row,
-+ png_const_bytep prev_row)
-+{
-+ png_byte bpp = 3;
-+
-+ int a, b, c, pa, pb, pc, p;
-+ vector unsigned char rp_vec;
-+ vector unsigned char pp_vec;
-+ vector unsigned short a_vec,b_vec,c_vec,nearest_vec;
-+ vector signed short pa_vec,pb_vec,pc_vec,smallest_vec;
-+
-+ vsx_declare_common_vars(row_info,row,prev_row,bpp)
-+ rp -= bpp;
-+ if(istop >= bpp)
-+ istop -= bpp;
-+
-+ /* Process the first pixel in the row completely (this is the same as 'up'
-+ * because there is only one candidate predictor for the first row).
-+ */
-+ for(i = 0; i < bpp ; i++)
-+ {
-+ *rp = (png_byte)( *rp + *pp);
-+ rp++;
-+ pp++;
-+ }
-+
-+ for(i = 0; i < unaligned_top ; i++)
-+ {
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+
-+ while( istop >= 16)
-+ {
-+ for(i = 0; i < bpp ; i++)
-+ {
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+
-+ rp -= bpp;
-+ pp -= bpp;
-+ rp_vec = vec_ld(0,rp);
-+ vec_ld_unaligned(pp_vec,pp);
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_3),1,3);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED1_3),1,3);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED1_3),1,3);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,1,3)));
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_3),2,3);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED2_3),2,3);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED2_3),2,3);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,2,3)));
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_3),3,3);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED3_3),3,3);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED3_3),3,3);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,3,3)));
-+
-+ a_vec = vsx_char_to_short(vec_perm(rp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED4_3),4,3);
-+ b_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_NOT_SHIFTED4_3),4,3);
-+ c_vec = vsx_char_to_short(vec_perm(pp_vec , VSX_CHAR_ZERO , VSX_LEFTSHIFTED4_3),4,3);
-+ pa_vec = (vector signed short) vec_sub(b_vec,c_vec);
-+ pb_vec = (vector signed short) vec_sub(a_vec , c_vec);
-+ pc_vec = vec_add(pa_vec,pb_vec);
-+ pa_vec = vec_abs(pa_vec);
-+ pb_vec = vec_abs(pb_vec);
-+ pc_vec = vec_abs(pc_vec);
-+ smallest_vec = vec_min(pc_vec, vec_min(pa_vec,pb_vec));
-+ nearest_vec = if_then_else(
-+ vec_cmpeq(pa_vec,smallest_vec),
-+ a_vec,
-+ if_then_else(
-+ vec_cmpeq(pb_vec,smallest_vec),
-+ b_vec,
-+ c_vec
-+ )
-+ );
-+ rp_vec = vec_add(rp_vec,(vsx_short_to_char(nearest_vec,4,3)));
-+
-+ vec_st(rp_vec,0,rp);
-+
-+ rp += 15;
-+ pp += 15;
-+ istop -= 16;
-+
-+ /* Since 16 % bpp = 16 % 3 = 1, last element of array must
-+ * be proceeded manually
-+ */
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+
-+ if(istop > 0)
-+ for (i = 0; i < istop % 16; i++)
-+ {
-+ vsx_paeth_process(rp,pp,a,b,c,pa,pb,pc,bpp)
-+ }
-+}
-+
-+#endif /* PNG_POWERPC_VSX_OPT > 0 */
-+#endif /* PNG_POWERPC_VSX_IMPLEMENTATION == 1 (intrinsics) */
-+#endif /* READ */
---- /dev/null
-+++ b/third_party/libpng/powerpc/powerpc_init.c
-@@ -0,0 +1,126 @@
-+
-+/* powerpc_init.c - POWERPC optimised filter functions
-+ *
-+ * Copyright (c) 2018 Cosmin Truta
-+ * Copyright (c) 2017 Glenn Randers-Pehrson
-+ * Written by Vadim Barkov, 2017.
-+ *
-+ * This code is released under the libpng license.
-+ * For conditions of distribution and use, see the disclaimer
-+ * and license in png.h
-+ */
-+
-+/* Below, after checking __linux__, various non-C90 POSIX 1003.1 functions are
-+ * called.
-+ */
-+#define _POSIX_SOURCE 1
-+
-+#include <stdio.h>
-+#include "../pngpriv.h"
-+
-+#ifdef PNG_READ_SUPPORTED
-+
-+#if PNG_POWERPC_VSX_OPT > 0
-+#ifdef PNG_POWERPC_VSX_CHECK_SUPPORTED /* Do run-time checks */
-+/* WARNING: it is strongly recommended that you do not build libpng with
-+ * run-time checks for CPU features if at all possible. In the case of the PowerPC
-+ * VSX instructions there is no processor-specific way of detecting the
-+ * presence of the required support, therefore run-time detection is extremely
-+ * OS specific.
-+ *
-+ * You may set the macro PNG_POWERPC_VSX_FILE to the file name of file containing
-+ * a fragment of C source code which defines the png_have_vsx function. There
-+ * are a number of implementations in contrib/powerpc-vsx, but the only one that
-+ * has partial support is contrib/powerpc-vsx/linux.c - a generic Linux
-+ * implementation which reads /proc/cpufino.
-+ */
-+#ifndef PNG_POWERPC_VSX_FILE
-+# ifdef __linux__
-+# define PNG_POWERPC_VSX_FILE "contrib/powerpc-vsx/linux_aux.c"
-+# endif
-+#endif
-+
-+#ifdef PNG_POWERPC_VSX_FILE
-+
-+#include <signal.h> /* for sig_atomic_t */
-+static int png_have_vsx(png_structp png_ptr);
-+#include PNG_POWERPC_VSX_FILE
-+
-+#else /* PNG_POWERPC_VSX_FILE */
-+# error "PNG_POWERPC_VSX_FILE undefined: no support for run-time POWERPC VSX checks"
-+#endif /* PNG_POWERPC_VSX_FILE */
-+#endif /* PNG_POWERPC_VSX_CHECK_SUPPORTED */
-+
-+void
-+png_init_filter_functions_vsx(png_structp pp, unsigned int bpp)
-+{
-+ /* The switch statement is compiled in for POWERPC_VSX_API, the call to
-+ * png_have_vsx is compiled in for POWERPC_VSX_CHECK. If both are defined
-+ * the check is only performed if the API has not set the PowerPC option on
-+ * or off explicitly. In this case the check controls what happens.
-+ */
-+
-+#ifdef PNG_POWERPC_VSX_API_SUPPORTED
-+ switch ((pp->options >> PNG_POWERPC_VSX) & 3)
-+ {
-+ case PNG_OPTION_UNSET:
-+ /* Allow the run-time check to execute if it has been enabled -
-+ * thus both API and CHECK can be turned on. If it isn't supported
-+ * this case will fall through to the 'default' below, which just
-+ * returns.
-+ */
-+#endif /* PNG_POWERPC_VSX_API_SUPPORTED */
-+#ifdef PNG_POWERPC_VSX_CHECK_SUPPORTED
-+ {
-+ static volatile sig_atomic_t no_vsx = -1; /* not checked */
-+
-+ if (no_vsx < 0)
-+ no_vsx = !png_have_vsx(pp);
-+
-+ if (no_vsx)
-+ return;
-+ }
-+#ifdef PNG_POWERPC_VSX_API_SUPPORTED
-+ break;
-+#endif
-+#endif /* PNG_POWERPC_VSX_CHECK_SUPPORTED */
-+
-+#ifdef PNG_POWERPC_VSX_API_SUPPORTED
-+ default: /* OFF or INVALID */
-+ return;
-+
-+ case PNG_OPTION_ON:
-+ /* Option turned on */
-+ break;
-+ }
-+#endif
-+
-+ /* IMPORTANT: any new internal functions used here must be declared using
-+ * PNG_INTERNAL_FUNCTION in ../pngpriv.h. This is required so that the
-+ * 'prefix' option to configure works:
-+ *
-+ * ./configure --with-libpng-prefix=foobar_
-+ *
-+ * Verify you have got this right by running the above command, doing a build
-+ * and examining pngprefix.h; it must contain a #define for every external
-+ * function you add. (Notice that this happens automatically for the
-+ * initialization function.)
-+ */
-+ pp->read_filter[PNG_FILTER_VALUE_UP-1] = png_read_filter_row_up_vsx;
-+
-+ if (bpp == 3)
-+ {
-+ pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub3_vsx;
-+ pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg3_vsx;
-+ pp->read_filter[PNG_FILTER_VALUE_PAETH-1] = png_read_filter_row_paeth3_vsx;
-+ }
-+
-+ else if (bpp == 4)
-+ {
-+ pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub4_vsx;
-+ pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg4_vsx;
-+ pp->read_filter[PNG_FILTER_VALUE_PAETH-1] = png_read_filter_row_paeth4_vsx;
-+ }
-+}
-+#endif /* PNG_POWERPC_VSX_OPT > 0 */
-+#endif /* READ */
---- a/third_party/libpng/BUILD.gn
-+++ b/third_party/libpng/BUILD.gn
-@@ -89,6 +91,11 @@ source_set("libpng_sources") {
- "mips/filter_msa_intrinsics.c",
- "mips/mips_init.c",
- ]
-+ } else if (current_cpu == "ppc64") {
-+ sources += [
-+ "powerpc/filter_vsx_intrinsics.c",
-+ "powerpc/powerpc_init.c",
-+ ]
- }
-
- configs -= [ "//build/config/compiler:chromium_code" ]
diff --git a/www-client/chromium/files/ppc64le/fix-breakpad-compile.patch b/www-client/chromium/files/ppc64le/fix-breakpad-compile.patch
deleted file mode 100644
index a07cc1941cec..000000000000
--- a/www-client/chromium/files/ppc64le/fix-breakpad-compile.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-Index: chromium-98.0.4758.80/third_party/breakpad/BUILD.gn
-===================================================================
---- chromium-98.0.4758.80.orig/third_party/breakpad/BUILD.gn
-+++ chromium-98.0.4758.80/third_party/breakpad/BUILD.gn
-@@ -623,7 +623,6 @@ if (is_linux || is_chromeos || is_android) {
- "breakpad/src/client/minidump_file_writer.h",
- "breakpad/src/common/convert_UTF.cc",
- "breakpad/src/common/convert_UTF.h",
-- "breakpad/src/common/linux/breakpad_getcontext.S",
- "breakpad/src/common/linux/elf_core_dump.cc",
- "breakpad/src/common/linux/elf_core_dump.h",
- "breakpad/src/common/linux/elfutils.cc",
-@@ -655,6 +654,8 @@ if (is_linux || is_chromeos || is_android) {
- configs += [ "//build/config/compiler:no_chromium_code" ]
- public_configs = [ ":client_config" ]
-
-+ defines = [ "HAVE_GETCONTEXT" ]
-+
- if (current_cpu == "arm" && is_chromeos_ash) {
- # Avoid running out of registers in
- # linux_syscall_support.h:sys_clone()'s inline assembly.
-@@ -712,7 +713,6 @@ if (is_linux || is_chromeos || is_android) {
- "breakpad/src/client/linux/minidump_writer/minidump_writer_unittest.cc",
- "breakpad/src/client/linux/minidump_writer/minidump_writer_unittest_utils.cc",
- "breakpad/src/client/linux/minidump_writer/proc_cpuinfo_reader_unittest.cc",
-- "breakpad/src/common/linux/breakpad_getcontext_unittest.cc",
- "breakpad/src/common/linux/elf_core_dump_unittest.cc",
- "breakpad/src/common/linux/file_id_unittest.cc",
- "breakpad/src/common/linux/linux_libc_support_unittest.cc",
diff --git a/www-client/chromium/files/ppc64le/fix-swiftshader-compile.patch b/www-client/chromium/files/ppc64le/fix-swiftshader-compile.patch
deleted file mode 100644
index 8474cb6b96cb..000000000000
--- a/www-client/chromium/files/ppc64le/fix-swiftshader-compile.patch
+++ /dev/null
@@ -1,26 +0,0 @@
---- chromium-101.0.4951.54/third_party/swiftshader/third_party/llvm-10.0/BUILD.gn.orig 2022-05-15 10:30:50.887333316 +0200
-+++ chromium-101.0.4951.54/third_party/swiftshader/third_party/llvm-10.0/BUILD.gn 2022-05-15 10:31:43.477318032 +0200
-@@ -131,7 +131,6 @@
- if (is_ubsan_vptr) {
- sources = [
- "llvm/lib/MC/MCWasmObjectTargetWriter.cpp",
-- "llvm/lib/MC/MCXCOFFObjectTargetWriter.cpp",
- "llvm/lib/Target/ARM/MCTargetDesc/ARMTargetStreamer.cpp",
- "llvm/lib/Target/TargetIntrinsicInfo.cpp",
- ]
-@@ -579,6 +578,7 @@
- "llvm/lib/MC/MCAsmInfoCOFF.cpp",
- "llvm/lib/MC/MCAsmInfoDarwin.cpp",
- "llvm/lib/MC/MCAsmInfoELF.cpp",
-+ "llvm/lib/MC/MCAsmInfoXCOFF.cpp",
- "llvm/lib/MC/MCAsmMacro.cpp",
- "llvm/lib/MC/MCAsmStreamer.cpp",
- "llvm/lib/MC/MCAssembler.cpp",
-@@ -633,6 +633,7 @@
- "llvm/lib/MC/MCWin64EH.cpp",
- "llvm/lib/MC/MCWinCOFFStreamer.cpp",
- "llvm/lib/MC/MCWinEH.cpp",
-+ "llvm/lib/MC/MCXCOFFObjectTargetWriter.cpp",
- "llvm/lib/MC/MCXCOFFStreamer.cpp",
- "llvm/lib/MC/MachObjectWriter.cpp",
- "llvm/lib/MC/StringTableBuilder.cpp",
diff --git a/www-client/chromium/files/ppc64le/libpng-pdfium-compile-98.patch b/www-client/chromium/files/ppc64le/libpng-pdfium-compile-98.patch
deleted file mode 100644
index 681eb80ea07f..000000000000
--- a/www-client/chromium/files/ppc64le/libpng-pdfium-compile-98.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-Index: chromium-98.0.4758.80/third_party/pdfium/BUILD.gn
-===================================================================
---- chromium-98.0.4758.80.orig/third_party/pdfium/BUILD.gn
-+++ chromium-98.0.4758.80/third_party/pdfium/BUILD.gn
-@@ -16,7 +16,7 @@
- }
-
- config("pdfium_common_config") {
-- cflags = []
-+ cflags = [ "-DPNG_POWERPC_VSX_OPT=0" ]
- cflags_cc = []
- ldflags = []
- include_dirs = [ "." ]
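
For reference, a minimal sketch of how the ppc64 branch of src_prepare() reads after this commit, assembled only from the ebuild hunk above; the surrounding ebuild logic is omitted, and it assumes (not shown in the diff) that the new chromium-ppc64le-gentoo-patches-1.tar.xz distfile unpacks the relocated patches into "${WORKDIR}/ppc64le" so the PATCHES array picks them up when default applies it.

	src_prepare() {
		# ... earlier preparation steps omitted ...

		if use ppc64 ; then
			local p
			# Apply the ppc64le patches from the Raptor/Debian series,
			# skipping fix-breakpad-compile.patch, which is no longer
			# carried in FILESDIR.
			for p in $(grep -v "^#" "${WORKDIR}"/debian/patches/series | grep "^ppc64le" || die); do
				if [[ ! $p =~ "fix-breakpad-compile.patch" ]]; then
					eapply "${WORKDIR}/debian/patches/${p}"
				fi
			done
			# Queue the Gentoo ppc64le patches unpacked from the new
			# devspace tarball; default applies the PATCHES array later.
			PATCHES+=( "${WORKDIR}/ppc64le" )
		fi

		default
	}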