gnu: x265: Fix building on armhf-linux and aarch64-linux.

* gnu/packages/video.scm (x265)[source]: Update list of patches.
[arguments]: Change configure flag to ensure PIC for all architectures.
* gnu/packages/patches/x265-arm-asm-primitives.patch: Remove file.
* gnu/packages/patches/x265-detect512-all-arches.patch: New file.
* gnu/local.mk (dist_patch_DATA): Update patch registry.
parent 2d5fa80e83
commit b7028a16e6

4 changed files with 41 additions and 368 deletions
gnu/local.mk

@@ -1227,7 +1227,7 @@ dist_patch_DATA =						\
  %D%/packages/patches/wpa-supplicant-fix-nonce-reuse.patch	\
  %D%/packages/patches/wpa-supplicant-krack-followups.patch	\
  %D%/packages/patches/wxmaxima-do-not-use-old-gnuplot-parameters.patch	\
-  %D%/packages/patches/x265-arm-asm-primitives.patch		\
+  %D%/packages/patches/x265-detect512-all-arches.patch		\
  %D%/packages/patches/xapian-revert-5489fb2f8.patch		\
  %D%/packages/patches/xboing-CVE-2004-0149.patch		\
  %D%/packages/patches/xf86-video-ark-remove-mibstore.patch	\
gnu/packages/patches/x265-arm-asm-primitives.patch  (deleted; 360 lines removed)

From <https://git.busybox.net/buildroot/tree/package/x265/0003-arm-asm-primitives.patch?id=57d4a27eaf1a9e59d767c321e7b7500c5060a2ac>.

This fixes build errors like:

  cd /tmp/guix-build-x265-2.8.drv-0/x265_2.8/build/encoder && /gnu/store/cd5q2pni1d95fs3cdabbclyh9hqhw2nq-gcc-5.5.0/bin/c++  -DEXPORT_C_API=1 -DHAVE_ARMV6=1 -DHAVE_INT_TYPES_H=1 -DHAVE_NEON -DHIGH_BIT_DEPTH=0 -DX265_ARCH_ARM=1 -DX265_DEPTH=8 -DX265_NS=x265 -D__STDC_LIMIT_MACROS=1 -I/tmp/guix-build-x265-2.8.drv-0/x265_2.8/source/. -I/tmp/guix-build-x265-2.8.drv-0/x265_2.8/source/common -I/tmp/guix-build-x265-2.8.drv-0/x265_2.8/source/encoder -I/tmp/guix-build-x265-2.8.drv-0/x265_2.8/build  -O2 -g -DNDEBUG   -Wall -Wextra -Wshadow -std=gnu++98 -fPIC -mcpu=native -mfloat-abi=hard -mfpu=neon -marm -fPIC -Wno-array-bounds -ffast-math -fno-exceptions -Wno-uninitialized -o CMakeFiles/encoder.dir/search.cpp.o -c /tmp/guix-build-x265-2.8.drv-0/x265_2.8/source/encoder/search.cpp
  /tmp/guix-build-x265-2.8.drv-0/x265_2.8/source/common/arm/asm-primitives.cpp:437:38: error: incompatible types in assignment of 'void(const pixel*, intptr_t, int16_t*, intptr_t) {aka void(const unsigned char*, int, short int*, int)}' to 'void (* [2])(const pixel*, intptr_t, int16_t*, intptr_t) {aka void (* [2])(const unsigned char*, int, short int*, int)}'
	   p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_neon);
					^

Downloaded from upstream bug report:
https://bitbucket.org/multicoreware/x265/issues/406

Signed-off-by: Bernd Kuhls <bernd.kuhls@t-online.de>

--- ./source/common/arm/asm-primitives.cpp.orig	2018-05-21 02:33:10.000000000 -0600
+++ ./source/common/arm/asm-primitives.cpp	2018-05-28 20:38:37.302378303 -0600
@@ -48,77 +48,77 @@ void setupAssemblyPrimitives(EncoderPrim
        p.ssim_4x4x2_core = PFX(ssim_4x4x2_core_neon);
 
        // addAvg
-         p.pu[LUMA_4x4].addAvg   = PFX(addAvg_4x4_neon);
-         p.pu[LUMA_4x8].addAvg   = PFX(addAvg_4x8_neon);
-         p.pu[LUMA_4x16].addAvg  = PFX(addAvg_4x16_neon);
-         p.pu[LUMA_8x4].addAvg   = PFX(addAvg_8x4_neon);
-         p.pu[LUMA_8x8].addAvg   = PFX(addAvg_8x8_neon);
-         p.pu[LUMA_8x16].addAvg  = PFX(addAvg_8x16_neon);
-         p.pu[LUMA_8x32].addAvg  = PFX(addAvg_8x32_neon);
-         p.pu[LUMA_12x16].addAvg = PFX(addAvg_12x16_neon);
-         p.pu[LUMA_16x4].addAvg  = PFX(addAvg_16x4_neon);
-         p.pu[LUMA_16x8].addAvg  = PFX(addAvg_16x8_neon);
-         p.pu[LUMA_16x12].addAvg = PFX(addAvg_16x12_neon);
-         p.pu[LUMA_16x16].addAvg = PFX(addAvg_16x16_neon);
-         p.pu[LUMA_16x32].addAvg = PFX(addAvg_16x32_neon);
-         p.pu[LUMA_16x64].addAvg = PFX(addAvg_16x64_neon);
-         p.pu[LUMA_24x32].addAvg = PFX(addAvg_24x32_neon);
-         p.pu[LUMA_32x8].addAvg  = PFX(addAvg_32x8_neon);
-         p.pu[LUMA_32x16].addAvg = PFX(addAvg_32x16_neon);
-         p.pu[LUMA_32x24].addAvg = PFX(addAvg_32x24_neon);
-         p.pu[LUMA_32x32].addAvg = PFX(addAvg_32x32_neon);
-         p.pu[LUMA_32x64].addAvg = PFX(addAvg_32x64_neon);
-         p.pu[LUMA_48x64].addAvg = PFX(addAvg_48x64_neon);
-         p.pu[LUMA_64x16].addAvg = PFX(addAvg_64x16_neon);
-         p.pu[LUMA_64x32].addAvg = PFX(addAvg_64x32_neon);
-         p.pu[LUMA_64x48].addAvg = PFX(addAvg_64x48_neon);
-         p.pu[LUMA_64x64].addAvg = PFX(addAvg_64x64_neon);
+         p.pu[LUMA_4x4].addAvg[ALIGNED]   = PFX(addAvg_4x4_neon);
+         p.pu[LUMA_4x8].addAvg[ALIGNED]   = PFX(addAvg_4x8_neon);
+         p.pu[LUMA_4x16].addAvg[ALIGNED]  = PFX(addAvg_4x16_neon);
+         p.pu[LUMA_8x4].addAvg[ALIGNED]   = PFX(addAvg_8x4_neon);
+         p.pu[LUMA_8x8].addAvg[ALIGNED]   = PFX(addAvg_8x8_neon);
+         p.pu[LUMA_8x16].addAvg[ALIGNED]  = PFX(addAvg_8x16_neon);
+         p.pu[LUMA_8x32].addAvg[ALIGNED]  = PFX(addAvg_8x32_neon);
+         p.pu[LUMA_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
+         p.pu[LUMA_16x4].addAvg[ALIGNED]  = PFX(addAvg_16x4_neon);
+         p.pu[LUMA_16x8].addAvg[ALIGNED]  = PFX(addAvg_16x8_neon);
+         p.pu[LUMA_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
+         p.pu[LUMA_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
+         p.pu[LUMA_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
+         p.pu[LUMA_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
+         p.pu[LUMA_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
+         p.pu[LUMA_32x8].addAvg[ALIGNED]  = PFX(addAvg_32x8_neon);
+         p.pu[LUMA_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
+         p.pu[LUMA_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
+         p.pu[LUMA_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
+         p.pu[LUMA_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
+         p.pu[LUMA_48x64].addAvg[ALIGNED] = PFX(addAvg_48x64_neon);
+         p.pu[LUMA_64x16].addAvg[ALIGNED] = PFX(addAvg_64x16_neon);
+         p.pu[LUMA_64x32].addAvg[ALIGNED] = PFX(addAvg_64x32_neon);
+         p.pu[LUMA_64x48].addAvg[ALIGNED] = PFX(addAvg_64x48_neon);
+         p.pu[LUMA_64x64].addAvg[ALIGNED] = PFX(addAvg_64x64_neon);
 
        // chroma addAvg
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg   = PFX(addAvg_4x2_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg   = PFX(addAvg_4x4_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg   = PFX(addAvg_4x8_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg  = PFX(addAvg_4x16_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg   = PFX(addAvg_6x8_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg   = PFX(addAvg_8x2_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg   = PFX(addAvg_8x4_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg   = PFX(addAvg_8x6_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg   = PFX(addAvg_8x8_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg  = PFX(addAvg_8x16_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg  = PFX(addAvg_8x32_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg = PFX(addAvg_12x16_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg  = PFX(addAvg_16x4_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg  = PFX(addAvg_16x8_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg = PFX(addAvg_16x12_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg = PFX(addAvg_16x16_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg = PFX(addAvg_16x32_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg = PFX(addAvg_24x32_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg  = PFX(addAvg_32x8_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg = PFX(addAvg_32x16_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg = PFX(addAvg_32x24_neon);
-        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg = PFX(addAvg_32x32_neon);
-
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg   = PFX(addAvg_4x8_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg  = PFX(addAvg_4x16_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg  = PFX(addAvg_4x32_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg  = PFX(addAvg_6x16_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg   = PFX(addAvg_8x4_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg   = PFX(addAvg_8x8_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg  = PFX(addAvg_8x12_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg  = PFX(addAvg_8x16_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg  = PFX(addAvg_8x32_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg  = PFX(addAvg_8x64_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg = PFX(addAvg_12x32_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg  = PFX(addAvg_16x8_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg = PFX(addAvg_16x16_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg = PFX(addAvg_16x24_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg = PFX(addAvg_16x32_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg = PFX(addAvg_16x64_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg = PFX(addAvg_24x64_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg = PFX(addAvg_32x16_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg = PFX(addAvg_32x32_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg = PFX(addAvg_32x48_neon);
-        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg = PFX(addAvg_32x64_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x2].addAvg[ALIGNED]   = PFX(addAvg_4x2_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].addAvg[ALIGNED]   = PFX(addAvg_4x4_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x8].addAvg[ALIGNED]   = PFX(addAvg_4x8_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_4x16].addAvg[ALIGNED]  = PFX(addAvg_4x16_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_6x8].addAvg[ALIGNED]   = PFX(addAvg_6x8_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x2].addAvg[ALIGNED]   = PFX(addAvg_8x2_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x4].addAvg[ALIGNED]   = PFX(addAvg_8x4_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x6].addAvg[ALIGNED]   = PFX(addAvg_8x6_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x8].addAvg[ALIGNED]   = PFX(addAvg_8x8_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x16].addAvg[ALIGNED]  = PFX(addAvg_8x16_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_8x32].addAvg[ALIGNED]  = PFX(addAvg_8x32_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_12x16].addAvg[ALIGNED] = PFX(addAvg_12x16_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x4].addAvg[ALIGNED]  = PFX(addAvg_16x4_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x8].addAvg[ALIGNED]  = PFX(addAvg_16x8_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x12].addAvg[ALIGNED] = PFX(addAvg_16x12_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].addAvg[ALIGNED] = PFX(addAvg_24x32_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].addAvg[ALIGNED]  = PFX(addAvg_32x8_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].addAvg[ALIGNED] = PFX(addAvg_32x24_neon);
+        p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
+
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x8].addAvg[ALIGNED]   = PFX(addAvg_4x8_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x16].addAvg[ALIGNED]  = PFX(addAvg_4x16_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_4x32].addAvg[ALIGNED]  = PFX(addAvg_4x32_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].addAvg[ALIGNED]  = PFX(addAvg_6x16_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].addAvg[ALIGNED]   = PFX(addAvg_8x4_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x8].addAvg[ALIGNED]   = PFX(addAvg_8x8_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x12].addAvg[ALIGNED]  = PFX(addAvg_8x12_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x16].addAvg[ALIGNED]  = PFX(addAvg_8x16_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x32].addAvg[ALIGNED]  = PFX(addAvg_8x32_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_8x64].addAvg[ALIGNED]  = PFX(addAvg_8x64_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_12x32].addAvg[ALIGNED] = PFX(addAvg_12x32_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x8].addAvg[ALIGNED]  = PFX(addAvg_16x8_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x16].addAvg[ALIGNED] = PFX(addAvg_16x16_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x24].addAvg[ALIGNED] = PFX(addAvg_16x24_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].addAvg[ALIGNED] = PFX(addAvg_16x32_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].addAvg[ALIGNED] = PFX(addAvg_16x64_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].addAvg[ALIGNED] = PFX(addAvg_24x64_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].addAvg[ALIGNED] = PFX(addAvg_32x16_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].addAvg[ALIGNED] = PFX(addAvg_32x32_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].addAvg[ALIGNED] = PFX(addAvg_32x48_neon);
+        p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].addAvg[ALIGNED] = PFX(addAvg_32x64_neon);
 
        // quant
         p.quant = PFX(quant_neon);
@@ -402,7 +402,7 @@ void setupAssemblyPrimitives(EncoderPrim
        p.scale2D_64to32  = PFX(scale2D_64to32_neon);
 
        // scale1D_128to64
-        p.scale1D_128to64 = PFX(scale1D_128to64_neon);
+        p.scale1D_128to64[ALIGNED] = PFX(scale1D_128to64_neon);
 
        // copy_count
        p.cu[BLOCK_4x4].copy_cnt     = PFX(copy_cnt_4_neon);
@@ -411,37 +411,37 @@ void setupAssemblyPrimitives(EncoderPrim
        p.cu[BLOCK_32x32].copy_cnt   = PFX(copy_cnt_32_neon);
 
        // filterPixelToShort
-        p.pu[LUMA_4x4].convert_p2s   = PFX(filterPixelToShort_4x4_neon);
-        p.pu[LUMA_4x8].convert_p2s   = PFX(filterPixelToShort_4x8_neon);
-        p.pu[LUMA_4x16].convert_p2s  = PFX(filterPixelToShort_4x16_neon);
-        p.pu[LUMA_8x4].convert_p2s   = PFX(filterPixelToShort_8x4_neon);
-        p.pu[LUMA_8x8].convert_p2s   = PFX(filterPixelToShort_8x8_neon);
-        p.pu[LUMA_8x16].convert_p2s  = PFX(filterPixelToShort_8x16_neon);
-        p.pu[LUMA_8x32].convert_p2s  = PFX(filterPixelToShort_8x32_neon);
-        p.pu[LUMA_12x16].convert_p2s = PFX(filterPixelToShort_12x16_neon);
-        p.pu[LUMA_16x4].convert_p2s  = PFX(filterPixelToShort_16x4_neon);
-        p.pu[LUMA_16x8].convert_p2s  = PFX(filterPixelToShort_16x8_neon);
-        p.pu[LUMA_16x12].convert_p2s = PFX(filterPixelToShort_16x12_neon);
-        p.pu[LUMA_16x16].convert_p2s = PFX(filterPixelToShort_16x16_neon);
-        p.pu[LUMA_16x32].convert_p2s = PFX(filterPixelToShort_16x32_neon);
-        p.pu[LUMA_16x64].convert_p2s = PFX(filterPixelToShort_16x64_neon);
-        p.pu[LUMA_24x32].convert_p2s = PFX(filterPixelToShort_24x32_neon);
-        p.pu[LUMA_32x8].convert_p2s  = PFX(filterPixelToShort_32x8_neon);
-        p.pu[LUMA_32x16].convert_p2s = PFX(filterPixelToShort_32x16_neon);
-        p.pu[LUMA_32x24].convert_p2s = PFX(filterPixelToShort_32x24_neon);
-        p.pu[LUMA_32x32].convert_p2s = PFX(filterPixelToShort_32x32_neon);
-        p.pu[LUMA_32x64].convert_p2s = PFX(filterPixelToShort_32x64_neon);
-        p.pu[LUMA_48x64].convert_p2s = PFX(filterPixelToShort_48x64_neon);
-        p.pu[LUMA_64x16].convert_p2s = PFX(filterPixelToShort_64x16_neon);
-        p.pu[LUMA_64x32].convert_p2s = PFX(filterPixelToShort_64x32_neon);
-        p.pu[LUMA_64x48].convert_p2s = PFX(filterPixelToShort_64x48_neon);
-        p.pu[LUMA_64x64].convert_p2s = PFX(filterPixelToShort_64x64_neon);
+        p.pu[LUMA_4x4].convert_p2s[ALIGNED]   = PFX(filterPixelToShort_4x4_neon);
+        p.pu[LUMA_4x8].convert_p2s[ALIGNED]   = PFX(filterPixelToShort_4x8_neon);
+        p.pu[LUMA_4x16].convert_p2s[ALIGNED]  = PFX(filterPixelToShort_4x16_neon);
+        p.pu[LUMA_8x4].convert_p2s[ALIGNED]   = PFX(filterPixelToShort_8x4_neon);
+        p.pu[LUMA_8x8].convert_p2s[ALIGNED]   = PFX(filterPixelToShort_8x8_neon);
+        p.pu[LUMA_8x16].convert_p2s[ALIGNED]  = PFX(filterPixelToShort_8x16_neon);
+        p.pu[LUMA_8x32].convert_p2s[ALIGNED]  = PFX(filterPixelToShort_8x32_neon);
+        p.pu[LUMA_12x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_12x16_neon);
+        p.pu[LUMA_16x4].convert_p2s[ALIGNED]  = PFX(filterPixelToShort_16x4_neon);
+        p.pu[LUMA_16x8].convert_p2s[ALIGNED]  = PFX(filterPixelToShort_16x8_neon);
+        p.pu[LUMA_16x12].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x12_neon);
+        p.pu[LUMA_16x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x16_neon);
+        p.pu[LUMA_16x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x32_neon);
+        p.pu[LUMA_16x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_16x64_neon);
+        p.pu[LUMA_24x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_24x32_neon);
+        p.pu[LUMA_32x8].convert_p2s[ALIGNED]  = PFX(filterPixelToShort_32x8_neon);
+        p.pu[LUMA_32x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x16_neon);
+        p.pu[LUMA_32x24].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x24_neon);
+        p.pu[LUMA_32x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x32_neon);
+        p.pu[LUMA_32x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_32x64_neon);
+        p.pu[LUMA_48x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_48x64_neon);
+        p.pu[LUMA_64x16].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x16_neon);
+        p.pu[LUMA_64x32].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x32_neon);
+        p.pu[LUMA_64x48].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x48_neon);
+        p.pu[LUMA_64x64].convert_p2s[ALIGNED] = PFX(filterPixelToShort_64x64_neon);
 
        // Block_fill
-        p.cu[BLOCK_4x4].blockfill_s   = PFX(blockfill_s_4x4_neon);
-        p.cu[BLOCK_8x8].blockfill_s   = PFX(blockfill_s_8x8_neon);
-        p.cu[BLOCK_16x16].blockfill_s = PFX(blockfill_s_16x16_neon);
-        p.cu[BLOCK_32x32].blockfill_s = PFX(blockfill_s_32x32_neon);
+        p.cu[BLOCK_4x4].blockfill_s[ALIGNED]   = PFX(blockfill_s_4x4_neon);
+        p.cu[BLOCK_8x8].blockfill_s[ALIGNED]   = PFX(blockfill_s_8x8_neon);
+        p.cu[BLOCK_16x16].blockfill_s[ALIGNED] = PFX(blockfill_s_16x16_neon);
+        p.cu[BLOCK_32x32].blockfill_s[ALIGNED] = PFX(blockfill_s_32x32_neon);
 
        // Blockcopy_ss
        p.cu[BLOCK_4x4].copy_ss   = PFX(blockcopy_ss_4x4_neon);
@@ -495,21 +495,21 @@ void setupAssemblyPrimitives(EncoderPrim
        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].copy_sp = PFX(blockcopy_sp_32x64_neon);
 
        // pixel_add_ps
-        p.cu[BLOCK_4x4].add_ps   = PFX(pixel_add_ps_4x4_neon);
-        p.cu[BLOCK_8x8].add_ps   = PFX(pixel_add_ps_8x8_neon);
-        p.cu[BLOCK_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
-        p.cu[BLOCK_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
-        p.cu[BLOCK_64x64].add_ps = PFX(pixel_add_ps_64x64_neon);
+        p.cu[BLOCK_4x4].add_ps[ALIGNED]   = PFX(pixel_add_ps_4x4_neon);
+        p.cu[BLOCK_8x8].add_ps[ALIGNED]   = PFX(pixel_add_ps_8x8_neon);
+        p.cu[BLOCK_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
+        p.cu[BLOCK_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
+        p.cu[BLOCK_64x64].add_ps[ALIGNED] = PFX(pixel_add_ps_64x64_neon);
 
        // chroma add_ps
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps   = PFX(pixel_add_ps_4x4_neon);
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps   = PFX(pixel_add_ps_8x8_neon);
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps = PFX(pixel_add_ps_16x16_neon);
-        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps = PFX(pixel_add_ps_32x32_neon);
-        p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps   = PFX(pixel_add_ps_4x8_neon);
-        p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps  = PFX(pixel_add_ps_8x16_neon);
-        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps = PFX(pixel_add_ps_16x32_neon);
-        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps = PFX(pixel_add_ps_32x64_neon);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_4x4].add_ps[ALIGNED]   = PFX(pixel_add_ps_4x4_neon);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_8x8].add_ps[ALIGNED]   = PFX(pixel_add_ps_8x8_neon);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_16x16].add_ps[ALIGNED] = PFX(pixel_add_ps_16x16_neon);
+        p.chroma[X265_CSP_I420].cu[BLOCK_420_32x32].add_ps[ALIGNED] = PFX(pixel_add_ps_32x32_neon);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_4x8].add_ps[ALIGNED]   = PFX(pixel_add_ps_4x8_neon);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_8x16].add_ps[ALIGNED]  = PFX(pixel_add_ps_8x16_neon);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_16x32].add_ps[ALIGNED] = PFX(pixel_add_ps_16x32_neon);
+        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].add_ps[ALIGNED] = PFX(pixel_add_ps_32x64_neon);
 
        // cpy2Dto1D_shr
        p.cu[BLOCK_4x4].cpy2Dto1D_shr   = PFX(cpy2Dto1D_shr_4x4_neon);
@@ -518,10 +518,10 @@ void setupAssemblyPrimitives(EncoderPrim
        p.cu[BLOCK_32x32].cpy2Dto1D_shr = PFX(cpy2Dto1D_shr_32x32_neon);
 
        // ssd_s
-        p.cu[BLOCK_4x4].ssd_s   = PFX(pixel_ssd_s_4x4_neon);
-        p.cu[BLOCK_8x8].ssd_s   = PFX(pixel_ssd_s_8x8_neon);
-        p.cu[BLOCK_16x16].ssd_s = PFX(pixel_ssd_s_16x16_neon);
-        p.cu[BLOCK_32x32].ssd_s = PFX(pixel_ssd_s_32x32_neon);
+        p.cu[BLOCK_4x4].ssd_s[ALIGNED]   = PFX(pixel_ssd_s_4x4_neon);
+        p.cu[BLOCK_8x8].ssd_s[ALIGNED]   = PFX(pixel_ssd_s_8x8_neon);
+        p.cu[BLOCK_16x16].ssd_s[ALIGNED] = PFX(pixel_ssd_s_16x16_neon);
+        p.cu[BLOCK_32x32].ssd_s[ALIGNED] = PFX(pixel_ssd_s_32x32_neon);
 
        // sse_ss
        p.cu[BLOCK_4x4].sse_ss   = PFX(pixel_sse_ss_4x4_neon);
@@ -548,10 +548,10 @@ void setupAssemblyPrimitives(EncoderPrim
        p.chroma[X265_CSP_I422].cu[BLOCK_422_32x64].sub_ps = PFX(pixel_sub_ps_32x64_neon);
 
        // calc_Residual
-        p.cu[BLOCK_4x4].calcresidual   = PFX(getResidual4_neon);
-        p.cu[BLOCK_8x8].calcresidual   = PFX(getResidual8_neon);
-        p.cu[BLOCK_16x16].calcresidual = PFX(getResidual16_neon);
-        p.cu[BLOCK_32x32].calcresidual = PFX(getResidual32_neon);
+        p.cu[BLOCK_4x4].calcresidual[ALIGNED]   = PFX(getResidual4_neon);
+        p.cu[BLOCK_8x8].calcresidual[ALIGNED]   = PFX(getResidual8_neon);
+        p.cu[BLOCK_16x16].calcresidual[ALIGNED] = PFX(getResidual16_neon);
+        p.cu[BLOCK_32x32].calcresidual[ALIGNED] = PFX(getResidual32_neon);
 
        // sse_pp
        p.cu[BLOCK_4x4].sse_pp   = PFX(pixel_sse_pp_4x4_neon);
@@ -722,31 +722,31 @@ void setupAssemblyPrimitives(EncoderPrim
        p.pu[LUMA_64x64].sad_x4 = PFX(sad_x4_64x64_neon);
 
        // pixel_avg_pp
-        p.pu[LUMA_4x4].pixelavg_pp   = PFX(pixel_avg_pp_4x4_neon);
-        p.pu[LUMA_4x8].pixelavg_pp   = PFX(pixel_avg_pp_4x8_neon);
-        p.pu[LUMA_4x16].pixelavg_pp  = PFX(pixel_avg_pp_4x16_neon);
-        p.pu[LUMA_8x4].pixelavg_pp   = PFX(pixel_avg_pp_8x4_neon);
-        p.pu[LUMA_8x8].pixelavg_pp   = PFX(pixel_avg_pp_8x8_neon);
-        p.pu[LUMA_8x16].pixelavg_pp  = PFX(pixel_avg_pp_8x16_neon);
-        p.pu[LUMA_8x32].pixelavg_pp  = PFX(pixel_avg_pp_8x32_neon);
-        p.pu[LUMA_12x16].pixelavg_pp = PFX(pixel_avg_pp_12x16_neon);
-        p.pu[LUMA_16x4].pixelavg_pp  = PFX(pixel_avg_pp_16x4_neon);
-        p.pu[LUMA_16x8].pixelavg_pp  = PFX(pixel_avg_pp_16x8_neon);
-        p.pu[LUMA_16x12].pixelavg_pp = PFX(pixel_avg_pp_16x12_neon);
-        p.pu[LUMA_16x16].pixelavg_pp = PFX(pixel_avg_pp_16x16_neon);
-        p.pu[LUMA_16x32].pixelavg_pp = PFX(pixel_avg_pp_16x32_neon);
-        p.pu[LUMA_16x64].pixelavg_pp = PFX(pixel_avg_pp_16x64_neon);
-        p.pu[LUMA_24x32].pixelavg_pp = PFX(pixel_avg_pp_24x32_neon);
-        p.pu[LUMA_32x8].pixelavg_pp  = PFX(pixel_avg_pp_32x8_neon);
-        p.pu[LUMA_32x16].pixelavg_pp = PFX(pixel_avg_pp_32x16_neon);
-        p.pu[LUMA_32x24].pixelavg_pp = PFX(pixel_avg_pp_32x24_neon);
-        p.pu[LUMA_32x32].pixelavg_pp = PFX(pixel_avg_pp_32x32_neon);
-        p.pu[LUMA_32x64].pixelavg_pp = PFX(pixel_avg_pp_32x64_neon);
-        p.pu[LUMA_48x64].pixelavg_pp = PFX(pixel_avg_pp_48x64_neon);
-        p.pu[LUMA_64x16].pixelavg_pp = PFX(pixel_avg_pp_64x16_neon);
-        p.pu[LUMA_64x32].pixelavg_pp = PFX(pixel_avg_pp_64x32_neon);
-        p.pu[LUMA_64x48].pixelavg_pp = PFX(pixel_avg_pp_64x48_neon);
-        p.pu[LUMA_64x64].pixelavg_pp = PFX(pixel_avg_pp_64x64_neon);
+        p.pu[LUMA_4x4].pixelavg_pp[ALIGNED]   = PFX(pixel_avg_pp_4x4_neon);
+        p.pu[LUMA_4x8].pixelavg_pp[ALIGNED]   = PFX(pixel_avg_pp_4x8_neon);
+        p.pu[LUMA_4x16].pixelavg_pp[ALIGNED]  = PFX(pixel_avg_pp_4x16_neon);
+        p.pu[LUMA_8x4].pixelavg_pp[ALIGNED]   = PFX(pixel_avg_pp_8x4_neon);
+        p.pu[LUMA_8x8].pixelavg_pp[ALIGNED]   = PFX(pixel_avg_pp_8x8_neon);
+        p.pu[LUMA_8x16].pixelavg_pp[ALIGNED]  = PFX(pixel_avg_pp_8x16_neon);
+        p.pu[LUMA_8x32].pixelavg_pp[ALIGNED]  = PFX(pixel_avg_pp_8x32_neon);
+        p.pu[LUMA_12x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_12x16_neon);
+        p.pu[LUMA_16x4].pixelavg_pp[ALIGNED]  = PFX(pixel_avg_pp_16x4_neon);
+        p.pu[LUMA_16x8].pixelavg_pp[ALIGNED]  = PFX(pixel_avg_pp_16x8_neon);
+        p.pu[LUMA_16x12].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x12_neon);
+        p.pu[LUMA_16x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x16_neon);
+        p.pu[LUMA_16x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x32_neon);
+        p.pu[LUMA_16x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_16x64_neon);
+        p.pu[LUMA_24x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_24x32_neon);
+        p.pu[LUMA_32x8].pixelavg_pp[ALIGNED]  = PFX(pixel_avg_pp_32x8_neon);
+        p.pu[LUMA_32x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x16_neon);
+        p.pu[LUMA_32x24].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x24_neon);
+        p.pu[LUMA_32x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x32_neon);
+        p.pu[LUMA_32x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_32x64_neon);
+        p.pu[LUMA_48x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_48x64_neon);
+        p.pu[LUMA_64x16].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x16_neon);
+        p.pu[LUMA_64x32].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x32_neon);
+        p.pu[LUMA_64x48].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x48_neon);
+        p.pu[LUMA_64x64].pixelavg_pp[ALIGNED] = PFX(pixel_avg_pp_64x64_neon);
 
        // planecopy
        p.planecopy_cp = PFX(pixel_planecopy_cp_neon);
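
For context on why the removed patch existed at all: the error quoted in its header comes from x265 2.8 turning primitive-table entries such as addAvg and convert_p2s into two-element arrays (one slot per NONALIGNED/ALIGNED variant) while source/common/arm/asm-primitives.cpp still assigned plain function pointers. The following minimal C++ sketch reproduces that mismatch with simplified stand-in types; it is an illustration, not x265's real headers:

// Sketch only: PU, addavg_t and the ALIGNED constant are simplified stand-ins
// for x265's primitive tables, kept just small enough to show the type error.
#include <cstdint>

typedef unsigned char pixel;
enum { NONALIGNED = 0, ALIGNED = 1 };

typedef void (*addavg_t)(const pixel*, intptr_t, int16_t*, intptr_t);

struct PU
{
    addavg_t addAvg[2];  // x265 2.8: one slot per alignment variant
};

static void addAvg_4x4_neon(const pixel*, intptr_t, int16_t*, intptr_t) {}

int main()
{
    PU pu = {};
    // pu.addAvg = addAvg_4x4_neon;        // error: incompatible types in
    //                                     // assignment to 'void (*[2])(...)'
    pu.addAvg[ALIGNED] = addAvg_4x4_neon;  // what the removed patch changed it to
    pu.addAvg[ALIGNED](nullptr, 0, nullptr, 0);
    return 0;
}

Indexing the new [ALIGNED] slot, as the removed patch did for every NEON routine, is enough to make each assignment type-check again.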

gnu/packages/patches/x265-detect512-all-arches.patch  (new file; 37 lines added)

https://sources.debian.org/data/main/x/x265/2.9-3/debian/patches/0003-detect512-is-needed-on-all-architectures.patch

From: Adam Sampson <unknown@bitbucket>
Date: Sun, 14 Oct 2018 14:04:18 +0200
Subject: detect512 is needed on all architectures

---
 source/common/cpu.cpp | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/source/common/cpu.cpp b/source/common/cpu.cpp
index 0681ff5..fa687da 100644
--- a/source/common/cpu.cpp
+++ b/source/common/cpu.cpp
@@ -110,6 +110,11 @@ const cpu_name_t cpu_names[] =
     { "", 0 },
 };
 
+bool detect512()
+{
+    return(enable512);
+}
+
 #if X265_ARCH_X86
 
 extern "C" {
@@ -123,10 +128,6 @@ uint64_t PFX(cpu_xgetbv)(int xcr);
 #pragma warning(disable: 4309) // truncation of constant value
 #endif
 
-bool detect512()
-{
-    return(enable512);
-}
 uint32_t cpu_detect(bool benableavx512 )
 {
 
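
The new patch is easier to read with the guard structure in mind: detect512() used to be defined inside the #if X265_ARCH_X86 block of source/common/cpu.cpp, yet the function is needed on every architecture (as the patch subject says), so armhf and aarch64 builds were left without a definition. The hunks above simply hoist the definition out of the x86-only section. A reduced, hypothetical sketch of the same layout, not the actual cpu.cpp:

// Sketch only: enable512 and the empty x86 section are placeholders; the point
// is that detect512() now has a definition even when X265_ARCH_X86 is 0.
#include <cstdio>

#ifndef X265_ARCH_X86
#define X265_ARCH_X86 0   // pretend this translation unit is built for ARM
#endif

static bool enable512 = false;

bool detect512()          // before the patch this lived inside the #if below,
{                         // leaving non-x86 builds with an unresolved symbol
    return enable512;
}

#if X265_ARCH_X86
// cpuid/xgetbv based feature probing would go here on x86.
#endif

int main()
{
    std::printf("AVX-512 enabled: %s\n", detect512() ? "yes" : "no");
    return 0;
}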
gnu/packages/video.scm

@@ -401,7 +401,7 @@ and creating Matroska files from other media files (@code{mkvmerge}).")
       (sha256
        (base32
         "090hp4216isis8q5gb7bwzia8rfyzni54z21jnwm97x3hiy6ibpb"))
-      (patches (search-patches "x265-arm-asm-primitives.patch"))
+      (patches (search-patches "x265-detect512-all-arches.patch"))
       (modules '((guix build utils)))
       (snippet '(begin
                   (delete-file-recursively "source/compat/getopt")
@@ -409,12 +409,8 @@ and creating Matroska files from other media files (@code{mkvmerge}).")
   (build-system cmake-build-system)
   (arguments
    `(#:tests? #f ; tests are skipped if cpu-optimized code isn't built
-      ;; Currently the source code doesn't check for aarch64.
-      ,@(if (any (cute string-prefix? <> (or (%current-system)
-                                             (%current-target-system)))
-                 '("armhf" "aarch64"))
-          '(#:configure-flags '("-DENABLE_PIC=TRUE"))
-          '())
+      ;; Ensure position independent code for everyone.
+      #:configure-flags '("-DENABLE_PIC=TRUE")
      #:phases
      (modify-phases %standard-phases
        (add-before 'configure 'prepare-build