Index: jdmarker.c =================================================================== --- jdmarker.c (revision 829) +++ jdmarker.c (working copy) @@ -910,7 +910,7 @@ } if (cinfo->marker->discarded_bytes != 0) { - WARNMS2(cinfo, JWRN_EXTRANEOUS_DATA, cinfo->marker->discarded_bytes, c); + TRACEMS2(cinfo, 1, JWRN_EXTRANEOUS_DATA, cinfo->marker->discarded_bytes, c); cinfo->marker->discarded_bytes = 0; } @@ -944,7 +944,144 @@ return TRUE; } +#ifdef MOTION_JPEG_SUPPORTED +/* The default Huffman tables used by motion JPEG frames. When a motion JPEG + * frame does not have DHT tables, we should use the huffman tables suggested by + * the JPEG standard. Each of these tables represents a member of the JHUFF_TBLS + * struct so we can just copy it to the according JHUFF_TBLS member. + */ +/* DC table 0 */ +LOCAL(const unsigned char) mjpg_dc0_bits[] = { + 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +LOCAL(const unsigned char) mjpg_dc0_huffval[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B +}; + +/* DC table 1 */ +LOCAL(const unsigned char) mjpg_dc1_bits[] = { + 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 +}; + +LOCAL(const unsigned char) mjpg_dc1_huffval[] = { + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0A, 0x0B +}; + +/* AC table 0 */ +LOCAL(const unsigned char) mjpg_ac0_bits[] = { + 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, + 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D +}; + +LOCAL(const unsigned char) mjpg_ac0_huffval[] = { + 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, + 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, + 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, + 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, + 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, + 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, + 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, + 0x3A, 0x43, 0x44, 0x45, 0x46, 
0x47, 0x48, 0x49, + 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, + 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, + 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, + 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, + 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, + 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, + 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, + 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, + 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, + 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, + 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, + 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, + 0xF9, 0xFA +}; + +/* AC table 1 */ +LOCAL(const unsigned char) mjpg_ac1_bits[] = { + 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, + 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77 +}; + +LOCAL(const unsigned char) mjpg_ac1_huffval[] = { + 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, + 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, + 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, + 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, + 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, + 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, + 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, + 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, + 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, + 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, + 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, + 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, + 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, + 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, + 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, + 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, + 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, + 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, + 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, + 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, + 0xF9, 0xFA +}; + +/* Loads the default Huffman tables used by motion JPEG frames. 
This function + * just copies the huffman tables suggested in the JPEG standard when we have + * not load them. + */ +LOCAL(void) +mjpg_load_huff_tables (j_decompress_ptr cinfo) +{ + JHUFF_TBL *htblptr; + + if (! cinfo->dc_huff_tbl_ptrs[0]) { + htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + MEMZERO(htblptr, SIZEOF(JHUFF_TBL)); + MEMCOPY(&htblptr->bits[1], mjpg_dc0_bits, SIZEOF(mjpg_dc0_bits)); + MEMCOPY(&htblptr->huffval[0], mjpg_dc0_huffval, SIZEOF(mjpg_dc0_huffval)); + cinfo->dc_huff_tbl_ptrs[0] = htblptr; + } + + if (! cinfo->dc_huff_tbl_ptrs[1]) { + htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + MEMZERO(htblptr, SIZEOF(JHUFF_TBL)); + MEMCOPY(&htblptr->bits[1], mjpg_dc1_bits, SIZEOF(mjpg_dc1_bits)); + MEMCOPY(&htblptr->huffval[0], mjpg_dc1_huffval, SIZEOF(mjpg_dc1_huffval)); + cinfo->dc_huff_tbl_ptrs[1] = htblptr; + } + + if (! cinfo->ac_huff_tbl_ptrs[0]) { + htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + MEMZERO(htblptr, SIZEOF(JHUFF_TBL)); + MEMCOPY(&htblptr->bits[1], mjpg_ac0_bits, SIZEOF(mjpg_ac0_bits)); + MEMCOPY(&htblptr->huffval[0], mjpg_ac0_huffval, SIZEOF(mjpg_ac0_huffval)); + cinfo->ac_huff_tbl_ptrs[0] = htblptr; + } + + if (! cinfo->ac_huff_tbl_ptrs[1]) { + htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo); + MEMZERO(htblptr, SIZEOF(JHUFF_TBL)); + MEMCOPY(&htblptr->bits[1], mjpg_ac1_bits, SIZEOF(mjpg_ac1_bits)); + MEMCOPY(&htblptr->huffval[0], mjpg_ac1_huffval, SIZEOF(mjpg_ac1_huffval)); + cinfo->ac_huff_tbl_ptrs[1] = htblptr; + } +} + +#else + +#define mjpg_load_huff_tables(cinfo) + +#endif /* MOTION_JPEG_SUPPORTED */ + + /* * Read markers until SOS or EOI. * @@ -1013,6 +1150,7 @@ break; case M_SOS: + mjpg_load_huff_tables(cinfo); if (! 
get_sos(cinfo)) return JPEG_SUSPENDED; cinfo->unread_marker = 0; /* processed the marker */ Index: jmorecfg.h =================================================================== --- jmorecfg.h (revision 829) +++ jmorecfg.h (working copy) @@ -153,14 +153,18 @@ /* INT16 must hold at least the values -32768..32767. */ #ifndef XMD_H /* X11/xmd.h correctly defines INT16 */ +#ifndef _BASETSD_H_ /* basetsd.h correctly defines INT32 */ typedef short INT16; #endif +#endif /* INT32 must hold at least signed 32-bit values. */ #ifndef XMD_H /* X11/xmd.h correctly defines INT32 */ +#ifndef _BASETSD_H_ /* basetsd.h correctly defines INT32 */ typedef long INT32; #endif +#endif /* Datatype used for image dimensions. The JPEG standard only supports * images up to 64K*64K due to 16-bit fields in SOF markers. Therefore @@ -210,11 +214,13 @@ * explicit coding is needed; see uses of the NEED_FAR_POINTERS symbol. */ +#ifndef FAR #ifdef NEED_FAR_POINTERS #define FAR far #else #define FAR #endif +#endif /* Index: jpeglib.h =================================================================== --- jpeglib.h (revision 829) +++ jpeglib.h (working copy) @@ -15,6 +15,10 @@ #ifndef JPEGLIB_H #define JPEGLIB_H +/* Begin chromium edits */ +#include "jpeglibmangler.h" +/* End chromium edits */ + /* * First we include the configuration files that record how this * installation of the JPEG library is set up. jconfig.h can be Index: jpeglibmangler.h =================================================================== --- jpeglibmangler.h (revision 0) +++ jpeglibmangler.h (revision 0) @@ -0,0 +1,113 @@ +// Copyright (c) 2009 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef THIRD_PARTY_LIBJPEG_TURBO_JPEGLIBMANGLER_H_ +#define THIRD_PARTY_LIBJPEG_TURBO_JPEGLIBMANGLER_H_ + +// Mangle all externally visible function names so we can build our own libjpeg +// without system libraries trying to use it. 
+ +#define jpeg_make_c_derived_tbl chromium_jpeg_make_c_derived_tbl +#define jpeg_gen_optimal_table chromium_jpeg_gen_optimal_table +#define jpeg_make_d_derived_tbl chromium_jpeg_make_d_derived_tbl +#define jpeg_fill_bit_buffer chromium_jpeg_fill_bit_buffer +#define jpeg_huff_decode chromium_jpeg_huff_decode +#define jpeg_fdct_islow chromium_jpeg_fdct_islow +#define jpeg_fdct_ifast chromium_jpeg_fdct_ifast +#define jpeg_fdct_float chromium_jpeg_fdct_float +#define jpeg_idct_islow chromium_jpeg_idct_islow +#define jpeg_idct_ifast chromium_jpeg_idct_ifast +#define jpeg_idct_float chromium_jpeg_idct_float +#define jpeg_idct_4x4 chromium_jpeg_idct_4x4 +#define jpeg_idct_2x2 chromium_jpeg_idct_2x2 +#define jpeg_idct_1x1 chromium_jpeg_idct_1x1 +#define jinit_compress_master chromium_jinit_compress_master +#define jinit_c_master_control chromium_jinit_c_master_control +#define jinit_c_main_controller chromium_jinit_c_main_controller +#define jinit_c_prep_controller chromium_jinit_c_prep_controller +#define jinit_c_coef_controller chromium_jinit_c_coef_controller +#define jinit_color_converter chromium_jinit_color_converter +#define jinit_downsampler chromium_jinit_downsampler +#define jinit_forward_dct chromium_jinit_forward_dct +#define jinit_huff_encoder chromium_jinit_huff_encoder +#define jinit_phuff_encoder chromium_jinit_phuff_encoder +#define jinit_marker_writer chromium_jinit_marker_writer +#define jinit_master_decompress chromium_jinit_master_decompress +#define jinit_d_main_controller chromium_jinit_d_main_controller +#define jinit_d_coef_controller chromium_jinit_d_coef_controller +#define jinit_d_post_controller chromium_jinit_d_post_controller +#define jinit_input_controller chromium_jinit_input_controller +#define jinit_marker_reader chromium_jinit_marker_reader +#define jinit_huff_decoder chromium_jinit_huff_decoder +#define jinit_phuff_decoder chromium_jinit_phuff_decoder +#define jinit_inverse_dct chromium_jinit_inverse_dct +#define jinit_upsampler 
chromium_jinit_upsampler +#define jinit_color_deconverter chromium_jinit_color_deconverter +#define jinit_1pass_quantizer chromium_jinit_1pass_quantizer +#define jinit_2pass_quantizer chromium_jinit_2pass_quantizer +#define jinit_merged_upsampler chromium_jinit_merged_upsampler +#define jinit_memory_mgr chromium_jinit_memory_mgr +#define jdiv_round_up chromium_jdiv_round_up +#define jround_up chromium_jround_up +#define jcopy_sample_rows chromium_jcopy_sample_rows +#define jcopy_block_row chromium_jcopy_block_row +#define jzero_far chromium_jzero_far +#define jpeg_std_error chromium_jpeg_std_error +#define jpeg_CreateCompress chromium_jpeg_CreateCompress +#define jpeg_CreateDecompress chromium_jpeg_CreateDecompress +#define jpeg_destroy_compress chromium_jpeg_destroy_compress +#define jpeg_destroy_decompress chromium_jpeg_destroy_decompress +#define jpeg_stdio_dest chromium_jpeg_stdio_dest +#define jpeg_stdio_src chromium_jpeg_stdio_src +#define jpeg_set_defaults chromium_jpeg_set_defaults +#define jpeg_set_colorspace chromium_jpeg_set_colorspace +#define jpeg_default_colorspace chromium_jpeg_default_colorspace +#define jpeg_set_quality chromium_jpeg_set_quality +#define jpeg_set_linear_quality chromium_jpeg_set_linear_quality +#define jpeg_add_quant_table chromium_jpeg_add_quant_table +#define jpeg_quality_scaling chromium_jpeg_quality_scaling +#define jpeg_simple_progression chromium_jpeg_simple_progression +#define jpeg_suppress_tables chromium_jpeg_suppress_tables +#define jpeg_alloc_quant_table chromium_jpeg_alloc_quant_table +#define jpeg_alloc_huff_table chromium_jpeg_alloc_huff_table +#define jpeg_start_compress chromium_jpeg_start_compress +#define jpeg_write_scanlines chromium_jpeg_write_scanlines +#define jpeg_finish_compress chromium_jpeg_finish_compress +#define jpeg_write_raw_data chromium_jpeg_write_raw_data +#define jpeg_write_marker chromium_jpeg_write_marker +#define jpeg_write_m_header chromium_jpeg_write_m_header +#define jpeg_write_m_byte 
chromium_jpeg_write_m_byte +#define jpeg_write_tables chromium_jpeg_write_tables +#define jpeg_read_header chromium_jpeg_read_header +#define jpeg_start_decompress chromium_jpeg_start_decompress +#define jpeg_read_scanlines chromium_jpeg_read_scanlines +#define jpeg_finish_decompress chromium_jpeg_finish_decompress +#define jpeg_read_raw_data chromium_jpeg_read_raw_data +#define jpeg_has_multiple_scans chromium_jpeg_has_multiple_scans +#define jpeg_start_output chromium_jpeg_start_output +#define jpeg_finish_output chromium_jpeg_finish_output +#define jpeg_input_complete chromium_jpeg_input_complete +#define jpeg_new_colormap chromium_jpeg_new_colormap +#define jpeg_consume_input chromium_jpeg_consume_input +#define jpeg_calc_output_dimensions chromium_jpeg_calc_output_dimensions +#define jpeg_save_markers chromium_jpeg_save_markers +#define jpeg_set_marker_processor chromium_jpeg_set_marker_processor +#define jpeg_read_coefficients chromium_jpeg_read_coefficients +#define jpeg_write_coefficients chromium_jpeg_write_coefficients +#define jpeg_copy_critical_parameters chromium_jpeg_copy_critical_parameters +#define jpeg_abort_compress chromium_jpeg_abort_compress +#define jpeg_abort_decompress chromium_jpeg_abort_decompress +#define jpeg_abort chromium_jpeg_abort +#define jpeg_destroy chromium_jpeg_destroy +#define jpeg_resync_to_restart chromium_jpeg_resync_to_restart +#define jpeg_get_small chromium_jpeg_get_small +#define jpeg_free_small chromium_jpeg_free_small +#define jpeg_get_large chromium_jpeg_get_large +#define jpeg_free_large chromium_jpeg_free_large +#define jpeg_mem_available chromium_jpeg_mem_available +#define jpeg_open_backing_store chromium_jpeg_open_backing_store +#define jpeg_mem_init chromium_jpeg_mem_init +#define jpeg_mem_term chromium_jpeg_mem_term + +#endif // THIRD_PARTY_LIBJPEG_TURBO_JPEGLIBMANGLER_H_ Index: simd/jcgrass2-64.asm =================================================================== --- simd/jcgrass2-64.asm (revision 829) +++ 
simd/jcgrass2-64.asm (working copy) @@ -30,7 +30,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_rgb_gray_convert_sse2) + global EXTN(jconst_rgb_gray_convert_sse2) PRIVATE EXTN(jconst_rgb_gray_convert_sse2): Index: simd/jiss2fst.asm =================================================================== --- simd/jiss2fst.asm (revision 829) +++ simd/jiss2fst.asm (working copy) @@ -59,7 +59,7 @@ %define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS) alignz 16 - global EXTN(jconst_idct_ifast_sse2) + global EXTN(jconst_idct_ifast_sse2) PRIVATE EXTN(jconst_idct_ifast_sse2): @@ -92,7 +92,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_idct_ifast_sse2) + global EXTN(jsimd_idct_ifast_sse2) PRIVATE EXTN(jsimd_idct_ifast_sse2): push ebp Index: simd/jcclrss2-64.asm =================================================================== --- simd/jcclrss2-64.asm (revision 829) +++ simd/jcclrss2-64.asm (working copy) @@ -37,7 +37,7 @@ align 16 - global EXTN(jsimd_rgb_ycc_convert_sse2) + global EXTN(jsimd_rgb_ycc_convert_sse2) PRIVATE EXTN(jsimd_rgb_ycc_convert_sse2): push rbp Index: simd/jiss2red-64.asm =================================================================== --- simd/jiss2red-64.asm (revision 829) +++ simd/jiss2red-64.asm (working copy) @@ -73,7 +73,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_red_sse2) + global EXTN(jconst_idct_red_sse2) PRIVATE EXTN(jconst_idct_red_sse2): @@ -114,7 +114,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_idct_4x4_sse2) + global EXTN(jsimd_idct_4x4_sse2) PRIVATE EXTN(jsimd_idct_4x4_sse2): push rbp @@ -413,7 +413,7 @@ ; r13 = JDIMENSION output_col align 16 - global EXTN(jsimd_idct_2x2_sse2) + global EXTN(jsimd_idct_2x2_sse2) PRIVATE EXTN(jsimd_idct_2x2_sse2): push rbp Index: simd/ji3dnflt.asm =================================================================== --- simd/ji3dnflt.asm (revision 829) +++ simd/ji3dnflt.asm (working copy) @@ -27,7 +27,7 @@ SECTION SEG_CONST alignz 16 - global 
EXTN(jconst_idct_float_3dnow) + global EXTN(jconst_idct_float_3dnow) PRIVATE EXTN(jconst_idct_float_3dnow): @@ -63,7 +63,7 @@ ; FAST_FLOAT workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_float_3dnow) + global EXTN(jsimd_idct_float_3dnow) PRIVATE EXTN(jsimd_idct_float_3dnow): push ebp Index: simd/jsimdcpu.asm =================================================================== --- simd/jsimdcpu.asm (revision 829) +++ simd/jsimdcpu.asm (working copy) @@ -29,7 +29,7 @@ ; align 16 - global EXTN(jpeg_simd_cpu_support) + global EXTN(jpeg_simd_cpu_support) PRIVATE EXTN(jpeg_simd_cpu_support): push ebx Index: simd/jdmerss2-64.asm =================================================================== --- simd/jdmerss2-64.asm (revision 829) +++ simd/jdmerss2-64.asm (working copy) @@ -35,7 +35,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_merged_upsample_sse2) + global EXTN(jconst_merged_upsample_sse2) PRIVATE EXTN(jconst_merged_upsample_sse2): Index: simd/jdsammmx.asm =================================================================== --- simd/jdsammmx.asm (revision 829) +++ simd/jdsammmx.asm (working copy) @@ -22,7 +22,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fancy_upsample_mmx) + global EXTN(jconst_fancy_upsample_mmx) PRIVATE EXTN(jconst_fancy_upsample_mmx): @@ -58,7 +58,7 @@ %define output_data_ptr(b) (b)+20 ; JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v1_fancy_upsample_mmx) + global EXTN(jsimd_h2v1_fancy_upsample_mmx) PRIVATE EXTN(jsimd_h2v1_fancy_upsample_mmx): push ebp @@ -216,7 +216,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_h2v2_fancy_upsample_mmx) + global EXTN(jsimd_h2v2_fancy_upsample_mmx) PRIVATE EXTN(jsimd_h2v2_fancy_upsample_mmx): push ebp @@ -542,7 +542,7 @@ %define output_data_ptr(b) (b)+20 ; JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v1_upsample_mmx) + global EXTN(jsimd_h2v1_upsample_mmx) PRIVATE EXTN(jsimd_h2v1_upsample_mmx): push ebp @@ -643,7 +643,7 @@ 
%define output_data_ptr(b) (b)+20 ; JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v2_upsample_mmx) + global EXTN(jsimd_h2v2_upsample_mmx) PRIVATE EXTN(jsimd_h2v2_upsample_mmx): push ebp Index: simd/jdmrgmmx.asm =================================================================== --- simd/jdmrgmmx.asm (revision 829) +++ simd/jdmrgmmx.asm (working copy) @@ -40,7 +40,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_h2v1_merged_upsample_mmx) + global EXTN(jsimd_h2v1_merged_upsample_mmx) PRIVATE EXTN(jsimd_h2v1_merged_upsample_mmx): push ebp @@ -409,7 +409,7 @@ %define output_buf(b) (b)+20 ; JSAMPARRAY output_buf align 16 - global EXTN(jsimd_h2v2_merged_upsample_mmx) + global EXTN(jsimd_h2v2_merged_upsample_mmx) PRIVATE EXTN(jsimd_h2v2_merged_upsample_mmx): push ebp Index: simd/jdsamss2.asm =================================================================== --- simd/jdsamss2.asm (revision 829) +++ simd/jdsamss2.asm (working copy) @@ -22,7 +22,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fancy_upsample_sse2) + global EXTN(jconst_fancy_upsample_sse2) PRIVATE EXTN(jconst_fancy_upsample_sse2): @@ -58,7 +58,7 @@ %define output_data_ptr(b) (b)+20 ; JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v1_fancy_upsample_sse2) + global EXTN(jsimd_h2v1_fancy_upsample_sse2) PRIVATE EXTN(jsimd_h2v1_fancy_upsample_sse2): push ebp @@ -214,7 +214,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_h2v2_fancy_upsample_sse2) + global EXTN(jsimd_h2v2_fancy_upsample_sse2) PRIVATE EXTN(jsimd_h2v2_fancy_upsample_sse2): push ebp @@ -538,7 +538,7 @@ %define output_data_ptr(b) (b)+20 ; JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v1_upsample_sse2) + global EXTN(jsimd_h2v1_upsample_sse2) PRIVATE EXTN(jsimd_h2v1_upsample_sse2): push ebp @@ -637,7 +637,7 @@ %define output_data_ptr(b) (b)+20 ; JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v2_upsample_sse2) + global 
EXTN(jsimd_h2v2_upsample_sse2) PRIVATE EXTN(jsimd_h2v2_upsample_sse2): push ebp Index: simd/jiss2flt-64.asm =================================================================== --- simd/jiss2flt-64.asm (revision 829) +++ simd/jiss2flt-64.asm (working copy) @@ -38,7 +38,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_float_sse2) + global EXTN(jconst_idct_float_sse2) PRIVATE EXTN(jconst_idct_float_sse2): @@ -74,7 +74,7 @@ ; FAST_FLOAT workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_float_sse2) + global EXTN(jsimd_idct_float_sse2) PRIVATE EXTN(jsimd_idct_float_sse2): push rbp Index: simd/jfss2int-64.asm =================================================================== --- simd/jfss2int-64.asm (revision 829) +++ simd/jfss2int-64.asm (working copy) @@ -67,7 +67,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fdct_islow_sse2) + global EXTN(jconst_fdct_islow_sse2) PRIVATE EXTN(jconst_fdct_islow_sse2): @@ -101,7 +101,7 @@ %define WK_NUM 6 align 16 - global EXTN(jsimd_fdct_islow_sse2) + global EXTN(jsimd_fdct_islow_sse2) PRIVATE EXTN(jsimd_fdct_islow_sse2): push rbp Index: simd/jcqnts2f.asm =================================================================== --- simd/jcqnts2f.asm (revision 829) +++ simd/jcqnts2f.asm (working copy) @@ -35,7 +35,7 @@ %define workspace ebp+16 ; FAST_FLOAT * workspace align 16 - global EXTN(jsimd_convsamp_float_sse2) + global EXTN(jsimd_convsamp_float_sse2) PRIVATE EXTN(jsimd_convsamp_float_sse2): push ebp @@ -115,7 +115,7 @@ %define workspace ebp+16 ; FAST_FLOAT * workspace align 16 - global EXTN(jsimd_quantize_float_sse2) + global EXTN(jsimd_quantize_float_sse2) PRIVATE EXTN(jsimd_quantize_float_sse2): push ebp Index: simd/jdmrgss2.asm =================================================================== --- simd/jdmrgss2.asm (revision 829) +++ simd/jdmrgss2.asm (working copy) @@ -40,7 +40,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_h2v1_merged_upsample_sse2) + global 
EXTN(jsimd_h2v1_merged_upsample_sse2) PRIVATE EXTN(jsimd_h2v1_merged_upsample_sse2): push ebp @@ -560,7 +560,7 @@ %define output_buf(b) (b)+20 ; JSAMPARRAY output_buf align 16 - global EXTN(jsimd_h2v2_merged_upsample_sse2) + global EXTN(jsimd_h2v2_merged_upsample_sse2) PRIVATE EXTN(jsimd_h2v2_merged_upsample_sse2): push ebp Index: simd/jfmmxint.asm =================================================================== --- simd/jfmmxint.asm (revision 829) +++ simd/jfmmxint.asm (working copy) @@ -66,7 +66,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fdct_islow_mmx) + global EXTN(jconst_fdct_islow_mmx) PRIVATE EXTN(jconst_fdct_islow_mmx): @@ -101,7 +101,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_islow_mmx) + global EXTN(jsimd_fdct_islow_mmx) PRIVATE EXTN(jsimd_fdct_islow_mmx): push ebp Index: simd/jcgryss2-64.asm =================================================================== --- simd/jcgryss2-64.asm (revision 829) +++ simd/jcgryss2-64.asm (working copy) @@ -37,7 +37,7 @@ align 16 - global EXTN(jsimd_rgb_gray_convert_sse2) + global EXTN(jsimd_rgb_gray_convert_sse2) PRIVATE EXTN(jsimd_rgb_gray_convert_sse2): push rbp Index: simd/jcqnts2i.asm =================================================================== --- simd/jcqnts2i.asm (revision 829) +++ simd/jcqnts2i.asm (working copy) @@ -35,7 +35,7 @@ %define workspace ebp+16 ; DCTELEM * workspace align 16 - global EXTN(jsimd_convsamp_sse2) + global EXTN(jsimd_convsamp_sse2) PRIVATE EXTN(jsimd_convsamp_sse2): push ebp @@ -117,7 +117,7 @@ %define workspace ebp+16 ; DCTELEM * workspace align 16 - global EXTN(jsimd_quantize_sse2) + global EXTN(jsimd_quantize_sse2) PRIVATE EXTN(jsimd_quantize_sse2): push ebp Index: simd/jiss2fst-64.asm =================================================================== --- simd/jiss2fst-64.asm (revision 829) +++ simd/jiss2fst-64.asm (working copy) @@ -60,7 +60,7 @@ %define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS) alignz 16 - global 
EXTN(jconst_idct_ifast_sse2) + global EXTN(jconst_idct_ifast_sse2) PRIVATE EXTN(jconst_idct_ifast_sse2): @@ -93,7 +93,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_idct_ifast_sse2) + global EXTN(jsimd_idct_ifast_sse2) PRIVATE EXTN(jsimd_idct_ifast_sse2): push rbp Index: simd/jiss2flt.asm =================================================================== --- simd/jiss2flt.asm (revision 829) +++ simd/jiss2flt.asm (working copy) @@ -37,7 +37,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_float_sse2) + global EXTN(jconst_idct_float_sse2) PRIVATE EXTN(jconst_idct_float_sse2): @@ -73,7 +73,7 @@ ; FAST_FLOAT workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_float_sse2) + global EXTN(jsimd_idct_float_sse2) PRIVATE EXTN(jsimd_idct_float_sse2): push ebp Index: simd/jiss2int.asm =================================================================== --- simd/jiss2int.asm (revision 829) +++ simd/jiss2int.asm (working copy) @@ -66,7 +66,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_islow_sse2) + global EXTN(jconst_idct_islow_sse2) PRIVATE EXTN(jconst_idct_islow_sse2): @@ -105,7 +105,7 @@ %define WK_NUM 12 align 16 - global EXTN(jsimd_idct_islow_sse2) + global EXTN(jsimd_idct_islow_sse2) PRIVATE EXTN(jsimd_idct_islow_sse2): push ebp Index: simd/jfsseflt-64.asm =================================================================== --- simd/jfsseflt-64.asm (revision 829) +++ simd/jfsseflt-64.asm (working copy) @@ -38,7 +38,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fdct_float_sse) + global EXTN(jconst_fdct_float_sse) PRIVATE EXTN(jconst_fdct_float_sse): @@ -65,7 +65,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_float_sse) + global EXTN(jsimd_fdct_float_sse) PRIVATE EXTN(jsimd_fdct_float_sse): push rbp Index: simd/jccolss2-64.asm =================================================================== --- simd/jccolss2-64.asm (revision 829) +++ simd/jccolss2-64.asm (working copy) @@ -34,7 +34,7 @@ SECTION SEG_CONST alignz 16 - global 
EXTN(jconst_rgb_ycc_convert_sse2) + global EXTN(jconst_rgb_ycc_convert_sse2) PRIVATE EXTN(jconst_rgb_ycc_convert_sse2): Index: simd/jcsamss2-64.asm =================================================================== --- simd/jcsamss2-64.asm (revision 829) +++ simd/jcsamss2-64.asm (working copy) @@ -41,7 +41,7 @@ ; r15 = JSAMPARRAY output_data align 16 - global EXTN(jsimd_h2v1_downsample_sse2) + global EXTN(jsimd_h2v1_downsample_sse2) PRIVATE EXTN(jsimd_h2v1_downsample_sse2): push rbp @@ -185,7 +185,7 @@ ; r15 = JSAMPARRAY output_data align 16 - global EXTN(jsimd_h2v2_downsample_sse2) + global EXTN(jsimd_h2v2_downsample_sse2) PRIVATE EXTN(jsimd_h2v2_downsample_sse2): push rbp Index: simd/jdclrss2-64.asm =================================================================== --- simd/jdclrss2-64.asm (revision 829) +++ simd/jdclrss2-64.asm (working copy) @@ -39,7 +39,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_ycc_rgb_convert_sse2) + global EXTN(jsimd_ycc_rgb_convert_sse2) PRIVATE EXTN(jsimd_ycc_rgb_convert_sse2): push rbp Index: simd/jdcolmmx.asm =================================================================== --- simd/jdcolmmx.asm (revision 829) +++ simd/jdcolmmx.asm (working copy) @@ -35,7 +35,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_ycc_rgb_convert_mmx) + global EXTN(jconst_ycc_rgb_convert_mmx) PRIVATE EXTN(jconst_ycc_rgb_convert_mmx): Index: simd/jcclrmmx.asm =================================================================== --- simd/jcclrmmx.asm (revision 829) +++ simd/jcclrmmx.asm (working copy) @@ -40,7 +40,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_rgb_ycc_convert_mmx) + global EXTN(jsimd_rgb_ycc_convert_mmx) PRIVATE EXTN(jsimd_rgb_ycc_convert_mmx): push ebp Index: simd/jfsseflt.asm =================================================================== --- simd/jfsseflt.asm (revision 829) +++ simd/jfsseflt.asm (working copy) @@ -37,7 +37,7 @@ SECTION SEG_CONST alignz 16 - global 
EXTN(jconst_fdct_float_sse) + global EXTN(jconst_fdct_float_sse) PRIVATE EXTN(jconst_fdct_float_sse): @@ -65,7 +65,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_float_sse) + global EXTN(jsimd_fdct_float_sse) PRIVATE EXTN(jsimd_fdct_float_sse): push ebp Index: simd/jdmrgss2-64.asm =================================================================== --- simd/jdmrgss2-64.asm (revision 829) +++ simd/jdmrgss2-64.asm (working copy) @@ -39,7 +39,7 @@ %define WK_NUM 3 align 16 - global EXTN(jsimd_h2v1_merged_upsample_sse2) + global EXTN(jsimd_h2v1_merged_upsample_sse2) PRIVATE EXTN(jsimd_h2v1_merged_upsample_sse2): push rbp @@ -543,7 +543,7 @@ ; r13 = JSAMPARRAY output_buf align 16 - global EXTN(jsimd_h2v2_merged_upsample_sse2) + global EXTN(jsimd_h2v2_merged_upsample_sse2) PRIVATE EXTN(jsimd_h2v2_merged_upsample_sse2): push rbp Index: simd/jdcolss2.asm =================================================================== --- simd/jdcolss2.asm (revision 829) +++ simd/jdcolss2.asm (working copy) @@ -35,7 +35,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_ycc_rgb_convert_sse2) + global EXTN(jconst_ycc_rgb_convert_sse2) PRIVATE EXTN(jconst_ycc_rgb_convert_sse2): Index: simd/jdmermmx.asm =================================================================== --- simd/jdmermmx.asm (revision 829) +++ simd/jdmermmx.asm (working copy) @@ -35,7 +35,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_merged_upsample_mmx) + global EXTN(jconst_merged_upsample_mmx) PRIVATE EXTN(jconst_merged_upsample_mmx): Index: simd/jcclrss2.asm =================================================================== --- simd/jcclrss2.asm (revision 829) +++ simd/jcclrss2.asm (working copy) @@ -38,7 +38,7 @@ align 16 - global EXTN(jsimd_rgb_ycc_convert_sse2) + global EXTN(jsimd_rgb_ycc_convert_sse2) PRIVATE EXTN(jsimd_rgb_ycc_convert_sse2): push ebp Index: simd/jiss2red.asm =================================================================== --- simd/jiss2red.asm (revision 829) +++ 
simd/jiss2red.asm (working copy) @@ -72,7 +72,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_red_sse2) + global EXTN(jconst_idct_red_sse2) PRIVATE EXTN(jconst_idct_red_sse2): @@ -113,7 +113,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_idct_4x4_sse2) + global EXTN(jsimd_idct_4x4_sse2) PRIVATE EXTN(jsimd_idct_4x4_sse2): push ebp @@ -424,7 +424,7 @@ %define output_col(b) (b)+20 ; JDIMENSION output_col align 16 - global EXTN(jsimd_idct_2x2_sse2) + global EXTN(jsimd_idct_2x2_sse2) PRIVATE EXTN(jsimd_idct_2x2_sse2): push ebp Index: simd/jdmerss2.asm =================================================================== --- simd/jdmerss2.asm (revision 829) +++ simd/jdmerss2.asm (working copy) @@ -35,7 +35,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_merged_upsample_sse2) + global EXTN(jconst_merged_upsample_sse2) PRIVATE EXTN(jconst_merged_upsample_sse2): Index: simd/jfss2fst-64.asm =================================================================== --- simd/jfss2fst-64.asm (revision 829) +++ simd/jfss2fst-64.asm (working copy) @@ -53,7 +53,7 @@ %define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS) alignz 16 - global EXTN(jconst_fdct_ifast_sse2) + global EXTN(jconst_fdct_ifast_sse2) PRIVATE EXTN(jconst_fdct_ifast_sse2): @@ -80,7 +80,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_ifast_sse2) + global EXTN(jsimd_fdct_ifast_sse2) PRIVATE EXTN(jsimd_fdct_ifast_sse2): push rbp Index: simd/jcqntmmx.asm =================================================================== --- simd/jcqntmmx.asm (revision 829) +++ simd/jcqntmmx.asm (working copy) @@ -35,7 +35,7 @@ %define workspace ebp+16 ; DCTELEM * workspace align 16 - global EXTN(jsimd_convsamp_mmx) + global EXTN(jsimd_convsamp_mmx) PRIVATE EXTN(jsimd_convsamp_mmx): push ebp @@ -140,7 +140,7 @@ %define workspace ebp+16 ; DCTELEM * workspace align 16 - global EXTN(jsimd_quantize_mmx) + global EXTN(jsimd_quantize_mmx) PRIVATE EXTN(jsimd_quantize_mmx): push ebp Index: simd/jimmxfst.asm 
=================================================================== --- simd/jimmxfst.asm (revision 829) +++ simd/jimmxfst.asm (working copy) @@ -59,7 +59,7 @@ %define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS) alignz 16 - global EXTN(jconst_idct_ifast_mmx) + global EXTN(jconst_idct_ifast_mmx) PRIVATE EXTN(jconst_idct_ifast_mmx): @@ -94,7 +94,7 @@ ; JCOEF workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_ifast_mmx) + global EXTN(jsimd_idct_ifast_mmx) PRIVATE EXTN(jsimd_idct_ifast_mmx): push ebp Index: simd/jfss2fst.asm =================================================================== --- simd/jfss2fst.asm (revision 829) +++ simd/jfss2fst.asm (working copy) @@ -52,7 +52,7 @@ %define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS) alignz 16 - global EXTN(jconst_fdct_ifast_sse2) + global EXTN(jconst_fdct_ifast_sse2) PRIVATE EXTN(jconst_fdct_ifast_sse2): @@ -80,7 +80,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_ifast_sse2) + global EXTN(jsimd_fdct_ifast_sse2) PRIVATE EXTN(jsimd_fdct_ifast_sse2): push ebp Index: simd/jcgrammx.asm =================================================================== --- simd/jcgrammx.asm (revision 829) +++ simd/jcgrammx.asm (working copy) @@ -33,7 +33,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_rgb_gray_convert_mmx) + global EXTN(jconst_rgb_gray_convert_mmx) PRIVATE EXTN(jconst_rgb_gray_convert_mmx): Index: simd/jdcolss2-64.asm =================================================================== --- simd/jdcolss2-64.asm (revision 829) +++ simd/jdcolss2-64.asm (working copy) @@ -35,7 +35,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_ycc_rgb_convert_sse2) + global EXTN(jconst_ycc_rgb_convert_sse2) PRIVATE EXTN(jconst_ycc_rgb_convert_sse2): Index: simd/jf3dnflt.asm =================================================================== --- simd/jf3dnflt.asm (revision 829) +++ simd/jf3dnflt.asm (working copy) @@ -27,7 +27,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fdct_float_3dnow) 
+ global EXTN(jconst_fdct_float_3dnow) PRIVATE EXTN(jconst_fdct_float_3dnow): @@ -55,7 +55,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_float_3dnow) + global EXTN(jsimd_fdct_float_3dnow) PRIVATE EXTN(jsimd_fdct_float_3dnow): push ebp Index: simd/jdsamss2-64.asm =================================================================== --- simd/jdsamss2-64.asm (revision 829) +++ simd/jdsamss2-64.asm (working copy) @@ -23,7 +23,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fancy_upsample_sse2) + global EXTN(jconst_fancy_upsample_sse2) PRIVATE EXTN(jconst_fancy_upsample_sse2): @@ -59,7 +59,7 @@ ; r13 = JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v1_fancy_upsample_sse2) + global EXTN(jsimd_h2v1_fancy_upsample_sse2) PRIVATE EXTN(jsimd_h2v1_fancy_upsample_sse2): push rbp @@ -201,7 +201,7 @@ %define WK_NUM 4 align 16 - global EXTN(jsimd_h2v2_fancy_upsample_sse2) + global EXTN(jsimd_h2v2_fancy_upsample_sse2) PRIVATE EXTN(jsimd_h2v2_fancy_upsample_sse2): push rbp @@ -498,7 +498,7 @@ ; r13 = JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v1_upsample_sse2) + global EXTN(jsimd_h2v1_upsample_sse2) PRIVATE EXTN(jsimd_h2v1_upsample_sse2): push rbp @@ -587,7 +587,7 @@ ; r13 = JSAMPARRAY * output_data_ptr align 16 - global EXTN(jsimd_h2v2_upsample_sse2) + global EXTN(jsimd_h2v2_upsample_sse2) PRIVATE EXTN(jsimd_h2v2_upsample_sse2): push rbp Index: simd/jcgrass2.asm =================================================================== --- simd/jcgrass2.asm (revision 829) +++ simd/jcgrass2.asm (working copy) @@ -30,7 +30,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_rgb_gray_convert_sse2) + global EXTN(jconst_rgb_gray_convert_sse2) PRIVATE EXTN(jconst_rgb_gray_convert_sse2): Index: simd/jcsammmx.asm =================================================================== --- simd/jcsammmx.asm (revision 829) +++ simd/jcsammmx.asm (working copy) @@ -40,7 +40,7 @@ %define output_data(b) (b)+28 ; JSAMPARRAY output_data align 16 - global 
EXTN(jsimd_h2v1_downsample_mmx) + global EXTN(jsimd_h2v1_downsample_mmx) PRIVATE EXTN(jsimd_h2v1_downsample_mmx): push ebp @@ -182,7 +182,7 @@ %define output_data(b) (b)+28 ; JSAMPARRAY output_data align 16 - global EXTN(jsimd_h2v2_downsample_mmx) + global EXTN(jsimd_h2v2_downsample_mmx) PRIVATE EXTN(jsimd_h2v2_downsample_mmx): push ebp Index: simd/jsimd_arm_neon.S =================================================================== --- simd/jsimd_arm_neon.S (revision 272637) +++ simd/jsimd_arm_neon.S (working copy) @@ -41,11 +41,9 @@ /* Supplementary macro for setting function attributes */ .macro asm_function fname #ifdef __APPLE__ - .func _\fname .globl _\fname _\fname: #else - .func \fname .global \fname #ifdef __ELF__ .hidden \fname @@ -670,7 +668,6 @@ .unreq ROW6R .unreq ROW7L .unreq ROW7R -.endfunc /*****************************************************************************/ @@ -895,7 +892,6 @@ .unreq TMP2 .unreq TMP3 .unreq TMP4 -.endfunc /*****************************************************************************/ @@ -1108,7 +1104,6 @@ .unreq TMP2 .unreq TMP3 .unreq TMP4 -.endfunc .purgem idct_helper @@ -1263,7 +1258,6 @@ .unreq OUTPUT_COL .unreq TMP1 .unreq TMP2 -.endfunc .purgem idct_helper @@ -1547,7 +1541,6 @@ .unreq U .unreq V .unreq N -.endfunc .purgem do_yuv_to_rgb .purgem do_yuv_to_rgb_stage1 @@ -1858,7 +1851,6 @@ .unreq U .unreq V .unreq N -.endfunc .purgem do_rgb_to_yuv .purgem do_rgb_to_yuv_stage1 @@ -1940,7 +1932,6 @@ .unreq TMP2 .unreq TMP3 .unreq TMP4 -.endfunc /*****************************************************************************/ @@ -2064,7 +2055,6 @@ .unreq DATA .unreq TMP -.endfunc /*****************************************************************************/ @@ -2166,7 +2156,6 @@ .unreq CORRECTION .unreq SHIFT .unreq LOOP_COUNT -.endfunc /*****************************************************************************/ @@ -2401,7 +2390,6 @@ .unreq WIDTH .unreq TMP -.endfunc .purgem upsample16 .purgem upsample32 Index: 
simd/jsimd_i386.c =================================================================== --- simd/jsimd_i386.c (revision 829) +++ simd/jsimd_i386.c (working copy) @@ -61,6 +61,7 @@ simd_support &= JSIMD_SSE2; } +#ifndef JPEG_DECODE_ONLY GLOBAL(int) jsimd_can_rgb_ycc (void) { @@ -82,6 +83,7 @@ return 0; } +#endif GLOBAL(int) jsimd_can_rgb_gray (void) @@ -127,6 +129,7 @@ return 0; } +#ifndef JPEG_DECODE_ONLY GLOBAL(void) jsimd_rgb_ycc_convert (j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, @@ -179,6 +182,7 @@ mmxfct(cinfo->image_width, input_buf, output_buf, output_row, num_rows); } +#endif GLOBAL(void) jsimd_rgb_gray_convert (j_compress_ptr cinfo, @@ -286,6 +290,7 @@ input_row, output_buf, num_rows); } +#ifndef JPEG_DECODE_ONLY GLOBAL(int) jsimd_can_h2v2_downsample (void) { @@ -351,6 +356,7 @@ compptr->v_samp_factor, compptr->width_in_blocks, input_data, output_data); } +#endif GLOBAL(int) jsimd_can_h2v2_upsample (void) @@ -636,6 +642,7 @@ in_row_group_ctr, output_buf); } +#ifndef JPEG_DECODE_ONLY GLOBAL(int) jsimd_can_convsamp (void) { @@ -855,6 +862,7 @@ else if (simd_support & JSIMD_3DNOW) jsimd_quantize_float_3dnow(coef_block, divisors, workspace); } +#endif GLOBAL(int) jsimd_can_idct_2x2 (void) @@ -1045,4 +1053,3 @@ jsimd_idct_float_3dnow(compptr->dct_table, coef_block, output_buf, output_col); } - Index: simd/jcqnts2f-64.asm =================================================================== --- simd/jcqnts2f-64.asm (revision 829) +++ simd/jcqnts2f-64.asm (working copy) @@ -36,7 +36,7 @@ ; r12 = FAST_FLOAT * workspace align 16 - global EXTN(jsimd_convsamp_float_sse2) + global EXTN(jsimd_convsamp_float_sse2) PRIVATE EXTN(jsimd_convsamp_float_sse2): push rbp @@ -110,7 +110,7 @@ ; r12 = FAST_FLOAT * workspace align 16 - global EXTN(jsimd_quantize_float_sse2) + global EXTN(jsimd_quantize_float_sse2) PRIVATE EXTN(jsimd_quantize_float_sse2): push rbp Index: simd/jcqnt3dn.asm =================================================================== --- 
simd/jcqnt3dn.asm (revision 829) +++ simd/jcqnt3dn.asm (working copy) @@ -35,7 +35,7 @@ %define workspace ebp+16 ; FAST_FLOAT * workspace align 16 - global EXTN(jsimd_convsamp_float_3dnow) + global EXTN(jsimd_convsamp_float_3dnow) PRIVATE EXTN(jsimd_convsamp_float_3dnow): push ebp @@ -138,7 +138,7 @@ %define workspace ebp+16 ; FAST_FLOAT * workspace align 16 - global EXTN(jsimd_quantize_float_3dnow) + global EXTN(jsimd_quantize_float_3dnow) PRIVATE EXTN(jsimd_quantize_float_3dnow): push ebp Index: simd/jcsamss2.asm =================================================================== --- simd/jcsamss2.asm (revision 829) +++ simd/jcsamss2.asm (working copy) @@ -40,7 +40,7 @@ %define output_data(b) (b)+28 ; JSAMPARRAY output_data align 16 - global EXTN(jsimd_h2v1_downsample_sse2) + global EXTN(jsimd_h2v1_downsample_sse2) PRIVATE EXTN(jsimd_h2v1_downsample_sse2): push ebp @@ -195,7 +195,7 @@ %define output_data(b) (b)+28 ; JSAMPARRAY output_data align 16 - global EXTN(jsimd_h2v2_downsample_sse2) + global EXTN(jsimd_h2v2_downsample_sse2) PRIVATE EXTN(jsimd_h2v2_downsample_sse2): push ebp Index: simd/jsimd_x86_64.c =================================================================== --- simd/jsimd_x86_64.c (revision 829) +++ simd/jsimd_x86_64.c (working copy) @@ -29,6 +29,7 @@ #define IS_ALIGNED_SSE(ptr) (IS_ALIGNED(ptr, 4)) /* 16 byte alignment */ +#ifndef JPEG_DECODE_ONLY GLOBAL(int) jsimd_can_rgb_ycc (void) { @@ -45,6 +46,7 @@ return 1; } +#endif GLOBAL(int) jsimd_can_rgb_gray (void) @@ -80,6 +82,7 @@ return 1; } +#ifndef JPEG_DECODE_ONLY GLOBAL(void) jsimd_rgb_ycc_convert (j_compress_ptr cinfo, JSAMPARRAY input_buf, JSAMPIMAGE output_buf, @@ -118,6 +121,7 @@ sse2fct(cinfo->image_width, input_buf, output_buf, output_row, num_rows); } +#endif GLOBAL(void) jsimd_rgb_gray_convert (j_compress_ptr cinfo, @@ -197,6 +201,7 @@ sse2fct(cinfo->output_width, input_buf, input_row, output_buf, num_rows); } +#ifndef JPEG_DECODE_ONLY GLOBAL(int) jsimd_can_h2v2_downsample (void) { @@ 
-242,6 +247,7 @@ compptr->width_in_blocks, input_data, output_data); } +#endif GLOBAL(int) jsimd_can_h2v2_upsample (void) @@ -451,6 +457,7 @@ sse2fct(cinfo->output_width, input_buf, in_row_group_ctr, output_buf); } +#ifndef JPEG_DECODE_ONLY GLOBAL(int) jsimd_can_convsamp (void) { @@ -601,6 +608,7 @@ { jsimd_quantize_float_sse2(coef_block, divisors, workspace); } +#endif GLOBAL(int) jsimd_can_idct_2x2 (void) @@ -750,4 +758,3 @@ jsimd_idct_float_sse2(compptr->dct_table, coef_block, output_buf, output_col); } - Index: simd/jimmxint.asm =================================================================== --- simd/jimmxint.asm (revision 829) +++ simd/jimmxint.asm (working copy) @@ -66,7 +66,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_islow_mmx) + global EXTN(jconst_idct_islow_mmx) PRIVATE EXTN(jconst_idct_islow_mmx): @@ -107,7 +107,7 @@ ; JCOEF workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_islow_mmx) + global EXTN(jsimd_idct_islow_mmx) PRIVATE EXTN(jsimd_idct_islow_mmx): push ebp Index: simd/jcgrymmx.asm =================================================================== --- simd/jcgrymmx.asm (revision 829) +++ simd/jcgrymmx.asm (working copy) @@ -41,7 +41,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_rgb_gray_convert_mmx) + global EXTN(jsimd_rgb_gray_convert_mmx) PRIVATE EXTN(jsimd_rgb_gray_convert_mmx): push ebp Index: simd/jfss2int.asm =================================================================== --- simd/jfss2int.asm (revision 829) +++ simd/jfss2int.asm (working copy) @@ -66,7 +66,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_fdct_islow_sse2) + global EXTN(jconst_fdct_islow_sse2) PRIVATE EXTN(jconst_fdct_islow_sse2): @@ -101,7 +101,7 @@ %define WK_NUM 6 align 16 - global EXTN(jsimd_fdct_islow_sse2) + global EXTN(jsimd_fdct_islow_sse2) PRIVATE EXTN(jsimd_fdct_islow_sse2): push ebp Index: simd/jcgryss2.asm =================================================================== --- 
simd/jcgryss2.asm (revision 829) +++ simd/jcgryss2.asm (working copy) @@ -39,7 +39,7 @@ align 16 - global EXTN(jsimd_rgb_gray_convert_sse2) + global EXTN(jsimd_rgb_gray_convert_sse2) PRIVATE EXTN(jsimd_rgb_gray_convert_sse2): push ebp Index: simd/jccolmmx.asm =================================================================== --- simd/jccolmmx.asm (revision 829) +++ simd/jccolmmx.asm (working copy) @@ -37,7 +37,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_rgb_ycc_convert_mmx) + global EXTN(jconst_rgb_ycc_convert_mmx) PRIVATE EXTN(jconst_rgb_ycc_convert_mmx): Index: simd/jimmxred.asm =================================================================== --- simd/jimmxred.asm (revision 829) +++ simd/jimmxred.asm (working copy) @@ -72,7 +72,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_red_mmx) + global EXTN(jconst_idct_red_mmx) PRIVATE EXTN(jconst_idct_red_mmx): @@ -115,7 +115,7 @@ ; JCOEF workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_4x4_mmx) + global EXTN(jsimd_idct_4x4_mmx) PRIVATE EXTN(jsimd_idct_4x4_mmx): push ebp @@ -503,7 +503,7 @@ %define output_col(b) (b)+20 ; JDIMENSION output_col align 16 - global EXTN(jsimd_idct_2x2_mmx) + global EXTN(jsimd_idct_2x2_mmx) PRIVATE EXTN(jsimd_idct_2x2_mmx): push ebp Index: simd/jsimdext.inc =================================================================== --- simd/jsimdext.inc (revision 829) +++ simd/jsimdext.inc (working copy) @@ -73,6 +73,9 @@ ; * *BSD family Unix using elf format ; * Unix System V, including Solaris x86, UnixWare and SCO Unix +; PIC is the default on Linux +%define PIC + ; mark stack as non-executable section .note.GNU-stack noalloc noexec nowrite progbits @@ -375,4 +378,14 @@ ; %include "jsimdcfg.inc" +; Begin chromium edits +%ifdef MACHO ; ----(nasm -fmacho -DMACHO ...)-------- +%define PRIVATE :private_extern +%elifdef ELF ; ----(nasm -felf[64] -DELF ...)------------ +%define PRIVATE :hidden +%else +%define PRIVATE +%endif +; End chromium edits + ; 
-------------------------------------------------------------------------- Index: simd/jdclrmmx.asm =================================================================== --- simd/jdclrmmx.asm (revision 829) +++ simd/jdclrmmx.asm (working copy) @@ -40,7 +40,7 @@ %define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_ycc_rgb_convert_mmx) + global EXTN(jsimd_ycc_rgb_convert_mmx) PRIVATE EXTN(jsimd_ycc_rgb_convert_mmx): push ebp Index: simd/jccolss2.asm =================================================================== --- simd/jccolss2.asm (revision 829) +++ simd/jccolss2.asm (working copy) @@ -34,7 +34,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_rgb_ycc_convert_sse2) + global EXTN(jconst_rgb_ycc_convert_sse2) PRIVATE EXTN(jconst_rgb_ycc_convert_sse2): Index: simd/jisseflt.asm =================================================================== --- simd/jisseflt.asm (revision 829) +++ simd/jisseflt.asm (working copy) @@ -37,7 +37,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_float_sse) + global EXTN(jconst_idct_float_sse) PRIVATE EXTN(jconst_idct_float_sse): @@ -73,7 +73,7 @@ ; FAST_FLOAT workspace[DCTSIZE2] align 16 - global EXTN(jsimd_idct_float_sse) + global EXTN(jsimd_idct_float_sse) PRIVATE EXTN(jsimd_idct_float_sse): push ebp Index: simd/jcqnts2i-64.asm =================================================================== --- simd/jcqnts2i-64.asm (revision 829) +++ simd/jcqnts2i-64.asm (working copy) @@ -36,7 +36,7 @@ ; r12 = DCTELEM * workspace align 16 - global EXTN(jsimd_convsamp_sse2) + global EXTN(jsimd_convsamp_sse2) PRIVATE EXTN(jsimd_convsamp_sse2): push rbp @@ -112,7 +112,7 @@ ; r12 = DCTELEM * workspace align 16 - global EXTN(jsimd_quantize_sse2) + global EXTN(jsimd_quantize_sse2) PRIVATE EXTN(jsimd_quantize_sse2): push rbp Index: simd/jdclrss2.asm =================================================================== --- simd/jdclrss2.asm (revision 829) +++ simd/jdclrss2.asm (working copy) @@ -40,7 +40,7 @@ 
%define gotptr wk(0)-SIZEOF_POINTER ; void * gotptr align 16 - global EXTN(jsimd_ycc_rgb_convert_sse2) + global EXTN(jsimd_ycc_rgb_convert_sse2) PRIVATE EXTN(jsimd_ycc_rgb_convert_sse2): push ebp Index: simd/jcqntsse.asm =================================================================== --- simd/jcqntsse.asm (revision 829) +++ simd/jcqntsse.asm (working copy) @@ -35,7 +35,7 @@ %define workspace ebp+16 ; FAST_FLOAT * workspace align 16 - global EXTN(jsimd_convsamp_float_sse) + global EXTN(jsimd_convsamp_float_sse) PRIVATE EXTN(jsimd_convsamp_float_sse): push ebp @@ -138,7 +138,7 @@ %define workspace ebp+16 ; FAST_FLOAT * workspace align 16 - global EXTN(jsimd_quantize_float_sse) + global EXTN(jsimd_quantize_float_sse) PRIVATE EXTN(jsimd_quantize_float_sse): push ebp Index: simd/jiss2int-64.asm =================================================================== --- simd/jiss2int-64.asm (revision 829) +++ simd/jiss2int-64.asm (working copy) @@ -67,7 +67,7 @@ SECTION SEG_CONST alignz 16 - global EXTN(jconst_idct_islow_sse2) + global EXTN(jconst_idct_islow_sse2) PRIVATE EXTN(jconst_idct_islow_sse2): @@ -106,7 +106,7 @@ %define WK_NUM 12 align 16 - global EXTN(jsimd_idct_islow_sse2) + global EXTN(jsimd_idct_islow_sse2) PRIVATE EXTN(jsimd_idct_islow_sse2): push rbp Index: simd/jfmmxfst.asm =================================================================== --- simd/jfmmxfst.asm (revision 829) +++ simd/jfmmxfst.asm (working copy) @@ -52,7 +52,7 @@ %define CONST_SHIFT (16 - PRE_MULTIPLY_SCALE_BITS - CONST_BITS) alignz 16 - global EXTN(jconst_fdct_ifast_mmx) + global EXTN(jconst_fdct_ifast_mmx) PRIVATE EXTN(jconst_fdct_ifast_mmx): @@ -80,7 +80,7 @@ %define WK_NUM 2 align 16 - global EXTN(jsimd_fdct_ifast_mmx) + global EXTN(jsimd_fdct_ifast_mmx) PRIVATE EXTN(jsimd_fdct_ifast_mmx): push ebp Index: jdarith.c =================================================================== --- jdarith.c (revision 829) +++ jdarith.c (working copy) @@ -150,8 +150,8 @@ */ sv = *st; qe = 
jpeg_aritab[sv & 0x7F]; /* => Qe_Value */ - nl = qe & 0xFF; qe >>= 8; /* Next_Index_LPS + Switch_MPS */ - nm = qe & 0xFF; qe >>= 8; /* Next_Index_MPS */ + nl = (unsigned char) qe & 0xFF; qe >>= 8; /* Next_Index_LPS + Switch_MPS */ + nm = (unsigned char) qe & 0xFF; qe >>= 8; /* Next_Index_MPS */ /* Decode & estimation procedures per sections D.2.4 & D.2.5 */ temp = e->a - qe; Index: jdhuff.c =================================================================== --- jdhuff.c (revision 829) +++ jdhuff.c (working copy) @@ -742,7 +742,7 @@ * this module, since we'll just re-assign them on the next call.) */ -#define BUFSIZE (DCTSIZE2 * 2) +#define BUFSIZE (DCTSIZE2 * 2u) METHODDEF(boolean) decode_mcu (j_decompress_ptr cinfo, JBLOCKROW *MCU_data) Index: jchuff.c =================================================================== --- jchuff.c (revision 1219) +++ jchuff.c (revision 1220) @@ -22,8 +22,36 @@ #include "jchuff.h" /* Declarations shared with jcphuff.c */ #include <limits.h> +/* + * NOTE: If USE_CLZ_INTRINSIC is defined, then clz/bsr instructions will be + * used for bit counting rather than the lookup table. This will reduce the + * memory footprint by 64k, which is important for some mobile applications + * that create many isolated instances of libjpeg-turbo (web browsers, for + * instance.) This may improve performance on some mobile platforms as well. + * This feature is enabled by default only on ARM processors, because some x86 + * chips have a slow implementation of bsr, and the use of clz/bsr cannot be + * shown to have a significant performance impact even on the x86 chips that + * have a fast implementation of it. When building for ARMv6, you can + * explicitly disable the use of clz/bsr by adding -mthumb to the compiler + * flags (this defines __thumb__). 
+ */ + +/* NOTE: Both GCC and Clang define __GNUC__ */ +#if defined __GNUC__ && defined __arm__ +#if !defined __thumb__ || defined __thumb2__ +#define USE_CLZ_INTRINSIC +#endif +#endif + +#ifdef USE_CLZ_INTRINSIC +#define JPEG_NBITS_NONZERO(x) (32 - __builtin_clz(x)) +#define JPEG_NBITS(x) (x ? JPEG_NBITS_NONZERO(x) : 0) +#else static unsigned char jpeg_nbits_table[65536]; static int jpeg_nbits_table_init = 0; +#define JPEG_NBITS(x) (jpeg_nbits_table[x]) +#define JPEG_NBITS_NONZERO(x) JPEG_NBITS(x) +#endif #ifndef min #define min(a,b) ((a)<(b)?(a):(b)) @@ -272,6 +300,7 @@ dtbl->ehufsi[i] = huffsize[p]; } +#ifndef USE_CLZ_INTRINSIC if(!jpeg_nbits_table_init) { for(i = 0; i < 65536; i++) { int nbits = 0, temp = i; @@ -280,6 +309,7 @@ } jpeg_nbits_table_init = 1; } +#endif } @@ -482,7 +512,7 @@ temp2 += temp3; /* Find the number of bits needed for the magnitude of the coefficient */ - nbits = jpeg_nbits_table[temp]; + nbits = JPEG_NBITS(temp); /* Emit the Huffman-coded symbol for the number of bits */ code = dctbl->ehufco[nbits]; @@ -516,7 +546,7 @@ temp ^= temp3; \ temp -= temp3; \ temp2 += temp3; \ - nbits = jpeg_nbits_table[temp]; \ + nbits = JPEG_NBITS_NONZERO(temp); \ /* if run length > 15, must emit special run-length-16 codes (0xF0) */ \ while (r > 15) { \ EMIT_BITS(code_0xf0, size_0xf0) \ Index: simd/jsimd_arm64.c =================================================================== --- /dev/null +++ simd/jsimd_arm64.c @@ -0,0 +1,544 @@ +/* + * jsimd_arm64.c + * + * Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB + * Copyright 2009-2011, 2013-2014 D. R. Commander + * + * Based on the x86 SIMD extension for IJG JPEG library, + * Copyright (C) 1999-2006, MIYASAKA Masaru. + * For conditions of distribution and use, see copyright notice in jsimdext.inc + * + * This file contains the interface between the "normal" portions + * of the library and the SIMD implementations when running on a + * 64-bit ARM architecture. 
+ */ + +#define JPEG_INTERNALS +#include "../jinclude.h" +#include "../jpeglib.h" +#include "../jsimd.h" +#include "../jdct.h" +#include "../jsimddct.h" +#include "jsimd.h" + +#include <stdio.h> +#include <string.h> +#include <ctype.h> + +static unsigned int simd_support = ~0; + +/* + * Check what SIMD accelerations are supported. + * + * FIXME: This code is racy under a multi-threaded environment. + */ + +/* + * ARMv8 architectures support NEON extensions by default. + * It is no longer optional as it was with ARMv7. + */ + + +LOCAL(void) +init_simd (void) +{ + char *env = NULL; + + if (simd_support != ~0U) + return; + + simd_support = 0; + + simd_support |= JSIMD_ARM_NEON; + + /* Force different settings through environment variables */ + env = getenv("JSIMD_FORCENEON"); + if ((env != NULL) && (strcmp(env, "1") == 0)) + simd_support &= JSIMD_ARM_NEON; + env = getenv("JSIMD_FORCENONE"); + if ((env != NULL) && (strcmp(env, "1") == 0)) + simd_support = 0; +} + +GLOBAL(int) +jsimd_can_rgb_ycc (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_rgb_gray (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_ycc_rgb (void) +{ + init_simd(); + + /* The code is optimised for these values only */ + if (BITS_IN_JSAMPLE != 8) + return 0; + if (sizeof(JDIMENSION) != 4) + return 0; + if ((RGB_PIXELSIZE != 3) && (RGB_PIXELSIZE != 4)) + return 0; + + if (simd_support & JSIMD_ARM_NEON) + return 1; + + return 0; +} + +GLOBAL(int) +jsimd_can_ycc_rgb565 (void) +{ + init_simd(); + + /* The code is optimised for these values only */ + if (BITS_IN_JSAMPLE != 8) + return 0; + if (sizeof(JDIMENSION) != 4) + return 0; + + if (simd_support & JSIMD_ARM_NEON) + return 1; + + return 0; +} + +GLOBAL(void) +jsimd_rgb_ycc_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION output_row, int num_rows) +{ +} + +GLOBAL(void) +jsimd_rgb_gray_convert (j_compress_ptr cinfo, + JSAMPARRAY input_buf, JSAMPIMAGE output_buf, + JDIMENSION 
output_row, int num_rows) +{ +} + +GLOBAL(void) +jsimd_ycc_rgb_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + void (*neonfct)(JDIMENSION, JSAMPIMAGE, JDIMENSION, JSAMPARRAY, int); + + switch(cinfo->out_color_space) { + case JCS_EXT_RGB: + neonfct=jsimd_ycc_extrgb_convert_neon; + break; + case JCS_EXT_RGBX: + case JCS_EXT_RGBA: + neonfct=jsimd_ycc_extrgbx_convert_neon; + break; + case JCS_EXT_BGR: + neonfct=jsimd_ycc_extbgr_convert_neon; + break; + case JCS_EXT_BGRX: + case JCS_EXT_BGRA: + neonfct=jsimd_ycc_extbgrx_convert_neon; + break; + case JCS_EXT_XBGR: + case JCS_EXT_ABGR: + neonfct=jsimd_ycc_extxbgr_convert_neon; + break; + case JCS_EXT_XRGB: + case JCS_EXT_ARGB: + neonfct=jsimd_ycc_extxrgb_convert_neon; + break; + default: + neonfct=jsimd_ycc_extrgb_convert_neon; + break; + } + + if (simd_support & JSIMD_ARM_NEON) + neonfct(cinfo->output_width, input_buf, input_row, output_buf, num_rows); +} + +GLOBAL(void) +jsimd_ycc_rgb565_convert (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, JDIMENSION input_row, + JSAMPARRAY output_buf, int num_rows) +{ + if (simd_support & JSIMD_ARM_NEON) + jsimd_ycc_rgb565_convert_neon(cinfo->output_width, input_buf, input_row, + output_buf, num_rows); +} + +GLOBAL(int) +jsimd_can_h2v2_downsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_h2v1_downsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_h2v2_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ +} + +GLOBAL(void) +jsimd_h2v1_downsample (j_compress_ptr cinfo, jpeg_component_info * compptr, + JSAMPARRAY input_data, JSAMPARRAY output_data) +{ +} + +GLOBAL(int) +jsimd_can_h2v2_upsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_h2v1_upsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_h2v2_upsample (j_decompress_ptr cinfo, + jpeg_component_info * 
compptr, + JSAMPARRAY input_data, + JSAMPARRAY * output_data_ptr) +{ +} + +GLOBAL(void) +jsimd_h2v1_upsample (j_decompress_ptr cinfo, + jpeg_component_info * compptr, + JSAMPARRAY input_data, + JSAMPARRAY * output_data_ptr) +{ +} + +GLOBAL(int) +jsimd_can_h2v2_fancy_upsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_h2v1_fancy_upsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_h2v2_fancy_upsample (j_decompress_ptr cinfo, + jpeg_component_info * compptr, + JSAMPARRAY input_data, + JSAMPARRAY * output_data_ptr) +{ +} + +GLOBAL(void) +jsimd_h2v1_fancy_upsample (j_decompress_ptr cinfo, + jpeg_component_info * compptr, + JSAMPARRAY input_data, + JSAMPARRAY * output_data_ptr) +{ +} + +GLOBAL(int) +jsimd_can_h2v2_merged_upsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_h2v1_merged_upsample (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_h2v2_merged_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, + JDIMENSION in_row_group_ctr, + JSAMPARRAY output_buf) +{ +} + +GLOBAL(void) +jsimd_h2v1_merged_upsample (j_decompress_ptr cinfo, + JSAMPIMAGE input_buf, + JDIMENSION in_row_group_ctr, + JSAMPARRAY output_buf) +{ +} + +GLOBAL(int) +jsimd_can_convsamp (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_convsamp_float (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_convsamp (JSAMPARRAY sample_data, JDIMENSION start_col, + DCTELEM * workspace) +{ +} + +GLOBAL(void) +jsimd_convsamp_float (JSAMPARRAY sample_data, JDIMENSION start_col, + FAST_FLOAT * workspace) +{ +} + +GLOBAL(int) +jsimd_can_fdct_islow (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_fdct_ifast (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_fdct_float (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_fdct_islow (DCTELEM * data) +{ +} + +GLOBAL(void) +jsimd_fdct_ifast (DCTELEM * data) +{ +} + +GLOBAL(void) +jsimd_fdct_float 
(FAST_FLOAT * data) +{ +} + +GLOBAL(int) +jsimd_can_quantize (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(int) +jsimd_can_quantize_float (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_quantize (JCOEFPTR coef_block, DCTELEM * divisors, + DCTELEM * workspace) +{ +} + +GLOBAL(void) +jsimd_quantize_float (JCOEFPTR coef_block, FAST_FLOAT * divisors, + FAST_FLOAT * workspace) +{ +} + +GLOBAL(int) +jsimd_can_idct_2x2 (void) +{ + init_simd(); + + /* The code is optimised for these values only */ + if (DCTSIZE != 8) + return 0; + if (sizeof(JCOEF) != 2) + return 0; + if (BITS_IN_JSAMPLE != 8) + return 0; + if (sizeof(JDIMENSION) != 4) + return 0; + if (sizeof(ISLOW_MULT_TYPE) != 2) + return 0; + + if (simd_support & JSIMD_ARM_NEON) + return 1; + + return 0; +} + +GLOBAL(int) +jsimd_can_idct_4x4 (void) +{ + init_simd(); + + /* The code is optimised for these values only */ + if (DCTSIZE != 8) + return 0; + if (sizeof(JCOEF) != 2) + return 0; + if (BITS_IN_JSAMPLE != 8) + return 0; + if (sizeof(JDIMENSION) != 4) + return 0; + if (sizeof(ISLOW_MULT_TYPE) != 2) + return 0; + + if (simd_support & JSIMD_ARM_NEON) + return 1; + + return 0; +} + +GLOBAL(void) +jsimd_idct_2x2 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, + JDIMENSION output_col) +{ + if (simd_support & JSIMD_ARM_NEON) + jsimd_idct_2x2_neon(compptr->dct_table, coef_block, output_buf, + output_col); +} + +GLOBAL(void) +jsimd_idct_4x4 (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, + JDIMENSION output_col) +{ + if (simd_support & JSIMD_ARM_NEON) + jsimd_idct_4x4_neon(compptr->dct_table, coef_block, output_buf, + output_col); +} + +GLOBAL(int) +jsimd_can_idct_islow (void) +{ + init_simd(); + + /* The code is optimised for these values only */ + if (DCTSIZE != 8) + return 0; + if (sizeof(JCOEF) != 2) + return 0; + if (BITS_IN_JSAMPLE != 8) + return 0; + if (sizeof(JDIMENSION) != 4) + 
return 0; + if (sizeof(ISLOW_MULT_TYPE) != 2) + return 0; + + if (simd_support & JSIMD_ARM_NEON) + return 1; + + return 0; +} + +GLOBAL(int) +jsimd_can_idct_ifast (void) +{ + init_simd(); + + /* The code is optimised for these values only */ + if (DCTSIZE != 8) + return 0; + if (sizeof(JCOEF) != 2) + return 0; + if (BITS_IN_JSAMPLE != 8) + return 0; + if (sizeof(JDIMENSION) != 4) + return 0; + if (sizeof(IFAST_MULT_TYPE) != 2) + return 0; + if (IFAST_SCALE_BITS != 2) + return 0; + + if (simd_support & JSIMD_ARM_NEON) + return 1; + + return 0; +} + +GLOBAL(int) +jsimd_can_idct_float (void) +{ + init_simd(); + + return 0; +} + +GLOBAL(void) +jsimd_idct_islow (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, + JDIMENSION output_col) +{ + if (simd_support & JSIMD_ARM_NEON) + jsimd_idct_islow_neon(compptr->dct_table, coef_block, output_buf, + output_col); +} + +GLOBAL(void) +jsimd_idct_ifast (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, + JDIMENSION output_col) +{ + if (simd_support & JSIMD_ARM_NEON) + jsimd_idct_ifast_neon(compptr->dct_table, coef_block, output_buf, + output_col); +} + +GLOBAL(void) +jsimd_idct_float (j_decompress_ptr cinfo, jpeg_component_info * compptr, + JCOEFPTR coef_block, JSAMPARRAY output_buf, + JDIMENSION output_col) +{ +} Index: simd/jsimd_arm64_neon.S new file mode 100644 =================================================================== --- /dev/null +++ simd/jsimd_arm64_neon.S @@ -0,0 +1,1861 @@ +/* + * ARMv8 NEON optimizations for libjpeg-turbo + * + * Copyright (C) 2009-2011 Nokia Corporation and/or its subsidiary(-ies). + * All rights reserved. + * Author: Siarhei Siamashka <siarhei.siamashka@nokia.com> + * Copyright (C) 2013-2014, Linaro Limited + * Author: Ragesh Radhakrishnan <ragesh.r@linaro.org> + * + * This software is provided 'as-is', without any express or implied + * warranty. 
In no event will the authors be held liable for any damages + * arising from the use of this software. + * + * Permission is granted to anyone to use this software for any purpose, + * including commercial applications, and to alter it and redistribute it + * freely, subject to the following restrictions: + * + * 1. The origin of this software must not be misrepresented; you must not + * claim that you wrote the original software. If you use this software + * in a product, an acknowledgment in the product documentation would be + * appreciated but is not required. + * 2. Altered source versions must be plainly marked as such, and must not be + * misrepresented as being the original software. + * 3. This notice may not be removed or altered from any source distribution. + */ + +#if defined(__linux__) && defined(__ELF__) +.section .note.GNU-stack,"",%progbits /* mark stack as non-executable */ +#endif + +.text +.arch armv8-a+fp+simd + + +#define RESPECT_STRICT_ALIGNMENT 1 + + +/*****************************************************************************/ + +/* Supplementary macro for setting function attributes */ +.macro asm_function fname +#ifdef __APPLE__ + .globl _\fname +_\fname: +#else + .global \fname +#ifdef __ELF__ + .hidden \fname + .type \fname, %function +#endif +\fname: +#endif +.endm + +/* Transpose elements of single 128 bit registers */ +.macro transpose_single x0,x1,xi,xilen,literal + ins \xi\xilen[0], \x0\xilen[0] + ins \x1\xilen[0], \x0\xilen[1] + trn1 \x0\literal, \x0\literal, \x1\literal + trn2 \x1\literal, \xi\literal, \x1\literal +.endm + +/* Transpose elements of 2 differnet registers */ +.macro transpose x0,x1,xi,xilen,literal + mov \xi\xilen, \x0\xilen + trn1 \x0\literal, \x0\literal, \x1\literal + trn2 \x1\literal, \xi\literal, \x1\literal +.endm + +/* Transpose a block of 4x4 coefficients in four 64-bit registers */ +.macro transpose_4x4_32 x0,x0len x1,x1len x2,x2len x3,x3len,xi,xilen + mov \xi\xilen, \x0\xilen + trn1 \x0\x0len, 
\x0\x0len, \x2\x2len + trn2 \x2\x2len, \xi\x0len, \x2\x2len + mov \xi\xilen, \x1\xilen + trn1 \x1\x1len, \x1\x1len, \x3\x3len + trn2 \x3\x3len, \xi\x1len, \x3\x3len +.endm + +.macro transpose_4x4_16 x0,x0len x1,x1len, x2,x2len, x3,x3len,xi,xilen + mov \xi\xilen, \x0\xilen + trn1 \x0\x0len, \x0\x0len, \x1\x1len + trn2 \x1\x2len, \xi\x0len, \x1\x2len + mov \xi\xilen, \x2\xilen + trn1 \x2\x2len, \x2\x2len, \x3\x3len + trn2 \x3\x2len, \xi\x1len, \x3\x3len +.endm + +.macro transpose_4x4 x0, x1, x2, x3,x5 + transpose_4x4_16 \x0,.4h, \x1,.4h, \x2,.4h,\x3,.4h,\x5,.16b + transpose_4x4_32 \x0,.2s, \x1,.2s, \x2,.2s,\x3,.2s,\x5,.16b +.endm + + +#define CENTERJSAMPLE 128 + +/*****************************************************************************/ + +/* + * Perform dequantization and inverse DCT on one block of coefficients. + * + * GLOBAL(void) + * jsimd_idct_islow_neon (void * dct_table, JCOEFPTR coef_block, + * JSAMPARRAY output_buf, JDIMENSION output_col) + */ + +#define FIX_0_298631336 (2446) +#define FIX_0_390180644 (3196) +#define FIX_0_541196100 (4433) +#define FIX_0_765366865 (6270) +#define FIX_0_899976223 (7373) +#define FIX_1_175875602 (9633) +#define FIX_1_501321110 (12299) +#define FIX_1_847759065 (15137) +#define FIX_1_961570560 (16069) +#define FIX_2_053119869 (16819) +#define FIX_2_562915447 (20995) +#define FIX_3_072711026 (25172) + +#define FIX_1_175875602_MINUS_1_961570560 (FIX_1_175875602 - FIX_1_961570560) +#define FIX_1_175875602_MINUS_0_390180644 (FIX_1_175875602 - FIX_0_390180644) +#define FIX_0_541196100_MINUS_1_847759065 (FIX_0_541196100 - FIX_1_847759065) +#define FIX_3_072711026_MINUS_2_562915447 (FIX_3_072711026 - FIX_2_562915447) +#define FIX_0_298631336_MINUS_0_899976223 (FIX_0_298631336 - FIX_0_899976223) +#define FIX_1_501321110_MINUS_0_899976223 (FIX_1_501321110 - FIX_0_899976223) +#define FIX_2_053119869_MINUS_2_562915447 (FIX_2_053119869 - FIX_2_562915447) +#define FIX_0_541196100_PLUS_0_765366865 (FIX_0_541196100 + FIX_0_765366865) + 
+/* + * Reference SIMD-friendly 1-D ISLOW iDCT C implementation. + * Uses some ideas from the comments in 'simd/jiss2int-64.asm' + */ +#define REF_1D_IDCT(xrow0, xrow1, xrow2, xrow3, xrow4, xrow5, xrow6, xrow7) \ +{ \ + DCTELEM row0, row1, row2, row3, row4, row5, row6, row7; \ + INT32 q1, q2, q3, q4, q5, q6, q7; \ + INT32 tmp11_plus_tmp2, tmp11_minus_tmp2; \ + \ + /* 1-D iDCT input data */ \ + row0 = xrow0; \ + row1 = xrow1; \ + row2 = xrow2; \ + row3 = xrow3; \ + row4 = xrow4; \ + row5 = xrow5; \ + row6 = xrow6; \ + row7 = xrow7; \ + \ + q5 = row7 + row3; \ + q4 = row5 + row1; \ + q6 = MULTIPLY(q5, FIX_1_175875602_MINUS_1_961570560) + \ + MULTIPLY(q4, FIX_1_175875602); \ + q7 = MULTIPLY(q5, FIX_1_175875602) + \ + MULTIPLY(q4, FIX_1_175875602_MINUS_0_390180644); \ + q2 = MULTIPLY(row2, FIX_0_541196100) + \ + MULTIPLY(row6, FIX_0_541196100_MINUS_1_847759065); \ + q4 = q6; \ + q3 = ((INT32) row0 - (INT32) row4) << 13; \ + q6 += MULTIPLY(row5, -FIX_2_562915447) + \ + MULTIPLY(row3, FIX_3_072711026_MINUS_2_562915447); \ + /* now we can use q1 (reloadable constants have been used up) */ \ + q1 = q3 + q2; \ + q4 += MULTIPLY(row7, FIX_0_298631336_MINUS_0_899976223) + \ + MULTIPLY(row1, -FIX_0_899976223); \ + q5 = q7; \ + q1 = q1 + q6; \ + q7 += MULTIPLY(row7, -FIX_0_899976223) + \ + MULTIPLY(row1, FIX_1_501321110_MINUS_0_899976223); \ + \ + /* (tmp11 + tmp2) has been calculated (out_row1 before descale) */ \ + tmp11_plus_tmp2 = q1; \ + row1 = 0; \ + \ + q1 = q1 - q6; \ + q5 += MULTIPLY(row5, FIX_2_053119869_MINUS_2_562915447) + \ + MULTIPLY(row3, -FIX_2_562915447); \ + q1 = q1 - q6; \ + q6 = MULTIPLY(row2, FIX_0_541196100_PLUS_0_765366865) + \ + MULTIPLY(row6, FIX_0_541196100); \ + q3 = q3 - q2; \ + \ + /* (tmp11 - tmp2) has been calculated (out_row6 before descale) */ \ + tmp11_minus_tmp2 = q1; \ + \ + q1 = ((INT32) row0 + (INT32) row4) << 13; \ + q2 = q1 + q6; \ + q1 = q1 - q6; \ + \ + /* pick up the results */ \ + tmp0 = q4; \ + tmp1 = q5; \ + tmp2 = (tmp11_plus_tmp2 - 
tmp11_minus_tmp2) / 2; \ + tmp3 = q7; \ + tmp10 = q2; \ + tmp11 = (tmp11_plus_tmp2 + tmp11_minus_tmp2) / 2; \ + tmp12 = q3; \ + tmp13 = q1; \ +} + +#define XFIX_0_899976223 v0.4h[0] +#define XFIX_0_541196100 v0.4h[1] +#define XFIX_2_562915447 v0.4h[2] +#define XFIX_0_298631336_MINUS_0_899976223 v0.4h[3] +#define XFIX_1_501321110_MINUS_0_899976223 v1.4h[0] +#define XFIX_2_053119869_MINUS_2_562915447 v1.4h[1] +#define XFIX_0_541196100_PLUS_0_765366865 v1.4h[2] +#define XFIX_1_175875602 v1.4h[3] +#define XFIX_1_175875602_MINUS_0_390180644 v2.4h[0] +#define XFIX_0_541196100_MINUS_1_847759065 v2.4h[1] +#define XFIX_3_072711026_MINUS_2_562915447 v2.4h[2] +#define XFIX_1_175875602_MINUS_1_961570560 v2.4h[3] + +.balign 16 +jsimd_idct_islow_neon_consts: + .short FIX_0_899976223 /* d0[0] */ + .short FIX_0_541196100 /* d0[1] */ + .short FIX_2_562915447 /* d0[2] */ + .short FIX_0_298631336_MINUS_0_899976223 /* d0[3] */ + .short FIX_1_501321110_MINUS_0_899976223 /* d1[0] */ + .short FIX_2_053119869_MINUS_2_562915447 /* d1[1] */ + .short FIX_0_541196100_PLUS_0_765366865 /* d1[2] */ + .short FIX_1_175875602 /* d1[3] */ + /* reloadable constants */ + .short FIX_1_175875602_MINUS_0_390180644 /* d2[0] */ + .short FIX_0_541196100_MINUS_1_847759065 /* d2[1] */ + .short FIX_3_072711026_MINUS_2_562915447 /* d2[2] */ + .short FIX_1_175875602_MINUS_1_961570560 /* d2[3] */ + +asm_function jsimd_idct_islow_neon + + DCT_TABLE .req x0 + COEF_BLOCK .req x1 + OUTPUT_BUF .req x2 + OUTPUT_COL .req x3 + TMP1 .req x0 + TMP2 .req x1 + TMP3 .req x2 + TMP4 .req x15 + + ROW0L .req v16 + ROW0R .req v17 + ROW1L .req v18 + ROW1R .req v19 + ROW2L .req v20 + ROW2R .req v21 + ROW3L .req v22 + ROW3R .req v23 + ROW4L .req v24 + ROW4R .req v25 + ROW5L .req v26 + ROW5R .req v27 + ROW6L .req v28 + ROW6R .req v29 + ROW7L .req v30 + ROW7R .req v31 + /* Save all NEON registers and x15 (32 NEON registers * 8 bytes + 16) */ + sub sp, sp, 272 + str x15, [sp], 16 + adr x15, jsimd_idct_islow_neon_consts + st1 {v0.8b - 
v3.8b}, [sp], 32 + st1 {v4.8b - v7.8b}, [sp], 32 + st1 {v8.8b - v11.8b}, [sp], 32 + st1 {v12.8b - v15.8b}, [sp], 32 + st1 {v16.8b - v19.8b}, [sp], 32 + st1 {v20.8b - v23.8b}, [sp], 32 + st1 {v24.8b - v27.8b}, [sp], 32 + st1 {v28.8b - v31.8b}, [sp], 32 + ld1 {v16.4h, v17.4h, v18.4h, v19.4h}, [COEF_BLOCK], 32 + ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DCT_TABLE], 32 + ld1 {v20.4h, v21.4h, v22.4h, v23.4h}, [COEF_BLOCK], 32 + mul v16.4h, v16.4h, v0.4h + mul v17.4h, v17.4h, v1.4h + ins v16.2d[1], v17.2d[0] /* 128 bit q8 */ + ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [DCT_TABLE], 32 + mul v18.4h, v18.4h, v2.4h + mul v19.4h, v19.4h, v3.4h + ins v18.2d[1], v19.2d[0] /* 128 bit q9 */ + ld1 {v24.4h, v25.4h, v26.4h, v27.4h}, [COEF_BLOCK], 32 + mul v20.4h, v20.4h, v4.4h + mul v21.4h, v21.4h, v5.4h + ins v20.2d[1], v21.2d[0] /* 128 bit q10 */ + ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [DCT_TABLE], 32 + mul v22.4h, v22.4h, v6.4h + mul v23.4h, v23.4h, v7.4h + ins v22.2d[1], v23.2d[0] /* 128 bit q11 */ + ld1 {v28.4h, v29.4h, v30.4h, v31.4h}, [COEF_BLOCK] + mul v24.4h, v24.4h, v0.4h + mul v25.4h, v25.4h, v1.4h + ins v24.2d[1], v25.2d[0] /* 128 bit q12 */ + ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [DCT_TABLE], 32 + mul v28.4h, v28.4h, v4.4h + mul v29.4h, v29.4h, v5.4h + ins v28.2d[1], v29.2d[0] /* 128 bit q14 */ + mul v26.4h, v26.4h, v2.4h + mul v27.4h, v27.4h, v3.4h + ins v26.2d[1], v27.2d[0] /* 128 bit q13 */ + ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [x15] /* load constants */ + add x15, x15, #16 + mul v30.4h, v30.4h, v6.4h + mul v31.4h, v31.4h, v7.4h + ins v30.2d[1], v31.2d[0] /* 128 bit q15 */ + /* Go to the bottom of the stack */ + sub sp, sp, 352 + stp x4, x5, [sp], 16 + st1 {v8.4h - v11.4h}, [sp], 32 /* save NEON registers */ + st1 {v12.4h - v15.4h}, [sp], 32 + /* 1-D IDCT, pass 1, left 4x8 half */ + add v4.4h, ROW7L.4h, ROW3L.4h + add v5.4h, ROW5L.4h, ROW1L.4h + smull v12.4s, v4.4h, XFIX_1_175875602_MINUS_1_961570560 + smlal v12.4s, v5.4h, XFIX_1_175875602 + smull v14.4s, v4.4h, XFIX_1_175875602 + /* Check 
for the zero coefficients in the right 4x8 half */ + smlal v14.4s, v5.4h, XFIX_1_175875602_MINUS_0_390180644 + ssubl v6.4s, ROW0L.4h, ROW4L.4h + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 1 * 8))] + smull v4.4s, ROW2L.4h, XFIX_0_541196100 + smlal v4.4s, ROW6L.4h, XFIX_0_541196100_MINUS_1_847759065 + orr x0, x4, x5 + mov v8.16b, v12.16b + smlsl v12.4s, ROW5L.4h, XFIX_2_562915447 + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 2 * 8))] + smlal v12.4s, ROW3L.4h, XFIX_3_072711026_MINUS_2_562915447 + shl v6.4s, v6.4s, #13 + orr x0, x0, x4 + smlsl v8.4s, ROW1L.4h, XFIX_0_899976223 + orr x0, x0 , x5 + add v2.4s, v6.4s, v4.4s + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 3 * 8))] + mov v10.16b, v14.16b + add v2.4s, v2.4s, v12.4s + orr x0, x0, x4 + smlsl v14.4s, ROW7L.4h, XFIX_0_899976223 + orr x0, x0, x5 + smlal v14.4s, ROW1L.4h, XFIX_1_501321110_MINUS_0_899976223 + rshrn ROW1L.4h, v2.4s, #11 + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 4 * 8))] + sub v2.4s, v2.4s, v12.4s + smlal v10.4s, ROW5L.4h, XFIX_2_053119869_MINUS_2_562915447 + orr x0, x0, x4 + smlsl v10.4s, ROW3L.4h, XFIX_2_562915447 + orr x0, x0, x5 + sub v2.4s, v2.4s, v12.4s + smull v12.4s, ROW2L.4h, XFIX_0_541196100_PLUS_0_765366865 + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 5 * 8))] + smlal v12.4s, ROW6L.4h, XFIX_0_541196100 + sub v6.4s, v6.4s, v4.4s + orr x0, x0, x4 + rshrn ROW6L.4h, v2.4s, #11 + orr x0, x0, x5 + add v2.4s, v6.4s, v10.4s + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 6 * 8))] + sub v6.4s, v6.4s, v10.4s + saddl v10.4s, ROW0L.4h, ROW4L.4h + orr x0, x0, x4 + rshrn ROW2L.4h, v2.4s, #11 + orr x0, x0, x5 + rshrn ROW5L.4h, v6.4s, #11 + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 7 * 8))] + shl v10.4s, v10.4s, #13 + smlal v8.4s, ROW7L.4h, XFIX_0_298631336_MINUS_0_899976223 + orr x0, x0, x4 + add v4.4s, v10.4s, v12.4s + orr x0, x0, x5 + cmp x0, #0 /* orrs instruction removed */ + sub v2.4s, v10.4s, v12.4s + add v12.4s, v4.4s, v14.4s + ldp w4, w5, [COEF_BLOCK, #(-96 + 2 * (4 + 0 * 8))] + sub v4.4s, v4.4s, v14.4s + 
add v10.4s, v2.4s, v8.4s + orr x0, x4, x5 + sub v6.4s, v2.4s, v8.4s + /* pop {x4, x5} */ + sub sp, sp, 80 + ldp x4, x5, [sp], 16 + rshrn ROW7L.4h, v4.4s, #11 + rshrn ROW3L.4h, v10.4s, #11 + rshrn ROW0L.4h, v12.4s, #11 + rshrn ROW4L.4h, v6.4s, #11 + + beq 3f /* Go to do some special handling for the sparse right 4x8 half */ + + /* 1-D IDCT, pass 1, right 4x8 half */ + ld1 {v2.4h}, [x15] /* reload constants */ + add v10.4h, ROW7R.4h, ROW3R.4h + add v8.4h, ROW5R.4h, ROW1R.4h + /* Transpose ROW6L <-> ROW7L (v3 available free register) */ + transpose ROW6L, ROW7L, v3, .16b, .4h + smull v12.4s, v10.4h, XFIX_1_175875602_MINUS_1_961570560 + smlal v12.4s, v8.4h, XFIX_1_175875602 + /* Transpose ROW2L <-> ROW3L (v3 available free register) */ + transpose ROW2L, ROW3L, v3, .16b, .4h + smull v14.4s, v10.4h, XFIX_1_175875602 + smlal v14.4s, v8.4h, XFIX_1_175875602_MINUS_0_390180644 + /* Transpose ROW0L <-> ROW1L (v3 available free register) */ + transpose ROW0L, ROW1L, v3, .16b, .4h + ssubl v6.4s, ROW0R.4h, ROW4R.4h + smull v4.4s, ROW2R.4h, XFIX_0_541196100 + smlal v4.4s, ROW6R.4h, XFIX_0_541196100_MINUS_1_847759065 + /* Transpose ROW4L <-> ROW5L (v3 available free register) */ + transpose ROW4L, ROW5L, v3, .16b, .4h + mov v8.16b, v12.16b + smlsl v12.4s, ROW5R.4h, XFIX_2_562915447 + smlal v12.4s, ROW3R.4h, XFIX_3_072711026_MINUS_2_562915447 + /* Transpose ROW1L <-> ROW3L (v3 available free register) */ + transpose ROW1L, ROW3L, v3, .16b, .2s + shl v6.4s, v6.4s, #13 + smlsl v8.4s, ROW1R.4h, XFIX_0_899976223 + /* Transpose ROW4L <-> ROW6L (v3 available free register) */ + transpose ROW4L, ROW6L, v3, .16b, .2s + add v2.4s, v6.4s, v4.4s + mov v10.16b, v14.16b + add v2.4s, v2.4s, v12.4s + /* Transpose ROW0L <-> ROW2L (v3 available free register) */ + transpose ROW0L, ROW2L, v3, .16b, .2s + smlsl v14.4s, ROW7R.4h, XFIX_0_899976223 + smlal v14.4s, ROW1R.4h, XFIX_1_501321110_MINUS_0_899976223 + rshrn ROW1R.4h, v2.4s, #11 + /* Transpose ROW5L <-> ROW7L (v3 available free register) */ + 
transpose ROW5L, ROW7L, v3, .16b, .2s + sub v2.4s, v2.4s, v12.4s + smlal v10.4s, ROW5R.4h, XFIX_2_053119869_MINUS_2_562915447 + smlsl v10.4s, ROW3R.4h, XFIX_2_562915447 + sub v2.4s, v2.4s, v12.4s + smull v12.4s, ROW2R.4h, XFIX_0_541196100_PLUS_0_765366865 + smlal v12.4s, ROW6R.4h, XFIX_0_541196100 + sub v6.4s, v6.4s, v4.4s + rshrn ROW6R.4h, v2.4s, #11 + add v2.4s, v6.4s, v10.4s + sub v6.4s, v6.4s, v10.4s + saddl v10.4s, ROW0R.4h, ROW4R.4h + rshrn ROW2R.4h, v2.4s, #11 + rshrn ROW5R.4h, v6.4s, #11 + shl v10.4s, v10.4s, #13 + smlal v8.4s, ROW7R.4h, XFIX_0_298631336_MINUS_0_899976223 + add v4.4s, v10.4s, v12.4s + sub v2.4s, v10.4s, v12.4s + add v12.4s, v4.4s, v14.4s + sub v4.4s, v4.4s, v14.4s + add v10.4s, v2.4s, v8.4s + sub v6.4s, v2.4s, v8.4s + rshrn ROW7R.4h, v4.4s, #11 + rshrn ROW3R.4h, v10.4s, #11 + rshrn ROW0R.4h, v12.4s, #11 + rshrn ROW4R.4h, v6.4s, #11 + /* Transpose right 4x8 half */ + transpose ROW6R, ROW7R, v3, .16b, .4h + transpose ROW2R, ROW3R, v3, .16b, .4h + transpose ROW0R, ROW1R, v3, .16b, .4h + transpose ROW4R, ROW5R, v3, .16b, .4h + transpose ROW1R, ROW3R, v3, .16b, .2s + transpose ROW4R, ROW6R, v3, .16b, .2s + transpose ROW0R, ROW2R, v3, .16b, .2s + transpose ROW5R, ROW7R, v3, .16b, .2s + +1: /* 1-D IDCT, pass 2 (normal variant), left 4x8 half */ + ld1 {v2.4h}, [x15] /* reload constants */ + smull v12.4S, ROW1R.4h, XFIX_1_175875602 /* ROW5L.4h <-> ROW1R.4h */ + smlal v12.4s, ROW1L.4h, XFIX_1_175875602 + smlal v12.4s, ROW3R.4h, XFIX_1_175875602_MINUS_1_961570560 /* ROW7L.4h <-> ROW3R.4h */ + smlal v12.4s, ROW3L.4h, XFIX_1_175875602_MINUS_1_961570560 + smull v14.4s, ROW3R.4h, XFIX_1_175875602 /* ROW7L.4h <-> ROW3R.4h */ + smlal v14.4s, ROW3L.4h, XFIX_1_175875602 + smlal v14.4s, ROW1R.4h, XFIX_1_175875602_MINUS_0_390180644 /* ROW5L.4h <-> ROW1R.4h */ + smlal v14.4s, ROW1L.4h, XFIX_1_175875602_MINUS_0_390180644 + ssubl v6.4s, ROW0L.4h, ROW0R.4h /* ROW4L.4h <-> ROW0R.4h */ + smull v4.4s, ROW2L.4h, XFIX_0_541196100 + smlal v4.4s, ROW2R.4h, 
XFIX_0_541196100_MINUS_1_847759065 /* ROW6L.4h <-> ROW2R.4h */ + mov v8.16b, v12.16b + smlsl v12.4s, ROW1R.4h, XFIX_2_562915447 /* ROW5L.4h <-> ROW1R.4h */ + smlal v12.4s, ROW3L.4h, XFIX_3_072711026_MINUS_2_562915447 + shl v6.4s, v6.4s, #13 + smlsl v8.4s, ROW1L.4h, XFIX_0_899976223 + add v2.4s, v6.4s, v4.4s + mov v10.16b, v14.16b + add v2.4s, v2.4s, v12.4s + smlsl v14.4s, ROW3R.4h, XFIX_0_899976223 /* ROW7L.4h <-> ROW3R.4h */ + smlal v14.4s, ROW1L.4h, XFIX_1_501321110_MINUS_0_899976223 + shrn ROW1L.4h, v2.4s, #16 + sub v2.4s, v2.4s, v12.4s + smlal v10.4s, ROW1R.4h, XFIX_2_053119869_MINUS_2_562915447 /* ROW5L.4h <-> ROW1R.4h */ + smlsl v10.4s, ROW3L.4h, XFIX_2_562915447 + sub v2.4s, v2.4s, v12.4s + smull v12.4s, ROW2L.4h, XFIX_0_541196100_PLUS_0_765366865 + smlal v12.4s, ROW2R.4h, XFIX_0_541196100 /* ROW6L.4h <-> ROW2R.4h */ + sub v6.4s, v6.4s, v4.4s + shrn ROW2R.4h, v2.4s, #16 /* ROW6L.4h <-> ROW2R.4h */ + add v2.4s, v6.4s, v10.4s + sub v6.4s, v6.4s, v10.4s + saddl v10.4s, ROW0L.4h, ROW0R.4h /* ROW4L.4h <-> ROW0R.4h */ + shrn ROW2L.4h, v2.4s, #16 + shrn ROW1R.4h, v6.4s, #16 /* ROW5L.4h <-> ROW1R.4h */ + shl v10.4s, v10.4s, #13 + smlal v8.4s, ROW3R.4h, XFIX_0_298631336_MINUS_0_899976223 /* ROW7L.4h <-> ROW3R.4h */ + add v4.4s, v10.4s, v12.4s + sub v2.4s, v10.4s, v12.4s + add v12.4s, v4.4s, v14.4s + sub v4.4s, v4.4s, v14.4s + add v10.4s, v2.4s, v8.4s + sub v6.4s, v2.4s, v8.4s + shrn ROW3R.4h, v4.4s, #16 /* ROW7L.4h <-> ROW3R.4h */ + shrn ROW3L.4h, v10.4s, #16 + shrn ROW0L.4h, v12.4s, #16 + shrn ROW0R.4h, v6.4s, #16 /* ROW4L.4h <-> ROW0R.4h */ + /* 1-D IDCT, pass 2, right 4x8 half */ + ld1 {v2.4h}, [x15] /* reload constants */ + smull v12.4s, ROW5R.4h, XFIX_1_175875602 + smlal v12.4s, ROW5L.4h, XFIX_1_175875602 /* ROW5L.4h <-> ROW1R.4h */ + smlal v12.4s, ROW7R.4h, XFIX_1_175875602_MINUS_1_961570560 + smlal v12.4s, ROW7L.4h, XFIX_1_175875602_MINUS_1_961570560 /* ROW7L.4h <-> ROW3R.4h */ + smull v14.4s, ROW7R.4h, XFIX_1_175875602 + smlal v14.4s, ROW7L.4h, 
XFIX_1_175875602 /* ROW7L.4h <-> ROW3R.4h */ + smlal v14.4s, ROW5R.4h, XFIX_1_175875602_MINUS_0_390180644 + smlal v14.4s, ROW5L.4h, XFIX_1_175875602_MINUS_0_390180644 /* ROW5L.4h <-> ROW1R.4h */ + ssubl v6.4s, ROW4L.4h, ROW4R.4h /* ROW4L.4h <-> ROW0R.4h */ + smull v4.4s, ROW6L.4h, XFIX_0_541196100 /* ROW6L.4h <-> ROW2R.4h */ + smlal v4.4s, ROW6R.4h, XFIX_0_541196100_MINUS_1_847759065 + mov v8.16b, v12.16b + smlsl v12.4s, ROW5R.4h, XFIX_2_562915447 + smlal v12.4s, ROW7L.4h, XFIX_3_072711026_MINUS_2_562915447 /* ROW7L.4h <-> ROW3R.4h */ + shl v6.4s, v6.4s, #13 + smlsl v8.4s, ROW5L.4h, XFIX_0_899976223 /* ROW5L.4h <-> ROW1R.4h */ + add v2.4s, v6.4s, v4.4s + mov v10.16b, v14.16b + add v2.4s, v2.4s, v12.4s + smlsl v14.4s, ROW7R.4h, XFIX_0_899976223 + smlal v14.4s, ROW5L.4h, XFIX_1_501321110_MINUS_0_899976223 /* ROW5L.4h <-> ROW1R.4h */ + shrn ROW5L.4h, v2.4s, #16 /* ROW5L.4h <-> ROW1R.4h */ + sub v2.4s, v2.4s, v12.4s + smlal v10.4s, ROW5R.4h, XFIX_2_053119869_MINUS_2_562915447 + smlsl v10.4s, ROW7L.4h, XFIX_2_562915447 /* ROW7L.4h <-> ROW3R.4h */ + sub v2.4s, v2.4s, v12.4s + smull v12.4s, ROW6L.4h, XFIX_0_541196100_PLUS_0_765366865 /* ROW6L.4h <-> ROW2R.4h */ + smlal v12.4s, ROW6R.4h, XFIX_0_541196100 + sub v6.4s, v6.4s, v4.4s + shrn ROW6R.4h, v2.4s, #16 + add v2.4s, v6.4s, v10.4s + sub v6.4s, v6.4s, v10.4s + saddl v10.4s, ROW4L.4h, ROW4R.4h /* ROW4L.4h <-> ROW0R.4h */ + shrn ROW6L.4h, v2.4s, #16 /* ROW6L.4h <-> ROW2R.4h */ + shrn ROW5R.4h, v6.4s, #16 + shl v10.4s, v10.4s, #13 + smlal v8.4s, ROW7R.4h, XFIX_0_298631336_MINUS_0_899976223 + add v4.4s, v10.4s, v12.4s + sub v2.4s, v10.4s, v12.4s + add v12.4s, v4.4s, v14.4s + sub v4.4s, v4.4s, v14.4s + add v10.4s, v2.4s, v8.4s + sub v6.4s, v2.4s, v8.4s + shrn ROW7R.4h, v4.4s, #16 + shrn ROW7L.4h, v10.4s, #16 /* ROW7L.4h <-> ROW3R.4h */ + shrn ROW4L.4h, v12.4s, #16 /* ROW4L.4h <-> ROW0R.4h */ + shrn ROW4R.4h, v6.4s, #16 + +2: /* Descale to 8-bit and range limit */ + ins v16.2d[1], v17.2d[0] + ins v18.2d[1], v19.2d[0] + ins 
v20.2d[1], v21.2d[0] + ins v22.2d[1], v23.2d[0] + sqrshrn v16.8b, v16.8h, #2 + sqrshrn2 v16.16b, v18.8h, #2 + sqrshrn v18.8b, v20.8h, #2 + sqrshrn2 v18.16b, v22.8h, #2 + + /* vpop {v8.4h - d15.4h} */ /* restore NEON registers */ + ld1 {v8.4h - v11.4h}, [sp], 32 + ld1 {v12.4h - v15.4h}, [sp], 32 + ins v24.2d[1], v25.2d[0] + + sqrshrn v20.8b, v24.8h, #2 + /* Transpose the final 8-bit samples and do signed->unsigned conversion */ + /* trn1 v16.8h, v16.8h, v18.8h */ + transpose v16, v18, v3, .16b, .8h + ins v26.2d[1], v27.2d[0] + ins v28.2d[1], v29.2d[0] + ins v30.2d[1], v31.2d[0] + sqrshrn2 v20.16b, v26.8h, #2 + sqrshrn v22.8b, v28.8h, #2 + movi v0.16b, #(CENTERJSAMPLE) + sqrshrn2 v22.16b, v30.8h, #2 + transpose_single v16, v17, v3, .2d, .8b + transpose_single v18, v19, v3, .2d, .8b + add v16.8b, v16.8b, v0.8b + add v17.8b, v17.8b, v0.8b + add v18.8b, v18.8b, v0.8b + add v19.8b, v19.8b, v0.8b + transpose v20, v22, v3, .16b, .8h + /* Store results to the output buffer */ + ldp TMP1, TMP2, [OUTPUT_BUF], 16 + add TMP1, TMP1, OUTPUT_COL + add TMP2, TMP2, OUTPUT_COL + st1 {v16.8b}, [TMP1] + transpose_single v20, v21, v3, .2d, .8b + st1 {v17.8b}, [TMP2] + ldp TMP1, TMP2, [OUTPUT_BUF], 16 + add TMP1, TMP1, OUTPUT_COL + add TMP2, TMP2, OUTPUT_COL + st1 {v18.8b}, [TMP1] + add v20.8b, v20.8b, v0.8b + add v21.8b, v21.8b, v0.8b + st1 {v19.8b}, [TMP2] + ldp TMP1, TMP2, [OUTPUT_BUF], 16 + ldp TMP3, TMP4, [OUTPUT_BUF] + add TMP1, TMP1, OUTPUT_COL + add TMP2, TMP2, OUTPUT_COL + add TMP3, TMP3, OUTPUT_COL + add TMP4, TMP4, OUTPUT_COL + transpose_single v22, v23, v3, .2d, .8b + st1 {v20.8b}, [TMP1] + add v22.8b, v22.8b, v0.8b + add v23.8b, v23.8b, v0.8b + st1 {v21.8b}, [TMP2] + st1 {v22.8b}, [TMP3] + st1 {v23.8b}, [TMP4] + ldr x15, [sp], 16 + ld1 {v0.8b - v3.8b}, [sp], 32 + ld1 {v4.8b - v7.8b}, [sp], 32 + ld1 {v8.8b - v11.8b}, [sp], 32 + ld1 {v12.8b - v15.8b}, [sp], 32 + ld1 {v16.8b - v19.8b}, [sp], 32 + ld1 {v20.8b - v23.8b}, [sp], 32 + ld1 {v24.8b - v27.8b}, [sp], 32 + ld1 {v28.8b - 
v31.8b}, [sp], 32 + blr x30 + +3: /* Left 4x8 half is done, right 4x8 half contains mostly zeros */ + + /* Transpose left 4x8 half */ + transpose ROW6L, ROW7L, v3, .16b, .4h + transpose ROW2L, ROW3L, v3, .16b, .4h + transpose ROW0L, ROW1L, v3, .16b, .4h + transpose ROW4L, ROW5L, v3, .16b, .4h + shl ROW0R.4h, ROW0R.4h, #2 /* PASS1_BITS */ + transpose ROW1L, ROW3L, v3, .16b, .2s + transpose ROW4L, ROW6L, v3, .16b, .2s + transpose ROW0L, ROW2L, v3, .16b, .2s + transpose ROW5L, ROW7L, v3, .16b, .2s + cmp x0, #0 + beq 4f /* Right 4x8 half has all zeros, go to 'sparse' second pass */ + + /* Only row 0 is non-zero for the right 4x8 half */ + dup ROW1R.4h, ROW0R.4h[1] + dup ROW2R.4h, ROW0R.4h[2] + dup ROW3R.4h, ROW0R.4h[3] + dup ROW4R.4h, ROW0R.4h[0] + dup ROW5R.4h, ROW0R.4h[1] + dup ROW6R.4h, ROW0R.4h[2] + dup ROW7R.4h, ROW0R.4h[3] + dup ROW0R.4h, ROW0R.4h[0] + b 1b /* Go to 'normal' second pass */ + +4: /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), left 4x8 half */ + ld1 {v2.4h}, [x15] /* reload constants */ + smull v12.4s, ROW1L.4h, XFIX_1_175875602 + smlal v12.4s, ROW3L.4h, XFIX_1_175875602_MINUS_1_961570560 + smull v14.4s, ROW3L.4h, XFIX_1_175875602 + smlal v14.4s, ROW1L.4h, XFIX_1_175875602_MINUS_0_390180644 + smull v4.4s, ROW2L.4h, XFIX_0_541196100 + sshll v6.4s, ROW0L.4h, #13 + mov v8.16b, v12.16b + smlal v12.4s, ROW3L.4h, XFIX_3_072711026_MINUS_2_562915447 + smlsl v8.4s, ROW1L.4h, XFIX_0_899976223 + add v2.4s, v6.4s, v4.4s + mov v10.16b, v14.16b + smlal v14.4s, ROW1L.4h, XFIX_1_501321110_MINUS_0_899976223 + add v2.4s, v2.4s, v12.4s + add v12.4s, v12.4s, v12.4s + smlsl v10.4s, ROW3L.4h, XFIX_2_562915447 + shrn ROW1L.4h, v2.4s, #16 + sub v2.4s, v2.4s, v12.4s + smull v12.4s, ROW2L.4h, XFIX_0_541196100_PLUS_0_765366865 + sub v6.4s, v6.4s, v4.4s + shrn ROW2R.4h, v2.4s, #16 /* ROW6L.4h <-> ROW2R.4h */ + add v2.4s, v6.4s, v10.4s + sub v6.4s, v6.4s, v10.4s + sshll v10.4s, ROW0L.4h, #13 + shrn ROW2L.4h, v2.4s, #16 + shrn ROW1R.4h, v6.4s, #16 /* ROW5L.4h <-> 
ROW1R.4h */ + add v4.4s, v10.4s, v12.4s + sub v2.4s, v10.4s, v12.4s + add v12.4s, v4.4s, v14.4s + sub v4.4s, v4.4s, v14.4s + add v10.4s, v2.4s, v8.4s + sub v6.4s, v2.4s, v8.4s + shrn ROW3R.4h, v4.4s, #16 /* ROW7L.4h <-> ROW3R.4h */ + shrn ROW3L.4h, v10.4s, #16 + shrn ROW0L.4h, v12.4s, #16 + shrn ROW0R.4h, v6.4s, #16 /* ROW4L.4h <-> ROW0R.4h */ + /* 1-D IDCT, pass 2 (sparse variant with zero rows 4-7), right 4x8 half */ + ld1 {v2.4h}, [x15] /* reload constants */ + smull v12.4s, ROW5L.4h, XFIX_1_175875602 + smlal v12.4s, ROW7L.4h, XFIX_1_175875602_MINUS_1_961570560 + smull v14.4s, ROW7L.4h, XFIX_1_175875602 + smlal v14.4s, ROW5L.4h, XFIX_1_175875602_MINUS_0_390180644 + smull v4.4s, ROW6L.4h, XFIX_0_541196100 + sshll v6.4s, ROW4L.4h, #13 + mov v8.16b, v12.16b + smlal v12.4s, ROW7L.4h, XFIX_3_072711026_MINUS_2_562915447 + smlsl v8.4s, ROW5L.4h, XFIX_0_899976223 + add v2.4s, v6.4s, v4.4s + mov v10.16b, v14.16b + smlal v14.4s, ROW5L.4h, XFIX_1_501321110_MINUS_0_899976223 + add v2.4s, v2.4s, v12.4s + add v12.4s, v12.4s, v12.4s + smlsl v10.4s, ROW7L.4h, XFIX_2_562915447 + shrn ROW5L.4h, v2.4s, #16 /* ROW5L.4h <-> ROW1R.4h */ + sub v2.4s, v2.4s, v12.4s + smull v12.4s, ROW6L.4h, XFIX_0_541196100_PLUS_0_765366865 + sub v6.4s, v6.4s, v4.4s + shrn ROW6R.4h, v2.4s, #16 + add v2.4s, v6.4s, v10.4s + sub v6.4s, v6.4s, v10.4s + sshll v10.4s, ROW4L.4h, #13 + shrn ROW6L.4h, v2.4s, #16 /* ROW6L.4h <-> ROW2R.4h */ + shrn ROW5R.4h, v6.4s, #16 + add v4.4s, v10.4s, v12.4s + sub v2.4s, v10.4s, v12.4s + add v12.4s, v4.4s, v14.4s + sub v4.4s, v4.4s, v14.4s + add v10.4s, v2.4s, v8.4s + sub v6.4s, v2.4s, v8.4s + shrn ROW7R.4h, v4.4s, #16 + shrn ROW7L.4h, v10.4s, #16 /* ROW7L.4h <-> ROW3R.4h */ + shrn ROW4L.4h, v12.4s, #16 /* ROW4L.4h <-> ROW0R.4h */ + shrn ROW4R.4h, v6.4s, #16 + b 2b /* Go to epilogue */ + + .unreq DCT_TABLE + .unreq COEF_BLOCK + .unreq OUTPUT_BUF + .unreq OUTPUT_COL + .unreq TMP1 + .unreq TMP2 + .unreq TMP3 + .unreq TMP4 + + .unreq ROW0L + .unreq ROW0R + .unreq ROW1L + .unreq 
ROW1R + .unreq ROW2L + .unreq ROW2R + .unreq ROW3L + .unreq ROW3R + .unreq ROW4L + .unreq ROW4R + .unreq ROW5L + .unreq ROW5R + .unreq ROW6L + .unreq ROW6R + .unreq ROW7L + .unreq ROW7R + + +/*****************************************************************************/ + +/* + * jsimd_idct_ifast_neon + * + * This function contains a fast, not so accurate integer implementation of + * the inverse DCT (Discrete Cosine Transform). It uses the same calculations + * and produces exactly the same output as IJG's original 'jpeg_idct_ifast' + * function from jidctfst.c + * + * Normally 1-D AAN DCT needs 5 multiplications and 29 additions. + * But in ARM NEON case some extra additions are required because VQDMULH + * instruction can't handle the constants larger than 1. So the expressions + * like "x * 1.082392200" have to be converted to "x * 0.082392200 + x", + * which introduces an extra addition. Overall, there are 6 extra additions + * per 1-D IDCT pass, totalling to 5 VQDMULH and 35 VADD/VSUB instructions. 
+ */ + +#define XFIX_1_082392200 v0.4h[0] +#define XFIX_1_414213562 v0.4h[1] +#define XFIX_1_847759065 v0.4h[2] +#define XFIX_2_613125930 v0.4h[3] + +.balign 16 +jsimd_idct_ifast_neon_consts: + .short (277 * 128 - 256 * 128) /* XFIX_1_082392200 */ + .short (362 * 128 - 256 * 128) /* XFIX_1_414213562 */ + .short (473 * 128 - 256 * 128) /* XFIX_1_847759065 */ + .short (669 * 128 - 512 * 128) /* XFIX_2_613125930 */ + +asm_function jsimd_idct_ifast_neon + + DCT_TABLE .req x0 + COEF_BLOCK .req x1 + OUTPUT_BUF .req x2 + OUTPUT_COL .req x3 + TMP1 .req x0 + TMP2 .req x1 + TMP3 .req x2 + TMP4 .req x22 + TMP5 .req x23 + + /* Load and dequantize coefficients into NEON registers + * with the following allocation: + * 0 1 2 3 | 4 5 6 7 + * ---------+-------- + * 0 | d16 | d17 ( v8.8h ) + * 1 | d18 | d19 ( v9.8h ) + * 2 | d20 | d21 ( v10.8h ) + * 3 | d22 | d23 ( v11.8h ) + * 4 | d24 | d25 ( v12.8h ) + * 5 | d26 | d27 ( v13.8h ) + * 6 | d28 | d29 ( v14.8h ) + * 7 | d30 | d31 ( v15.8h ) + */ + /* Save NEON registers used in fast IDCT */ + sub sp, sp, #176 + stp x22, x23, [sp], 16 + adr x23, jsimd_idct_ifast_neon_consts + st1 {v0.8b - v3.8b}, [sp], 32 + st1 {v4.8b - v7.8b}, [sp], 32 + st1 {v8.8b - v11.8b}, [sp], 32 + st1 {v12.8b - v15.8b}, [sp], 32 + st1 {v16.8b - v19.8b}, [sp], 32 + ld1 {v8.8h, v9.8h}, [COEF_BLOCK], 32 + ld1 {v0.8h, v1.8h}, [DCT_TABLE], 32 + ld1 {v10.8h, v11.8h}, [COEF_BLOCK], 32 + mul v8.8h, v8.8h, v0.8h + ld1 {v2.8h, v3.8h}, [DCT_TABLE], 32 + mul v9.8h, v9.8h, v1.8h + ld1 {v12.8h, v13.8h}, [COEF_BLOCK], 32 + mul v10.8h, v10.8h, v2.8h + ld1 {v0.8h, v1.8h}, [DCT_TABLE], 32 + mul v11.8h, v11.8h, v3.8h + ld1 {v14.8h, v15.8h}, [COEF_BLOCK], 32 + mul v12.8h, v12.8h, v0.8h + ld1 {v2.8h, v3.8h}, [DCT_TABLE], 32 + mul v14.8h, v14.8h, v2.8h + mul v13.8h, v13.8h, v1.8h + ld1 {v0.4h}, [x23] /* load constants */ + mul v15.8h, v15.8h, v3.8h + + /* 1-D IDCT, pass 1 */ + sub v2.8h, v10.8h, v14.8h + add v14.8h, v10.8h, v14.8h + sub v1.8h, v11.8h, v13.8h + add v13.8h, v11.8h, 
v13.8h + sub v5.8h, v9.8h, v15.8h + add v15.8h, v9.8h, v15.8h + sqdmulh v4.8h, v2.8h, XFIX_1_414213562 + sqdmulh v6.8h, v1.8h, XFIX_2_613125930 + add v3.8h, v1.8h, v1.8h + sub v1.8h, v5.8h, v1.8h + add v10.8h, v2.8h, v4.8h + sqdmulh v4.8h, v1.8h, XFIX_1_847759065 + sub v2.8h, v15.8h, v13.8h + add v3.8h, v3.8h, v6.8h + sqdmulh v6.8h, v2.8h, XFIX_1_414213562 + add v1.8h, v1.8h, v4.8h + sqdmulh v4.8h, v5.8h, XFIX_1_082392200 + sub v10.8h, v10.8h, v14.8h + add v2.8h, v2.8h, v6.8h + sub v6.8h, v8.8h, v12.8h + add v12.8h, v8.8h, v12.8h + add v9.8h, v5.8h, v4.8h + add v5.8h, v6.8h, v10.8h + sub v10.8h, v6.8h, v10.8h + add v6.8h, v15.8h, v13.8h + add v8.8h, v12.8h, v14.8h + sub v3.8h, v6.8h, v3.8h + sub v12.8h, v12.8h, v14.8h + sub v3.8h, v3.8h, v1.8h + sub v1.8h, v9.8h, v1.8h + add v2.8h, v3.8h, v2.8h + sub v15.8h, v8.8h, v6.8h + add v1.8h, v1.8h, v2.8h + add v8.8h, v8.8h, v6.8h + add v14.8h, v5.8h, v3.8h + sub v9.8h, v5.8h, v3.8h + sub v13.8h, v10.8h, v2.8h + add v10.8h, v10.8h, v2.8h + /* Transpose q8-q9 */ + mov v18.16b, v8.16b + trn1 v8.8h, v8.8h, v9.8h + trn2 v9.8h, v18.8h, v9.8h + sub v11.8h, v12.8h, v1.8h + /* Transpose q14-q15 */ + mov v18.16b, v14.16b + trn1 v14.8h, v14.8h, v15.8h + trn2 v15.8h, v18.8h, v15.8h + add v12.8h, v12.8h, v1.8h + /* Transpose q10-q11 */ + mov v18.16b, v10.16b + trn1 v10.8h, v10.8h, v11.8h + trn2 v11.8h, v18.8h, v11.8h + /* Transpose q12-q13 */ + mov v18.16b, v12.16b + trn1 v12.8h, v12.8h, v13.8h + trn2 v13.8h, v18.8h, v13.8h + /* Transpose q9-q11 */ + mov v18.16b, v9.16b + trn1 v9.4s, v9.4s, v11.4s + trn2 v11.4s, v18.4s, v11.4s + /* Transpose q12-q14 */ + mov v18.16b, v12.16b + trn1 v12.4s, v12.4s, v14.4s + trn2 v14.4s, v18.4s, v14.4s + /* Transpose q8-q10 */ + mov v18.16b, v8.16b + trn1 v8.4s, v8.4s, v10.4s + trn2 v10.4s, v18.4s, v10.4s + /* Transpose q13-q15 */ + mov v18.16b, v13.16b + trn1 v13.4s, v13.4s, v15.4s + trn2 v15.4s, v18.4s, v15.4s + /* vswp v14.4h, v10-MSB.4h */ + umov x22, v14.d[0] + ins v14.2d[0], v10.2d[1] + ins 
v10.2d[1], x22 + /* vswp v13.4h, v9MSB.4h */ + + umov x22, v13.d[0] + ins v13.2d[0], v9.2d[1] + ins v9.2d[1], x22 + /* 1-D IDCT, pass 2 */ + sub v2.8h, v10.8h, v14.8h + /* vswp v15.4h, v11MSB.4h */ + umov x22, v15.d[0] + ins v15.2d[0], v11.2d[1] + ins v11.2d[1], x22 + add v14.8h, v10.8h, v14.8h + /* vswp v12.4h, v8-MSB.4h */ + umov x22, v12.d[0] + ins v12.2d[0], v8.2d[1] + ins v8.2d[1], x22 + sub v1.8h, v11.8h, v13.8h + add v13.8h, v11.8h, v13.8h + sub v5.8h, v9.8h, v15.8h + add v15.8h, v9.8h, v15.8h + sqdmulh v4.8h, v2.8h, XFIX_1_414213562 + sqdmulh v6.8h, v1.8h, XFIX_2_613125930 + add v3.8h, v1.8h, v1.8h + sub v1.8h, v5.8h, v1.8h + add v10.8h, v2.8h, v4.8h + sqdmulh v4.8h, v1.8h, XFIX_1_847759065 + sub v2.8h, v15.8h, v13.8h + add v3.8h, v3.8h, v6.8h + sqdmulh v6.8h, v2.8h, XFIX_1_414213562 + add v1.8h, v1.8h, v4.8h + sqdmulh v4.8h, v5.8h, XFIX_1_082392200 + sub v10.8h, v10.8h, v14.8h + add v2.8h, v2.8h, v6.8h + sub v6.8h, v8.8h, v12.8h + add v12.8h, v8.8h, v12.8h + add v9.8h, v5.8h, v4.8h + add v5.8h, v6.8h, v10.8h + sub v10.8h, v6.8h, v10.8h + add v6.8h, v15.8h, v13.8h + add v8.8h, v12.8h, v14.8h + sub v3.8h, v6.8h, v3.8h + sub v12.8h, v12.8h, v14.8h + sub v3.8h, v3.8h, v1.8h + sub v1.8h, v9.8h, v1.8h + add v2.8h, v3.8h, v2.8h + sub v15.8h, v8.8h, v6.8h + add v1.8h, v1.8h, v2.8h + add v8.8h, v8.8h, v6.8h + add v14.8h, v5.8h, v3.8h + sub v9.8h, v5.8h, v3.8h + sub v13.8h, v10.8h, v2.8h + add v10.8h, v10.8h, v2.8h + sub v11.8h, v12.8h, v1.8h + add v12.8h, v12.8h, v1.8h + /* Descale to 8-bit and range limit */ + movi v0.16b, #0x80 + sqshrn v8.8b, v8.8h, #5 + sqshrn2 v8.16b, v9.8h, #5 + sqshrn v9.8b, v10.8h, #5 + sqshrn2 v9.16b, v11.8h, #5 + sqshrn v10.8b, v12.8h, #5 + sqshrn2 v10.16b, v13.8h, #5 + sqshrn v11.8b, v14.8h, #5 + sqshrn2 v11.16b, v15.8h, #5 + add v8.16b, v8.16b, v0.16b + add v9.16b, v9.16b, v0.16b + add v10.16b, v10.16b, v0.16b + add v11.16b, v11.16b, v0.16b + /* Transpose the final 8-bit samples */ + /* Transpose q8-q9 */ + mov v18.16b, v8.16b + trn1 
v8.8h, v8.8h, v9.8h + trn2 v9.8h, v18.8h, v9.8h + /* Transpose q10-q11 */ + mov v18.16b, v10.16b + trn1 v10.8h, v10.8h, v11.8h + trn2 v11.8h, v18.8h, v11.8h + /* Transpose q8-q10 */ + mov v18.16b, v8.16b + trn1 v8.4s, v8.4s, v10.4s + trn2 v10.4s, v18.4s, v10.4s + /* Transpose q9-q11 */ + mov v18.16b, v9.16b + trn1 v9.4s, v9.4s, v11.4s + trn2 v11.4s, v18.4s, v11.4s + /* make copy */ + ins v17.2d[0], v8.2d[1] + /* Transpose d16-d17-msb */ + mov v18.16b, v8.16b + trn1 v8.8b, v8.8b, v17.8b + trn2 v17.8b, v18.8b, v17.8b + /* make copy */ + ins v19.2d[0], v9.2d[1] + mov v18.16b, v9.16b + trn1 v9.8b, v9.8b, v19.8b + trn2 v19.8b, v18.8b, v19.8b + /* Store results to the output buffer */ + ldp TMP1, TMP2, [OUTPUT_BUF], 16 + add TMP1, TMP1, OUTPUT_COL + add TMP2, TMP2, OUTPUT_COL + st1 {v8.8b}, [TMP1] + st1 {v17.8b}, [TMP2] + ldp TMP1, TMP2, [OUTPUT_BUF], 16 + add TMP1, TMP1, OUTPUT_COL + add TMP2, TMP2, OUTPUT_COL + st1 {v9.8b}, [TMP1] + /* make copy */ + ins v7.2d[0], v10.2d[1] + mov v18.16b, v10.16b + trn1 v10.8b, v10.8b, v7.8b + trn2 v7.8b, v18.8b, v7.8b + st1 {v19.8b}, [TMP2] + ldp TMP1, TMP2, [OUTPUT_BUF], 16 + ldp TMP4, TMP5, [OUTPUT_BUF], 16 + add TMP1, TMP1, OUTPUT_COL + add TMP2, TMP2, OUTPUT_COL + add TMP4, TMP4, OUTPUT_COL + add TMP5, TMP5, OUTPUT_COL + st1 {v10.8b}, [TMP1] + /* make copy */ + ins v16.2d[0], v11.2d[1] + mov v18.16b, v11.16b + trn1 v11.8b, v11.8b, v16.8b + trn2 v16.8b, v18.8b, v16.8b + st1 {v7.8b}, [TMP2] + st1 {v11.8b}, [TMP4] + st1 {v16.8b}, [TMP5] + sub sp, sp, #176 + ldp x22, x23, [sp], 16 + ld1 {v0.8b - v3.8b}, [sp], 32 + ld1 {v4.8b - v7.8b}, [sp], 32 + ld1 {v8.8b - v11.8b}, [sp], 32 + ld1 {v12.8b - v15.8b}, [sp], 32 + ld1 {v16.8b - v19.8b}, [sp], 32 + blr x30 + + .unreq DCT_TABLE + .unreq COEF_BLOCK + .unreq OUTPUT_BUF + .unreq OUTPUT_COL + .unreq TMP1 + .unreq TMP2 + .unreq TMP3 + .unreq TMP4 + + +/*****************************************************************************/ + +/* + * jsimd_idct_4x4_neon + * + * This function contains 
inverse-DCT code for getting reduced-size + * 4x4 pixels output from an 8x8 DCT block. It uses the same calculations + * and produces exactly the same output as IJG's original 'jpeg_idct_4x4' + * function from jpeg-6b (jidctred.c). + * + * NOTE: jpeg-8 has an improved implementation of 4x4 inverse-DCT, which + * requires much less arithmetic operations and hence should be faster. + * The primary purpose of this particular NEON optimized function is + * bit exact compatibility with jpeg-6b. + * + * TODO: a bit better instructions scheduling can be achieved by expanding + * idct_helper/transpose_4x4 macros and reordering instructions, + * but readability will suffer somewhat. + */ + +#define CONST_BITS 13 + +#define FIX_0_211164243 (1730) /* FIX(0.211164243) */ +#define FIX_0_509795579 (4176) /* FIX(0.509795579) */ +#define FIX_0_601344887 (4926) /* FIX(0.601344887) */ +#define FIX_0_720959822 (5906) /* FIX(0.720959822) */ +#define FIX_0_765366865 (6270) /* FIX(0.765366865) */ +#define FIX_0_850430095 (6967) /* FIX(0.850430095) */ +#define FIX_0_899976223 (7373) /* FIX(0.899976223) */ +#define FIX_1_061594337 (8697) /* FIX(1.061594337) */ +#define FIX_1_272758580 (10426) /* FIX(1.272758580) */ +#define FIX_1_451774981 (11893) /* FIX(1.451774981) */ +#define FIX_1_847759065 (15137) /* FIX(1.847759065) */ +#define FIX_2_172734803 (17799) /* FIX(2.172734803) */ +#define FIX_2_562915447 (20995) /* FIX(2.562915447) */ +#define FIX_3_624509785 (29692) /* FIX(3.624509785) */ + +.balign 16 +jsimd_idct_4x4_neon_consts: + .short FIX_1_847759065 /* v0.4h[0] */ + .short -FIX_0_765366865 /* v0.4h[1] */ + .short -FIX_0_211164243 /* v0.4h[2] */ + .short FIX_1_451774981 /* v0.4h[3] */ + .short -FIX_2_172734803 /* d1[0] */ + .short FIX_1_061594337 /* d1[1] */ + .short -FIX_0_509795579 /* d1[2] */ + .short -FIX_0_601344887 /* d1[3] */ + .short FIX_0_899976223 /* v2.4h[0] */ + .short FIX_2_562915447 /* v2.4h[1] */ + .short 1 << (CONST_BITS+1) /* v2.4h[2] */ + .short 0 /* v2.4h[3] */ + 
+.macro idct_helper x4, x6, x8, x10, x12, x14, x16, shift, y26, y27, y28, y29
+    /* One 4-point IDCT pass over four 4x16-bit input rows; results are
+     * descaled by \shift and narrowed back to 16 bits into y26..y29. */
+    smull v28.4s, \x4, v2.4h[2]
+    smlal v28.4s, \x8, v0.4h[0]
+    smlal v28.4s, \x14, v0.4h[1]
+
+    smull v26.4s, \x16, v1.4h[2]
+    smlal v26.4s, \x12, v1.4h[3]
+    smlal v26.4s, \x10, v2.4h[0]
+    smlal v26.4s, \x6, v2.4h[1]
+
+    smull v30.4s, \x4, v2.4h[2]
+    smlsl v30.4s, \x8, v0.4h[0]
+    smlsl v30.4s, \x14, v0.4h[1]
+
+    smull v24.4s, \x16, v0.4h[2]
+    smlal v24.4s, \x12, v0.4h[3]
+    smlal v24.4s, \x10, v1.4h[0]
+    smlal v24.4s, \x6, v1.4h[1]
+
+    add v20.4s, v28.4s, v26.4s
+    sub v28.4s, v28.4s, v26.4s
+
+.if \shift > 16
+    /* rshrn only encodes shifts up to 16, so descale with a rounding
+     * shift first and narrow separately. */
+    srshr v20.4s, v20.4s, #\shift
+    srshr v28.4s, v28.4s, #\shift
+    xtn \y26, v20.4s
+    xtn \y29, v28.4s
+.else
+    rshrn \y26, v20.4s, #\shift
+    rshrn \y29, v28.4s, #\shift
+.endif
+
+    add v20.4s, v30.4s, v24.4s
+    sub v30.4s, v30.4s, v24.4s
+
+.if \shift > 16
+    srshr v20.4s, v20.4s, #\shift
+    srshr v30.4s, v30.4s, #\shift
+    xtn \y27, v20.4s
+    xtn \y28, v30.4s
+.else
+    rshrn \y27, v20.4s, #\shift
+    rshrn \y28, v30.4s, #\shift
+.endif
+
+.endm
+
+asm_function jsimd_idct_4x4_neon
+
+    /* Arguments (AAPCS64): x0 = dct_table, x1 = coef_block,
+     * x2 = output_buf, x3 = output_col */
+    DCT_TABLE .req x0
+    COEF_BLOCK .req x1
+    OUTPUT_BUF .req x2
+    OUTPUT_COL .req x3
+    TMP1 .req x0
+    TMP2 .req x1
+    TMP3 .req x2
+    TMP4 .req x15
+
+    /* Save all used NEON registers (sp walks back up via post-indexed st1) */
+    sub sp, sp, 272
+    str x15, [sp], 16
+    /* Load constants (v3.4h is just used for padding) */
+    adr TMP4, jsimd_idct_4x4_neon_consts
+    st1 {v0.8b - v3.8b}, [sp], 32
+    st1 {v4.8b - v7.8b}, [sp], 32
+    st1 {v8.8b - v11.8b}, [sp], 32
+    st1 {v12.8b - v15.8b}, [sp], 32
+    st1 {v16.8b - v19.8b}, [sp], 32
+    st1 {v20.8b - v23.8b}, [sp], 32
+    st1 {v24.8b - v27.8b}, [sp], 32
+    st1 {v28.8b - v31.8b}, [sp], 32
+    ld1 {v0.4h, v1.4h, v2.4h, v3.4h}, [TMP4]
+
+    /* Load all COEF_BLOCK into NEON registers with the following allocation:
+     *       0 1 2 3 | 4 5 6 7
+     *      ---------+--------
+     *   0 | v4.4h  | v5.4h
+     *   1 | v6.4h  | v7.4h
+     *   2 | v8.4h  | v9.4h
+     *   3 | v10.4h | v11.4h
+     *   4 | -      | -
+     *   5 | v12.4h | v13.4h
+     *   6 | v14.4h | v15.4h
+     *   7 | v16.4h | v17.4h
+     */
+    ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
+    ld1 {v8.4h, v9.4h, v10.4h, v11.4h}, [COEF_BLOCK], 32
+    add COEF_BLOCK, COEF_BLOCK, #16 /* skip row 4 (unused, see table above) */
+    ld1 {v12.4h, v13.4h, v14.4h, v15.4h}, [COEF_BLOCK], 32
+    ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
+    /* dequantize */
+    ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
+    mul v4.4h, v4.4h, v18.4h
+    mul v5.4h, v5.4h, v19.4h
+    ins v4.2d[1], v5.2d[0] /* 128 bit q4 */
+    ld1 {v22.4h, v23.4h, v24.4h, v25.4h}, [DCT_TABLE], 32
+    mul v6.4h, v6.4h, v20.4h
+    mul v7.4h, v7.4h, v21.4h
+    ins v6.2d[1], v7.2d[0] /* 128 bit q6 */
+    mul v8.4h, v8.4h, v22.4h
+    mul v9.4h, v9.4h, v23.4h
+    ins v8.2d[1], v9.2d[0] /* 128 bit q8 */
+    add DCT_TABLE, DCT_TABLE, #16
+    ld1 {v26.4h, v27.4h, v28.4h, v29.4h}, [DCT_TABLE], 32
+    mul v10.4h, v10.4h, v24.4h
+    mul v11.4h, v11.4h, v25.4h
+    ins v10.2d[1], v11.2d[0] /* 128 bit q10 */
+    mul v12.4h, v12.4h, v26.4h
+    mul v13.4h, v13.4h, v27.4h
+    ins v12.2d[1], v13.2d[0] /* 128 bit q12 */
+    ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
+    mul v14.4h, v14.4h, v28.4h
+    mul v15.4h, v15.4h, v29.4h
+    ins v14.2d[1], v15.2d[0] /* 128 bit q14 */
+    mul v16.4h, v16.4h, v30.4h
+    mul v17.4h, v17.4h, v31.4h
+    ins v16.2d[1], v17.2d[0] /* 128 bit q16 */
+
+    /* Pass 1 */
+    idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v12.4h, v14.4h, v16.4h, 12, v4.4h, v6.4h, v8.4h, v10.4h
+    transpose_4x4 v4, v6, v8, v10, v3
+    ins v10.2d[1], v11.2d[0] /* NOTE(review): v11 still holds dequantized row-3 data here; confirm this first merge is intended (it is repeated below) */
+    idct_helper v5.4h, v7.4h, v9.4h, v11.4h, v13.4h, v15.4h, v17.4h, 12, v5.4h, v7.4h, v9.4h, v11.4h
+    transpose_4x4 v5, v7, v9, v11, v3
+    ins v10.2d[1], v11.2d[0]
+    /* Pass 2 */
+    idct_helper v4.4h, v6.4h, v8.4h, v10.4h, v7.4h, v9.4h, v11.4h, 19, v26.4h, v27.4h, v28.4h, v29.4h
+    transpose_4x4 v26, v27, v28, v29, v3
+
+    /* Range limit */
+    movi v30.8h, #0x80
+    ins v26.2d[1], v27.2d[0]
+    ins v28.2d[1], v29.2d[0]
+    add v26.8h, v26.8h, v30.8h
+    add v28.8h, v28.8h, v30.8h
+    sqxtun v26.8b, v26.8h
+    sqxtun v27.8b, v28.8h
+
+    /* Store results to the output buffer */
+    ldp TMP1, TMP2, [OUTPUT_BUF], 16
+    ldp TMP3, TMP4, [OUTPUT_BUF]
+    add TMP1, TMP1, OUTPUT_COL
+    add TMP2, TMP2, OUTPUT_COL
+    add TMP3, TMP3, OUTPUT_COL
+    add TMP4, TMP4, OUTPUT_COL
+
+#if defined(__ARMEL__) && !RESPECT_STRICT_ALIGNMENT
+    /* We can use much less instructions on little endian systems if the
+     * OS kernel is not configured to trap unaligned memory accesses
+     */
+    st1 {v26.s}[0], [TMP1], 4
+    st1 {v27.s}[0], [TMP3], 4
+    st1 {v26.s}[1], [TMP2], 4
+    st1 {v27.s}[1], [TMP4], 4
+#else
+    st1 {v26.b}[0], [TMP1], 1
+    st1 {v27.b}[0], [TMP3], 1
+    st1 {v26.b}[1], [TMP1], 1
+    st1 {v27.b}[1], [TMP3], 1
+    st1 {v26.b}[2], [TMP1], 1
+    st1 {v27.b}[2], [TMP3], 1
+    st1 {v26.b}[3], [TMP1], 1
+    st1 {v27.b}[3], [TMP3], 1
+
+    st1 {v26.b}[4], [TMP2], 1
+    st1 {v27.b}[4], [TMP4], 1
+    st1 {v26.b}[5], [TMP2], 1
+    st1 {v27.b}[5], [TMP4], 1
+    st1 {v26.b}[6], [TMP2], 1
+    st1 {v27.b}[6], [TMP4], 1
+    st1 {v26.b}[7], [TMP2], 1
+    st1 {v27.b}[7], [TMP4], 1
+#endif
+
+    /* vpop {v8.4h - v15.4h} ;not available */
+    sub sp, sp, #272
+    ldr x15, [sp], 16
+    ld1 {v0.8b - v3.8b}, [sp], 32
+    ld1 {v4.8b - v7.8b}, [sp], 32
+    ld1 {v8.8b - v11.8b}, [sp], 32
+    ld1 {v12.8b - v15.8b}, [sp], 32
+    ld1 {v16.8b - v19.8b}, [sp], 32
+    ld1 {v20.8b - v23.8b}, [sp], 32
+    ld1 {v24.8b - v27.8b}, [sp], 32
+    ld1 {v28.8b - v31.8b}, [sp], 32
+    br x30 /* plain indirect return; blr would clobber x30 and break return prediction */
+
+    .unreq DCT_TABLE
+    .unreq COEF_BLOCK
+    .unreq OUTPUT_BUF
+    .unreq OUTPUT_COL
+    .unreq TMP1
+    .unreq TMP2
+    .unreq TMP3
+    .unreq TMP4
+
+.purgem idct_helper
+
+
+/*****************************************************************************/
+
+/*
+ * jsimd_idct_2x2_neon
+ *
+ * This function contains inverse-DCT code for getting reduced-size
+ * 2x2 pixels output from an 8x8 DCT block. It uses the same calculations
+ * and produces exactly the same output as IJG's original 'jpeg_idct_2x2'
+ * function from jpeg-6b (jidctred.c).
+ *
+ * NOTE: jpeg-8 has an improved implementation of 2x2 inverse-DCT, which
+ * requires much less arithmetic operations and hence should be faster.
+ * The primary purpose of this particular NEON optimized function is
+ * bit exact compatibility with jpeg-6b.
+ */
+
+.balign 8
+jsimd_idct_2x2_neon_consts: /* odd-row multipliers, loaded into v14 below */
+    .short -FIX_0_720959822 /* v14[0] */
+    .short FIX_0_850430095 /* v14[1] */
+    .short -FIX_1_272758580 /* v14[2] */
+    .short FIX_3_624509785 /* v14[3] */
+
+.macro idct_helper x4, x6, x10, x12, x16, shift, y26, y27
+    /* One 2-point IDCT pass: DC term (x4) scaled by 2^15 plus the weighted
+     * odd rows; results descaled by \shift into y26/y27. */
+    sshll v15.4s, \x4, #15
+    smull v26.4s, \x6, v14.4h[3]
+    smlal v26.4s, \x10, v14.4h[2]
+    smlal v26.4s, \x12, v14.4h[1]
+    smlal v26.4s, \x16, v14.4h[0]
+
+    add v20.4s, v15.4s, v26.4s
+    sub v15.4s, v15.4s, v26.4s
+
+.if \shift > 16
+    /* rshrn only encodes shifts up to 16; shift-then-narrow instead */
+    srshr v20.4s, v20.4s, #\shift
+    srshr v15.4s, v15.4s, #\shift
+    xtn \y26, v20.4s
+    xtn \y27, v15.4s
+.else
+    rshrn \y26, v20.4s, #\shift
+    rshrn \y27, v15.4s, #\shift
+.endif
+
+.endm
+
+asm_function jsimd_idct_2x2_neon
+
+    /* Arguments (AAPCS64): x0 = dct_table, x1 = coef_block,
+     * x2 = output_buf, x3 = output_col */
+    DCT_TABLE .req x0
+    COEF_BLOCK .req x1
+    OUTPUT_BUF .req x2
+    OUTPUT_COL .req x3
+    TMP1 .req x0
+    TMP2 .req x15
+
+    /* vpush {v8.4h - v15.4h} ; not available */
+    sub sp, sp, 208
+    str x15, [sp], 16
+
+    /* Load constants */
+    adr TMP2, jsimd_idct_2x2_neon_consts
+    st1 {v4.8b - v7.8b}, [sp], 32
+    st1 {v8.8b - v11.8b}, [sp], 32
+    st1 {v12.8b - v15.8b}, [sp], 32
+    st1 {v16.8b - v19.8b}, [sp], 32
+    st1 {v21.8b - v22.8b}, [sp], 16
+    st1 {v24.8b - v27.8b}, [sp], 32
+    st1 {v30.8b - v31.8b}, [sp], 16
+    ld1 {v14.4h}, [TMP2]
+
+    /* Load all COEF_BLOCK into NEON registers with the following allocation:
+     *       0 1 2 3 | 4 5 6 7
+     *      ---------+--------
+     *   0 | v4.4h  | v5.4h
+     *   1 | v6.4h  | v7.4h
+     *   2 | -      | -
+     *   3 | v10.4h | v11.4h
+     *   4 | -      | -
+     *   5 | v12.4h | v13.4h
+     *   6 | -      | -
+     *   7 | v16.4h | v17.4h
+     */
+    ld1 {v4.4h, v5.4h, v6.4h, v7.4h}, [COEF_BLOCK], 32
+    add COEF_BLOCK, COEF_BLOCK, #16 /* skip even rows not used by 2x2 IDCT */
+    ld1 {v10.4h, v11.4h}, [COEF_BLOCK], 16
+    add COEF_BLOCK, COEF_BLOCK, #16
+    ld1 {v12.4h, v13.4h}, [COEF_BLOCK], 16
+    add COEF_BLOCK, COEF_BLOCK, #16
+    ld1 {v16.4h, v17.4h}, [COEF_BLOCK], 16
+    /* Dequantize */
+    ld1 {v18.4h, v19.4h, v20.4h, v21.4h}, [DCT_TABLE], 32
+    mul v4.4h, v4.4h, v18.4h
+    mul v5.4h, v5.4h, v19.4h
+    ins v4.2d[1], v5.2d[0]
+    mul v6.4h, v6.4h, v20.4h
+    mul v7.4h, v7.4h, v21.4h
+    ins v6.2d[1], v7.2d[0]
+    add DCT_TABLE, DCT_TABLE, #16
+    ld1 {v24.4h, v25.4h}, [DCT_TABLE], 16
+    mul v10.4h, v10.4h, v24.4h
+    mul v11.4h, v11.4h, v25.4h
+    ins v10.2d[1], v11.2d[0]
+    add DCT_TABLE, DCT_TABLE, #16
+    ld1 {v26.4h, v27.4h}, [DCT_TABLE], 16
+    mul v12.4h, v12.4h, v26.4h
+    mul v13.4h, v13.4h, v27.4h
+    ins v12.2d[1], v13.2d[0]
+    add DCT_TABLE, DCT_TABLE, #16
+    ld1 {v30.4h, v31.4h}, [DCT_TABLE], 16
+    mul v16.4h, v16.4h, v30.4h
+    mul v17.4h, v17.4h, v31.4h
+    ins v16.2d[1], v17.2d[0]
+
+    /* Pass 1 */
+#if 0
+    idct_helper v4.4h, v6.4h, v10.4h, v12.4h, v16.4h, 13, v4.4h, v6.4h
+    transpose_4x4 v4.4h, v6.4h, v8.4h, v10.4h
+    idct_helper v5.4h, v7.4h, v11.4h, v13.4h, v17.4h, 13, v5.4h, v7.4h
+    transpose_4x4 v5.4h, v7.4h, v9.4h, v11.4h
+#else
+    /* Hand-expanded equivalent of the #if 0 block above (both column halves
+     * interleaved, then transposed) */
+    smull v26.4s, v6.4h, v14.4h[3]
+    smlal v26.4s, v10.4h, v14.4h[2]
+    smlal v26.4s, v12.4h, v14.4h[1]
+    smlal v26.4s, v16.4h, v14.4h[0]
+    smull v24.4s, v7.4h, v14.4h[3]
+    smlal v24.4s, v11.4h, v14.4h[2]
+    smlal v24.4s, v13.4h, v14.4h[1]
+    smlal v24.4s, v17.4h, v14.4h[0]
+    sshll v15.4s, v4.4h, #15
+    sshll v30.4s, v5.4h, #15
+    add v20.4s, v15.4s, v26.4s
+    sub v15.4s, v15.4s, v26.4s
+    rshrn v4.4h, v20.4s, #13
+    rshrn v6.4h, v15.4s, #13
+    add v20.4s, v30.4s, v24.4s
+    sub v15.4s, v30.4s, v24.4s
+    rshrn v5.4h, v20.4s, #13
+    rshrn v7.4h, v15.4s, #13
+    ins v4.2d[1], v5.2d[0]
+    ins v6.2d[1], v7.2d[0]
+    transpose v4, v6, v3, .16b, .8h
+    transpose v6, v10, v3, .16b, .4s
+    ins v11.2d[0], v10.2d[1]
+    ins v7.2d[0], v6.2d[1]
+#endif
+
+    /* Pass 2 */
+    idct_helper v4.4h, v6.4h, v10.4h, v7.4h, v11.4h, 20, v26.4h, v27.4h
+
+    /* Range limit */
+    movi v30.8h, #0x80
+    ins v26.2d[1], v27.2d[0]
+    add v26.8h, v26.8h, v30.8h
+    sqxtun v30.8b, v26.8h
+    ins v26.2d[0], v30.2d[0]
+    sqxtun v27.8b, v26.8h
+
+    /* Store results to the output buffer */
+    ldp TMP1, TMP2, [OUTPUT_BUF]
+    add TMP1, TMP1, OUTPUT_COL
+    add TMP2, TMP2, OUTPUT_COL
+
+    st1 {v26.b}[0], [TMP1], 1
+    st1 {v27.b}[4], [TMP1], 1
+    st1 {v26.b}[1], [TMP2], 1
+    st1 {v27.b}[5], [TMP2], 1
+
+    sub sp, sp, #208
+    ldr x15, [sp], 16
+    ld1 {v4.8b - v7.8b}, [sp], 32
+    ld1 {v8.8b - v11.8b}, [sp], 32
+    ld1 {v12.8b - v15.8b}, [sp], 32
+    ld1 {v16.8b - v19.8b}, [sp], 32
+    ld1 {v21.8b - v22.8b}, [sp], 16
+    ld1 {v24.8b - v27.8b}, [sp], 32
+    ld1 {v30.8b - v31.8b}, [sp], 16
+    br x30 /* plain indirect return; blr would clobber x30 and break return prediction */
+
+    .unreq DCT_TABLE
+    .unreq COEF_BLOCK
+    .unreq OUTPUT_BUF
+    .unreq OUTPUT_COL
+    .unreq TMP1
+    .unreq TMP2
+
+.purgem idct_helper
+
+
+/*****************************************************************************/
+
+/*
+ * jsimd_ycc_extrgb_convert_neon
+ * jsimd_ycc_extbgr_convert_neon
+ * jsimd_ycc_extrgbx_convert_neon
+ * jsimd_ycc_extbgrx_convert_neon
+ * jsimd_ycc_extxbgr_convert_neon
+ * jsimd_ycc_extxrgb_convert_neon
+ *
+ * Colorspace conversion YCbCr -> RGB
+ */
+
+
+.macro do_load size
+    .if \size == 8
+    ld1 {v4.8b}, [U], 8
+    ld1 {v5.8b}, [V], 8
+    ld1 {v0.8b}, [Y], 8
+    prfm PLDL1KEEP, [U, #64]
+    prfm PLDL1KEEP, [V, #64]
+    prfm PLDL1KEEP, [Y, #64]
+    .elseif \size == 4
+    ld1 {v4.b}[0], [U], 1
+    ld1 {v4.b}[1], [U], 1
+    ld1 {v4.b}[2], [U], 1
+    ld1 {v4.b}[3], [U], 1
+    ld1 {v5.b}[0], [V], 1
+    ld1 {v5.b}[1], [V], 1
+    ld1 {v5.b}[2], [V], 1
+    ld1 {v5.b}[3], [V], 1
+    ld1 {v0.b}[0], [Y], 1
+    ld1 {v0.b}[1], [Y], 1
+    ld1 {v0.b}[2], [Y], 1
+    ld1 {v0.b}[3], [Y], 1
+    .elseif \size == 2
+    ld1 {v4.b}[4], [U], 1
+    ld1 {v4.b}[5], [U], 1
+    ld1 {v5.b}[4], [V], 1
+    ld1 {v5.b}[5], [V], 1
+    ld1 {v0.b}[4], [Y], 1
+    ld1 {v0.b}[5], [Y], 1
+    .elseif \size == 1
+    ld1 {v4.b}[6], [U], 1
+    ld1 {v5.b}[6], [V], 1
+    ld1 {v0.b}[6], [Y], 1
+    .else
+    .error unsupported macroblock size
+    .endif
+.endm
+
+.macro do_store bpp, size
+    .if \bpp == 24
+    .if \size == 8
+    st3 {v10.8b, v11.8b, v12.8b}, [RGB], 24
+    .elseif \size == 4
+    st3 {v10.b, v11.b, v12.b}[0], [RGB], 3
+    st3 {v10.b, v11.b, v12.b}[1], [RGB], 3
+    st3 {v10.b, v11.b, v12.b}[2], [RGB], 3
+    st3 {v10.b, v11.b, v12.b}[3], [RGB], 3
+    
.elseif \size == 2
+    st3 {v10.b, v11.b, v12.b}[4], [RGB], 3
+    st3 {v10.b, v11.b, v12.b}[5], [RGB], 3
+    .elseif \size == 1
+    st3 {v10.b, v11.b, v12.b}[6], [RGB], 3
+    .else
+    .error unsupported macroblock size
+    .endif
+    .elseif \bpp == 32
+    .if \size == 8
+    st4 {v10.8b, v11.8b, v12.8b, v13.8b}, [RGB], 32
+    .elseif \size == 4
+    st4 {v10.b, v11.b, v12.b, v13.b}[0], [RGB], 4
+    st4 {v10.b, v11.b, v12.b, v13.b}[1], [RGB], 4
+    st4 {v10.b, v11.b, v12.b, v13.b}[2], [RGB], 4
+    st4 {v10.b, v11.b, v12.b, v13.b}[3], [RGB], 4
+    .elseif \size == 2
+    st4 {v10.b, v11.b, v12.b, v13.b}[4], [RGB], 4
+    st4 {v10.b, v11.b, v12.b, v13.b}[5], [RGB], 4
+    .elseif \size == 1
+    st4 {v10.b, v11.b, v12.b, v13.b}[6], [RGB], 4
+    .else
+    .error unsupported macroblock size
+    .endif
+    .elseif \bpp==16
+    .if \size == 8
+    st1 {v25.8h}, [RGB],16
+    .elseif \size == 4
+    st1 {v25.4h}, [RGB],8
+    .elseif \size == 2
+    st1 {v25.h}[4], [RGB],2
+    st1 {v25.h}[5], [RGB],2
+    .elseif \size == 1
+    st1 {v25.h}[6], [RGB],2
+    .else
+    .error unsupported macroblock size
+    .endif
+    .else
+    .error unsupported bpp
+    .endif
+.endm
+
+.macro generate_jsimd_ycc_rgb_convert_neon colorid, bpp, r_offs, rsize, g_offs, gsize, b_offs, bsize, defsize
+
+/*
+ * 2-stage pipelined YCbCr->RGB conversion
+ */
+
+.macro do_yuv_to_rgb_stage1
+    uaddw v6.8h, v2.8h, v4.8b /* q3 = u - 128 */
+    uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
+    smull v20.4s, v6.4h, v1.4h[1] /* multiply by -11277 */
+    smlal v20.4s, v8.4h, v1.4h[2] /* multiply by -23401 */
+    smull2 v22.4s, v6.8h, v1.4h[1] /* multiply by -11277 */
+    smlal2 v22.4s, v8.8h, v1.4h[2] /* multiply by -23401 */
+    smull v24.4s, v8.4h, v1.4h[0] /* multiply by 22971 */
+    smull2 v26.4s, v8.8h, v1.4h[0] /* multiply by 22971 */
+    smull v28.4s, v6.4h, v1.4h[3] /* multiply by 29033 */
+    smull2 v30.4s, v6.8h, v1.4h[3] /* multiply by 29033 */
+.endm
+
+.macro do_yuv_to_rgb_stage2
+    /* Descale the stage1 products, add Y, and pack to the output format */
+    rshrn v20.4h, v20.4s, #15
+    rshrn2 v20.8h, v22.4s, #15
+    rshrn v24.4h, v24.4s, #14
+    rshrn2 v24.8h, v26.4s, #14
+    rshrn v28.4h, v28.4s, #14
+    rshrn2 v28.8h, v30.4s, #14
+    uaddw v20.8h, v20.8h, v0.8b
+    uaddw v24.8h, v24.8h, v0.8b
+    uaddw v28.8h, v28.8h, v0.8b
+.if \bpp != 16
+    sqxtun v1\g_offs\defsize, v20.8h
+    sqxtun v1\r_offs\defsize, v24.8h
+    sqxtun v1\b_offs\defsize, v28.8h
+.else
+    sqshlu v21.8h, v20.8h, #8
+    sqshlu v25.8h, v24.8h, #8
+    sqshlu v29.8h, v28.8h, #8
+    sri v25.8h, v21.8h, #5
+    sri v25.8h, v29.8h, #11
+.endif
+
+.endm
+
+.macro do_yuv_to_rgb_stage2_store_load_stage1
+    /* Software-pipelined body: finish/store the previous 8 pixels while
+     * loading and starting the multiplies for the next 8 */
+    rshrn v20.4h, v20.4s, #15
+    rshrn v24.4h, v24.4s, #14
+    rshrn v28.4h, v28.4s, #14
+    ld1 {v4.8b}, [U], 8
+    rshrn2 v20.8h, v22.4s, #15
+    rshrn2 v24.8h, v26.4s, #14
+    rshrn2 v28.8h, v30.4s, #14
+    ld1 {v5.8b}, [V], 8
+    uaddw v20.8h, v20.8h, v0.8b
+    uaddw v24.8h, v24.8h, v0.8b
+    uaddw v28.8h, v28.8h, v0.8b
+.if \bpp != 16 /**************** rgb24/rgb32 *********************************/
+    sqxtun v1\g_offs\defsize, v20.8h
+    ld1 {v0.8b}, [Y], 8
+    sqxtun v1\r_offs\defsize, v24.8h
+    prfm PLDL1KEEP, [U, #64]
+    prfm PLDL1KEEP, [V, #64]
+    prfm PLDL1KEEP, [Y, #64]
+    sqxtun v1\b_offs\defsize, v28.8h
+    uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
+    uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
+    smull v20.4s, v6.4h, v1.4h[1] /* multiply by -11277 */
+    smlal v20.4s, v8.4h, v1.4h[2] /* multiply by -23401 */
+    smull2 v22.4s, v6.8h, v1.4h[1] /* multiply by -11277 */
+    smlal2 v22.4s, v8.8h, v1.4h[2] /* multiply by -23401 */
+    smull v24.4s, v8.4h, v1.4h[0] /* multiply by 22971 */
+    smull2 v26.4s, v8.8h, v1.4h[0] /* multiply by 22971 */
+.else /**************************** rgb565 ***********************************/
+    sqshlu v21.8h, v20.8h, #8
+    sqshlu v25.8h, v24.8h, #8
+    sqshlu v29.8h, v28.8h, #8
+    uaddw v6.8h, v2.8h, v4.8b /* v6.16b = u - 128 */
+    uaddw v8.8h, v2.8h, v5.8b /* q2 = v - 128 */
+    ld1 {v0.8b}, [Y], 8
+    smull v20.4s, v6.4h, v1.4h[1] /* multiply by -11277 */
+    smlal v20.4s, v8.4h, v1.4h[2] /* multiply by -23401 */
+    smull2 v22.4s, v6.8h, v1.4h[1] /* multiply by -11277 */
+    smlal2 v22.4s, v8.8h, v1.4h[2] /* multiply by -23401 */
+    sri v25.8h, v21.8h, #5
+    smull v24.4s, v8.4h, v1.4h[0] /* multiply by 22971 */
+    smull2 v26.4s, v8.8h, v1.4h[0] /* multiply by 22971 */
+    prfm PLDL1KEEP, [U, #64]
+    prfm PLDL1KEEP, [V, #64]
+    prfm PLDL1KEEP, [Y, #64]
+    sri v25.8h, v29.8h, #11
+.endif
+    do_store \bpp, 8
+    smull v28.4s, v6.4h, v1.4h[3] /* multiply by 29033 */
+    smull2 v30.4s, v6.8h, v1.4h[3] /* multiply by 29033 */
+.endm
+
+.macro do_yuv_to_rgb
+    do_yuv_to_rgb_stage1
+    do_yuv_to_rgb_stage2
+.endm
+
+/* Apple gas crashes on adrl, work around that by using adr.
+ * But this requires a copy of these constants for each function.
+ */
+
+.balign 16
+jsimd_ycc_\colorid\()_neon_consts:
+    .short 0, 0, 0, 0
+    .short 22971, -11277, -23401, 29033
+    .short -128, -128, -128, -128
+    .short -128, -128, -128, -128
+
+asm_function jsimd_ycc_\colorid\()_convert_neon
+    OUTPUT_WIDTH .req x0
+    INPUT_BUF .req x1
+    INPUT_ROW .req x2
+    OUTPUT_BUF .req x3
+    NUM_ROWS .req x4
+
+    INPUT_BUF0 .req x5
+    INPUT_BUF1 .req x6
+    INPUT_BUF2 .req INPUT_BUF
+
+    RGB .req x7
+    Y .req x8
+    U .req x9
+    V .req x10
+    N .req x15 /* pixels remaining in the current row */
+
+    sub sp, sp, 336
+    str x15, [sp], 16
+    /* Load constants to d1, d2, d3 (v0.4h is just used for padding) */
+    adr x15, jsimd_ycc_\colorid\()_neon_consts
+    /* Save NEON registers */
+    st1 {v0.8b - v3.8b}, [sp], 32
+    st1 {v4.8b - v7.8b}, [sp], 32
+    st1 {v8.8b - v11.8b}, [sp], 32
+    st1 {v12.8b - v15.8b}, [sp], 32
+    st1 {v16.8b - v19.8b}, [sp], 32
+    st1 {v20.8b - v23.8b}, [sp], 32
+    st1 {v24.8b - v27.8b}, [sp], 32
+    st1 {v28.8b - v31.8b}, [sp], 32
+    ld1 {v0.4h, v1.4h}, [x15], 16
+    ld1 {v2.8h}, [x15]
+
+    /* Save ARM registers and handle input arguments */
+    /* push {x4, x5, x6, x7, x8, x9, x10, x30} */
+    stp x4, x5, [sp], 16
+    stp x6, x7, [sp], 16
+    stp x8, x9, [sp], 16
+    stp x10, x30, [sp], 16
+    ldr INPUT_BUF0, [INPUT_BUF]
+    ldr INPUT_BUF1, [INPUT_BUF, 8]
+    ldr INPUT_BUF2, [INPUT_BUF, 16]
+    .unreq INPUT_BUF
+
+    /* Initially set v10, v11.4h, v12.8b, d13 to 0xFF */
+    movi v10.16b, #255
+    movi v13.16b, #255
+
+    /* Outer loop over scanlines */
+    cmp NUM_ROWS, #1
+    blt 9f
+0: /* per-scanline loop: fetch Y/U/V row pointers, reset pixel count */
+    lsl x16, INPUT_ROW, #3
+    ldr Y, [INPUT_BUF0, x16]
+    ldr U, [INPUT_BUF1, x16]
+    mov N, OUTPUT_WIDTH
+    ldr V, [INPUT_BUF2, x16]
+    add INPUT_ROW, INPUT_ROW, #1
+    ldr RGB, [OUTPUT_BUF], #8
+
+    /* Inner loop over pixels */
+    subs N, N, #8
+    blt 3f
+    do_load 8
+    do_yuv_to_rgb_stage1
+    subs N, N, #8
+    blt 2f
+1: /* pipelined main loop, 8 pixels per iteration */
+    do_yuv_to_rgb_stage2_store_load_stage1
+    subs N, N, #8
+    bge 1b
+2: /* drain the pipeline */
+    do_yuv_to_rgb_stage2
+    do_store \bpp, 8
+    tst N, #7
+    beq 8f
+3: /* handle the remaining <8 pixels in 4/2/1 chunks */
+    tst N, #4
+    beq 3f
+    do_load 4
+3: /* local label deliberately reused; the beq 3f above targets here */
+    tst N, #2
+    beq 4f
+    do_load 2
+4:
+    tst N, #1
+    beq 5f
+    do_load 1
+5:
+    do_yuv_to_rgb
+    tst N, #4
+    beq 6f
+    do_store \bpp, 4
+6:
+    tst N, #2
+    beq 7f
+    do_store \bpp, 2
+7:
+    tst N, #1
+    beq 8f
+    do_store \bpp, 1
+8:
+    subs NUM_ROWS, NUM_ROWS, #1
+    bgt 0b
+9:
+    /* Restore all registers and return */
+    sub sp, sp, #336
+    ldr x15, [sp], 16
+    ld1 {v0.8b - v3.8b}, [sp], 32
+    ld1 {v4.8b - v7.8b}, [sp], 32
+    ld1 {v8.8b - v11.8b}, [sp], 32
+    ld1 {v12.8b - v15.8b}, [sp], 32
+    ld1 {v16.8b - v19.8b}, [sp], 32
+    ld1 {v20.8b - v23.8b}, [sp], 32
+    ld1 {v24.8b - v27.8b}, [sp], 32
+    ld1 {v28.8b - v31.8b}, [sp], 32
+    /* pop {r4, r5, r6, r7, r8, r9, r10, pc} */
+    ldp x4, x5, [sp], 16
+    ldp x6, x7, [sp], 16
+    ldp x8, x9, [sp], 16
+    ldp x10, x30, [sp], 16
+    br x30
+    .unreq OUTPUT_WIDTH
+    .unreq INPUT_ROW
+    .unreq OUTPUT_BUF
+    .unreq NUM_ROWS
+    .unreq INPUT_BUF0
+    .unreq INPUT_BUF1
+    .unreq INPUT_BUF2
+    .unreq RGB
+    .unreq Y
+    .unreq U
+    .unreq V
+    .unreq N
+
+.purgem do_yuv_to_rgb
+.purgem do_yuv_to_rgb_stage1
+.purgem do_yuv_to_rgb_stage2
+.purgem do_yuv_to_rgb_stage2_store_load_stage1
+.endm
+
+/*--------------------------------- id ----- bpp R rsize G gsize B bsize defsize */
+generate_jsimd_ycc_rgb_convert_neon extrgb, 24, 0, .4h, 1, .4h, 2, .4h, .8b
+generate_jsimd_ycc_rgb_convert_neon extbgr, 24, 2, .4h, 1, .4h, 0, .4h, .8b
+generate_jsimd_ycc_rgb_convert_neon extrgbx, 32, 0, .4h, 1, .4h, 2, .4h, .8b
+generate_jsimd_ycc_rgb_convert_neon extbgrx, 32, 2, .4h, 1, .4h, 0, .4h, .8b
+generate_jsimd_ycc_rgb_convert_neon extxbgr, 32, 3, .4h, 2, .4h, 1, .4h, .8b
+generate_jsimd_ycc_rgb_convert_neon extxrgb, 32, 1, .4h, 2, .4h, 3, .4h, .8b
+generate_jsimd_ycc_rgb_convert_neon rgb565, 16, 0, .4h, 0, .4h, 0, .4h, .8b
+.purgem do_load
+.purgem do_store