OpenJPH
Open-source implementation of JPEG2000 Part-15
ojph_transform_avx2.cpp
//***************************************************************************/
// This software is released under the 2-Clause BSD license, included
// below.
//
// Copyright (c) 2019, Aous Naman
// Copyright (c) 2019, Kakadu Software Pty Ltd, Australia
// Copyright (c) 2019, The University of New South Wales, Australia
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
// TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//***************************************************************************/
// This file is part of the OpenJPH software implementation.
// File: ojph_transform_avx2.cpp
// Author: Aous Naman
// Date: 28 August 2019
//***************************************************************************/

#include <cstdio>

#include "ojph_defs.h"
#include "ojph_arch.h"
#include "ojph_mem.h"
#include "ojph_transform.h"
#include "ojph_transform_local.h"

#include <immintrin.h>

namespace ojph {
  namespace local {

    //////////////////////////////////////////////////////////////////////////
    void avx2_rev_vert_wvlt_fwd_predict(const line_buf* line_src1,
                                        const line_buf* line_src2,
                                        line_buf *line_dst, ui32 repeat)
    {
      si32 *dst = line_dst->i32;
      const si32 *src1 = line_src1->i32, *src2 = line_src2->i32;

      // process 8 samples per iteration, rounding repeat up to a multiple of 8
      for (ui32 i = (repeat + 7) >> 3; i > 0; --i, dst+=8, src1+=8, src2+=8)
      {
        __m256i s1 = _mm256_load_si256((__m256i*)src1);
        __m256i s2 = _mm256_load_si256((__m256i*)src2);
        __m256i d = _mm256_load_si256((__m256i*)dst);
        s1 = _mm256_srai_epi32(_mm256_add_epi32(s1, s2), 1);
        d = _mm256_sub_epi32(d, s1);
        _mm256_store_si256((__m256i*)dst, d);
      }
    }
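
    // Illustrative scalar equivalent of the loop above (a sketch, not part
    // of OpenJPH): the predict step subtracts from each high-pass row the
    // average of its two neighbouring low-pass rows.  Unlike the AVX2 loop,
    // which rounds repeat up to a multiple of 8 and relies on padded,
    // 32-byte-aligned line buffers, this version touches exactly repeat
    // samples.
    static inline
    void scalar_rev_vert_wvlt_fwd_predict(const si32* src1, const si32* src2,
                                          si32* dst, ui32 repeat)
    {
      for (ui32 i = 0; i < repeat; ++i)
        dst[i] -= (src1[i] + src2[i]) >> 1;  // floor((s1 + s2) / 2)
    }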

    //////////////////////////////////////////////////////////////////////////
    void avx2_rev_vert_wvlt_fwd_update(const line_buf* line_src1,
                                       const line_buf* line_src2,
                                       line_buf *line_dst, ui32 repeat)
    {
      si32 *dst = line_dst->i32;
      const si32 *src1 = line_src1->i32, *src2 = line_src2->i32;

      __m256i offset = _mm256_set1_epi32(2);
      for (ui32 i = (repeat + 7) >> 3; i > 0; --i, dst+=8, src1+=8, src2+=8)
      {
        __m256i s1 = _mm256_load_si256((__m256i*)src1);
        s1 = _mm256_add_epi32(s1, offset);
        __m256i s2 = _mm256_load_si256((__m256i*)src2);
        s2 = _mm256_add_epi32(s2, s1);
        __m256i d = _mm256_load_si256((__m256i*)dst);
        d = _mm256_add_epi32(d, _mm256_srai_epi32(s2, 2));
        _mm256_store_si256((__m256i*)dst, d);
      }
    }
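
    // Scalar sketch of the update step above (illustrative, not part of
    // OpenJPH): each low-pass row gains a rounded quarter of the sum of its
    // two neighbouring high-pass rows.  Because both lifting steps map
    // integers to integers, the pair can be undone exactly, which is what
    // makes the transform reversible.
    static inline
    void scalar_rev_vert_wvlt_fwd_update(const si32* src1, const si32* src2,
                                         si32* dst, ui32 repeat)
    {
      for (ui32 i = 0; i < repeat; ++i)
        dst[i] += (src1[i] + src2[i] + 2) >> 2;  // floor((s1 + s2 + 2) / 4)
    }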

    //////////////////////////////////////////////////////////////////////////
    void avx2_rev_horz_wvlt_fwd_tx(line_buf* line_src, line_buf *line_ldst,
                                   line_buf *line_hdst, ui32 width, bool even)
    {
      if (width > 1)
      {
        si32 *src = line_src->i32;
        si32 *ldst = line_ldst->i32, *hdst = line_hdst->i32;

        const ui32 L_width = (width + (even ? 1 : 0)) >> 1;
        const ui32 H_width = (width + (even ? 0 : 1)) >> 1;

        // symmetric extension
        src[-1] = src[1];
        src[width] = src[width-2];
        // predict
        const si32* sp = src + (even ? 1 : 0);
        si32 *dph = hdst;
        // gathers even-indexed lanes into the low 128 bits, odd-indexed
        // lanes into the high 128 bits
        const __m256i mask = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
        for (ui32 i = (H_width + 7) >> 3; i > 0; --i, dph+=8)
        {
          // Note: this loop does twice the work it needs to;
          // it could certainly be written better.
          __m256i s1 = _mm256_loadu_si256((__m256i*)(sp-1));
          __m256i s2 = _mm256_loadu_si256((__m256i*)(sp+1));
          __m256i d = _mm256_loadu_si256((__m256i*)sp);
          s1 = _mm256_srai_epi32(_mm256_add_epi32(s1, s2), 1);
          __m256i d1 = _mm256_sub_epi32(d, s1);
          sp += 8;
          s1 = _mm256_loadu_si256((__m256i*)(sp-1));
          s2 = _mm256_loadu_si256((__m256i*)(sp+1));
          d = _mm256_loadu_si256((__m256i*)sp);
          s1 = _mm256_srai_epi32(_mm256_add_epi32(s1, s2), 1);
          __m256i d2 = _mm256_sub_epi32(d, s1);
          sp += 8;
          d1 = _mm256_permutevar8x32_epi32(d1, mask);
          d2 = _mm256_permutevar8x32_epi32(d2, mask);
          d = _mm256_permute2x128_si256(d1, d2, (2 << 4) | 0);
          _mm256_store_si256((__m256i*)dph, d);
        }

        // symmetric extension
        hdst[-1] = hdst[0];
        hdst[H_width] = hdst[H_width-1];
        // update; 16 source samples yield 8 low-pass outputs per iteration
        sp = src + (even ? 0 : 1);
        const si32* sph = hdst + (even ? 0 : 1);
        si32 *dpl = ldst;
        __m256i offset = _mm256_set1_epi32(2);
        for (ui32 i = (L_width + 7) >> 3; i > 0; --i, sp+=16, sph+=8, dpl+=8)
        {
          __m256i s1 = _mm256_loadu_si256((__m256i*)(sph-1));
          s1 = _mm256_add_epi32(s1, offset);
          __m256i s2 = _mm256_loadu_si256((__m256i*)sph);
          s2 = _mm256_add_epi32(s2, s1);
          __m256i d1 = _mm256_loadu_si256((__m256i*)sp);
          __m256i d2 = _mm256_loadu_si256((__m256i*)sp + 1);
          d1 = _mm256_permutevar8x32_epi32(d1, mask);
          d2 = _mm256_permutevar8x32_epi32(d2, mask);
          __m256i d = _mm256_permute2x128_si256(d1, d2, (2 << 4) | 0);
          d = _mm256_add_epi32(d, _mm256_srai_epi32(s2, 2));
          _mm256_store_si256((__m256i*)dpl, d);
        }
      }
      else
      {
        if (even)
          line_ldst->i32[0] = line_src->i32[0];
        else
          line_hdst->i32[0] = line_src->i32[0] << 1;
      }
    }
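
    // In scalar terms, the horizontal transform above computes the
    // reversible 5/3 lifting on a single line (a sketch; the exact index
    // alignment depends on the `even` flag and on the symmetric extension
    // performed above):
    //
    //   hdst[i] = src[2i+1] - ((src[2i] + src[2i+2]) >> 1);       // predict
    //   ldst[i] = src[2i]   + ((hdst[i-1] + hdst[i] + 2) >> 2);   // update
    //
    // The permute mask (0, 2, 4, 6, 1, 3, 5, 7) packs the even-indexed
    // lanes of a register into its lower 128 bits, and
    // _mm256_permute2x128_si256 with imm 0x20 concatenates the lower halves
    // of two registers; together they de-interleave 16 consecutive samples
    // into 8 band outputs per store.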

    //////////////////////////////////////////////////////////////////////////
    void avx2_rev_vert_wvlt_bwd_predict(const line_buf* line_src1,
                                        const line_buf* line_src2,
                                        line_buf *line_dst, ui32 repeat)
    {
      si32 *dst = line_dst->i32;
      const si32 *src1 = line_src1->i32, *src2 = line_src2->i32;

      for (ui32 i = (repeat + 7) >> 3; i > 0; --i, dst+=8, src1+=8, src2+=8)
      {
        __m256i s1 = _mm256_load_si256((__m256i*)src1);
        __m256i s2 = _mm256_load_si256((__m256i*)src2);
        __m256i d = _mm256_load_si256((__m256i*)dst);
        s1 = _mm256_srai_epi32(_mm256_add_epi32(s1, s2), 1);
        d = _mm256_add_epi32(d, s1);
        _mm256_store_si256((__m256i*)dst, d);
      }
    }
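
    // Illustrative scalar form of the inverse predict step above (not part
    // of OpenJPH): it adds back exactly what the forward predict removed,
    //
    //   dst[i] += (src1[i] + src2[i]) >> 1;
    //
    // so the high-pass samples are restored without loss.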

    //////////////////////////////////////////////////////////////////////////
    void avx2_rev_vert_wvlt_bwd_update(const line_buf* line_src1,
                                       const line_buf* line_src2,
                                       line_buf *line_dst, ui32 repeat)
    {
      si32 *dst = line_dst->i32;
      const si32 *src1 = line_src1->i32, *src2 = line_src2->i32;

      __m256i offset = _mm256_set1_epi32(2);
      for (ui32 i = (repeat + 7) >> 3; i > 0; --i, dst+=8, src1+=8, src2+=8)
      {
        __m256i s1 = _mm256_load_si256((__m256i*)src1);
        s1 = _mm256_add_epi32(s1, offset);
        __m256i s2 = _mm256_load_si256((__m256i*)src2);
        s2 = _mm256_add_epi32(s2, s1);
        __m256i d = _mm256_load_si256((__m256i*)dst);
        d = _mm256_sub_epi32(d, _mm256_srai_epi32(s2, 2));
        _mm256_store_si256((__m256i*)dst, d);
      }
    }
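
    // Scalar form of the inverse update step above (illustrative):
    //
    //   dst[i] -= (src1[i] + src2[i] + 2) >> 2;
    //
    // the exact integer inverse of the forward update.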

    //////////////////////////////////////////////////////////////////////////
    void avx2_rev_horz_wvlt_bwd_tx(line_buf* line_dst, line_buf *line_lsrc,
                                   line_buf *line_hsrc, ui32 width, bool even)
    {
      if (width > 1)
      {
        si32 *lsrc = line_lsrc->i32, *hsrc = line_hsrc->i32;
        si32 *dst = line_dst->i32;

        const ui32 L_width = (width + (even ? 1 : 0)) >> 1;
        const ui32 H_width = (width + (even ? 0 : 1)) >> 1;

        // symmetric extension
        hsrc[-1] = hsrc[0];
        hsrc[H_width] = hsrc[H_width-1];
        // inverse update
        const si32 *sph = hsrc + (even ? 0 : 1);
        si32 *spl = lsrc;
        __m256i offset = _mm256_set1_epi32(2);
        for (ui32 i = (L_width + 7) >> 3; i > 0; --i, sph+=8, spl+=8)
        {
          __m256i s1 = _mm256_loadu_si256((__m256i*)(sph-1));
          s1 = _mm256_add_epi32(s1, offset);
          __m256i s2 = _mm256_loadu_si256((__m256i*)sph);
          s2 = _mm256_add_epi32(s2, s1);
          __m256i d = _mm256_load_si256((__m256i*)spl);
          d = _mm256_sub_epi32(d, _mm256_srai_epi32(s2, 2));
          _mm256_store_si256((__m256i*)spl, d);
        }

        // symmetric extension
        lsrc[-1] = lsrc[0];
        lsrc[L_width] = lsrc[L_width - 1];
        // inverse predict and combine
        si32 *dp = dst + (even ? 0 : -1);
        spl = lsrc + (even ? 0 : -1);
        sph = hsrc;
        // renamed from width to avoid shadowing the function parameter
        ui32 aug_width = L_width + (even ? 0 : 1);
        for (ui32 i = (aug_width + 7) >> 3; i > 0; --i, sph+=8, spl+=8, dp+=16)
        {
          __m256i s1 = _mm256_loadu_si256((__m256i*)spl);
          __m256i s2 = _mm256_loadu_si256((__m256i*)(spl+1));
          __m256i d = _mm256_load_si256((__m256i*)sph);
          s2 = _mm256_srai_epi32(_mm256_add_epi32(s1, s2), 1);
          d = _mm256_add_epi32(d, s2);
          s2 = _mm256_unpackhi_epi32(s1, d);
          s1 = _mm256_unpacklo_epi32(s1, d);
          d = _mm256_permute2x128_si256(s1, s2, (2 << 4) | 0);
          _mm256_storeu_si256((__m256i*)dp, d);
          d = _mm256_permute2x128_si256(s1, s2, (3 << 4) | 1);
          _mm256_storeu_si256((__m256i*)dp + 1, d);
        }
      }
      else
      {
        if (even)
          line_dst->i32[0] = line_lsrc->i32[0];
        else
          line_dst->i32[0] = line_hsrc->i32[0] >> 1;
      }
    }
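
    // The first loop above undoes the update in place, so lsrc ends up
    // holding the even (low-pass) output samples; the second loop then
    // recovers each odd sample as roughly
    //
    //   dst[2i+1] = hsrc[i] + ((lsrc[i] + lsrc[i+1]) >> 1);
    //
    // and interleaves the two bands: _mm256_unpacklo_epi32 and
    // _mm256_unpackhi_epi32 pair L and H lanes within each 128-bit half,
    // and the two _mm256_permute2x128_si256 calls route those halves into
    // the two consecutive 256-bit stores.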
  }
}