iLab Neuromorphic Robotics Toolkit  0.1
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Groups Pages
MgzJVideoReaderHelpers.H
Go to the documentation of this file.
1 /*! @file
2  @author Randolph Voorhies (voorhies@usc.edu)
3  @copyright GNU Public License (GPL v3)
4  @section License
5  @verbatim
6  // ////////////////////////////////////////////////////////////////////////
7  // The iLab Neuromorphic Robotics Toolkit (NRT) //
8  // Copyright 2010-2012 by the University of Southern California (USC) //
9  // and the iLab at USC. //
10  // //
11  // iLab - University of Southern California //
12  // Hedco Neurosciences Building, Room HNB-10 //
13  // Los Angeles, Ca 90089-2520 - USA //
14  // //
15  // See http://ilab.usc.edu for information about this project. //
16  // ////////////////////////////////////////////////////////////////////////
17  // This file is part of The iLab Neuromorphic Robotics Toolkit. //
18  // //
19  // The iLab Neuromorphic Robotics Toolkit is free software: you can //
20  // redistribute it and/or modify it under the terms of the GNU General //
21  // Public License as published by the Free Software Foundation, either //
22  // version 3 of the License, or (at your option) any later version. //
23  // //
24  // The iLab Neuromorphic Robotics Toolkit is distributed in the hope //
25  // that it will be useful, but WITHOUT ANY WARRANTY; without even the //
26  // implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR //
27  // PURPOSE. See the GNU General Public License for more details. //
28  // //
29  // You should have received a copy of the GNU General Public License //
30  // along with The iLab Neuromorphic Robotics Toolkit. If not, see //
31  // <http://www.gnu.org/licenses/>. //
32  // ////////////////////////////////////////////////////////////////////////
33  @endverbatim */
34 
35 
36 #ifndef INCLUDE_NRT_IMAGEPROC_IO_IMAGESOURCE_VIDEOREADERS_DETAILS_MGZJVIDEOREADERHELPERS_H
37 #define INCLUDE_NRT_IMAGEPROC_IO_IMAGESOURCE_VIDEOREADERS_DETAILS_MGZJVIDEOREADERHELPERS_H
38 
39 #include <nrt/config.h>
41 
42 namespace nrt
43 {
44  namespace mgzj
45  {
46  struct JournalEntry
47  {
48  uint64 start_byte;
49  uint64 end_byte;
50  byte pix_type;
51  uint32 width;
52  uint32 height;
53  uint32 flags;
54  byte byte_swap;
55  };
56 
57  // FIXME: will need to better sync this up with GenericImage
58  // DO not change this, as these numbers are based on legacy code from the INVT
59  enum NativeType
60  {
61  NONE = 0,
62  RGB_U8 = 1,
63  GRAY_U8 = 2,
64  GRAY_F32 = 3,
65  VIDEO = 4, //TODO change me to whatever you want
66  RGB_F32 = 5,
67  HSV_U8 = 9,
68  };
69 
70  NativeType getNativeType(GenericImage img);
71 
72  /// TEMP class for the color conversions for now. TODO: this will need to move to videoFrame
74  {
75  public:
77  {
78  BITS_OUT = 16;
79  VIDEOYUV_Y_OFFSET = 16;
80  VIDEOYUV_Y_RANGE = 219; // Y range = [16,235]
81  VIDEOYUV_UV_OFFSET = 128;
82  VIDEOYUV_UV_RANGE = 224; // UV range = [16,240]
83 
84  JPEGYUV_RGB_Y = 1.0;
85  JPEGYUV_R_V = 1.402;
86  JPEGYUV_G_U = -0.34414;
87  JPEGYUV_G_V = -0.71414;
88  JPEGYUV_B_U = 1.772;
89 
90  VIDEOYUV_RGB_Y = 1.0 * (255.0/VIDEOYUV_Y_RANGE);
91  VIDEOYUV_R_V = 1.402 * (255.0/VIDEOYUV_UV_RANGE);
92  VIDEOYUV_G_U = -0.34414 * (255.0/VIDEOYUV_UV_RANGE);
93  VIDEOYUV_G_V = -0.71414 * (255.0/VIDEOYUV_UV_RANGE);
94  VIDEOYUV_B_U = 1.772 * (255.0/VIDEOYUV_UV_RANGE);
95 
96  // so that we get proper rounding in fixed-point integer arithmetic:
97  const int half =
98  BITS_OUT > 0
99  ? 1<<(BITS_OUT-1)
100  : 0;
101 
102  const int scale = 1<<BITS_OUT;
103 
104  for (int i = 0; i < 256; i++)
105  {
106  const int y = i-VIDEOYUV_Y_OFFSET;
107  const int uv = i-VIDEOYUV_UV_OFFSET;
108 
109  RGB_Y_tab[i] = half+int(0.5 + y * VIDEOYUV_RGB_Y * scale);
110  R_V_tab[i] = int(0.5 + uv * VIDEOYUV_R_V * scale);
111  // FIXME should probably have -0.5 instead of +0.5 here and
112  // flip the sign of G_U_tab and G_V_tab:
113  G_U_tab[i] = int(0.5 - uv * VIDEOYUV_G_U * scale);
114  G_V_tab[i] = int(0.5 - uv * VIDEOYUV_G_V * scale);
115  B_U_tab[i] = int(0.5 + uv * VIDEOYUV_B_U * scale);
116  }
117  }
118 
119  void yuv422_to_rgb24_c(byte* dst,
120  const int w, const int h,
121  const byte* yuv422ptr,
122  const bool byteswap)
123  {
124  if (byteswap)
125  for (int j = 0; j < h; ++j)
126  for (int i = 0; i < w; i += 2)
127  {
128  // we have 2 luminance pixels per chroma pair
129 
130  const byte y1 = yuv422ptr[0];
131  const byte u = yuv422ptr[1];
132  const byte y2 = yuv422ptr[2];
133  const byte v = yuv422ptr[3];
134 
135  yuv422ptr += 4;
136 
137  const int r_v = R_V_tab[v];
138  const int g_uv = - G_U_tab[u] - G_V_tab[v];
139  const int b_u = B_U_tab[u];
140 
141  // first luminance pixel:
142  const int rgb_y1 = RGB_Y_tab[y1];
143 
144  *dst++ = clamped_convert<byte>((rgb_y1 + r_v) >> BITS_OUT);
145  *dst++ = clamped_convert<byte>((rgb_y1 + g_uv) >> BITS_OUT);
146  *dst++ = clamped_convert<byte>((rgb_y1 + b_u) >> BITS_OUT);
147 
148  // second luminance pixel:
149  const int rgb_y2 = RGB_Y_tab[y2];
150 
151  *dst++ = clamped_convert<byte>((rgb_y2 + r_v) >> BITS_OUT);
152  *dst++ = clamped_convert<byte>((rgb_y2 + g_uv) >> BITS_OUT);
153  *dst++ = clamped_convert<byte>((rgb_y2 + b_u) >> BITS_OUT);
154  }
155 
156  else // no byteswap
157  for (int j = 0; j < h; ++j)
158  for (int i = 0; i < w; i += 2)
159  {
160  // we have 2 luminance pixels per chroma pair
161 
162  const byte y1 = yuv422ptr[1];
163  const byte u = yuv422ptr[0];
164  const byte y2 = yuv422ptr[3];
165  const byte v = yuv422ptr[2];
166 
167  yuv422ptr += 4;
168 
169  const int r_v = R_V_tab[v];
170  const int g_uv = - G_U_tab[u] - G_V_tab[v];
171  const int b_u = B_U_tab[u];
172 
173  // first luminance pixel:
174  const int rgb_y1 = RGB_Y_tab[y1];
175 
176  *dst++ = clamped_convert<byte>((rgb_y1 + r_v) >> BITS_OUT);
177  *dst++ = clamped_convert<byte>((rgb_y1 + g_uv) >> BITS_OUT);
178  *dst++ = clamped_convert<byte>((rgb_y1 + b_u) >> BITS_OUT);
179 
180  // second luminance pixel:
181  const int rgb_y2 = RGB_Y_tab[y2];
182 
183  *dst++ = clamped_convert<byte>((rgb_y2 + r_v) >> BITS_OUT);
184  *dst++ = clamped_convert<byte>((rgb_y2 + g_uv) >> BITS_OUT);
185  *dst++ = clamped_convert<byte>((rgb_y2 + b_u) >> BITS_OUT);
186  }
187  }
188 
189  void yv12_to_rgb24_c(unsigned char* dst,
190  int dst_stride,
191  const unsigned char* y_src,
192  const unsigned char* u_src,
193  const unsigned char* v_src,
194  int y_stride,
195  int uv_stride,
196  int width,
197  int height)
198  {
199  if (width & 1) NRT_FATAL("width must be even");
200  if (height & 1) NRT_FATAL("height must be even");
201 
202  const int dst_dif = 6 * dst_stride - 3 * width;
203  int y_dif = 2 * y_stride - width;
204 
205  unsigned char* dst2 = dst + 3 * dst_stride;
206  const unsigned char* y_src2 = y_src + y_stride;
207 
208  if (height < 0) { /* flip image? */
209  height = -height;
210  y_src += (height - 1) * y_stride;
211  y_src2 = y_src - y_stride;
212  u_src += (height / 2 - 1) * uv_stride;
213  v_src += (height / 2 - 1) * uv_stride;
214  y_dif = -width - 2 * y_stride;
215  uv_stride = -uv_stride;
216  }
217 
218  for (int y = height / 2; y; y--) {
219  for (int x = 0; x < width / 2; x++) {
220  const int u = u_src[x];
221  const int v = v_src[x];
222 
223  const int r_v = R_V_tab[v];
224  const int g_uv = - G_U_tab[u] - G_V_tab[v];
225  const int b_u = B_U_tab[u];
226 
227  {
228  const int rgb_y = RGB_Y_tab[*y_src];
229  const int r = (rgb_y + r_v) >> BITS_OUT;
230  const int g = (rgb_y + g_uv) >> BITS_OUT;
231  const int b = (rgb_y + b_u) >> BITS_OUT;
232  dst[0] = clamped_convert<unsigned char>(r);
233  dst[1] = clamped_convert<unsigned char>(g);
234  dst[2] = clamped_convert<unsigned char>(b);
235  y_src++;
236  }
237  {
238  const int rgb_y = RGB_Y_tab[*y_src];
239  const int r = (rgb_y + r_v) >> BITS_OUT;
240  const int g = (rgb_y + g_uv) >> BITS_OUT;
241  const int b = (rgb_y + b_u) >> BITS_OUT;
242  dst[3] = clamped_convert<unsigned char>(r);
243  dst[4] = clamped_convert<unsigned char>(g);
244  dst[5] = clamped_convert<unsigned char>(b);
245  y_src++;
246  }
247  {
248  const int rgb_y = RGB_Y_tab[*y_src2];
249  const int r = (rgb_y + r_v) >> BITS_OUT;
250  const int g = (rgb_y + g_uv) >> BITS_OUT;
251  const int b = (rgb_y + b_u) >> BITS_OUT;
252  dst2[0] = clamped_convert<unsigned char>(r);
253  dst2[1] = clamped_convert<unsigned char>(g);
254  dst2[2] = clamped_convert<unsigned char>(b);
255  y_src2++;
256  }
257  {
258  const int rgb_y = RGB_Y_tab[*y_src2];
259  const int r = (rgb_y + r_v) >> BITS_OUT;
260  const int g = (rgb_y + g_uv) >> BITS_OUT;
261  const int b = (rgb_y + b_u) >> BITS_OUT;
262  dst2[3] = clamped_convert<unsigned char>(r);
263  dst2[4] = clamped_convert<unsigned char>(g);
264  dst2[5] = clamped_convert<unsigned char>(b);
265  y_src2++;
266  }
267 
268  dst += 6;
269  dst2 += 6;
270  }
271 
272  dst += dst_dif;
273  dst2 += dst_dif;
274 
275  y_src += y_dif;
276  y_src2 += y_dif;
277 
278  u_src += uv_stride;
279  v_src += uv_stride;
280  }
281  }
282 
283  private:
284  // Use this many extra bits in our fixed-point approximation to
285  // floating-point arithmetic:
286  int BITS_OUT;
287  int VIDEOYUV_Y_OFFSET ;
288  int VIDEOYUV_Y_RANGE ; // Y range = [16,235]
289  int VIDEOYUV_UV_OFFSET;
290  int VIDEOYUV_UV_RANGE ; // UV range = [16,240]
291 
292  double JPEGYUV_RGB_Y;
293  double JPEGYUV_R_V ;
294  double JPEGYUV_G_U ;
295  double JPEGYUV_G_V ;
296  double JPEGYUV_B_U ;
297 
298  double VIDEOYUV_RGB_Y;
299  double VIDEOYUV_R_V ;
300  double VIDEOYUV_G_U ;
301  double VIDEOYUV_G_V ;
302  double VIDEOYUV_B_U ;
303 
304  // rgb lookup tables
305  int RGB_Y_tab[256];
306  int B_U_tab[256];
307  int G_U_tab[256];
308  int G_V_tab[256];
309  int R_V_tab[256];
310  };
311 
312 
313 
314  }
315 }
316 
317 #endif // INCLUDE_NRT_IMAGEPROC_IO_IMAGESOURCE_VIDEOREADERS_DETAILS_MGZJVIDEOREADERHELPERS_H
318