// Copyright 2012 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you
// may not use this file except in compliance with the License. You
// may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

15 | #ifndef WEBGL_LOADER_COMPRESS_H_
|
---|
16 | #define WEBGL_LOADER_COMPRESS_H_
|
---|
17 |
|
---|
18 | #include <math.h>
|
---|
19 |
|
---|
20 | #include "base.h"
|
---|
21 | #include "bounds.h"
|
---|
22 | #include "stream.h"
|
---|
23 | #include "utf8.h"
|
---|
24 |
|
---|
25 | namespace webgl_loader {
|
---|
26 |
|
---|
27 | void AttribsToQuantizedAttribs(const AttribList& interleaved_attribs,
|
---|
28 | const BoundsParams& bounds_params,
|
---|
29 | QuantizedAttribList* quantized_attribs) {
|
---|
30 | quantized_attribs->resize(interleaved_attribs.size());
|
---|
31 | for (size_t i = 0; i < interleaved_attribs.size(); i += 8) {
|
---|
32 | for (size_t j = 0; j < 8; ++j) {
|
---|
33 | quantized_attribs->at(i + j) = Quantize(interleaved_attribs[i + j],
|
---|
34 | bounds_params.mins[j],
|
---|
35 | bounds_params.scales[j],
|
---|
36 | bounds_params.outputMaxes[j]);
|
---|
37 | }
|
---|
38 | }
|
---|
39 | }
|
---|
40 |
|
---|
41 | uint16 ZigZag(int16 word) {
|
---|
42 | return (word >> 15) ^ (word << 1);
|
---|
43 | }
|
---|
44 |
|
---|
45 | void CompressAABBToUtf8(const Bounds& bounds,
|
---|
46 | const BoundsParams& total_bounds,
|
---|
47 | ByteSinkInterface* utf8) {
|
---|
48 | const int maxPosition = (1 << 14) - 1; // 16383;
|
---|
49 | uint16 mins[3] = { 0 };
|
---|
50 | uint16 maxes[3] = { 0 };
|
---|
51 | for (int i = 0; i < 3; ++i) {
|
---|
52 | float total_min = total_bounds.mins[i];
|
---|
53 | float total_scale = total_bounds.scales[i];
|
---|
54 | mins[i] = Quantize(bounds.mins[i], total_min, total_scale, maxPosition);
|
---|
55 | maxes[i] = Quantize(bounds.maxes[i], total_min, total_scale, maxPosition);
|
---|
56 | }
|
---|
57 | for (int i = 0; i < 3; ++i) {
|
---|
58 | Uint16ToUtf8(mins[i], utf8);
|
---|
59 | }
|
---|
60 | for (int i = 0; i < 3; ++i) {
|
---|
61 | Uint16ToUtf8(maxes[i] - mins[i], utf8);
|
---|
62 | }
|
---|
63 | }
|
---|
64 |
|
---|
65 | void CompressIndicesToUtf8(const OptimizedIndexList& list,
|
---|
66 | ByteSinkInterface* utf8) {
|
---|
67 | // For indices, we don't do delta from the most recent index, but
|
---|
68 | // from the high water mark. The assumption is that the high water
|
---|
69 | // mark only ever moves by one at a time. Foruntately, the vertex
|
---|
70 | // optimizer does that for us, to optimize for per-transform vertex
|
---|
71 | // fetch order.
|
---|
72 | uint16 index_high_water_mark = 0;
|
---|
73 | for (size_t i = 0; i < list.size(); ++i) {
|
---|
74 | const int index = list[i];
|
---|
75 | CHECK(index >= 0);
|
---|
76 | CHECK(index <= index_high_water_mark);
|
---|
77 | CHECK(Uint16ToUtf8(index_high_water_mark - index, utf8));
|
---|
78 | if (index == index_high_water_mark) {
|
---|
79 | ++index_high_water_mark;
|
---|
80 | }
|
---|
81 | }
|
---|
82 | }
|
---|
83 |
|
---|
84 | void CompressQuantizedAttribsToUtf8(const QuantizedAttribList& attribs,
|
---|
85 | ByteSinkInterface* utf8) {
|
---|
86 | for (size_t i = 0; i < 8; ++i) {
|
---|
87 | // Use a transposed representation, and delta compression.
|
---|
88 | uint16 prev = 0;
|
---|
89 | for (size_t j = i; j < attribs.size(); j += 8) {
|
---|
90 | const uint16 word = attribs[j];
|
---|
91 | const uint16 za = ZigZag(static_cast<int16>(word - prev));
|
---|
92 | prev = word;
|
---|
93 | CHECK(Uint16ToUtf8(za, utf8));
|
---|
94 | }
|
---|
95 | }
|
---|
96 | }
|
---|
97 |
|
---|
// Compresses a mesh (quantized attributes plus an optimized index
// list) by scanning recently emitted triangles for shared edges. A
// matched edge lets the new triangle's indices be encoded as a small
// backreference and its third vertex be predicted by parallelogram
// prediction; unmatched triangles fall back to a simple
// last-referenced-vertex predictor. Attribute codes accumulate in
// |deltas_| and index codes in |codes_|; Compress() emits both as
// UTF-8.
class EdgeCachingCompressor {
 public:
  // Assuming that the vertex cache optimizer LRU is 32 vertices, we
  // expect ~64 triangles, and ~96 edges.
  static const size_t kMaxLruSize = 96;
  static const int kLruSentinel = -1;

  // |attribs| must be vertex-interleaved, 8 words per vertex (the
  // loops below index channels 0..7). |indices| is held by reference
  // and may be re-wound in place by Compress().
  EdgeCachingCompressor(const QuantizedAttribList& attribs,
                        OptimizedIndexList& indices)
      : attribs_(attribs),
        indices_(indices),
        deltas_(attribs.size()),
        index_high_water_mark_(0),
        lru_size_(0) {
    memset(last_attrib_, 0, sizeof(last_attrib_));
  }

  // Work in progress. Does not remotely work.
  // NOTE(review): |utf8| is never written by this routine yet, and the
  // LRU helpers it calls are unfinished (see LruEdgeOne/LruEdgeTwo).
  void CompressWithLRU(ByteSinkInterface* utf8) {
    size_t match_indices[3];
    size_t match_winding[3];
    for (size_t i = 0; i < indices_.size(); i += 3) {
      const uint16* triangle = &indices_[i];
      // Try to find edge matches to cheaply encode indices and employ
      // parallelogram prediction.
      const size_t num_matches = LruEdge(triangle,
                                         match_indices, match_winding);
      switch (num_matches) {
        case 0:
          LruEdgeZero(triangle);
          // No edges match, so use simple predictor.
          continue;
        case 1:
          LruEdgeOne(triangle[match_winding[1]],
                     triangle[match_winding[2]], match_indices[0]);
          break;
        case 2:
          LruEdgeTwo(triangle[match_winding[2]],
                     match_indices[0], match_indices[1]);
          break;
        case 3:
          LruEdgeThree(match_indices[0], match_indices[1], match_indices[2]);
          break;
        default:
          DumpDebug();
          CHECK(false);
      }
    }
  }

  // Instead of using an LRU cache of edges, simply scan the history
  // for matching edges.
  //
  // Three phases:
  //   1. accumulate per-vertex face cross products (normal prediction),
  //   2. store ZigZag-coded normal residues into channels 5..7 of
  //      |deltas_|,
  //   3. per triangle, search up to |kMaxLruSize| prior indices for a
  //      shared edge; on a hit use parallelogram prediction, otherwise
  //      the simple predictor. Finally emit |deltas_| then |codes_|.
  void Compress(ByteSinkInterface* utf8) {
    // TODO: do this pre-quantization.
    // Normal prediction.
    const size_t num_attribs = attribs_.size() / 8;
    std::vector<int> crosses(3 * num_attribs);
    for (size_t i = 0; i < indices_.size(); i += 3) {
      // Compute face cross products.
      const uint16 i0 = indices_[i + 0];
      const uint16 i1 = indices_[i + 1];
      const uint16 i2 = indices_[i + 2];
      int e1[3], e2[3], cross[3];
      e1[0] = attribs_[8*i1 + 0] - attribs_[8*i0 + 0];
      e1[1] = attribs_[8*i1 + 1] - attribs_[8*i0 + 1];
      e1[2] = attribs_[8*i1 + 2] - attribs_[8*i0 + 2];
      e2[0] = attribs_[8*i2 + 0] - attribs_[8*i0 + 0];
      e2[1] = attribs_[8*i2 + 1] - attribs_[8*i0 + 1];
      e2[2] = attribs_[8*i2 + 2] - attribs_[8*i0 + 2];
      cross[0] = e1[1] * e2[2] - e1[2] * e2[1];
      cross[1] = e1[2] * e2[0] - e1[0] * e2[2];
      cross[2] = e1[0] * e2[1] - e1[1] * e2[0];
      // Accumulate face cross product into each vertex.
      for (size_t j = 0; j < 3; ++j) {
        crosses[3*i0 + j] += cross[j];
        crosses[3*i1 + j] += cross[j];
        crosses[3*i2 + j] += cross[j];
      }
    }
    // Compute normal residues. Channels 5..7 are treated as a normal
    // quantized with a 511 bias; both the stored normal and the
    // predicted normal are scaled to length 511 before differencing.
    for (size_t idx = 0; idx < num_attribs; ++idx) {
      float pnx = crosses[3*idx + 0];
      float pny = crosses[3*idx + 1];
      float pnz = crosses[3*idx + 2];
      // NOTE(review): divides by zero if the accumulated cross product
      // is the zero vector (e.g. degenerate or unreferenced vertex) --
      // TODO confirm upstream guarantees this can't happen.
      const float pnorm = 511.0 / sqrt(pnx*pnx + pny*pny + pnz*pnz);
      pnx *= pnorm;
      pny *= pnorm;
      pnz *= pnorm;

      float nx = attribs_[8*idx + 5] - 511;
      float ny = attribs_[8*idx + 6] - 511;
      float nz = attribs_[8*idx + 7] - 511;
      const float norm = 511.0 / sqrt(nx*nx + ny*ny + nz*nz);
      nx *= norm;
      ny *= norm;
      nz *= norm;

      // NOTE(review): float residues are implicitly converted to int16
      // for ZigZag; relies on the residue fitting in int16 range.
      const uint16 dx = ZigZag(nx - pnx);
      const uint16 dy = ZigZag(ny - pny);
      const uint16 dz = ZigZag(nz - pnz);

      // |deltas_| is transposed: channel c occupies
      // [c*num_attribs, (c+1)*num_attribs).
      deltas_[5*num_attribs + idx] = dx;
      deltas_[6*num_attribs + idx] = dy;
      deltas_[7*num_attribs + idx] = dz;
    }
    for (size_t triangle_start_index = 0;
         triangle_start_index < indices_.size(); triangle_start_index += 3) {
      const uint16 i0 = indices_[triangle_start_index + 0];
      const uint16 i1 = indices_[triangle_start_index + 1];
      const uint16 i2 = indices_[triangle_start_index + 2];
      // To force simple compression, set |max_backref| to 0 here
      // and in loader.js.
      // |max_backref| should be configurable and communicated.
      const uint16 max_backref = triangle_start_index < kMaxLruSize ?
          triangle_start_index : kMaxLruSize;
      // Scan the index list for matching edges.
      uint16 backref = 0;
      for (; backref < max_backref; backref += 3) {
        const size_t candidate_start_index = triangle_start_index - backref;
        const uint16 j0 = indices_[candidate_start_index + 0];
        const uint16 j1 = indices_[candidate_start_index + 1];
        const uint16 j2 = indices_[candidate_start_index + 2];
        // Compare input and candidate triangle edges in a
        // winding-sensitive order. Matching edges must reference
        // vertices in opposite order, and the first check sees if the
        // triangles are in strip order. If necessary, re-order the
        // triangle in |indices_| so that the matching edge appears
        // first.
        if (j1 == i1 && j2 == i0) {
          ParallelogramPredictor(backref, j0, triangle_start_index);
          break;
        } else if (j1 == i0 && j2 == i2) {
          indices_[triangle_start_index + 0] = i2;
          indices_[triangle_start_index + 1] = i0;
          indices_[triangle_start_index + 2] = i1;
          ParallelogramPredictor(backref, j0, triangle_start_index);
          break;
        } else if (j1 == i2 && j2 == i1) {
          indices_[triangle_start_index + 0] = i1;
          indices_[triangle_start_index + 1] = i2;
          indices_[triangle_start_index + 2] = i0;
          ParallelogramPredictor(backref, j0, triangle_start_index);
          break;
        } else if (j2 == i1 && j0 == i0) {
          ParallelogramPredictor(backref + 1, j1, triangle_start_index);
          break;
        } else if (j2 == i0 && j0 == i2) {
          indices_[triangle_start_index + 0] = i2;
          indices_[triangle_start_index + 1] = i0;
          indices_[triangle_start_index + 2] = i1;
          ParallelogramPredictor(backref + 1, j1, triangle_start_index);
          break;
        } else if (j2 == i2 && j0 == i1) {
          indices_[triangle_start_index + 0] = i1;
          indices_[triangle_start_index + 1] = i2;
          indices_[triangle_start_index + 2] = i0;
          ParallelogramPredictor(backref + 1, j1, triangle_start_index);
          break;
        } else if (j0 == i1 && j1 == i0) {
          ParallelogramPredictor(backref + 2, j2, triangle_start_index);
          break;
        } else if (j0 == i0 && j1 == i2) {
          indices_[triangle_start_index + 0] = i2;
          indices_[triangle_start_index + 1] = i0;
          indices_[triangle_start_index + 2] = i1;
          ParallelogramPredictor(backref + 2, j2, triangle_start_index);
          break;
        } else if (j0 == i2 && j1 == i1) {
          indices_[triangle_start_index + 0] = i1;
          indices_[triangle_start_index + 1] = i2;
          indices_[triangle_start_index + 2] = i0;
          ParallelogramPredictor(backref + 2, j2, triangle_start_index);
          break;
        }
      }
      // No edge matched within the scan window: fall back.
      if (backref == max_backref) {
        SimplePredictor(max_backref, triangle_start_index);
      }
    }
    // Emit as UTF-8.
    for (size_t i = 0; i < deltas_.size(); ++i) {
      if (!Uint16ToUtf8(deltas_[i], utf8)) {
        // TODO: bounds-dependent texcoords are still busted :(
        Uint16ToUtf8(0, utf8);
      }
    }
    for (size_t i = 0; i < codes_.size(); ++i) {
      CHECK(Uint16ToUtf8(codes_[i], utf8));
    }
  }

  // Compressed attribute codes, transposed by channel.
  const QuantizedAttribList& deltas() const { return deltas_; }

  // Compressed index codes.
  const OptimizedIndexList& codes() const { return codes_; }

  // Prints the current edge LRU contents to |fp| for debugging.
  void DumpDebug(FILE* fp = stdout) {
    for (size_t i = 0; i < lru_size_; ++i) {
      fprintf(fp, PRIuS ": %d\n", i, edge_lru_[i]);
    }
  }

 private:
  // The simple predictor is slightly (maybe 5%) more effective than
  // |CompressQuantizedAttribsToUtf8|. Instead of delta encoding in
  // attribute order, we use the last referenced attribute as the
  // predictor.
  void SimplePredictor(size_t max_backref, size_t triangle_start_index) {
    const uint16 i0 = indices_[triangle_start_index + 0];
    const uint16 i1 = indices_[triangle_start_index + 1];
    const uint16 i2 = indices_[triangle_start_index + 2];
    if (HighWaterMark(i0, max_backref)) {
      // Would it be faster to do the dumb delta, in this case?
      EncodeDeltaAttrib(i0, last_attrib_);
    }
    if (HighWaterMark(i1)) {
      EncodeDeltaAttrib(i1, &attribs_[8*i0]);
    }
    if (HighWaterMark(i2)) {
      // We get a little frisky with the third vertex in the triangle.
      // Instead of simply using the previous vertex, use the average
      // of the first two.
      for (size_t j = 0; j < 8; ++j) {
        int average = attribs_[8*i0 + j];
        average += attribs_[8*i1 + j];
        average /= 2;
        last_attrib_[j] = average;
      }
      EncodeDeltaAttrib(i2, last_attrib_);
      // The above doesn't add much. Consider the simpler:
      // EncodeDeltaAttrib(i2, &attribs_[8*i1]);
    }
  }

  // Delta-encodes channels 0..4 of vertex |index| against |predicted|,
  // storing ZigZag codes in the transposed |deltas_| layout. Channels
  // 5..7 (normals) are filled by Compress()'s residue pass instead.
  void EncodeDeltaAttrib(size_t index, const uint16* predicted) {
    const size_t num_attribs = attribs_.size() / 8;
    for (size_t i = 0; i < 5; ++i) {
      const int delta = attribs_[8*index + i] - predicted[i];
      const uint16 code = ZigZag(delta);
      deltas_[num_attribs*i + index] = code;
    }
    UpdateLastAttrib(index);
  }

  // Emits |backref_edge| as this triangle's index code. If the third
  // vertex is new, its channels 0..4 are encoded against the
  // parallelogram prediction i0 + i1 - backref_vert.
  void ParallelogramPredictor(uint16 backref_edge,
                              size_t backref_vert,
                              size_t triangle_start_index) {
    codes_.push_back(backref_edge);  // Encoding matching edge.
    const uint16 i2 = indices_[triangle_start_index + 2];
    if (HighWaterMark(i2)) {  // Encode third vertex.
      // Parallelogram prediction for the new vertex.
      const uint16 i0 = indices_[triangle_start_index + 0];
      const uint16 i1 = indices_[triangle_start_index + 1];
      const size_t num_attribs = attribs_.size() / 8;
      for (size_t j = 0; j < 5; ++j) {
        const uint16 orig = attribs_[8*i2 + j];
        int delta = attribs_[8*i0 + j];
        delta += attribs_[8*i1 + j];
        delta -= attribs_[8*backref_vert + j];
        last_attrib_[j] = orig;
        const uint16 code = ZigZag(orig - delta);
        deltas_[num_attribs*j + i2] = code;
      }
    }
  }

  // Returns |true| if |index_high_water_mark_| is incremented, otherwise
  // returns |false| and automatically updates |last_attrib_|.
  // Always appends the index code (distance below the mark, offset by
  // |start_code|) to |codes_|.
  bool HighWaterMark(uint16 index, uint16 start_code = 0) {
    codes_.push_back(index_high_water_mark_ - index + start_code);
    if (index == index_high_water_mark_) {
      ++index_high_water_mark_;
      return true;
    } else {
      UpdateLastAttrib(index);
    }
    return false;
  }

  // Caches the 8 attribute words of vertex |index| in |last_attrib_|.
  void UpdateLastAttrib(uint16 index) {
    for (size_t i = 0; i < 8; ++i) {
      last_attrib_[i] = attribs_[8*index + i];
    }
  }

  // Find edge matches of |triangle| referenced in |edge_lru_|
  // |match_indices| stores where the matches occur in |edge_lru_|
  // |match_winding| stores where the matches occur in |triangle|
  size_t LruEdge(const uint16* triangle,
                 size_t* match_indices,
                 size_t* match_winding) {
    const uint16 i0 = triangle[0];
    const uint16 i1 = triangle[1];
    const uint16 i2 = triangle[2];
    // The primary thing is to find the first matching edge, if
    // any. If we assume that our mesh is mostly manifold, then each
    // edge is shared by at most two triangles (with the indices in
    // opposite order), so we actually want to eventually remove all
    // matching edges. However, this means we have to continue
    // scanning the array to find all matching edges, not just the
    // first. The edges that don't match will then pushed to the
    // front.
    size_t num_matches = 0;
    for (size_t i = 0; i < lru_size_ && num_matches < 3; ++i) {
      const int edge_index = edge_lru_[i];
      // |winding| is a tricky detail used to dereference the edge to
      // yield |e0| and |e1|, since we must handle the edge that wraps
      // the last and first vertex. For now, just implement this in a
      // straightforward way using a switch, but since this code would
      // actually also need to run in the decompressor, we must
      // optimize it.
      const int winding = edge_index % 3;
      uint16 e0, e1;
      switch (winding) {
        case 0:
          e0 = indices_[edge_index + 1];
          e1 = indices_[edge_index + 2];
          break;
        case 1:
          e0 = indices_[edge_index + 2];
          e1 = indices_[edge_index];
          break;
        case 2:
          e0 = indices_[edge_index];
          e1 = indices_[edge_index + 1];
          break;
        default:
          DumpDebug();
          CHECK(false);
      }

      // Check each edge of the input triangle against |e0| and
      // |e1|. Note that we reverse the winding of the input triangle.
      // TODO: does this properly handle degenerate input?
      // NOTE(review): the pre-increment below leaves match_indices[0]
      // unwritten; match_indices[num_matches++] looks intended. This
      // path is only reached via the WIP CompressWithLRU -- confirm
      // before relying on it.
      if (e0 == i1 && e1 == i0) {
        match_winding[num_matches] = 2;
        match_indices[++num_matches] = i;
      } else if (e0 == i2 && e1 == i1) {
        match_winding[num_matches] = 0;
        match_indices[++num_matches] = i;
      } else if (e0 == i0 && e1 == i2) {
        match_winding[num_matches] = 1;
        match_indices[++num_matches] = i;
      }
    }
    // Derive the remaining winding entries from the ones found.
    switch (num_matches) {
      case 1:
        match_winding[1] = (match_winding[0] + 1) % 3;  // Fall through.
      case 2:
        match_winding[2] = 3 - match_winding[1] - match_winding[0];
      default: ;  // Do nothing.
    }
    return num_matches;
  }

  // If no edges were found in |triangle|, then simply push the edges
  // onto |edge_lru_|.
  void LruEdgeZero(const uint16* triangle) {
    // Shift |edge_lru_| by three elements. Note that the |edge_lru_|
    // array has at least three extra elements to make this simple.
    lru_size_ += 3;
    if (lru_size_ > kMaxLruSize) lru_size_ = kMaxLruSize;
    memmove(edge_lru_ + 3, edge_lru_, lru_size_ * sizeof(int));
    // Push |triangle| to front of |edge_lru_|
    edge_lru_[0] = triangle[0];
    edge_lru_[1] = triangle[1];
    edge_lru_[2] = triangle[2];
  }

  // Remove one edge and add two.
  // NOTE(review): |lru_size_| is not adjusted here despite the net +1
  // edge; part of the unfinished CompressWithLRU path -- verify.
  void LruEdgeOne(size_t i0, size_t i1, size_t match_index) {
    CHECK(match_index < lru_size_);
    // Shift |edge_lru_| by one element, starting with |match_index| + 1.
    memmove(edge_lru_ + match_index + 2, edge_lru_ + match_index + 1,
            (lru_size_ - match_index) * sizeof(int));
    // Shift |edge_lru_| by two elements until reaching |match_index|.
    memmove(edge_lru_ + 2, edge_lru_, match_index * sizeof(int));
    edge_lru_[0] = i0;
    edge_lru_[1] = i1;
  }

  // Remove two edges and add one.
  // NOTE(review): unimplemented stub -- the two memmoves are still
  // placeholders.
  void LruEdgeTwo(int i0, size_t match_index0, size_t match_index1) {
    CHECK(match_index0 < lru_size_);
    CHECK(match_index1 < lru_size_);

    // memmove 1
    // memmove 2
    edge_lru_[0] = i0;
  }

  // All edges were found, so just remove them from |edge_lru_|.
  // Compacts in three passes: shift-by-1 up to the second match,
  // shift-by-2 up to the third, then shift-by-3 for the remainder.
  void LruEdgeThree(size_t match_index0,
                    size_t match_index1,
                    size_t match_index2) {
    const size_t shift_two = match_index1 - 1;
    for (size_t i = match_index0; i < shift_two; ++i) {
      edge_lru_[i] = edge_lru_[i + 1];
    }
    const size_t shift_three = match_index2 - 2;
    for (size_t i = shift_two; i < shift_three; ++i) {
      edge_lru_[i] = edge_lru_[i + 2];
    }
    lru_size_ -= 3;
    for (size_t i = shift_three; i < lru_size_; ++i) {
      edge_lru_[i] = edge_lru_[i + 3];
    }
  }

  // |attribs_| and |indices_| is the input mesh.
  const QuantizedAttribList& attribs_;
  // |indices_| are non-const because |Compress| may update triangle
  // winding order.
  OptimizedIndexList& indices_;
  // |deltas_| contains the compressed attributes. They can be
  // compressed in one of two ways:
  // (1) with parallelogram prediction, compared with the predicted vertex,
  // (2) otherwise, compared with the last referenced vertex.
  // Note that even (2) is different and probably better than what
  // |CompressQuantizedAttribsToUtf8| does, which is comparing with
  // the last encoded vertex.
  QuantizedAttribList deltas_;
  // |codes_| contains the compressed indices. Small values encode an
  // edge match; that is, the first edge of the next triangle matches
  // a recently-seen edge.
  OptimizedIndexList codes_;
  // |index_high_water_mark_| is used as it is in |CompressIndicesToUtf8|.
  uint16 index_high_water_mark_;
  // |last_attrib_| holds the 8 attribute words of the last referenced
  // vertex. This is used to delta encode attributes when no edge match
  // is found.
  uint16 last_attrib_[8];
  size_t lru_size_;
  // |edge_lru_| contains the LRU list of edge references. It stores
  // indices to the input |indices_|. By convention, an edge points to
  // the vertex opposite the edge in question. We pad the array by a
  // triangle to simplify edge cases.
  int edge_lru_[kMaxLruSize + 3];
};
536 |
|
---|
537 | } // namespace webgl_loader
|
---|
538 |
|
---|
539 | #endif // WEBGL_LOADER_COMPRESS_H_
|
---|