From 0b6982391228014cbe2727842d66cc764e4c3d0a Mon Sep 17 00:00:00 2001
From: Maxim Lebedev
Date: Sun, 6 Aug 2023 05:58:57 +0600
Subject: [PATCH] :bug: Fixed Scopes domain parsing

---
 go.mod | 7 +-
 go.sum | 14 +-
 .../auth/delivery/http/auth_http_schema.go | 1 +
 internal/auth/delivery/http/auth_http_test.go | 2 +-
 internal/domain/scope_test.go | 75 -
 internal/domain/scopes.go | 16 +-
 internal/domain/scopes_test.go | 83 +
 vendor/github.com/andybalholm/brotli/LICENSE | 19 -
 .../github.com/andybalholm/brotli/README.md | 7 -
 .../andybalholm/brotli/backward_references.go | 185 -
 .../brotli/backward_references_hq.go | 796 -
 .../github.com/andybalholm/brotli/bit_cost.go | 436 -
 .../andybalholm/brotli/bit_reader.go | 266 -
 .../andybalholm/brotli/block_splitter.go | 144 -
 .../brotli/block_splitter_command.go | 434 -
 .../brotli/block_splitter_distance.go | 433 -
 .../brotli/block_splitter_literal.go | 433 -
 .../andybalholm/brotli/brotli_bit_stream.go | 1300 -
 .../github.com/andybalholm/brotli/cluster.go | 30 -
 .../andybalholm/brotli/cluster_command.go | 164 -
 .../andybalholm/brotli/cluster_distance.go | 326 -
 .../andybalholm/brotli/cluster_literal.go | 326 -
 .../github.com/andybalholm/brotli/command.go | 254 -
 .../andybalholm/brotli/compress_fragment.go | 834 -
 .../brotli/compress_fragment_two_pass.go | 748 -
 .../andybalholm/brotli/constants.go | 77 -
 .../github.com/andybalholm/brotli/context.go | 2176 -
 .../github.com/andybalholm/brotli/decode.go | 2581 -
 .../andybalholm/brotli/dictionary.go | 122890 ---------------
 .../andybalholm/brotli/dictionary_hash.go | 32779 ----
 .../github.com/andybalholm/brotli/encode.go | 1220 -
 .../andybalholm/brotli/encoder_dict.go | 22 -
 .../andybalholm/brotli/entropy_encode.go | 592 -
 .../brotli/entropy_encode_static.go | 4394 -
 .../github.com/andybalholm/brotli/fast_log.go | 290 -
 .../andybalholm/brotli/find_match_length.go | 45 -
 vendor/github.com/andybalholm/brotli/h10.go | 287 -
 vendor/github.com/andybalholm/brotli/h5.go | 214 -
 vendor/github.com/andybalholm/brotli/h6.go | 216 -
 vendor/github.com/andybalholm/brotli/hash.go | 342 -
 .../andybalholm/brotli/hash_composite.go | 93 -
 .../brotli/hash_forgetful_chain.go | 252 -
 .../brotli/hash_longest_match_quickly.go | 214 -
 .../andybalholm/brotli/hash_rolling.go | 168 -
 .../andybalholm/brotli/histogram.go | 226 -
 vendor/github.com/andybalholm/brotli/http.go | 192 -
 .../github.com/andybalholm/brotli/huffman.go | 653 -
 .../andybalholm/brotli/literal_cost.go | 182 -
 .../github.com/andybalholm/brotli/memory.go | 66 -
 .../andybalholm/brotli/metablock.go | 574 -
 .../andybalholm/brotli/metablock_command.go | 165 -
 .../andybalholm/brotli/metablock_distance.go | 165 -
 .../andybalholm/brotli/metablock_literal.go | 165 -
 .../github.com/andybalholm/brotli/params.go | 37 -
 .../github.com/andybalholm/brotli/platform.go | 103 -
 .../github.com/andybalholm/brotli/prefix.go | 30 -
 .../andybalholm/brotli/prefix_dec.go | 723 -
 .../github.com/andybalholm/brotli/quality.go | 196 -
 .../github.com/andybalholm/brotli/reader.go | 108 -
 .../andybalholm/brotli/ringbuffer.go | 134 -
 vendor/github.com/andybalholm/brotli/state.go | 294 -
 .../andybalholm/brotli/static_dict.go | 662 -
 .../andybalholm/brotli/static_dict_lut.go | 75094 ---------
 .../andybalholm/brotli/symbol_list.go | 22 -
 .../andybalholm/brotli/transform.go | 641 -
 .../andybalholm/brotli/utf8_util.go | 70 -
 vendor/github.com/andybalholm/brotli/util.go | 7 -
 .../andybalholm/brotli/write_bits.go | 52 -
 .../github.com/andybalholm/brotli/writer.go | 119 -
.../github.com/brianvoe/gofakeit/v6/README.md | 1 + .../brianvoe/gofakeit/v6/address.go | 4 +- .../brianvoe/gofakeit/v6/data/address.go | 2 +- .../brianvoe/gofakeit/v6/data/car.go | 2 +- .../brianvoe/gofakeit/v6/data/celebrity.go | 2 +- .../brianvoe/gofakeit/v6/data/colors.go | 2 +- .../brianvoe/gofakeit/v6/internet.go | 11 + .../github.com/brianvoe/gofakeit/v6/string.go | 8 +- .../github.com/brianvoe/gofakeit/v6/time.go | 12 +- .../brianvoe/gofakeit/v6/word_connective.go | 14 +- vendor/github.com/klauspost/compress/LICENSE | 304 - .../klauspost/compress/flate/deflate.go | 988 - .../klauspost/compress/flate/dict_decoder.go | 184 - .../klauspost/compress/flate/fast_encoder.go | 216 - .../compress/flate/huffman_bit_writer.go | 1182 - .../klauspost/compress/flate/huffman_code.go | 417 - .../compress/flate/huffman_sortByFreq.go | 159 - .../compress/flate/huffman_sortByLiteral.go | 201 - .../klauspost/compress/flate/inflate.go | 793 - .../klauspost/compress/flate/inflate_gen.go | 1283 - .../klauspost/compress/flate/level1.go | 241 - .../klauspost/compress/flate/level2.go | 214 - .../klauspost/compress/flate/level3.go | 241 - .../klauspost/compress/flate/level4.go | 221 - .../klauspost/compress/flate/level5.go | 310 - .../klauspost/compress/flate/level6.go | 325 - .../klauspost/compress/flate/regmask_amd64.go | 37 - .../klauspost/compress/flate/regmask_other.go | 40 - .../klauspost/compress/flate/stateless.go | 318 - .../klauspost/compress/flate/token.go | 379 - .../klauspost/compress/gzip/gunzip.go | 374 - .../klauspost/compress/gzip/gzip.go | 269 - .../klauspost/compress/zlib/reader.go | 183 - .../klauspost/compress/zlib/writer.go | 201 - vendor/github.com/valyala/fasthttp/.gitignore | 8 - vendor/github.com/valyala/fasthttp/LICENSE | 9 - vendor/github.com/valyala/fasthttp/README.md | 638 - .../github.com/valyala/fasthttp/SECURITY.md | 115 - vendor/github.com/valyala/fasthttp/TODO | 4 - vendor/github.com/valyala/fasthttp/args.go | 642 - vendor/github.com/valyala/fasthttp/b2s_new.go | 12 - vendor/github.com/valyala/fasthttp/b2s_old.go | 16 - vendor/github.com/valyala/fasthttp/brotli.go | 190 - .../github.com/valyala/fasthttp/bytesconv.go | 357 - .../valyala/fasthttp/bytesconv_32.go | 8 - .../valyala/fasthttp/bytesconv_64.go | 8 - .../valyala/fasthttp/bytesconv_table.go | 10 - vendor/github.com/valyala/fasthttp/client.go | 2915 - .../github.com/valyala/fasthttp/coarseTime.go | 13 - .../github.com/valyala/fasthttp/compress.go | 456 - vendor/github.com/valyala/fasthttp/cookie.go | 555 - vendor/github.com/valyala/fasthttp/doc.go | 55 - .../valyala/fasthttp/fasthttputil/doc.go | 2 - .../fasthttputil/inmemory_listener.go | 134 - .../fasthttp/fasthttputil/pipeconns.go | 343 - .../valyala/fasthttp/fasthttputil/rsa.key | 28 - .../valyala/fasthttp/fasthttputil/rsa.pem | 17 - vendor/github.com/valyala/fasthttp/fs.go | 1458 - vendor/github.com/valyala/fasthttp/header.go | 3423 - vendor/github.com/valyala/fasthttp/headers.go | 165 - vendor/github.com/valyala/fasthttp/http.go | 2365 - .../github.com/valyala/fasthttp/lbclient.go | 203 - vendor/github.com/valyala/fasthttp/methods.go | 14 - vendor/github.com/valyala/fasthttp/nocopy.go | 11 - .../github.com/valyala/fasthttp/peripconn.go | 100 - vendor/github.com/valyala/fasthttp/s2b_new.go | 11 - vendor/github.com/valyala/fasthttp/s2b_old.go | 24 - vendor/github.com/valyala/fasthttp/server.go | 2957 - .../valyala/fasthttp/stackless/doc.go | 3 - .../valyala/fasthttp/stackless/func.go | 80 - .../valyala/fasthttp/stackless/writer.go | 138 - 
vendor/github.com/valyala/fasthttp/status.go | 177 - vendor/github.com/valyala/fasthttp/stream.go | 54 - .../github.com/valyala/fasthttp/streaming.go | 113 - vendor/github.com/valyala/fasthttp/strings.go | 94 - vendor/github.com/valyala/fasthttp/tcp.go | 13 - .../valyala/fasthttp/tcp_windows.go | 13 - .../github.com/valyala/fasthttp/tcpdialer.go | 454 - vendor/github.com/valyala/fasthttp/timer.go | 55 - vendor/github.com/valyala/fasthttp/tls.go | 60 - vendor/github.com/valyala/fasthttp/uri.go | 908 - .../github.com/valyala/fasthttp/uri_unix.go | 13 - .../valyala/fasthttp/uri_windows.go | 14 - .../github.com/valyala/fasthttp/userdata.go | 104 - .../github.com/valyala/fasthttp/workerpool.go | 251 - vendor/modules.txt | 11 +- vendor/source.toby3d.me/toby3d/form/form.go | 73 +- 156 files changed, 190 insertions(+), 285707 deletions(-) create mode 100644 internal/domain/scopes_test.go delete mode 100644 vendor/github.com/andybalholm/brotli/LICENSE delete mode 100644 vendor/github.com/andybalholm/brotli/README.md delete mode 100644 vendor/github.com/andybalholm/brotli/backward_references.go delete mode 100644 vendor/github.com/andybalholm/brotli/backward_references_hq.go delete mode 100644 vendor/github.com/andybalholm/brotli/bit_cost.go delete mode 100644 vendor/github.com/andybalholm/brotli/bit_reader.go delete mode 100644 vendor/github.com/andybalholm/brotli/block_splitter.go delete mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_command.go delete mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_distance.go delete mode 100644 vendor/github.com/andybalholm/brotli/block_splitter_literal.go delete mode 100644 vendor/github.com/andybalholm/brotli/brotli_bit_stream.go delete mode 100644 vendor/github.com/andybalholm/brotli/cluster.go delete mode 100644 vendor/github.com/andybalholm/brotli/cluster_command.go delete mode 100644 vendor/github.com/andybalholm/brotli/cluster_distance.go delete mode 100644 vendor/github.com/andybalholm/brotli/cluster_literal.go delete mode 100644 vendor/github.com/andybalholm/brotli/command.go delete mode 100644 vendor/github.com/andybalholm/brotli/compress_fragment.go delete mode 100644 vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go delete mode 100644 vendor/github.com/andybalholm/brotli/constants.go delete mode 100644 vendor/github.com/andybalholm/brotli/context.go delete mode 100644 vendor/github.com/andybalholm/brotli/decode.go delete mode 100644 vendor/github.com/andybalholm/brotli/dictionary.go delete mode 100644 vendor/github.com/andybalholm/brotli/dictionary_hash.go delete mode 100644 vendor/github.com/andybalholm/brotli/encode.go delete mode 100644 vendor/github.com/andybalholm/brotli/encoder_dict.go delete mode 100644 vendor/github.com/andybalholm/brotli/entropy_encode.go delete mode 100644 vendor/github.com/andybalholm/brotli/entropy_encode_static.go delete mode 100644 vendor/github.com/andybalholm/brotli/fast_log.go delete mode 100644 vendor/github.com/andybalholm/brotli/find_match_length.go delete mode 100644 vendor/github.com/andybalholm/brotli/h10.go delete mode 100644 vendor/github.com/andybalholm/brotli/h5.go delete mode 100644 vendor/github.com/andybalholm/brotli/h6.go delete mode 100644 vendor/github.com/andybalholm/brotli/hash.go delete mode 100644 vendor/github.com/andybalholm/brotli/hash_composite.go delete mode 100644 vendor/github.com/andybalholm/brotli/hash_forgetful_chain.go delete mode 100644 vendor/github.com/andybalholm/brotli/hash_longest_match_quickly.go delete mode 100644 
vendor/github.com/andybalholm/brotli/hash_rolling.go delete mode 100644 vendor/github.com/andybalholm/brotli/histogram.go delete mode 100644 vendor/github.com/andybalholm/brotli/http.go delete mode 100644 vendor/github.com/andybalholm/brotli/huffman.go delete mode 100644 vendor/github.com/andybalholm/brotli/literal_cost.go delete mode 100644 vendor/github.com/andybalholm/brotli/memory.go delete mode 100644 vendor/github.com/andybalholm/brotli/metablock.go delete mode 100644 vendor/github.com/andybalholm/brotli/metablock_command.go delete mode 100644 vendor/github.com/andybalholm/brotli/metablock_distance.go delete mode 100644 vendor/github.com/andybalholm/brotli/metablock_literal.go delete mode 100644 vendor/github.com/andybalholm/brotli/params.go delete mode 100644 vendor/github.com/andybalholm/brotli/platform.go delete mode 100644 vendor/github.com/andybalholm/brotli/prefix.go delete mode 100644 vendor/github.com/andybalholm/brotli/prefix_dec.go delete mode 100644 vendor/github.com/andybalholm/brotli/quality.go delete mode 100644 vendor/github.com/andybalholm/brotli/reader.go delete mode 100644 vendor/github.com/andybalholm/brotli/ringbuffer.go delete mode 100644 vendor/github.com/andybalholm/brotli/state.go delete mode 100644 vendor/github.com/andybalholm/brotli/static_dict.go delete mode 100644 vendor/github.com/andybalholm/brotli/static_dict_lut.go delete mode 100644 vendor/github.com/andybalholm/brotli/symbol_list.go delete mode 100644 vendor/github.com/andybalholm/brotli/transform.go delete mode 100644 vendor/github.com/andybalholm/brotli/utf8_util.go delete mode 100644 vendor/github.com/andybalholm/brotli/util.go delete mode 100644 vendor/github.com/andybalholm/brotli/write_bits.go delete mode 100644 vendor/github.com/andybalholm/brotli/writer.go delete mode 100644 vendor/github.com/klauspost/compress/LICENSE delete mode 100644 vendor/github.com/klauspost/compress/flate/deflate.go delete mode 100644 vendor/github.com/klauspost/compress/flate/dict_decoder.go delete mode 100644 vendor/github.com/klauspost/compress/flate/fast_encoder.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_code.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByFreq.go delete mode 100644 vendor/github.com/klauspost/compress/flate/huffman_sortByLiteral.go delete mode 100644 vendor/github.com/klauspost/compress/flate/inflate.go delete mode 100644 vendor/github.com/klauspost/compress/flate/inflate_gen.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level1.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level2.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level3.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level4.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level5.go delete mode 100644 vendor/github.com/klauspost/compress/flate/level6.go delete mode 100644 vendor/github.com/klauspost/compress/flate/regmask_amd64.go delete mode 100644 vendor/github.com/klauspost/compress/flate/regmask_other.go delete mode 100644 vendor/github.com/klauspost/compress/flate/stateless.go delete mode 100644 vendor/github.com/klauspost/compress/flate/token.go delete mode 100644 vendor/github.com/klauspost/compress/gzip/gunzip.go delete mode 100644 vendor/github.com/klauspost/compress/gzip/gzip.go delete mode 100644 vendor/github.com/klauspost/compress/zlib/reader.go delete mode 100644 
vendor/github.com/klauspost/compress/zlib/writer.go delete mode 100644 vendor/github.com/valyala/fasthttp/.gitignore delete mode 100644 vendor/github.com/valyala/fasthttp/LICENSE delete mode 100644 vendor/github.com/valyala/fasthttp/README.md delete mode 100644 vendor/github.com/valyala/fasthttp/SECURITY.md delete mode 100644 vendor/github.com/valyala/fasthttp/TODO delete mode 100644 vendor/github.com/valyala/fasthttp/args.go delete mode 100644 vendor/github.com/valyala/fasthttp/b2s_new.go delete mode 100644 vendor/github.com/valyala/fasthttp/b2s_old.go delete mode 100644 vendor/github.com/valyala/fasthttp/brotli.go delete mode 100644 vendor/github.com/valyala/fasthttp/bytesconv.go delete mode 100644 vendor/github.com/valyala/fasthttp/bytesconv_32.go delete mode 100644 vendor/github.com/valyala/fasthttp/bytesconv_64.go delete mode 100644 vendor/github.com/valyala/fasthttp/bytesconv_table.go delete mode 100644 vendor/github.com/valyala/fasthttp/client.go delete mode 100644 vendor/github.com/valyala/fasthttp/coarseTime.go delete mode 100644 vendor/github.com/valyala/fasthttp/compress.go delete mode 100644 vendor/github.com/valyala/fasthttp/cookie.go delete mode 100644 vendor/github.com/valyala/fasthttp/doc.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/doc.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/inmemory_listener.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/pipeconns.go delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/rsa.key delete mode 100644 vendor/github.com/valyala/fasthttp/fasthttputil/rsa.pem delete mode 100644 vendor/github.com/valyala/fasthttp/fs.go delete mode 100644 vendor/github.com/valyala/fasthttp/header.go delete mode 100644 vendor/github.com/valyala/fasthttp/headers.go delete mode 100644 vendor/github.com/valyala/fasthttp/http.go delete mode 100644 vendor/github.com/valyala/fasthttp/lbclient.go delete mode 100644 vendor/github.com/valyala/fasthttp/methods.go delete mode 100644 vendor/github.com/valyala/fasthttp/nocopy.go delete mode 100644 vendor/github.com/valyala/fasthttp/peripconn.go delete mode 100644 vendor/github.com/valyala/fasthttp/s2b_new.go delete mode 100644 vendor/github.com/valyala/fasthttp/s2b_old.go delete mode 100644 vendor/github.com/valyala/fasthttp/server.go delete mode 100644 vendor/github.com/valyala/fasthttp/stackless/doc.go delete mode 100644 vendor/github.com/valyala/fasthttp/stackless/func.go delete mode 100644 vendor/github.com/valyala/fasthttp/stackless/writer.go delete mode 100644 vendor/github.com/valyala/fasthttp/status.go delete mode 100644 vendor/github.com/valyala/fasthttp/stream.go delete mode 100644 vendor/github.com/valyala/fasthttp/streaming.go delete mode 100644 vendor/github.com/valyala/fasthttp/strings.go delete mode 100644 vendor/github.com/valyala/fasthttp/tcp.go delete mode 100644 vendor/github.com/valyala/fasthttp/tcp_windows.go delete mode 100644 vendor/github.com/valyala/fasthttp/tcpdialer.go delete mode 100644 vendor/github.com/valyala/fasthttp/timer.go delete mode 100644 vendor/github.com/valyala/fasthttp/tls.go delete mode 100644 vendor/github.com/valyala/fasthttp/uri.go delete mode 100644 vendor/github.com/valyala/fasthttp/uri_unix.go delete mode 100644 vendor/github.com/valyala/fasthttp/uri_windows.go delete mode 100644 vendor/github.com/valyala/fasthttp/userdata.go delete mode 100644 vendor/github.com/valyala/fasthttp/workerpool.go diff --git a/go.mod b/go.mod index 1df7bc0..eba2437 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,7 @@ 
go 1.20 require ( github.com/DATA-DOG/go-sqlmock v1.5.0 - github.com/brianvoe/gofakeit/v6 v6.22.0 + github.com/brianvoe/gofakeit/v6 v6.23.1 github.com/caarlos0/env/v9 v9.0.0 github.com/goccy/go-json v0.10.2 github.com/google/go-cmp v0.5.9 @@ -19,17 +19,15 @@ require ( golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a modernc.org/sqlite v1.25.0 - source.toby3d.me/toby3d/form v0.3.0 + source.toby3d.me/toby3d/form v0.4.0 willnorris.com/go/microformats v1.2.0 ) require ( - github.com/andybalholm/brotli v1.0.5 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect - github.com/klauspost/compress v1.16.7 // indirect github.com/lestrrat-go/blackmagic v1.0.1 // indirect github.com/lestrrat-go/httpcc v1.0.1 // indirect github.com/lestrrat-go/httprc v1.0.4 // indirect @@ -40,7 +38,6 @@ require ( github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasthttp v1.48.0 // indirect go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect go4.org/unsafe/assume-no-moving-gc v0.0.0-20230525183740-e7c30c78aeb2 // indirect golang.org/x/crypto v0.12.0 // indirect diff --git a/go.sum b/go.sum index 4a5152d..bba1c31 100644 --- a/go.sum +++ b/go.sum @@ -2,10 +2,8 @@ github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20O github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/andybalholm/brotli v1.0.5 h1:8uQZIdzKmjc/iuPu7O2ioW48L81FgatrcpfFmiq/cCs= -github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/brianvoe/gofakeit/v6 v6.22.0 h1:BzOsDot1o3cufTfOk+fWKE9nFYojyDV+XHdCWL2+uyE= -github.com/brianvoe/gofakeit/v6 v6.22.0/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= +github.com/brianvoe/gofakeit/v6 v6.23.1 h1:k2gX0hQpJStvixDbbw8oJOvPBg0XmHJWbSOF5JkiUHw= +github.com/brianvoe/gofakeit/v6 v6.23.1/go.mod h1:Ow6qC71xtwm79anlwKRlWZW6zVq9D2XHE4QSSMP/rU8= github.com/caarlos0/env/v9 v9.0.0 h1:SI6JNsOA+y5gj9njpgybykATIylrRMklbs5ch6wO6pc= github.com/caarlos0/env/v9 v9.0.0/go.mod h1:ye5mlCVMYh6tZ+vCgrs/B95sj88cg5Tlnc0XIzgZ020= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -33,8 +31,6 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/lestrrat-go/blackmagic v1.0.1 h1:lS5Zts+5HIC/8og6cGHb0uCcNCa3OUt1ygh3Qz2Fe80= github.com/lestrrat-go/blackmagic v1.0.1/go.mod 
h1:UrEqBzIR2U6CnzVyUtfM6oZNMt/7O7Vohk2J0OGSAtU= @@ -75,8 +71,6 @@ github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFy github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= -github.com/valyala/fasthttp v1.48.0 h1:oJWvHb9BIZToTQS3MuQ2R3bJZiNSa2KiNdeI8A+79Tc= -github.com/valyala/fasthttp v1.48.0/go.mod h1:k2zXd82h/7UZc3VOdJ2WaUqt1uZ/XpXAfE9i+HBC3lA= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= github.com/valyala/quicktemplate v1.7.0 h1:LUPTJmlVcb46OOUY3IeD9DojFpAVbsG+5WFTcjMJzCM= @@ -192,7 +186,7 @@ modernc.org/tcl v1.15.2 h1:C4ybAYCGJw968e+Me18oW55kD/FexcHbqH2xak1ROSY= modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= modernc.org/z v1.7.3 h1:zDJf6iHjrnB+WRD88stbXokugjyc0/pB91ri1gO6LZY= -source.toby3d.me/toby3d/form v0.3.0 h1:kI8apdFeVr+koqTTGVoIRiR5NMqjrhCJlajYlu+1bVw= -source.toby3d.me/toby3d/form v0.3.0/go.mod h1:drlHMC+j/gb5zsttCSwx8qcYsbaRW+wFfE8bK6y+oeY= +source.toby3d.me/toby3d/form v0.4.0 h1:p4erlFQZpWi64oHQVYsNe8FKT75ZwnExELk69ZDoQO8= +source.toby3d.me/toby3d/form v0.4.0/go.mod h1:drlHMC+j/gb5zsttCSwx8qcYsbaRW+wFfE8bK6y+oeY= willnorris.com/go/microformats v1.2.0 h1:73pzJCLJM69kYE5qsLI9OOC/7sImNVOzya9EQ0+1wmM= willnorris.com/go/microformats v1.2.0/go.mod h1:RrlwCSvib4qz+JICKiN7rON4phzQ3HAT7j6s4O2cZj4= diff --git a/internal/auth/delivery/http/auth_http_schema.go b/internal/auth/delivery/http/auth_http_schema.go index 07c4ae5..f51762f 100644 --- a/internal/auth/delivery/http/auth_http_schema.go +++ b/internal/auth/delivery/http/auth_http_schema.go @@ -122,6 +122,7 @@ func (r *AuthAuthorizationRequest) bind(req *http.Request) error { r.ResponseType = domain.ResponseTypeCode } + // NOTE(toby3d): fallback for multiple same-key form values if req.URL.Query().Has("scope[]") { for _, k := range req.URL.Query()["scope[]"] { scope, err := domain.ParseScope(k) diff --git a/internal/auth/delivery/http/auth_http_test.go b/internal/auth/delivery/http/auth_http_test.go index a06b3c1..4012742 100644 --- a/internal/auth/delivery/http/auth_http_test.go +++ b/internal/auth/delivery/http/auth_http_test.go @@ -98,7 +98,7 @@ func TestAuthorize(t *testing.T) { t.Errorf("%s %s = %d, want %d", req.Method, u.String(), resp.StatusCode, http.StatusOK) } - const expResult = `Authorize application` + expResult := `Authorize ` + client.GetName() if result := string(body); !strings.Contains(result, expResult) { t.Errorf("%s %s = %s, want %s", req.Method, u.String(), result, expResult) } diff --git a/internal/domain/scope_test.go b/internal/domain/scope_test.go index 5f4fac7..1b507a5 100644 --- a/internal/domain/scope_test.go +++ b/internal/domain/scope_test.go @@ -2,7 +2,6 @@ package domain_test import ( "fmt" - "reflect" "testing" "source.toby3d.me/toby3d/auth/internal/domain" @@ -45,53 +44,6 @@ func TestParseScope(t *testing.T) { } } -func TestScopes_UnmarshalForm(t *testing.T) { - t.Parallel() - - input := []byte("profile email") - results := make(domain.Scopes, 0) - - if err := results.UnmarshalForm(input); err != nil { - t.Fatalf("%+v", err) - } - - expResults := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} - if 
!reflect.DeepEqual(results, expResults) { - t.Errorf("UnmarshalForm(%s) = %s, want %s", input, results, expResults) - } -} - -func TestScopes_UnmarshalJSON(t *testing.T) { - t.Parallel() - - input := []byte(`"profile email"`) - results := make(domain.Scopes, 0) - - if err := results.UnmarshalJSON(input); err != nil { - t.Fatalf("%+v", err) - } - - expResults := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} - if !reflect.DeepEqual(results, expResults) { - t.Errorf("UnmarshalJSON(%s) = %s, want %s", input, results, expResults) - } -} - -func TestScopes_MarshalJSON(t *testing.T) { - t.Parallel() - - scopes := domain.Scopes{domain.ScopeEmail, domain.ScopeProfile} - - result, err := scopes.MarshalJSON() - if err != nil { - t.Fatalf("%+v", err) - } - - if string(result) != fmt.Sprintf(`"%s"`, scopes) { - t.Errorf("MarshalJSON() = %s, want %s", result, fmt.Sprintf(`"%s"`, scopes)) - } -} - func TestScope_String(t *testing.T) { t.Parallel() @@ -123,30 +75,3 @@ func TestScope_String(t *testing.T) { }) } } - -func TestScopes_String(t *testing.T) { - t.Parallel() - - scopes := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} - if result := scopes.String(); result != fmt.Sprint(scopes) { - t.Errorf("String() = %s, want %s", result, scopes) - } -} - -func TestScopes_IsEmpty(t *testing.T) { - t.Parallel() - - scopes := domain.Scopes{domain.ScopeUnd} - if result := scopes.IsEmpty(); !result { - t.Errorf("IsEmpty() = %t, want %t", result, true) - } -} - -func TestScopes_Has(t *testing.T) { - t.Parallel() - - scopes := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} - if result := scopes.Has(domain.ScopeEmail); !result { - t.Errorf("Has(%s) = %t, want %t", domain.ScopeEmail, result, true) - } -} diff --git a/internal/domain/scopes.go b/internal/domain/scopes.go index 9ff2146..d6f86fe 100644 --- a/internal/domain/scopes.go +++ b/internal/domain/scopes.go @@ -11,23 +11,19 @@ type Scopes []Scope // UnmarshalForm implements custom unmarshler for form values. 
func (s *Scopes) UnmarshalForm(v []byte) error { - scopes := make(Scopes, 0) - for _, rawScope := range strings.Fields(string(v)) { scope, err := ParseScope(rawScope) if err != nil { return fmt.Errorf("Scopes: UnmarshalForm: %w", err) } - if scopes.Has(scope) { + if s.Has(scope) { continue } - scopes = append(scopes, scope) + *s = append(*s, scope) } - *s = scopes - return nil } @@ -38,23 +34,19 @@ func (s *Scopes) UnmarshalJSON(v []byte) error { return fmt.Errorf("Scopes: UnmarshalJSON: %w", err) } - result := make(Scopes, 0) - for _, rawScope := range strings.Fields(src) { scope, err := ParseScope(rawScope) if err != nil { return fmt.Errorf("Scopes: UnmarshalJSON: %w", err) } - if result.Has(scope) { + if s.Has(scope) { continue } - result = append(result, scope) + *s = append(*s, scope) } - *s = result - return nil } diff --git a/internal/domain/scopes_test.go b/internal/domain/scopes_test.go new file mode 100644 index 0000000..302b16f --- /dev/null +++ b/internal/domain/scopes_test.go @@ -0,0 +1,83 @@ +package domain_test + +import ( + "fmt" + "reflect" + "testing" + + "source.toby3d.me/toby3d/auth/internal/domain" +) + +func TestScopes_UnmarshalForm(t *testing.T) { + t.Parallel() + + input := []byte("profile email") + results := make(domain.Scopes, 0) + + if err := results.UnmarshalForm(input); err != nil { + t.Fatalf("%+v", err) + } + + expResults := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} + if !reflect.DeepEqual(results, expResults) { + t.Errorf("UnmarshalForm(%s) = %s, want %s", input, results, expResults) + } +} + +func TestScopes_UnmarshalJSON(t *testing.T) { + t.Parallel() + + input := []byte(`"profile email"`) + results := make(domain.Scopes, 0) + + if err := results.UnmarshalJSON(input); err != nil { + t.Fatalf("%+v", err) + } + + expResults := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} + if !reflect.DeepEqual(results, expResults) { + t.Errorf("UnmarshalJSON(%s) = %s, want %s", input, results, expResults) + } +} + +func TestScopes_MarshalJSON(t *testing.T) { + t.Parallel() + + scopes := domain.Scopes{domain.ScopeEmail, domain.ScopeProfile} + + result, err := scopes.MarshalJSON() + if err != nil { + t.Fatalf("%+v", err) + } + + if string(result) != fmt.Sprintf(`"%s"`, scopes) { + t.Errorf("MarshalJSON() = %s, want %s", result, fmt.Sprintf(`"%s"`, scopes)) + } +} + +func TestScopes_String(t *testing.T) { + t.Parallel() + + scopes := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} + if result := scopes.String(); result != fmt.Sprint(scopes) { + t.Errorf("String() = %s, want %s", result, scopes) + } +} + +func TestScopes_IsEmpty(t *testing.T) { + t.Parallel() + + scopes := domain.Scopes{domain.ScopeUnd} + if result := scopes.IsEmpty(); !result { + t.Errorf("IsEmpty() = %t, want %t", result, true) + } +} + +func TestScopes_Has(t *testing.T) { + t.Parallel() + + scopes := domain.Scopes{domain.ScopeProfile, domain.ScopeEmail} + if result := scopes.Has(domain.ScopeEmail); !result { + t.Errorf("Has(%s) = %t, want %t", domain.ScopeEmail, result, true) + } +} diff --git a/vendor/github.com/andybalholm/brotli/LICENSE b/vendor/github.com/andybalholm/brotli/LICENSE deleted file mode 100644 index 33b7cdd..0000000 --- a/vendor/github.com/andybalholm/brotli/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/andybalholm/brotli/README.md b/vendor/github.com/andybalholm/brotli/README.md deleted file mode 100644 index 1ea7fdb..0000000 --- a/vendor/github.com/andybalholm/brotli/README.md +++ /dev/null @@ -1,7 +0,0 @@ -This package is a brotli compressor and decompressor implemented in Go. -It was translated from the reference implementation (https://github.com/google/brotli) -with the `c2go` tool at https://github.com/andybalholm/c2go. - -I am using it in production with https://github.com/andybalholm/redwood. - -API documentation is found at https://pkg.go.dev/github.com/andybalholm/brotli?tab=doc. diff --git a/vendor/github.com/andybalholm/brotli/backward_references.go b/vendor/github.com/andybalholm/brotli/backward_references.go deleted file mode 100644 index 008c054..0000000 --- a/vendor/github.com/andybalholm/brotli/backward_references.go +++ /dev/null @@ -1,185 +0,0 @@ -package brotli - -import ( - "sync" -) - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Function to find backward reference copies. 
*/ - -func computeDistanceCode(distance uint, max_distance uint, dist_cache []int) uint { - if distance <= max_distance { - var distance_plus_3 uint = distance + 3 - var offset0 uint = distance_plus_3 - uint(dist_cache[0]) - var offset1 uint = distance_plus_3 - uint(dist_cache[1]) - if distance == uint(dist_cache[0]) { - return 0 - } else if distance == uint(dist_cache[1]) { - return 1 - } else if offset0 < 7 { - return (0x9750468 >> (4 * offset0)) & 0xF - } else if offset1 < 7 { - return (0xFDB1ACE >> (4 * offset1)) & 0xF - } else if distance == uint(dist_cache[2]) { - return 2 - } else if distance == uint(dist_cache[3]) { - return 3 - } - } - - return distance + numDistanceShortCodes - 1 -} - -var hasherSearchResultPool sync.Pool - -func createBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { - var max_backward_limit uint = maxBackwardLimit(params.lgwin) - var insert_length uint = *last_insert_len - var pos_end uint = position + num_bytes - var store_end uint - if num_bytes >= hasher.StoreLookahead() { - store_end = position + num_bytes - hasher.StoreLookahead() + 1 - } else { - store_end = position - } - var random_heuristics_window_size uint = literalSpreeLengthForSparseSearch(params) - var apply_random_heuristics uint = position + random_heuristics_window_size - var gap uint = 0 - /* Set maximum distance, see section 9.1. of the spec. */ - - const kMinScore uint = scoreBase + 100 - - /* For speed up heuristics for random data. */ - - /* Minimum score to accept a backward reference. */ - hasher.PrepareDistanceCache(dist_cache) - sr2, _ := hasherSearchResultPool.Get().(*hasherSearchResult) - if sr2 == nil { - sr2 = &hasherSearchResult{} - } - sr, _ := hasherSearchResultPool.Get().(*hasherSearchResult) - if sr == nil { - sr = &hasherSearchResult{} - } - - for position+hasher.HashTypeLength() < pos_end { - var max_length uint = pos_end - position - var max_distance uint = brotli_min_size_t(position, max_backward_limit) - sr.len = 0 - sr.len_code_delta = 0 - sr.distance = 0 - sr.score = kMinScore - hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position, max_length, max_distance, gap, params.dist.max_distance, sr) - if sr.score > kMinScore { - /* Found a match. Let's look for something even better ahead. */ - var delayed_backward_references_in_row int = 0 - max_length-- - for ; ; max_length-- { - var cost_diff_lazy uint = 175 - if params.quality < minQualityForExtensiveReferenceSearch { - sr2.len = brotli_min_size_t(sr.len-1, max_length) - } else { - sr2.len = 0 - } - sr2.len_code_delta = 0 - sr2.distance = 0 - sr2.score = kMinScore - max_distance = brotli_min_size_t(position+1, max_backward_limit) - hasher.FindLongestMatch(¶ms.dictionary, ringbuffer, ringbuffer_mask, dist_cache, position+1, max_length, max_distance, gap, params.dist.max_distance, sr2) - if sr2.score >= sr.score+cost_diff_lazy { - /* Ok, let's just write one byte for now and start a match from the - next byte. 
*/ - position++ - - insert_length++ - *sr = *sr2 - delayed_backward_references_in_row++ - if delayed_backward_references_in_row < 4 && position+hasher.HashTypeLength() < pos_end { - continue - } - } - - break - } - - apply_random_heuristics = position + 2*sr.len + random_heuristics_window_size - max_distance = brotli_min_size_t(position, max_backward_limit) - { - /* The first 16 codes are special short-codes, - and the minimum offset is 1. */ - var distance_code uint = computeDistanceCode(sr.distance, max_distance+gap, dist_cache) - if (sr.distance <= (max_distance + gap)) && distance_code > 0 { - dist_cache[3] = dist_cache[2] - dist_cache[2] = dist_cache[1] - dist_cache[1] = dist_cache[0] - dist_cache[0] = int(sr.distance) - hasher.PrepareDistanceCache(dist_cache) - } - - *commands = append(*commands, makeCommand(¶ms.dist, insert_length, sr.len, sr.len_code_delta, distance_code)) - } - - *num_literals += insert_length - insert_length = 0 - /* Put the hash keys into the table, if there are enough bytes left. - Depending on the hasher implementation, it can push all positions - in the given range or only a subset of them. - Avoid hash poisoning with RLE data. */ - { - var range_start uint = position + 2 - var range_end uint = brotli_min_size_t(position+sr.len, store_end) - if sr.distance < sr.len>>2 { - range_start = brotli_min_size_t(range_end, brotli_max_size_t(range_start, position+sr.len-(sr.distance<<2))) - } - - hasher.StoreRange(ringbuffer, ringbuffer_mask, range_start, range_end) - } - - position += sr.len - } else { - insert_length++ - position++ - - /* If we have not seen matches for a long time, we can skip some - match lookups. Unsuccessful match lookups are very very expensive - and this kind of a heuristic speeds up compression quite - a lot. */ - if position > apply_random_heuristics { - /* Going through uncompressible data, jump. */ - if position > apply_random_heuristics+4*random_heuristics_window_size { - var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 4) - /* It is quite a long time since we saw a copy, so we assume - that this data is not compressible, and store hashes less - often. Hashes of non compressible data are less likely to - turn out to be useful in the future, too, so we store less of - them to not to flood out the hash table of good compressible - data. 
*/ - - var pos_jump uint = brotli_min_size_t(position+16, pos_end-kMargin) - for ; position < pos_jump; position += 4 { - hasher.Store(ringbuffer, ringbuffer_mask, position) - insert_length += 4 - } - } else { - var kMargin uint = brotli_max_size_t(hasher.StoreLookahead()-1, 2) - var pos_jump uint = brotli_min_size_t(position+8, pos_end-kMargin) - for ; position < pos_jump; position += 2 { - hasher.Store(ringbuffer, ringbuffer_mask, position) - insert_length += 2 - } - } - } - } - } - - insert_length += pos_end - position - *last_insert_len = insert_length - - hasherSearchResultPool.Put(sr) - hasherSearchResultPool.Put(sr2) -} diff --git a/vendor/github.com/andybalholm/brotli/backward_references_hq.go b/vendor/github.com/andybalholm/brotli/backward_references_hq.go deleted file mode 100644 index 21629c1..0000000 --- a/vendor/github.com/andybalholm/brotli/backward_references_hq.go +++ /dev/null @@ -1,796 +0,0 @@ -package brotli - -import "math" - -type zopfliNode struct { - length uint32 - distance uint32 - dcode_insert_length uint32 - u struct { - cost float32 - next uint32 - shortcut uint32 - } -} - -const maxEffectiveDistanceAlphabetSize = 544 - -const kInfinity float32 = 1.7e38 /* ~= 2 ^ 127 */ - -var kDistanceCacheIndex = []uint32{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1} - -var kDistanceCacheOffset = []int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3} - -func initZopfliNodes(array []zopfliNode, length uint) { - var stub zopfliNode - var i uint - stub.length = 1 - stub.distance = 0 - stub.dcode_insert_length = 0 - stub.u.cost = kInfinity - for i = 0; i < length; i++ { - array[i] = stub - } -} - -func zopfliNodeCopyLength(self *zopfliNode) uint32 { - return self.length & 0x1FFFFFF -} - -func zopfliNodeLengthCode(self *zopfliNode) uint32 { - var modifier uint32 = self.length >> 25 - return zopfliNodeCopyLength(self) + 9 - modifier -} - -func zopfliNodeCopyDistance(self *zopfliNode) uint32 { - return self.distance -} - -func zopfliNodeDistanceCode(self *zopfliNode) uint32 { - var short_code uint32 = self.dcode_insert_length >> 27 - if short_code == 0 { - return zopfliNodeCopyDistance(self) + numDistanceShortCodes - 1 - } else { - return short_code - 1 - } -} - -func zopfliNodeCommandLength(self *zopfliNode) uint32 { - return zopfliNodeCopyLength(self) + (self.dcode_insert_length & 0x7FFFFFF) -} - -/* Histogram based cost model for zopflification. 
*/ -type zopfliCostModel struct { - cost_cmd_ [numCommandSymbols]float32 - cost_dist_ []float32 - distance_histogram_size uint32 - literal_costs_ []float32 - min_cost_cmd_ float32 - num_bytes_ uint -} - -func initZopfliCostModel(self *zopfliCostModel, dist *distanceParams, num_bytes uint) { - var distance_histogram_size uint32 = dist.alphabet_size - if distance_histogram_size > maxEffectiveDistanceAlphabetSize { - distance_histogram_size = maxEffectiveDistanceAlphabetSize - } - - self.num_bytes_ = num_bytes - self.literal_costs_ = make([]float32, (num_bytes + 2)) - self.cost_dist_ = make([]float32, (dist.alphabet_size)) - self.distance_histogram_size = distance_histogram_size -} - -func cleanupZopfliCostModel(self *zopfliCostModel) { - self.literal_costs_ = nil - self.cost_dist_ = nil -} - -func setCost(histogram []uint32, histogram_size uint, literal_histogram bool, cost []float32) { - var sum uint = 0 - var missing_symbol_sum uint - var log2sum float32 - var missing_symbol_cost float32 - var i uint - for i = 0; i < histogram_size; i++ { - sum += uint(histogram[i]) - } - - log2sum = float32(fastLog2(sum)) - missing_symbol_sum = sum - if !literal_histogram { - for i = 0; i < histogram_size; i++ { - if histogram[i] == 0 { - missing_symbol_sum++ - } - } - } - - missing_symbol_cost = float32(fastLog2(missing_symbol_sum)) + 2 - for i = 0; i < histogram_size; i++ { - if histogram[i] == 0 { - cost[i] = missing_symbol_cost - continue - } - - /* Shannon bits for this symbol. */ - cost[i] = log2sum - float32(fastLog2(uint(histogram[i]))) - - /* Cannot be coded with less than 1 bit */ - if cost[i] < 1 { - cost[i] = 1 - } - } -} - -func zopfliCostModelSetFromCommands(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint, commands []command, last_insert_len uint) { - var histogram_literal [numLiteralSymbols]uint32 - var histogram_cmd [numCommandSymbols]uint32 - var histogram_dist [maxEffectiveDistanceAlphabetSize]uint32 - var cost_literal [numLiteralSymbols]float32 - var pos uint = position - last_insert_len - var min_cost_cmd float32 = kInfinity - var cost_cmd []float32 = self.cost_cmd_[:] - var literal_costs []float32 - - histogram_literal = [numLiteralSymbols]uint32{} - histogram_cmd = [numCommandSymbols]uint32{} - histogram_dist = [maxEffectiveDistanceAlphabetSize]uint32{} - - for i := range commands { - var inslength uint = uint(commands[i].insert_len_) - var copylength uint = uint(commandCopyLen(&commands[i])) - var distcode uint = uint(commands[i].dist_prefix_) & 0x3FF - var cmdcode uint = uint(commands[i].cmd_prefix_) - var j uint - - histogram_cmd[cmdcode]++ - if cmdcode >= 128 { - histogram_dist[distcode]++ - } - - for j = 0; j < inslength; j++ { - histogram_literal[ringbuffer[(pos+j)&ringbuffer_mask]]++ - } - - pos += inslength + copylength - } - - setCost(histogram_literal[:], numLiteralSymbols, true, cost_literal[:]) - setCost(histogram_cmd[:], numCommandSymbols, false, cost_cmd) - setCost(histogram_dist[:], uint(self.distance_histogram_size), false, self.cost_dist_) - - for i := 0; i < numCommandSymbols; i++ { - min_cost_cmd = brotli_min_float(min_cost_cmd, cost_cmd[i]) - } - - self.min_cost_cmd_ = min_cost_cmd - { - literal_costs = self.literal_costs_ - var literal_carry float32 = 0.0 - num_bytes := int(self.num_bytes_) - literal_costs[0] = 0.0 - for i := 0; i < num_bytes; i++ { - literal_carry += cost_literal[ringbuffer[(position+uint(i))&ringbuffer_mask]] - literal_costs[i+1] = literal_costs[i] + literal_carry - literal_carry -= literal_costs[i+1] - 
literal_costs[i] - } - } -} - -func zopfliCostModelSetFromLiteralCosts(self *zopfliCostModel, position uint, ringbuffer []byte, ringbuffer_mask uint) { - var literal_costs []float32 = self.literal_costs_ - var literal_carry float32 = 0.0 - var cost_dist []float32 = self.cost_dist_ - var cost_cmd []float32 = self.cost_cmd_[:] - var num_bytes uint = self.num_bytes_ - var i uint - estimateBitCostsForLiterals(position, num_bytes, ringbuffer_mask, ringbuffer, literal_costs[1:]) - literal_costs[0] = 0.0 - for i = 0; i < num_bytes; i++ { - literal_carry += literal_costs[i+1] - literal_costs[i+1] = literal_costs[i] + literal_carry - literal_carry -= literal_costs[i+1] - literal_costs[i] - } - - for i = 0; i < numCommandSymbols; i++ { - cost_cmd[i] = float32(fastLog2(uint(11 + uint32(i)))) - } - - for i = 0; uint32(i) < self.distance_histogram_size; i++ { - cost_dist[i] = float32(fastLog2(uint(20 + uint32(i)))) - } - - self.min_cost_cmd_ = float32(fastLog2(11)) -} - -func zopfliCostModelGetCommandCost(self *zopfliCostModel, cmdcode uint16) float32 { - return self.cost_cmd_[cmdcode] -} - -func zopfliCostModelGetDistanceCost(self *zopfliCostModel, distcode uint) float32 { - return self.cost_dist_[distcode] -} - -func zopfliCostModelGetLiteralCosts(self *zopfliCostModel, from uint, to uint) float32 { - return self.literal_costs_[to] - self.literal_costs_[from] -} - -func zopfliCostModelGetMinCostCmd(self *zopfliCostModel) float32 { - return self.min_cost_cmd_ -} - -/* REQUIRES: len >= 2, start_pos <= pos */ -/* REQUIRES: cost < kInfinity, nodes[start_pos].cost < kInfinity */ -/* Maintains the "ZopfliNode array invariant". */ -func updateZopfliNode(nodes []zopfliNode, pos uint, start_pos uint, len uint, len_code uint, dist uint, short_code uint, cost float32) { - var next *zopfliNode = &nodes[pos+len] - next.length = uint32(len | (len+9-len_code)<<25) - next.distance = uint32(dist) - next.dcode_insert_length = uint32(short_code<<27 | (pos - start_pos)) - next.u.cost = cost -} - -type posData struct { - pos uint - distance_cache [4]int - costdiff float32 - cost float32 -} - -/* Maintains the smallest 8 cost difference together with their positions */ -type startPosQueue struct { - q_ [8]posData - idx_ uint -} - -func initStartPosQueue(self *startPosQueue) { - self.idx_ = 0 -} - -func startPosQueueSize(self *startPosQueue) uint { - return brotli_min_size_t(self.idx_, 8) -} - -func startPosQueuePush(self *startPosQueue, posdata *posData) { - var offset uint = ^(self.idx_) & 7 - self.idx_++ - var len uint = startPosQueueSize(self) - var i uint - var q []posData = self.q_[:] - q[offset] = *posdata - - /* Restore the sorted order. In the list of |len| items at most |len - 1| - adjacent element comparisons / swaps are required. */ - for i = 1; i < len; i++ { - if q[offset&7].costdiff > q[(offset+1)&7].costdiff { - var tmp posData = q[offset&7] - q[offset&7] = q[(offset+1)&7] - q[(offset+1)&7] = tmp - } - - offset++ - } -} - -func startPosQueueAt(self *startPosQueue, k uint) *posData { - return &self.q_[(k-self.idx_)&7] -} - -/* Returns the minimum possible copy length that can improve the cost of any */ -/* future position. */ -func computeMinimumCopyLength(start_cost float32, nodes []zopfliNode, num_bytes uint, pos uint) uint { - var min_cost float32 = start_cost - var len uint = 2 - var next_len_bucket uint = 4 - /* Compute the minimum possible cost of reaching any future position. 
*/ - - var next_len_offset uint = 10 - for pos+len <= num_bytes && nodes[pos+len].u.cost <= min_cost { - /* We already reached (pos + len) with no more cost than the minimum - possible cost of reaching anything from this pos, so there is no point in - looking for lengths <= len. */ - len++ - - if len == next_len_offset { - /* We reached the next copy length code bucket, so we add one more - extra bit to the minimum cost. */ - min_cost += 1.0 - - next_len_offset += next_len_bucket - next_len_bucket *= 2 - } - } - - return uint(len) -} - -/* REQUIRES: nodes[pos].cost < kInfinity - REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ -func computeDistanceShortcut(block_start uint, pos uint, max_backward_limit uint, gap uint, nodes []zopfliNode) uint32 { - var clen uint = uint(zopfliNodeCopyLength(&nodes[pos])) - var ilen uint = uint(nodes[pos].dcode_insert_length & 0x7FFFFFF) - var dist uint = uint(zopfliNodeCopyDistance(&nodes[pos])) - - /* Since |block_start + pos| is the end position of the command, the copy part - starts from |block_start + pos - clen|. Distances that are greater than - this or greater than |max_backward_limit| + |gap| are static dictionary - references, and do not update the last distances. - Also distance code 0 (last distance) does not update the last distances. */ - if pos == 0 { - return 0 - } else if dist+clen <= block_start+pos+gap && dist <= max_backward_limit+gap && zopfliNodeDistanceCode(&nodes[pos]) > 0 { - return uint32(pos) - } else { - return nodes[pos-clen-ilen].u.shortcut - } -} - -/* Fills in dist_cache[0..3] with the last four distances (as defined by - Section 4. of the Spec) that would be used at (block_start + pos) if we - used the shortest path of commands from block_start, computed from - nodes[0..pos]. The last four distances at block_start are in - starting_dist_cache[0..3]. - REQUIRES: nodes[pos].cost < kInfinity - REQUIRES: nodes[0..pos] satisfies that "ZopfliNode array invariant". */ -func computeDistanceCache(pos uint, starting_dist_cache []int, nodes []zopfliNode, dist_cache []int) { - var idx int = 0 - var p uint = uint(nodes[pos].u.shortcut) - for idx < 4 && p > 0 { - var ilen uint = uint(nodes[p].dcode_insert_length & 0x7FFFFFF) - var clen uint = uint(zopfliNodeCopyLength(&nodes[p])) - var dist uint = uint(zopfliNodeCopyDistance(&nodes[p])) - dist_cache[idx] = int(dist) - idx++ - - /* Because of prerequisite, p >= clen + ilen >= 2. */ - p = uint(nodes[p-clen-ilen].u.shortcut) - } - - for ; idx < 4; idx++ { - dist_cache[idx] = starting_dist_cache[0] - starting_dist_cache = starting_dist_cache[1:] - } -} - -/* Maintains "ZopfliNode array invariant" and pushes node to the queue, if it - is eligible. */ -func evaluateNode(block_start uint, pos uint, max_backward_limit uint, gap uint, starting_dist_cache []int, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) { - /* Save cost, because ComputeDistanceCache invalidates it. */ - var node_cost float32 = nodes[pos].u.cost - nodes[pos].u.shortcut = computeDistanceShortcut(block_start, pos, max_backward_limit, gap, nodes) - if node_cost <= zopfliCostModelGetLiteralCosts(model, 0, pos) { - var posdata posData - posdata.pos = pos - posdata.cost = node_cost - posdata.costdiff = node_cost - zopfliCostModelGetLiteralCosts(model, 0, pos) - computeDistanceCache(pos, starting_dist_cache, nodes, posdata.distance_cache[:]) - startPosQueuePush(queue, &posdata) - } -} - -/* Returns longest copy length. 
*/ -func updateNodes(num_bytes uint, block_start uint, pos uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, max_backward_limit uint, starting_dist_cache []int, num_matches uint, matches []backwardMatch, model *zopfliCostModel, queue *startPosQueue, nodes []zopfliNode) uint { - var cur_ix uint = block_start + pos - var cur_ix_masked uint = cur_ix & ringbuffer_mask - var max_distance uint = brotli_min_size_t(cur_ix, max_backward_limit) - var max_len uint = num_bytes - pos - var max_zopfli_len uint = maxZopfliLen(params) - var max_iters uint = maxZopfliCandidates(params) - var min_len uint - var result uint = 0 - var k uint - var gap uint = 0 - - evaluateNode(block_start, pos, max_backward_limit, gap, starting_dist_cache, model, queue, nodes) - { - var posdata *posData = startPosQueueAt(queue, 0) - var min_cost float32 = (posdata.cost + zopfliCostModelGetMinCostCmd(model) + zopfliCostModelGetLiteralCosts(model, posdata.pos, pos)) - min_len = computeMinimumCopyLength(min_cost, nodes, num_bytes, pos) - } - - /* Go over the command starting positions in order of increasing cost - difference. */ - for k = 0; k < max_iters && k < startPosQueueSize(queue); k++ { - var posdata *posData = startPosQueueAt(queue, k) - var start uint = posdata.pos - var inscode uint16 = getInsertLengthCode(pos - start) - var start_costdiff float32 = posdata.costdiff - var base_cost float32 = start_costdiff + float32(getInsertExtra(inscode)) + zopfliCostModelGetLiteralCosts(model, 0, pos) - var best_len uint = min_len - 1 - var j uint = 0 - /* Look for last distance matches using the distance cache from this - starting position. */ - for ; j < numDistanceShortCodes && best_len < max_len; j++ { - var idx uint = uint(kDistanceCacheIndex[j]) - var backward uint = uint(posdata.distance_cache[idx] + kDistanceCacheOffset[j]) - var prev_ix uint = cur_ix - backward - var len uint = 0 - var continuation byte = ringbuffer[cur_ix_masked+best_len] - if cur_ix_masked+best_len > ringbuffer_mask { - break - } - - if backward > max_distance+gap { - /* Word dictionary -> ignore. */ - continue - } - - if backward <= max_distance { - /* Regular backward reference. */ - if prev_ix >= cur_ix { - continue - } - - prev_ix &= ringbuffer_mask - if prev_ix+best_len > ringbuffer_mask || continuation != ringbuffer[prev_ix+best_len] { - continue - } - - len = findMatchLengthWithLimit(ringbuffer[prev_ix:], ringbuffer[cur_ix_masked:], max_len) - } else { - continue - } - { - var dist_cost float32 = base_cost + zopfliCostModelGetDistanceCost(model, j) - var l uint - for l = best_len + 1; l <= len; l++ { - var copycode uint16 = getCopyLengthCode(l) - var cmdcode uint16 = combineLengthCodes(inscode, copycode, j == 0) - var tmp float32 - if cmdcode < 128 { - tmp = base_cost - } else { - tmp = dist_cost - } - var cost float32 = tmp + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) - if cost < nodes[pos+l].u.cost { - updateZopfliNode(nodes, pos, start, l, l, backward, j+1, cost) - result = brotli_max_size_t(result, l) - } - - best_len = l - } - } - } - - /* At higher iterations look only for new last distance matches, since - looking only for new command start positions with the same distances - does not help much. */ - if k >= 2 { - continue - } - { - /* Loop through all possible copy lengths at this position. 
*/ - var len uint = min_len - for j = 0; j < num_matches; j++ { - var match backwardMatch = matches[j] - var dist uint = uint(match.distance) - var is_dictionary_match bool = (dist > max_distance+gap) - var dist_code uint = dist + numDistanceShortCodes - 1 - var dist_symbol uint16 - var distextra uint32 - var distnumextra uint32 - var dist_cost float32 - var max_match_len uint - /* We already tried all possible last distance matches, so we can use - normal distance code here. */ - prefixEncodeCopyDistance(dist_code, uint(params.dist.num_direct_distance_codes), uint(params.dist.distance_postfix_bits), &dist_symbol, &distextra) - - distnumextra = uint32(dist_symbol) >> 10 - dist_cost = base_cost + float32(distnumextra) + zopfliCostModelGetDistanceCost(model, uint(dist_symbol)&0x3FF) - - /* Try all copy lengths up until the maximum copy length corresponding - to this distance. If the distance refers to the static dictionary, or - the maximum length is long enough, try only one maximum length. */ - max_match_len = backwardMatchLength(&match) - - if len < max_match_len && (is_dictionary_match || max_match_len > max_zopfli_len) { - len = max_match_len - } - - for ; len <= max_match_len; len++ { - var len_code uint - if is_dictionary_match { - len_code = backwardMatchLengthCode(&match) - } else { - len_code = len - } - var copycode uint16 = getCopyLengthCode(len_code) - var cmdcode uint16 = combineLengthCodes(inscode, copycode, false) - var cost float32 = dist_cost + float32(getCopyExtra(copycode)) + zopfliCostModelGetCommandCost(model, cmdcode) - if cost < nodes[pos+len].u.cost { - updateZopfliNode(nodes, pos, start, uint(len), len_code, dist, 0, cost) - if len > result { - result = len - } - } - } - } - } - } - - return result -} - -func computeShortestPathFromNodes(num_bytes uint, nodes []zopfliNode) uint { - var index uint = num_bytes - var num_commands uint = 0 - for nodes[index].dcode_insert_length&0x7FFFFFF == 0 && nodes[index].length == 1 { - index-- - } - nodes[index].u.next = math.MaxUint32 - for index != 0 { - var len uint = uint(zopfliNodeCommandLength(&nodes[index])) - index -= uint(len) - nodes[index].u.next = uint32(len) - num_commands++ - } - - return num_commands -} - -/* REQUIRES: nodes != NULL and len(nodes) >= num_bytes + 1 */ -func zopfliCreateCommands(num_bytes uint, block_start uint, nodes []zopfliNode, dist_cache []int, last_insert_len *uint, params *encoderParams, commands *[]command, num_literals *uint) { - var max_backward_limit uint = maxBackwardLimit(params.lgwin) - var pos uint = 0 - var offset uint32 = nodes[0].u.next - var i uint - var gap uint = 0 - for i = 0; offset != math.MaxUint32; i++ { - var next *zopfliNode = &nodes[uint32(pos)+offset] - var copy_length uint = uint(zopfliNodeCopyLength(next)) - var insert_length uint = uint(next.dcode_insert_length & 0x7FFFFFF) - pos += insert_length - offset = next.u.next - if i == 0 { - insert_length += *last_insert_len - *last_insert_len = 0 - } - { - var distance uint = uint(zopfliNodeCopyDistance(next)) - var len_code uint = uint(zopfliNodeLengthCode(next)) - var max_distance uint = brotli_min_size_t(block_start+pos, max_backward_limit) - var is_dictionary bool = (distance > max_distance+gap) - var dist_code uint = uint(zopfliNodeDistanceCode(next)) - *commands = append(*commands, makeCommand(¶ms.dist, insert_length, copy_length, int(len_code)-int(copy_length), dist_code)) - - if !is_dictionary && dist_code > 0 { - dist_cache[3] = dist_cache[2] - dist_cache[2] = dist_cache[1] - dist_cache[1] = dist_cache[0] - 
dist_cache[0] = int(distance) - } - } - - *num_literals += insert_length - pos += copy_length - } - - *last_insert_len += num_bytes - pos -} - -func zopfliIterate(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, gap uint, dist_cache []int, model *zopfliCostModel, num_matches []uint32, matches []backwardMatch, nodes []zopfliNode) uint { - var max_backward_limit uint = maxBackwardLimit(params.lgwin) - var max_zopfli_len uint = maxZopfliLen(params) - var queue startPosQueue - var cur_match_pos uint = 0 - var i uint - nodes[0].length = 0 - nodes[0].u.cost = 0 - initStartPosQueue(&queue) - for i = 0; i+3 < num_bytes; i++ { - var skip uint = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, max_backward_limit, dist_cache, uint(num_matches[i]), matches[cur_match_pos:], model, &queue, nodes) - if skip < longCopyQuickStep { - skip = 0 - } - cur_match_pos += uint(num_matches[i]) - if num_matches[i] == 1 && backwardMatchLength(&matches[cur_match_pos-1]) > max_zopfli_len { - skip = brotli_max_size_t(backwardMatchLength(&matches[cur_match_pos-1]), skip) - } - - if skip > 1 { - skip-- - for skip != 0 { - i++ - if i+3 >= num_bytes { - break - } - evaluateNode(position, i, max_backward_limit, gap, dist_cache, model, &queue, nodes) - cur_match_pos += uint(num_matches[i]) - skip-- - } - } - } - - return computeShortestPathFromNodes(num_bytes, nodes) -} - -/* Computes the shortest path of commands from position to at most - position + num_bytes. - - On return, path->size() is the number of commands found and path[i] is the - length of the i-th command (copy length plus insert length). - Note that the sum of the lengths of all commands can be less than num_bytes. - - On return, the nodes[0..num_bytes] array will have the following - "ZopfliNode array invariant": - For each i in [1..num_bytes], if nodes[i].cost < kInfinity, then - (1) nodes[i].copy_length() >= 2 - (2) nodes[i].command_length() <= i and - (3) nodes[i - nodes[i].command_length()].cost < kInfinity - - REQUIRES: nodes != nil and len(nodes) >= num_bytes + 1 */ -func zopfliComputeShortestPath(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, dist_cache []int, hasher *h10, nodes []zopfliNode) uint { - var max_backward_limit uint = maxBackwardLimit(params.lgwin) - var max_zopfli_len uint = maxZopfliLen(params) - var model zopfliCostModel - var queue startPosQueue - var matches [2 * (maxNumMatchesH10 + 64)]backwardMatch - var store_end uint - if num_bytes >= hasher.StoreLookahead() { - store_end = position + num_bytes - hasher.StoreLookahead() + 1 - } else { - store_end = position - } - var i uint - var gap uint = 0 - var lz_matches_offset uint = 0 - nodes[0].length = 0 - nodes[0].u.cost = 0 - initZopfliCostModel(&model, ¶ms.dist, num_bytes) - zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) - initStartPosQueue(&queue) - for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { - var pos uint = position + i - var max_distance uint = brotli_min_size_t(pos, max_backward_limit) - var skip uint - var num_matches uint - num_matches = findAllMatchesH10(hasher, ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, num_bytes-i, max_distance, gap, params, matches[lz_matches_offset:]) - if num_matches > 0 && backwardMatchLength(&matches[num_matches-1]) > max_zopfli_len { - matches[0] = matches[num_matches-1] - num_matches = 1 - } - - skip = updateNodes(num_bytes, position, i, ringbuffer, ringbuffer_mask, params, 
max_backward_limit, dist_cache, num_matches, matches[:], &model, &queue, nodes) - if skip < longCopyQuickStep { - skip = 0 - } - if num_matches == 1 && backwardMatchLength(&matches[0]) > max_zopfli_len { - skip = brotli_max_size_t(backwardMatchLength(&matches[0]), skip) - } - - if skip > 1 { - /* Add the tail of the copy to the hasher. */ - hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+skip, store_end)) - - skip-- - for skip != 0 { - i++ - if i+hasher.HashTypeLength()-1 >= num_bytes { - break - } - evaluateNode(position, i, max_backward_limit, gap, dist_cache, &model, &queue, nodes) - skip-- - } - } - } - - cleanupZopfliCostModel(&model) - return computeShortestPathFromNodes(num_bytes, nodes) -} - -func createZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher *h10, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { - var nodes []zopfliNode - nodes = make([]zopfliNode, (num_bytes + 1)) - initZopfliNodes(nodes, num_bytes+1) - zopfliComputeShortestPath(num_bytes, position, ringbuffer, ringbuffer_mask, params, dist_cache, hasher, nodes) - zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) - nodes = nil -} - -func createHqZopfliBackwardReferences(num_bytes uint, position uint, ringbuffer []byte, ringbuffer_mask uint, params *encoderParams, hasher hasherHandle, dist_cache []int, last_insert_len *uint, commands *[]command, num_literals *uint) { - var max_backward_limit uint = maxBackwardLimit(params.lgwin) - var num_matches []uint32 = make([]uint32, num_bytes) - var matches_size uint = 4 * num_bytes - var store_end uint - if num_bytes >= hasher.StoreLookahead() { - store_end = position + num_bytes - hasher.StoreLookahead() + 1 - } else { - store_end = position - } - var cur_match_pos uint = 0 - var i uint - var orig_num_literals uint - var orig_last_insert_len uint - var orig_dist_cache [4]int - var orig_num_commands int - var model zopfliCostModel - var nodes []zopfliNode - var matches []backwardMatch = make([]backwardMatch, matches_size) - var gap uint = 0 - var shadow_matches uint = 0 - var new_array []backwardMatch - for i = 0; i+hasher.HashTypeLength()-1 < num_bytes; i++ { - var pos uint = position + i - var max_distance uint = brotli_min_size_t(pos, max_backward_limit) - var max_length uint = num_bytes - i - var num_found_matches uint - var cur_match_end uint - var j uint - - /* Ensure that we have enough free slots. 
*/ - if matches_size < cur_match_pos+maxNumMatchesH10+shadow_matches { - var new_size uint = matches_size - if new_size == 0 { - new_size = cur_match_pos + maxNumMatchesH10 + shadow_matches - } - - for new_size < cur_match_pos+maxNumMatchesH10+shadow_matches { - new_size *= 2 - } - - new_array = make([]backwardMatch, new_size) - if matches_size != 0 { - copy(new_array, matches[:matches_size]) - } - - matches = new_array - matches_size = new_size - } - - num_found_matches = findAllMatchesH10(hasher.(*h10), ¶ms.dictionary, ringbuffer, ringbuffer_mask, pos, max_length, max_distance, gap, params, matches[cur_match_pos+shadow_matches:]) - cur_match_end = cur_match_pos + num_found_matches - for j = cur_match_pos; j+1 < cur_match_end; j++ { - assert(backwardMatchLength(&matches[j]) <= backwardMatchLength(&matches[j+1])) - } - - num_matches[i] = uint32(num_found_matches) - if num_found_matches > 0 { - var match_len uint = backwardMatchLength(&matches[cur_match_end-1]) - if match_len > maxZopfliLenQuality11 { - var skip uint = match_len - 1 - matches[cur_match_pos] = matches[cur_match_end-1] - cur_match_pos++ - num_matches[i] = 1 - - /* Add the tail of the copy to the hasher. */ - hasher.StoreRange(ringbuffer, ringbuffer_mask, pos+1, brotli_min_size_t(pos+match_len, store_end)) - var pos uint = i - for i := 0; i < int(skip); i++ { - num_matches[pos+1:][i] = 0 - } - i += skip - } else { - cur_match_pos = cur_match_end - } - } - } - - orig_num_literals = *num_literals - orig_last_insert_len = *last_insert_len - copy(orig_dist_cache[:], dist_cache[:4]) - orig_num_commands = len(*commands) - nodes = make([]zopfliNode, (num_bytes + 1)) - initZopfliCostModel(&model, ¶ms.dist, num_bytes) - for i = 0; i < 2; i++ { - initZopfliNodes(nodes, num_bytes+1) - if i == 0 { - zopfliCostModelSetFromLiteralCosts(&model, position, ringbuffer, ringbuffer_mask) - } else { - zopfliCostModelSetFromCommands(&model, position, ringbuffer, ringbuffer_mask, (*commands)[orig_num_commands:], orig_last_insert_len) - } - - *commands = (*commands)[:orig_num_commands] - *num_literals = orig_num_literals - *last_insert_len = orig_last_insert_len - copy(dist_cache, orig_dist_cache[:4]) - zopfliIterate(num_bytes, position, ringbuffer, ringbuffer_mask, params, gap, dist_cache, &model, num_matches, matches, nodes) - zopfliCreateCommands(num_bytes, position, nodes, dist_cache, last_insert_len, params, commands, num_literals) - } - - cleanupZopfliCostModel(&model) - nodes = nil - matches = nil - num_matches = nil -} diff --git a/vendor/github.com/andybalholm/brotli/bit_cost.go b/vendor/github.com/andybalholm/brotli/bit_cost.go deleted file mode 100644 index 0005fc1..0000000 --- a/vendor/github.com/andybalholm/brotli/bit_cost.go +++ /dev/null @@ -1,436 +0,0 @@ -package brotli - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Functions to estimate the bit cost of Huffman trees. 
*/ -func shannonEntropy(population []uint32, size uint, total *uint) float64 { - var sum uint = 0 - var retval float64 = 0 - var population_end []uint32 = population[size:] - var p uint - for -cap(population) < -cap(population_end) { - p = uint(population[0]) - population = population[1:] - sum += p - retval -= float64(p) * fastLog2(p) - } - - if sum != 0 { - retval += float64(sum) * fastLog2(sum) - } - *total = sum - return retval -} - -func bitsEntropy(population []uint32, size uint) float64 { - var sum uint - var retval float64 = shannonEntropy(population, size, &sum) - if retval < float64(sum) { - /* At least one bit per literal is needed. */ - retval = float64(sum) - } - - return retval -} - -const kOneSymbolHistogramCost float64 = 12 -const kTwoSymbolHistogramCost float64 = 20 -const kThreeSymbolHistogramCost float64 = 28 -const kFourSymbolHistogramCost float64 = 37 - -func populationCostLiteral(histogram *histogramLiteral) float64 { - var data_size uint = histogramDataSizeLiteral() - var count int = 0 - var s [5]uint - var bits float64 = 0.0 - var i uint - if histogram.total_count_ == 0 { - return kOneSymbolHistogramCost - } - - for i = 0; i < data_size; i++ { - if histogram.data_[i] > 0 { - s[count] = i - count++ - if count > 4 { - break - } - } - } - - if count == 1 { - return kOneSymbolHistogramCost - } - - if count == 2 { - return kTwoSymbolHistogramCost + float64(histogram.total_count_) - } - - if count == 3 { - var histo0 uint32 = histogram.data_[s[0]] - var histo1 uint32 = histogram.data_[s[1]] - var histo2 uint32 = histogram.data_[s[2]] - var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) - return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) - } - - if count == 4 { - var histo [4]uint32 - var h23 uint32 - var histomax uint32 - for i = 0; i < 4; i++ { - histo[i] = histogram.data_[s[i]] - } - - /* Sort */ - for i = 0; i < 4; i++ { - var j uint - for j = i + 1; j < 4; j++ { - if histo[j] > histo[i] { - var tmp uint32 = histo[j] - histo[j] = histo[i] - histo[i] = tmp - } - } - } - - h23 = histo[2] + histo[3] - histomax = brotli_max_uint32_t(h23, histo[0]) - return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) - } - { - var max_depth uint = 1 - var depth_histo = [codeLengthCodes]uint32{0} - /* In this loop we compute the entropy of the histogram and simultaneously - build a simplified histogram of the code length codes where we use the - zero repeat code 17, but we don't use the non-zero repeat code 16. */ - - var log2total float64 = fastLog2(histogram.total_count_) - for i = 0; i < data_size; { - if histogram.data_[i] > 0 { - var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) - /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = - = log2(total_count) - log2(count(symbol)) */ - - var depth uint = uint(log2p + 0.5) - /* Approximate the bit depth by round(-log2(P(symbol))) */ - bits += float64(histogram.data_[i]) * log2p - - if depth > 15 { - depth = 15 - } - - if depth > max_depth { - max_depth = depth - } - - depth_histo[depth]++ - i++ - } else { - var reps uint32 = 1 - /* Compute the run length of zeros and add the appropriate number of 0 - and 17 code length codes to the code length code histogram. 
*/ - - var k uint - for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { - reps++ - } - - i += uint(reps) - if i == data_size { - /* Don't add any cost for the last zero run, since these are encoded - only implicitly. */ - break - } - - if reps < 3 { - depth_histo[0] += reps - } else { - reps -= 2 - for reps > 0 { - depth_histo[repeatZeroCodeLength]++ - - /* Add the 3 extra bits for the 17 code length code. */ - bits += 3 - - reps >>= 3 - } - } - } - } - - /* Add the estimated encoding cost of the code length code histogram. */ - bits += float64(18 + 2*max_depth) - - /* Add the entropy of the code length code histogram. */ - bits += bitsEntropy(depth_histo[:], codeLengthCodes) - } - - return bits -} - -func populationCostCommand(histogram *histogramCommand) float64 { - var data_size uint = histogramDataSizeCommand() - var count int = 0 - var s [5]uint - var bits float64 = 0.0 - var i uint - if histogram.total_count_ == 0 { - return kOneSymbolHistogramCost - } - - for i = 0; i < data_size; i++ { - if histogram.data_[i] > 0 { - s[count] = i - count++ - if count > 4 { - break - } - } - } - - if count == 1 { - return kOneSymbolHistogramCost - } - - if count == 2 { - return kTwoSymbolHistogramCost + float64(histogram.total_count_) - } - - if count == 3 { - var histo0 uint32 = histogram.data_[s[0]] - var histo1 uint32 = histogram.data_[s[1]] - var histo2 uint32 = histogram.data_[s[2]] - var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) - return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) - } - - if count == 4 { - var histo [4]uint32 - var h23 uint32 - var histomax uint32 - for i = 0; i < 4; i++ { - histo[i] = histogram.data_[s[i]] - } - - /* Sort */ - for i = 0; i < 4; i++ { - var j uint - for j = i + 1; j < 4; j++ { - if histo[j] > histo[i] { - var tmp uint32 = histo[j] - histo[j] = histo[i] - histo[i] = tmp - } - } - } - - h23 = histo[2] + histo[3] - histomax = brotli_max_uint32_t(h23, histo[0]) - return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) - } - { - var max_depth uint = 1 - var depth_histo = [codeLengthCodes]uint32{0} - /* In this loop we compute the entropy of the histogram and simultaneously - build a simplified histogram of the code length codes where we use the - zero repeat code 17, but we don't use the non-zero repeat code 16. */ - - var log2total float64 = fastLog2(histogram.total_count_) - for i = 0; i < data_size; { - if histogram.data_[i] > 0 { - var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) - /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = - = log2(total_count) - log2(count(symbol)) */ - - var depth uint = uint(log2p + 0.5) - /* Approximate the bit depth by round(-log2(P(symbol))) */ - bits += float64(histogram.data_[i]) * log2p - - if depth > 15 { - depth = 15 - } - - if depth > max_depth { - max_depth = depth - } - - depth_histo[depth]++ - i++ - } else { - var reps uint32 = 1 - /* Compute the run length of zeros and add the appropriate number of 0 - and 17 code length codes to the code length code histogram. */ - - var k uint - for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { - reps++ - } - - i += uint(reps) - if i == data_size { - /* Don't add any cost for the last zero run, since these are encoded - only implicitly. 
*/ - break - } - - if reps < 3 { - depth_histo[0] += reps - } else { - reps -= 2 - for reps > 0 { - depth_histo[repeatZeroCodeLength]++ - - /* Add the 3 extra bits for the 17 code length code. */ - bits += 3 - - reps >>= 3 - } - } - } - } - - /* Add the estimated encoding cost of the code length code histogram. */ - bits += float64(18 + 2*max_depth) - - /* Add the entropy of the code length code histogram. */ - bits += bitsEntropy(depth_histo[:], codeLengthCodes) - } - - return bits -} - -func populationCostDistance(histogram *histogramDistance) float64 { - var data_size uint = histogramDataSizeDistance() - var count int = 0 - var s [5]uint - var bits float64 = 0.0 - var i uint - if histogram.total_count_ == 0 { - return kOneSymbolHistogramCost - } - - for i = 0; i < data_size; i++ { - if histogram.data_[i] > 0 { - s[count] = i - count++ - if count > 4 { - break - } - } - } - - if count == 1 { - return kOneSymbolHistogramCost - } - - if count == 2 { - return kTwoSymbolHistogramCost + float64(histogram.total_count_) - } - - if count == 3 { - var histo0 uint32 = histogram.data_[s[0]] - var histo1 uint32 = histogram.data_[s[1]] - var histo2 uint32 = histogram.data_[s[2]] - var histomax uint32 = brotli_max_uint32_t(histo0, brotli_max_uint32_t(histo1, histo2)) - return kThreeSymbolHistogramCost + 2*(float64(histo0)+float64(histo1)+float64(histo2)) - float64(histomax) - } - - if count == 4 { - var histo [4]uint32 - var h23 uint32 - var histomax uint32 - for i = 0; i < 4; i++ { - histo[i] = histogram.data_[s[i]] - } - - /* Sort */ - for i = 0; i < 4; i++ { - var j uint - for j = i + 1; j < 4; j++ { - if histo[j] > histo[i] { - var tmp uint32 = histo[j] - histo[j] = histo[i] - histo[i] = tmp - } - } - } - - h23 = histo[2] + histo[3] - histomax = brotli_max_uint32_t(h23, histo[0]) - return kFourSymbolHistogramCost + 3*float64(h23) + 2*(float64(histo[0])+float64(histo[1])) - float64(histomax) - } - { - var max_depth uint = 1 - var depth_histo = [codeLengthCodes]uint32{0} - /* In this loop we compute the entropy of the histogram and simultaneously - build a simplified histogram of the code length codes where we use the - zero repeat code 17, but we don't use the non-zero repeat code 16. */ - - var log2total float64 = fastLog2(histogram.total_count_) - for i = 0; i < data_size; { - if histogram.data_[i] > 0 { - var log2p float64 = log2total - fastLog2(uint(histogram.data_[i])) - /* Compute -log2(P(symbol)) = -log2(count(symbol)/total_count) = - = log2(total_count) - log2(count(symbol)) */ - - var depth uint = uint(log2p + 0.5) - /* Approximate the bit depth by round(-log2(P(symbol))) */ - bits += float64(histogram.data_[i]) * log2p - - if depth > 15 { - depth = 15 - } - - if depth > max_depth { - max_depth = depth - } - - depth_histo[depth]++ - i++ - } else { - var reps uint32 = 1 - /* Compute the run length of zeros and add the appropriate number of 0 - and 17 code length codes to the code length code histogram. */ - - var k uint - for k = i + 1; k < data_size && histogram.data_[k] == 0; k++ { - reps++ - } - - i += uint(reps) - if i == data_size { - /* Don't add any cost for the last zero run, since these are encoded - only implicitly. */ - break - } - - if reps < 3 { - depth_histo[0] += reps - } else { - reps -= 2 - for reps > 0 { - depth_histo[repeatZeroCodeLength]++ - - /* Add the 3 extra bits for the 17 code length code. */ - bits += 3 - - reps >>= 3 - } - } - } - } - - /* Add the estimated encoding cost of the code length code histogram. 
*/ - bits += float64(18 + 2*max_depth) - - /* Add the entropy of the code length code histogram. */ - bits += bitsEntropy(depth_histo[:], codeLengthCodes) - } - - return bits -} diff --git a/vendor/github.com/andybalholm/brotli/bit_reader.go b/vendor/github.com/andybalholm/brotli/bit_reader.go deleted file mode 100644 index fba8687..0000000 --- a/vendor/github.com/andybalholm/brotli/bit_reader.go +++ /dev/null @@ -1,266 +0,0 @@ -package brotli - -import "encoding/binary" - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Bit reading helpers */ - -const shortFillBitWindowRead = (8 >> 1) - -var kBitMask = [33]uint32{ - 0x00000000, - 0x00000001, - 0x00000003, - 0x00000007, - 0x0000000F, - 0x0000001F, - 0x0000003F, - 0x0000007F, - 0x000000FF, - 0x000001FF, - 0x000003FF, - 0x000007FF, - 0x00000FFF, - 0x00001FFF, - 0x00003FFF, - 0x00007FFF, - 0x0000FFFF, - 0x0001FFFF, - 0x0003FFFF, - 0x0007FFFF, - 0x000FFFFF, - 0x001FFFFF, - 0x003FFFFF, - 0x007FFFFF, - 0x00FFFFFF, - 0x01FFFFFF, - 0x03FFFFFF, - 0x07FFFFFF, - 0x0FFFFFFF, - 0x1FFFFFFF, - 0x3FFFFFFF, - 0x7FFFFFFF, - 0xFFFFFFFF, -} - -func bitMask(n uint32) uint32 { - return kBitMask[n] -} - -type bitReader struct { - val_ uint64 - bit_pos_ uint32 - input []byte - input_len uint - byte_pos uint -} - -type bitReaderState struct { - val_ uint64 - bit_pos_ uint32 - input []byte - input_len uint - byte_pos uint -} - -/* Initializes the BrotliBitReader fields. */ - -/* Ensures that accumulator is not empty. - May consume up to sizeof(brotli_reg_t) - 1 bytes of input. - Returns false if data is required but there is no input available. - For BROTLI_ALIGNED_READ this function also prepares bit reader for aligned - reading. */ -func bitReaderSaveState(from *bitReader, to *bitReaderState) { - to.val_ = from.val_ - to.bit_pos_ = from.bit_pos_ - to.input = from.input - to.input_len = from.input_len - to.byte_pos = from.byte_pos -} - -func bitReaderRestoreState(to *bitReader, from *bitReaderState) { - to.val_ = from.val_ - to.bit_pos_ = from.bit_pos_ - to.input = from.input - to.input_len = from.input_len - to.byte_pos = from.byte_pos -} - -func getAvailableBits(br *bitReader) uint32 { - return 64 - br.bit_pos_ -} - -/* Returns amount of unread bytes the bit reader still has buffered from the - BrotliInput, including whole bytes in br->val_. */ -func getRemainingBytes(br *bitReader) uint { - return uint(uint32(br.input_len-br.byte_pos) + (getAvailableBits(br) >> 3)) -} - -/* Checks if there is at least |num| bytes left in the input ring-buffer - (excluding the bits remaining in br->val_). */ -func checkInputAmount(br *bitReader, num uint) bool { - return br.input_len-br.byte_pos >= num -} - -/* Guarantees that there are at least |n_bits| + 1 bits in accumulator. - Precondition: accumulator contains at least 1 bit. - |n_bits| should be in the range [1..24] for regular build. For portable - non-64-bit little-endian build only 16 bits are safe to request. */ -func fillBitWindow(br *bitReader, n_bits uint32) { - if br.bit_pos_ >= 32 { - br.val_ >>= 32 - br.bit_pos_ ^= 32 /* here same as -= 32 because of the if condition */ - br.val_ |= (uint64(binary.LittleEndian.Uint32(br.input[br.byte_pos:]))) << 32 - br.byte_pos += 4 - } -} - -/* Mostly like BrotliFillBitWindow, but guarantees only 16 bits and reads no - more than BROTLI_SHORT_FILL_BIT_WINDOW_READ bytes of input. 
*/ -func fillBitWindow16(br *bitReader) { - fillBitWindow(br, 17) -} - -/* Tries to pull one byte of input to accumulator. - Returns false if there is no input available. */ -func pullByte(br *bitReader) bool { - if br.byte_pos == br.input_len { - return false - } - - br.val_ >>= 8 - br.val_ |= (uint64(br.input[br.byte_pos])) << 56 - br.bit_pos_ -= 8 - br.byte_pos++ - return true -} - -/* Returns currently available bits. - The number of valid bits could be calculated by BrotliGetAvailableBits. */ -func getBitsUnmasked(br *bitReader) uint64 { - return br.val_ >> br.bit_pos_ -} - -/* Like BrotliGetBits, but does not mask the result. - The result contains at least 16 valid bits. */ -func get16BitsUnmasked(br *bitReader) uint32 { - fillBitWindow(br, 16) - return uint32(getBitsUnmasked(br)) -} - -/* Returns the specified number of bits from |br| without advancing bit - position. */ -func getBits(br *bitReader, n_bits uint32) uint32 { - fillBitWindow(br, n_bits) - return uint32(getBitsUnmasked(br)) & bitMask(n_bits) -} - -/* Tries to peek the specified amount of bits. Returns false, if there - is not enough input. */ -func safeGetBits(br *bitReader, n_bits uint32, val *uint32) bool { - for getAvailableBits(br) < n_bits { - if !pullByte(br) { - return false - } - } - - *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) - return true -} - -/* Advances the bit pos by |n_bits|. */ -func dropBits(br *bitReader, n_bits uint32) { - br.bit_pos_ += n_bits -} - -func bitReaderUnload(br *bitReader) { - var unused_bytes uint32 = getAvailableBits(br) >> 3 - var unused_bits uint32 = unused_bytes << 3 - br.byte_pos -= uint(unused_bytes) - if unused_bits == 64 { - br.val_ = 0 - } else { - br.val_ <<= unused_bits - } - - br.bit_pos_ += unused_bits -} - -/* Reads the specified number of bits from |br| and advances the bit pos. - Precondition: accumulator MUST contain at least |n_bits|. */ -func takeBits(br *bitReader, n_bits uint32, val *uint32) { - *val = uint32(getBitsUnmasked(br)) & bitMask(n_bits) - dropBits(br, n_bits) -} - -/* Reads the specified number of bits from |br| and advances the bit pos. - Assumes that there is enough input to perform BrotliFillBitWindow. */ -func readBits(br *bitReader, n_bits uint32) uint32 { - var val uint32 - fillBitWindow(br, n_bits) - takeBits(br, n_bits, &val) - return val -} - -/* Tries to read the specified amount of bits. Returns false, if there - is not enough input. |n_bits| MUST be positive. */ -func safeReadBits(br *bitReader, n_bits uint32, val *uint32) bool { - for getAvailableBits(br) < n_bits { - if !pullByte(br) { - return false - } - } - - takeBits(br, n_bits, val) - return true -} - -/* Advances the bit reader position to the next byte boundary and verifies - that any skipped bits are set to zero. */ -func bitReaderJumpToByteBoundary(br *bitReader) bool { - var pad_bits_count uint32 = getAvailableBits(br) & 0x7 - var pad_bits uint32 = 0 - if pad_bits_count != 0 { - takeBits(br, pad_bits_count, &pad_bits) - } - - return pad_bits == 0 -} - -/* Copies remaining input bytes stored in the bit reader to the output. Value - |num| may not be larger than BrotliGetRemainingBytes. The bit reader must be - warmed up again after this. 
*/ -func copyBytes(dest []byte, br *bitReader, num uint) { - for getAvailableBits(br) >= 8 && num > 0 { - dest[0] = byte(getBitsUnmasked(br)) - dropBits(br, 8) - dest = dest[1:] - num-- - } - - copy(dest, br.input[br.byte_pos:][:num]) - br.byte_pos += num -} - -func initBitReader(br *bitReader) { - br.val_ = 0 - br.bit_pos_ = 64 -} - -func warmupBitReader(br *bitReader) bool { - /* Fixing alignment after unaligned BrotliFillWindow would result accumulator - overflow. If unalignment is caused by BrotliSafeReadBits, then there is - enough space in accumulator to fix alignment. */ - if getAvailableBits(br) == 0 { - if !pullByte(br) { - return false - } - } - - return true -} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter.go b/vendor/github.com/andybalholm/brotli/block_splitter.go deleted file mode 100644 index 978a131..0000000 --- a/vendor/github.com/andybalholm/brotli/block_splitter.go +++ /dev/null @@ -1,144 +0,0 @@ -package brotli - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Block split point selection utilities. */ - -type blockSplit struct { - num_types uint - num_blocks uint - types []byte - lengths []uint32 - types_alloc_size uint - lengths_alloc_size uint -} - -const ( - kMaxLiteralHistograms uint = 100 - kMaxCommandHistograms uint = 50 - kLiteralBlockSwitchCost float64 = 28.1 - kCommandBlockSwitchCost float64 = 13.5 - kDistanceBlockSwitchCost float64 = 14.6 - kLiteralStrideLength uint = 70 - kCommandStrideLength uint = 40 - kSymbolsPerLiteralHistogram uint = 544 - kSymbolsPerCommandHistogram uint = 530 - kSymbolsPerDistanceHistogram uint = 544 - kMinLengthForBlockSplitting uint = 128 - kIterMulForRefining uint = 2 - kMinItersForRefining uint = 100 -) - -func countLiterals(cmds []command) uint { - var total_length uint = 0 - /* Count how many we have. */ - - for i := range cmds { - total_length += uint(cmds[i].insert_len_) - } - - return total_length -} - -func copyLiteralsToByteArray(cmds []command, data []byte, offset uint, mask uint, literals []byte) { - var pos uint = 0 - var from_pos uint = offset & mask - for i := range cmds { - var insert_len uint = uint(cmds[i].insert_len_) - if from_pos+insert_len > mask { - var head_size uint = mask + 1 - from_pos - copy(literals[pos:], data[from_pos:][:head_size]) - from_pos = 0 - pos += head_size - insert_len -= head_size - } - - if insert_len > 0 { - copy(literals[pos:], data[from_pos:][:insert_len]) - pos += insert_len - } - - from_pos = uint((uint32(from_pos+insert_len) + commandCopyLen(&cmds[i])) & uint32(mask)) - } -} - -func myRand(seed *uint32) uint32 { - /* Initial seed should be 7. In this case, loop length is (1 << 29). */ - *seed *= 16807 - - return *seed -} - -func bitCost(count uint) float64 { - if count == 0 { - return -2.0 - } else { - return fastLog2(count) - } -} - -const histogramsPerBatch = 64 - -const clustersPerBatch = 16 - -func initBlockSplit(self *blockSplit) { - self.num_types = 0 - self.num_blocks = 0 - self.types = self.types[:0] - self.lengths = self.lengths[:0] - self.types_alloc_size = 0 - self.lengths_alloc_size = 0 -} - -func splitBlock(cmds []command, data []byte, pos uint, mask uint, params *encoderParams, literal_split *blockSplit, insert_and_copy_split *blockSplit, dist_split *blockSplit) { - { - var literals_count uint = countLiterals(cmds) - var literals []byte = make([]byte, literals_count) - - /* Create a continuous array of literals. 
*/ - copyLiteralsToByteArray(cmds, data, pos, mask, literals) - - /* Create the block split on the array of literals. - Literal histograms have alphabet size 256. */ - splitByteVectorLiteral(literals, literals_count, kSymbolsPerLiteralHistogram, kMaxLiteralHistograms, kLiteralStrideLength, kLiteralBlockSwitchCost, params, literal_split) - - literals = nil - } - { - var insert_and_copy_codes []uint16 = make([]uint16, len(cmds)) - /* Compute prefix codes for commands. */ - - for i := range cmds { - insert_and_copy_codes[i] = cmds[i].cmd_prefix_ - } - - /* Create the block split on the array of command prefixes. */ - splitByteVectorCommand(insert_and_copy_codes, kSymbolsPerCommandHistogram, kMaxCommandHistograms, kCommandStrideLength, kCommandBlockSwitchCost, params, insert_and_copy_split) - - /* TODO: reuse for distances? */ - - insert_and_copy_codes = nil - } - { - var distance_prefixes []uint16 = make([]uint16, len(cmds)) - var j uint = 0 - /* Create a continuous array of distance prefixes. */ - - for i := range cmds { - var cmd *command = &cmds[i] - if commandCopyLen(cmd) != 0 && cmd.cmd_prefix_ >= 128 { - distance_prefixes[j] = cmd.dist_prefix_ & 0x3FF - j++ - } - } - - /* Create the block split on the array of distance prefixes. */ - splitByteVectorDistance(distance_prefixes, j, kSymbolsPerDistanceHistogram, kMaxCommandHistograms, kCommandStrideLength, kDistanceBlockSwitchCost, params, dist_split) - - distance_prefixes = nil - } -} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_command.go b/vendor/github.com/andybalholm/brotli/block_splitter_command.go deleted file mode 100644 index 9dec13e..0000000 --- a/vendor/github.com/andybalholm/brotli/block_splitter_command.go +++ /dev/null @@ -1,434 +0,0 @@ -package brotli - -import "math" - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -func initialEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { - var seed uint32 = 7 - var block_length uint = length / num_histograms - var i uint - clearHistogramsCommand(histograms, num_histograms) - for i = 0; i < num_histograms; i++ { - var pos uint = length * i / num_histograms - if i != 0 { - pos += uint(myRand(&seed) % uint32(block_length)) - } - - if pos+stride >= length { - pos = length - stride - 1 - } - - histogramAddVectorCommand(&histograms[i], data[pos:], stride) - } -} - -func randomSampleCommand(seed *uint32, data []uint16, length uint, stride uint, sample *histogramCommand) { - var pos uint = 0 - if stride >= length { - stride = length - } else { - pos = uint(myRand(seed) % uint32(length-stride+1)) - } - - histogramAddVectorCommand(sample, data[pos:], stride) -} - -func refineEntropyCodesCommand(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramCommand) { - var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining - var seed uint32 = 7 - var iter uint - iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms - for iter = 0; iter < iters; iter++ { - var sample histogramCommand - histogramClearCommand(&sample) - randomSampleCommand(&seed, data, length, stride, &sample) - histogramAddHistogramCommand(&histograms[iter%num_histograms], &sample) - } -} - -/* Assigns a block id from the range [0, num_histograms) to each data element - in data[0..length) and fills in block_id[0..length) with the assigned values. 
- Returns the number of blocks, i.e. one plus the number of block switches. */ -func findBlocksCommand(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramCommand, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { - var data_size uint = histogramDataSizeCommand() - var bitmaplen uint = (num_histograms + 7) >> 3 - var num_blocks uint = 1 - var i uint - var j uint - assert(num_histograms <= 256) - if num_histograms <= 1 { - for i = 0; i < length; i++ { - block_id[i] = 0 - } - - return 1 - } - - for i := 0; i < int(data_size*num_histograms); i++ { - insert_cost[i] = 0 - } - for i = 0; i < num_histograms; i++ { - insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) - } - - for i = data_size; i != 0; { - i-- - for j = 0; j < num_histograms; j++ { - insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) - } - } - - for i := 0; i < int(num_histograms); i++ { - cost[i] = 0 - } - for i := 0; i < int(length*bitmaplen); i++ { - switch_signal[i] = 0 - } - - /* After each iteration of this loop, cost[k] will contain the difference - between the minimum cost of arriving at the current byte position using - entropy code k, and the minimum cost of arriving at the current byte - position. This difference is capped at the block switch cost, and if it - reaches block switch cost, it means that when we trace back from the last - position, we need to switch here. */ - for i = 0; i < length; i++ { - var byte_ix uint = i - var ix uint = byte_ix * bitmaplen - var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms - var min_cost float64 = 1e99 - var block_switch_cost float64 = block_switch_bitcost - var k uint - for k = 0; k < num_histograms; k++ { - /* We are coding the symbol in data[byte_ix] with entropy code k. */ - cost[k] += insert_cost[insert_cost_ix+k] - - if cost[k] < min_cost { - min_cost = cost[k] - block_id[byte_ix] = byte(k) - } - } - - /* More blocks for the beginning. */ - if byte_ix < 2000 { - block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 - } - - for k = 0; k < num_histograms; k++ { - cost[k] -= min_cost - if cost[k] >= block_switch_cost { - var mask byte = byte(1 << (k & 7)) - cost[k] = block_switch_cost - assert(k>>3 < bitmaplen) - switch_signal[ix+(k>>3)] |= mask - /* Trace back from the last position and switch at the marked places. 
*/ - } - } - } - { - var byte_ix uint = length - 1 - var ix uint = byte_ix * bitmaplen - var cur_id byte = block_id[byte_ix] - for byte_ix > 0 { - var mask byte = byte(1 << (cur_id & 7)) - assert(uint(cur_id)>>3 < bitmaplen) - byte_ix-- - ix -= bitmaplen - if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { - if cur_id != block_id[byte_ix] { - cur_id = block_id[byte_ix] - num_blocks++ - } - } - - block_id[byte_ix] = cur_id - } - } - - return num_blocks -} - -var remapBlockIdsCommand_kInvalidId uint16 = 256 - -func remapBlockIdsCommand(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { - var next_id uint16 = 0 - var i uint - for i = 0; i < num_histograms; i++ { - new_id[i] = remapBlockIdsCommand_kInvalidId - } - - for i = 0; i < length; i++ { - assert(uint(block_ids[i]) < num_histograms) - if new_id[block_ids[i]] == remapBlockIdsCommand_kInvalidId { - new_id[block_ids[i]] = next_id - next_id++ - } - } - - for i = 0; i < length; i++ { - block_ids[i] = byte(new_id[block_ids[i]]) - assert(uint(block_ids[i]) < num_histograms) - } - - assert(uint(next_id) <= num_histograms) - return uint(next_id) -} - -func buildBlockHistogramsCommand(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramCommand) { - var i uint - clearHistogramsCommand(histograms, num_histograms) - for i = 0; i < length; i++ { - histogramAddCommand(&histograms[block_ids[i]], uint(data[i])) - } -} - -var clusterBlocksCommand_kInvalidIndex uint32 = math.MaxUint32 - -func clusterBlocksCommand(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { - var histogram_symbols []uint32 = make([]uint32, num_blocks) - var block_lengths []uint32 = make([]uint32, num_blocks) - var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch - var all_histograms_size uint = 0 - var all_histograms_capacity uint = expected_num_clusters - var all_histograms []histogramCommand = make([]histogramCommand, all_histograms_capacity) - var cluster_size_size uint = 0 - var cluster_size_capacity uint = expected_num_clusters - var cluster_size []uint32 = make([]uint32, cluster_size_capacity) - var num_clusters uint = 0 - var histograms []histogramCommand = make([]histogramCommand, brotli_min_size_t(num_blocks, histogramsPerBatch)) - var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 - var pairs_capacity uint = max_num_pairs + 1 - var pairs []histogramPair = make([]histogramPair, pairs_capacity) - var pos uint = 0 - var clusters []uint32 - var num_final_clusters uint - var new_index []uint32 - var i uint - var sizes = [histogramsPerBatch]uint32{0} - var new_clusters = [histogramsPerBatch]uint32{0} - var symbols = [histogramsPerBatch]uint32{0} - var remap = [histogramsPerBatch]uint32{0} - - for i := 0; i < int(num_blocks); i++ { - block_lengths[i] = 0 - } - { - var block_idx uint = 0 - for i = 0; i < length; i++ { - assert(block_idx < num_blocks) - block_lengths[block_idx]++ - if i+1 == length || block_ids[i] != block_ids[i+1] { - block_idx++ - } - } - - assert(block_idx == num_blocks) - } - - for i = 0; i < num_blocks; i += histogramsPerBatch { - var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) - var num_new_clusters uint - var j uint - for j = 0; j < num_to_combine; j++ { - var k uint - histogramClearCommand(&histograms[j]) - for k = 0; uint32(k) < block_lengths[i+j]; k++ { - histogramAddCommand(&histograms[j], uint(data[pos])) - pos++ - } - - histograms[j].bit_cost_ = 
populationCostCommand(&histograms[j]) - new_clusters[j] = uint32(j) - symbols[j] = uint32(j) - sizes[j] = 1 - } - - num_new_clusters = histogramCombineCommand(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) - if all_histograms_capacity < (all_histograms_size + num_new_clusters) { - var _new_size uint - if all_histograms_capacity == 0 { - _new_size = all_histograms_size + num_new_clusters - } else { - _new_size = all_histograms_capacity - } - var new_array []histogramCommand - for _new_size < (all_histograms_size + num_new_clusters) { - _new_size *= 2 - } - new_array = make([]histogramCommand, _new_size) - if all_histograms_capacity != 0 { - copy(new_array, all_histograms[:all_histograms_capacity]) - } - - all_histograms = new_array - all_histograms_capacity = _new_size - } - - brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) - for j = 0; j < num_new_clusters; j++ { - all_histograms[all_histograms_size] = histograms[new_clusters[j]] - all_histograms_size++ - cluster_size[cluster_size_size] = sizes[new_clusters[j]] - cluster_size_size++ - remap[new_clusters[j]] = uint32(j) - } - - for j = 0; j < num_to_combine; j++ { - histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] - } - - num_clusters += num_new_clusters - assert(num_clusters == cluster_size_size) - assert(num_clusters == all_histograms_size) - } - - histograms = nil - - max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) - if pairs_capacity < max_num_pairs+1 { - pairs = nil - pairs = make([]histogramPair, (max_num_pairs + 1)) - } - - clusters = make([]uint32, num_clusters) - for i = 0; i < num_clusters; i++ { - clusters[i] = uint32(i) - } - - num_final_clusters = histogramCombineCommand(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) - pairs = nil - cluster_size = nil - - new_index = make([]uint32, num_clusters) - for i = 0; i < num_clusters; i++ { - new_index[i] = clusterBlocksCommand_kInvalidIndex - } - pos = 0 - { - var next_index uint32 = 0 - for i = 0; i < num_blocks; i++ { - var histo histogramCommand - var j uint - var best_out uint32 - var best_bits float64 - histogramClearCommand(&histo) - for j = 0; uint32(j) < block_lengths[i]; j++ { - histogramAddCommand(&histo, uint(data[pos])) - pos++ - } - - if i == 0 { - best_out = histogram_symbols[0] - } else { - best_out = histogram_symbols[i-1] - } - best_bits = histogramBitCostDistanceCommand(&histo, &all_histograms[best_out]) - for j = 0; j < num_final_clusters; j++ { - var cur_bits float64 = histogramBitCostDistanceCommand(&histo, &all_histograms[clusters[j]]) - if cur_bits < best_bits { - best_bits = cur_bits - best_out = clusters[j] - } - } - - histogram_symbols[i] = best_out - if new_index[best_out] == clusterBlocksCommand_kInvalidIndex { - new_index[best_out] = next_index - next_index++ - } - } - } - - clusters = nil - all_histograms = nil - brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) - brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) - { - var cur_length uint32 = 0 - var block_idx uint = 0 - var max_type byte = 0 - for i = 0; i < num_blocks; i++ { - cur_length += block_lengths[i] - if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { - var id byte = byte(new_index[histogram_symbols[i]]) - 
split.types[block_idx] = id - split.lengths[block_idx] = cur_length - max_type = brotli_max_uint8_t(max_type, id) - cur_length = 0 - block_idx++ - } - } - - split.num_blocks = block_idx - split.num_types = uint(max_type) + 1 - } - - new_index = nil - block_lengths = nil - histogram_symbols = nil -} - -func splitByteVectorCommand(data []uint16, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { - length := uint(len(data)) - var data_size uint = histogramDataSizeCommand() - var num_histograms uint = length/literals_per_histogram + 1 - var histograms []histogramCommand - if num_histograms > max_histograms { - num_histograms = max_histograms - } - - if length == 0 { - split.num_types = 1 - return - } else if length < kMinLengthForBlockSplitting { - brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) - brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) - split.num_types = 1 - split.types[split.num_blocks] = 0 - split.lengths[split.num_blocks] = uint32(length) - split.num_blocks++ - return - } - - histograms = make([]histogramCommand, num_histograms) - - /* Find good entropy codes. */ - initialEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) - - refineEntropyCodesCommand(data, length, sampling_stride_length, num_histograms, histograms) - { - var block_ids []byte = make([]byte, length) - var num_blocks uint = 0 - var bitmaplen uint = (num_histograms + 7) >> 3 - var insert_cost []float64 = make([]float64, (data_size * num_histograms)) - var cost []float64 = make([]float64, num_histograms) - var switch_signal []byte = make([]byte, (length * bitmaplen)) - var new_id []uint16 = make([]uint16, num_histograms) - var iters uint - if params.quality < hqZopflificationQuality { - iters = 3 - } else { - iters = 10 - } - /* Find a good path through literals with the good entropy codes. */ - - var i uint - for i = 0; i < iters; i++ { - num_blocks = findBlocksCommand(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) - num_histograms = remapBlockIdsCommand(block_ids, length, new_id, num_histograms) - buildBlockHistogramsCommand(data, length, block_ids, num_histograms, histograms) - } - - insert_cost = nil - cost = nil - switch_signal = nil - new_id = nil - histograms = nil - clusterBlocksCommand(data, length, num_blocks, block_ids, split) - block_ids = nil - } -} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_distance.go b/vendor/github.com/andybalholm/brotli/block_splitter_distance.go deleted file mode 100644 index 953530d..0000000 --- a/vendor/github.com/andybalholm/brotli/block_splitter_distance.go +++ /dev/null @@ -1,433 +0,0 @@ -package brotli - -import "math" - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. 
- See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -func initialEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { - var seed uint32 = 7 - var block_length uint = length / num_histograms - var i uint - clearHistogramsDistance(histograms, num_histograms) - for i = 0; i < num_histograms; i++ { - var pos uint = length * i / num_histograms - if i != 0 { - pos += uint(myRand(&seed) % uint32(block_length)) - } - - if pos+stride >= length { - pos = length - stride - 1 - } - - histogramAddVectorDistance(&histograms[i], data[pos:], stride) - } -} - -func randomSampleDistance(seed *uint32, data []uint16, length uint, stride uint, sample *histogramDistance) { - var pos uint = 0 - if stride >= length { - stride = length - } else { - pos = uint(myRand(seed) % uint32(length-stride+1)) - } - - histogramAddVectorDistance(sample, data[pos:], stride) -} - -func refineEntropyCodesDistance(data []uint16, length uint, stride uint, num_histograms uint, histograms []histogramDistance) { - var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining - var seed uint32 = 7 - var iter uint - iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms - for iter = 0; iter < iters; iter++ { - var sample histogramDistance - histogramClearDistance(&sample) - randomSampleDistance(&seed, data, length, stride, &sample) - histogramAddHistogramDistance(&histograms[iter%num_histograms], &sample) - } -} - -/* Assigns a block id from the range [0, num_histograms) to each data element - in data[0..length) and fills in block_id[0..length) with the assigned values. - Returns the number of blocks, i.e. one plus the number of block switches. */ -func findBlocksDistance(data []uint16, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramDistance, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { - var data_size uint = histogramDataSizeDistance() - var bitmaplen uint = (num_histograms + 7) >> 3 - var num_blocks uint = 1 - var i uint - var j uint - assert(num_histograms <= 256) - if num_histograms <= 1 { - for i = 0; i < length; i++ { - block_id[i] = 0 - } - - return 1 - } - - for i := 0; i < int(data_size*num_histograms); i++ { - insert_cost[i] = 0 - } - for i = 0; i < num_histograms; i++ { - insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) - } - - for i = data_size; i != 0; { - i-- - for j = 0; j < num_histograms; j++ { - insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) - } - } - - for i := 0; i < int(num_histograms); i++ { - cost[i] = 0 - } - for i := 0; i < int(length*bitmaplen); i++ { - switch_signal[i] = 0 - } - - /* After each iteration of this loop, cost[k] will contain the difference - between the minimum cost of arriving at the current byte position using - entropy code k, and the minimum cost of arriving at the current byte - position. This difference is capped at the block switch cost, and if it - reaches block switch cost, it means that when we trace back from the last - position, we need to switch here. */ - for i = 0; i < length; i++ { - var byte_ix uint = i - var ix uint = byte_ix * bitmaplen - var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms - var min_cost float64 = 1e99 - var block_switch_cost float64 = block_switch_bitcost - var k uint - for k = 0; k < num_histograms; k++ { - /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ - cost[k] += insert_cost[insert_cost_ix+k] - - if cost[k] < min_cost { - min_cost = cost[k] - block_id[byte_ix] = byte(k) - } - } - - /* More blocks for the beginning. */ - if byte_ix < 2000 { - block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 - } - - for k = 0; k < num_histograms; k++ { - cost[k] -= min_cost - if cost[k] >= block_switch_cost { - var mask byte = byte(1 << (k & 7)) - cost[k] = block_switch_cost - assert(k>>3 < bitmaplen) - switch_signal[ix+(k>>3)] |= mask - /* Trace back from the last position and switch at the marked places. */ - } - } - } - { - var byte_ix uint = length - 1 - var ix uint = byte_ix * bitmaplen - var cur_id byte = block_id[byte_ix] - for byte_ix > 0 { - var mask byte = byte(1 << (cur_id & 7)) - assert(uint(cur_id)>>3 < bitmaplen) - byte_ix-- - ix -= bitmaplen - if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { - if cur_id != block_id[byte_ix] { - cur_id = block_id[byte_ix] - num_blocks++ - } - } - - block_id[byte_ix] = cur_id - } - } - - return num_blocks -} - -var remapBlockIdsDistance_kInvalidId uint16 = 256 - -func remapBlockIdsDistance(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { - var next_id uint16 = 0 - var i uint - for i = 0; i < num_histograms; i++ { - new_id[i] = remapBlockIdsDistance_kInvalidId - } - - for i = 0; i < length; i++ { - assert(uint(block_ids[i]) < num_histograms) - if new_id[block_ids[i]] == remapBlockIdsDistance_kInvalidId { - new_id[block_ids[i]] = next_id - next_id++ - } - } - - for i = 0; i < length; i++ { - block_ids[i] = byte(new_id[block_ids[i]]) - assert(uint(block_ids[i]) < num_histograms) - } - - assert(uint(next_id) <= num_histograms) - return uint(next_id) -} - -func buildBlockHistogramsDistance(data []uint16, length uint, block_ids []byte, num_histograms uint, histograms []histogramDistance) { - var i uint - clearHistogramsDistance(histograms, num_histograms) - for i = 0; i < length; i++ { - histogramAddDistance(&histograms[block_ids[i]], uint(data[i])) - } -} - -var clusterBlocksDistance_kInvalidIndex uint32 = math.MaxUint32 - -func clusterBlocksDistance(data []uint16, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { - var histogram_symbols []uint32 = make([]uint32, num_blocks) - var block_lengths []uint32 = make([]uint32, num_blocks) - var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch - var all_histograms_size uint = 0 - var all_histograms_capacity uint = expected_num_clusters - var all_histograms []histogramDistance = make([]histogramDistance, all_histograms_capacity) - var cluster_size_size uint = 0 - var cluster_size_capacity uint = expected_num_clusters - var cluster_size []uint32 = make([]uint32, cluster_size_capacity) - var num_clusters uint = 0 - var histograms []histogramDistance = make([]histogramDistance, brotli_min_size_t(num_blocks, histogramsPerBatch)) - var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 - var pairs_capacity uint = max_num_pairs + 1 - var pairs []histogramPair = make([]histogramPair, pairs_capacity) - var pos uint = 0 - var clusters []uint32 - var num_final_clusters uint - var new_index []uint32 - var i uint - var sizes = [histogramsPerBatch]uint32{0} - var new_clusters = [histogramsPerBatch]uint32{0} - var symbols = [histogramsPerBatch]uint32{0} - var remap = [histogramsPerBatch]uint32{0} - - for i := 0; i < int(num_blocks); i++ { - block_lengths[i] = 0 - } - { - var block_idx uint = 0 - for i = 0; i < length; i++ { - assert(block_idx < num_blocks) - 
block_lengths[block_idx]++ - if i+1 == length || block_ids[i] != block_ids[i+1] { - block_idx++ - } - } - - assert(block_idx == num_blocks) - } - - for i = 0; i < num_blocks; i += histogramsPerBatch { - var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) - var num_new_clusters uint - var j uint - for j = 0; j < num_to_combine; j++ { - var k uint - histogramClearDistance(&histograms[j]) - for k = 0; uint32(k) < block_lengths[i+j]; k++ { - histogramAddDistance(&histograms[j], uint(data[pos])) - pos++ - } - - histograms[j].bit_cost_ = populationCostDistance(&histograms[j]) - new_clusters[j] = uint32(j) - symbols[j] = uint32(j) - sizes[j] = 1 - } - - num_new_clusters = histogramCombineDistance(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) - if all_histograms_capacity < (all_histograms_size + num_new_clusters) { - var _new_size uint - if all_histograms_capacity == 0 { - _new_size = all_histograms_size + num_new_clusters - } else { - _new_size = all_histograms_capacity - } - var new_array []histogramDistance - for _new_size < (all_histograms_size + num_new_clusters) { - _new_size *= 2 - } - new_array = make([]histogramDistance, _new_size) - if all_histograms_capacity != 0 { - copy(new_array, all_histograms[:all_histograms_capacity]) - } - - all_histograms = new_array - all_histograms_capacity = _new_size - } - - brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) - for j = 0; j < num_new_clusters; j++ { - all_histograms[all_histograms_size] = histograms[new_clusters[j]] - all_histograms_size++ - cluster_size[cluster_size_size] = sizes[new_clusters[j]] - cluster_size_size++ - remap[new_clusters[j]] = uint32(j) - } - - for j = 0; j < num_to_combine; j++ { - histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] - } - - num_clusters += num_new_clusters - assert(num_clusters == cluster_size_size) - assert(num_clusters == all_histograms_size) - } - - histograms = nil - - max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) - if pairs_capacity < max_num_pairs+1 { - pairs = nil - pairs = make([]histogramPair, (max_num_pairs + 1)) - } - - clusters = make([]uint32, num_clusters) - for i = 0; i < num_clusters; i++ { - clusters[i] = uint32(i) - } - - num_final_clusters = histogramCombineDistance(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) - pairs = nil - cluster_size = nil - - new_index = make([]uint32, num_clusters) - for i = 0; i < num_clusters; i++ { - new_index[i] = clusterBlocksDistance_kInvalidIndex - } - pos = 0 - { - var next_index uint32 = 0 - for i = 0; i < num_blocks; i++ { - var histo histogramDistance - var j uint - var best_out uint32 - var best_bits float64 - histogramClearDistance(&histo) - for j = 0; uint32(j) < block_lengths[i]; j++ { - histogramAddDistance(&histo, uint(data[pos])) - pos++ - } - - if i == 0 { - best_out = histogram_symbols[0] - } else { - best_out = histogram_symbols[i-1] - } - best_bits = histogramBitCostDistanceDistance(&histo, &all_histograms[best_out]) - for j = 0; j < num_final_clusters; j++ { - var cur_bits float64 = histogramBitCostDistanceDistance(&histo, &all_histograms[clusters[j]]) - if cur_bits < best_bits { - best_bits = cur_bits - best_out = clusters[j] - } - } - - histogram_symbols[i] = best_out - if new_index[best_out] == clusterBlocksDistance_kInvalidIndex { 
- new_index[best_out] = next_index - next_index++ - } - } - } - - clusters = nil - all_histograms = nil - brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) - brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) - { - var cur_length uint32 = 0 - var block_idx uint = 0 - var max_type byte = 0 - for i = 0; i < num_blocks; i++ { - cur_length += block_lengths[i] - if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { - var id byte = byte(new_index[histogram_symbols[i]]) - split.types[block_idx] = id - split.lengths[block_idx] = cur_length - max_type = brotli_max_uint8_t(max_type, id) - cur_length = 0 - block_idx++ - } - } - - split.num_blocks = block_idx - split.num_types = uint(max_type) + 1 - } - - new_index = nil - block_lengths = nil - histogram_symbols = nil -} - -func splitByteVectorDistance(data []uint16, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { - var data_size uint = histogramDataSizeDistance() - var num_histograms uint = length/literals_per_histogram + 1 - var histograms []histogramDistance - if num_histograms > max_histograms { - num_histograms = max_histograms - } - - if length == 0 { - split.num_types = 1 - return - } else if length < kMinLengthForBlockSplitting { - brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) - brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) - split.num_types = 1 - split.types[split.num_blocks] = 0 - split.lengths[split.num_blocks] = uint32(length) - split.num_blocks++ - return - } - - histograms = make([]histogramDistance, num_histograms) - - /* Find good entropy codes. */ - initialEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) - - refineEntropyCodesDistance(data, length, sampling_stride_length, num_histograms, histograms) - { - var block_ids []byte = make([]byte, length) - var num_blocks uint = 0 - var bitmaplen uint = (num_histograms + 7) >> 3 - var insert_cost []float64 = make([]float64, (data_size * num_histograms)) - var cost []float64 = make([]float64, num_histograms) - var switch_signal []byte = make([]byte, (length * bitmaplen)) - var new_id []uint16 = make([]uint16, num_histograms) - var iters uint - if params.quality < hqZopflificationQuality { - iters = 3 - } else { - iters = 10 - } - /* Find a good path through literals with the good entropy codes. */ - - var i uint - for i = 0; i < iters; i++ { - num_blocks = findBlocksDistance(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) - num_histograms = remapBlockIdsDistance(block_ids, length, new_id, num_histograms) - buildBlockHistogramsDistance(data, length, block_ids, num_histograms, histograms) - } - - insert_cost = nil - cost = nil - switch_signal = nil - new_id = nil - histograms = nil - clusterBlocksDistance(data, length, num_blocks, block_ids, split) - block_ids = nil - } -} diff --git a/vendor/github.com/andybalholm/brotli/block_splitter_literal.go b/vendor/github.com/andybalholm/brotli/block_splitter_literal.go deleted file mode 100644 index 1c895cf..0000000 --- a/vendor/github.com/andybalholm/brotli/block_splitter_literal.go +++ /dev/null @@ -1,433 +0,0 @@ -package brotli - -import "math" - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. 
- See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -func initialEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { - var seed uint32 = 7 - var block_length uint = length / num_histograms - var i uint - clearHistogramsLiteral(histograms, num_histograms) - for i = 0; i < num_histograms; i++ { - var pos uint = length * i / num_histograms - if i != 0 { - pos += uint(myRand(&seed) % uint32(block_length)) - } - - if pos+stride >= length { - pos = length - stride - 1 - } - - histogramAddVectorLiteral(&histograms[i], data[pos:], stride) - } -} - -func randomSampleLiteral(seed *uint32, data []byte, length uint, stride uint, sample *histogramLiteral) { - var pos uint = 0 - if stride >= length { - stride = length - } else { - pos = uint(myRand(seed) % uint32(length-stride+1)) - } - - histogramAddVectorLiteral(sample, data[pos:], stride) -} - -func refineEntropyCodesLiteral(data []byte, length uint, stride uint, num_histograms uint, histograms []histogramLiteral) { - var iters uint = kIterMulForRefining*length/stride + kMinItersForRefining - var seed uint32 = 7 - var iter uint - iters = ((iters + num_histograms - 1) / num_histograms) * num_histograms - for iter = 0; iter < iters; iter++ { - var sample histogramLiteral - histogramClearLiteral(&sample) - randomSampleLiteral(&seed, data, length, stride, &sample) - histogramAddHistogramLiteral(&histograms[iter%num_histograms], &sample) - } -} - -/* Assigns a block id from the range [0, num_histograms) to each data element - in data[0..length) and fills in block_id[0..length) with the assigned values. - Returns the number of blocks, i.e. one plus the number of block switches. */ -func findBlocksLiteral(data []byte, length uint, block_switch_bitcost float64, num_histograms uint, histograms []histogramLiteral, insert_cost []float64, cost []float64, switch_signal []byte, block_id []byte) uint { - var data_size uint = histogramDataSizeLiteral() - var bitmaplen uint = (num_histograms + 7) >> 3 - var num_blocks uint = 1 - var i uint - var j uint - assert(num_histograms <= 256) - if num_histograms <= 1 { - for i = 0; i < length; i++ { - block_id[i] = 0 - } - - return 1 - } - - for i := 0; i < int(data_size*num_histograms); i++ { - insert_cost[i] = 0 - } - for i = 0; i < num_histograms; i++ { - insert_cost[i] = fastLog2(uint(uint32(histograms[i].total_count_))) - } - - for i = data_size; i != 0; { - i-- - for j = 0; j < num_histograms; j++ { - insert_cost[i*num_histograms+j] = insert_cost[j] - bitCost(uint(histograms[j].data_[i])) - } - } - - for i := 0; i < int(num_histograms); i++ { - cost[i] = 0 - } - for i := 0; i < int(length*bitmaplen); i++ { - switch_signal[i] = 0 - } - - /* After each iteration of this loop, cost[k] will contain the difference - between the minimum cost of arriving at the current byte position using - entropy code k, and the minimum cost of arriving at the current byte - position. This difference is capped at the block switch cost, and if it - reaches block switch cost, it means that when we trace back from the last - position, we need to switch here. */ - for i = 0; i < length; i++ { - var byte_ix uint = i - var ix uint = byte_ix * bitmaplen - var insert_cost_ix uint = uint(data[byte_ix]) * num_histograms - var min_cost float64 = 1e99 - var block_switch_cost float64 = block_switch_bitcost - var k uint - for k = 0; k < num_histograms; k++ { - /* We are coding the symbol in data[byte_ix] with entropy code k. 
*/ - cost[k] += insert_cost[insert_cost_ix+k] - - if cost[k] < min_cost { - min_cost = cost[k] - block_id[byte_ix] = byte(k) - } - } - - /* More blocks for the beginning. */ - if byte_ix < 2000 { - block_switch_cost *= 0.77 + 0.07*float64(byte_ix)/2000 - } - - for k = 0; k < num_histograms; k++ { - cost[k] -= min_cost - if cost[k] >= block_switch_cost { - var mask byte = byte(1 << (k & 7)) - cost[k] = block_switch_cost - assert(k>>3 < bitmaplen) - switch_signal[ix+(k>>3)] |= mask - /* Trace back from the last position and switch at the marked places. */ - } - } - } - { - var byte_ix uint = length - 1 - var ix uint = byte_ix * bitmaplen - var cur_id byte = block_id[byte_ix] - for byte_ix > 0 { - var mask byte = byte(1 << (cur_id & 7)) - assert(uint(cur_id)>>3 < bitmaplen) - byte_ix-- - ix -= bitmaplen - if switch_signal[ix+uint(cur_id>>3)]&mask != 0 { - if cur_id != block_id[byte_ix] { - cur_id = block_id[byte_ix] - num_blocks++ - } - } - - block_id[byte_ix] = cur_id - } - } - - return num_blocks -} - -var remapBlockIdsLiteral_kInvalidId uint16 = 256 - -func remapBlockIdsLiteral(block_ids []byte, length uint, new_id []uint16, num_histograms uint) uint { - var next_id uint16 = 0 - var i uint - for i = 0; i < num_histograms; i++ { - new_id[i] = remapBlockIdsLiteral_kInvalidId - } - - for i = 0; i < length; i++ { - assert(uint(block_ids[i]) < num_histograms) - if new_id[block_ids[i]] == remapBlockIdsLiteral_kInvalidId { - new_id[block_ids[i]] = next_id - next_id++ - } - } - - for i = 0; i < length; i++ { - block_ids[i] = byte(new_id[block_ids[i]]) - assert(uint(block_ids[i]) < num_histograms) - } - - assert(uint(next_id) <= num_histograms) - return uint(next_id) -} - -func buildBlockHistogramsLiteral(data []byte, length uint, block_ids []byte, num_histograms uint, histograms []histogramLiteral) { - var i uint - clearHistogramsLiteral(histograms, num_histograms) - for i = 0; i < length; i++ { - histogramAddLiteral(&histograms[block_ids[i]], uint(data[i])) - } -} - -var clusterBlocksLiteral_kInvalidIndex uint32 = math.MaxUint32 - -func clusterBlocksLiteral(data []byte, length uint, num_blocks uint, block_ids []byte, split *blockSplit) { - var histogram_symbols []uint32 = make([]uint32, num_blocks) - var block_lengths []uint32 = make([]uint32, num_blocks) - var expected_num_clusters uint = clustersPerBatch * (num_blocks + histogramsPerBatch - 1) / histogramsPerBatch - var all_histograms_size uint = 0 - var all_histograms_capacity uint = expected_num_clusters - var all_histograms []histogramLiteral = make([]histogramLiteral, all_histograms_capacity) - var cluster_size_size uint = 0 - var cluster_size_capacity uint = expected_num_clusters - var cluster_size []uint32 = make([]uint32, cluster_size_capacity) - var num_clusters uint = 0 - var histograms []histogramLiteral = make([]histogramLiteral, brotli_min_size_t(num_blocks, histogramsPerBatch)) - var max_num_pairs uint = histogramsPerBatch * histogramsPerBatch / 2 - var pairs_capacity uint = max_num_pairs + 1 - var pairs []histogramPair = make([]histogramPair, pairs_capacity) - var pos uint = 0 - var clusters []uint32 - var num_final_clusters uint - var new_index []uint32 - var i uint - var sizes = [histogramsPerBatch]uint32{0} - var new_clusters = [histogramsPerBatch]uint32{0} - var symbols = [histogramsPerBatch]uint32{0} - var remap = [histogramsPerBatch]uint32{0} - - for i := 0; i < int(num_blocks); i++ { - block_lengths[i] = 0 - } - { - var block_idx uint = 0 - for i = 0; i < length; i++ { - assert(block_idx < num_blocks) - 
block_lengths[block_idx]++ - if i+1 == length || block_ids[i] != block_ids[i+1] { - block_idx++ - } - } - - assert(block_idx == num_blocks) - } - - for i = 0; i < num_blocks; i += histogramsPerBatch { - var num_to_combine uint = brotli_min_size_t(num_blocks-i, histogramsPerBatch) - var num_new_clusters uint - var j uint - for j = 0; j < num_to_combine; j++ { - var k uint - histogramClearLiteral(&histograms[j]) - for k = 0; uint32(k) < block_lengths[i+j]; k++ { - histogramAddLiteral(&histograms[j], uint(data[pos])) - pos++ - } - - histograms[j].bit_cost_ = populationCostLiteral(&histograms[j]) - new_clusters[j] = uint32(j) - symbols[j] = uint32(j) - sizes[j] = 1 - } - - num_new_clusters = histogramCombineLiteral(histograms, sizes[:], symbols[:], new_clusters[:], []histogramPair(pairs), num_to_combine, num_to_combine, histogramsPerBatch, max_num_pairs) - if all_histograms_capacity < (all_histograms_size + num_new_clusters) { - var _new_size uint - if all_histograms_capacity == 0 { - _new_size = all_histograms_size + num_new_clusters - } else { - _new_size = all_histograms_capacity - } - var new_array []histogramLiteral - for _new_size < (all_histograms_size + num_new_clusters) { - _new_size *= 2 - } - new_array = make([]histogramLiteral, _new_size) - if all_histograms_capacity != 0 { - copy(new_array, all_histograms[:all_histograms_capacity]) - } - - all_histograms = new_array - all_histograms_capacity = _new_size - } - - brotli_ensure_capacity_uint32_t(&cluster_size, &cluster_size_capacity, cluster_size_size+num_new_clusters) - for j = 0; j < num_new_clusters; j++ { - all_histograms[all_histograms_size] = histograms[new_clusters[j]] - all_histograms_size++ - cluster_size[cluster_size_size] = sizes[new_clusters[j]] - cluster_size_size++ - remap[new_clusters[j]] = uint32(j) - } - - for j = 0; j < num_to_combine; j++ { - histogram_symbols[i+j] = uint32(num_clusters) + remap[symbols[j]] - } - - num_clusters += num_new_clusters - assert(num_clusters == cluster_size_size) - assert(num_clusters == all_histograms_size) - } - - histograms = nil - - max_num_pairs = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) - if pairs_capacity < max_num_pairs+1 { - pairs = nil - pairs = make([]histogramPair, (max_num_pairs + 1)) - } - - clusters = make([]uint32, num_clusters) - for i = 0; i < num_clusters; i++ { - clusters[i] = uint32(i) - } - - num_final_clusters = histogramCombineLiteral(all_histograms, cluster_size, histogram_symbols, clusters, pairs, num_clusters, num_blocks, maxNumberOfBlockTypes, max_num_pairs) - pairs = nil - cluster_size = nil - - new_index = make([]uint32, num_clusters) - for i = 0; i < num_clusters; i++ { - new_index[i] = clusterBlocksLiteral_kInvalidIndex - } - pos = 0 - { - var next_index uint32 = 0 - for i = 0; i < num_blocks; i++ { - var histo histogramLiteral - var j uint - var best_out uint32 - var best_bits float64 - histogramClearLiteral(&histo) - for j = 0; uint32(j) < block_lengths[i]; j++ { - histogramAddLiteral(&histo, uint(data[pos])) - pos++ - } - - if i == 0 { - best_out = histogram_symbols[0] - } else { - best_out = histogram_symbols[i-1] - } - best_bits = histogramBitCostDistanceLiteral(&histo, &all_histograms[best_out]) - for j = 0; j < num_final_clusters; j++ { - var cur_bits float64 = histogramBitCostDistanceLiteral(&histo, &all_histograms[clusters[j]]) - if cur_bits < best_bits { - best_bits = cur_bits - best_out = clusters[j] - } - } - - histogram_symbols[i] = best_out - if new_index[best_out] == clusterBlocksLiteral_kInvalidIndex { - 
new_index[best_out] = next_index - next_index++ - } - } - } - - clusters = nil - all_histograms = nil - brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, num_blocks) - brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, num_blocks) - { - var cur_length uint32 = 0 - var block_idx uint = 0 - var max_type byte = 0 - for i = 0; i < num_blocks; i++ { - cur_length += block_lengths[i] - if i+1 == num_blocks || histogram_symbols[i] != histogram_symbols[i+1] { - var id byte = byte(new_index[histogram_symbols[i]]) - split.types[block_idx] = id - split.lengths[block_idx] = cur_length - max_type = brotli_max_uint8_t(max_type, id) - cur_length = 0 - block_idx++ - } - } - - split.num_blocks = block_idx - split.num_types = uint(max_type) + 1 - } - - new_index = nil - block_lengths = nil - histogram_symbols = nil -} - -func splitByteVectorLiteral(data []byte, length uint, literals_per_histogram uint, max_histograms uint, sampling_stride_length uint, block_switch_cost float64, params *encoderParams, split *blockSplit) { - var data_size uint = histogramDataSizeLiteral() - var num_histograms uint = length/literals_per_histogram + 1 - var histograms []histogramLiteral - if num_histograms > max_histograms { - num_histograms = max_histograms - } - - if length == 0 { - split.num_types = 1 - return - } else if length < kMinLengthForBlockSplitting { - brotli_ensure_capacity_uint8_t(&split.types, &split.types_alloc_size, split.num_blocks+1) - brotli_ensure_capacity_uint32_t(&split.lengths, &split.lengths_alloc_size, split.num_blocks+1) - split.num_types = 1 - split.types[split.num_blocks] = 0 - split.lengths[split.num_blocks] = uint32(length) - split.num_blocks++ - return - } - - histograms = make([]histogramLiteral, num_histograms) - - /* Find good entropy codes. */ - initialEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) - - refineEntropyCodesLiteral(data, length, sampling_stride_length, num_histograms, histograms) - { - var block_ids []byte = make([]byte, length) - var num_blocks uint = 0 - var bitmaplen uint = (num_histograms + 7) >> 3 - var insert_cost []float64 = make([]float64, (data_size * num_histograms)) - var cost []float64 = make([]float64, num_histograms) - var switch_signal []byte = make([]byte, (length * bitmaplen)) - var new_id []uint16 = make([]uint16, num_histograms) - var iters uint - if params.quality < hqZopflificationQuality { - iters = 3 - } else { - iters = 10 - } - /* Find a good path through literals with the good entropy codes. 
*/ - - var i uint - for i = 0; i < iters; i++ { - num_blocks = findBlocksLiteral(data, length, block_switch_cost, num_histograms, histograms, insert_cost, cost, switch_signal, block_ids) - num_histograms = remapBlockIdsLiteral(block_ids, length, new_id, num_histograms) - buildBlockHistogramsLiteral(data, length, block_ids, num_histograms, histograms) - } - - insert_cost = nil - cost = nil - switch_signal = nil - new_id = nil - histograms = nil - clusterBlocksLiteral(data, length, num_blocks, block_ids, split) - block_ids = nil - } -} diff --git a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go b/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go deleted file mode 100644 index 7acfb18..0000000 --- a/vendor/github.com/andybalholm/brotli/brotli_bit_stream.go +++ /dev/null @@ -1,1300 +0,0 @@ -package brotli - -import ( - "math" - "sync" -) - -const maxHuffmanTreeSize = (2*numCommandSymbols + 1) - -/* The maximum size of Huffman dictionary for distances assuming that - NPOSTFIX = 0 and NDIRECT = 0. */ -const maxSimpleDistanceAlphabetSize = 140 - -/* Represents the range of values belonging to a prefix code: - [offset, offset + 2^nbits) */ -type prefixCodeRange struct { - offset uint32 - nbits uint32 -} - -var kBlockLengthPrefixCode = [numBlockLenSymbols]prefixCodeRange{ - prefixCodeRange{1, 2}, - prefixCodeRange{5, 2}, - prefixCodeRange{9, 2}, - prefixCodeRange{13, 2}, - prefixCodeRange{17, 3}, - prefixCodeRange{25, 3}, - prefixCodeRange{33, 3}, - prefixCodeRange{41, 3}, - prefixCodeRange{49, 4}, - prefixCodeRange{65, 4}, - prefixCodeRange{81, 4}, - prefixCodeRange{97, 4}, - prefixCodeRange{113, 5}, - prefixCodeRange{145, 5}, - prefixCodeRange{177, 5}, - prefixCodeRange{209, 5}, - prefixCodeRange{241, 6}, - prefixCodeRange{305, 6}, - prefixCodeRange{369, 7}, - prefixCodeRange{497, 8}, - prefixCodeRange{753, 9}, - prefixCodeRange{1265, 10}, - prefixCodeRange{2289, 11}, - prefixCodeRange{4337, 12}, - prefixCodeRange{8433, 13}, - prefixCodeRange{16625, 24}, -} - -func blockLengthPrefixCode(len uint32) uint32 { - var code uint32 - if len >= 177 { - if len >= 753 { - code = 20 - } else { - code = 14 - } - } else if len >= 41 { - code = 7 - } else { - code = 0 - } - for code < (numBlockLenSymbols-1) && len >= kBlockLengthPrefixCode[code+1].offset { - code++ - } - return code -} - -func getBlockLengthPrefixCode(len uint32, code *uint, n_extra *uint32, extra *uint32) { - *code = uint(blockLengthPrefixCode(uint32(len))) - *n_extra = kBlockLengthPrefixCode[*code].nbits - *extra = len - kBlockLengthPrefixCode[*code].offset -} - -type blockTypeCodeCalculator struct { - last_type uint - second_last_type uint -} - -func initBlockTypeCodeCalculator(self *blockTypeCodeCalculator) { - self.last_type = 1 - self.second_last_type = 0 -} - -func nextBlockTypeCode(calculator *blockTypeCodeCalculator, type_ byte) uint { - var type_code uint - if uint(type_) == calculator.last_type+1 { - type_code = 1 - } else if uint(type_) == calculator.second_last_type { - type_code = 0 - } else { - type_code = uint(type_) + 2 - } - calculator.second_last_type = calculator.last_type - calculator.last_type = uint(type_) - return type_code -} - -/* |nibblesbits| represents the 2 bits to encode MNIBBLES (0-3) - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ -func encodeMlen(length uint, bits *uint64, numbits *uint, nibblesbits *uint64) { - var lg uint - if length == 1 { - lg = 1 - } else { - lg = uint(log2FloorNonZero(uint(uint32(length-1)))) + 1 - } - var tmp uint - if lg < 16 { - tmp = 16 - } else { - tmp = 
(lg + 3) - } - var mnibbles uint = tmp / 4 - assert(length > 0) - assert(length <= 1<<24) - assert(lg <= 24) - *nibblesbits = uint64(mnibbles) - 4 - *numbits = mnibbles * 4 - *bits = uint64(length) - 1 -} - -func storeCommandExtra(cmd *command, storage_ix *uint, storage []byte) { - var copylen_code uint32 = commandCopyLenCode(cmd) - var inscode uint16 = getInsertLengthCode(uint(cmd.insert_len_)) - var copycode uint16 = getCopyLengthCode(uint(copylen_code)) - var insnumextra uint32 = getInsertExtra(inscode) - var insextraval uint64 = uint64(cmd.insert_len_) - uint64(getInsertBase(inscode)) - var copyextraval uint64 = uint64(copylen_code) - uint64(getCopyBase(copycode)) - var bits uint64 = copyextraval<<insnumextra | insextraval - writeBits(uint(insnumextra+getCopyExtra(copycode)), bits, storage_ix, storage) -} - -/* Data structure that stores almost everything that is needed to encode each - block switch command. */ -type blockSplitCode struct { - type_code_calculator blockTypeCodeCalculator - type_depths [maxBlockTypeSymbols]byte - type_bits [maxBlockTypeSymbols]uint16 - length_depths [numBlockLenSymbols]byte - length_bits [numBlockLenSymbols]uint16 -} - -/* Stores a number between 0 and 255. */ -func storeVarLenUint8(n uint, storage_ix *uint, storage []byte) { - if n == 0 { - writeBits(1, 0, storage_ix, storage) - } else { - var nbits uint = uint(log2FloorNonZero(n)) - writeBits(1, 1, storage_ix, storage) - writeBits(3, uint64(nbits), storage_ix, storage) - writeBits(nbits, uint64(n)-(uint64(1)<<nbits), storage_ix, storage) - } -} - -/* Stores the compressed meta-block header. - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ -func storeCompressedMetaBlockHeader(is_final_block bool, length uint, storage_ix *uint, storage []byte) { - var lenbits uint64 - var nlenbits uint - var nibblesbits uint64 - var is_final uint64 - if is_final_block { - is_final = 1 - } else { - is_final = 0 - } - - /* Write ISLAST bit. */ - writeBits(1, is_final, storage_ix, storage) - - /* Write ISEMPTY bit. */ - if is_final_block { - writeBits(1, 0, storage_ix, storage) - } - - encodeMlen(length, &lenbits, &nlenbits, &nibblesbits) - writeBits(2, nibblesbits, storage_ix, storage) - writeBits(nlenbits, lenbits, storage_ix, storage) - - if !is_final_block { - /* Write ISUNCOMPRESSED bit. */ - writeBits(1, 0, storage_ix, storage) - } -} - -/* Stores the uncompressed meta-block header. - REQUIRES: length > 0 - REQUIRES: length <= (1 << 24) */ -func storeUncompressedMetaBlockHeader(length uint, storage_ix *uint, storage []byte) { - var lenbits uint64 - var nlenbits uint - var nibblesbits uint64 - - /* Write ISLAST bit. - Uncompressed block cannot be the last one, so set to 0. */ - writeBits(1, 0, storage_ix, storage) - - encodeMlen(length, &lenbits, &nlenbits, &nibblesbits) - writeBits(2, nibblesbits, storage_ix, storage) - writeBits(nlenbits, lenbits, storage_ix, storage) - - /* Write ISUNCOMPRESSED bit. */ - writeBits(1, 1, storage_ix, storage) -} - -var storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} - -var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols = [6]byte{0, 7, 3, 2, 1, 15} -var storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths = [6]byte{2, 4, 3, 2, 2, 4} - -func storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes int, code_length_bitdepth []byte, storage_ix *uint, storage []byte) { - var skip_some uint = 0 - var codes_to_store uint = codeLengthCodes - /* The bit lengths of the Huffman code over the code length alphabet - are compressed with the following static Huffman code: - Symbol Code - ------ ---- - 0 00 - 1 1110 - 2 110 - 3 01 - 4 10 - 5 1111 */ - - /* Throw away trailing zeros: */ - if num_codes > 1 { - for ; codes_to_store > 0; codes_to_store-- { - if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[codes_to_store-1]] != 0 { - break - } - } - } - - if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[0]] == 0 && code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[1]] == 0 { - skip_some = 2 /* skips two. */ - if code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[2]] == 0 { - skip_some = 3 /* skips three.
*/ - } - } - - writeBits(2, uint64(skip_some), storage_ix, storage) - { - var i uint - for i = skip_some; i < codes_to_store; i++ { - var l uint = uint(code_length_bitdepth[storeHuffmanTreeOfHuffmanTreeToBitMask_kStorageOrder[i]]) - writeBits(uint(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeBitLengths[l]), uint64(storeHuffmanTreeOfHuffmanTreeToBitMask_kHuffmanBitLengthHuffmanCodeSymbols[l]), storage_ix, storage) - } - } -} - -func storeHuffmanTreeToBitMask(huffman_tree_size uint, huffman_tree []byte, huffman_tree_extra_bits []byte, code_length_bitdepth []byte, code_length_bitdepth_symbols []uint16, storage_ix *uint, storage []byte) { - var i uint - for i = 0; i < huffman_tree_size; i++ { - var ix uint = uint(huffman_tree[i]) - writeBits(uint(code_length_bitdepth[ix]), uint64(code_length_bitdepth_symbols[ix]), storage_ix, storage) - - /* Extra bits */ - switch ix { - case repeatPreviousCodeLength: - writeBits(2, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) - - case repeatZeroCodeLength: - writeBits(3, uint64(huffman_tree_extra_bits[i]), storage_ix, storage) - } - } -} - -func storeSimpleHuffmanTree(depths []byte, symbols []uint, num_symbols uint, max_bits uint, storage_ix *uint, storage []byte) { - /* value of 1 indicates a simple Huffman code */ - writeBits(2, 1, storage_ix, storage) - - writeBits(2, uint64(num_symbols)-1, storage_ix, storage) /* NSYM - 1 */ - { - /* Sort */ - var i uint - for i = 0; i < num_symbols; i++ { - var j uint - for j = i + 1; j < num_symbols; j++ { - if depths[symbols[j]] < depths[symbols[i]] { - var tmp uint = symbols[j] - symbols[j] = symbols[i] - symbols[i] = tmp - } - } - } - } - - if num_symbols == 2 { - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) - } else if num_symbols == 3 { - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) - } else { - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) - - /* tree-select */ - var tmp int - if depths[symbols[0]] == 1 { - tmp = 1 - } else { - tmp = 0 - } - writeBits(1, uint64(tmp), storage_ix, storage) - } -} - -/* num = alphabet size - depths = symbol depths */ -func storeHuffmanTree(depths []byte, num uint, tree []huffmanTree, storage_ix *uint, storage []byte) { - var huffman_tree [numCommandSymbols]byte - var huffman_tree_extra_bits [numCommandSymbols]byte - var huffman_tree_size uint = 0 - var code_length_bitdepth = [codeLengthCodes]byte{0} - var code_length_bitdepth_symbols [codeLengthCodes]uint16 - var huffman_tree_histogram = [codeLengthCodes]uint32{0} - var i uint - var num_codes int = 0 - /* Write the Huffman tree into the brotli-representation. - The command alphabet is the largest, so this allocation will fit all - alphabets. */ - - var code uint = 0 - - assert(num <= numCommandSymbols) - - writeHuffmanTree(depths, num, &huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:]) - - /* Calculate the statistics of the Huffman tree in brotli-representation. 
*/ - for i = 0; i < huffman_tree_size; i++ { - huffman_tree_histogram[huffman_tree[i]]++ - } - - for i = 0; i < codeLengthCodes; i++ { - if huffman_tree_histogram[i] != 0 { - if num_codes == 0 { - code = i - num_codes = 1 - } else if num_codes == 1 { - num_codes = 2 - break - } - } - } - - /* Calculate another Huffman tree to use for compressing both the - earlier Huffman tree with. */ - createHuffmanTree(huffman_tree_histogram[:], codeLengthCodes, 5, tree, code_length_bitdepth[:]) - - convertBitDepthsToSymbols(code_length_bitdepth[:], codeLengthCodes, code_length_bitdepth_symbols[:]) - - /* Now, we have all the data, let's start storing it */ - storeHuffmanTreeOfHuffmanTreeToBitMask(num_codes, code_length_bitdepth[:], storage_ix, storage) - - if num_codes == 1 { - code_length_bitdepth[code] = 0 - } - - /* Store the real Huffman tree now. */ - storeHuffmanTreeToBitMask(huffman_tree_size, huffman_tree[:], huffman_tree_extra_bits[:], code_length_bitdepth[:], code_length_bitdepth_symbols[:], storage_ix, storage) -} - -/* Builds a Huffman tree from histogram[0:length] into depth[0:length] and - bits[0:length] and stores the encoded tree to the bit stream. */ -func buildAndStoreHuffmanTree(histogram []uint32, histogram_length uint, alphabet_size uint, tree []huffmanTree, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { - var count uint = 0 - var s4 = [4]uint{0} - var i uint - var max_bits uint = 0 - for i = 0; i < histogram_length; i++ { - if histogram[i] != 0 { - if count < 4 { - s4[count] = i - } else if count > 4 { - break - } - - count++ - } - } - { - var max_bits_counter uint = alphabet_size - 1 - for max_bits_counter != 0 { - max_bits_counter >>= 1 - max_bits++ - } - } - - if count <= 1 { - writeBits(4, 1, storage_ix, storage) - writeBits(max_bits, uint64(s4[0]), storage_ix, storage) - depth[s4[0]] = 0 - bits[s4[0]] = 0 - return - } - - for i := 0; i < int(histogram_length); i++ { - depth[i] = 0 - } - createHuffmanTree(histogram, histogram_length, 15, tree, depth) - convertBitDepthsToSymbols(depth, histogram_length, bits) - - if count <= 4 { - storeSimpleHuffmanTree(depth, s4[:], count, max_bits, storage_ix, storage) - } else { - storeHuffmanTree(depth, histogram_length, tree, storage_ix, storage) - } -} - -func sortHuffmanTree1(v0 huffmanTree, v1 huffmanTree) bool { - return v0.total_count_ < v1.total_count_ -} - -var huffmanTreePool sync.Pool - -func buildAndStoreHuffmanTreeFast(histogram []uint32, histogram_total uint, max_bits uint, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { - var count uint = 0 - var symbols = [4]uint{0} - var length uint = 0 - var total uint = histogram_total - for total != 0 { - if histogram[length] != 0 { - if count < 4 { - symbols[count] = length - } - - count++ - total -= uint(histogram[length]) - } - - length++ - } - - if count <= 1 { - writeBits(4, 1, storage_ix, storage) - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - depth[symbols[0]] = 0 - bits[symbols[0]] = 0 - return - } - - for i := 0; i < int(length); i++ { - depth[i] = 0 - } - { - var max_tree_size uint = 2*length + 1 - tree, _ := huffmanTreePool.Get().(*[]huffmanTree) - if tree == nil || cap(*tree) < int(max_tree_size) { - tmp := make([]huffmanTree, max_tree_size) - tree = &tmp - } else { - *tree = (*tree)[:max_tree_size] - } - var count_limit uint32 - for count_limit = 1; ; count_limit *= 2 { - var node int = 0 - var l uint - for l = length; l != 0; { - l-- - if histogram[l] != 0 { - if histogram[l] >= count_limit { - 
initHuffmanTree(&(*tree)[node:][0], histogram[l], -1, int16(l)) - } else { - initHuffmanTree(&(*tree)[node:][0], count_limit, -1, int16(l)) - } - - node++ - } - } - { - var n int = node - /* Points to the next leaf node. */ /* Points to the next non-leaf node. */ - var sentinel huffmanTree - var i int = 0 - var j int = n + 1 - var k int - - sortHuffmanTreeItems(*tree, uint(n), huffmanTreeComparator(sortHuffmanTree1)) - - /* The nodes are: - [0, n): the sorted leaf nodes that we start with. - [n]: we add a sentinel here. - [n + 1, 2n): new parent nodes are added here, starting from - (n+1). These are naturally in ascending order. - [2n]: we add a sentinel at the end as well. - There will be (2n+1) elements at the end. */ - initHuffmanTree(&sentinel, math.MaxUint32, -1, -1) - - (*tree)[node] = sentinel - node++ - (*tree)[node] = sentinel - node++ - - for k = n - 1; k > 0; k-- { - var left int - var right int - if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { - left = i - i++ - } else { - left = j - j++ - } - - if (*tree)[i].total_count_ <= (*tree)[j].total_count_ { - right = i - i++ - } else { - right = j - j++ - } - - /* The sentinel node becomes the parent node. */ - (*tree)[node-1].total_count_ = (*tree)[left].total_count_ + (*tree)[right].total_count_ - - (*tree)[node-1].index_left_ = int16(left) - (*tree)[node-1].index_right_or_value_ = int16(right) - - /* Add back the last sentinel node. */ - (*tree)[node] = sentinel - node++ - } - - if setDepth(2*n-1, *tree, depth, 14) { - /* We need to pack the Huffman tree in 14 bits. If this was not - successful, add fake entities to the lowest values and retry. */ - break - } - } - } - - huffmanTreePool.Put(tree) - } - - convertBitDepthsToSymbols(depth, length, bits) - if count <= 4 { - var i uint - - /* value of 1 indicates a simple Huffman code */ - writeBits(2, 1, storage_ix, storage) - - writeBits(2, uint64(count)-1, storage_ix, storage) /* NSYM - 1 */ - - /* Sort */ - for i = 0; i < count; i++ { - var j uint - for j = i + 1; j < count; j++ { - if depth[symbols[j]] < depth[symbols[i]] { - var tmp uint = symbols[j] - symbols[j] = symbols[i] - symbols[i] = tmp - } - } - } - - if count == 2 { - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) - } else if count == 3 { - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) - } else { - writeBits(max_bits, uint64(symbols[0]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[1]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[2]), storage_ix, storage) - writeBits(max_bits, uint64(symbols[3]), storage_ix, storage) - - /* tree-select */ - var tmp int - if depth[symbols[0]] == 1 { - tmp = 1 - } else { - tmp = 0 - } - writeBits(1, uint64(tmp), storage_ix, storage) - } - } else { - var previous_value byte = 8 - var i uint - - /* Complex Huffman Tree */ - storeStaticCodeLengthCode(storage_ix, storage) - - /* Actual RLE coding. 
*/ - for i = 0; i < length; { - var value byte = depth[i] - var reps uint = 1 - var k uint - for k = i + 1; k < length && depth[k] == value; k++ { - reps++ - } - - i += reps - if value == 0 { - writeBits(uint(kZeroRepsDepth[reps]), kZeroRepsBits[reps], storage_ix, storage) - } else { - if previous_value != value { - writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) - reps-- - } - - if reps < 3 { - for reps != 0 { - reps-- - writeBits(uint(kCodeLengthDepth[value]), uint64(kCodeLengthBits[value]), storage_ix, storage) - } - } else { - reps -= 3 - writeBits(uint(kNonZeroRepsDepth[reps]), kNonZeroRepsBits[reps], storage_ix, storage) - } - - previous_value = value - } - } - } -} - -func indexOf(v []byte, v_size uint, value byte) uint { - var i uint = 0 - for ; i < v_size; i++ { - if v[i] == value { - return i - } - } - - return i -} - -func moveToFront(v []byte, index uint) { - var value byte = v[index] - var i uint - for i = index; i != 0; i-- { - v[i] = v[i-1] - } - - v[0] = value -} - -func moveToFrontTransform(v_in []uint32, v_size uint, v_out []uint32) { - var i uint - var mtf [256]byte - var max_value uint32 - if v_size == 0 { - return - } - - max_value = v_in[0] - for i = 1; i < v_size; i++ { - if v_in[i] > max_value { - max_value = v_in[i] - } - } - - assert(max_value < 256) - for i = 0; uint32(i) <= max_value; i++ { - mtf[i] = byte(i) - } - { - var mtf_size uint = uint(max_value + 1) - for i = 0; i < v_size; i++ { - var index uint = indexOf(mtf[:], mtf_size, byte(v_in[i])) - assert(index < mtf_size) - v_out[i] = uint32(index) - moveToFront(mtf[:], index) - } - } -} - -/* Finds runs of zeros in v[0..in_size) and replaces them with a prefix code of - the run length plus extra bits (lower 9 bits is the prefix code and the rest - are the extra bits). Non-zero values in v[] are shifted by - *max_length_prefix. Will not create prefix codes bigger than the initial - value of *max_run_length_prefix. The prefix code of run length L is simply - Log2Floor(L) and the number of extra bits is the same as the prefix code. 
*/ -func runLengthCodeZeros(in_size uint, v []uint32, out_size *uint, max_run_length_prefix *uint32) { - var max_reps uint32 = 0 - var i uint - var max_prefix uint32 - for i = 0; i < in_size; { - var reps uint32 = 0 - for ; i < in_size && v[i] != 0; i++ { - } - for ; i < in_size && v[i] == 0; i++ { - reps++ - } - - max_reps = brotli_max_uint32_t(reps, max_reps) - } - - if max_reps > 0 { - max_prefix = log2FloorNonZero(uint(max_reps)) - } else { - max_prefix = 0 - } - max_prefix = brotli_min_uint32_t(max_prefix, *max_run_length_prefix) - *max_run_length_prefix = max_prefix - *out_size = 0 - for i = 0; i < in_size; { - assert(*out_size <= i) - if v[i] != 0 { - v[*out_size] = v[i] + *max_run_length_prefix - i++ - (*out_size)++ - } else { - var reps uint32 = 1 - var k uint - for k = i + 1; k < in_size && v[k] == 0; k++ { - reps++ - } - - i += uint(reps) - for reps != 0 { - if reps < 2< 0) - writeSingleBit(use_rle, storage_ix, storage) - if use_rle { - writeBits(4, uint64(max_run_length_prefix)-1, storage_ix, storage) - } - } - - buildAndStoreHuffmanTree(histogram[:], uint(uint32(num_clusters)+max_run_length_prefix), uint(uint32(num_clusters)+max_run_length_prefix), tree, depths[:], bits[:], storage_ix, storage) - for i = 0; i < num_rle_symbols; i++ { - var rle_symbol uint32 = rle_symbols[i] & encodeContextMap_kSymbolMask - var extra_bits_val uint32 = rle_symbols[i] >> symbolBits - writeBits(uint(depths[rle_symbol]), uint64(bits[rle_symbol]), storage_ix, storage) - if rle_symbol > 0 && rle_symbol <= max_run_length_prefix { - writeBits(uint(rle_symbol), uint64(extra_bits_val), storage_ix, storage) - } - } - - writeBits(1, 1, storage_ix, storage) /* use move-to-front */ - rle_symbols = nil -} - -/* Stores the block switch command with index block_ix to the bit stream. */ -func storeBlockSwitch(code *blockSplitCode, block_len uint32, block_type byte, is_first_block bool, storage_ix *uint, storage []byte) { - var typecode uint = nextBlockTypeCode(&code.type_code_calculator, block_type) - var lencode uint - var len_nextra uint32 - var len_extra uint32 - if !is_first_block { - writeBits(uint(code.type_depths[typecode]), uint64(code.type_bits[typecode]), storage_ix, storage) - } - - getBlockLengthPrefixCode(block_len, &lencode, &len_nextra, &len_extra) - - writeBits(uint(code.length_depths[lencode]), uint64(code.length_bits[lencode]), storage_ix, storage) - writeBits(uint(len_nextra), uint64(len_extra), storage_ix, storage) -} - -/* Builds a BlockSplitCode data structure from the block split given by the - vector of block types and block lengths and stores it to the bit stream. */ -func buildAndStoreBlockSplitCode(types []byte, lengths []uint32, num_blocks uint, num_types uint, tree []huffmanTree, code *blockSplitCode, storage_ix *uint, storage []byte) { - var type_histo [maxBlockTypeSymbols]uint32 - var length_histo [numBlockLenSymbols]uint32 - var i uint - var type_code_calculator blockTypeCodeCalculator - for i := 0; i < int(num_types+2); i++ { - type_histo[i] = 0 - } - length_histo = [numBlockLenSymbols]uint32{} - initBlockTypeCodeCalculator(&type_code_calculator) - for i = 0; i < num_blocks; i++ { - var type_code uint = nextBlockTypeCode(&type_code_calculator, types[i]) - if i != 0 { - type_histo[type_code]++ - } - length_histo[blockLengthPrefixCode(lengths[i])]++ - } - - storeVarLenUint8(num_types-1, storage_ix, storage) - if num_types > 1 { /* TODO: else? could StoreBlockSwitch occur? 
*/ - buildAndStoreHuffmanTree(type_histo[0:], num_types+2, num_types+2, tree, code.type_depths[0:], code.type_bits[0:], storage_ix, storage) - buildAndStoreHuffmanTree(length_histo[0:], numBlockLenSymbols, numBlockLenSymbols, tree, code.length_depths[0:], code.length_bits[0:], storage_ix, storage) - storeBlockSwitch(code, lengths[0], types[0], true, storage_ix, storage) - } -} - -/* Stores a context map where the histogram type is always the block type. */ -func storeTrivialContextMap(num_types uint, context_bits uint, tree []huffmanTree, storage_ix *uint, storage []byte) { - storeVarLenUint8(num_types-1, storage_ix, storage) - if num_types > 1 { - var repeat_code uint = context_bits - 1 - var repeat_bits uint = (1 << repeat_code) - 1 - var alphabet_size uint = num_types + repeat_code - var histogram [maxContextMapSymbols]uint32 - var depths [maxContextMapSymbols]byte - var bits [maxContextMapSymbols]uint16 - var i uint - for i := 0; i < int(alphabet_size); i++ { - histogram[i] = 0 - } - - /* Write RLEMAX. */ - writeBits(1, 1, storage_ix, storage) - - writeBits(4, uint64(repeat_code)-1, storage_ix, storage) - histogram[repeat_code] = uint32(num_types) - histogram[0] = 1 - for i = context_bits; i < alphabet_size; i++ { - histogram[i] = 1 - } - - buildAndStoreHuffmanTree(histogram[:], alphabet_size, alphabet_size, tree, depths[:], bits[:], storage_ix, storage) - for i = 0; i < num_types; i++ { - var tmp uint - if i == 0 { - tmp = 0 - } else { - tmp = i + context_bits - 1 - } - var code uint = tmp - writeBits(uint(depths[code]), uint64(bits[code]), storage_ix, storage) - writeBits(uint(depths[repeat_code]), uint64(bits[repeat_code]), storage_ix, storage) - writeBits(repeat_code, uint64(repeat_bits), storage_ix, storage) - } - - /* Write IMTF (inverse-move-to-front) bit. */ - writeBits(1, 1, storage_ix, storage) - } -} - -/* Manages the encoding of one block category (literal, command or distance). */ -type blockEncoder struct { - histogram_length_ uint - num_block_types_ uint - block_types_ []byte - block_lengths_ []uint32 - num_blocks_ uint - block_split_code_ blockSplitCode - block_ix_ uint - block_len_ uint - entropy_ix_ uint - depths_ []byte - bits_ []uint16 -} - -var blockEncoderPool sync.Pool - -func getBlockEncoder(histogram_length uint, num_block_types uint, block_types []byte, block_lengths []uint32, num_blocks uint) *blockEncoder { - self, _ := blockEncoderPool.Get().(*blockEncoder) - - if self != nil { - self.block_ix_ = 0 - self.entropy_ix_ = 0 - self.depths_ = self.depths_[:0] - self.bits_ = self.bits_[:0] - } else { - self = &blockEncoder{} - } - - self.histogram_length_ = histogram_length - self.num_block_types_ = num_block_types - self.block_types_ = block_types - self.block_lengths_ = block_lengths - self.num_blocks_ = num_blocks - initBlockTypeCodeCalculator(&self.block_split_code_.type_code_calculator) - if num_blocks == 0 { - self.block_len_ = 0 - } else { - self.block_len_ = uint(block_lengths[0]) - } - - return self -} - -func cleanupBlockEncoder(self *blockEncoder) { - blockEncoderPool.Put(self) -} - -/* Creates entropy codes of block lengths and block types and stores them - to the bit stream. */ -func buildAndStoreBlockSwitchEntropyCodes(self *blockEncoder, tree []huffmanTree, storage_ix *uint, storage []byte) { - buildAndStoreBlockSplitCode(self.block_types_, self.block_lengths_, self.num_blocks_, self.num_block_types_, tree, &self.block_split_code_, storage_ix, storage) -} - -/* Stores the next symbol with the entropy code of the current block type. 
- Updates the block type and block length at block boundaries. */ -func storeSymbol(self *blockEncoder, symbol uint, storage_ix *uint, storage []byte) { - if self.block_len_ == 0 { - self.block_ix_++ - var block_ix uint = self.block_ix_ - var block_len uint32 = self.block_lengths_[block_ix] - var block_type byte = self.block_types_[block_ix] - self.block_len_ = uint(block_len) - self.entropy_ix_ = uint(block_type) * self.histogram_length_ - storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) - } - - self.block_len_-- - { - var ix uint = self.entropy_ix_ + symbol - writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) - } -} - -/* Stores the next symbol with the entropy code of the current block type and - context value. - Updates the block type and block length at block boundaries. */ -func storeSymbolWithContext(self *blockEncoder, symbol uint, context uint, context_map []uint32, storage_ix *uint, storage []byte, context_bits uint) { - if self.block_len_ == 0 { - self.block_ix_++ - var block_ix uint = self.block_ix_ - var block_len uint32 = self.block_lengths_[block_ix] - var block_type byte = self.block_types_[block_ix] - self.block_len_ = uint(block_len) - self.entropy_ix_ = uint(block_type) << context_bits - storeBlockSwitch(&self.block_split_code_, block_len, block_type, false, storage_ix, storage) - } - - self.block_len_-- - { - var histo_ix uint = uint(context_map[self.entropy_ix_+context]) - var ix uint = histo_ix*self.histogram_length_ + symbol - writeBits(uint(self.depths_[ix]), uint64(self.bits_[ix]), storage_ix, storage) - } -} - -func buildAndStoreEntropyCodesLiteral(self *blockEncoder, histograms []histogramLiteral, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { - var table_size uint = histograms_size * self.histogram_length_ - if cap(self.depths_) < int(table_size) { - self.depths_ = make([]byte, table_size) - } else { - self.depths_ = self.depths_[:table_size] - } - if cap(self.bits_) < int(table_size) { - self.bits_ = make([]uint16, table_size) - } else { - self.bits_ = self.bits_[:table_size] - } - { - var i uint - for i = 0; i < histograms_size; i++ { - var ix uint = i * self.histogram_length_ - buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) - } - } -} - -func buildAndStoreEntropyCodesCommand(self *blockEncoder, histograms []histogramCommand, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { - var table_size uint = histograms_size * self.histogram_length_ - if cap(self.depths_) < int(table_size) { - self.depths_ = make([]byte, table_size) - } else { - self.depths_ = self.depths_[:table_size] - } - if cap(self.bits_) < int(table_size) { - self.bits_ = make([]uint16, table_size) - } else { - self.bits_ = self.bits_[:table_size] - } - { - var i uint - for i = 0; i < histograms_size; i++ { - var ix uint = i * self.histogram_length_ - buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) - } - } -} - -func buildAndStoreEntropyCodesDistance(self *blockEncoder, histograms []histogramDistance, histograms_size uint, alphabet_size uint, tree []huffmanTree, storage_ix *uint, storage []byte) { - var table_size uint = histograms_size * self.histogram_length_ - if cap(self.depths_) < int(table_size) { - self.depths_ = 
make([]byte, table_size) - } else { - self.depths_ = self.depths_[:table_size] - } - if cap(self.bits_) < int(table_size) { - self.bits_ = make([]uint16, table_size) - } else { - self.bits_ = self.bits_[:table_size] - } - { - var i uint - for i = 0; i < histograms_size; i++ { - var ix uint = i * self.histogram_length_ - buildAndStoreHuffmanTree(histograms[i].data_[0:], self.histogram_length_, alphabet_size, tree, self.depths_[ix:], self.bits_[ix:], storage_ix, storage) - } - } -} - -func jumpToByteBoundary(storage_ix *uint, storage []byte) { - *storage_ix = (*storage_ix + 7) &^ 7 - storage[*storage_ix>>3] = 0 -} - -func storeMetaBlock(input []byte, start_pos uint, length uint, mask uint, prev_byte byte, prev_byte2 byte, is_last bool, params *encoderParams, literal_context_mode int, commands []command, mb *metaBlockSplit, storage_ix *uint, storage []byte) { - var pos uint = start_pos - var i uint - var num_distance_symbols uint32 = params.dist.alphabet_size - var num_effective_distance_symbols uint32 = num_distance_symbols - var tree []huffmanTree - var literal_context_lut contextLUT = getContextLUT(literal_context_mode) - var dist *distanceParams = ¶ms.dist - if params.large_window && num_effective_distance_symbols > numHistogramDistanceSymbols { - num_effective_distance_symbols = numHistogramDistanceSymbols - } - - storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) - - tree = make([]huffmanTree, maxHuffmanTreeSize) - literal_enc := getBlockEncoder(numLiteralSymbols, mb.literal_split.num_types, mb.literal_split.types, mb.literal_split.lengths, mb.literal_split.num_blocks) - command_enc := getBlockEncoder(numCommandSymbols, mb.command_split.num_types, mb.command_split.types, mb.command_split.lengths, mb.command_split.num_blocks) - distance_enc := getBlockEncoder(uint(num_effective_distance_symbols), mb.distance_split.num_types, mb.distance_split.types, mb.distance_split.lengths, mb.distance_split.num_blocks) - - buildAndStoreBlockSwitchEntropyCodes(literal_enc, tree, storage_ix, storage) - buildAndStoreBlockSwitchEntropyCodes(command_enc, tree, storage_ix, storage) - buildAndStoreBlockSwitchEntropyCodes(distance_enc, tree, storage_ix, storage) - - writeBits(2, uint64(dist.distance_postfix_bits), storage_ix, storage) - writeBits(4, uint64(dist.num_direct_distance_codes)>>dist.distance_postfix_bits, storage_ix, storage) - for i = 0; i < mb.literal_split.num_types; i++ { - writeBits(2, uint64(literal_context_mode), storage_ix, storage) - } - - if mb.literal_context_map_size == 0 { - storeTrivialContextMap(mb.literal_histograms_size, literalContextBits, tree, storage_ix, storage) - } else { - encodeContextMap(mb.literal_context_map, mb.literal_context_map_size, mb.literal_histograms_size, tree, storage_ix, storage) - } - - if mb.distance_context_map_size == 0 { - storeTrivialContextMap(mb.distance_histograms_size, distanceContextBits, tree, storage_ix, storage) - } else { - encodeContextMap(mb.distance_context_map, mb.distance_context_map_size, mb.distance_histograms_size, tree, storage_ix, storage) - } - - buildAndStoreEntropyCodesLiteral(literal_enc, mb.literal_histograms, mb.literal_histograms_size, numLiteralSymbols, tree, storage_ix, storage) - buildAndStoreEntropyCodesCommand(command_enc, mb.command_histograms, mb.command_histograms_size, numCommandSymbols, tree, storage_ix, storage) - buildAndStoreEntropyCodesDistance(distance_enc, mb.distance_histograms, mb.distance_histograms_size, uint(num_distance_symbols), tree, storage_ix, storage) - tree = nil - - for _, cmd := 
range commands { - var cmd_code uint = uint(cmd.cmd_prefix_) - storeSymbol(command_enc, cmd_code, storage_ix, storage) - storeCommandExtra(&cmd, storage_ix, storage) - if mb.literal_context_map_size == 0 { - var j uint - for j = uint(cmd.insert_len_); j != 0; j-- { - storeSymbol(literal_enc, uint(input[pos&mask]), storage_ix, storage) - pos++ - } - } else { - var j uint - for j = uint(cmd.insert_len_); j != 0; j-- { - var context uint = uint(getContext(prev_byte, prev_byte2, literal_context_lut)) - var literal byte = input[pos&mask] - storeSymbolWithContext(literal_enc, uint(literal), context, mb.literal_context_map, storage_ix, storage, literalContextBits) - prev_byte2 = prev_byte - prev_byte = literal - pos++ - } - } - - pos += uint(commandCopyLen(&cmd)) - if commandCopyLen(&cmd) != 0 { - prev_byte2 = input[(pos-2)&mask] - prev_byte = input[(pos-1)&mask] - if cmd.cmd_prefix_ >= 128 { - var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF - var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 - var distextra uint64 = uint64(cmd.dist_extra_) - if mb.distance_context_map_size == 0 { - storeSymbol(distance_enc, dist_code, storage_ix, storage) - } else { - var context uint = uint(commandDistanceContext(&cmd)) - storeSymbolWithContext(distance_enc, dist_code, context, mb.distance_context_map, storage_ix, storage, distanceContextBits) - } - - writeBits(uint(distnumextra), distextra, storage_ix, storage) - } - } - } - - cleanupBlockEncoder(distance_enc) - cleanupBlockEncoder(command_enc) - cleanupBlockEncoder(literal_enc) - if is_last { - jumpToByteBoundary(storage_ix, storage) - } -} - -func buildHistograms(input []byte, start_pos uint, mask uint, commands []command, lit_histo *histogramLiteral, cmd_histo *histogramCommand, dist_histo *histogramDistance) { - var pos uint = start_pos - for _, cmd := range commands { - var j uint - histogramAddCommand(cmd_histo, uint(cmd.cmd_prefix_)) - for j = uint(cmd.insert_len_); j != 0; j-- { - histogramAddLiteral(lit_histo, uint(input[pos&mask])) - pos++ - } - - pos += uint(commandCopyLen(&cmd)) - if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { - histogramAddDistance(dist_histo, uint(cmd.dist_prefix_)&0x3FF) - } - } -} - -func storeDataWithHuffmanCodes(input []byte, start_pos uint, mask uint, commands []command, lit_depth []byte, lit_bits []uint16, cmd_depth []byte, cmd_bits []uint16, dist_depth []byte, dist_bits []uint16, storage_ix *uint, storage []byte) { - var pos uint = start_pos - for _, cmd := range commands { - var cmd_code uint = uint(cmd.cmd_prefix_) - var j uint - writeBits(uint(cmd_depth[cmd_code]), uint64(cmd_bits[cmd_code]), storage_ix, storage) - storeCommandExtra(&cmd, storage_ix, storage) - for j = uint(cmd.insert_len_); j != 0; j-- { - var literal byte = input[pos&mask] - writeBits(uint(lit_depth[literal]), uint64(lit_bits[literal]), storage_ix, storage) - pos++ - } - - pos += uint(commandCopyLen(&cmd)) - if commandCopyLen(&cmd) != 0 && cmd.cmd_prefix_ >= 128 { - var dist_code uint = uint(cmd.dist_prefix_) & 0x3FF - var distnumextra uint32 = uint32(cmd.dist_prefix_) >> 10 - var distextra uint32 = cmd.dist_extra_ - writeBits(uint(dist_depth[dist_code]), uint64(dist_bits[dist_code]), storage_ix, storage) - writeBits(uint(distnumextra), uint64(distextra), storage_ix, storage) - } - } -} - -func storeMetaBlockTrivial(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { - var lit_histo histogramLiteral - var cmd_histo histogramCommand - var 
dist_histo histogramDistance - var lit_depth [numLiteralSymbols]byte - var lit_bits [numLiteralSymbols]uint16 - var cmd_depth [numCommandSymbols]byte - var cmd_bits [numCommandSymbols]uint16 - var dist_depth [maxSimpleDistanceAlphabetSize]byte - var dist_bits [maxSimpleDistanceAlphabetSize]uint16 - var tree []huffmanTree - var num_distance_symbols uint32 = params.dist.alphabet_size - - storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) - - histogramClearLiteral(&lit_histo) - histogramClearCommand(&cmd_histo) - histogramClearDistance(&dist_histo) - - buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) - - writeBits(13, 0, storage_ix, storage) - - tree = make([]huffmanTree, maxHuffmanTreeSize) - buildAndStoreHuffmanTree(lit_histo.data_[:], numLiteralSymbols, numLiteralSymbols, tree, lit_depth[:], lit_bits[:], storage_ix, storage) - buildAndStoreHuffmanTree(cmd_histo.data_[:], numCommandSymbols, numCommandSymbols, tree, cmd_depth[:], cmd_bits[:], storage_ix, storage) - buildAndStoreHuffmanTree(dist_histo.data_[:], maxSimpleDistanceAlphabetSize, uint(num_distance_symbols), tree, dist_depth[:], dist_bits[:], storage_ix, storage) - tree = nil - storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) - if is_last { - jumpToByteBoundary(storage_ix, storage) - } -} - -func storeMetaBlockFast(input []byte, start_pos uint, length uint, mask uint, is_last bool, params *encoderParams, commands []command, storage_ix *uint, storage []byte) { - var num_distance_symbols uint32 = params.dist.alphabet_size - var distance_alphabet_bits uint32 = log2FloorNonZero(uint(num_distance_symbols-1)) + 1 - - storeCompressedMetaBlockHeader(is_last, length, storage_ix, storage) - - writeBits(13, 0, storage_ix, storage) - - if len(commands) <= 128 { - var histogram = [numLiteralSymbols]uint32{0} - var pos uint = start_pos - var num_literals uint = 0 - var lit_depth [numLiteralSymbols]byte - var lit_bits [numLiteralSymbols]uint16 - for _, cmd := range commands { - var j uint - for j = uint(cmd.insert_len_); j != 0; j-- { - histogram[input[pos&mask]]++ - pos++ - } - - num_literals += uint(cmd.insert_len_) - pos += uint(commandCopyLen(&cmd)) - } - - buildAndStoreHuffmanTreeFast(histogram[:], num_literals, /* max_bits = */ - 8, lit_depth[:], lit_bits[:], storage_ix, storage) - - storeStaticCommandHuffmanTree(storage_ix, storage) - storeStaticDistanceHuffmanTree(storage_ix, storage) - storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], kStaticCommandCodeDepth[:], kStaticCommandCodeBits[:], kStaticDistanceCodeDepth[:], kStaticDistanceCodeBits[:], storage_ix, storage) - } else { - var lit_histo histogramLiteral - var cmd_histo histogramCommand - var dist_histo histogramDistance - var lit_depth [numLiteralSymbols]byte - var lit_bits [numLiteralSymbols]uint16 - var cmd_depth [numCommandSymbols]byte - var cmd_bits [numCommandSymbols]uint16 - var dist_depth [maxSimpleDistanceAlphabetSize]byte - var dist_bits [maxSimpleDistanceAlphabetSize]uint16 - histogramClearLiteral(&lit_histo) - histogramClearCommand(&cmd_histo) - histogramClearDistance(&dist_histo) - buildHistograms(input, start_pos, mask, commands, &lit_histo, &cmd_histo, &dist_histo) - buildAndStoreHuffmanTreeFast(lit_histo.data_[:], lit_histo.total_count_, /* max_bits = */ - 8, lit_depth[:], lit_bits[:], storage_ix, storage) - - buildAndStoreHuffmanTreeFast(cmd_histo.data_[:], 
cmd_histo.total_count_, /* max_bits = */ - 10, cmd_depth[:], cmd_bits[:], storage_ix, storage) - - buildAndStoreHuffmanTreeFast(dist_histo.data_[:], dist_histo.total_count_, /* max_bits = */ - uint(distance_alphabet_bits), dist_depth[:], dist_bits[:], storage_ix, storage) - - storeDataWithHuffmanCodes(input, start_pos, mask, commands, lit_depth[:], lit_bits[:], cmd_depth[:], cmd_bits[:], dist_depth[:], dist_bits[:], storage_ix, storage) - } - - if is_last { - jumpToByteBoundary(storage_ix, storage) - } -} - -/* This is for storing uncompressed blocks (simple raw storage of - bytes-as-bytes). */ -func storeUncompressedMetaBlock(is_final_block bool, input []byte, position uint, mask uint, len uint, storage_ix *uint, storage []byte) { - var masked_pos uint = position & mask - storeUncompressedMetaBlockHeader(uint(len), storage_ix, storage) - jumpToByteBoundary(storage_ix, storage) - - if masked_pos+len > mask+1 { - var len1 uint = mask + 1 - masked_pos - copy(storage[*storage_ix>>3:], input[masked_pos:][:len1]) - *storage_ix += len1 << 3 - len -= len1 - masked_pos = 0 - } - - copy(storage[*storage_ix>>3:], input[masked_pos:][:len]) - *storage_ix += uint(len << 3) - - /* We need to clear the next 4 bytes to continue to be - compatible with BrotliWriteBits. */ - writeBitsPrepareStorage(*storage_ix, storage) - - /* Since the uncompressed block itself may not be the final block, add an - empty one after this. */ - if is_final_block { - writeBits(1, 1, storage_ix, storage) /* islast */ - writeBits(1, 1, storage_ix, storage) /* isempty */ - jumpToByteBoundary(storage_ix, storage) - } -} diff --git a/vendor/github.com/andybalholm/brotli/cluster.go b/vendor/github.com/andybalholm/brotli/cluster.go deleted file mode 100644 index df8a328..0000000 --- a/vendor/github.com/andybalholm/brotli/cluster.go +++ /dev/null @@ -1,30 +0,0 @@ -package brotli - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Functions for clustering similar histograms together. */ - -type histogramPair struct { - idx1 uint32 - idx2 uint32 - cost_combo float64 - cost_diff float64 -} - -func histogramPairIsLess(p1 *histogramPair, p2 *histogramPair) bool { - if p1.cost_diff != p2.cost_diff { - return p1.cost_diff > p2.cost_diff - } - - return (p1.idx2 - p1.idx1) > (p2.idx2 - p2.idx1) -} - -/* Returns entropy reduction of the context map when we combine two clusters. */ -func clusterCostDiff(size_a uint, size_b uint) float64 { - var size_c uint = size_a + size_b - return float64(size_a)*fastLog2(size_a) + float64(size_b)*fastLog2(size_b) - float64(size_c)*fastLog2(size_c) -} diff --git a/vendor/github.com/andybalholm/brotli/cluster_command.go b/vendor/github.com/andybalholm/brotli/cluster_command.go deleted file mode 100644 index 45b569b..0000000 --- a/vendor/github.com/andybalholm/brotli/cluster_command.go +++ /dev/null @@ -1,164 +0,0 @@ -package brotli - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if - it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. 
*/ -func compareAndPushToQueueCommand(out []histogramCommand, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { - var is_good_pair bool = false - var p histogramPair - p.idx2 = 0 - p.idx1 = p.idx2 - p.cost_combo = 0 - p.cost_diff = p.cost_combo - if idx1 == idx2 { - return - } - - if idx2 < idx1 { - var t uint32 = idx2 - idx2 = idx1 - idx1 = t - } - - p.idx1 = idx1 - p.idx2 = idx2 - p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) - p.cost_diff -= out[idx1].bit_cost_ - p.cost_diff -= out[idx2].bit_cost_ - - if out[idx1].total_count_ == 0 { - p.cost_combo = out[idx2].bit_cost_ - is_good_pair = true - } else if out[idx2].total_count_ == 0 { - p.cost_combo = out[idx1].bit_cost_ - is_good_pair = true - } else { - var threshold float64 - if *num_pairs == 0 { - threshold = 1e99 - } else { - threshold = brotli_max_double(0.0, pairs[0].cost_diff) - } - var combo histogramCommand = out[idx1] - var cost_combo float64 - histogramAddHistogramCommand(&combo, &out[idx2]) - cost_combo = populationCostCommand(&combo) - if cost_combo < threshold-p.cost_diff { - p.cost_combo = cost_combo - is_good_pair = true - } - } - - if is_good_pair { - p.cost_diff += p.cost_combo - if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { - /* Replace the top of the queue if needed. */ - if *num_pairs < max_num_pairs { - pairs[*num_pairs] = pairs[0] - (*num_pairs)++ - } - - pairs[0] = p - } else if *num_pairs < max_num_pairs { - pairs[*num_pairs] = p - (*num_pairs)++ - } - } -} - -func histogramCombineCommand(out []histogramCommand, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { - var cost_diff_threshold float64 = 0.0 - var min_cluster_size uint = 1 - var num_pairs uint = 0 - { - /* We maintain a vector of histogram pairs, with the property that the pair - with the maximum bit cost reduction is the first. */ - var idx1 uint - for idx1 = 0; idx1 < num_clusters; idx1++ { - var idx2 uint - for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { - compareAndPushToQueueCommand(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) - } - } - } - - for num_clusters > min_cluster_size { - var best_idx1 uint32 - var best_idx2 uint32 - var i uint - if pairs[0].cost_diff >= cost_diff_threshold { - cost_diff_threshold = 1e99 - min_cluster_size = max_clusters - continue - } - - /* Take the best pair from the top of heap. */ - best_idx1 = pairs[0].idx1 - - best_idx2 = pairs[0].idx2 - histogramAddHistogramCommand(&out[best_idx1], &out[best_idx2]) - out[best_idx1].bit_cost_ = pairs[0].cost_combo - cluster_size[best_idx1] += cluster_size[best_idx2] - for i = 0; i < symbols_size; i++ { - if symbols[i] == best_idx2 { - symbols[i] = best_idx1 - } - } - - for i = 0; i < num_clusters; i++ { - if clusters[i] == best_idx2 { - copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) - break - } - } - - num_clusters-- - { - /* Remove pairs intersecting the just combined best pair. */ - var copy_to_idx uint = 0 - for i = 0; i < num_pairs; i++ { - var p *histogramPair = &pairs[i] - if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { - /* Remove invalid pair from the queue. */ - continue - } - - if histogramPairIsLess(&pairs[0], p) { - /* Replace the top of the queue if needed. 
*/ - var front histogramPair = pairs[0] - pairs[0] = *p - pairs[copy_to_idx] = front - } else { - pairs[copy_to_idx] = *p - } - - copy_to_idx++ - } - - num_pairs = copy_to_idx - } - - /* Push new pairs formed with the combined histogram to the heap. */ - for i = 0; i < num_clusters; i++ { - compareAndPushToQueueCommand(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) - } - } - - return num_clusters -} - -/* What is the bit cost of moving histogram from cur_symbol to candidate. */ -func histogramBitCostDistanceCommand(histogram *histogramCommand, candidate *histogramCommand) float64 { - if histogram.total_count_ == 0 { - return 0.0 - } else { - var tmp histogramCommand = *histogram - histogramAddHistogramCommand(&tmp, candidate) - return populationCostCommand(&tmp) - candidate.bit_cost_ - } -} diff --git a/vendor/github.com/andybalholm/brotli/cluster_distance.go b/vendor/github.com/andybalholm/brotli/cluster_distance.go deleted file mode 100644 index 1aaa86e..0000000 --- a/vendor/github.com/andybalholm/brotli/cluster_distance.go +++ /dev/null @@ -1,326 +0,0 @@ -package brotli - -import "math" - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if - it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */ -func compareAndPushToQueueDistance(out []histogramDistance, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { - var is_good_pair bool = false - var p histogramPair - p.idx2 = 0 - p.idx1 = p.idx2 - p.cost_combo = 0 - p.cost_diff = p.cost_combo - if idx1 == idx2 { - return - } - - if idx2 < idx1 { - var t uint32 = idx2 - idx2 = idx1 - idx1 = t - } - - p.idx1 = idx1 - p.idx2 = idx2 - p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) - p.cost_diff -= out[idx1].bit_cost_ - p.cost_diff -= out[idx2].bit_cost_ - - if out[idx1].total_count_ == 0 { - p.cost_combo = out[idx2].bit_cost_ - is_good_pair = true - } else if out[idx2].total_count_ == 0 { - p.cost_combo = out[idx1].bit_cost_ - is_good_pair = true - } else { - var threshold float64 - if *num_pairs == 0 { - threshold = 1e99 - } else { - threshold = brotli_max_double(0.0, pairs[0].cost_diff) - } - var combo histogramDistance = out[idx1] - var cost_combo float64 - histogramAddHistogramDistance(&combo, &out[idx2]) - cost_combo = populationCostDistance(&combo) - if cost_combo < threshold-p.cost_diff { - p.cost_combo = cost_combo - is_good_pair = true - } - } - - if is_good_pair { - p.cost_diff += p.cost_combo - if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { - /* Replace the top of the queue if needed. */ - if *num_pairs < max_num_pairs { - pairs[*num_pairs] = pairs[0] - (*num_pairs)++ - } - - pairs[0] = p - } else if *num_pairs < max_num_pairs { - pairs[*num_pairs] = p - (*num_pairs)++ - } - } -} - -func histogramCombineDistance(out []histogramDistance, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { - var cost_diff_threshold float64 = 0.0 - var min_cluster_size uint = 1 - var num_pairs uint = 0 - { - /* We maintain a vector of histogram pairs, with the property that the pair - with the maximum bit cost reduction is the first. 
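
compareAndPushToQueue* above keeps candidate pairs in a plain slice rather than a full heap: the only invariant is that the entry offering the largest bit-cost reduction (the "least" pair per histogramPairIsLess) sits at index 0, and the slice is capped at max_num_pairs. A small sketch of that insert discipline, with illustrative names and a simplified pair struct:

package main

import "fmt"

type pair struct {
	idx1, idx2 uint32
	costDiff   float64 // change in total bits if the two clusters are merged
}

// less mirrors histogramPairIsLess: a larger costDiff sorts as "less",
// with ties broken by the wider index span.
func less(a, b pair) bool {
	if a.costDiff != b.costDiff {
		return a.costDiff > b.costDiff
	}
	return a.idx2-a.idx1 > b.idx2-b.idx1
}

// pushPair appends p while keeping the pair with the biggest saving at
// pairs[0], and stops growing the slice once maxPairs entries exist.
func pushPair(pairs []pair, p pair, maxPairs int) []pair {
	if len(pairs) > 0 && less(pairs[0], p) {
		if len(pairs) < maxPairs {
			pairs = append(pairs, pairs[0]) // keep the displaced front around
		}
		pairs[0] = p
	} else if len(pairs) < maxPairs {
		pairs = append(pairs, p)
	}
	return pairs
}

func main() {
	var pairs []pair
	for _, p := range []pair{{0, 1, -1}, {1, 2, -3}, {0, 2, -2}} {
		pairs = pushPair(pairs, p, 4)
	}
	fmt.Println(pairs[0]) // {1 2 -3}: the merge with the biggest saving leads
}
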
*/ - var idx1 uint - for idx1 = 0; idx1 < num_clusters; idx1++ { - var idx2 uint - for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { - compareAndPushToQueueDistance(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) - } - } - } - - for num_clusters > min_cluster_size { - var best_idx1 uint32 - var best_idx2 uint32 - var i uint - if pairs[0].cost_diff >= cost_diff_threshold { - cost_diff_threshold = 1e99 - min_cluster_size = max_clusters - continue - } - - /* Take the best pair from the top of heap. */ - best_idx1 = pairs[0].idx1 - - best_idx2 = pairs[0].idx2 - histogramAddHistogramDistance(&out[best_idx1], &out[best_idx2]) - out[best_idx1].bit_cost_ = pairs[0].cost_combo - cluster_size[best_idx1] += cluster_size[best_idx2] - for i = 0; i < symbols_size; i++ { - if symbols[i] == best_idx2 { - symbols[i] = best_idx1 - } - } - - for i = 0; i < num_clusters; i++ { - if clusters[i] == best_idx2 { - copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) - break - } - } - - num_clusters-- - { - /* Remove pairs intersecting the just combined best pair. */ - var copy_to_idx uint = 0 - for i = 0; i < num_pairs; i++ { - var p *histogramPair = &pairs[i] - if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { - /* Remove invalid pair from the queue. */ - continue - } - - if histogramPairIsLess(&pairs[0], p) { - /* Replace the top of the queue if needed. */ - var front histogramPair = pairs[0] - pairs[0] = *p - pairs[copy_to_idx] = front - } else { - pairs[copy_to_idx] = *p - } - - copy_to_idx++ - } - - num_pairs = copy_to_idx - } - - /* Push new pairs formed with the combined histogram to the heap. */ - for i = 0; i < num_clusters; i++ { - compareAndPushToQueueDistance(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) - } - } - - return num_clusters -} - -/* What is the bit cost of moving histogram from cur_symbol to candidate. */ -func histogramBitCostDistanceDistance(histogram *histogramDistance, candidate *histogramDistance) float64 { - if histogram.total_count_ == 0 { - return 0.0 - } else { - var tmp histogramDistance = *histogram - histogramAddHistogramDistance(&tmp, candidate) - return populationCostDistance(&tmp) - candidate.bit_cost_ - } -} - -/* Find the best 'out' histogram for each of the 'in' histograms. - When called, clusters[0..num_clusters) contains the unique values from - symbols[0..in_size), but this property is not preserved in this function. - Note: we assume that out[]->bit_cost_ is already up-to-date. */ -func histogramRemapDistance(in []histogramDistance, in_size uint, clusters []uint32, num_clusters uint, out []histogramDistance, symbols []uint32) { - var i uint - for i = 0; i < in_size; i++ { - var best_out uint32 - if i == 0 { - best_out = symbols[0] - } else { - best_out = symbols[i-1] - } - var best_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[best_out]) - var j uint - for j = 0; j < num_clusters; j++ { - var cur_bits float64 = histogramBitCostDistanceDistance(&in[i], &out[clusters[j]]) - if cur_bits < best_bits { - best_bits = cur_bits - best_out = clusters[j] - } - } - - symbols[i] = best_out - } - - /* Recompute each out based on raw and symbols. 
*/ - for i = 0; i < num_clusters; i++ { - histogramClearDistance(&out[clusters[i]]) - } - - for i = 0; i < in_size; i++ { - histogramAddHistogramDistance(&out[symbols[i]], &in[i]) - } -} - -/* Reorders elements of the out[0..length) array and changes values in - symbols[0..length) array in the following way: - * when called, symbols[] contains indexes into out[], and has N unique - values (possibly N < length) - * on return, symbols'[i] = f(symbols[i]) and - out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, - where f is a bijection between the range of symbols[] and [0..N), and - the first occurrences of values in symbols'[i] come in consecutive - increasing order. - Returns N, the number of unique values in symbols[]. */ - -var histogramReindexDistance_kInvalidIndex uint32 = math.MaxUint32 - -func histogramReindexDistance(out []histogramDistance, symbols []uint32, length uint) uint { - var new_index []uint32 = make([]uint32, length) - var next_index uint32 - var tmp []histogramDistance - var i uint - for i = 0; i < length; i++ { - new_index[i] = histogramReindexDistance_kInvalidIndex - } - - next_index = 0 - for i = 0; i < length; i++ { - if new_index[symbols[i]] == histogramReindexDistance_kInvalidIndex { - new_index[symbols[i]] = next_index - next_index++ - } - } - - /* TODO: by using idea of "cycle-sort" we can avoid allocation of - tmp and reduce the number of copying by the factor of 2. */ - tmp = make([]histogramDistance, next_index) - - next_index = 0 - for i = 0; i < length; i++ { - if new_index[symbols[i]] == next_index { - tmp[next_index] = out[symbols[i]] - next_index++ - } - - symbols[i] = new_index[symbols[i]] - } - - new_index = nil - for i = 0; uint32(i) < next_index; i++ { - out[i] = tmp[i] - } - - tmp = nil - return uint(next_index) -} - -func clusterHistogramsDistance(in []histogramDistance, in_size uint, max_histograms uint, out []histogramDistance, out_size *uint, histogram_symbols []uint32) { - var cluster_size []uint32 = make([]uint32, in_size) - var clusters []uint32 = make([]uint32, in_size) - var num_clusters uint = 0 - var max_input_histograms uint = 64 - var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 - var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) - var i uint - - /* For the first pass of clustering, we allow all pairs. */ - for i = 0; i < in_size; i++ { - cluster_size[i] = 1 - } - - for i = 0; i < in_size; i++ { - out[i] = in[i] - out[i].bit_cost_ = populationCostDistance(&in[i]) - histogram_symbols[i] = uint32(i) - } - - for i = 0; i < in_size; i += max_input_histograms { - var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) - var num_new_clusters uint - var j uint - for j = 0; j < num_to_combine; j++ { - clusters[num_clusters+j] = uint32(i + j) - } - - num_new_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) - num_clusters += num_new_clusters - } - { - /* For the second pass, we limit the total number of histogram pairs. - After this limit is reached, we only keep searching for the best pair. 
*/ - var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) - if pairs_capacity < (max_num_pairs + 1) { - var _new_size uint - if pairs_capacity == 0 { - _new_size = max_num_pairs + 1 - } else { - _new_size = pairs_capacity - } - var new_array []histogramPair - for _new_size < (max_num_pairs + 1) { - _new_size *= 2 - } - new_array = make([]histogramPair, _new_size) - if pairs_capacity != 0 { - copy(new_array, pairs[:pairs_capacity]) - } - - pairs = new_array - pairs_capacity = _new_size - } - - /* Collapse similar histograms. */ - num_clusters = histogramCombineDistance(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) - } - - pairs = nil - cluster_size = nil - - /* Find the optimal map from original histograms to the final ones. */ - histogramRemapDistance(in, in_size, clusters, num_clusters, out, histogram_symbols) - - clusters = nil - - /* Convert the context map to a canonical form. */ - *out_size = histogramReindexDistance(out, histogram_symbols, in_size) -} diff --git a/vendor/github.com/andybalholm/brotli/cluster_literal.go b/vendor/github.com/andybalholm/brotli/cluster_literal.go deleted file mode 100644 index 6ba66f3..0000000 --- a/vendor/github.com/andybalholm/brotli/cluster_literal.go +++ /dev/null @@ -1,326 +0,0 @@ -package brotli - -import "math" - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Computes the bit cost reduction by combining out[idx1] and out[idx2] and if - it is below a threshold, stores the pair (idx1, idx2) in the *pairs queue. */ -func compareAndPushToQueueLiteral(out []histogramLiteral, cluster_size []uint32, idx1 uint32, idx2 uint32, max_num_pairs uint, pairs []histogramPair, num_pairs *uint) { - var is_good_pair bool = false - var p histogramPair - p.idx2 = 0 - p.idx1 = p.idx2 - p.cost_combo = 0 - p.cost_diff = p.cost_combo - if idx1 == idx2 { - return - } - - if idx2 < idx1 { - var t uint32 = idx2 - idx2 = idx1 - idx1 = t - } - - p.idx1 = idx1 - p.idx2 = idx2 - p.cost_diff = 0.5 * clusterCostDiff(uint(cluster_size[idx1]), uint(cluster_size[idx2])) - p.cost_diff -= out[idx1].bit_cost_ - p.cost_diff -= out[idx2].bit_cost_ - - if out[idx1].total_count_ == 0 { - p.cost_combo = out[idx2].bit_cost_ - is_good_pair = true - } else if out[idx2].total_count_ == 0 { - p.cost_combo = out[idx1].bit_cost_ - is_good_pair = true - } else { - var threshold float64 - if *num_pairs == 0 { - threshold = 1e99 - } else { - threshold = brotli_max_double(0.0, pairs[0].cost_diff) - } - var combo histogramLiteral = out[idx1] - var cost_combo float64 - histogramAddHistogramLiteral(&combo, &out[idx2]) - cost_combo = populationCostLiteral(&combo) - if cost_combo < threshold-p.cost_diff { - p.cost_combo = cost_combo - is_good_pair = true - } - } - - if is_good_pair { - p.cost_diff += p.cost_combo - if *num_pairs > 0 && histogramPairIsLess(&pairs[0], &p) { - /* Replace the top of the queue if needed. 
*/ - if *num_pairs < max_num_pairs { - pairs[*num_pairs] = pairs[0] - (*num_pairs)++ - } - - pairs[0] = p - } else if *num_pairs < max_num_pairs { - pairs[*num_pairs] = p - (*num_pairs)++ - } - } -} - -func histogramCombineLiteral(out []histogramLiteral, cluster_size []uint32, symbols []uint32, clusters []uint32, pairs []histogramPair, num_clusters uint, symbols_size uint, max_clusters uint, max_num_pairs uint) uint { - var cost_diff_threshold float64 = 0.0 - var min_cluster_size uint = 1 - var num_pairs uint = 0 - { - /* We maintain a vector of histogram pairs, with the property that the pair - with the maximum bit cost reduction is the first. */ - var idx1 uint - for idx1 = 0; idx1 < num_clusters; idx1++ { - var idx2 uint - for idx2 = idx1 + 1; idx2 < num_clusters; idx2++ { - compareAndPushToQueueLiteral(out, cluster_size, clusters[idx1], clusters[idx2], max_num_pairs, pairs[0:], &num_pairs) - } - } - } - - for num_clusters > min_cluster_size { - var best_idx1 uint32 - var best_idx2 uint32 - var i uint - if pairs[0].cost_diff >= cost_diff_threshold { - cost_diff_threshold = 1e99 - min_cluster_size = max_clusters - continue - } - - /* Take the best pair from the top of heap. */ - best_idx1 = pairs[0].idx1 - - best_idx2 = pairs[0].idx2 - histogramAddHistogramLiteral(&out[best_idx1], &out[best_idx2]) - out[best_idx1].bit_cost_ = pairs[0].cost_combo - cluster_size[best_idx1] += cluster_size[best_idx2] - for i = 0; i < symbols_size; i++ { - if symbols[i] == best_idx2 { - symbols[i] = best_idx1 - } - } - - for i = 0; i < num_clusters; i++ { - if clusters[i] == best_idx2 { - copy(clusters[i:], clusters[i+1:][:num_clusters-i-1]) - break - } - } - - num_clusters-- - { - /* Remove pairs intersecting the just combined best pair. */ - var copy_to_idx uint = 0 - for i = 0; i < num_pairs; i++ { - var p *histogramPair = &pairs[i] - if p.idx1 == best_idx1 || p.idx2 == best_idx1 || p.idx1 == best_idx2 || p.idx2 == best_idx2 { - /* Remove invalid pair from the queue. */ - continue - } - - if histogramPairIsLess(&pairs[0], p) { - /* Replace the top of the queue if needed. */ - var front histogramPair = pairs[0] - pairs[0] = *p - pairs[copy_to_idx] = front - } else { - pairs[copy_to_idx] = *p - } - - copy_to_idx++ - } - - num_pairs = copy_to_idx - } - - /* Push new pairs formed with the combined histogram to the heap. */ - for i = 0; i < num_clusters; i++ { - compareAndPushToQueueLiteral(out, cluster_size, best_idx1, clusters[i], max_num_pairs, pairs[0:], &num_pairs) - } - } - - return num_clusters -} - -/* What is the bit cost of moving histogram from cur_symbol to candidate. */ -func histogramBitCostDistanceLiteral(histogram *histogramLiteral, candidate *histogramLiteral) float64 { - if histogram.total_count_ == 0 { - return 0.0 - } else { - var tmp histogramLiteral = *histogram - histogramAddHistogramLiteral(&tmp, candidate) - return populationCostLiteral(&tmp) - candidate.bit_cost_ - } -} - -/* Find the best 'out' histogram for each of the 'in' histograms. - When called, clusters[0..num_clusters) contains the unique values from - symbols[0..in_size), but this property is not preserved in this function. - Note: we assume that out[]->bit_cost_ is already up-to-date. 
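
histogramBitCostDistance* and histogramRemap* in these removed cluster_*.go files assign every input histogram to the candidate cluster that adds the least to the total bit cost: cost(merged) minus the candidate's own cost. A rough standalone sketch of the same selection with a plain Shannon-entropy cost in place of the library's populationCost* estimate (all names here are illustrative):

package main

import (
	"fmt"
	"math"
)

// shannonBits estimates the bits an optimal per-symbol code needs for a
// histogram: total*log2(total) - sum(count*log2(count)).
func shannonBits(h []uint) float64 {
	var total, sum float64
	for _, c := range h {
		if c == 0 {
			continue
		}
		total += float64(c)
		sum += float64(c) * math.Log2(float64(c))
	}
	if total == 0 {
		return 0
	}
	return total*math.Log2(total) - sum
}

// merged returns the element-wise sum of two equally sized histograms.
func merged(a, b []uint) []uint {
	out := make([]uint, len(a))
	for i := range a {
		out[i] = a[i] + b[i]
	}
	return out
}

// bitCostDistance mirrors histogramBitCostDistance*: the extra bits incurred
// by folding h into candidate, relative to candidate's own cost.
func bitCostDistance(h, candidate []uint) float64 {
	return shannonBits(merged(h, candidate)) - shannonBits(candidate)
}

func main() {
	h := []uint{9, 1, 0, 0}
	clusters := [][]uint{{8, 2, 0, 0}, {0, 0, 5, 5}}
	best, bestCost := 0, math.Inf(1)
	for i, c := range clusters {
		if d := bitCostDistance(h, c); d < bestCost {
			best, bestCost = i, d
		}
	}
	fmt.Println("best cluster:", best) // 0: the similarly skewed cluster is cheaper
}
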
*/ -func histogramRemapLiteral(in []histogramLiteral, in_size uint, clusters []uint32, num_clusters uint, out []histogramLiteral, symbols []uint32) { - var i uint - for i = 0; i < in_size; i++ { - var best_out uint32 - if i == 0 { - best_out = symbols[0] - } else { - best_out = symbols[i-1] - } - var best_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[best_out]) - var j uint - for j = 0; j < num_clusters; j++ { - var cur_bits float64 = histogramBitCostDistanceLiteral(&in[i], &out[clusters[j]]) - if cur_bits < best_bits { - best_bits = cur_bits - best_out = clusters[j] - } - } - - symbols[i] = best_out - } - - /* Recompute each out based on raw and symbols. */ - for i = 0; i < num_clusters; i++ { - histogramClearLiteral(&out[clusters[i]]) - } - - for i = 0; i < in_size; i++ { - histogramAddHistogramLiteral(&out[symbols[i]], &in[i]) - } -} - -/* Reorders elements of the out[0..length) array and changes values in - symbols[0..length) array in the following way: - * when called, symbols[] contains indexes into out[], and has N unique - values (possibly N < length) - * on return, symbols'[i] = f(symbols[i]) and - out'[symbols'[i]] = out[symbols[i]], for each 0 <= i < length, - where f is a bijection between the range of symbols[] and [0..N), and - the first occurrences of values in symbols'[i] come in consecutive - increasing order. - Returns N, the number of unique values in symbols[]. */ - -var histogramReindexLiteral_kInvalidIndex uint32 = math.MaxUint32 - -func histogramReindexLiteral(out []histogramLiteral, symbols []uint32, length uint) uint { - var new_index []uint32 = make([]uint32, length) - var next_index uint32 - var tmp []histogramLiteral - var i uint - for i = 0; i < length; i++ { - new_index[i] = histogramReindexLiteral_kInvalidIndex - } - - next_index = 0 - for i = 0; i < length; i++ { - if new_index[symbols[i]] == histogramReindexLiteral_kInvalidIndex { - new_index[symbols[i]] = next_index - next_index++ - } - } - - /* TODO: by using idea of "cycle-sort" we can avoid allocation of - tmp and reduce the number of copying by the factor of 2. */ - tmp = make([]histogramLiteral, next_index) - - next_index = 0 - for i = 0; i < length; i++ { - if new_index[symbols[i]] == next_index { - tmp[next_index] = out[symbols[i]] - next_index++ - } - - symbols[i] = new_index[symbols[i]] - } - - new_index = nil - for i = 0; uint32(i) < next_index; i++ { - out[i] = tmp[i] - } - - tmp = nil - return uint(next_index) -} - -func clusterHistogramsLiteral(in []histogramLiteral, in_size uint, max_histograms uint, out []histogramLiteral, out_size *uint, histogram_symbols []uint32) { - var cluster_size []uint32 = make([]uint32, in_size) - var clusters []uint32 = make([]uint32, in_size) - var num_clusters uint = 0 - var max_input_histograms uint = 64 - var pairs_capacity uint = max_input_histograms * max_input_histograms / 2 - var pairs []histogramPair = make([]histogramPair, (pairs_capacity + 1)) - var i uint - - /* For the first pass of clustering, we allow all pairs. 
*/ - for i = 0; i < in_size; i++ { - cluster_size[i] = 1 - } - - for i = 0; i < in_size; i++ { - out[i] = in[i] - out[i].bit_cost_ = populationCostLiteral(&in[i]) - histogram_symbols[i] = uint32(i) - } - - for i = 0; i < in_size; i += max_input_histograms { - var num_to_combine uint = brotli_min_size_t(in_size-i, max_input_histograms) - var num_new_clusters uint - var j uint - for j = 0; j < num_to_combine; j++ { - clusters[num_clusters+j] = uint32(i + j) - } - - num_new_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols[i:], clusters[num_clusters:], pairs, num_to_combine, num_to_combine, max_histograms, pairs_capacity) - num_clusters += num_new_clusters - } - { - /* For the second pass, we limit the total number of histogram pairs. - After this limit is reached, we only keep searching for the best pair. */ - var max_num_pairs uint = brotli_min_size_t(64*num_clusters, (num_clusters/2)*num_clusters) - if pairs_capacity < (max_num_pairs + 1) { - var _new_size uint - if pairs_capacity == 0 { - _new_size = max_num_pairs + 1 - } else { - _new_size = pairs_capacity - } - var new_array []histogramPair - for _new_size < (max_num_pairs + 1) { - _new_size *= 2 - } - new_array = make([]histogramPair, _new_size) - if pairs_capacity != 0 { - copy(new_array, pairs[:pairs_capacity]) - } - - pairs = new_array - pairs_capacity = _new_size - } - - /* Collapse similar histograms. */ - num_clusters = histogramCombineLiteral(out, cluster_size, histogram_symbols, clusters, pairs, num_clusters, in_size, max_histograms, max_num_pairs) - } - - pairs = nil - cluster_size = nil - - /* Find the optimal map from original histograms to the final ones. */ - histogramRemapLiteral(in, in_size, clusters, num_clusters, out, histogram_symbols) - - clusters = nil - - /* Convert the context map to a canonical form. 
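
histogramReindex*, called right after this comment, is what makes the context map canonical: surviving cluster ids are renumbered to 0, 1, 2, ... in order of first appearance in symbols[]. A small sketch of just that renumbering; the removed code uses a dense slice keyed by the old index plus a kInvalidIndex sentinel, and also reorders the out[] histograms, while this illustrative version only rewrites the symbol array:

package main

import "fmt"

// reindexFirstOccurrence rewrites symbols in place so that values become
// 0, 1, 2, ... in the order their original values first appear, and returns
// the number of distinct values.
func reindexFirstOccurrence(symbols []uint32) uint32 {
	newIndex := make(map[uint32]uint32, len(symbols))
	var next uint32
	for i, s := range symbols {
		idx, seen := newIndex[s]
		if !seen {
			idx = next
			newIndex[s] = idx
			next++
		}
		symbols[i] = idx
	}
	return next
}

func main() {
	symbols := []uint32{7, 7, 2, 7, 9, 2}
	n := reindexFirstOccurrence(symbols)
	fmt.Println(symbols, n) // [0 0 1 0 2 1] 3
}
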
*/ - *out_size = histogramReindexLiteral(out, histogram_symbols, in_size) -} diff --git a/vendor/github.com/andybalholm/brotli/command.go b/vendor/github.com/andybalholm/brotli/command.go deleted file mode 100644 index b1662a5..0000000 --- a/vendor/github.com/andybalholm/brotli/command.go +++ /dev/null @@ -1,254 +0,0 @@ -package brotli - -var kInsBase = []uint32{ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 8, - 10, - 14, - 18, - 26, - 34, - 50, - 66, - 98, - 130, - 194, - 322, - 578, - 1090, - 2114, - 6210, - 22594, -} - -var kInsExtra = []uint32{ - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 5, - 6, - 7, - 8, - 9, - 10, - 12, - 14, - 24, -} - -var kCopyBase = []uint32{ - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 12, - 14, - 18, - 22, - 30, - 38, - 54, - 70, - 102, - 134, - 198, - 326, - 582, - 1094, - 2118, -} - -var kCopyExtra = []uint32{ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 5, - 6, - 7, - 8, - 9, - 10, - 24, -} - -func getInsertLengthCode(insertlen uint) uint16 { - if insertlen < 6 { - return uint16(insertlen) - } else if insertlen < 130 { - var nbits uint32 = log2FloorNonZero(insertlen-2) - 1 - return uint16((nbits << 1) + uint32((insertlen-2)>>nbits) + 2) - } else if insertlen < 2114 { - return uint16(log2FloorNonZero(insertlen-66) + 10) - } else if insertlen < 6210 { - return 21 - } else if insertlen < 22594 { - return 22 - } else { - return 23 - } -} - -func getCopyLengthCode(copylen uint) uint16 { - if copylen < 10 { - return uint16(copylen - 2) - } else if copylen < 134 { - var nbits uint32 = log2FloorNonZero(copylen-6) - 1 - return uint16((nbits << 1) + uint32((copylen-6)>>nbits) + 4) - } else if copylen < 2118 { - return uint16(log2FloorNonZero(copylen-70) + 12) - } else { - return 23 - } -} - -func combineLengthCodes(inscode uint16, copycode uint16, use_last_distance bool) uint16 { - var bits64 uint16 = uint16(copycode&0x7 | (inscode&0x7)<<3) - if use_last_distance && inscode < 8 && copycode < 16 { - if copycode < 8 { - return bits64 - } else { - return bits64 | 64 - } - } else { - /* Specification: 5 Encoding of ... (last table) */ - /* offset = 2 * index, where index is in range [0..8] */ - var offset uint32 = 2 * ((uint32(copycode) >> 3) + 3*(uint32(inscode)>>3)) - - /* All values in specification are K * 64, - where K = [2, 3, 6, 4, 5, 8, 7, 9, 10], - i + 1 = [1, 2, 3, 4, 5, 6, 7, 8, 9], - K - i - 1 = [1, 1, 3, 0, 0, 2, 0, 1, 2] = D. - All values in D require only 2 bits to encode. - Magic constant is shifted 6 bits left, to avoid final multiplication. */ - offset = (offset << 5) + 0x40 + ((0x520D40 >> offset) & 0xC0) - - return uint16(offset | uint32(bits64)) - } -} - -func getLengthCode(insertlen uint, copylen uint, use_last_distance bool, code *uint16) { - var inscode uint16 = getInsertLengthCode(insertlen) - var copycode uint16 = getCopyLengthCode(copylen) - *code = combineLengthCodes(inscode, copycode, use_last_distance) -} - -func getInsertBase(inscode uint16) uint32 { - return kInsBase[inscode] -} - -func getInsertExtra(inscode uint16) uint32 { - return kInsExtra[inscode] -} - -func getCopyBase(copycode uint16) uint32 { - return kCopyBase[copycode] -} - -func getCopyExtra(copycode uint16) uint32 { - return kCopyExtra[copycode] -} - -type command struct { - insert_len_ uint32 - copy_len_ uint32 - dist_extra_ uint32 - cmd_prefix_ uint16 - dist_prefix_ uint16 -} - -/* distance_code is e.g. 0 for same-as-last short code, or 16 for offset 1. 
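
getInsertLengthCode in the removed command.go buckets an insert length into one of 24 prefix-code symbols whose base values and extra-bit counts are the kInsBase and kInsExtra tables above. A self-contained check of that round trip, reusing the same bucketing with math/bits (log2FloorNonZero(x) corresponds to bits.Len(x)-1); the helper names and sample lengths are illustrative:

package main

import (
	"fmt"
	"math/bits"
)

var insBase = []uint{0, 1, 2, 3, 4, 5, 6, 8, 10, 14, 18, 26, 34, 50, 66, 98, 130, 194, 322, 578, 1090, 2114, 6210, 22594}
var insExtra = []uint{0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7, 8, 9, 10, 12, 14, 24}

// insertLengthCode follows the same bucketing as getInsertLengthCode.
func insertLengthCode(insertlen uint) uint {
	switch {
	case insertlen < 6:
		return insertlen
	case insertlen < 130:
		nbits := uint(bits.Len(insertlen-2)) - 2 // log2FloorNonZero(tail) - 1
		return (nbits << 1) + ((insertlen - 2) >> nbits) + 2
	case insertlen < 2114:
		return uint(bits.Len(insertlen-66)) - 1 + 10
	case insertlen < 6210:
		return 21
	case insertlen < 22594:
		return 22
	default:
		return 23
	}
}

func main() {
	for _, l := range []uint{3, 100, 1000, 30000} {
		c := insertLengthCode(l)
		// Every length in a bucket is its base plus an offset that fits in
		// the bucket's extra bits, which is exactly what the encoder emits.
		fmt.Printf("len=%d code=%d base=%d extra_bits=%d extra=%d\n",
			l, c, insBase[c], insExtra[c], l-insBase[c])
	}
}
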
*/ -func makeCommand(dist *distanceParams, insertlen uint, copylen uint, copylen_code_delta int, distance_code uint) (cmd command) { - /* Don't rely on signed int representation, use honest casts. */ - var delta uint32 = uint32(byte(int8(copylen_code_delta))) - cmd.insert_len_ = uint32(insertlen) - cmd.copy_len_ = uint32(uint32(copylen) | delta<<25) - - /* The distance prefix and extra bits are stored in this Command as if - npostfix and ndirect were 0, they are only recomputed later after the - clustering if needed. */ - prefixEncodeCopyDistance(distance_code, uint(dist.num_direct_distance_codes), uint(dist.distance_postfix_bits), &cmd.dist_prefix_, &cmd.dist_extra_) - getLengthCode(insertlen, uint(int(copylen)+copylen_code_delta), (cmd.dist_prefix_&0x3FF == 0), &cmd.cmd_prefix_) - - return cmd -} - -func makeInsertCommand(insertlen uint) (cmd command) { - cmd.insert_len_ = uint32(insertlen) - cmd.copy_len_ = 4 << 25 - cmd.dist_extra_ = 0 - cmd.dist_prefix_ = numDistanceShortCodes - getLengthCode(insertlen, 4, false, &cmd.cmd_prefix_) - return cmd -} - -func commandRestoreDistanceCode(self *command, dist *distanceParams) uint32 { - if uint32(self.dist_prefix_&0x3FF) < numDistanceShortCodes+dist.num_direct_distance_codes { - return uint32(self.dist_prefix_) & 0x3FF - } else { - var dcode uint32 = uint32(self.dist_prefix_) & 0x3FF - var nbits uint32 = uint32(self.dist_prefix_) >> 10 - var extra uint32 = self.dist_extra_ - var postfix_mask uint32 = (1 << dist.distance_postfix_bits) - 1 - var hcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) >> dist.distance_postfix_bits - var lcode uint32 = (dcode - dist.num_direct_distance_codes - numDistanceShortCodes) & postfix_mask - var offset uint32 = ((2 + (hcode & 1)) << nbits) - 4 - return ((offset + extra) << dist.distance_postfix_bits) + lcode + dist.num_direct_distance_codes + numDistanceShortCodes - } -} - -func commandDistanceContext(self *command) uint32 { - var r uint32 = uint32(self.cmd_prefix_) >> 6 - var c uint32 = uint32(self.cmd_prefix_) & 7 - if (r == 0 || r == 2 || r == 4 || r == 7) && (c <= 2) { - return c - } - - return 3 -} - -func commandCopyLen(self *command) uint32 { - return self.copy_len_ & 0x1FFFFFF -} - -func commandCopyLenCode(self *command) uint32 { - var modifier uint32 = self.copy_len_ >> 25 - var delta int32 = int32(int8(byte(modifier | (modifier&0x40)<<1))) - return uint32(int32(self.copy_len_&0x1FFFFFF) + delta) -} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment.go b/vendor/github.com/andybalholm/brotli/compress_fragment.go deleted file mode 100644 index c9bd057..0000000 --- a/vendor/github.com/andybalholm/brotli/compress_fragment.go +++ /dev/null @@ -1,834 +0,0 @@ -package brotli - -import "encoding/binary" - -/* Copyright 2015 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Function for fast encoding of an input fragment, independently from the input - history. This function uses one-pass processing: when we find a backward - match, we immediately emit the corresponding command and literal codes to - the bit stream. 
- - Adapted from the CompressFragment() function in - https://github.com/google/snappy/blob/master/snappy.cc */ - -const maxDistance_compress_fragment = 262128 - -func hash5(p []byte, shift uint) uint32 { - var h uint64 = (binary.LittleEndian.Uint64(p) << 24) * uint64(kHashMul32) - return uint32(h >> shift) -} - -func hashBytesAtOffset5(v uint64, offset int, shift uint) uint32 { - assert(offset >= 0) - assert(offset <= 3) - { - var h uint64 = ((v >> uint(8*offset)) << 24) * uint64(kHashMul32) - return uint32(h >> shift) - } -} - -func isMatch5(p1 []byte, p2 []byte) bool { - return binary.LittleEndian.Uint32(p1) == binary.LittleEndian.Uint32(p2) && - p1[4] == p2[4] -} - -/* Builds a literal prefix code into "depths" and "bits" based on the statistics - of the "input" string and stores it into the bit stream. - Note that the prefix code here is built from the pre-LZ77 input, therefore - we can only approximate the statistics of the actual literal stream. - Moreover, for long inputs we build a histogram from a sample of the input - and thus have to assign a non-zero depth for each literal. - Returns estimated compression ratio millibytes/char for encoding given input - with generated code. */ -func buildAndStoreLiteralPrefixCode(input []byte, input_size uint, depths []byte, bits []uint16, storage_ix *uint, storage []byte) uint { - var histogram = [256]uint32{0} - var histogram_total uint - var i uint - if input_size < 1<<15 { - for i = 0; i < input_size; i++ { - histogram[input[i]]++ - } - - histogram_total = input_size - for i = 0; i < 256; i++ { - /* We weigh the first 11 samples with weight 3 to account for the - balancing effect of the LZ77 phase on the histogram. */ - var adjust uint32 = 2 * brotli_min_uint32_t(histogram[i], 11) - histogram[i] += adjust - histogram_total += uint(adjust) - } - } else { - const kSampleRate uint = 29 - for i = 0; i < input_size; i += kSampleRate { - histogram[input[i]]++ - } - - histogram_total = (input_size + kSampleRate - 1) / kSampleRate - for i = 0; i < 256; i++ { - /* We add 1 to each population count to avoid 0 bit depths (since this is - only a sample and we don't know if the symbol appears or not), and we - weigh the first 11 samples with weight 3 to account for the balancing - effect of the LZ77 phase on the histogram (more frequent symbols are - more likely to be in backward references instead as literals). */ - var adjust uint32 = 1 + 2*brotli_min_uint32_t(histogram[i], 11) - histogram[i] += adjust - histogram_total += uint(adjust) - } - } - - buildAndStoreHuffmanTreeFast(histogram[:], histogram_total, /* max_bits = */ - 8, depths, bits, storage_ix, storage) - { - var literal_ratio uint = 0 - for i = 0; i < 256; i++ { - if histogram[i] != 0 { - literal_ratio += uint(histogram[i] * uint32(depths[i])) - } - } - - /* Estimated encoding ratio, millibytes per symbol. */ - return (literal_ratio * 125) / histogram_total - } -} - -/* Builds a command and distance prefix code (each 64 symbols) into "depth" and - "bits" based on "histogram" and stores it into the bit stream. */ -func buildAndStoreCommandPrefixCode1(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { - var tree [129]huffmanTree - var cmd_depth = [numCommandSymbols]byte{0} - /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. 
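
hash5, a few functions up in this removed compress_fragment.go, maps the next five input bytes to a hash-table slot with a single multiply-and-shift: the 8 little-endian bytes are shifted left by 24 so only the low five survive, multiplied, and the top table_bits bits are kept. A standalone sketch of that scheme; the multiplier below is an illustrative stand-in for the library's kHashMul32, which is defined outside this hunk:

package main

import (
	"encoding/binary"
	"fmt"
)

// hashMul is an illustrative multiplicative-hash constant, not necessarily
// the value used by the removed code.
const hashMul = 0x1e35a7bd

// hashFive maps the five bytes at p[0:5] to a slot in a table of size
// 1<<tableBits. p must be at least 8 bytes long because 8 bytes are read.
func hashFive(p []byte, tableBits uint) uint32 {
	// Shifting the 64-bit load left by 24 discards the top three bytes,
	// so only p[0:5] influences the high bits of the product.
	h := (binary.LittleEndian.Uint64(p) << 24) * uint64(hashMul)
	return uint32(h >> (64 - tableBits))
}

func main() {
	data := []byte("hello, brotli fragment")
	const tableBits = 15 // one of the valid fast-path table sizes (1<<15 entries)
	for i := 0; i+8 <= len(data); i += 4 {
		fmt.Printf("pos %2d -> slot %d\n", i, hashFive(data[i:], tableBits))
	}
}
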
*/ - - var cmd_bits [64]uint16 - - createHuffmanTree(histogram, 64, 15, tree[:], depth) - createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) - - /* We have to jump through a few hoops here in order to compute - the command bits because the symbols are in a different order than in - the full alphabet. This looks complicated, but having the symbols - in this order in the command bits saves a few branches in the Emit* - functions. */ - copy(cmd_depth[:], depth[:24]) - - copy(cmd_depth[24:][:], depth[40:][:8]) - copy(cmd_depth[32:][:], depth[24:][:8]) - copy(cmd_depth[40:][:], depth[48:][:8]) - copy(cmd_depth[48:][:], depth[32:][:8]) - copy(cmd_depth[56:][:], depth[56:][:8]) - convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) - copy(bits, cmd_bits[:24]) - copy(bits[24:], cmd_bits[32:][:8]) - copy(bits[32:], cmd_bits[48:][:8]) - copy(bits[40:], cmd_bits[24:][:8]) - copy(bits[48:], cmd_bits[40:][:8]) - copy(bits[56:], cmd_bits[56:][:8]) - convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) - { - /* Create the bit length array for the full command alphabet. */ - var i uint - for i := 0; i < int(64); i++ { - cmd_depth[i] = 0 - } /* only 64 first values were used */ - copy(cmd_depth[:], depth[:8]) - copy(cmd_depth[64:][:], depth[8:][:8]) - copy(cmd_depth[128:][:], depth[16:][:8]) - copy(cmd_depth[192:][:], depth[24:][:8]) - copy(cmd_depth[384:][:], depth[32:][:8]) - for i = 0; i < 8; i++ { - cmd_depth[128+8*i] = depth[40+i] - cmd_depth[256+8*i] = depth[48+i] - cmd_depth[448+8*i] = depth[56+i] - } - - storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) - } - - storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) -} - -/* REQUIRES: insertlen < 6210 */ -func emitInsertLen1(insertlen uint, depth []byte, bits []uint16, histo []uint32, storage_ix *uint, storage []byte) { - if insertlen < 6 { - var code uint = insertlen + 40 - writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) - histo[code]++ - } else if insertlen < 130 { - var tail uint = insertlen - 2 - var nbits uint32 = log2FloorNonZero(tail) - 1 - var prefix uint = tail >> nbits - var inscode uint = uint((nbits << 1) + uint32(prefix) + 42) - writeBits(uint(depth[inscode]), uint64(bits[inscode]), storage_ix, storage) - writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> nbits - var code uint = uint((nbits << 1) + uint32(prefix) + 20) - writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) - writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> nbits - var code uint = uint((nbits << 1) + uint32(prefix) + 4) - writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) - writeBits(uint(nbits), uint64(tail)-(uint64(prefix)<> 5) + 30 - writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) - writeBits(5, uint64(tail)&31, storage_ix, storage) - writeBits(uint(depth[64]), uint64(bits[64]), storage_ix, storage) - histo[code]++ - histo[64]++ - } else if copylen < 2120 { - var tail uint = copylen - 72 - var nbits uint32 = log2FloorNonZero(tail) - var code uint = uint(nbits + 28) - writeBits(uint(depth[code]), uint64(bits[code]), storage_ix, storage) - writeBits(uint(nbits), uint64(tail)-(uint64(uint(1))<> nbits) & 1 - var offset uint = (2 + prefix) << nbits - var distcode uint = uint(2*(nbits-1) + uint32(prefix) + 80) - writeBits(uint(depth[distcode]), uint64(bits[distcode]), storage_ix, storage) - writeBits(uint(nbits), uint64(d)-uint64(offset), storage_ix, storage) - histo[distcode]++ -} - -func emitLiterals(input []byte, len uint, depth 
[]byte, bits []uint16, storage_ix *uint, storage []byte) { - var j uint - for j = 0; j < len; j++ { - var lit byte = input[j] - writeBits(uint(depth[lit]), uint64(bits[lit]), storage_ix, storage) - } -} - -/* REQUIRES: len <= 1 << 24. */ -func storeMetaBlockHeader1(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { - var nibbles uint = 6 - - /* ISLAST */ - writeBits(1, 0, storage_ix, storage) - - if len <= 1<<16 { - nibbles = 4 - } else if len <= 1<<20 { - nibbles = 5 - } - - writeBits(2, uint64(nibbles)-4, storage_ix, storage) - writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) - - /* ISUNCOMPRESSED */ - writeSingleBit(is_uncompressed, storage_ix, storage) -} - -func updateBits(n_bits uint, bits uint32, pos uint, array []byte) { - for n_bits > 0 { - var byte_pos uint = pos >> 3 - var n_unchanged_bits uint = pos & 7 - var n_changed_bits uint = brotli_min_size_t(n_bits, 8-n_unchanged_bits) - var total_bits uint = n_unchanged_bits + n_changed_bits - var mask uint32 = (^((1 << total_bits) - 1)) | ((1 << n_unchanged_bits) - 1) - var unchanged_bits uint32 = uint32(array[byte_pos]) & mask - var changed_bits uint32 = bits & ((1 << n_changed_bits) - 1) - array[byte_pos] = byte(changed_bits<>= n_changed_bits - pos += n_changed_bits - } -} - -func rewindBitPosition1(new_storage_ix uint, storage_ix *uint, storage []byte) { - var bitpos uint = new_storage_ix & 7 - var mask uint = (1 << bitpos) - 1 - storage[new_storage_ix>>3] &= byte(mask) - *storage_ix = new_storage_ix -} - -var shouldMergeBlock_kSampleRate uint = 43 - -func shouldMergeBlock(data []byte, len uint, depths []byte) bool { - var histo = [256]uint{0} - var i uint - for i = 0; i < len; i += shouldMergeBlock_kSampleRate { - histo[data[i]]++ - } - { - var total uint = (len + shouldMergeBlock_kSampleRate - 1) / shouldMergeBlock_kSampleRate - var r float64 = (fastLog2(total)+0.5)*float64(total) + 200 - for i = 0; i < 256; i++ { - r -= float64(histo[i]) * (float64(depths[i]) + fastLog2(histo[i])) - } - - return r >= 0.0 - } -} - -func shouldUseUncompressedMode(metablock_start []byte, next_emit []byte, insertlen uint, literal_ratio uint) bool { - var compressed uint = uint(-cap(next_emit) + cap(metablock_start)) - if compressed*50 > insertlen { - return false - } else { - return literal_ratio > 980 - } -} - -func emitUncompressedMetaBlock1(begin []byte, end []byte, storage_ix_start uint, storage_ix *uint, storage []byte) { - var len uint = uint(-cap(end) + cap(begin)) - rewindBitPosition1(storage_ix_start, storage_ix, storage) - storeMetaBlockHeader1(uint(len), true, storage_ix, storage) - *storage_ix = (*storage_ix + 7) &^ 7 - copy(storage[*storage_ix>>3:], begin[:len]) - *storage_ix += uint(len << 3) - storage[*storage_ix>>3] = 0 -} - -var kCmdHistoSeed = [128]uint32{ - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 0, - 0, - 0, - 0, -} - -var compressFragmentFastImpl_kFirstBlockSize uint = 3 << 15 -var compressFragmentFastImpl_kMergeBlockSize uint = 1 << 16 
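
updateBits, defined a little above, is what lets compressFragmentFastImpl reach back into already-written output and widen the MLEN nibbles of the current meta-block header when it merges the next block in (growth bounded by the kMergeBlockSize constant just above): it overwrites n bits at an arbitrary bit position while preserving the neighbouring bits. A compact sketch of the same in-place patch with illustrative names:

package main

import "fmt"

// patchBits overwrites nBits of array starting at bit position pos (LSB-first
// within each byte) with the low bits of value, leaving other bits untouched.
func patchBits(nBits uint, value uint32, pos uint, array []byte) {
	for nBits > 0 {
		bytePos := pos >> 3
		unchanged := pos & 7
		changed := nBits
		if changed > 8-unchanged {
			changed = 8 - unchanged
		}
		total := unchanged + changed
		mask := ^((uint32(1) << total) - 1) | ((uint32(1) << unchanged) - 1)
		keep := uint32(array[bytePos]) & mask
		low := value & ((uint32(1) << changed) - 1)
		array[bytePos] = byte(low<<unchanged | keep)
		nBits -= changed
		value >>= changed
		pos += changed
	}
}

func main() {
	storage := []byte{0xFF, 0xFF, 0xFF}
	// Rewrite 10 bits starting at bit 5, the same way the encoder rewrites
	// the 20-bit MLEN field after extending a meta-block; bits outside the
	// 5..14 range keep their old values.
	patchBits(10, 0x123, 5, storage)
	fmt.Printf("%08b %08b %08b\n", storage[0], storage[1], storage[2])
}
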
- -func compressFragmentFastImpl(in []byte, input_size uint, is_last bool, table []int, table_bits uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { - var cmd_histo [128]uint32 - var ip_end int - var next_emit int = 0 - var base_ip int = 0 - var input int = 0 - const kInputMarginBytes uint = windowGap - const kMinMatchLen uint = 5 - var metablock_start int = input - var block_size uint = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) - var total_block_size uint = block_size - var mlen_storage_ix uint = *storage_ix + 3 - var lit_depth [256]byte - var lit_bits [256]uint16 - var literal_ratio uint - var ip int - var last_distance int - var shift uint = 64 - table_bits - - /* "next_emit" is a pointer to the first byte that is not covered by a - previous copy. Bytes between "next_emit" and the start of the next copy or - the end of the input will be emitted as literal bytes. */ - - /* Save the start of the first block for position and distance computations. - */ - - /* Save the bit position of the MLEN field of the meta-block header, so that - we can update it later if we decide to extend this meta-block. */ - storeMetaBlockHeader1(block_size, false, storage_ix, storage) - - /* No block splits, no contexts. */ - writeBits(13, 0, storage_ix, storage) - - literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) - { - /* Store the pre-compressed command and distance prefix codes. */ - var i uint - for i = 0; i+7 < *cmd_code_numbits; i += 8 { - writeBits(8, uint64(cmd_code[i>>3]), storage_ix, storage) - } - } - - writeBits(*cmd_code_numbits&7, uint64(cmd_code[*cmd_code_numbits>>3]), storage_ix, storage) - - /* Initialize the command and distance histograms. We will gather - statistics of command and distance codes during the processing - of this block and use it to update the command and distance - prefix codes for the next block. */ -emit_commands: - copy(cmd_histo[:], kCmdHistoSeed[:]) - - /* "ip" is the input pointer. */ - ip = input - - last_distance = -1 - ip_end = int(uint(input) + block_size) - - if block_size >= kInputMarginBytes { - var len_limit uint = brotli_min_size_t(block_size-kMinMatchLen, input_size-kInputMarginBytes) - var ip_limit int = int(uint(input) + len_limit) - /* For the last block, we need to keep a 16 bytes margin so that we can be - sure that all distances are at most window size - 16. - For all other blocks, we only need to keep a margin of 5 bytes so that - we don't go over the block size with a copy. */ - - var next_hash uint32 - ip++ - for next_hash = hash5(in[ip:], shift); ; { - var skip uint32 = 32 - var next_ip int = ip - /* Step 1: Scan forward in the input looking for a 5-byte-long match. - If we get close to exhausting the input then goto emit_remainder. - - Heuristic match skipping: If 32 bytes are scanned with no matches - found, start looking only at every other byte. If 32 more bytes are - scanned, look at every third byte, etc.. When a match is found, - immediately go back to looking at every byte. This is a small loss - (~5% performance, ~0.1% density) for compressible data due to more - bookkeeping, but for non-compressible data (such as JPEG) it's a huge - win since the compressor quickly "realizes" the data is incompressible - and doesn't bother looking for matches everywhere. - - The "skip" variable keeps track of how many bytes there are since the - last match; dividing it by 32 (i.e. 
right-shifting by five) gives the - number of bytes to move ahead for each iteration. */ - - var candidate int - assert(next_emit < ip) - - trawl: - for { - var hash uint32 = next_hash - var bytes_between_hash_lookups uint32 = skip >> 5 - skip++ - assert(hash == hash5(in[next_ip:], shift)) - ip = next_ip - next_ip = int(uint32(ip) + bytes_between_hash_lookups) - if next_ip > ip_limit { - goto emit_remainder - } - - next_hash = hash5(in[next_ip:], shift) - candidate = ip - last_distance - if isMatch5(in[ip:], in[candidate:]) { - if candidate < ip { - table[hash] = int(ip - base_ip) - break - } - } - - candidate = base_ip + table[hash] - assert(candidate >= base_ip) - assert(candidate < ip) - - table[hash] = int(ip - base_ip) - if isMatch5(in[ip:], in[candidate:]) { - break - } - } - - /* Check copy distance. If candidate is not feasible, continue search. - Checking is done outside of hot loop to reduce overhead. */ - if ip-candidate > maxDistance_compress_fragment { - goto trawl - } - - /* Step 2: Emit the found match together with the literal bytes from - "next_emit" to the bit stream, and then see if we can find a next match - immediately afterwards. Repeat until we find no match for the input - without emitting some literal bytes. */ - { - var base int = ip - /* > 0 */ - var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) - var distance int = int(base - candidate) - /* We have a 5-byte match at ip, and we need to emit bytes in - [next_emit, ip). */ - - var insert uint = uint(base - next_emit) - ip += int(matched) - if insert < 6210 { - emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { - emitUncompressedMetaBlock1(in[metablock_start:], in[base:], mlen_storage_ix-3, storage_ix, storage) - input_size -= uint(base - input) - input = base - next_emit = input - goto next_block - } else { - emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - } - - emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) - if distance == last_distance { - writeBits(uint(cmd_depth[64]), uint64(cmd_bits[64]), storage_ix, storage) - cmd_histo[64]++ - } else { - emitDistance1(uint(distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - last_distance = distance - } - - emitCopyLenLastDistance1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - - next_emit = ip - if ip >= ip_limit { - goto emit_remainder - } - - /* We could immediately start working at ip now, but to improve - compression we first update "table" with the hashes of some positions - within the last copy. */ - { - var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) - var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) - var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) - table[prev_hash] = int(ip - base_ip - 3) - prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) - table[prev_hash] = int(ip - base_ip - 2) - prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) - table[prev_hash] = int(ip - base_ip - 1) - - candidate = base_ip + table[cur_hash] - table[cur_hash] = int(ip - base_ip) - } - } - - for isMatch5(in[ip:], in[candidate:]) { - var base int = ip - /* We have a 5-byte match at ip, and no need to emit any literal bytes - prior to ip. 
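
The heuristic match skipping described in the comment above is what keeps this one-pass path fast on incompressible input: skip starts at 32 and is incremented on every probe, and the scan advances skip>>5 bytes per probe, so the first 32 misses step one byte at a time, the next 32 step two bytes, and so on. A tiny simulation of just that stepping rule:

package main

import "fmt"

func main() {
	// Reproduce the probe spacing of the hot loop: the advance per probe is
	// skip>>5, computed before skip is incremented.
	skip := uint32(32)
	pos := 0
	for probe := 0; probe < 200; probe++ {
		step := int(skip >> 5)
		skip++
		pos += step
		if probe%32 == 31 {
			fmt.Printf("after %3d probes: position %4d, current step %d bytes\n",
				probe+1, pos, step)
		}
	}
}
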
*/ - - var matched uint = 5 + findMatchLengthWithLimit(in[candidate+5:], in[ip+5:], uint(ip_end-ip)-5) - if ip-candidate > maxDistance_compress_fragment { - break - } - ip += int(matched) - last_distance = int(base - candidate) /* > 0 */ - emitCopyLen1(matched, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - emitDistance1(uint(last_distance), cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - - next_emit = ip - if ip >= ip_limit { - goto emit_remainder - } - - /* We could immediately start working at ip now, but to improve - compression we first update "table" with the hashes of some positions - within the last copy. */ - { - var input_bytes uint64 = binary.LittleEndian.Uint64(in[ip-3:]) - var prev_hash uint32 = hashBytesAtOffset5(input_bytes, 0, shift) - var cur_hash uint32 = hashBytesAtOffset5(input_bytes, 3, shift) - table[prev_hash] = int(ip - base_ip - 3) - prev_hash = hashBytesAtOffset5(input_bytes, 1, shift) - table[prev_hash] = int(ip - base_ip - 2) - prev_hash = hashBytesAtOffset5(input_bytes, 2, shift) - table[prev_hash] = int(ip - base_ip - 1) - - candidate = base_ip + table[cur_hash] - table[cur_hash] = int(ip - base_ip) - } - } - - ip++ - next_hash = hash5(in[ip:], shift) - } - } - -emit_remainder: - assert(next_emit <= ip_end) - input += int(block_size) - input_size -= block_size - block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kMergeBlockSize) - - /* Decide if we want to continue this meta-block instead of emitting the - last insert-only command. */ - if input_size > 0 && total_block_size+block_size <= 1<<20 && shouldMergeBlock(in[input:], block_size, lit_depth[:]) { - assert(total_block_size > 1<<16) - - /* Update the size of the current meta-block and continue emitting commands. - We can do this because the current size and the new size both have 5 - nibbles. */ - total_block_size += block_size - - updateBits(20, uint32(total_block_size-1), mlen_storage_ix, storage) - goto emit_commands - } - - /* Emit the remaining bytes as literals. */ - if next_emit < ip_end { - var insert uint = uint(ip_end - next_emit) - if insert < 6210 { - emitInsertLen1(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) - } else if shouldUseUncompressedMode(in[metablock_start:], in[next_emit:], insert, literal_ratio) { - emitUncompressedMetaBlock1(in[metablock_start:], in[ip_end:], mlen_storage_ix-3, storage_ix, storage) - } else { - emitLongInsertLen(insert, cmd_depth, cmd_bits, cmd_histo[:], storage_ix, storage) - emitLiterals(in[next_emit:], insert, lit_depth[:], lit_bits[:], storage_ix, storage) - } - } - - next_emit = ip_end - - /* If we have more data, write a new meta-block header and prefix codes and - then continue emitting commands. */ -next_block: - if input_size > 0 { - metablock_start = input - block_size = brotli_min_size_t(input_size, compressFragmentFastImpl_kFirstBlockSize) - total_block_size = block_size - - /* Save the bit position of the MLEN field of the meta-block header, so that - we can update it later if we decide to extend this meta-block. */ - mlen_storage_ix = *storage_ix + 3 - - storeMetaBlockHeader1(block_size, false, storage_ix, storage) - - /* No block splits, no contexts. 
*/ - writeBits(13, 0, storage_ix, storage) - - literal_ratio = buildAndStoreLiteralPrefixCode(in[input:], block_size, lit_depth[:], lit_bits[:], storage_ix, storage) - buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, storage_ix, storage) - goto emit_commands - } - - if !is_last { - /* If this is not the last block, update the command and distance prefix - codes for the next block and store the compressed forms. */ - cmd_code[0] = 0 - - *cmd_code_numbits = 0 - buildAndStoreCommandPrefixCode1(cmd_histo[:], cmd_depth, cmd_bits, cmd_code_numbits, cmd_code) - } -} - -/* Compresses "input" string to the "*storage" buffer as one or more complete - meta-blocks, and updates the "*storage_ix" bit position. - - If "is_last" is 1, emits an additional empty last meta-block. - - "cmd_depth" and "cmd_bits" contain the command and distance prefix codes - (see comment in encode.h) used for the encoding of this input fragment. - If "is_last" is 0, they are updated to reflect the statistics - of this input fragment, to be used for the encoding of the next fragment. - - "*cmd_code_numbits" is the number of bits of the compressed representation - of the command and distance prefix codes, and "cmd_code" is an array of - at least "(*cmd_code_numbits + 7) >> 3" size that contains the compressed - command and distance prefix codes. If "is_last" is 0, these are also - updated to represent the updated "cmd_depth" and "cmd_bits". - - REQUIRES: "input_size" is greater than zero, or "is_last" is 1. - REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). - REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. - REQUIRES: "table_size" is an odd (9, 11, 13, 15) power of two - OUTPUT: maximal copy distance <= |input_size| - OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ -func compressFragmentFast(input []byte, input_size uint, is_last bool, table []int, table_size uint, cmd_depth []byte, cmd_bits []uint16, cmd_code_numbits *uint, cmd_code []byte, storage_ix *uint, storage []byte) { - var initial_storage_ix uint = *storage_ix - var table_bits uint = uint(log2FloorNonZero(table_size)) - - if input_size == 0 { - assert(is_last) - writeBits(1, 1, storage_ix, storage) /* islast */ - writeBits(1, 1, storage_ix, storage) /* isempty */ - *storage_ix = (*storage_ix + 7) &^ 7 - return - } - - compressFragmentFastImpl(input, input_size, is_last, table, table_bits, cmd_depth, cmd_bits, cmd_code_numbits, cmd_code, storage_ix, storage) - - /* If output is larger than single uncompressed block, rewrite it. */ - if *storage_ix-initial_storage_ix > 31+(input_size<<3) { - emitUncompressedMetaBlock1(input, input[input_size:], initial_storage_ix, storage_ix, storage) - } - - if is_last { - writeBits(1, 1, storage_ix, storage) /* islast */ - writeBits(1, 1, storage_ix, storage) /* isempty */ - *storage_ix = (*storage_ix + 7) &^ 7 - } -} diff --git a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go b/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go deleted file mode 100644 index 172dc7f..0000000 --- a/vendor/github.com/andybalholm/brotli/compress_fragment_two_pass.go +++ /dev/null @@ -1,748 +0,0 @@ -package brotli - -import "encoding/binary" - -/* Copyright 2015 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Function for fast encoding of an input fragment, independently from the input - history. 
This function uses two-pass processing: in the first pass we save - the found backward matches and literal bytes into a buffer, and in the - second pass we emit them into the bit stream using prefix codes built based - on the actual command and literal byte histograms. */ - -const kCompressFragmentTwoPassBlockSize uint = 1 << 17 - -func hash1(p []byte, shift uint, length uint) uint32 { - var h uint64 = (binary.LittleEndian.Uint64(p) << ((8 - length) * 8)) * uint64(kHashMul32) - return uint32(h >> shift) -} - -func hashBytesAtOffset(v uint64, offset uint, shift uint, length uint) uint32 { - assert(offset <= 8-length) - { - var h uint64 = ((v >> (8 * offset)) << ((8 - length) * 8)) * uint64(kHashMul32) - return uint32(h >> shift) - } -} - -func isMatch1(p1 []byte, p2 []byte, length uint) bool { - if binary.LittleEndian.Uint32(p1) != binary.LittleEndian.Uint32(p2) { - return false - } - if length == 4 { - return true - } - return p1[4] == p2[4] && p1[5] == p2[5] -} - -/* Builds a command and distance prefix code (each 64 symbols) into "depth" and - "bits" based on "histogram" and stores it into the bit stream. */ -func buildAndStoreCommandPrefixCode(histogram []uint32, depth []byte, bits []uint16, storage_ix *uint, storage []byte) { - var tree [129]huffmanTree - var cmd_depth = [numCommandSymbols]byte{0} - /* Tree size for building a tree over 64 symbols is 2 * 64 + 1. */ - - var cmd_bits [64]uint16 - createHuffmanTree(histogram, 64, 15, tree[:], depth) - createHuffmanTree(histogram[64:], 64, 14, tree[:], depth[64:]) - - /* We have to jump through a few hoops here in order to compute - the command bits because the symbols are in a different order than in - the full alphabet. This looks complicated, but having the symbols - in this order in the command bits saves a few branches in the Emit* - functions. */ - copy(cmd_depth[:], depth[24:][:24]) - - copy(cmd_depth[24:][:], depth[:8]) - copy(cmd_depth[32:][:], depth[48:][:8]) - copy(cmd_depth[40:][:], depth[8:][:8]) - copy(cmd_depth[48:][:], depth[56:][:8]) - copy(cmd_depth[56:][:], depth[16:][:8]) - convertBitDepthsToSymbols(cmd_depth[:], 64, cmd_bits[:]) - copy(bits, cmd_bits[24:][:8]) - copy(bits[8:], cmd_bits[40:][:8]) - copy(bits[16:], cmd_bits[56:][:8]) - copy(bits[24:], cmd_bits[:24]) - copy(bits[48:], cmd_bits[32:][:8]) - copy(bits[56:], cmd_bits[48:][:8]) - convertBitDepthsToSymbols(depth[64:], 64, bits[64:]) - { - /* Create the bit length array for the full command alphabet. 
*/ - var i uint - for i := 0; i < int(64); i++ { - cmd_depth[i] = 0 - } /* only 64 first values were used */ - copy(cmd_depth[:], depth[24:][:8]) - copy(cmd_depth[64:][:], depth[32:][:8]) - copy(cmd_depth[128:][:], depth[40:][:8]) - copy(cmd_depth[192:][:], depth[48:][:8]) - copy(cmd_depth[384:][:], depth[56:][:8]) - for i = 0; i < 8; i++ { - cmd_depth[128+8*i] = depth[i] - cmd_depth[256+8*i] = depth[8+i] - cmd_depth[448+8*i] = depth[16+i] - } - - storeHuffmanTree(cmd_depth[:], numCommandSymbols, tree[:], storage_ix, storage) - } - - storeHuffmanTree(depth[64:], 64, tree[:], storage_ix, storage) -} - -func emitInsertLen(insertlen uint32, commands *[]uint32) { - if insertlen < 6 { - (*commands)[0] = insertlen - } else if insertlen < 130 { - var tail uint32 = insertlen - 2 - var nbits uint32 = log2FloorNonZero(uint(tail)) - 1 - var prefix uint32 = tail >> nbits - var inscode uint32 = (nbits << 1) + prefix + 2 - var extra uint32 = tail - (prefix << nbits) - (*commands)[0] = inscode | extra<<8 - } else if insertlen < 2114 { - var tail uint32 = insertlen - 66 - var nbits uint32 = log2FloorNonZero(uint(tail)) - var code uint32 = nbits + 10 - var extra uint32 = tail - (1 << nbits) - (*commands)[0] = code | extra<<8 - } else if insertlen < 6210 { - var extra uint32 = insertlen - 2114 - (*commands)[0] = 21 | extra<<8 - } else if insertlen < 22594 { - var extra uint32 = insertlen - 6210 - (*commands)[0] = 22 | extra<<8 - } else { - var extra uint32 = insertlen - 22594 - (*commands)[0] = 23 | extra<<8 - } - - *commands = (*commands)[1:] -} - -func emitCopyLen(copylen uint, commands *[]uint32) { - if copylen < 10 { - (*commands)[0] = uint32(copylen + 38) - } else if copylen < 134 { - var tail uint = copylen - 6 - var nbits uint = uint(log2FloorNonZero(tail) - 1) - var prefix uint = tail >> nbits - var code uint = (nbits << 1) + prefix + 44 - var extra uint = tail - (prefix << nbits) - (*commands)[0] = uint32(code | extra<<8) - } else if copylen < 2118 { - var tail uint = copylen - 70 - var nbits uint = uint(log2FloorNonZero(tail)) - var code uint = nbits + 52 - var extra uint = tail - (uint(1) << nbits) - (*commands)[0] = uint32(code | extra<<8) - } else { - var extra uint = copylen - 2118 - (*commands)[0] = uint32(63 | extra<<8) - } - - *commands = (*commands)[1:] -} - -func emitCopyLenLastDistance(copylen uint, commands *[]uint32) { - if copylen < 12 { - (*commands)[0] = uint32(copylen + 20) - *commands = (*commands)[1:] - } else if copylen < 72 { - var tail uint = copylen - 8 - var nbits uint = uint(log2FloorNonZero(tail) - 1) - var prefix uint = tail >> nbits - var code uint = (nbits << 1) + prefix + 28 - var extra uint = tail - (prefix << nbits) - (*commands)[0] = uint32(code | extra<<8) - *commands = (*commands)[1:] - } else if copylen < 136 { - var tail uint = copylen - 8 - var code uint = (tail >> 5) + 54 - var extra uint = tail & 31 - (*commands)[0] = uint32(code | extra<<8) - *commands = (*commands)[1:] - (*commands)[0] = 64 - *commands = (*commands)[1:] - } else if copylen < 2120 { - var tail uint = copylen - 72 - var nbits uint = uint(log2FloorNonZero(tail)) - var code uint = nbits + 52 - var extra uint = tail - (uint(1) << nbits) - (*commands)[0] = uint32(code | extra<<8) - *commands = (*commands)[1:] - (*commands)[0] = 64 - *commands = (*commands)[1:] - } else { - var extra uint = copylen - 2120 - (*commands)[0] = uint32(63 | extra<<8) - *commands = (*commands)[1:] - (*commands)[0] = 64 - *commands = (*commands)[1:] - } -} - -func emitDistance(distance uint32, commands *[]uint32) { - var 
d uint32 = distance + 3 - var nbits uint32 = log2FloorNonZero(uint(d)) - 1 - var prefix uint32 = (d >> nbits) & 1 - var offset uint32 = (2 + prefix) << nbits - var distcode uint32 = 2*(nbits-1) + prefix + 80 - var extra uint32 = d - offset - (*commands)[0] = distcode | extra<<8 - *commands = (*commands)[1:] -} - -/* REQUIRES: len <= 1 << 24. */ -func storeMetaBlockHeader(len uint, is_uncompressed bool, storage_ix *uint, storage []byte) { - var nibbles uint = 6 - - /* ISLAST */ - writeBits(1, 0, storage_ix, storage) - - if len <= 1<<16 { - nibbles = 4 - } else if len <= 1<<20 { - nibbles = 5 - } - - writeBits(2, uint64(nibbles)-4, storage_ix, storage) - writeBits(nibbles*4, uint64(len)-1, storage_ix, storage) - - /* ISUNCOMPRESSED */ - writeSingleBit(is_uncompressed, storage_ix, storage) -} - -func createCommands(input []byte, block_size uint, input_size uint, base_ip_ptr []byte, table []int, table_bits uint, min_match uint, literals *[]byte, commands *[]uint32) { - var ip int = 0 - var shift uint = 64 - table_bits - var ip_end int = int(block_size) - var base_ip int = -cap(base_ip_ptr) + cap(input) - var next_emit int = 0 - var last_distance int = -1 - /* "ip" is the input pointer. */ - - const kInputMarginBytes uint = windowGap - - /* "next_emit" is a pointer to the first byte that is not covered by a - previous copy. Bytes between "next_emit" and the start of the next copy or - the end of the input will be emitted as literal bytes. */ - if block_size >= kInputMarginBytes { - var len_limit uint = brotli_min_size_t(block_size-min_match, input_size-kInputMarginBytes) - var ip_limit int = int(len_limit) - /* For the last block, we need to keep a 16 bytes margin so that we can be - sure that all distances are at most window size - 16. - For all other blocks, we only need to keep a margin of 5 bytes so that - we don't go over the block size with a copy. */ - - var next_hash uint32 - ip++ - for next_hash = hash1(input[ip:], shift, min_match); ; { - var skip uint32 = 32 - var next_ip int = ip - /* Step 1: Scan forward in the input looking for a 6-byte-long match. - If we get close to exhausting the input then goto emit_remainder. - - Heuristic match skipping: If 32 bytes are scanned with no matches - found, start looking only at every other byte. If 32 more bytes are - scanned, look at every third byte, etc.. When a match is found, - immediately go back to looking at every byte. This is a small loss - (~5% performance, ~0.1% density) for compressible data due to more - bookkeeping, but for non-compressible data (such as JPEG) it's a huge - win since the compressor quickly "realizes" the data is incompressible - and doesn't bother looking for matches everywhere. - - The "skip" variable keeps track of how many bytes there are since the - last match; dividing it by 32 (ie. right-shifting by five) gives the - number of bytes to move ahead for each iteration. 
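The skipping heuristic described in the removed comment above can be illustrated in isolation. A toy sketch (the findByte helper is hypothetical and ignores hashing entirely); it only shows how the (skip >> 5) stride schedule lets the scan back off on data with no matches:

```go
package main

import "fmt"

// findByte is a toy stand-in for the removed hash-based match search: it scans
// data for target using the same stride schedule, advancing by (skip >> 5)
// positions per probe while skip grows by one, so the stride is 1 for the
// first 32 probes, then 2, then 3, and so on.
func findByte(data []byte, target byte) (pos, probes int) {
	skip := uint32(32) // same starting value as in the removed createCommands
	for i := 0; i < len(data); {
		probes++
		if data[i] == target {
			return i, probes
		}
		i += int(skip >> 5)
		skip++
	}
	return -1, probes
}

func main() {
	data := make([]byte, 4096) // all zeros, so the target is never found
	_, probes := findByte(data, 0xFF)
	fmt.Printf("scanned %d bytes with only %d probes\n", len(data), probes)
}
```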
*/ - - var candidate int - - assert(next_emit < ip) - - trawl: - for { - var hash uint32 = next_hash - var bytes_between_hash_lookups uint32 = skip >> 5 - skip++ - ip = next_ip - assert(hash == hash1(input[ip:], shift, min_match)) - next_ip = int(uint32(ip) + bytes_between_hash_lookups) - if next_ip > ip_limit { - goto emit_remainder - } - - next_hash = hash1(input[next_ip:], shift, min_match) - candidate = ip - last_distance - if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { - if candidate < ip { - table[hash] = int(ip - base_ip) - break - } - } - - candidate = base_ip + table[hash] - assert(candidate >= base_ip) - assert(candidate < ip) - - table[hash] = int(ip - base_ip) - if isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { - break - } - } - - /* Check copy distance. If candidate is not feasible, continue search. - Checking is done outside of hot loop to reduce overhead. */ - if ip-candidate > maxDistance_compress_fragment { - goto trawl - } - - /* Step 2: Emit the found match together with the literal bytes from - "next_emit", and then see if we can find a next match immediately - afterwards. Repeat until we find no match for the input - without emitting some literal bytes. */ - { - var base int = ip - /* > 0 */ - var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) - var distance int = int(base - candidate) - /* We have a 6-byte match at ip, and we need to emit bytes in - [next_emit, ip). */ - - var insert int = int(base - next_emit) - ip += int(matched) - emitInsertLen(uint32(insert), commands) - copy(*literals, input[next_emit:][:uint(insert)]) - *literals = (*literals)[insert:] - if distance == last_distance { - (*commands)[0] = 64 - *commands = (*commands)[1:] - } else { - emitDistance(uint32(distance), commands) - last_distance = distance - } - - emitCopyLenLastDistance(matched, commands) - - next_emit = ip - if ip >= ip_limit { - goto emit_remainder - } - { - var input_bytes uint64 - var cur_hash uint32 - /* We could immediately start working at ip now, but to improve - compression we first update "table" with the hashes of some - positions within the last copy. 
*/ - - var prev_hash uint32 - if min_match == 4 { - input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) - cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 3) - prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) - table[prev_hash] = int(ip - base_ip - 2) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 1) - } else { - input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 5) - prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) - table[prev_hash] = int(ip - base_ip - 4) - prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) - table[prev_hash] = int(ip - base_ip - 3) - input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) - cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 2) - prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) - table[prev_hash] = int(ip - base_ip - 1) - } - - candidate = base_ip + table[cur_hash] - table[cur_hash] = int(ip - base_ip) - } - } - - for ip-candidate <= maxDistance_compress_fragment && isMatch1(input[ip:], base_ip_ptr[candidate-base_ip:], min_match) { - var base int = ip - /* We have a 6-byte match at ip, and no need to emit any - literal bytes prior to ip. */ - - var matched uint = min_match + findMatchLengthWithLimit(base_ip_ptr[uint(candidate-base_ip)+min_match:], input[uint(ip)+min_match:], uint(ip_end-ip)-min_match) - ip += int(matched) - last_distance = int(base - candidate) /* > 0 */ - emitCopyLen(matched, commands) - emitDistance(uint32(last_distance), commands) - - next_emit = ip - if ip >= ip_limit { - goto emit_remainder - } - { - var input_bytes uint64 - var cur_hash uint32 - /* We could immediately start working at ip now, but to improve - compression we first update "table" with the hashes of some - positions within the last copy. 
*/ - - var prev_hash uint32 - if min_match == 4 { - input_bytes = binary.LittleEndian.Uint64(input[ip-3:]) - cur_hash = hashBytesAtOffset(input_bytes, 3, shift, min_match) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 3) - prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) - table[prev_hash] = int(ip - base_ip - 2) - prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) - table[prev_hash] = int(ip - base_ip - 1) - } else { - input_bytes = binary.LittleEndian.Uint64(input[ip-5:]) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 5) - prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) - table[prev_hash] = int(ip - base_ip - 4) - prev_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) - table[prev_hash] = int(ip - base_ip - 3) - input_bytes = binary.LittleEndian.Uint64(input[ip-2:]) - cur_hash = hashBytesAtOffset(input_bytes, 2, shift, min_match) - prev_hash = hashBytesAtOffset(input_bytes, 0, shift, min_match) - table[prev_hash] = int(ip - base_ip - 2) - prev_hash = hashBytesAtOffset(input_bytes, 1, shift, min_match) - table[prev_hash] = int(ip - base_ip - 1) - } - - candidate = base_ip + table[cur_hash] - table[cur_hash] = int(ip - base_ip) - } - } - - ip++ - next_hash = hash1(input[ip:], shift, min_match) - } - } - -emit_remainder: - assert(next_emit <= ip_end) - - /* Emit the remaining bytes as literals. */ - if next_emit < ip_end { - var insert uint32 = uint32(ip_end - next_emit) - emitInsertLen(insert, commands) - copy(*literals, input[next_emit:][:insert]) - *literals = (*literals)[insert:] - } -} - -var storeCommands_kNumExtraBits = [128]uint32{ - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 5, - 6, - 7, - 8, - 9, - 10, - 12, - 14, - 24, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 5, - 6, - 7, - 8, - 9, - 10, - 24, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 2, - 2, - 3, - 3, - 4, - 4, - 5, - 5, - 6, - 6, - 7, - 7, - 8, - 8, - 9, - 9, - 10, - 10, - 11, - 11, - 12, - 12, - 13, - 13, - 14, - 14, - 15, - 15, - 16, - 16, - 17, - 17, - 18, - 18, - 19, - 19, - 20, - 20, - 21, - 21, - 22, - 22, - 23, - 23, - 24, - 24, -} -var storeCommands_kInsertOffset = [24]uint32{ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 8, - 10, - 14, - 18, - 26, - 34, - 50, - 66, - 98, - 130, - 194, - 322, - 578, - 1090, - 2114, - 6210, - 22594, -} - -func storeCommands(literals []byte, num_literals uint, commands []uint32, num_commands uint, storage_ix *uint, storage []byte) { - var lit_depths [256]byte - var lit_bits [256]uint16 - var lit_histo = [256]uint32{0} - var cmd_depths = [128]byte{0} - var cmd_bits = [128]uint16{0} - var cmd_histo = [128]uint32{0} - var i uint - for i = 0; i < num_literals; i++ { - lit_histo[literals[i]]++ - } - - buildAndStoreHuffmanTreeFast(lit_histo[:], num_literals, /* max_bits = */ - 8, lit_depths[:], lit_bits[:], storage_ix, storage) - - for i = 0; i < num_commands; i++ { - var code uint32 = commands[i] & 0xFF - assert(code < 128) - cmd_histo[code]++ - } - - cmd_histo[1] += 1 - cmd_histo[2] += 1 - cmd_histo[64] += 1 - cmd_histo[84] += 1 - buildAndStoreCommandPrefixCode(cmd_histo[:], cmd_depths[:], cmd_bits[:], storage_ix, storage) - - for i = 0; i < num_commands; i++ { - var cmd uint32 = commands[i] - var code uint32 = cmd & 0xFF - var 
extra uint32 = cmd >> 8 - assert(code < 128) - writeBits(uint(cmd_depths[code]), uint64(cmd_bits[code]), storage_ix, storage) - writeBits(uint(storeCommands_kNumExtraBits[code]), uint64(extra), storage_ix, storage) - if code < 24 { - var insert uint32 = storeCommands_kInsertOffset[code] + extra - var j uint32 - for j = 0; j < insert; j++ { - var lit byte = literals[0] - writeBits(uint(lit_depths[lit]), uint64(lit_bits[lit]), storage_ix, storage) - literals = literals[1:] - } - } - } -} - -/* Acceptable loss for uncompressible speedup is 2% */ -const minRatio = 0.98 - -const sampleRate = 43 - -func shouldCompress(input []byte, input_size uint, num_literals uint) bool { - var corpus_size float64 = float64(input_size) - if float64(num_literals) < minRatio*corpus_size { - return true - } else { - var literal_histo = [256]uint32{0} - var max_total_bit_cost float64 = corpus_size * 8 * minRatio / sampleRate - var i uint - for i = 0; i < input_size; i += sampleRate { - literal_histo[input[i]]++ - } - - return bitsEntropy(literal_histo[:], 256) < max_total_bit_cost - } -} - -func rewindBitPosition(new_storage_ix uint, storage_ix *uint, storage []byte) { - var bitpos uint = new_storage_ix & 7 - var mask uint = (1 << bitpos) - 1 - storage[new_storage_ix>>3] &= byte(mask) - *storage_ix = new_storage_ix -} - -func emitUncompressedMetaBlock(input []byte, input_size uint, storage_ix *uint, storage []byte) { - storeMetaBlockHeader(input_size, true, storage_ix, storage) - *storage_ix = (*storage_ix + 7) &^ 7 - copy(storage[*storage_ix>>3:], input[:input_size]) - *storage_ix += input_size << 3 - storage[*storage_ix>>3] = 0 -} - -func compressFragmentTwoPassImpl(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_bits uint, min_match uint, storage_ix *uint, storage []byte) { - /* Save the start of the first block for position and distance computations. - */ - var base_ip []byte = input - - for input_size > 0 { - var block_size uint = brotli_min_size_t(input_size, kCompressFragmentTwoPassBlockSize) - var commands []uint32 = command_buf - var literals []byte = literal_buf - var num_literals uint - createCommands(input, block_size, input_size, base_ip, table, table_bits, min_match, &literals, &commands) - num_literals = uint(-cap(literals) + cap(literal_buf)) - if shouldCompress(input, block_size, num_literals) { - var num_commands uint = uint(-cap(commands) + cap(command_buf)) - storeMetaBlockHeader(block_size, false, storage_ix, storage) - - /* No block splits, no contexts. */ - writeBits(13, 0, storage_ix, storage) - - storeCommands(literal_buf, num_literals, command_buf, num_commands, storage_ix, storage) - } else { - /* Since we did not find many backward references and the entropy of - the data is close to 8 bits, we can simply emit an uncompressed block. - This makes compression speed of uncompressible data about 3x faster. */ - emitUncompressedMetaBlock(input, block_size, storage_ix, storage) - } - - input = input[block_size:] - input_size -= block_size - } -} - -/* Compresses "input" string to the "*storage" buffer as one or more complete - meta-blocks, and updates the "*storage_ix" bit position. - - If "is_last" is 1, emits an additional empty last meta-block. - - REQUIRES: "input_size" is greater than zero, or "is_last" is 1. - REQUIRES: "input_size" is less or equal to maximal metablock size (1 << 24). - REQUIRES: "command_buf" and "literal_buf" point to at least - kCompressFragmentTwoPassBlockSize long arrays. 
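The removed shouldCompress heuristic samples one literal out of every 43 and compares the samples' ideal coded size against 98% of their raw size. A standalone sketch of that fallback path, assuming a plain Shannon-entropy helper in place of the vendored bitsEntropy (all names here are illustrative):

```go
package main

import (
	"crypto/rand"
	"fmt"
	"math"
)

// totalEntropyBits is an independent stand-in for the removed bitsEntropy
// helper: it returns the ideal (Shannon-coded) size, in bits, of the bytes
// counted by the histogram.
func totalEntropyBits(histo []uint32) float64 {
	var total uint32
	for _, c := range histo {
		total += c
	}
	var bits float64
	for _, c := range histo {
		if c != 0 {
			bits -= float64(c) * math.Log2(float64(c)/float64(total))
		}
	}
	return bits
}

// shouldCompressSketch reproduces the sampling fallback of the removed
// shouldCompress: look at one byte per 43 and emit a compressed meta-block
// only if those samples would ideally code to under 98% of their raw size.
// The earlier num_literals shortcut is omitted here.
func shouldCompressSketch(input []byte) bool {
	const minRatio, sampleRate = 0.98, 43
	var histo [256]uint32
	for i := 0; i < len(input); i += sampleRate {
		histo[input[i]]++
	}
	maxTotalBitCost := float64(len(input)) * 8 * minRatio / sampleRate
	return totalEntropyBits(histo[:]) < maxTotalBitCost
}

func main() {
	repetitive := make([]byte, 1<<20)
	for i := range repetitive {
		repetitive[i] = byte("abcd"[i%4]) // low-entropy input: worth compressing
	}
	random := make([]byte, 1<<20)
	rand.Read(random) // near-uniform input: expected to be left uncompressed
	fmt.Println(shouldCompressSketch(repetitive), shouldCompressSketch(random))
}
```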
- REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero. - REQUIRES: "table_size" is a power of two - OUTPUT: maximal copy distance <= |input_size| - OUTPUT: maximal copy distance <= BROTLI_MAX_BACKWARD_LIMIT(18) */ -func compressFragmentTwoPass(input []byte, input_size uint, is_last bool, command_buf []uint32, literal_buf []byte, table []int, table_size uint, storage_ix *uint, storage []byte) { - var initial_storage_ix uint = *storage_ix - var table_bits uint = uint(log2FloorNonZero(table_size)) - var min_match uint - if table_bits <= 15 { - min_match = 4 - } else { - min_match = 6 - } - compressFragmentTwoPassImpl(input, input_size, is_last, command_buf, literal_buf, table, table_bits, min_match, storage_ix, storage) - - /* If output is larger than single uncompressed block, rewrite it. */ - if *storage_ix-initial_storage_ix > 31+(input_size<<3) { - rewindBitPosition(initial_storage_ix, storage_ix, storage) - emitUncompressedMetaBlock(input, input_size, storage_ix, storage) - } - - if is_last { - writeBits(1, 1, storage_ix, storage) /* islast */ - writeBits(1, 1, storage_ix, storage) /* isempty */ - *storage_ix = (*storage_ix + 7) &^ 7 - } -} diff --git a/vendor/github.com/andybalholm/brotli/constants.go b/vendor/github.com/andybalholm/brotli/constants.go deleted file mode 100644 index a880dff..0000000 --- a/vendor/github.com/andybalholm/brotli/constants.go +++ /dev/null @@ -1,77 +0,0 @@ -package brotli - -/* Copyright 2016 Google Inc. All Rights Reserved. - - Distributed under MIT license. - See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -/* Specification: 7.3. Encoding of the context map */ -const contextMapMaxRle = 16 - -/* Specification: 2. Compressed representation overview */ -const maxNumberOfBlockTypes = 256 - -/* Specification: 3.3. Alphabet sizes: insert-and-copy length */ -const numLiteralSymbols = 256 - -const numCommandSymbols = 704 - -const numBlockLenSymbols = 26 - -const maxContextMapSymbols = (maxNumberOfBlockTypes + contextMapMaxRle) - -const maxBlockTypeSymbols = (maxNumberOfBlockTypes + 2) - -/* Specification: 3.5. Complex prefix codes */ -const repeatPreviousCodeLength = 16 - -const repeatZeroCodeLength = 17 - -const codeLengthCodes = (repeatZeroCodeLength + 1) - -/* "code length of 8 is repeated" */ -const initialRepeatedCodeLength = 8 - -/* "Large Window Brotli" */ -const largeMaxDistanceBits = 62 - -const largeMinWbits = 10 - -const largeMaxWbits = 30 - -/* Specification: 4. Encoding of distances */ -const numDistanceShortCodes = 16 - -const maxNpostfix = 3 - -const maxNdirect = 120 - -const maxDistanceBits = 24 - -func distanceAlphabetSize(NPOSTFIX uint, NDIRECT uint, MAXNBITS uint) uint { - return numDistanceShortCodes + NDIRECT + uint(MAXNBITS<<(NPOSTFIX+1)) -} - -/* numDistanceSymbols == 1128 */ -const numDistanceSymbols = 1128 - -const maxDistance = 0x3FFFFFC - -const maxAllowedDistance = 0x7FFFFFFC - -/* 7.1. Context modes and context ID lookup for literals */ -/* "context IDs for literals are in the range of 0..63" */ -const literalContextBits = 6 - -/* 7.2. Context ID for distances */ -const distanceContextBits = 2 - -/* 9.1. Format of the Stream Header */ -/* Number of slack bytes for window size. Don't confuse - with BROTLI_NUM_DISTANCE_SHORT_CODES. 
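The distanceAlphabetSize formula above can be checked by hand: the largest allowed parameters together with the 62-bit "Large Window Brotli" distance count reproduce the numDistanceSymbols constant. A minimal worked example (a re-implementation of the removed one-liner, not the vendored code itself):

```go
package main

import "fmt"

// distanceAlphabetSize mirrors the removed helper: 16 short codes, NDIRECT
// direct codes, plus MAXNBITS shifted left by NPOSTFIX+1.
func distanceAlphabetSize(npostfix, ndirect, maxnbits uint) uint {
	return 16 + ndirect + maxnbits<<(npostfix+1)
}

func main() {
	// maxNpostfix=3, maxNdirect=120, largeMaxDistanceBits=62 from the removed
	// constants file: 16 + 120 + 62<<4 = 1128 == numDistanceSymbols.
	fmt.Println(distanceAlphabetSize(3, 120, 62)) // 1128
}
```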
*/ -const windowGap = 16 - -func maxBackwardLimit(W uint) uint { - return (uint(1) << W) - windowGap -} diff --git a/vendor/github.com/andybalholm/brotli/context.go b/vendor/github.com/andybalholm/brotli/context.go deleted file mode 100644 index 884ff8a..0000000 --- a/vendor/github.com/andybalholm/brotli/context.go +++ /dev/null @@ -1,2176 +0,0 @@ -package brotli - -/* Lookup table to map the previous two bytes to a context id. - -There are four different context modeling modes defined here: - contextLSB6: context id is the least significant 6 bits of the last byte, - contextMSB6: context id is the most significant 6 bits of the last byte, - contextUTF8: second-order context model tuned for UTF8-encoded text, - contextSigned: second-order context model tuned for signed integers. - -If |p1| and |p2| are the previous two bytes, and |mode| is current context -mode, we calculate the context as: - - context = ContextLut(mode)[p1] | ContextLut(mode)[p2 + 256]. - -For contextUTF8 mode, if the previous two bytes are ASCII characters -(i.e. < 128), this will be equivalent to - - context = 4 * context1(p1) + context2(p2), - -where context1 is based on the previous byte in the following way: - - 0 : non-ASCII control - 1 : \t, \n, \r - 2 : space - 3 : other punctuation - 4 : " ' - 5 : % - 6 : ( < [ { - 7 : ) > ] } - 8 : , ; : - 9 : . - 10 : = - 11 : number - 12 : upper-case vowel - 13 : upper-case consonant - 14 : lower-case vowel - 15 : lower-case consonant - -and context2 is based on the second last byte: - - 0 : control, space - 1 : punctuation - 2 : upper-case letter, number - 3 : lower-case letter - -If the last byte is ASCII, and the second last byte is not (in a valid UTF8 -stream it will be a continuation byte, value between 128 and 191), the -context is the same as if the second last byte was an ASCII control or space. - -If the last byte is a UTF8 lead byte (value >= 192), then the next byte will -be a continuation byte and the context id is 2 or 3 depending on the LSB of -the last byte and to a lesser extent on the second last byte if it is ASCII. - -If the last byte is a UTF8 continuation byte, the second last byte can be: - - continuation byte: the next byte is probably ASCII or lead byte (assuming - 4-byte UTF8 characters are rare) and the context id is 0 or 1. - - lead byte (192 - 207): next byte is ASCII or lead byte, context is 0 or 1 - - lead byte (208 - 255): next byte is continuation byte, context is 2 or 3 - -The possible value combinations of the previous two bytes, the range of -context ids and the type of the next byte is summarized in the table below: - -|--------\-----------------------------------------------------------------| -| \ Last byte | -| Second \---------------------------------------------------------------| -| last byte \ ASCII | cont. byte | lead byte | -| \ (0-127) | (128-191) | (192-) | -|=============|===================|=====================|==================| -| ASCII | next: ASCII/lead | not valid | next: cont. | -| (0-127) | context: 4 - 63 | | context: 2 - 3 | -|-------------|-------------------|---------------------|------------------| -| cont. byte | next: ASCII/lead | next: ASCII/lead | next: cont. | -| (128-191) | context: 4 - 63 | context: 0 - 1 | context: 2 - 3 | -|-------------|-------------------|---------------------|------------------| -| lead byte | not valid | next: ASCII/lead | not valid | -| (192-207) | | context: 0 - 1 | | -|-------------|-------------------|---------------------|------------------| -| lead byte | not valid | next: cont. 
| not valid | -| (208-) | | context: 2 - 3 | | -|-------------|-------------------|---------------------|------------------| -*/ - -const ( - contextLSB6 = 0 - contextMSB6 = 1 - contextUTF8 = 2 - contextSigned = 3 -) - -/* Common context lookup table for all context modes. */ -var kContextLookup = [2048]byte{ - /* CONTEXT_LSB6, last byte. */ - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 46, - 47, - 48, - 49, - 50, - 51, - 52, - 53, - 54, - 55, - 56, - 57, - 58, - 59, - 60, - 61, - 62, - 63, - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 46, - 47, - 48, - 49, - 50, - 51, - 52, - 53, - 54, - 55, - 56, - 57, - 58, - 59, - 60, - 61, - 62, - 63, - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 46, - 47, - 48, - 49, - 50, - 51, - 52, - 53, - 54, - 55, - 56, - 57, - 58, - 59, - 60, - 61, - 62, - 63, - 0, - 1, - 2, - 3, - 4, - 5, - 6, - 7, - 8, - 9, - 10, - 11, - 12, - 13, - 14, - 15, - 16, - 17, - 18, - 19, - 20, - 21, - 22, - 23, - 24, - 25, - 26, - 27, - 28, - 29, - 30, - 31, - 32, - 33, - 34, - 35, - 36, - 37, - 38, - 39, - 40, - 41, - 42, - 43, - 44, - 45, - 46, - 47, - 48, - 49, - 50, - 51, - 52, - 53, - 54, - 55, - 56, - 57, - 58, - 59, - 60, - 61, - 62, - 63, - - /* CONTEXT_LSB6, second last byte, */ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - - /* CONTEXT_MSB6, last byte. 
*/ - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 3, - 3, - 3, - 3, - 4, - 4, - 4, - 4, - 5, - 5, - 5, - 5, - 6, - 6, - 6, - 6, - 7, - 7, - 7, - 7, - 8, - 8, - 8, - 8, - 9, - 9, - 9, - 9, - 10, - 10, - 10, - 10, - 11, - 11, - 11, - 11, - 12, - 12, - 12, - 12, - 13, - 13, - 13, - 13, - 14, - 14, - 14, - 14, - 15, - 15, - 15, - 15, - 16, - 16, - 16, - 16, - 17, - 17, - 17, - 17, - 18, - 18, - 18, - 18, - 19, - 19, - 19, - 19, - 20, - 20, - 20, - 20, - 21, - 21, - 21, - 21, - 22, - 22, - 22, - 22, - 23, - 23, - 23, - 23, - 24, - 24, - 24, - 24, - 25, - 25, - 25, - 25, - 26, - 26, - 26, - 26, - 27, - 27, - 27, - 27, - 28, - 28, - 28, - 28, - 29, - 29, - 29, - 29, - 30, - 30, - 30, - 30, - 31, - 31, - 31, - 31, - 32, - 32, - 32, - 32, - 33, - 33, - 33, - 33, - 34, - 34, - 34, - 34, - 35, - 35, - 35, - 35, - 36, - 36, - 36, - 36, - 37, - 37, - 37, - 37, - 38, - 38, - 38, - 38, - 39, - 39, - 39, - 39, - 40, - 40, - 40, - 40, - 41, - 41, - 41, - 41, - 42, - 42, - 42, - 42, - 43, - 43, - 43, - 43, - 44, - 44, - 44, - 44, - 45, - 45, - 45, - 45, - 46, - 46, - 46, - 46, - 47, - 47, - 47, - 47, - 48, - 48, - 48, - 48, - 49, - 49, - 49, - 49, - 50, - 50, - 50, - 50, - 51, - 51, - 51, - 51, - 52, - 52, - 52, - 52, - 53, - 53, - 53, - 53, - 54, - 54, - 54, - 54, - 55, - 55, - 55, - 55, - 56, - 56, - 56, - 56, - 57, - 57, - 57, - 57, - 58, - 58, - 58, - 58, - 59, - 59, - 59, - 59, - 60, - 60, - 60, - 60, - 61, - 61, - 61, - 61, - 62, - 62, - 62, - 62, - 63, - 63, - 63, - 63, - - /* CONTEXT_MSB6, second last byte, */ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - - /* CONTEXT_UTF8, last byte. */ - /* ASCII range. 
*/ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 4, - 4, - 0, - 0, - 4, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 8, - 12, - 16, - 12, - 12, - 20, - 12, - 16, - 24, - 28, - 12, - 12, - 32, - 12, - 36, - 12, - 44, - 44, - 44, - 44, - 44, - 44, - 44, - 44, - 44, - 44, - 32, - 32, - 24, - 40, - 28, - 12, - 12, - 48, - 52, - 52, - 52, - 48, - 52, - 52, - 52, - 48, - 52, - 52, - 52, - 52, - 52, - 48, - 52, - 52, - 52, - 52, - 52, - 48, - 52, - 52, - 52, - 52, - 52, - 24, - 12, - 28, - 12, - 12, - 12, - 56, - 60, - 60, - 60, - 56, - 60, - 60, - 60, - 56, - 60, - 60, - 60, - 60, - 60, - 56, - 60, - 60, - 60, - 60, - 60, - 56, - 60, - 60, - 60, - 60, - 60, - 24, - 12, - 28, - 12, - 0, - - /* UTF8 continuation byte range. */ - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - 0, - 1, - - /* UTF8 lead byte range. */ - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - 2, - 3, - - /* CONTEXT_UTF8 second last byte. */ - /* ASCII range. */ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 1, - 1, - 1, - 1, - 1, - 1, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 1, - 1, - 1, - 1, - 0, - - /* UTF8 continuation byte range. */ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - - /* UTF8 lead byte range. */ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - - /* CONTEXT_SIGNED, last byte, same as the above values shifted by 3 bits. 
*/ - 0, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 8, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 16, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 24, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 32, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 40, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 48, - 56, - - /* CONTEXT_SIGNED, second last byte. */ - 0, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 1, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 2, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 3, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 4, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 5, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 6, - 7, -} - -type contextLUT []byte - -func getContextLUT(mode int) contextLUT { - return kContextLookup[mode<<9:] -} - -func getContext(p1 byte, p2 byte, lut contextLUT) byte { - return lut[p1] | lut[256+int(p2)] -} diff --git a/vendor/github.com/andybalholm/brotli/decode.go b/vendor/github.com/andybalholm/brotli/decode.go deleted file mode 100644 index 9d9513b..0000000 --- a/vendor/github.com/andybalholm/brotli/decode.go +++ /dev/null @@ -1,2581 +0,0 @@ -package brotli - -/* Copyright 2013 Google Inc. All Rights Reserved. - - Distributed under MIT license. 
- See file LICENSE for detail or copy at https://opensource.org/licenses/MIT -*/ - -const ( - decoderResultError = 0 - decoderResultSuccess = 1 - decoderResultNeedsMoreInput = 2 - decoderResultNeedsMoreOutput = 3 -) - -/** - * Error code for detailed logging / production debugging. - * - * See ::BrotliDecoderGetErrorCode and ::BROTLI_LAST_ERROR_CODE. - */ -const ( - decoderNoError = 0 - decoderSuccess = 1 - decoderNeedsMoreInput = 2 - decoderNeedsMoreOutput = 3 - decoderErrorFormatExuberantNibble = -1 - decoderErrorFormatReserved = -2 - decoderErrorFormatExuberantMetaNibble = -3 - decoderErrorFormatSimpleHuffmanAlphabet = -4 - decoderErrorFormatSimpleHuffmanSame = -5 - decoderErrorFormatClSpace = -6 - decoderErrorFormatHuffmanSpace = -7 - decoderErrorFormatContextMapRepeat = -8 - decoderErrorFormatBlockLength1 = -9 - decoderErrorFormatBlockLength2 = -10 - decoderErrorFormatTransform = -11 - decoderErrorFormatDictionary = -12 - decoderErrorFormatWindowBits = -13 - decoderErrorFormatPadding1 = -14 - decoderErrorFormatPadding2 = -15 - decoderErrorFormatDistance = -16 - decoderErrorDictionaryNotSet = -19 - decoderErrorInvalidArguments = -20 - decoderErrorAllocContextModes = -21 - decoderErrorAllocTreeGroups = -22 - decoderErrorAllocContextMap = -25 - decoderErrorAllocRingBuffer1 = -26 - decoderErrorAllocRingBuffer2 = -27 - decoderErrorAllocBlockTypeTrees = -30 - decoderErrorUnreachable = -31 -) - -const huffmanTableBits = 8 - -const huffmanTableMask = 0xFF - -/* We need the slack region for the following reasons: - - doing up to two 16-byte copies for fast backward copying - - inserting transformed dictionary word (5 prefix + 24 base + 8 suffix) */ -const kRingBufferWriteAheadSlack uint32 = 42 - -var kCodeLengthCodeOrder = [codeLengthCodes]byte{1, 2, 3, 4, 0, 5, 17, 6, 16, 7, 8, 9, 10, 11, 12, 13, 14, 15} - -/* Static prefix code for the complex code length code lengths. */ -var kCodeLengthPrefixLength = [16]byte{2, 2, 2, 3, 2, 2, 2, 4, 2, 2, 2, 3, 2, 2, 2, 4} - -var kCodeLengthPrefixValue = [16]byte{0, 4, 3, 2, 0, 4, 3, 1, 0, 4, 3, 2, 0, 4, 3, 5} - -/* Saves error code and converts it to BrotliDecoderResult. */ -func saveErrorCode(s *Reader, e int) int { - s.error_code = int(e) - switch e { - case decoderSuccess: - return decoderResultSuccess - - case decoderNeedsMoreInput: - return decoderResultNeedsMoreInput - - case decoderNeedsMoreOutput: - return decoderResultNeedsMoreOutput - - default: - return decoderResultError - } -} - -/* Decodes WBITS by reading 1 - 7 bits, or 0x11 for "Large Window Brotli". - Precondition: bit-reader accumulator has at least 8 bits. */ -func decodeWindowBits(s *Reader, br *bitReader) int { - var n uint32 - var large_window bool = s.large_window - s.large_window = false - takeBits(br, 1, &n) - if n == 0 { - s.window_bits = 16 - return decoderSuccess - } - - takeBits(br, 3, &n) - if n != 0 { - s.window_bits = 17 + n - return decoderSuccess - } - - takeBits(br, 3, &n) - if n == 1 { - if large_window { - takeBits(br, 1, &n) - if n == 1 { - return decoderErrorFormatWindowBits - } - - s.large_window = true - return decoderSuccess - } else { - return decoderErrorFormatWindowBits - } - } - - if n != 0 { - s.window_bits = 8 + n - return decoderSuccess - } - - s.window_bits = 17 - return decoderSuccess -} - -/* Decodes a number in the range [0..255], by reading 1 - 11 bits. 
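The removed decodeVarLenUint8 reads a flag bit, then an optional 3-bit exponent and that many extra bits. A self-contained sketch of the same scheme over a toy bit reader (bitSource and the sample bit string below are made up for illustration, not taken from a real brotli stream):

```go
package main

import "fmt"

// bitSource is a toy LSB-first bit reader standing in for the decoder's
// bitReader, which is not reproduced here.
type bitSource struct {
	data []byte
	pos  uint // absolute bit position
}

func (b *bitSource) take(n uint) uint32 {
	var v uint32
	for i := uint(0); i < n; i++ {
		v |= uint32((b.data[b.pos>>3]>>(b.pos&7))&1) << i
		b.pos++
	}
	return v
}

// decodeVarLenUint8Sketch follows the scheme of the removed decodeVarLenUint8:
// a zero flag bit means 0; otherwise a 3-bit exponent selects either the value
// 1 (exponent 0) or (1 << exponent) plus that many extra bits. The cost is
// 1-11 bits and the result always fits in [0..255].
func decodeVarLenUint8Sketch(b *bitSource) uint32 {
	if b.take(1) == 0 {
		return 0
	}
	nbits := b.take(3)
	if nbits == 0 {
		return 1
	}
	return (1 << nbits) + b.take(uint(nbits))
}

func main() {
	// Hand-packed sample stream encoding the values 0, 1 and 6 back to back.
	bits := []byte{0 /* =0 */, 1, 0, 0, 0 /* =1 */, 1, 0, 1, 0, 0, 1 /* =6 */}
	packed := make([]byte, 2)
	for i, bit := range bits {
		packed[i/8] |= bit << (i % 8)
	}
	src := &bitSource{data: packed}
	fmt.Println(decodeVarLenUint8Sketch(src), decodeVarLenUint8Sketch(src), decodeVarLenUint8Sketch(src))
}
```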
*/ -func decodeVarLenUint8(s *Reader, br *bitReader, value *uint32) int { - var bits uint32 - switch s.substate_decode_uint8 { - case stateDecodeUint8None: - if !safeReadBits(br, 1, &bits) { - return decoderNeedsMoreInput - } - - if bits == 0 { - *value = 0 - return decoderSuccess - } - fallthrough - - /* Fall through. */ - case stateDecodeUint8Short: - if !safeReadBits(br, 3, &bits) { - s.substate_decode_uint8 = stateDecodeUint8Short - return decoderNeedsMoreInput - } - - if bits == 0 { - *value = 1 - s.substate_decode_uint8 = stateDecodeUint8None - return decoderSuccess - } - - /* Use output value as a temporary storage. It MUST be persisted. */ - *value = bits - fallthrough - - /* Fall through. */ - case stateDecodeUint8Long: - if !safeReadBits(br, *value, &bits) { - s.substate_decode_uint8 = stateDecodeUint8Long - return decoderNeedsMoreInput - } - - *value = (1 << *value) + bits - s.substate_decode_uint8 = stateDecodeUint8None - return decoderSuccess - - default: - return decoderErrorUnreachable - } -} - -/* Decodes a metablock length and flags by reading 2 - 31 bits. */ -func decodeMetaBlockLength(s *Reader, br *bitReader) int { - var bits uint32 - var i int - for { - switch s.substate_metablock_header { - case stateMetablockHeaderNone: - if !safeReadBits(br, 1, &bits) { - return decoderNeedsMoreInput - } - - if bits != 0 { - s.is_last_metablock = 1 - } else { - s.is_last_metablock = 0 - } - s.meta_block_remaining_len = 0 - s.is_uncompressed = 0 - s.is_metadata = 0 - if s.is_last_metablock == 0 { - s.substate_metablock_header = stateMetablockHeaderNibbles - break - } - - s.substate_metablock_header = stateMetablockHeaderEmpty - fallthrough - - /* Fall through. */ - case stateMetablockHeaderEmpty: - if !safeReadBits(br, 1, &bits) { - return decoderNeedsMoreInput - } - - if bits != 0 { - s.substate_metablock_header = stateMetablockHeaderNone - return decoderSuccess - } - - s.substate_metablock_header = stateMetablockHeaderNibbles - fallthrough - - /* Fall through. */ - case stateMetablockHeaderNibbles: - if !safeReadBits(br, 2, &bits) { - return decoderNeedsMoreInput - } - - s.size_nibbles = uint(byte(bits + 4)) - s.loop_counter = 0 - if bits == 3 { - s.is_metadata = 1 - s.substate_metablock_header = stateMetablockHeaderReserved - break - } - - s.substate_metablock_header = stateMetablockHeaderSize - fallthrough - - /* Fall through. */ - case stateMetablockHeaderSize: - i = s.loop_counter - - for ; i < int(s.size_nibbles); i++ { - if !safeReadBits(br, 4, &bits) { - s.loop_counter = i - return decoderNeedsMoreInput - } - - if uint(i+1) == s.size_nibbles && s.size_nibbles > 4 && bits == 0 { - return decoderErrorFormatExuberantNibble - } - - s.meta_block_remaining_len |= int(bits << uint(i*4)) - } - - s.substate_metablock_header = stateMetablockHeaderUncompressed - fallthrough - - /* Fall through. */ - case stateMetablockHeaderUncompressed: - if s.is_last_metablock == 0 { - if !safeReadBits(br, 1, &bits) { - return decoderNeedsMoreInput - } - - if bits != 0 { - s.is_uncompressed = 1 - } else { - s.is_uncompressed = 0 - } - } - - s.meta_block_remaining_len++ - s.substate_metablock_header = stateMetablockHeaderNone - return decoderSuccess - - case stateMetablockHeaderReserved: - if !safeReadBits(br, 1, &bits) { - return decoderNeedsMoreInput - } - - if bits != 0 { - return decoderErrorFormatReserved - } - - s.substate_metablock_header = stateMetablockHeaderBytes - fallthrough - - /* Fall through. 
*/ - case stateMetablockHeaderBytes: - if !safeReadBits(br, 2, &bits) { - return decoderNeedsMoreInput - } - - if bits == 0 { - s.substate_metablock_header = stateMetablockHeaderNone - return decoderSuccess - } - - s.size_nibbles = uint(byte(bits)) - s.substate_metablock_header = stateMetablockHeaderMetadata - fallthrough - - /* Fall through. */ - case stateMetablockHeaderMetadata: - i = s.loop_counter - - for ; i < int(s.size_nibbles); i++ { - if !safeReadBits(br, 8, &bits) { - s.loop_counter = i - return decoderNeedsMoreInput - } - - if uint(i+1) == s.size_nibbles && s.size_nibbles > 1 && bits == 0 { - return decoderErrorFormatExuberantMetaNibble - } - - s.meta_block_remaining_len |= int(bits << uint(i*8)) - } - - s.meta_block_remaining_len++ - s.substate_metablock_header = stateMetablockHeaderNone - return decoderSuccess - - default: - return decoderErrorUnreachable - } - } -} - -/* Decodes the Huffman code. - This method doesn't read data from the bit reader, BUT drops the amount of - bits that correspond to the decoded symbol. - bits MUST contain at least 15 (BROTLI_HUFFMAN_MAX_CODE_LENGTH) valid bits. */ -func decodeSymbol(bits uint32, table []huffmanCode, br *bitReader) uint32 { - table = table[bits&huffmanTableMask:] - if table[0].bits > huffmanTableBits { - var nbits uint32 = uint32(table[0].bits) - huffmanTableBits - dropBits(br, huffmanTableBits) - table = table[uint32(table[0].value)+((bits>>huffmanTableBits)&bitMask(nbits)):] - } - - dropBits(br, uint32(table[0].bits)) - return uint32(table[0].value) -} - -/* Reads and decodes the next Huffman code from bit-stream. - This method peeks 16 bits of input and drops 0 - 15 of them. */ -func readSymbol(table []huffmanCode, br *bitReader) uint32 { - return decodeSymbol(get16BitsUnmasked(br), table, br) -} - -/* Same as DecodeSymbol, but it is known that there is less than 15 bits of - input are currently available. */ -func safeDecodeSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { - var val uint32 - var available_bits uint32 = getAvailableBits(br) - if available_bits == 0 { - if table[0].bits == 0 { - *result = uint32(table[0].value) - return true - } - - return false /* No valid bits at all. */ - } - - val = uint32(getBitsUnmasked(br)) - table = table[val&huffmanTableMask:] - if table[0].bits <= huffmanTableBits { - if uint32(table[0].bits) <= available_bits { - dropBits(br, uint32(table[0].bits)) - *result = uint32(table[0].value) - return true - } else { - return false /* Not enough bits for the first level. */ - } - } - - if available_bits <= huffmanTableBits { - return false /* Not enough bits to move to the second level. */ - } - - /* Speculatively drop HUFFMAN_TABLE_BITS. */ - val = (val & bitMask(uint32(table[0].bits))) >> huffmanTableBits - - available_bits -= huffmanTableBits - table = table[uint32(table[0].value)+val:] - if available_bits < uint32(table[0].bits) { - return false /* Not enough bits for the second level. */ - } - - dropBits(br, huffmanTableBits+uint32(table[0].bits)) - *result = uint32(table[0].value) - return true -} - -func safeReadSymbol(table []huffmanCode, br *bitReader, result *uint32) bool { - var val uint32 - if safeGetBits(br, 15, &val) { - *result = decodeSymbol(val, table, br) - return true - } - - return safeDecodeSymbol(table, br, result) -} - -/* Makes a look-up in first level Huffman table. Peeks 8 bits. 
*/ -func preloadSymbol(safe int, table []huffmanCode, br *bitReader, bits *uint32, value *uint32) { - if safe != 0 { - return - } - - table = table[getBits(br, huffmanTableBits):] - *bits = uint32(table[0].bits) - *value = uint32(table[0].value) -} - -/* Decodes the next Huffman code using data prepared by PreloadSymbol. - Reads 0 - 15 bits. Also peeks 8 following bits. */ -func readPreloadedSymbol(table []huffmanCode, br *bitReader, bits *uint32, value *uint32) uint32 { - var result uint32 = *value - var ext []huffmanCode - if *bits > huffmanTableBits { - var val uint32 = get16BitsUnmasked(br) - ext = table[val&huffmanTableMask:][*value:] - var mask uint32 = bitMask((*bits - huffmanTableBits)) - dropBits(br, huffmanTableBits) - ext = ext[(val>>huffmanTableBits)&mask:] - dropBits(br, uint32(ext[0].bits)) - result = uint32(ext[0].value) - } else { - dropBits(br, *bits) - } - - preloadSymbol(0, table, br, bits, value) - return result -} - -func log2Floor(x uint32) uint32 { - var result uint32 = 0 - for x != 0 { - x >>= 1 - result++ - } - - return result -} - -/* Reads (s->symbol + 1) symbols. - Totally 1..4 symbols are read, 1..11 bits each. - The list of symbols MUST NOT contain duplicates. */ -func readSimpleHuffmanSymbols(alphabet_size uint32, max_symbol uint32, s *Reader) int { - var br *bitReader = &s.br - var max_bits uint32 = log2Floor(alphabet_size - 1) - var i uint32 = s.sub_loop_counter - /* max_bits == 1..11; symbol == 0..3; 1..44 bits will be read. */ - - var num_symbols uint32 = s.symbol - for i <= num_symbols { - var v uint32 - if !safeReadBits(br, max_bits, &v) { - s.sub_loop_counter = i - s.substate_huffman = stateHuffmanSimpleRead - return decoderNeedsMoreInput - } - - if v >= max_symbol { - return decoderErrorFormatSimpleHuffmanAlphabet - } - - s.symbols_lists_array[i] = uint16(v) - i++ - } - - for i = 0; i < num_symbols; i++ { - var k uint32 = i + 1 - for ; k <= num_symbols; k++ { - if s.symbols_lists_array[i] == s.symbols_lists_array[k] { - return decoderErrorFormatSimpleHuffmanSame - } - } - } - - return decoderSuccess -} - -/* Process single decoded symbol code length: - A) reset the repeat variable - B) remember code length (if it is not 0) - C) extend corresponding index-chain - D) reduce the Huffman space - E) update the histogram */ -func processSingleCodeLength(code_len uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { - *repeat = 0 - if code_len != 0 { /* code_len == 1..15 */ - symbolListPut(symbol_lists, next_symbol[code_len], uint16(*symbol)) - next_symbol[code_len] = int(*symbol) - *prev_code_len = code_len - *space -= 32768 >> code_len - code_length_histo[code_len]++ - } - - (*symbol)++ -} - -/* Process repeated symbol code length. 
- A) Check if it is the extension of previous repeat sequence; if the decoded - value is not BROTLI_REPEAT_PREVIOUS_CODE_LENGTH, then it is a new - symbol-skip - B) Update repeat variable - C) Check if operation is feasible (fits alphabet) - D) For each symbol do the same operations as in ProcessSingleCodeLength - - PRECONDITION: code_len == BROTLI_REPEAT_PREVIOUS_CODE_LENGTH or - code_len == BROTLI_REPEAT_ZERO_CODE_LENGTH */ -func processRepeatedCodeLength(code_len uint32, repeat_delta uint32, alphabet_size uint32, symbol *uint32, repeat *uint32, space *uint32, prev_code_len *uint32, repeat_code_len *uint32, symbol_lists symbolList, code_length_histo []uint16, next_symbol []int) { - var old_repeat uint32 /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ /* for BROTLI_REPEAT_ZERO_CODE_LENGTH */ - var extra_bits uint32 = 3 - var new_len uint32 = 0 - if code_len == repeatPreviousCodeLength { - new_len = *prev_code_len - extra_bits = 2 - } - - if *repeat_code_len != new_len { - *repeat = 0 - *repeat_code_len = new_len - } - - old_repeat = *repeat - if *repeat > 0 { - *repeat -= 2 - *repeat <<= extra_bits - } - - *repeat += repeat_delta + 3 - repeat_delta = *repeat - old_repeat - if *symbol+repeat_delta > alphabet_size { - *symbol = alphabet_size - *space = 0xFFFFF - return - } - - if *repeat_code_len != 0 { - var last uint = uint(*symbol + repeat_delta) - var next int = next_symbol[*repeat_code_len] - for { - symbolListPut(symbol_lists, next, uint16(*symbol)) - next = int(*symbol) - (*symbol)++ - if (*symbol) == uint32(last) { - break - } - } - - next_symbol[*repeat_code_len] = next - *space -= repeat_delta << (15 - *repeat_code_len) - code_length_histo[*repeat_code_len] = uint16(uint32(code_length_histo[*repeat_code_len]) + repeat_delta) - } else { - *symbol += repeat_delta - } -} - -/* Reads and decodes symbol codelengths. */ -func readSymbolCodeLengths(alphabet_size uint32, s *Reader) int { - var br *bitReader = &s.br - var symbol uint32 = s.symbol - var repeat uint32 = s.repeat - var space uint32 = s.space - var prev_code_len uint32 = s.prev_code_len - var repeat_code_len uint32 = s.repeat_code_len - var symbol_lists symbolList = s.symbol_lists - var code_length_histo []uint16 = s.code_length_histo[:] - var next_symbol []int = s.next_symbol[:] - if !warmupBitReader(br) { - return decoderNeedsMoreInput - } - var p []huffmanCode - for symbol < alphabet_size && space > 0 { - p = s.table[:] - var code_len uint32 - if !checkInputAmount(br, shortFillBitWindowRead) { - s.symbol = symbol - s.repeat = repeat - s.prev_code_len = prev_code_len - s.repeat_code_len = repeat_code_len - s.space = space - return decoderNeedsMoreInput - } - - fillBitWindow16(br) - p = p[getBitsUnmasked(br)&uint64(bitMask(huffmanMaxCodeLengthCodeLength)):] - dropBits(br, uint32(p[0].bits)) /* Use 1..5 bits. 
*/ - code_len = uint32(p[0].value) /* code_len == 0..17 */ - if code_len < repeatPreviousCodeLength { - processSingleCodeLength(code_len, &symbol, &repeat, &space, &prev_code_len, symbol_lists, code_length_histo, next_symbol) /* code_len == 16..17, extra_bits == 2..3 */ - } else { - var extra_bits uint32 - if code_len == repeatPreviousCodeLength { - extra_bits = 2 - } else { - extra_bits = 3 - } - var repeat_delta uint32 = uint32(getBitsUnmasked(br)) & bitMask(extra_bits) - dropBits(br, extra_bits) - processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &symbol, &repeat, &space, &prev_code_len, &repeat_code_len, symbol_lists, code_length_histo, next_symbol) - } - } - - s.space = space - return decoderSuccess -} - -func safeReadSymbolCodeLengths(alphabet_size uint32, s *Reader) int { - var br *bitReader = &s.br - var get_byte bool = false - var p []huffmanCode - for s.symbol < alphabet_size && s.space > 0 { - p = s.table[:] - var code_len uint32 - var available_bits uint32 - var bits uint32 = 0 - if get_byte && !pullByte(br) { - return decoderNeedsMoreInput - } - get_byte = false - available_bits = getAvailableBits(br) - if available_bits != 0 { - bits = uint32(getBitsUnmasked(br)) - } - - p = p[bits&bitMask(huffmanMaxCodeLengthCodeLength):] - if uint32(p[0].bits) > available_bits { - get_byte = true - continue - } - - code_len = uint32(p[0].value) /* code_len == 0..17 */ - if code_len < repeatPreviousCodeLength { - dropBits(br, uint32(p[0].bits)) - processSingleCodeLength(code_len, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) /* code_len == 16..17, extra_bits == 2..3 */ - } else { - var extra_bits uint32 = code_len - 14 - var repeat_delta uint32 = (bits >> p[0].bits) & bitMask(extra_bits) - if available_bits < uint32(p[0].bits)+extra_bits { - get_byte = true - continue - } - - dropBits(br, uint32(p[0].bits)+extra_bits) - processRepeatedCodeLength(code_len, repeat_delta, alphabet_size, &s.symbol, &s.repeat, &s.space, &s.prev_code_len, &s.repeat_code_len, s.symbol_lists, s.code_length_histo[:], s.next_symbol[:]) - } - } - - return decoderSuccess -} - -/* Reads and decodes 15..18 codes using static prefix code. - Each code is 2..4 bits long. In total 30..72 bits are used. */ -func readCodeLengthCodeLengths(s *Reader) int { - var br *bitReader = &s.br - var num_codes uint32 = s.repeat - var space uint32 = s.space - var i uint32 = s.sub_loop_counter - for ; i < codeLengthCodes; i++ { - var code_len_idx byte = kCodeLengthCodeOrder[i] - var ix uint32 - var v uint32 - if !safeGetBits(br, 4, &ix) { - var available_bits uint32 = getAvailableBits(br) - if available_bits != 0 { - ix = uint32(getBitsUnmasked(br) & 0xF) - } else { - ix = 0 - } - - if uint32(kCodeLengthPrefixLength[ix]) > available_bits { - s.sub_loop_counter = i - s.repeat = num_codes - s.space = space - s.substate_huffman = stateHuffmanComplex - return decoderNeedsMoreInput - } - } - - v = uint32(kCodeLengthPrefixValue[ix]) - dropBits(br, uint32(kCodeLengthPrefixLength[ix])) - s.code_length_code_lengths[code_len_idx] = byte(v) - if v != 0 { - space = space - (32 >> v) - num_codes++ - s.code_length_histo[v]++ - if space-1 >= 32 { - /* space is 0 or wrapped around. */ - break - } - } - } - - if num_codes != 1 && space != 0 { - return decoderErrorFormatClSpace - } - - return decoderSuccess -} - -/* Decodes the Huffman tables. - There are 2 scenarios: - A) Huffman code contains only few symbols (1..4). 
Those symbols are read - directly; their code lengths are defined by the number of symbols. - For this scenario 4 - 49 bits will be read. - - B) 2-phase decoding: - B.1) Small Huffman table is decoded; it is specified with code lengths - encoded with predefined entropy code. 32 - 74 bits are used. - B.2) Decoded table is used to decode code lengths of symbols in resulting - Huffman table. In worst case 3520 bits are read. */ -func readHuffmanCode(alphabet_size uint32, max_symbol uint32, table []huffmanCode, opt_table_size *uint32, s *Reader) int { - var br *bitReader = &s.br - - /* Unnecessary masking, but might be good for safety. */ - alphabet_size &= 0x7FF - - /* State machine. */ - for { - switch s.substate_huffman { - case stateHuffmanNone: - if !safeReadBits(br, 2, &s.sub_loop_counter) { - return decoderNeedsMoreInput - } - - /* The value is used as follows: - 1 for simple code; - 0 for no skipping, 2 skips 2 code lengths, 3 skips 3 code lengths */ - if s.sub_loop_counter != 1 { - s.space = 32 - s.repeat = 0 /* num_codes */ - var i int - for i = 0; i <= huffmanMaxCodeLengthCodeLength; i++ { - s.code_length_histo[i] = 0 - } - - for i = 0; i < codeLengthCodes; i++ { - s.code_length_code_lengths[i] = 0 - } - - s.substate_huffman = stateHuffmanComplex - continue - } - fallthrough - - /* Read symbols, codes & code lengths directly. */ - case stateHuffmanSimpleSize: - if !safeReadBits(br, 2, &s.symbol) { /* num_symbols */ - s.substate_huffman = stateHuffmanSimpleSize - return decoderNeedsMoreInput - } - - s.sub_loop_counter = 0 - fallthrough - - case stateHuffmanSimpleRead: - { - var result int = readSimpleHuffmanSymbols(alphabet_size, max_symbol, s) - if result != decoderSuccess { - return result - } - } - fallthrough - - case stateHuffmanSimpleBuild: - var table_size uint32 - if s.symbol == 3 { - var bits uint32 - if !safeReadBits(br, 1, &bits) { - s.substate_huffman = stateHuffmanSimpleBuild - return decoderNeedsMoreInput - } - - s.symbol += bits - } - - table_size = buildSimpleHuffmanTable(table, huffmanTableBits, s.symbols_lists_array[:], s.symbol) - if opt_table_size != nil { - *opt_table_size = table_size - } - - s.substate_huffman = stateHuffmanNone - return decoderSuccess - - /* Decode Huffman-coded code lengths. 
*/ - case stateHuffmanComplex: - { - var i uint32 - var result int = readCodeLengthCodeLengths(s) - if result != decoderSuccess { - return result - } - - buildCodeLengthsHuffmanTable(s.table[:], s.code_length_code_lengths[:], s.code_length_histo[:]) - for i = 0; i < 16; i++ { - s.code_length_histo[i] = 0 - } - - for i = 0; i <= huffmanMaxCodeLength; i++ { - s.next_symbol[i] = int(i) - (huffmanMaxCodeLength + 1) - symbolListPut(s.symbol_lists, s.next_symbol[i], 0xFFFF) - } - - s.symbol = 0 - s.prev_code_len = initialRepeatedCodeLength - s.repeat = 0 - s.repeat_code_len = 0 - s.space = 32768 - s.substate_huffman = stateHuffmanLengthSymbols - } - fallthrough - - case stateHuffmanLengthSymbols: - var table_size uint32 - var result int = readSymbolCodeLengths(max_symbol, s) - if result == decoderNeedsMoreInput { - result = safeReadSymbolCodeLengths(max_symbol, s) - } - - if result != decoderSuccess { - return result - } - - if s.space != 0 { - return decoderErrorFormatHuffmanSpace - } - - table_size = buildHuffmanTable(table, huffmanTableBits, s.symbol_lists, s.code_length_histo[:]) - if opt_table_size != nil { - *opt_table_size = table_size - } - - s.substate_huffman = stateHuffmanNone - return decoderSuccess - - default: - return decoderErrorUnreachable - } - } -} - -/* Decodes a block length by reading 3..39 bits. */ -func readBlockLength(table []huffmanCode, br *bitReader) uint32 { - var code uint32 - var nbits uint32 - code = readSymbol(table, br) - nbits = kBlockLengthPrefixCode[code].nbits /* nbits == 2..24 */ - return kBlockLengthPrefixCode[code].offset + readBits(br, nbits) -} - -/* WARNING: if state is not BROTLI_STATE_READ_BLOCK_LENGTH_NONE, then - reading can't be continued with ReadBlockLength. */ -func safeReadBlockLength(s *Reader, result *uint32, table []huffmanCode, br *bitReader) bool { - var index uint32 - if s.substate_read_block_length == stateReadBlockLengthNone { - if !safeReadSymbol(table, br, &index) { - return false - } - } else { - index = s.block_length_index - } - { - var bits uint32 /* nbits == 2..24 */ - var nbits uint32 = kBlockLengthPrefixCode[index].nbits - if !safeReadBits(br, nbits, &bits) { - s.block_length_index = index - s.substate_read_block_length = stateReadBlockLengthSuffix - return false - } - - *result = kBlockLengthPrefixCode[index].offset + bits - s.substate_read_block_length = stateReadBlockLengthNone - return true - } -} - -/* Transform: - 1) initialize list L with values 0, 1,... 255 - 2) For each input element X: - 2.1) let Y = L[X] - 2.2) remove X-th element from L - 2.3) prepend Y to L - 2.4) append Y to output - - In most cases max(Y) <= 7, so most of L remains intact. - To reduce the cost of initialization, we reuse L, remember the upper bound - of Y values, and reinitialize only first elements in L. - - Most of input values are 0 and 1. To reduce number of branches, we replace - inner for loop with do-while. */ -func inverseMoveToFrontTransform(v []byte, v_len uint32, state *Reader) { - var mtf [256]byte - var i int - for i = 1; i < 256; i++ { - mtf[i] = byte(i) - } - var mtf_1 byte - - /* Transform the input. */ - for i = 0; uint32(i) < v_len; i++ { - var index int = int(v[i]) - var value byte = mtf[index] - v[i] = value - mtf_1 = value - for index >= 1 { - index-- - mtf[index+1] = mtf[index] - } - - mtf[0] = mtf_1 - } -} - -/* Decodes a series of Huffman table using ReadHuffmanCode function. 
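The inverse move-to-front transform spelled out in the removed comment is easy to reproduce on its own. A plain sketch, without the reuse/partial-reinitialization optimization the vendored code applies:

```go
package main

import "fmt"

// inverseMTF applies the transform described in the removed comment, in place:
// each input value indexes the current list, the indexed entry is emitted, and
// that entry is moved to the front of the list.
func inverseMTF(v []byte) {
	var mtf [256]byte
	for i := range mtf {
		mtf[i] = byte(i)
	}
	for i, x := range v {
		idx := int(x)
		y := mtf[idx]
		copy(mtf[1:idx+1], mtf[:idx]) // shift the first idx entries right by one
		mtf[0] = y                    // ...and put the emitted value in front
		v[i] = y
	}
}

func main() {
	// A small MTF-coded context map (made-up values; real maps are mostly 0s
	// and 1s, as the removed comment points out).
	encoded := []byte{3, 0, 0, 1, 1, 0, 2}
	inverseMTF(encoded)
	fmt.Println(encoded)
}
```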
*/ -func huffmanTreeGroupDecode(group *huffmanTreeGroup, s *Reader) int { - if s.substate_tree_group != stateTreeGroupLoop { - s.next = group.codes - s.htree_index = 0 - s.substate_tree_group = stateTreeGroupLoop - } - - for s.htree_index < int(group.num_htrees) { - var table_size uint32 - var result int = readHuffmanCode(uint32(group.alphabet_size), uint32(group.max_symbol), s.next, &table_size, s) - if result != decoderSuccess { - return result - } - group.htrees[s.htree_index] = s.next - s.next = s.next[table_size:] - s.htree_index++ - } - - s.substate_tree_group = stateTreeGroupNone - return decoderSuccess -} - -/* Decodes a context map. - Decoding is done in 4 phases: - 1) Read auxiliary information (6..16 bits) and allocate memory. - In case of trivial context map, decoding is finished at this phase. - 2) Decode Huffman table using ReadHuffmanCode function. - This table will be used for reading context map items. - 3) Read context map items; "0" values could be run-length encoded. - 4) Optionally, apply InverseMoveToFront transform to the resulting map. */ -func decodeContextMap(context_map_size uint32, num_htrees *uint32, context_map_arg *[]byte, s *Reader) int { - var br *bitReader = &s.br - var result int = decoderSuccess - - switch int(s.substate_context_map) { - case stateContextMapNone: - result = decodeVarLenUint8(s, br, num_htrees) - if result != decoderSuccess { - return result - } - - (*num_htrees)++ - s.context_index = 0 - *context_map_arg = make([]byte, uint(context_map_size)) - if *context_map_arg == nil { - return decoderErrorAllocContextMap - } - - if *num_htrees <= 1 { - for i := 0; i < int(context_map_size); i++ { - (*context_map_arg)[i] = 0 - } - return decoderSuccess - } - - s.substate_context_map = stateContextMapReadPrefix - fallthrough - /* Fall through. */ - case stateContextMapReadPrefix: - { - var bits uint32 - - /* In next stage ReadHuffmanCode uses at least 4 bits, so it is safe - to peek 4 bits ahead. */ - if !safeGetBits(br, 5, &bits) { - return decoderNeedsMoreInput - } - - if bits&1 != 0 { /* Use RLE for zeros. */ - s.max_run_length_prefix = (bits >> 1) + 1 - dropBits(br, 5) - } else { - s.max_run_length_prefix = 0 - dropBits(br, 1) - } - - s.substate_context_map = stateContextMapHuffman - } - fallthrough - - /* Fall through. */ - case stateContextMapHuffman: - { - var alphabet_size uint32 = *num_htrees + s.max_run_length_prefix - result = readHuffmanCode(alphabet_size, alphabet_size, s.context_map_table[:], nil, s) - if result != decoderSuccess { - return result - } - s.code = 0xFFFF - s.substate_context_map = stateContextMapDecode - } - fallthrough - - /* Fall through. */ - case stateContextMapDecode: - { - var context_index uint32 = s.context_index - var max_run_length_prefix uint32 = s.max_run_length_prefix - var context_map []byte = *context_map_arg - var code uint32 = s.code - var skip_preamble bool = (code != 0xFFFF) - for context_index < context_map_size || skip_preamble { - if !skip_preamble { - if !safeReadSymbol(s.context_map_table[:], br, &code) { - s.code = 0xFFFF - s.context_index = context_index - return decoderNeedsMoreInput - } - - if code == 0 { - context_map[context_index] = 0 - context_index++ - continue - } - - if code > max_run_length_prefix { - context_map[context_index] = byte(code - max_run_length_prefix) - context_index++ - continue - } - } else { - skip_preamble = false - } - - /* RLE sub-stage. 
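Once decoded, the context map is a flat table indexed by block type and context ID. Below is a small sketch of how the later literal loop consumes it, assuming 64 literal contexts per block type (matching literalContextBits = 6); the helper name and sample values are mine.

package main

import "fmt"

const contextsPerBlockType = 1 << 6 // 64 literal contexts per block type

// htreeForLiteral maps (literal block type, context ID) to the index of the
// Huffman tree that will decode the next literal.
func htreeForLiteral(contextMap []byte, blockType, contextID int) int {
	return int(contextMap[blockType*contextsPerBlockType+contextID])
}

func main() {
	// Two literal block types: type 0 is trivial (all zeros), while the upper
	// half of type 1's contexts share a second tree.
	m := make([]byte, 2*contextsPerBlockType)
	for c := 32; c < 64; c++ {
		m[contextsPerBlockType+c] = 1
	}
	fmt.Println(htreeForLiteral(m, 0, 5))  // 0
	fmt.Println(htreeForLiteral(m, 1, 40)) // 1
}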
*/ - { - var reps uint32 - if !safeReadBits(br, code, &reps) { - s.code = code - s.context_index = context_index - return decoderNeedsMoreInput - } - - reps += 1 << code - if context_index+reps > context_map_size { - return decoderErrorFormatContextMapRepeat - } - - for { - context_map[context_index] = 0 - context_index++ - reps-- - if reps == 0 { - break - } - } - } - } - } - fallthrough - - case stateContextMapTransform: - var bits uint32 - if !safeReadBits(br, 1, &bits) { - s.substate_context_map = stateContextMapTransform - return decoderNeedsMoreInput - } - - if bits != 0 { - inverseMoveToFrontTransform(*context_map_arg, context_map_size, s) - } - - s.substate_context_map = stateContextMapNone - return decoderSuccess - - default: - return decoderErrorUnreachable - } -} - -/* Decodes a command or literal and updates block type ring-buffer. - Reads 3..54 bits. */ -func decodeBlockTypeAndLength(safe int, s *Reader, tree_type int) bool { - var max_block_type uint32 = s.num_block_types[tree_type] - type_tree := s.block_type_trees[tree_type*huffmanMaxSize258:] - len_tree := s.block_len_trees[tree_type*huffmanMaxSize26:] - var br *bitReader = &s.br - var ringbuffer []uint32 = s.block_type_rb[tree_type*2:] - var block_type uint32 - if max_block_type <= 1 { - return false - } - - /* Read 0..15 + 3..39 bits. */ - if safe == 0 { - block_type = readSymbol(type_tree, br) - s.block_length[tree_type] = readBlockLength(len_tree, br) - } else { - var memento bitReaderState - bitReaderSaveState(br, &memento) - if !safeReadSymbol(type_tree, br, &block_type) { - return false - } - if !safeReadBlockLength(s, &s.block_length[tree_type], len_tree, br) { - s.substate_read_block_length = stateReadBlockLengthNone - bitReaderRestoreState(br, &memento) - return false - } - } - - if block_type == 1 { - block_type = ringbuffer[1] + 1 - } else if block_type == 0 { - block_type = ringbuffer[0] - } else { - block_type -= 2 - } - - if block_type >= max_block_type { - block_type -= max_block_type - } - - ringbuffer[0] = ringbuffer[1] - ringbuffer[1] = block_type - return true -} - -func detectTrivialLiteralBlockTypes(s *Reader) { - var i uint - for i = 0; i < 8; i++ { - s.trivial_literal_contexts[i] = 0 - } - for i = 0; uint32(i) < s.num_block_types[0]; i++ { - var offset uint = i << literalContextBits - var error uint = 0 - var sample uint = uint(s.context_map[offset]) - var j uint - for j = 0; j < 1<>5] |= 1 << (i & 31) - } - } -} - -func prepareLiteralDecoding(s *Reader) { - var context_mode byte - var trivial uint - var block_type uint32 = s.block_type_rb[1] - var context_offset uint32 = block_type << literalContextBits - s.context_map_slice = s.context_map[context_offset:] - trivial = uint(s.trivial_literal_contexts[block_type>>5]) - s.trivial_literal_context = int((trivial >> (block_type & 31)) & 1) - s.literal_htree = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[0]]) - context_mode = s.context_modes[block_type] & 3 - s.context_lookup = getContextLUT(int(context_mode)) -} - -/* Decodes the block type and updates the state for literal context. - Reads 3..54 bits. */ -func decodeLiteralBlockSwitchInternal(safe int, s *Reader) bool { - if !decodeBlockTypeAndLength(safe, s, 0) { - return false - } - - prepareLiteralDecoding(s) - return true -} - -func decodeLiteralBlockSwitch(s *Reader) { - decodeLiteralBlockSwitchInternal(0, s) -} - -func safeDecodeLiteralBlockSwitch(s *Reader) bool { - return decodeLiteralBlockSwitchInternal(1, s) -} - -/* Block switch for insert/copy length. - Reads 3..54 bits. 
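The block-switch rule in decodeBlockTypeAndLength is compact but easy to misread; this standalone sketch spells it out. The helper name and the seed values are illustrative, not the decoder's state layout.

package main

import "fmt"

// nextBlockType applies the block-switch mapping: symbol 0 reuses the block
// type before last, symbol 1 means "last type + 1", and any larger symbol
// names a type explicitly (symbol - 2). The result wraps modulo the number of
// block types, and the two-entry ring buffer is shifted.
func nextBlockType(rb *[2]uint32, symbol, numBlockTypes uint32) uint32 {
	var t uint32
	switch symbol {
	case 0:
		t = rb[0]
	case 1:
		t = rb[1] + 1
	default:
		t = symbol - 2
	}
	if t >= numBlockTypes {
		t -= numBlockTypes
	}
	rb[0], rb[1] = rb[1], t
	return t
}

func main() {
	rb := [2]uint32{1, 0}
	fmt.Println(nextBlockType(&rb, 1, 3)) // 1: last type (0) plus one
	fmt.Println(nextBlockType(&rb, 0, 3)) // 0: reuse the type before last
}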
*/ -func decodeCommandBlockSwitchInternal(safe int, s *Reader) bool { - if !decodeBlockTypeAndLength(safe, s, 1) { - return false - } - - s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[s.block_type_rb[3]]) - return true -} - -func decodeCommandBlockSwitch(s *Reader) { - decodeCommandBlockSwitchInternal(0, s) -} - -func safeDecodeCommandBlockSwitch(s *Reader) bool { - return decodeCommandBlockSwitchInternal(1, s) -} - -/* Block switch for distance codes. - Reads 3..54 bits. */ -func decodeDistanceBlockSwitchInternal(safe int, s *Reader) bool { - if !decodeBlockTypeAndLength(safe, s, 2) { - return false - } - - s.dist_context_map_slice = s.dist_context_map[s.block_type_rb[5]< s.ringbuffer_size { - pos = uint(s.ringbuffer_size) - } else { - pos = uint(s.pos) - } - var partial_pos_rb uint = (s.rb_roundtrips * uint(s.ringbuffer_size)) + pos - return partial_pos_rb - s.partial_pos_out -} - -/* Dumps output. - Returns BROTLI_DECODER_NEEDS_MORE_OUTPUT only if there is more output to push - and either ring-buffer is as big as window size, or |force| is true. */ -func writeRingBuffer(s *Reader, available_out *uint, next_out *[]byte, total_out *uint, force bool) int { - start := s.ringbuffer[s.partial_pos_out&uint(s.ringbuffer_mask):] - var to_write uint = unwrittenBytes(s, true) - var num_written uint = *available_out - if num_written > to_write { - num_written = to_write - } - - if s.meta_block_remaining_len < 0 { - return decoderErrorFormatBlockLength1 - } - - if next_out != nil && *next_out == nil { - *next_out = start - } else { - if next_out != nil { - copy(*next_out, start[:num_written]) - *next_out = (*next_out)[num_written:] - } - } - - *available_out -= num_written - s.partial_pos_out += num_written - if total_out != nil { - *total_out = s.partial_pos_out - } - - if num_written < to_write { - if s.ringbuffer_size == 1<= s.ringbuffer_size { - s.pos -= s.ringbuffer_size - s.rb_roundtrips++ - if uint(s.pos) != 0 { - s.should_wrap_ringbuffer = 1 - } else { - s.should_wrap_ringbuffer = 0 - } - } - - return decoderSuccess -} - -func wrapRingBuffer(s *Reader) { - if s.should_wrap_ringbuffer != 0 { - copy(s.ringbuffer, s.ringbuffer_end[:uint(s.pos)]) - s.should_wrap_ringbuffer = 0 - } -} - -/* Allocates ring-buffer. - - s->ringbuffer_size MUST be updated by BrotliCalculateRingBufferSize before - this function is called. - - Last two bytes of ring-buffer are initialized to 0, so context calculation - could be done uniformly for the first two and all other positions. */ -func ensureRingBuffer(s *Reader) bool { - var old_ringbuffer []byte - if s.ringbuffer_size == s.new_ringbuffer_size { - return true - } - spaceNeeded := int(s.new_ringbuffer_size) + int(kRingBufferWriteAheadSlack) - if len(s.ringbuffer) < spaceNeeded { - old_ringbuffer = s.ringbuffer - s.ringbuffer = make([]byte, spaceNeeded) - } - - s.ringbuffer[s.new_ringbuffer_size-2] = 0 - s.ringbuffer[s.new_ringbuffer_size-1] = 0 - - if old_ringbuffer != nil { - copy(s.ringbuffer, old_ringbuffer[:uint(s.pos)]) - } - - s.ringbuffer_size = s.new_ringbuffer_size - s.ringbuffer_mask = s.new_ringbuffer_size - 1 - s.ringbuffer_end = s.ringbuffer[s.ringbuffer_size:] - - return true -} - -func copyUncompressedBlockToOutput(available_out *uint, next_out *[]byte, total_out *uint, s *Reader) int { - /* TODO: avoid allocation for single uncompressed block. 
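unwrittenBytes above boils down to simple bookkeeping: total bytes produced (full ring-buffer roundtrips plus the current write position) minus bytes already handed to the caller. A minimal sketch of that calculation; the parameter names mirror s.pos, s.rb_roundtrips and s.partial_pos_out, but the helper itself is mine.

package main

import "fmt"

// pendingOutput returns how many decoded bytes are still waiting to be pushed
// to the caller.
func pendingOutput(rbSize, pos, roundtrips, alreadyOut uint) uint {
	if pos > rbSize {
		pos = rbSize // only count up to the end of the ring buffer
	}
	return roundtrips*rbSize + pos - alreadyOut
}

func main() {
	// 64 KiB ring buffer, one full roundtrip plus 100 freshly decoded bytes,
	// 65000 bytes already written out.
	fmt.Println(pendingOutput(1<<16, 100, 1, 65000)) // 636
}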
*/ - if !ensureRingBuffer(s) { - return decoderErrorAllocRingBuffer1 - } - - /* State machine */ - for { - switch s.substate_uncompressed { - case stateUncompressedNone: - { - var nbytes int = int(getRemainingBytes(&s.br)) - if nbytes > s.meta_block_remaining_len { - nbytes = s.meta_block_remaining_len - } - - if s.pos+nbytes > s.ringbuffer_size { - nbytes = s.ringbuffer_size - s.pos - } - - /* Copy remaining bytes from s->br.buf_ to ring-buffer. */ - copyBytes(s.ringbuffer[s.pos:], &s.br, uint(nbytes)) - - s.pos += nbytes - s.meta_block_remaining_len -= nbytes - if s.pos < 1<>1 >= min_size { - new_ringbuffer_size >>= 1 - } - } - - s.new_ringbuffer_size = new_ringbuffer_size -} - -/* Reads 1..256 2-bit context modes. */ -func readContextModes(s *Reader) int { - var br *bitReader = &s.br - var i int = s.loop_counter - - for i < int(s.num_block_types[0]) { - var bits uint32 - if !safeReadBits(br, 2, &bits) { - s.loop_counter = i - return decoderNeedsMoreInput - } - - s.context_modes[i] = byte(bits) - i++ - } - - return decoderSuccess -} - -func takeDistanceFromRingBuffer(s *Reader) { - if s.distance_code == 0 { - s.dist_rb_idx-- - s.distance_code = s.dist_rb[s.dist_rb_idx&3] - - /* Compensate double distance-ring-buffer roll for dictionary items. */ - s.distance_context = 1 - } else { - var distance_code int = s.distance_code << 1 - const kDistanceShortCodeIndexOffset uint32 = 0xAAAFFF1B - const kDistanceShortCodeValueOffset uint32 = 0xFA5FA500 - var v int = (s.dist_rb_idx + int(kDistanceShortCodeIndexOffset>>uint(distance_code))) & 0x3 - /* kDistanceShortCodeIndexOffset has 2-bit values from LSB: - 3, 2, 1, 0, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2 */ - - /* kDistanceShortCodeValueOffset has 2-bit values from LSB: - -0, 0,-0, 0,-1, 1,-2, 2,-3, 3,-1, 1,-2, 2,-3, 3 */ - s.distance_code = s.dist_rb[v] - - v = int(kDistanceShortCodeValueOffset>>uint(distance_code)) & 0x3 - if distance_code&0x3 != 0 { - s.distance_code += v - } else { - s.distance_code -= v - if s.distance_code <= 0 { - /* A huge distance will cause a () soon. - This is a little faster than failing here. */ - s.distance_code = 0x7FFFFFFF - } - } - } -} - -func safeReadBitsMaybeZero(br *bitReader, n_bits uint32, val *uint32) bool { - if n_bits != 0 { - return safeReadBits(br, n_bits, val) - } else { - *val = 0 - return true - } -} - -/* Precondition: s->distance_code < 0. */ -func readDistanceInternal(safe int, s *Reader, br *bitReader) bool { - var distval int - var memento bitReaderState - var distance_tree []huffmanCode = []huffmanCode(s.distance_hgroup.htrees[s.dist_htree_index]) - if safe == 0 { - s.distance_code = int(readSymbol(distance_tree, br)) - } else { - var code uint32 - bitReaderSaveState(br, &memento) - if !safeReadSymbol(distance_tree, br, &code) { - return false - } - - s.distance_code = int(code) - } - - /* Convert the distance code to the actual distance by possibly - looking up past distances from the s->ringbuffer. */ - s.distance_context = 0 - - if s.distance_code&^0xF == 0 { - takeDistanceFromRingBuffer(s) - s.block_length[2]-- - return true - } - - distval = s.distance_code - int(s.num_direct_distance_codes) - if distval >= 0 { - var nbits uint32 - var postfix int - var offset int - if safe == 0 && (s.distance_postfix_bits == 0) { - nbits = (uint32(distval) >> 1) + 1 - offset = ((2 + (distval & 1)) << nbits) - 4 - s.distance_code = int(s.num_direct_distance_codes) + offset + int(readBits(br, nbits)) - } else { - /* This branch also works well when s->distance_postfix_bits == 0. 
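takeDistanceFromRingBuffer packs the short-code mapping into two 32-bit constants; here is the same mapping written out as plain tables. The real decoder additionally rejects non-positive results, which this sketch omits, and the helper name is mine.

package main

import "fmt"

// shortCodeDistance resolves distance short codes 0..15 against the four most
// recent distances. `recent` is ordered newest first.
func shortCodeDistance(code int, recent [4]int) int {
	// Which recent distance each code refers to, and the signed adjustment.
	ref := [16]int{0, 1, 2, 3, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1}
	delta := [16]int{0, 0, 0, 0, -1, 1, -2, 2, -3, 3, -1, 1, -2, 2, -3, 3}
	return recent[ref[code]] + delta[code]
}

func main() {
	recent := [4]int{3, 7, 5, 11}
	fmt.Println(shortCodeDistance(0, recent))  // 3: the last distance
	fmt.Println(shortCodeDistance(5, recent))  // 4: last distance + 1
	fmt.Println(shortCodeDistance(10, recent)) // 6: second-to-last - 1
}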
*/ - var bits uint32 - postfix = distval & s.distance_postfix_mask - distval >>= s.distance_postfix_bits - nbits = (uint32(distval) >> 1) + 1 - if safe != 0 { - if !safeReadBitsMaybeZero(br, nbits, &bits) { - s.distance_code = -1 /* Restore precondition. */ - bitReaderRestoreState(br, &memento) - return false - } - } else { - bits = readBits(br, nbits) - } - - offset = ((2 + (distval & 1)) << nbits) - 4 - s.distance_code = int(s.num_direct_distance_codes) + ((offset + int(bits)) << s.distance_postfix_bits) + postfix - } - } - - s.distance_code = s.distance_code - numDistanceShortCodes + 1 - s.block_length[2]-- - return true -} - -func readDistance(s *Reader, br *bitReader) { - readDistanceInternal(0, s, br) -} - -func safeReadDistance(s *Reader, br *bitReader) bool { - return readDistanceInternal(1, s, br) -} - -func readCommandInternal(safe int, s *Reader, br *bitReader, insert_length *int) bool { - var cmd_code uint32 - var insert_len_extra uint32 = 0 - var copy_length uint32 - var v cmdLutElement - var memento bitReaderState - if safe == 0 { - cmd_code = readSymbol(s.htree_command, br) - } else { - bitReaderSaveState(br, &memento) - if !safeReadSymbol(s.htree_command, br, &cmd_code) { - return false - } - } - - v = kCmdLut[cmd_code] - s.distance_code = int(v.distance_code) - s.distance_context = int(v.context) - s.dist_htree_index = s.dist_context_map_slice[s.distance_context] - *insert_length = int(v.insert_len_offset) - if safe == 0 { - if v.insert_len_extra_bits != 0 { - insert_len_extra = readBits(br, uint32(v.insert_len_extra_bits)) - } - - copy_length = readBits(br, uint32(v.copy_len_extra_bits)) - } else { - if !safeReadBitsMaybeZero(br, uint32(v.insert_len_extra_bits), &insert_len_extra) || !safeReadBitsMaybeZero(br, uint32(v.copy_len_extra_bits), ©_length) { - bitReaderRestoreState(br, &memento) - return false - } - } - - s.copy_length = int(copy_length) + int(v.copy_len_offset) - s.block_length[1]-- - *insert_length += int(insert_len_extra) - return true -} - -func readCommand(s *Reader, br *bitReader, insert_length *int) { - readCommandInternal(0, s, br, insert_length) -} - -func safeReadCommand(s *Reader, br *bitReader, insert_length *int) bool { - return readCommandInternal(1, s, br, insert_length) -} - -func checkInputAmountMaybeSafe(safe int, br *bitReader, num uint) bool { - if safe != 0 { - return true - } - - return checkInputAmount(br, num) -} - -func processCommandsInternal(safe int, s *Reader) int { - var pos int = s.pos - var i int = s.loop_counter - var result int = decoderSuccess - var br *bitReader = &s.br - var hc []huffmanCode - - if !checkInputAmountMaybeSafe(safe, br, 28) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - - if safe == 0 { - warmupBitReader(br) - } - - /* Jump into state machine. 
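For symbols past the short and direct distance codes, readDistanceInternal recombines a postfix, a magnitude and freshly read extra bits. Below is the same arithmetic as a pure function; the helper and parameter order are mine, `extra` stands in for the bits the decoder reads from the stream, and `ndirect` is the NDIRECT value from the meta-block header.

package main

import "fmt"

// longDistance converts a distance symbol >= 16+ndirect into a distance.
func longDistance(symbol, npostfix, ndirect, extra uint32) uint32 {
	x := symbol - 16 - ndirect
	postfix := x & ((1 << npostfix) - 1)
	hcode := x >> npostfix
	nbits := (hcode >> 1) + 1 // number of extra bits to read
	offset := ((2 + (hcode & 1)) << nbits) - 4
	return ((offset + extra) << npostfix) + postfix + ndirect + 1
}

func main() {
	// With NPOSTFIX=0 and NDIRECT=0, symbol 16 covers distances 1..2 and
	// symbol 17 covers distances 3..4, selected by one extra bit.
	fmt.Println(longDistance(16, 0, 0, 0)) // 1
	fmt.Println(longDistance(17, 0, 0, 1)) // 4
}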
*/ - if s.state == stateCommandBegin { - goto CommandBegin - } else if s.state == stateCommandInner { - goto CommandInner - } else if s.state == stateCommandPostDecodeLiterals { - goto CommandPostDecodeLiterals - } else if s.state == stateCommandPostWrapCopy { - goto CommandPostWrapCopy - } else { - return decoderErrorUnreachable - } - -CommandBegin: - if safe != 0 { - s.state = stateCommandBegin - } - - if !checkInputAmountMaybeSafe(safe, br, 28) { /* 156 bits + 7 bytes */ - s.state = stateCommandBegin - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - - if s.block_length[1] == 0 { - if safe != 0 { - if !safeDecodeCommandBlockSwitch(s) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - } else { - decodeCommandBlockSwitch(s) - } - - goto CommandBegin - } - - /* Read the insert/copy length in the command. */ - if safe != 0 { - if !safeReadCommand(s, br, &i) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - } else { - readCommand(s, br, &i) - } - - if i == 0 { - goto CommandPostDecodeLiterals - } - - s.meta_block_remaining_len -= i - -CommandInner: - if safe != 0 { - s.state = stateCommandInner - } - - /* Read the literals in the command. */ - if s.trivial_literal_context != 0 { - var bits uint32 - var value uint32 - preloadSymbol(safe, s.literal_htree, br, &bits, &value) - for { - if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ - s.state = stateCommandInner - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - - if s.block_length[0] == 0 { - if safe != 0 { - if !safeDecodeLiteralBlockSwitch(s) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - } else { - decodeLiteralBlockSwitch(s) - } - - preloadSymbol(safe, s.literal_htree, br, &bits, &value) - if s.trivial_literal_context == 0 { - goto CommandInner - } - } - - if safe == 0 { - s.ringbuffer[pos] = byte(readPreloadedSymbol(s.literal_htree, br, &bits, &value)) - } else { - var literal uint32 - if !safeReadSymbol(s.literal_htree, br, &literal) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - - s.ringbuffer[pos] = byte(literal) - } - - s.block_length[0]-- - pos++ - if pos == s.ringbuffer_size { - s.state = stateCommandInnerWrite - i-- - goto saveStateAndReturn - } - i-- - if i == 0 { - break - } - } - } else { - var p1 byte = s.ringbuffer[(pos-1)&s.ringbuffer_mask] - var p2 byte = s.ringbuffer[(pos-2)&s.ringbuffer_mask] - for { - var context byte - if !checkInputAmountMaybeSafe(safe, br, 28) { /* 162 bits + 7 bytes */ - s.state = stateCommandInner - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - - if s.block_length[0] == 0 { - if safe != 0 { - if !safeDecodeLiteralBlockSwitch(s) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - } else { - decodeLiteralBlockSwitch(s) - } - - if s.trivial_literal_context != 0 { - goto CommandInner - } - } - - context = getContext(p1, p2, s.context_lookup) - hc = []huffmanCode(s.literal_hgroup.htrees[s.context_map_slice[context]]) - p2 = p1 - if safe == 0 { - p1 = byte(readSymbol(hc, br)) - } else { - var literal uint32 - if !safeReadSymbol(hc, br, &literal) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - - p1 = byte(literal) - } - - s.ringbuffer[pos] = p1 - s.block_length[0]-- - pos++ - if pos == s.ringbuffer_size { - s.state = stateCommandInnerWrite - i-- - goto saveStateAndReturn - } - i-- - if i == 0 { - break - } - } - } - - if s.meta_block_remaining_len <= 0 { - s.state = stateMetablockDone - goto saveStateAndReturn - } - 
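In the non-trivial branch above, the two previously decoded bytes choose a context, and the context map chooses the tree. A reduced sketch of that selection using the MSB6 context mode (top six bits of the previous byte); Brotli's other modes (LSB6, UTF8, Signed) are table-driven in the real decoder, and the helper name here is mine.

package main

import "fmt"

// pickLiteralModel returns the Huffman-tree index used for the next literal,
// given the two previous bytes and the literal context map of the current
// block type (64 entries).
func pickLiteralModel(p1, p2 byte, contextMap []byte) int {
	ctx := int(p1 >> 2) // MSB6 context: 0..63 from the previous byte
	_ = p2              // MSB6 ignores the second-previous byte; other modes use it
	return int(contextMap[ctx])
}

func main() {
	cm := make([]byte, 64)
	for i := 32; i < 64; i++ {
		cm[i] = 1 // contexts 32..63 share a second tree
	}
	fmt.Println(pickLiteralModel(0x41, 0x20, cm)) // 0x41>>2 = 16 -> tree 0
	fmt.Println(pickLiteralModel(0xF0, 0x00, cm)) // 0xF0>>2 = 60 -> tree 1
}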
-CommandPostDecodeLiterals: - if safe != 0 { - s.state = stateCommandPostDecodeLiterals - } - - if s.distance_code >= 0 { - /* Implicit distance case. */ - if s.distance_code != 0 { - s.distance_context = 0 - } else { - s.distance_context = 1 - } - - s.dist_rb_idx-- - s.distance_code = s.dist_rb[s.dist_rb_idx&3] - } else { - /* Read distance code in the command, unless it was implicitly zero. */ - if s.block_length[2] == 0 { - if safe != 0 { - if !safeDecodeDistanceBlockSwitch(s) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - } else { - decodeDistanceBlockSwitch(s) - } - } - - if safe != 0 { - if !safeReadDistance(s, br) { - result = decoderNeedsMoreInput - goto saveStateAndReturn - } - } else { - readDistance(s, br) - } - } - - if s.max_distance != s.max_backward_distance { - if pos < s.max_backward_distance { - s.max_distance = pos - } else { - s.max_distance = s.max_backward_distance - } - } - - i = s.copy_length - - /* Apply copy of LZ77 back-reference, or static dictionary reference if - the distance is larger than the max LZ77 distance */ - if s.distance_code > s.max_distance { - /* The maximum allowed distance is BROTLI_MAX_ALLOWED_DISTANCE = 0x7FFFFFFC. - With this choice, no signed overflow can occur after decoding - a special distance code (e.g., after adding 3 to the last distance). */ - if s.distance_code > maxAllowedDistance { - return decoderErrorFormatDistance - } - - if i >= minDictionaryWordLength && i <= maxDictionaryWordLength { - var address int = s.distance_code - s.max_distance - 1 - var words *dictionary = s.dictionary - var trans *transforms = s.transforms - var offset int = int(s.dictionary.offsets_by_length[i]) - var shift uint32 = uint32(s.dictionary.size_bits_by_length[i]) - var mask int = int(bitMask(shift)) - var word_idx int = address & mask - var transform_idx int = address >> shift - - /* Compensate double distance-ring-buffer roll. */ - s.dist_rb_idx += s.distance_context - - offset += word_idx * i - if words.data == nil { - return decoderErrorDictionaryNotSet - } - - if transform_idx < int(trans.num_transforms) { - word := words.data[offset:] - var len int = i - if transform_idx == int(trans.cutOffTransforms[0]) { - copy(s.ringbuffer[pos:], word[:uint(len)]) - } else { - len = transformDictionaryWord(s.ringbuffer[pos:], word, int(len), trans, transform_idx) - } - - pos += int(len) - s.meta_block_remaining_len -= int(len) - if pos >= s.ringbuffer_size { - s.state = stateCommandPostWrite1 - goto saveStateAndReturn - } - } else { - return decoderErrorFormatTransform - } - } else { - return decoderErrorFormatDictionary - } - } else { - var src_start int = (pos - s.distance_code) & s.ringbuffer_mask - copy_dst := s.ringbuffer[pos:] - copy_src := s.ringbuffer[src_start:] - var dst_end int = pos + i - var src_end int = src_start + i - - /* Update the recent distances cache. */ - s.dist_rb[s.dist_rb_idx&3] = s.distance_code - - s.dist_rb_idx++ - s.meta_block_remaining_len -= i - - /* There are 32+ bytes of slack in the ring-buffer allocation. - Also, we have 16 short codes, that make these 16 bytes irrelevant - in the ring-buffer. Let's copy over them as a first guess. */ - copy(copy_dst, copy_src[:16]) - - if src_end > pos && dst_end > src_start { - /* Regions intersect. */ - goto CommandPostWrapCopy - } - - if dst_end >= s.ringbuffer_size || src_end >= s.ringbuffer_size { - /* At least one region wraps. 
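The copy logic above is careful about overlap: when the distance is smaller than the copy length, the source and destination regions intersect and bytes must be reused as they are produced, which is what the CommandPostWrapCopy fallback guarantees. A minimal append-based illustration of that LZ77 semantics (not the ring-buffer code itself):

package main

import "fmt"

// lz77Copy appends `length` bytes starting `distance` bytes back in `out`,
// byte by byte, so an overlapping reference repeats freshly written data.
func lz77Copy(out []byte, distance, length int) []byte {
	start := len(out) - distance
	for i := 0; i < length; i++ {
		out = append(out, out[start+i])
	}
	return out
}

func main() {
	out := []byte("ab")
	out = lz77Copy(out, 2, 6)
	fmt.Printf("%s\n", out) // abababab: "ab" repeated by an overlapping copy
}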
*/ - goto CommandPostWrapCopy - } - - pos += i - if i > 16 { - if i > 32 { - copy(copy_dst[16:], copy_src[16:][:uint(i-16)]) - } else { - /* This branch covers about 45% cases. - Fixed size short copy allows more compiler optimizations. */ - copy(copy_dst[16:], copy_src[16:][:16]) - } - } - } - - if s.meta_block_remaining_len <= 0 { - /* Next metablock, if any. */ - s.state = stateMetablockDone - - goto saveStateAndReturn - } else { - goto CommandBegin - } -CommandPostWrapCopy: - { - var wrap_guard int = s.ringbuffer_size - pos - for { - i-- - if i < 0 { - break - } - s.ringbuffer[pos] = s.ringbuffer[(pos-s.distance_code)&s.ringbuffer_mask] - pos++ - wrap_guard-- - if wrap_guard == 0 { - s.state = stateCommandPostWrite2 - goto saveStateAndReturn - } - } - } - - if s.meta_block_remaining_len <= 0 { - /* Next metablock, if any. */ - s.state = stateMetablockDone - - goto saveStateAndReturn - } else { - goto CommandBegin - } - -saveStateAndReturn: - s.pos = pos - s.loop_counter = i - return result -} - -func processCommands(s *Reader) int { - return processCommandsInternal(0, s) -} - -func safeProcessCommands(s *Reader) int { - return processCommandsInternal(1, s) -} - -/* Returns the maximum number of distance symbols which can only represent - distances not exceeding BROTLI_MAX_ALLOWED_DISTANCE. */ - -var maxDistanceSymbol_bound = [maxNpostfix + 1]uint32{0, 4, 12, 28} -var maxDistanceSymbol_diff = [maxNpostfix + 1]uint32{73, 126, 228, 424} - -func maxDistanceSymbol(ndirect uint32, npostfix uint32) uint32 { - var postfix uint32 = 1 << npostfix - if ndirect < maxDistanceSymbol_bound[npostfix] { - return ndirect + maxDistanceSymbol_diff[npostfix] + postfix - } else if ndirect > maxDistanceSymbol_bound[npostfix]+postfix { - return ndirect + maxDistanceSymbol_diff[npostfix] - } else { - return maxDistanceSymbol_bound[npostfix] + maxDistanceSymbol_diff[npostfix] + postfix - } -} - -/* Invariant: input stream is never overconsumed: - - invalid input implies that the whole stream is invalid -> any amount of - input could be read and discarded - - when result is "needs more input", then at least one more byte is REQUIRED - to complete decoding; all input data MUST be consumed by decoder, so - client could swap the input buffer - - when result is "needs more output" decoder MUST ensure that it doesn't - hold more than 7 bits in bit reader; this saves client from swapping input - buffer ahead of time - - when result is "success" decoder MUST return all unused data back to input - buffer; this is possible because the invariant is held on enter */ -func decoderDecompressStream(s *Reader, available_in *uint, next_in *[]byte, available_out *uint, next_out *[]byte) int { - var result int = decoderSuccess - var br *bitReader = &s.br - - /* Do not try to process further in a case of unrecoverable error. */ - if int(s.error_code) < 0 { - return decoderResultError - } - - if *available_out != 0 && (next_out == nil || *next_out == nil) { - return saveErrorCode(s, decoderErrorInvalidArguments) - } - - if *available_out == 0 { - next_out = nil - } - if s.buffer_length == 0 { /* Just connect bit reader to input stream. */ - br.input_len = *available_in - br.input = *next_in - br.byte_pos = 0 - } else { - /* At least one byte of input is required. More than one byte of input may - be required to complete the transaction -> reading more data must be - done in a loop -> do it in a main loop. 
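maxDistanceSymbol caps the distance alphabet for the large-window variant; the full alphabet it is capping follows directly from the header parameters. A small sketch of that size calculation, assuming the RFC 7932 layout of 16 short codes, NDIRECT direct codes and 48<<NPOSTFIX extra-bit codes; the helper name is mine.

package main

import "fmt"

// distanceAlphabetSize returns the number of distance symbols implied by the
// NPOSTFIX/NDIRECT meta-block parameters.
func distanceAlphabetSize(npostfix, ndirect uint32) uint32 {
	return 16 + ndirect + (48 << npostfix)
}

func main() {
	fmt.Println(distanceAlphabetSize(0, 0))   // 64, the default layout
	fmt.Println(distanceAlphabetSize(3, 120)) // 520
}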
*/ - result = decoderNeedsMoreInput - - br.input = s.buffer.u8[:] - br.byte_pos = 0 - } - - /* State machine */ - for { - if result != decoderSuccess { - /* Error, needs more input/output. */ - if result == decoderNeedsMoreInput { - if s.ringbuffer != nil { /* Pro-actively push output. */ - var intermediate_result int = writeRingBuffer(s, available_out, next_out, nil, true) - - /* WriteRingBuffer checks s->meta_block_remaining_len validity. */ - if int(intermediate_result) < 0 { - result = intermediate_result - break - } - } - - if s.buffer_length != 0 { /* Used with internal buffer. */ - if br.byte_pos == br.input_len { - /* Successfully finished read transaction. - Accumulator contains less than 8 bits, because internal buffer - is expanded byte-by-byte until it is enough to complete read. */ - s.buffer_length = 0 - - /* Switch to input stream and restart. */ - result = decoderSuccess - - br.input_len = *available_in - br.input = *next_in - br.byte_pos = 0 - continue - } else if *available_in != 0 { - /* Not enough data in buffer, but can take one more byte from - input stream. */ - result = decoderSuccess - - s.buffer.u8[s.buffer_length] = (*next_in)[0] - s.buffer_length++ - br.input_len = uint(s.buffer_length) - *next_in = (*next_in)[1:] - (*available_in)-- - - /* Retry with more data in buffer. */ - continue - } - - /* Can't finish reading and no more input. */ - break - /* Input stream doesn't contain enough input. */ - } else { - /* Copy tail to internal buffer and return. */ - *next_in = br.input[br.byte_pos:] - - *available_in = br.input_len - br.byte_pos - for *available_in != 0 { - s.buffer.u8[s.buffer_length] = (*next_in)[0] - s.buffer_length++ - *next_in = (*next_in)[1:] - (*available_in)-- - } - - break - } - } - - /* Unreachable. */ - - /* Fail or needs more output. */ - if s.buffer_length != 0 { - /* Just consumed the buffered input and produced some output. Otherwise - it would result in "needs more input". Reset internal buffer. */ - s.buffer_length = 0 - } else { - /* Using input stream in last iteration. When decoder switches to input - stream it has less than 8 bits in accumulator, so it is safe to - return unused accumulator bits there. */ - bitReaderUnload(br) - - *available_in = br.input_len - br.byte_pos - *next_in = br.input[br.byte_pos:] - } - - break - } - - switch s.state { - /* Prepare to the first read. */ - case stateUninited: - if !warmupBitReader(br) { - result = decoderNeedsMoreInput - break - } - - /* Decode window size. */ - result = decodeWindowBits(s, br) /* Reads 1..8 bits. */ - if result != decoderSuccess { - break - } - - if s.large_window { - s.state = stateLargeWindowBits - break - } - - s.state = stateInitialize - - case stateLargeWindowBits: - if !safeReadBits(br, 6, &s.window_bits) { - result = decoderNeedsMoreInput - break - } - - if s.window_bits < largeMinWbits || s.window_bits > largeMaxWbits { - result = decoderErrorFormatWindowBits - break - } - - s.state = stateInitialize - fallthrough - - /* Maximum distance, see section 9.1. of the spec. */ - /* Fall through. */ - case stateInitialize: - s.max_backward_distance = (1 << s.window_bits) - windowGap - - /* Allocate memory for both block_type_trees and block_len_trees. */ - s.block_type_trees = make([]huffmanCode, (3 * (huffmanMaxSize258 + huffmanMaxSize26))) - - if s.block_type_trees == nil { - result = decoderErrorAllocBlockTypeTrees - break - } - - s.block_len_trees = s.block_type_trees[3*huffmanMaxSize258:] - - s.state = stateMetablockBegin - fallthrough - - /* Fall through. 
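stateInitialize derives the LZ77 history size from the window bits: the format reserves a small gap, so the usable history is (1 << WBITS) minus that gap. A one-line sketch, assuming windowGap = 16, which is how I read the constant used above:

package main

import "fmt"

// maxBackwardDistance returns how far back a reference may reach for a given
// window size exponent.
func maxBackwardDistance(windowBits uint) int {
	const windowGap = 16 // assumed value of the decoder's windowGap constant
	return (1 << windowBits) - windowGap
}

func main() {
	for _, w := range []uint{18, 22, 24} {
		fmt.Printf("WBITS=%d -> %d bytes of history\n", w, maxBackwardDistance(w))
	}
}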
*/ - case stateMetablockBegin: - decoderStateMetablockBegin(s) - - s.state = stateMetablockHeader - fallthrough - - /* Fall through. */ - case stateMetablockHeader: - result = decodeMetaBlockLength(s, br) - /* Reads 2 - 31 bits. */ - if result != decoderSuccess { - break - } - - if s.is_metadata != 0 || s.is_uncompressed != 0 { - if !bitReaderJumpToByteBoundary(br) { - result = decoderErrorFormatPadding1 - break - } - } - - if s.is_metadata != 0 { - s.state = stateMetadata - break - } - - if s.meta_block_remaining_len == 0 { - s.state = stateMetablockDone - break - } - - calculateRingBufferSize(s) - if s.is_uncompressed != 0 { - s.state = stateUncompressed - break - } - - s.loop_counter = 0 - s.state = stateHuffmanCode0 - - case stateUncompressed: - result = copyUncompressedBlockToOutput(available_out, next_out, nil, s) - if result == decoderSuccess { - s.state = stateMetablockDone - } - - case stateMetadata: - for ; s.meta_block_remaining_len > 0; s.meta_block_remaining_len-- { - var bits uint32 - - /* Read one byte and ignore it. */ - if !safeReadBits(br, 8, &bits) { - result = decoderNeedsMoreInput - break - } - } - - if result == decoderSuccess { - s.state = stateMetablockDone - } - - case stateHuffmanCode0: - if s.loop_counter >= 3 { - s.state = stateMetablockHeader2 - break - } - - /* Reads 1..11 bits. */ - result = decodeVarLenUint8(s, br, &s.num_block_types[s.loop_counter]) - - if result != decoderSuccess { - break - } - - s.num_block_types[s.loop_counter]++ - if s.num_block_types[s.loop_counter] < 2 { - s.loop_counter++ - break - } - - s.state = stateHuffmanCode1 - fallthrough - - case stateHuffmanCode1: - { - var alphabet_size uint32 = s.num_block_types[s.loop_counter] + 2 - var tree_offset int = s.loop_counter * huffmanMaxSize258 - result = readHuffmanCode(alphabet_size, alphabet_size, s.block_type_trees[tree_offset:], nil, s) - if result != decoderSuccess { - break - } - s.state = stateHuffmanCode2 - } - fallthrough - - case stateHuffmanCode2: - { - var alphabet_size uint32 = numBlockLenSymbols - var tree_offset int = s.loop_counter * huffmanMaxSize26 - result = readHuffmanCode(alphabet_size, alphabet_size, s.block_len_trees[tree_offset:], nil, s) - if result != decoderSuccess { - break - } - s.state = stateHuffmanCode3 - } - fallthrough - - case stateHuffmanCode3: - var tree_offset int = s.loop_counter * huffmanMaxSize26 - if !safeReadBlockLength(s, &s.block_length[s.loop_counter], s.block_len_trees[tree_offset:], br) { - result = decoderNeedsMoreInput - break - } - - s.loop_counter++ - s.state = stateHuffmanCode0 - - case stateMetablockHeader2: - { - var bits uint32 - if !safeReadBits(br, 6, &bits) { - result = decoderNeedsMoreInput - break - } - - s.distance_postfix_bits = bits & bitMask(2) - bits >>= 2 - s.num_direct_distance_codes = numDistanceShortCodes + (bits << s.distance_postfix_bits) - s.distance_postfix_mask = int(bitMask(s.distance_postfix_bits)) - s.context_modes = make([]byte, uint(s.num_block_types[0])) - if s.context_modes == nil { - result = decoderErrorAllocContextModes - break - } - - s.loop_counter = 0 - s.state = stateContextModes - } - fallthrough - - case stateContextModes: - result = readContextModes(s) - - if result != decoderSuccess { - break - } - - s.state = stateContextMap1 - fallthrough - - case stateContextMap1: - result = decodeContextMap(s.num_block_types[0]<= 3 { - prepareLiteralDecoding(s) - s.dist_context_map_slice = s.dist_context_map - s.htree_command = []huffmanCode(s.insert_copy_hgroup.htrees[0]) - if !ensureRingBuffer(s) { - result = 
decoderErrorAllocRingBuffer2 - break - } - - s.state = stateCommandBegin - } - - case stateCommandBegin, stateCommandInner, stateCommandPostDecodeLiterals, stateCommandPostWrapCopy: - result = processCommands(s) - - if result == decoderNeedsMoreInput { - result = safeProcessCommands(s) - } - - case stateCommandInnerWrite, stateCommandPostWrite1, stateCommandPostWrite2: - result = writeRingBuffer(s, available_out, next_out, nil, false) - - if result != decoderSuccess { - break - } - - wrapRingBuffer(s) - if s.ringbuffer_size == 1<
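Callers do not drive this state machine directly; the decoder being removed here is normally consumed through the package's io.Reader wrapper. A round trip through the paired constructors, brotli.NewWriter and brotli.NewReader, looks like this:

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/andybalholm/brotli"
)

func main() {
	var compressed bytes.Buffer

	// Compress a small payload with the default settings.
	w := brotli.NewWriter(&compressed)
	if _, err := w.Write([]byte("hello, brotli")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}

	// Decompress it again; Reader.Read drives the decode state machine shown above.
	r := brotli.NewReader(&compressed)
	out, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello, brotli
}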